hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
301116e135cdc96e7513a7feeafc579b995a7df4 | 48,460 | py | Python | Autocoders/Python/bin/codegen.py | sthagen/nasa-fprime | 7762d633d1c0728e68ef9217fb12a7c3070b61ac | [
"Apache-2.0"
] | 1 | 2022-03-15T16:17:15.000Z | 2022-03-15T16:17:15.000Z | Autocoders/Python/bin/codegen.py | sthagen/nasa-fprime | 7762d633d1c0728e68ef9217fb12a7c3070b61ac | [
"Apache-2.0"
] | null | null | null | Autocoders/Python/bin/codegen.py | sthagen/nasa-fprime | 7762d633d1c0728e68ef9217fb12a7c3070b61ac | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# ===============================================================================
# NAME: codegen.py
#
# DESCRIPTION: This script is used to generate components, ports and connectors
# from XML definition files.
#
# ===============================================================================
import glob
import logging
import os
import sys
import time
import traceback
from optparse import OptionParser
# Meta-model for Component only generation
from fprime_ac.models import CompFactory, PortFactory, Serialize, TopoFactory
# Parsers to read the XML
from fprime_ac.parsers import (
XmlComponentParser,
XmlParser,
XmlPortsParser,
XmlSerializeParser,
XmlTopologyParser,
)
from fprime_ac.utils import (
ArrayGenerator,
ConfigManager,
EnumGenerator,
Logger,
TopDictGenerator,
)
from fprime_ac.utils.buildroot import get_build_roots, search_for_file, set_build_roots
from fprime_ac.utils.version import get_fprime_version, get_project_version
from lxml import etree
# Generators to produce the code
try:
from fprime_ac.generators import GenFactory
except ImportError as ime:
print("[ERROR] Cheetah templates need to be generated.\n\t", ime, file=sys.stderr)
sys.exit(1)
# Flag to indicate verbose mode; expected to be set externally (e.g. by main).
VERBOSE = False
# Global loggers — presumably configured by fprime_ac.utils.Logger at startup; verify.
PRINT = logging.getLogger("output")
DEBUG = logging.getLogger("debug")
# Used by unit test to disable things.
TEST = False
# After catching exception this is set True
# so a clean up routine deletes *_ac_*.[ch]
# and *_ac_*.xml files within module.
ERROR = False
# Configuration manager object (singleton).
CONFIG = ConfigManager.ConfigManager.getInstance()
# Timestamp captured at import, used to build a default log file name.
SYS_TIME = time.gmtime()
# Deployment name; populated only when processing a topology XML.
DEPLOYMENT = None
# Version label for now
class Version:
    """Tool version label reported by the --version option."""

    id = "0.1"
    comment = "Initial prototype"


# Shared singleton-style instance used when building the usage/version string.
VERSION = Version()
def moveCAndHFiles(path_prefix):
    """
    Relocate every generated .c and .h file found in the current
    working directory into *path_prefix* (all .c files first,
    then all .h files).
    """
    for pattern in ("*.c", "*.h"):
        for generated in glob.glob(pattern):
            os.rename(generated, os.path.join(path_prefix, generated))
def cleanUp():
    """
    Remove partially generated autocode artifacts after a failure,
    then abort the process with a non-zero exit status.
    """
    PRINT.info("ERROR: Cleaning up partially created files.")
    # Every file matching one of these patterns is a partial product.
    leftovers = ("*_ac_*.new", "*_token.data", "*_opcode_offset.data")
    for pattern in leftovers:
        for partial in glob.glob(pattern):
            os.remove(partial)
    PRINT.info("Completed.")
    sys.exit(-1)
def pinit():
    """
    Initialize the option parser and return it.

    Builds an optparse.OptionParser carrying every codegen command-line
    option; the caller is expected to invoke parse_args() on the result.
    """
    current_dir = os.getcwd()
    usage = "usage: %prog [options] [xml_filename]"
    vers = f"%prog {VERSION.id} {VERSION.comment}"
    parser = OptionParser(usage, version=vers)
    # --- Input lookup and working directory ---
    parser.add_option(
        "-b",
        "--build_root",
        dest="build_root_flag",
        help="Enable search for environment variable BUILD_ROOT to establish absolute XML directory path",
        action="store_true",
        default=False,
    )
    parser.add_option(
        "-p",
        "--path",
        dest="work_path",
        type="string",
        help=f"Switch to new working directory (def: {current_dir}).",
        action="store",
        default=current_dir,
    )
    # --- Verbosity / generation modes ---
    parser.add_option(
        "-v",
        "--verbose",
        dest="verbose_flag",
        help="Enable verbose mode showing more runtime detail (def: False)",
        action="store_true",
        default=False,
    )
    parser.add_option(
        "-t",
        "--template",
        dest="impl_flag",
        help="Enable generation of *Impl_[hpp,cpp].template implementation template files (def: False)",
        action="store_true",
        default=False,
    )
    parser.add_option(
        "-u",
        "--unit-test",
        dest="unit_test",
        help="Enable generation of unit test component files (def: False)",
        action="store_true",
        default=False,
    )
    # --- Logging ---
    parser.add_option(
        "-l",
        "--logger",
        dest="logger",
        default="QUIET",
        help="Set the logging level <DEBUG | INFO | QUIET> (def: 'QUIET').",
    )
    parser.add_option(
        "-L",
        "--logger-output-file",
        dest="logger_output",
        default=None,
        help="Set the logger output file. (def: defaultgen.log).",
    )
    # --- Build-dependency output ---
    parser.add_option(
        "-d",
        "--dependency-file",
        dest="dependency_file",
        default=None,
        help="Set the output file for build dependencies",
    )
    # --- GDS dictionary generation ---
    parser.add_option(
        "-g",
        "--default_dict",
        dest="default_dict",
        help="Generate default GDS dictionary classes",
        action="store_true",
        default=False,
    )
    parser.add_option(
        "-x",
        "--xml_topology_dict",
        dest="xml_topology_dict",
        help="Generate XML GDS dictionary file",
        action="store_true",
        default=False,
    )
    parser.add_option(
        "-T",
        "--default_topology_dict",
        dest="default_topology_dict",
        help="Generate default GDS topology dictionary classes",
        action="store_true",
        default=False,
    )
    parser.add_option(
        "-o",
        "--dict_dir",
        dest="dict_dir",
        help="Output directory for dictionary. Needed for -g.",
        default=None,
    )
    # --- Documentation generation (HTML / MarkDown) ---
    parser.add_option(
        "-H",
        "--html_docs",
        dest="html_docs",
        help="Generate HTML docs for commands, telemetry, events, and parameters",
        action="store_true",
        default=False,
    )
    parser.add_option(
        "-D",
        "--html_doc_dir",
        dest="html_doc_dir",
        help="Directory for HTML documentation",
        default=None,
    )
    parser.add_option(
        "-m",
        "--md_docs",
        dest="md_docs",
        help="Generate MarkDown docs for commands, telemetry, events, and parameters",
        action="store_true",
        default=False,
    )
    parser.add_option(
        "-M",
        "--md_doc_dir",
        dest="md_doc_dir",
        help="Directory for MarkDown documentation",
        default=None,
    )
    # --- Topology generation tweaks ---
    parser.add_option(
        "-P",
        "--is_ptr",
        dest="is_ptr",
        help="Generate component ptr's in topology.",
        action="store_true",
        default=False,
    )
    parser.add_option(
        "-C",
        "--connect_only",
        dest="connect_only",
        help="Only generate port connections in topology.",
        action="store_true",
        default=False,
    )
    parser.add_option(
        "-r",
        "--gen_report",
        dest="gen_report",
        help="Generate reports on component interfaces",
        action="store_true",
        default=False,
    )
    # author = os.environ['USER']
    # parser.add_option("-a", "--author", dest="author", type="string",
    # help="Specify the new FSW author (def: %s)." % author,
    # action="store", default=author)
    # CONFIG = ConfigManager.ConfigManager.getInstance()
    # v = CONFIG.get('ipc','FSW_version_id') + '_' + time.strftime("%Y%m%d")
    # parser.add_option("-f", "--FSW_version_id", dest="fsw_ver", type="string",
    # help="Specify the version ID here (def: %s)." % v,
    # action="store", default=v)
    return parser
def generate_topology(the_parsed_topology_xml, xml_filename, opt):
    """
    Create the topology meta-model, configure the topology visitors, and
    generate the topology code products.  When dictionary options are
    enabled, also emits per-instance GDS dictionaries and/or the XML
    topology dictionary file.

    Returns the topology model so the caller can reuse it.
    Raises OSError if xml_filename does not follow the *Ai.xml convention.
    """
    DEBUG.debug(f"Topology xml type description file: {xml_filename}")
    generator = TopoFactory.TopoFactory.getInstance()
    # Base ID generation is only needed when producing dictionaries.
    if not (opt.default_topology_dict or opt.xml_topology_dict):
        generator.set_generate_ID(False)
    topology_model = generator.create(the_parsed_topology_xml)
    if opt.is_ptr:
        PRINT.info("Topology Components will be initialized as Pointers. ")
        topology_model.is_ptr = opt.is_ptr
    if opt.connect_only:
        PRINT.info("Only port connections will be generated for Topology.")
        topology_model.connect_only = opt.connect_only
    generator = GenFactory.GenFactory.getInstance()
    # Autocoder inputs must be named *Ai.xml; the base name seeds the
    # visitor instance names used below.
    if "Ai" in xml_filename:
        base = xml_filename.split("Ai")[0]
        h_instance_name = f"{base}_H"
        cpp_instance_name = f"{base}_Cpp"
        csv_instance_name = f"{base}_ID"
        cmd_html_instance_name = f"{base}_Cmd_HTML"
        channel_html_instance_name = f"{base}_Channel_HTML"
        event_html_instance_name = f"{base}_Event_HTML"
    else:
        PRINT.info("Missing Ai at end of file name...")
        raise OSError
    # Figure out which visitors to use (instanced vs. plain topology).
    if opt.default_topology_dict or opt.xml_topology_dict:
        generator.configureVisitor(
            h_instance_name, "InstanceTopologyHVisitor", True, True
        )
        generator.configureVisitor(
            cpp_instance_name, "InstanceTopologyCppVisitor", True, True
        )
    else:
        generator.configureVisitor(h_instance_name, "TopologyHVisitor", True, True)
        generator.configureVisitor(cpp_instance_name, "TopologyCppVisitor", True, True)
    # Used to generate base ID/base ID window CSV files
    generator.configureVisitor(csv_instance_name, "TopologyIDVisitor", True, True)
    # Used to generate HTML tables of ID's etc.
    if opt.default_topology_dict or opt.xml_topology_dict:
        generator.configureVisitor(
            cmd_html_instance_name, "InstanceTopologyCmdHTMLVisitor", True, True
        )
        # NOTE(review): "ChannelsTMLVisitor" looks like a typo for
        # "ChannelsHTMLVisitor" but must match the registered visitor name;
        # kept as-is.
        generator.configureVisitor(
            channel_html_instance_name, "InstanceTopologyChannelsTMLVisitor", True, True
        )
        generator.configureVisitor(
            event_html_instance_name, "InstanceTopologyEventsHTMLVisitor", True, True
        )
    # uses the topology model to process the items
    if opt.default_topology_dict or opt.xml_topology_dict:
        # create list of used parsed component xmls
        parsed_xml_dict = {}
        for comp in the_parsed_topology_xml.get_instances():
            if comp.get_type() in topology_model.get_base_id_dict():
                parsed_xml_dict[comp.get_type()] = comp.get_comp_xml()
            else:
                PRINT.info(
                    f"Components with type {comp.get_type()} aren't in the topology model."
                )
        # Hack to set up deployment path for instanced dictionaries
        # (if one exists remove old one)
        if opt.default_topology_dict:
            for build_root in get_build_roots():
                if not os.path.exists(os.path.join(build_root, DEPLOYMENT)):
                    continue
                os.environ["DICT_DIR"] = os.path.join(build_root, DEPLOYMENT, "py_dict")
                break
            else:
                # for/else: no build root contained the deployment
                raise FileNotFoundError(
                    f"{DEPLOYMENT} not found in any of: {get_build_roots()}"
                )
            dict_dir = os.environ["DICT_DIR"]
            PRINT.info(f"Removing old instanced topology dictionaries in: {dict_dir}")
            import shutil

            if os.path.exists(dict_dir):
                shutil.rmtree(dict_dir)
            PRINT.info(
                f"Overriding for instanced topology dictionaries the --dict_dir option with xml derived path: {dict_dir}"
            )
        # Generate a per-instance dictionary for each used component XML.
        xml_list = []
        for parsed_xml_type in parsed_xml_dict:
            if parsed_xml_dict[parsed_xml_type] is None:
                PRINT.info(
                    f"XML of type {parsed_xml_type} is being used, but has not been parsed correctly. Check if file exists or add xml file with the 'import_component_type' tag to the Topology file."
                )
                raise Exception()
            xml_list.append(parsed_xml_dict[parsed_xml_type])
            generate_component_instance_dictionary(
                parsed_xml_dict[parsed_xml_type], opt, topology_model
            )
        topology_model.set_instance_xml_list(xml_list)
        if opt.xml_topology_dict:
            topology_dict = etree.Element("dictionary")
            topology_dict.attrib["topology"] = the_parsed_topology_xml.get_name()
            topology_dict.attrib["framework_version"] = get_fprime_version().lstrip("v")
            topology_dict.attrib["project_version"] = get_project_version().lstrip("v")
            top_dict_gen = TopDictGenerator.TopDictGenerator(
                parsed_xml_dict, PRINT.debug
            )
            for comp in the_parsed_topology_xml.get_instances():
                comp_type = comp.get_type()
                comp_name = comp.get_name()
                comp_id = int(comp.get_base_id(), 0)
                PRINT.debug(f"Processing {comp_name} [{comp_type}] ({hex(comp_id)})")
                top_dict_gen.set_current_comp(comp)
                top_dict_gen.check_for_enum_xml()
                top_dict_gen.check_for_serial_xml()
                top_dict_gen.check_for_commands()
                top_dict_gen.check_for_channels()
                top_dict_gen.check_for_events()
                top_dict_gen.check_for_parameters()
                top_dict_gen.check_for_arrays()
            top_dict_gen.remove_duplicate_enums()
            topology_dict.append(top_dict_gen.get_enum_list())
            topology_dict.append(top_dict_gen.get_serializable_list())
            topology_dict.append(top_dict_gen.get_array_list())
            topology_dict.append(top_dict_gen.get_command_list())
            topology_dict.append(top_dict_gen.get_event_list())
            topology_dict.append(top_dict_gen.get_telemetry_list())
            topology_dict.append(top_dict_gen.get_parameter_list())
            fileName = the_parsed_topology_xml.get_xml_filename().replace(
                "Ai.xml", "Dictionary.xml"
            )
            PRINT.info(f"Generating XML dictionary {fileName}")
            # Binary mode keeps the on-disk encoding identical to the
            # source files; the context manager guarantees the handle is
            # flushed and closed (the original leaked it).
            with open(fileName, "wb") as fd:
                fd.write(etree.tostring(topology_dict, pretty_print=True))
    # The phase generators below each emit one slice of the output files.
    initFiles = generator.create("initFiles")
    includes1 = generator.create("includes1")
    public = generator.create("public")
    finishSource = generator.create("finishSource")
    #
    # Generate the source code products here.
    #
    # 1. Open all the files
    initFiles(topology_model)
    #
    # 2. Generate includes and static code here.
    includes1(topology_model)
    #
    # 3. Generate public function to instance components and connect them here.
    public(topology_model)
    #
    # 4. Generate final code here and close all files.
    finishSource(topology_model)
    return topology_model
def generate_component_instance_dictionary(
    the_parsed_component_xml, opt, topology_model
):
    """
    Generate GDS dictionary entries (commands, parameters, events,
    channels) for a single component instance within a topology.

    Raises OSError when topology_model is None.
    """
    global DEPLOYMENT
    # NOTE(review): DEPLOYMENT is declared global but never assigned in this
    # function — possibly vestigial; confirm before removing.
    #
    parsed_port_xml_list = []
    parsed_serializable_xml_list = []
    # uses the topology model to process the items
    # checks if the topology model exists
    if topology_model is None:
        PRINT.info(
            "Topology model was not specified. Please also input a topology model when running this command."
        )
        raise OSError
    # Parse every port type file referenced by the component.
    port_type_files_list = the_parsed_component_xml.get_port_type_files()
    for port_file in port_type_files_list:
        port_file = search_for_file("Port", port_file)
        xml_parser_obj = XmlPortsParser.XmlPortsParser(port_file)
        # print xml_parser_obj.get_args()
        parsed_port_xml_list.append(xml_parser_obj)
        del xml_parser_obj
    # Parse every serializable type file referenced by the component.
    serializable_type_files_list = (
        the_parsed_component_xml.get_serializable_type_files()
    )
    for serializable_file in serializable_type_files_list:
        serializable_file = search_for_file("Serializable", serializable_file)
        xml_parser_obj = XmlSerializeParser.XmlSerializeParser(
            serializable_file
        )  # Telemetry/Params can only use generated serializable types
        # check to make sure that the serializables don't have things that channels and parameters can't have
        # can't have external non-xml members
        if len(xml_parser_obj.get_include_header_files()):
            PRINT.info(
                f"ERROR: Component include serializables cannot use user-defined types. file: {serializable_file}"
            )
            sys.exit(-1)
        # print xml_parser_obj.get_args()
        parsed_serializable_xml_list.append(xml_parser_obj)
        del xml_parser_obj
    generator = CompFactory.CompFactory.getInstance()
    component_model = generator.create(
        the_parsed_component_xml, parsed_port_xml_list, parsed_serializable_xml_list
    )
    if opt.default_topology_dict:
        default_dict_generator = GenFactory.GenFactory.getInstance()
        # iterate through command instances
        default_dict_generator.configureVisitor(
            "Commands", "InstanceCommandVisitor", True, True
        )
        for command_model in component_model.get_commands():
            DEBUG.info(f"Processing command {command_model.get_mnemonic()}")
            defaultStartCmd = default_dict_generator.create("InstanceDictStart")
            defaultCmdHeader = default_dict_generator.create("InstanceDictHeader")
            defaultCmdBody = default_dict_generator.create("InstanceDictBody")
            defaultStartCmd(command_model, topology_model)
            defaultCmdHeader(command_model, topology_model)
            defaultCmdBody(command_model, topology_model)
        # Parameters reuse the command visitor machinery.
        for parameter_model in component_model.get_parameters():
            DEBUG.info(f"Processing parameter {parameter_model.get_name()}")
            defaultStartCmd = default_dict_generator.create("InstanceDictStart")
            defaultCmdHeader = default_dict_generator.create("InstanceDictHeader")
            defaultCmdBody = default_dict_generator.create("InstanceDictBody")
            defaultStartCmd(parameter_model, topology_model)
            defaultCmdHeader(parameter_model, topology_model)
            defaultCmdBody(parameter_model, topology_model)
        default_dict_generator = GenFactory.GenFactory.getInstance()
        # iterate through event instances
        default_dict_generator.configureVisitor(
            "Events", "InstanceEventVisitor", True, True
        )
        for event_model in component_model.get_events():
            DEBUG.info(f"Processing event {event_model.get_name()}")
            defaultStartEvent = default_dict_generator.create("InstanceDictStart")
            defaultEventHeader = default_dict_generator.create("InstanceDictHeader")
            defaultEventBody = default_dict_generator.create("InstanceDictBody")
            defaultStartEvent(event_model, topology_model)
            defaultEventHeader(event_model, topology_model)
            defaultEventBody(event_model, topology_model)
        default_dict_generator = GenFactory.GenFactory.getInstance()
        # iterate through channel instances
        default_dict_generator.configureVisitor(
            "Channels", "InstanceChannelVisitor", True, True
        )
        for channel_model in component_model.get_channels():
            DEBUG.info(f"Processing channel {channel_model.get_name()}")
            defaultStartChannel = default_dict_generator.create("InstanceDictStart")
            defaultChannelHeader = default_dict_generator.create("InstanceDictHeader")
            defaultChannelBody = default_dict_generator.create("InstanceDictBody")
            defaultStartChannel(channel_model, topology_model)
            defaultChannelHeader(channel_model, topology_model)
            defaultChannelBody(channel_model, topology_model)
def generate_component(
    the_parsed_component_xml, xml_filename, opt, topology_model=None
):
    """
    Creates a component meta-model, configures visitors and
    generates the component files. Nothing is returned.

    Optionally also writes an interface report, GDS dictionary classes,
    and HTML/MarkDown documentation depending on opt flags.
    Raises OSError when the file name lacks the Ai suffix or a required
    output directory option is missing.
    """
    parsed_port_xml_list = []
    if opt.gen_report:
        # Use a context manager so the report is flushed and closed even if
        # a later step raises (the original handle was never closed).
        with open(f"{xml_filename.replace('Ai.xml', '')}Report.txt", "w") as report_file:
            num_input_ports = 0
            num_output_ports = 0
            # Count ports
            for port in the_parsed_component_xml.get_ports():
                if port.get_direction() == "input":
                    num_input_ports = num_input_ports + int(port.get_max_number())
                if port.get_direction() == "output":
                    num_output_ports = num_output_ports + int(port.get_max_number())
            if len(the_parsed_component_xml.get_ports()):
                if num_input_ports:
                    report_file.write(f"Input Ports: {num_input_ports}\n")
                if num_output_ports:
                    report_file.write(f"Output Ports: {num_output_ports}\n")
            # Count regular commands
            commands = 0
            idList = ""
            if len(the_parsed_component_xml.get_commands()):
                for command in the_parsed_component_xml.get_commands():
                    commands += len(command.get_opcodes())
                    for opcode in command.get_opcodes():
                        idList += opcode + ","
            # Count parameter commands (each parameter contributes set and
            # save opcodes to the same totals)
            if len(the_parsed_component_xml.get_parameters()):
                for parameter in the_parsed_component_xml.get_parameters():
                    commands += len(parameter.get_set_opcodes())
                    for opcode in parameter.get_set_opcodes():
                        idList += opcode + ","
                    commands += len(parameter.get_save_opcodes())
                    for opcode in parameter.get_save_opcodes():
                        idList += opcode + ","
            if commands > 0:
                # idList[:-1] strips the trailing comma
                report_file.write(f"Commands: {commands}\n OpCodes: {idList[:-1]}\n")
            if len(the_parsed_component_xml.get_channels()):
                idList = ""
                channels = 0
                for channel in the_parsed_component_xml.get_channels():
                    channels += len(channel.get_ids())
                    for id in channel.get_ids():
                        idList += id + ","
                report_file.write(f"Channels: {channels}\n ChanIds: {idList[:-1]}\n")
            if len(the_parsed_component_xml.get_events()):
                idList = ""
                events = 0
                for event in the_parsed_component_xml.get_events():
                    events += len(event.get_ids())
                    for id in event.get_ids():
                        idList += id + ","
                report_file.write(f"Events: {events}\n EventIds: {idList[:-1]}\n")
            if len(the_parsed_component_xml.get_parameters()):
                idList = ""
                parameters = 0
                for parameter in the_parsed_component_xml.get_parameters():
                    parameters += len(parameter.get_ids())
                    for id in parameter.get_ids():
                        idList += id + ","
                report_file.write(
                    f"Parameters: {parameters}\n ParamIds: {idList[:-1]}\n"
                )
    #
    # Configure the meta-model for the component
    #
    port_type_files_list = the_parsed_component_xml.get_port_type_files()
    for port_file in port_type_files_list:
        port_file = search_for_file("Port", port_file)
        xml_parser_obj = XmlPortsParser.XmlPortsParser(port_file)
        parsed_port_xml_list.append(xml_parser_obj)
        del xml_parser_obj
    parsed_serializable_xml_list = []
    serializable_type_files_list = (
        the_parsed_component_xml.get_serializable_type_files()
    )
    for serializable_file in serializable_type_files_list:
        serializable_file = search_for_file("Serializable", serializable_file)
        xml_parser_obj = XmlSerializeParser.XmlSerializeParser(
            serializable_file
        )  # Telemetry/Params can only use generated serializable types
        # Serializables used by channels/parameters may not pull in
        # external (non-XML) user-defined types.
        if len(xml_parser_obj.get_include_header_files()):
            PRINT.info(
                f"ERROR: Component include serializables cannot use user-defined types. file: {serializable_file}"
            )
            sys.exit(-1)
        parsed_serializable_xml_list.append(xml_parser_obj)
        del xml_parser_obj
    generator = CompFactory.CompFactory.getInstance()
    component_model = generator.create(
        the_parsed_component_xml, parsed_port_xml_list, parsed_serializable_xml_list
    )
    #
    # Configure and create the visitors that will generate the code.
    #
    generator = GenFactory.GenFactory.getInstance()
    # The Ai base name seeds every visitor instance name below.
    if "Ai" in xml_filename:
        base = xml_filename.split("Ai")[0]
        h_instance_name = base + "_H"
        cpp_instance_name = base + "_Cpp"
        h_instance_name_tmpl = base + "_Impl_H"
        cpp_instance_name_tmpl = base + "_Impl_Cpp"
        h_instance_test_name = base + "_Test_H"
        cpp_instance_test_name = base + "_Test_Cpp"
        h_instance_gtest_name = base + "_GTest_H"
        cpp_instance_gtest_name = base + "_GTest_Cpp"
        h_instance_test_impl_name = base + "_TestImpl_H"
        cpp_instance_test_impl_name = base + "_TestImpl_Cpp"
    else:
        PRINT.info("Missing Ai at end of file name...")
        raise OSError
    # Select the visitor set for the requested generation mode.
    if opt.impl_flag:
        PRINT.info("Enabled generation of implementation template files...")
        generator.configureVisitor(h_instance_name_tmpl, "ImplHVisitor", True, True)
        generator.configureVisitor(cpp_instance_name_tmpl, "ImplCppVisitor", True, True)
    elif opt.unit_test:
        PRINT.info("Enabled generation of unit test component files...")
        generator.configureVisitor(
            h_instance_test_name, "ComponentTestHVisitor", True, True
        )
        generator.configureVisitor(
            cpp_instance_test_name, "ComponentTestCppVisitor", True, True
        )
        generator.configureVisitor(h_instance_gtest_name, "GTestHVisitor", True, True)
        generator.configureVisitor(
            cpp_instance_gtest_name, "GTestCppVisitor", True, True
        )
        generator.configureVisitor(
            h_instance_test_impl_name, "TestImplHVisitor", True, True
        )
        generator.configureVisitor(
            cpp_instance_test_impl_name, "TestImplCppVisitor", True, True
        )
    else:
        generator.configureVisitor(h_instance_name, "ComponentHVisitor", True, True)
        generator.configureVisitor(cpp_instance_name, "ComponentCppVisitor", True, True)
    #
    # The idea here is that each of these generators is used to create
    # a certain portion of each output file.
    #
    initFiles = generator.create("initFiles")
    startSource = generator.create("startSource")
    includes1 = generator.create("includes1")
    includes2 = generator.create("includes2")
    namespace = generator.create("namespace")
    public = generator.create("public")
    protected = generator.create("protected")
    private = generator.create("private")
    finishSource = generator.create("finishSource")
    #
    # Generate the source code products here.
    #
    # 1. Open all the files
    initFiles(component_model)
    #
    # 2. Produce caltech notice here and other starting stuff.
    startSource(component_model)
    #
    # 3. Generate includes that all files get here.
    includes1(component_model)
    #
    # 4. Generate includes from model that a specific here.
    includes2(component_model)
    #
    # 5. Generate start of namespace here.
    namespace(component_model)
    #
    # 6. Generate public class code here.
    public(component_model)
    #
    # 7. Generate protected class code here.
    protected(component_model)
    #
    # 8. Generate private class code here.
    private(component_model)
    #
    # 9. Generate final code here and close all files.
    finishSource(component_model)
    #
    # if requested, generate ground system dictionary
    if opt.default_dict:
        if opt.dict_dir is None:
            PRINT.info("Dictionary output directory not specified!")
            raise OSError
        os.environ["DICT_DIR"] = opt.dict_dir
        default_dict_generator = GenFactory.GenFactory.getInstance()
        # iterate through command instances
        default_dict_generator.configureVisitor(
            "Commands", "CommandVisitor", True, True
        )
        for command_model in component_model.get_commands():
            DEBUG.info(f"Processing command {command_model.get_mnemonic()}")
            defaultStartCmd = default_dict_generator.create("DictStart")
            defaultCmdHeader = default_dict_generator.create("DictHeader")
            defaultCmdBody = default_dict_generator.create("DictBody")
            defaultStartCmd(command_model)
            defaultCmdHeader(command_model)
            defaultCmdBody(command_model)
        # Parameters reuse the command visitor machinery.
        for parameter_model in component_model.get_parameters():
            DEBUG.info(f"Processing parameter {parameter_model.get_name()}")
            defaultStartCmd = default_dict_generator.create("DictStart")
            defaultCmdHeader = default_dict_generator.create("DictHeader")
            defaultCmdBody = default_dict_generator.create("DictBody")
            defaultStartCmd(parameter_model)
            defaultCmdHeader(parameter_model)
            defaultCmdBody(parameter_model)
        default_dict_generator = GenFactory.GenFactory.getInstance()
        # iterate through event instances
        default_dict_generator.configureVisitor("Events", "EventVisitor", True, True)
        for event_model in component_model.get_events():
            DEBUG.info(f"Processing event {event_model.get_name()}")
            defaultStartEvent = default_dict_generator.create("DictStart")
            defaultEventHeader = default_dict_generator.create("DictHeader")
            defaultEventBody = default_dict_generator.create("DictBody")
            defaultStartEvent(event_model)
            defaultEventHeader(event_model)
            defaultEventBody(event_model)
        default_dict_generator = GenFactory.GenFactory.getInstance()
        # iterate through channel instances
        default_dict_generator.configureVisitor(
            "Channels", "ChannelVisitor", True, True
        )
        for channel_model in component_model.get_channels():
            DEBUG.info(f"Processing channel {channel_model.get_name()}")
            defaultStartChannel = default_dict_generator.create("DictStart")
            defaultChannelHeader = default_dict_generator.create("DictHeader")
            defaultChannelBody = default_dict_generator.create("DictBody")
            defaultStartChannel(channel_model)
            defaultChannelHeader(channel_model)
            defaultChannelBody(channel_model)
    if opt.html_docs:
        if opt.html_doc_dir is None:
            PRINT.info("HTML documentation output directory not specified!")
            raise OSError
        os.environ["HTML_DOC_SUBDIR"] = opt.html_doc_dir
        html_doc_generator = GenFactory.GenFactory.getInstance()
        html_doc_generator.configureVisitor(
            base + "_Html", "HtmlDocVisitor", True, True
        )
        htmlStart = html_doc_generator.create("HtmlStart")
        htmlDoc = html_doc_generator.create("HtmlDoc")
        finisher = html_doc_generator.create("finishSource")
        htmlStart(component_model)
        htmlDoc(component_model)
        finisher(component_model)
    if opt.md_docs:
        if opt.md_doc_dir is None:
            PRINT.info("MD documentation output directory not specified!")
            raise OSError
        os.environ["MD_DOC_SUBDIR"] = opt.md_doc_dir
        md_doc_generator = GenFactory.GenFactory.getInstance()
        md_doc_generator.configureVisitor(base + "_Md", "MdDocVisitor", True, True)
        mdStart = md_doc_generator.create("MdStart")
        mdDoc = md_doc_generator.create("MdDoc")
        finisher = md_doc_generator.create("finishSource")
        mdStart(component_model)
        mdDoc(component_model)
        finisher(component_model)
def generate_port(the_parsed_port_xml, port_file):
    """
    Build a port meta-model from parsed XML and run the code-generation
    visitors that produce the port type files. Nothing is returned.

    Raises OSError if port_file does not follow the *Ai.xml convention.
    """
    DEBUG.debug(f"Port xml type description file: {port_file}")
    port_model = PortFactory.PortFactory.getInstance().create(the_parsed_port_xml)
    # The interface name seeds the visitor instance names.
    the_type = the_parsed_port_xml.get_interface().get_name()
    # Guard clause: autocoder inputs must be named *Ai.xml.
    if "Ai" not in port_file:
        PRINT.info("Missing Ai at end of file name...")
        raise OSError
    generator = GenFactory.GenFactory.getInstance()
    # NOTE(review): the H/Cpp visitor names appear swapped, but this matches
    # the original wiring — kept as-is to preserve behavior.
    generator.configureVisitor(f"{the_type}_H", "PortCppVisitor", True, True)
    generator.configureVisitor(f"{the_type}_Cpp", "PortHVisitor", True, True)
    # Create every phase generator first, then invoke them in sequence —
    # same creation and invocation order as before. Each phase emits one
    # slice of the output files: open, header notice, common includes,
    # model-specific includes, namespace open, public/protected/private
    # sections, and finally close.
    phases = [
        "initFiles",
        "startSource",
        "includes1",
        "includes2",
        "namespace",
        "public",
        "protected",
        "private",
        "finishSource",
    ]
    emitters = [generator.create(phase) for phase in phases]
    for emit in emitters:
        emit(port_model)
def generate_serializable(the_serial_xml, opt):
    """
    Creates a serializable meta-model class, configures visitors and
    generates the serializable class files. Nothing is returned.

    Raises OSError if the XML file name does not end in "Ai", or if a
    dictionary is requested but no dictionary output directory is given.
    """
    #
    # Configure the meta-model for the serializable here
    #
    f = the_serial_xml.get_xml_filename()
    DEBUG.debug(f"Serializable xml type description file: {f}")
    n = the_serial_xml.get_name()
    ns = the_serial_xml.get_namespace()
    c = the_serial_xml.get_comment()
    # Includes of other XML types (serializables, enums, arrays) make this
    # type unusable for stand-alone dictionary generation below.
    i = (
        the_serial_xml.get_includes()
        + the_serial_xml.get_include_enums()
        + the_serial_xml.get_include_arrays()
    )
    i2 = the_serial_xml.get_include_header_files()
    m = the_serial_xml.get_members()
    t = the_serial_xml.get_typeid()
    model = Serialize.Serialize(f, n, ns, c, i, i2, m, t)
    #
    # Configure each visitor here. The file stem must end in "Ai"
    # (e.g. "FooSerializableAi.xml").
    #
    t = f.split(".")[0][-2:]
    # NOTE: was a bitwise "&" on booleans; "and" is the intended operator.
    if "Ai" in f and t == "Ai":
        base = n
        h_instance_name = base + "_H"
        cpp_instance_name = base + "_Cpp"
    else:
        PRINT.info("Missing Ai at end of file name...")
        raise OSError
    #
    generator = GenFactory.GenFactory.getInstance()
    generator.configureVisitor(h_instance_name, "SerialCppVisitor", True, True)
    generator.configureVisitor(cpp_instance_name, "SerialHVisitor", True, True)
    # only generate if serializable is usable for dictionary. Can't have includes of other types
    if opt.default_dict:
        if len(i) != 0 or len(i2) != 0:
            PRINT.info(f"Dictionary: Skipping {f} because of external includes")
        else:
            # borrow source visitor pattern for serializable dictionary
            if opt.dict_dir is None:
                PRINT.info("Dictionary output directory not specified!")
                raise OSError
            os.environ["DICT_DIR"] = opt.dict_dir
            generator.configureVisitor("SerialDict", "SerializableVisitor", True, True)
    if opt.default_topology_dict:
        if len(i) != 0 or len(i2) != 0:
            PRINT.info(f"Dictionary: Skipping {f} because of external includes")
        else:
            # borrow source visitor pattern for serializable dictionary
            if opt.dict_dir is None:
                PRINT.info("Dictionary output directory not specified!")
                raise OSError
            os.environ["DICT_DIR"] = opt.dict_dir
            generator.configureVisitor(
                "SerialDict", "InstanceSerializableVisitor", True, True
            )
    #
    # Each generator emits one portion of every output file. Create all of
    # them first (preserving the original creation order), then invoke each
    # one in sequence:
    #   initFiles     - open all the files
    #   startSource   - notice and other starting stuff
    #   includes1/2   - common and model-specific includes
    #   namespace     - start of namespace
    #   public/protected/private - class sections
    #   finishSource  - final code, close all files
    #
    phases = [
        "initFiles",
        "startSource",
        "includes1",
        "includes2",
        "namespace",
        "public",
        "protected",
        "private",
        "finishSource",
    ]
    visitors = [generator.create(phase) for phase in phases]
    for visit in visitors:
        visit(model)
def generate_dependency_file(filename, target_file, subst_path, parser, the_type):
    """
    Write a make-style dependency file mapping the generated Ac.cpp target
    to the XML/header files it depends on.

    filename    -- path of the dependency file to write (its directory must exist)
    target_file -- the *Ai.xml file being processed (the target becomes its Ac.cpp)
    subst_path  -- path prefix replaced by $(BUILD_ROOT) in the target path
    parser      -- parsed XML object supplying the per-type include lists
    the_type    -- one of "interface", "component", "serializable",
                   "assembly" or "deployment"

    Exits the process on an unknown type or a missing output directory.
    """
    # verify directory exists for dependency file and is directory
    if not os.path.isdir(os.path.dirname(filename)):
        PRINT.info(
            f"ERROR: Dependency file path {os.path.dirname(filename)} does not exist!"
        )
        sys.exit(-1)
    # get working directory and normalize path separators (Windows safe)
    target_directory = os.getcwd().replace("\\", "/")
    target_file_local = target_file.replace("\\", "/").replace("Ai.xml", "Ac.cpp")
    subst_path_local = subst_path.replace("\\", "/")
    # normalize path to target file
    full_path = os.path.abspath(target_directory + "/" + target_file_local).replace(
        "\\", "/"
    )
    # if path to substitute is specified, replace with build root
    # (subst_path_local is always a str here; the guard is kept from the
    # original defensive logic)
    if subst_path_local is not None:
        full_path = full_path.replace(subst_path_local, "$(BUILD_ROOT)")
    # assemble list of files the target depends on BEFORE opening the output,
    # so an unrecognized type no longer leaves behind a truncated file
    if the_type == "interface":
        file_list = (
            parser.get_include_header_files()
            + parser.get_includes_serial_files()
            + parser.get_include_enum_files()
            + parser.get_include_array_files()
        )
    elif the_type == "component":
        file_list = (
            parser.get_port_type_files()
            + parser.get_header_files()
            + parser.get_serializable_type_files()
            + parser.get_imported_dictionary_files()
            + parser.get_enum_type_files()
            + parser.get_array_type_files()
        )
    elif the_type == "serializable":
        file_list = (
            parser.get_include_header_files()
            + parser.get_includes()
            + parser.get_include_enums()
            + parser.get_include_arrays()
        )
    elif the_type in ("assembly", "deployment"):
        # get list of dependency files from XML/header file list
        file_list = list(parser.get_comp_type_file_header_dict().keys())
    else:
        PRINT.info(f"ERROR: Unrecognized dependency type {the_type}!")
        sys.exit(-1)
    # "with" guarantees the file is closed even if a write fails
    with open(filename, "w") as dep_file:
        # write target to file
        dep_file.write(f"{full_path}:")
        # write dependencies
        for include in file_list:
            if subst_path_local is not None:
                full_path = "$(BUILD_ROOT)/" + include.replace("\\", "/")
            else:
                PRINT.info(
                    "ERROR: No build root to attach. Not sure how to generate dependency."
                )
                sys.exit(-1)
            dep_file.write(f"\\\n   {full_path} ")
        # carriage return
        dep_file.write("\n\n")
def main():
    """
    Main program: parse command-line options, then generate C++ (and
    optionally dictionary/dependency) files for each input XML file
    according to its detected type. Exits non-zero on any error.
    """
    global ERROR  # prevent local creation of variable
    global VERBOSE  # prevent local creation of variable
    global GEN_TEST_CODE  # indicate if test code should be generated
    global DEPLOYMENT  # deployment set in topology xml only and used to install new instance dicts
    ERROR = False
    # Sets up the initial (singleton) instance
    ConfigManager.ConfigManager.getInstance()
    Parser = pinit()
    (opt, args) = Parser.parse_args()
    VERBOSE = opt.verbose_flag
    # Check that the specified working directory exists. Remember, the
    # default working directory is the current working directory which
    # always exists. We are basically only checking for when the user
    # specifies an alternate working directory.
    if not os.path.exists(opt.work_path):
        Parser.error(f"Specified path does not exist ({opt.work_path})!")
    working_dir = opt.work_path
    # Get the current working directory so that we can return to it when
    # the program completes. We always want to return to the place where
    # we started.
    starting_directory = os.getcwd()
    os.chdir(working_dir)
    # Configure the logging: map the CLI log-level name to the logging
    # module's constant; "QUIET" disables stdout logging entirely.
    log_level = opt.logger.upper()
    log_level_dict = {
        "QUIET": None,
        "DEBUG": logging.DEBUG,
        "INFO": logging.INFO,
        "WARNING": logging.WARN,
        "ERROR": logging.ERROR,
        "CRITICAL": logging.CRITICAL,
    }
    stdout_enable = log_level_dict[log_level] is not None
    log_fd = opt.logger_output
    # For now no log file
    Logger.connectDebugLogger(log_level_dict[log_level], log_fd, stdout_enable)
    Logger.connectOutputLogger(log_fd)
    #
    # Parse the input Component XML file and create internal meta-model
    #
    if not args:
        PRINT.info(f"Usage: {sys.argv[0]} [options] xml_filename")
        return
    xml_filenames = args[0:]
    #
    # Check for BUILD_ROOT variable for XML port searches
    #
    if opt.build_root_flag:
        # Check for BUILD_ROOT env. variable
        if "BUILD_ROOT" not in os.environ:
            PRINT.info(
                "ERROR: The -b command option requires that BUILD_ROOT environmental variable be set to root build path..."
            )
            sys.exit(-1)
        set_build_roots(os.environ.get("BUILD_ROOT"))
    for xml_filename in xml_filenames:
        xml_type = XmlParser.XmlParser(xml_filename)()
        # Parser used later for optional dependency-file generation; stays
        # None for XML types (enum/array/invalid) that do not support it,
        # which previously caused a NameError or reuse of a stale parser.
        dependency_parser = None
        if xml_type == "component":
            DEBUG.info("Detected Component XML so Generating Component C++ Files...")
            the_parsed_component_xml = XmlComponentParser.XmlComponentParser(
                xml_filename
            )
            generate_component(
                the_parsed_component_xml, os.path.basename(xml_filename), opt
            )
            dependency_parser = the_parsed_component_xml
        elif xml_type == "interface":
            DEBUG.info("Detected Port type XML so Generating Port type C++ Files...")
            the_parsed_port_xml = XmlPortsParser.XmlPortsParser(xml_filename)
            generate_port(the_parsed_port_xml, os.path.basename(xml_filename))
            dependency_parser = the_parsed_port_xml
        elif xml_type == "serializable":
            DEBUG.info(
                "Detected Serializable XML so Generating Serializable C++ Files..."
            )
            the_serial_xml = XmlSerializeParser.XmlSerializeParser(xml_filename)
            generate_serializable(the_serial_xml, opt)
            dependency_parser = the_serial_xml
        elif xml_type == "assembly" or xml_type == "deployment":
            DEBUG.info("Detected Topology XML so Generating Topology C++ Files...")
            the_parsed_topology_xml = XmlTopologyParser.XmlTopologyParser(xml_filename)
            DEPLOYMENT = the_parsed_topology_xml.get_deployment()
            print("Found assembly or deployment named: %s\n" % DEPLOYMENT)
            generate_topology(
                the_parsed_topology_xml, os.path.basename(xml_filename), opt
            )
            dependency_parser = the_parsed_topology_xml
        elif xml_type == "enum":
            DEBUG.info("Detected Enum XML so Generating hpp, cpp, and py files...")
            curdir = os.getcwd()
            if EnumGenerator.generate_enum(xml_filename):
                ERROR = False
                PRINT.info(
                    f"Completed generating files for {xml_filename} Enum XML...."
                )
            else:
                ERROR = True
            os.chdir(curdir)
        elif xml_type == "array":
            DEBUG.info("Detected Array XML so Generating hpp, cpp, and py files...")
            curdir = os.getcwd()
            if ArrayGenerator.generate_array(xml_filename):
                ERROR = False
                PRINT.info(
                    f"Completed generating files for {xml_filename} Array XML..."
                )
            else:
                ERROR = True
            os.chdir(curdir)
        else:
            PRINT.info("Invalid XML found...this format not supported")
            ERROR = True
        # Only types that produced a parser above can emit dependencies.
        if opt.dependency_file is not None and dependency_parser is not None:
            if opt.build_root_flag:
                generate_dependency_file(
                    opt.dependency_file,
                    os.path.basename(xml_filename),
                    list(get_build_roots())[0],
                    dependency_parser,
                    xml_type,
                )
    # Always return to directory where we started.
    os.chdir(starting_directory)
    sys.exit(-1 if ERROR else 0)
if __name__ == "__main__":
    # Run the generator; on any unhandled failure, report it and exit non-zero.
    try:
        main()
    except Exception as unhandled:
        # Short message goes to stderr, full backtrace to stdout
        # (stream split kept as in the original tooling).
        print(unhandled, file=sys.stderr)
        traceback.print_exc(file=sys.stdout)
        sys.exit(-1)
| 36.245325 | 198 | 0.640631 |
30455c24d40d043c2b2e52a29833a6122193cce5 | 3,062 | py | Python | src/pycomposite/composite_decorator.py | BstLabs/py-composite | d839303de51000c9afd1568f63ba02e7bdebbc8c | [
"MIT"
] | 4 | 2022-03-10T12:43:53.000Z | 2022-03-11T21:22:16.000Z | src/pycomposite/composite_decorator.py | BstLabs/py-composite | d839303de51000c9afd1568f63ba02e7bdebbc8c | [
"MIT"
] | null | null | null | src/pycomposite/composite_decorator.py | BstLabs/py-composite | d839303de51000c9afd1568f63ba02e7bdebbc8c | [
"MIT"
] | null | null | null | from collections import deque
from functools import reduce
from inspect import getmembers, isfunction, signature
from typing import Any, Iterable, List
from deepmerge import always_merger
def _constructor(self, *parts: List[Iterable[Any]]) -> None:
    """Store the composite's child parts as-is (no copy is made)."""
    self._parts = parts
def _make_iterator(cls):
def _iterator(self):
# Simple depth-first composite Iterator
# Recursive version did not work for some mysterious reason
# This one proved to be more reliable
# Credit: https://stackoverflow.com/questions/26145678/implementing-a-depth-first-tree-iterator-in-python
stack = deque(self._parts)
while stack:
# Pop out the first element in the stack
part = stack.popleft()
if cls == type(part): # The same composite exactly
stack.extendleft(reversed(part._parts))
elif isinstance(part, cls) or not isinstance(part, Iterable):
yield part # derived classes presumably have overloads
else: # Iterable
stack.extendleft(reversed(part))
return _iterator
def _make_initializer(rt: type) -> Any:
return getattr(rt, "__origin__", rt)()
def _make_method(name: str, func: callable) -> callable:
def _make_reduce(m: str, rt: type) -> callable:
def _reduce_parts(self, *args, **kwargs) -> Any:
# self is iterable, results come out flattened
return reduce(
lambda acc, obj: always_merger.merge(
acc, getattr(obj, m)(*args, **kwargs)
)
if rt is dict
else acc + getattr(obj, m)(*args, **kwargs),
self,
_make_initializer(rt),
)
return _reduce_parts
def _make_foreach(m) -> callable:
def _foreach_parts(self, *args, **kwargs) -> callable:
# self is iterable, concrete functions invoked depth first
for obj in self:
getattr(obj, m)(*args, **kwargs)
return _foreach_parts
rt: type = signature(func).return_annotation
return _make_foreach(name) if rt is None else _make_reduce(name, rt)
# TODO: type annotation for parts (have to be descendants from the original class)
def composite(cls: type) -> type:
    """
    Generic class decorator to create a Composite from original class.
    Notes:
    1. the constructor does not make copy, so do not pass generators,
    if you plan to invoke more than one operation.
    2. it will return always flattened results of any operation.
    :param cls: original class
    :return: Composite version of original class
    """
    # NOTE(review): this also mutates the *decorated* class in place, on top
    # of building the new composite type below — looks redundant since the
    # new type gets its own __init__ via attrs; confirm intent.
    setattr(cls, "__init__", _constructor)
    # The composite subclasses only the FIRST base of the original class.
    base = cls.__bases__[0]
    # Wrap every public method so it fans out over the composite's parts
    # (broadcast for -> None methods, fold/merge for value-returning ones).
    attrs = {
        n: _make_method(n, f)
        for n, f in getmembers(cls, predicate=isfunction)
        if not n.startswith("_")
    }
    attrs["__init__"] = _constructor
    composite_cls = type(cls.__name__, (base,), attrs)
    # Depth-first iteration over the leaves of the composite tree.
    composite_cls.__iter__ = _make_iterator(composite_cls)
    return composite_cls
| 34.022222 | 113 | 0.635206 |
aebde630ab31c710b410f433d55102e723b65ac3 | 266 | py | Python | nipype/interfaces/dipy/__init__.py | Conxz/nipype | 1281723ae56eacd103597ff4081a205583706e62 | [
"Apache-2.0"
] | 2 | 2019-01-25T18:20:51.000Z | 2019-07-30T20:51:51.000Z | nipype/interfaces/dipy/__init__.py | Conxz/nipype | 1281723ae56eacd103597ff4081a205583706e62 | [
"Apache-2.0"
] | null | null | null | nipype/interfaces/dipy/__init__.py | Conxz/nipype | 1281723ae56eacd103597ff4081a205583706e62 | [
"Apache-2.0"
] | 2 | 2018-01-25T19:48:17.000Z | 2019-01-25T18:20:52.000Z | # -*- coding: utf-8 -*-
from .tracks import StreamlineTractography, TrackDensityMap
from .tensors import TensorMode, DTI
from .preprocess import Resample, Denoise
from .reconstruction import RESTORE, EstimateResponseSH, CSD
from .simulate import SimulateMultiTensor
| 38 | 60 | 0.815789 |
a3ff04e397d6dd6eb121fc0a85e3d479498c7f58 | 1,553 | py | Python | src/endplay/types/rank.py | dominicprice/endplay | 9c57a68f83c2044ac0d9863dc2814dcaf4a46664 | [
"MIT"
] | 4 | 2021-12-07T12:30:44.000Z | 2022-03-26T13:38:25.000Z | src/endplay/types/rank.py | dominicprice/endplay | 9c57a68f83c2044ac0d9863dc2814dcaf4a46664 | [
"MIT"
] | 12 | 2021-11-15T23:06:27.000Z | 2022-03-28T15:46:55.000Z | src/endplay/types/rank.py | dominicprice/endplay | 9c57a68f83c2044ac0d9863dc2814dcaf4a46664 | [
"MIT"
] | 1 | 2021-12-07T13:28:40.000Z | 2021-12-07T13:28:40.000Z | __all__ = ["Rank", "AlternateRank"]
from enum import IntEnum
class Rank(IntEnum):
    """
    Encodes the rank of a suit as a one-hot bit mask (two -> 0x0004 up to
    ace -> 0x4000). Some internal functions use the alternative 2-14
    encoding provided by AlternateRank.
    """
    R2 = 0x0004
    R3 = 0x0008
    R4 = 0x0010
    R5 = 0x0020
    R6 = 0x0040
    R7 = 0x0080
    R8 = 0x0100
    R9 = 0x0200
    RT = 0x0400
    RJ = 0x0800
    RQ = 0x1000
    RK = 0x2000
    RA = 0x4000

    @property
    def abbr(self) -> str:
        """Single-character abbreviation, e.g. 'A' for Rank.RA."""
        return self.name[1]

    @staticmethod
    def find(value: str) -> 'Rank':
        """Look up a rank from its one-character name, case-insensitively.

        Raises ValueError if *value* is not a valid rank character.
        """
        try:
            return Rank[f"R{value.upper()}"]
        except KeyError:
            raise ValueError(f"could not convert '{value}' to Rank")

    def to_alternate(self) -> 'AlternateRank':
        """Convert to the 2-14 encoding used by AlternateRank."""
        # The value is a single set bit, so its integer log2 is simply
        # bit_length() - 1; this replaces the original manual shift loop.
        return AlternateRank(self.value.bit_length() - 1)
class AlternateRank(IntEnum):
"""
Encodes the rank of a suit using the values 2-14. Used
for internal functions, for APIs use the Rank class.
"""
R2 = 2
R3 = 3
R4 = 4
R5 = 5
R6 = 6
R7 = 7
R8 = 8
R9 = 9
RT = 10
RJ = 11
RQ = 12
RK = 13
RA = 14
@property
def abbr(self) -> str:
return self.name[1]
@staticmethod
def find(value: str) -> 'AlternateRank':
try:
return AlternateRank[f"R{value.upper()}"]
except KeyError:
raise ValueError(f"could not convert '{value}' to AlternateRank")
def to_standard(self) -> Rank:
# Calculate integer power of 2
return Rank(2 << (self.value-1))
| 19.4125 | 69 | 0.611719 |
80660b42663f5c6e837b367e9a02a7fc35cf414e | 19,971 | py | Python | car/driver/py/drive.py | markcrump/jf | 7204abdbc09278fd208a448024c5bc0a60f3bda8 | [
"Apache-2.0"
] | 19 | 2019-03-04T21:35:00.000Z | 2021-10-09T14:48:36.000Z | car/driver/py/drive.py | markcrump/jf | 7204abdbc09278fd208a448024c5bc0a60f3bda8 | [
"Apache-2.0"
] | 14 | 2019-05-30T22:38:20.000Z | 2019-10-23T22:29:57.000Z | car/driver/py/drive.py | markcrump/jf | 7204abdbc09278fd208a448024c5bc0a60f3bda8 | [
"Apache-2.0"
] | 27 | 2019-03-05T18:01:19.000Z | 2021-03-19T11:50:31.000Z | #!/usr/bin/env python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This application pulls messages from the Cloud Pub/Sub API.
For more information, see the README.md .
"""
import argparse
import time
import json
import os
import datetime
import picamera
from collections import deque
from google.cloud import pubsub_v1
from google.cloud import storage
import six
import jwt
import ssl
import paho.mqtt.client as mqtt
from curtsies import Input
from robotderbycar import RobotDerbyCar
### for image processing
from PIL import Image
# Queue of pending (timestamp, action, value) tuples, populated by
# callback() and drained by the main loop.
action_queue = deque([])
# cloudTimestampMs of the newest command processed so far; older commands
# arriving out of order are dropped.
previous_command_timestamp = 0
# assigned mode from the most recent processed driving command - see drive-message.js for details
mode = "undefined"
sensor_rate = "undefined"
# If this is true, the car will be streaming sensor messages non stop, otherwise it will only send messages when asked to do so
stream_messages = False
print("*****************************************************")
print("*** Starting the car in NO message streaming mode ***")
print("*****************************************************")
# If this is true, then the car needs to send one sensor message to the server
# only used when stream_messages = False
send_next_message = False
# Set when the distance sensor aborted a forward drive; reported once in the
# next telemetry message, then cleared.
obstacle_found = False
# How many balls have been collected so far
balls_collected = 0
def callback(message):
    """Pub/Sub callback: parse a driving-command message and enqueue its actions.

    The payload is double-JSON-encoded by the server (json.dumps of an
    already-dumped string), so it is decoded twice here. Commands older than
    the most recently processed one are ignored. The message is always acked.
    Mutates the module-level command/state globals declared below.
    """
    global previous_command_timestamp
    global action_queue
    global mode
    global sensor_rate
    global stream_messages
    global balls_collected
    envelope = json.loads(message.data.decode('utf-8'))
    output = json.dumps(envelope)
    print("callback()<---------------- received msg: {}".format(output))
    #### process the data
    dict_data = json.loads(envelope)  # second decode: payload is double-encoded
    if 'cloudTimestampMs' in dict_data and 'actions' in dict_data and 'mode' in dict_data and 'sensorRate' in dict_data:
        mode = dict_data['mode']
        sensor_rate = dict_data['sensorRate']
        print("callback(): command sensorRate: {}".format(sensor_rate))
        print("callback(): command mode: {}".format(mode))
        if sensor_rate == 'onDemand':
            stream_messages = False
        if sensor_rate == 'continuous':
            stream_messages = True
        if 'ballCaptured' in dict_data:
            balls_collected += 1
        ### process only new commands and disregard old messages
        if dict_data['cloudTimestampMs'] > previous_command_timestamp:
            previous_command_timestamp = dict_data['cloudTimestampMs']
            # Iterate the action list directly instead of indexing with
            # range(len(...)) and re-looking keys up.
            for action_dict in dict_data['actions']:
                for key, value in action_dict.items():
                    new_action = previous_command_timestamp, key, value
                    print("callback(): new_action: {}".format(new_action))
                    action_queue.append(new_action)
        else:
            print('callback(): message received out of order. previous_command_timestamp: {}'.format(previous_command_timestamp) + '. Message ignored')
    else:
        print('callback(): message ignored. Missing necessary tokens: "cloudTimestampMs" or "actions" or "mode" or "sensorRate')
    message.ack()
def takephoto(project_id, bucket_id, cam_pos):
    """Capture a photo, upload it to a GCS bucket, and return its URLs.

    project_id -- GCP project owning the bucket
    bucket_id  -- GCS bucket name for uploaded frames
    cam_pos    -- "1" when the camera is mounted upright; any other value
                  flips the image both ways (upside-down mount)

    Returns (gcs_url, public_url). Relies on the module globals
    camera_horizontal_pixels / camera_vertical_pixels set at startup.
    The local image file is deleted after upload.
    """
    image_file_name = str(datetime.datetime.now())
    camera = picamera.PiCamera()
    # Always release the camera device, even if configuration or capture
    # fails; leaving it open would make every later PiCamera() call fail.
    try:
        camera.resolution = (int(camera_horizontal_pixels), int(camera_vertical_pixels))
        if cam_pos != "1":
            # Camera is mounted upside down: flip the image both ways.
            camera.vflip = True
            camera.hflip = True
        image_file_name = 'image' + image_file_name + '.jpg'
        image_file_name = image_file_name.replace(":", "")  # Strip out the colon from date time.
        image_file_name = image_file_name.replace(" ", "")  # Strip out the space from date time.
        print("takephoto(): image " + image_file_name)
        camera.capture(image_file_name)
    finally:
        camera.close()  # We need to close off the resources or we'll get an error.
    client = storage.Client(project=project_id)
    mybucket = client.bucket(bucket_id)
    myblob = mybucket.blob(image_file_name)
    print("takephoto(): uploading...")
    start_time = time.time()
    # See docs: http://google-cloud-python.readthedocs.io/en/latest/storage/blobs.html
    myblob.upload_from_filename(image_file_name, content_type='image/jpeg')
    print("takephoto(): completed upload in %s seconds" % (time.time() - start_time))
    # Remove file from local directory to avoid overflow
    os.remove(image_file_name)
    # Derive the gs:// form from the blob's REST path ("/b/<bucket>/o/<name>")
    url = myblob.public_url
    gcs_url = str(myblob.path).replace("/b/", "gs://")
    gcs_url = gcs_url.replace("/o/", "/")
    if isinstance(url, six.binary_type):
        url = url.decode('utf-8')
    if isinstance(gcs_url, six.binary_type):
        gcs_url = gcs_url.decode('utf-8')
    return gcs_url, url
def verifyEnv(var):
    """Return the value of environment variable *var*, or exit the process
    with a diagnostic if it is not set."""
    if var not in os.environ:
        print("The GCP '" + str(var) + "' Environment Variable has not been initialized. Terminating program")
        # join() fixes the original crash here: a KeysView cannot be
        # concatenated onto a string.
        print("Here are the available Environment Variables: " + ", ".join(os.environ.keys()))
        exit()
    else:
        return os.environ[var]
def create_jwt(project_id, private_key_file, algorithm):
    """Create a JWT (https://jwt.io) to establish an MQTT connection.

    The token carries issue time, a 60-minute expiry, and the GCP project
    as audience, and is signed with the given private key and algorithm.
    """
    with open(private_key_file, 'r') as key_file:
        private_key = key_file.read()
    token = {
        'iat': datetime.datetime.utcnow(),
        'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=60),
        'aud': project_id
    }
    print('create_jwt(): creating JWT using {} from private key file {}'.format(
        algorithm, private_key_file))
    return jwt.encode(token, private_key, algorithm=algorithm)
def error_str(rc):
    """Convert a Paho error to a human readable string."""
    description = mqtt.error_string(rc)
    return '{}: {}'.format(rc, description)
class Device(object):
    """Represents the state of a single device.

    Instances are wired up as paho-mqtt callbacks in the startup code, so
    each method signature must match what the paho client invokes.
    """
    def __init__(self):
        # True once the MQTT bridge has acknowledged the connection.
        self.connected = False
    def wait_for_connection(self, timeout):
        """Wait for the device to become connected.

        Polls once per second for up to *timeout* seconds and raises
        RuntimeError if the bridge never acknowledged the connection.
        """
        total_time = 0
        while not self.connected and total_time < timeout:
            time.sleep(1)
            total_time += 1
        if not self.connected:
            raise RuntimeError('Could not connect to MQTT bridge.')
    def on_connect(self, unused_client, unused_userdata, unused_flags, rc):
        """Callback for when a device connects."""
        print('on_connect(): connection Result:', error_str(rc))
        self.connected = True
    def on_disconnect(self, unused_client, unused_userdata, rc):
        """Callback for when a device disconnects."""
        print('on_disconnect(): disconnected:', error_str(rc))
        self.connected = False
    def on_publish(self, unused_client, unused_userdata, unused_mid):
        """Callback when the device receives a PUBACK from the MQTT bridge."""
        print('on_publish(): msg sent.')
    def on_subscribe(self, unused_client, unused_userdata, unused_mid,
                     granted_qos):
        """Callback when the device receives a SUBACK from the MQTT bridge."""
        print('on_subscribe(): subscribed: ', granted_qos)
        # 128 is the MQTT failure return code for a subscription request.
        if granted_qos[0] == 128:
            print('Subscription failed.')
    def on_message(self, unused_client, unused_userdata, message):
        """Callback when the device receives a message on a subscription."""
        payload = message.payload
        print('on_message(): received message \'{}\' on topic \'{}\' with Qos {}'.format(
            payload, message.topic, str(message.qos)))
        # The device will receive its latest config when it subscribes to the
        # config topic. If there is no configuration for the device, the device
        # will receive a config with an empty payload.
        if not payload:
            return
        # The config is passed in the payload of the message. In this example,
        # the server sends a serialized JSON string.
        data = json.loads(payload)
        # NOTE(review): the parsed config is not used beyond this point —
        # looks like a stub left for device-specific handling; confirm intent.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument('project', help='Your Google Cloud project ID')
    parser.add_argument('topic', help='Your PubSub topic name')
    parser.add_argument('--non-interactive', action="store_true", dest="nonInteractive", help='non-interactive mode')
    args = parser.parse_args()
    # All configuration comes from the environment; verifyEnv exits if a
    # variable is missing.
    project_var = verifyEnv("PROJECT")
    bucket_var = verifyEnv("CAR_CAMERA_BUCKET")
    region_var = verifyEnv("REGION")
    registry_id = verifyEnv("IOT_CORE_REGISTRY")
    logical_car_id = verifyEnv("CAR_ID")
    ball_color = verifyEnv("CAR_COLOR")
    device_id = verifyEnv("IOT_CORE_DEVICE_ID")
    carId = verifyEnv("CAR_ID")
    sensor_topic = verifyEnv("SENSOR_TOPIC")
    camera_position = verifyEnv("CAR_CAMERA_NORMAL")
    camera_horizontal_pixels = verifyEnv("HORIZONTAL_RESOLUTION_PIXELS")
    camera_vertical_pixels = verifyEnv("VERTICAL_RESOLUTION_PIXELS")
    dist_limit = verifyEnv("BARRIER_DAMPENING")
    counter = 1
    print("Project ID: " + project_var)
    print("Car ID: " + logical_car_id)
    print("Ball color: " + ball_color)
    print("Bucket: " + bucket_var)
    print("Image vertical resolution: " + camera_vertical_pixels)
    print("Image horizontal resolution: " + camera_horizontal_pixels)
    # Initialize Cloud Derby Car System and Sensors
    print("Initializing Cloud Derby Car...")
    myCar = RobotDerbyCar()
    print("Car Initialized.")
    # Create the MQTT client and connect to Cloud IoT.
    client = mqtt.Client(client_id=(
        'projects/{}/locations/{}/registries/{}/devices/{}'.format(project_var, region_var, registry_id, device_id)))
    # With Google Cloud IoT Core, the username field is ignored, and the
    # password field is used to transmit a JWT to authorize the device.
    client.username_pw_set(username='unused', password=create_jwt(project_var, "../rsa_private.pem", "RS256"))
    # Enable SSL/TLS support.
    client.tls_set(ca_certs="../roots.pem", tls_version=ssl.PROTOCOL_TLSv1_2)
    device = Device()
    client.on_connect = device.on_connect
    client.on_publish = device.on_publish
    client.on_disconnect = device.on_disconnect
    client.on_subscribe = device.on_subscribe
    client.on_message = device.on_message
    # Connect to the Google MQTT bridge.
    client.connect("mqtt.googleapis.com", int(443))
    client.loop_start()
    mqtt_telemetry_topic = '/devices/{}/events/{}'.format(device_id,sensor_topic)
    # Wait up to 5 seconds for the device to connect.
    device.wait_for_connection(5)
    # Subscribe to the command topic.
    subscriber = pubsub_v1.SubscriberClient()
    subscription_path = subscriber.subscription_path(args.project, args.topic)
    flow_control = pubsub_v1.types.FlowControl(max_messages=1)
    subscription = subscriber.subscribe(subscription_path, callback=callback, flow_control=flow_control)
    startup_time = int(time.time() * 1000)
    # Flag that indicates we are processing a series of actions received from the cloud - will not be sending any messages until all actions are executed
    action_sequence_complete = True
    # Main Loop
    try:
        if (args.nonInteractive is False):
            print("Initiating the GoPiGo processing logic in interactive mode. Press <ESC> at anytime to exit.\n")
            input_generator = Input(keynames="curtsies", sigint_event=True)
        else:
            print("Initiating the GoPiGo processing logic in non-interactive mode.\n")
        while True:
            # End loop on <ESC> key
            print("main(" + str(counter) + ")---> carId='" + carId + "' balls_collected='"+ str(balls_collected) +"' ball_color='" + ball_color + "' mode='" + mode + "' sensorRate='" + sensor_rate + "'")
            counter += 1
            if (args.nonInteractive is False):
                key = input_generator.send(1 / 20)
                if ((key is not None) and (key == '<ESC>')):
                    break
            if (mode=="automatic"):
                myCar.SetCarModeLED(myCar.GREEN)
            elif (mode=="manual"):
                myCar.SetCarModeLED(myCar.BLUE)
            elif (mode=="debug"):
                myCar.SetCarModeLED(myCar.RED)
            # process any new commands in the queue
            if (len(action_queue) > 0):
                action_sequence_complete = False
                # Processing older action first
                action = action_queue.popleft()
                # BUG FIX: the timestamp was previously wrapped in str() and
                # then compared with ">=" against the int startup_time, which
                # raises TypeError in Python 3. Compare ints directly.
                command_timestamp = action[0]
                # Only process commands that were received after time of startup.
                # We should only be processing commands when we haven't sent any data
                if (command_timestamp >= startup_time):
                    action_type = str(action[1])
                    action_value = action[2]
                    if (action_type == "driveForwardMm"):
                        print("main(): drive forward " + str(action_value) + " mm")
                        # drive() returns truthy when the distance sensor
                        # aborted the move because of an obstacle.
                        if (myCar.drive(int(action_value),dist_limit)):
                            send_next_message = True
                            obstacle_found = True
                    elif (action_type == "driveBackwardMm"):
                        print("main(): drive backward " + str(action_value) + " mm")
                        myCar.drive(int(action_value),dist_limit)
                    elif (action_type == "turnRight"):
                        print("main(): turn right by " + str(action_value) + " degrees")
                        myCar.turn_degrees(int(action_value))
                        time.sleep(0.5) # Short delay to prevent overlapping commands and car confusion
                    elif (action_type == "turnLeft"):
                        print("main(): turn left by " + str(action_value) + " degrees")
                        myCar.turn_degrees(int(action_value))
                        time.sleep(0.5) # Short delay to prevent overlapping commands and car confusion
                    elif (action_type == "setColor"):
                        print("main(): set color to " + str(action_value))
                        ball_color = str(action_value)
                        if (ball_color=="Red"):
                            myCar.SetBallModeLED(myCar.RED)
                        elif (ball_color=="Yellow"):
                            myCar.SetBallModeLED(myCar.YELLOW)
                        elif (ball_color=="Green"):
                            myCar.SetBallModeLED(myCar.GREEN)
                        elif (ball_color=="Blue"):
                            myCar.SetBallModeLED(myCar.BLUE)
                        else:
                            print("main(): Invalid ball color received")
                        print("main(): After changing the color of the ball, # of collected balls reset to 0")
                        balls_collected = 0
                    elif (action_type == "setSpeed"):
                        print("main(): set speed to " + str(action_value))
                        myCar.set_speed(int(action_value))
                    elif (action_type == "gripperPosition" and action_value == "open"):
                        print("main(): open gripper")
                        myCar.GripperOpen()
                    elif (action_type == "gripperPosition" and action_value == "close"):
                        print("main(): close gripper")
                        myCar.GripperClose()
                        time.sleep(0.3) # Short delay to prevent overlapping commands and car confusion
                    elif (action_type == "sendSensorMessage" and action_value == "true"):
                        send_next_message = True
                    else:
                        print("main(): received invalid action: " + str(action_type))
                else:
                    print("main(): stale messages received from before startup. Ignoring and only processing new commands")
                print("main()<--- completed action: '" + action[1] + " " + str(action[2]))
                if len(action_queue) == 0:
                    action_sequence_complete = True
                    print("main(): no more actions in the queue")
            ######### Once commands are processed collect picture, distance, voltage
            elif ((stream_messages or send_next_message) and action_sequence_complete):
                print("main(): stream_messages='" + str(stream_messages) + "' send_next_message='" + str(send_next_message) + "'")
                voltage = myCar.ReadBatteryVoltage()
                distance = myCar.ReadDistanceMM()
                print("main(): distance Sensor (mm): " + str(distance))
                # Sleep briefly before taking a photo to prevent blurry images
                myCar.SetCarStatusLED(myCar.YELLOW)
                time.sleep(0.1)
                gcs_image_url, public_image_url = takephoto(project_var,bucket_var, camera_position)
                print("main(): image URL: " + str(public_image_url))
                print("main(): publishing message")
                timestampMs = int(time.time() * 1000)
                carId = logical_car_id
                carState = {}
                carState["color"] = ball_color
                carState["batteryLeft"] = voltage
                # Need to keep count of balls collected
                carState["ballsCollected"] = balls_collected
                if (obstacle_found):
                    carState["obstacleFound"] = True
                    obstacle_found = False
                sensors = {}
                sensors["frontLaserDistanceMm"] = distance
                sensors["frontCameraImagePath"] = public_image_url
                sensors["frontCameraImagePathGCS"] = gcs_image_url
                data = {}
                data["timestampMs"] = timestampMs
                data["carId"] = carId
                data["carState"] = carState
                data["sensors"] = sensors
                # The server expects a double-JSON-encoded payload (see the
                # matching double json.loads in callback()).
                envelope = json.dumps(data)
                payload = json.dumps(envelope).encode('utf8')
                client.publish(mqtt_telemetry_topic, payload, qos=1)
                # In case we are in a single message sensorRate - mark this message as being sent to prevent more messages
                send_next_message = False
                print("main()----------------------> msg published to the cloud")
                myCar.SetCarStatusLED(myCar.GREEN)
            else:
                # Nothing to do: idle briefly before polling again.
                time.sleep(2)
    except Exception as e:
        print(
            'Exception(): listening for messages on {} threw an Exception: {}.'.format(subscription, e))
        raise
| 43.321041 | 207 | 0.597066 |
d6105bebddd0a1e3d60928ac6f5e32f990b5c7f7 | 8,603 | py | Python | main.py | BluBean/inky-impression-57-display | abb9ee077f6601f908681adf885568d7545db669 | [
"MIT"
] | 1 | 2022-03-28T22:05:58.000Z | 2022-03-28T22:05:58.000Z | main.py | BluBean/pimoroni-Inky-Impression-57-image-frame | abb9ee077f6601f908681adf885568d7545db669 | [
"MIT"
] | null | null | null | main.py | BluBean/pimoroni-Inky-Impression-57-image-frame | abb9ee077f6601f908681adf885568d7545db669 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import glob
from random import randrange
import signal
import RPi.GPIO as GPIO
import textwrap
from inky.auto import auto
try:
from PIL import Image, ImageDraw, ImageFont
except ImportError:
print("""This script requires PIL/Pillow, try:
sudo apt install python3-pil
""")
print("""
inky_frame.py - Display a image files on the E-Ink.
""")
# Gpio pins for each button (from top to bottom)
BUTTONS = [5, 6, 16, 24]
# These correspond to buttons A, B, C and D respectively
LABELS = ['A', 'B', 'C', 'D']
# Set up RPi.GPIO with the "BCM" numbering scheme
GPIO.setmode(GPIO.BCM)
# Buttons connect to ground when pressed, so we should set them up
# with a "PULL UP", which weakly pulls the input signal to 3.3V.
GPIO.setup(BUTTONS, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Auto-detect the attached Inky display (prompts the user if ambiguous).
inky = auto(ask_user=True, verbose=True)
def draw_multiple_line_text(image, text, font, text_color, text_start_height):
    """Render ``text`` onto ``image``, word-wrapped at 40 characters.

    Each wrapped line is horizontally centred; drawing starts at
    ``text_start_height`` and advances by each line's rendered height.

    Adapted from ubuntu's answer to "python PIL draw multiline text on
    image" (https://stackoverflow.com/a/7698300/395857).
    """
    drawer = ImageDraw.Draw(image)
    image_width, _image_height = image.size
    y_cursor = text_start_height
    for wrapped_line in textwrap.wrap(text, width=40):
        line_width, line_height = font.getsize(wrapped_line)
        x_centered = (image_width - line_width) / 2
        drawer.text((x_centered, y_cursor), wrapped_line, font=font, fill=text_color)
        y_cursor += line_height
class ImageFrame:
    """Manage and display categorised image files on the Inky e-ink panel.

    Images are discovered below ``real_path`` in the sub-folders
    ``albums``, ``artwork``, ``comics``, ``postcards`` and ``default``.
    One category is active at a time (``self.images``); the hardware
    buttons switch category and step/randomise the displayed image.
    """

    # Class-level defaults; real per-instance state is set in __init__.
    displayWidth = 600
    displayHeight = 448
    images = []
    images_albums = []
    images_artwork = []
    images_comics = []
    images_postcards = []
    images_default = []
    image = None
    image_file = None
    current_image_list = "default"
    image_file_extension = None
    current_image_index = 0

    def __init__(self, real_path, saturation=0.5, display_width=600, display_height=448):
        """Scan the image folders and remember the display settings.

        :param real_path: base directory containing the image sub-folders
        :param saturation: colour saturation passed to inky.set_image()
        :param display_width: panel width in pixels
        :param display_height: panel height in pixels
        """
        self.current_image_index = None
        self.image_file = None
        self.displayWidth = display_width
        self.displayHeight = display_height
        self.realpath = real_path
        self.saturation = saturation
        # BUGFIX: the original extended the *class-level* lists, so every
        # instance shared (and kept re-appending to) the same image lists.
        # Give each instance fresh lists before scanning.
        self.images_albums = []
        self.images_artwork = []
        self.images_comics = []
        self.images_postcards = []
        self.images_default = []
        self.init_files()

    def init_files(self):
        """Populate the per-category image lists from disk."""
        print(self.realpath)
        extensions = ('*.png', '*.jpg')  # extensions to load
        # BUGFIX: the original used the module-level global ``realpath``
        # here instead of ``self.realpath``, silently ignoring the
        # constructor argument.
        categories = (
            ("albums", self.images_albums),
            ("artwork", self.images_artwork),
            ("comics", self.images_comics),
            ("postcards", self.images_postcards),
            ("default", self.images_default),
        )
        for folder, target in categories:
            for extension in extensions:
                target.extend(glob.glob("%s/%s/**/%s" % (self.realpath, folder, extension), recursive=True))
            print(target)
        # Start with the "default" category active.
        self.images = self.images_default

    def switch_file_list(self):
        """Cycle to the next category (default -> albums -> artwork -> comics -> postcards -> default)."""
        if self.current_image_list == "default":
            self.images = self.images_albums
            self.current_image_list = "albums"
        elif self.current_image_list == "albums":
            self.images = self.images_artwork
            self.current_image_list = "artwork"
        elif self.current_image_list == "artwork":
            self.images = self.images_comics
            self.current_image_list = "comics"
        elif self.current_image_list == "comics":
            self.images = self.images_postcards
            self.current_image_list = "postcards"
        else:
            self.images = self.images_default
            self.current_image_list = "default"
        if len(self.images) == 0:
            # Nothing to show in this category: render an error and abort.
            error_message = "Error: folder \"%s\" contains no images" % self.current_image_list
            print(error_message)
            error_image = self.render_error_message(error_message)
            inky.set_image(error_image, saturation=self.saturation)
            inky.show()
            exit(1)
        # BUGFIX: also reset the index so next/previous start from the
        # beginning of the newly selected category instead of a stale index.
        self.current_image_index = 0
        self.image_file = self.images[0]

    def display_next_image(self):
        """Show the image after the current one (wraps to the start)."""
        print("display_next_sprite")
        next_sprite = self.current_image_index + 1
        if next_sprite >= len(self.images):
            next_sprite = 0
        self.display_image_by_index(next_sprite)

    def display_previous_image(self):
        """Show the image before the current one (wraps to the end)."""
        print("display_previous_sprite")
        next_image_index = self.current_image_index - 1
        if next_image_index < 0:
            next_image_index = len(self.images) - 1
        self.display_image_by_index(next_image_index)

    def render_error_message(self, error_text, text_color=(0, 0, 0), text_start_height=0):
        """Return a red, display-sized image with ``error_text`` drawn on it."""
        image_message = Image.new("RGB", (self.displayWidth, self.displayHeight), color=(200, 0, 0))
        font = ImageFont.load_default()
        draw_multiple_line_text(image_message, error_text, font, text_color, text_start_height)
        return image_message

    def display_image_by_index(self, number):
        """Load image ``number`` of the active category and push it to the panel."""
        print("display_sprite_by_number: %s" % number)
        self.current_image_index = number
        self.image_file = self.images[number]
        print('Loading image: {}...'.format(self.image_file))
        try:
            image = Image.open(self.image_file)
            # Upscale images smaller than the panel.
            # BUGFIX: the original compared image.height against
            # displayWidth; it must be displayHeight.
            if image.width < self.displayWidth or image.height < self.displayHeight:
                image = image.resize((self.displayWidth, self.displayHeight))
            # Resize any non-GIF image to exactly the panel size.
            image_file_extension = self.image_file.lower().split(".")[-1]
            if image_file_extension != "gif":
                image = image.resize((self.displayWidth, self.displayHeight))
        except BaseException as err:
            # Any load/resize failure is shown on the panel instead of crashing.
            error_text = f"Unexpected {err=}, {type(err)=}"
            print(error_text)
            image = self.render_error_message(error_text)
        print('Draw image')
        try:
            inky.set_image(image, saturation=self.saturation)
            inky.show()
        except BaseException as err:
            error_text = f"Unexpected {err=}, {type(err)=}"
            print(error_text)

    def display_random_image(self):
        """Pick randomly between the artwork and default lists and show a random image."""
        print("display_random_image")
        # choose between two lists
        image_list_index = randrange(2)
        if image_list_index == 0:
            self.images = self.images_artwork
            self.current_image_list = "artwork"
        else:
            self.images = self.images_default
            self.current_image_list = "default"
        image_index_to_show = randrange(len(self.images))
        self.display_image_by_index(image_index_to_show)
# Directory containing this script; the image folders live beneath it.
realpath = os.path.dirname(os.path.realpath(__file__))
# Build the frame controller and show the first "default" image at start-up.
imageFrame = ImageFrame(realpath,
                        saturation=0.5,
                        display_width=600,
                        display_height=448
                        )
imageFrame.display_image_by_index(0)
# "handle_button" will be called every time a button is pressed
# It receives one argument: the associated input pin.
def handle_button(pin):
    """Dispatch a GPIO button press to the matching frame action.

    Registered as the RPi.GPIO falling-edge callback for every pin in
    BUTTONS; receives the BCM pin number that triggered the event.

    A = random image, B = next image, C = switch category, D = power off.
    """
    last_button = LABELS[BUTTONS.index(pin)]
    print("Button press detected on pin: {} label: {}".format(pin, last_button))
    if last_button == "A":
        print("random image")
        imageFrame.display_random_image()
    elif last_button == "B":
        print("next image")
        imageFrame.display_next_image()
    elif last_button == "C":
        print("switch list")
        imageFrame.switch_file_list()
        imageFrame.display_image_by_index(0)
    elif last_button == "D":
        print("Shutting down")
        # BUGFIX: ``subprocess`` was never imported at module level, so
        # this branch raised NameError instead of powering off the Pi.
        import subprocess
        subprocess.run("sudo shutdown --poweroff now", shell=True)
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # Loop through out buttons and attach the "handle_button" function to each
    # We're watching the "FALLING" edge (transition from 3.3V to Ground) and
    # picking a generous bouncetime of 250ms to smooth out button presses.
    # NOTE(review): the bouncetime passed below is actually 5000 ms, not the
    # 250 ms the comment above claims — confirm which value is intended.
    for pin in BUTTONS:
        GPIO.add_event_detect(pin, GPIO.FALLING, handle_button, bouncetime=5000)
    # Finally, since button handlers don't require a "while True" loop,
    # we pause the script to prevent it exiting immediately.
    signal.pause()
| 33.605469 | 113 | 0.641404 |
00786234b771e450b77c26fbf042c55f4eb590c5 | 110 | py | Python | Chapter 01/Example_1_4.py | bpbpublications/Advance-Core-Python-Programming | 8902ceb270f55c04c12e818032f90d641c14d7b1 | [
"MIT"
] | null | null | null | Chapter 01/Example_1_4.py | bpbpublications/Advance-Core-Python-Programming | 8902ceb270f55c04c12e818032f90d641c14d7b1 | [
"MIT"
] | null | null | null | Chapter 01/Example_1_4.py | bpbpublications/Advance-Core-Python-Programming | 8902ceb270f55c04c12e818032f90d641c14d7b1 | [
"MIT"
] | null | null | null | number = [1,2,3,4,5,6,13,7,8,9,0]
# Keep only the odd values of ``number`` (those with remainder 1 mod 2).
odd_number = [value for value in number if value % 2 != 0]
print(odd_number)
| 27.5 | 55 | 0.627273 |
562d958d3750f2a4d322a2e7d00068d65db50f32 | 11,214 | py | Python | docs/conf.py | johnnycakes79/pyops | 9eeda939e3f0d65a5dd220b3e439c8d2ba880d98 | [
"BSD-3-Clause"
] | 2 | 2016-03-23T14:35:16.000Z | 2016-05-17T04:56:28.000Z | docs/conf.py | johnnycakes79/pyops | 9eeda939e3f0d65a5dd220b3e439c8d2ba880d98 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | johnnycakes79/pyops | 9eeda939e3f0d65a5dd220b3e439c8d2ba880d98 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# pyops documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 1 15:55:02 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extension modules enabled for this documentation build:
# autodoc pulls docstrings from the code, todo renders todo directives,
# viewcode links documentation pages to highlighted source.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyops'
copyright = u'2015, Author'
author = u'Author'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyopsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pyops.tex', u'pyops Documentation',
u'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyops', u'pyops Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pyops', u'pyops Documentation',
author, 'pyops', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
| 31.236769 | 80 | 0.717585 |
5265027b3eacd3a83586db212fbc8ff5f9a5ad4b | 1,456 | py | Python | pandas2neo4j/errors.py | wiatrak2/pandas2neo4j | 5436f60403511fcceb2bc5619209897b81195c5c | [
"Apache-2.0"
] | 1 | 2021-09-07T08:10:59.000Z | 2021-09-07T08:10:59.000Z | pandas2neo4j/errors.py | wiatrak2/pandas2neo4j | 5436f60403511fcceb2bc5619209897b81195c5c | [
"Apache-2.0"
] | null | null | null | pandas2neo4j/errors.py | wiatrak2/pandas2neo4j | 5436f60403511fcceb2bc5619209897b81195c5c | [
"Apache-2.0"
] | null | null | null | class Pandas2Neo4jError(Exception):
pass
class NotSupportedModelClassError(Pandas2Neo4jError):
pass
class NodeWithIdDoesNotExistError(Pandas2Neo4jError):
def __init__(self, node_class=None, node_id=None):
self.node_class = node_class
self.node_id = node_id
def __str__(self):
if self.node_class is None or self.node_id is None:
return "Could not find requested node in the graph."
return f"Node of {self.node_class} class with id value {self.node_id} was not found in the graph."
class RelationshipDoesNotExistError(Pandas2Neo4jError):
def __init__(self, relationship, from_node, to_node):
self.relationship = relationship
self.from_node = from_node
self.to_node = to_node
def __str__(self):
return f"Relationship {self.relationship} between {self.from_node} and {self.to_node} not found in the graph."
class PropertyValueWithInvalidTypeError(Pandas2Neo4jError):
pass
class NotNullPropertyError(Pandas2Neo4jError):
def __init__(self, property_instance):
self.property_instance = property_instance
def __str__(self):
return (
f"Property {self.property_instance.__class__.__name__} has `not_null` flag set to True.\n"
f"None value was provided hovewer. Please use value with type {self.property_instance.TYPE}"
)
class InvalidArgumentsConfigurationError(Pandas2Neo4jError):
pass
| 30.978723 | 118 | 0.721841 |
6012c2c973cc79235fe85e5e69ee64ee00349713 | 977 | tac | Python | demos/twistd/helloworld.tac | rolando-contribute/cyclone | a9a6511943c138895a3ae833a5c98431d4ea1962 | [
"Apache-2.0"
] | 1 | 2020-12-27T18:43:14.000Z | 2020-12-27T18:43:14.000Z | demos/twistd/helloworld.tac | rolando-contribute/cyclone | a9a6511943c138895a3ae833a5c98431d4ea1962 | [
"Apache-2.0"
] | null | null | null | demos/twistd/helloworld.tac | rolando-contribute/cyclone | a9a6511943c138895a3ae833a5c98431d4ea1962 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env twistd -ny
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import cyclone.web
from twisted.application import service, internet
class MainHandler(cyclone.web.RequestHandler):
    """Handler for "/": replies to GET with a plain-text greeting."""
    def get(self):
        self.write("Hello, world")
# URL routing table mapping request paths to their handler classes.
webapp = cyclone.web.Application([
    (r"/", MainHandler),
])
# twistd looks for a module-level "application" service object in a .tac file.
application = service.Application("cyclone")
# Serve the cyclone web application on TCP port 8888 under that service.
cycloneService = internet.TCPServer(8888, webapp)
cycloneService.setServiceParent(application)
| 31.516129 | 75 | 0.756397 |
6a319a64f21845f62830784a60fc891061af2bc0 | 4,178 | py | Python | src/interpolate_video.py | adamjm/sepconv | e4296d28673cfc05d233f628c82baf43a16b9af4 | [
"MIT"
] | 134 | 2018-09-18T20:12:16.000Z | 2021-08-05T16:39:06.000Z | src/interpolate_video.py | adamjm/sepconv | e4296d28673cfc05d233f628c82baf43a16b9af4 | [
"MIT"
] | 8 | 2019-03-03T20:02:38.000Z | 2021-11-24T09:32:29.000Z | src/interpolate_video.py | adamjm/sepconv | e4296d28673cfc05d233f628c82baf43a16b9af4 | [
"MIT"
] | 28 | 2018-10-31T17:30:04.000Z | 2022-03-23T13:42:15.000Z | #
# KTH Royal Institute of Technology
#
import argparse
import torch
import math
from torchvision.transforms import CenterCrop
from os.path import join, isdir
from timeit import default_timer as timer
from src.utilities import write_video
from src.interpolate import interpolate_batch
from src.data_manager import load_tuples
from src.extract_frames import extract_frames
def interpolate_video(src_path, dest_path, model_path, input_fps=None, input_limit=None, batch_size=None):
    """Double the frame-rate of a video (or folder of frames) with the model.

    Loads the trained network, interpolates one new frame between every
    pair of consecutive input frames, and writes the result either as a
    video file or as numbered JPEG frames.

    :param src_path: input video file, or a folder of frames
    :param dest_path: output video file, or an existing folder that will
        receive the generated frames as numbered JPEGs
    :param model_path: path of the trained model file
    :param input_fps: frame-rate of the input; required when it cannot be
        detected (frame folders and some video sources)
    :param input_limit: optional cap on the number of input frames used
    :param batch_size: number of frames handed to the model at once
        (i.e. number of parallel interpolations + 1); defaults to all
    :raises Exception: if the input frame-rate is required but missing
    """
    from src.model import Net

    tick_t = timer()

    print('===> Loading model...')
    model = Net.from_file(model_path)

    if isdir(src_path):
        # A folder of frames carries no fps metadata, so it must be given.
        if input_fps is None:
            raise Exception('Argument --inputfps is required if the source is a folder of frames')
        print('===> Reading frames...')
        input_frames = load_tuples(src_path, 1, 1, paths_only=False)
        input_frames = [x[0] for x in input_frames]
    else:
        print('===> Reading video...')
        input_frames, detected_fps = extract_frames(src_path)
        if detected_fps is None:
            if input_fps is None:
                raise Exception('Argument --inputfps is required for this type of source')
        else:
            input_fps = detected_fps

    if input_limit is not None:
        input_frames = input_frames[:input_limit]

    n_input_frames = len(input_frames)

    if not torch.cuda.is_available():
        # Without CUDA full frames are too slow: crop to a centred square.
        crop_size = min(input_frames[0].size)
        crop = CenterCrop(crop_size)
        print(f'===> CUDA not available. Cropping input as {crop_size}x{crop_size}...')
        input_frames = [crop(x) for x in input_frames]

    if batch_size is not None and batch_size > 1:
        batch_size = min(batch_size, n_input_frames)
    else:
        batch_size = n_input_frames

    # Consecutive batches overlap by one frame, so a batch of B frames
    # yields B-1 interpolations; covering all n-1 gaps therefore takes
    # ceil((n-1) / (B-1)) batches. (Replaces the former two-step
    # compute-then-correct FIXME with the equivalent closed form.)
    n_batches = math.ceil((n_input_frames - 1) / (batch_size - 1))
    print(f'Job split into {n_batches} batches')

    print('===> Interpolating...')
    middle_frames = []
    for i in range(n_batches):
        idx = (batch_size - 1) * i
        batch = input_frames[idx: idx + batch_size]
        middle_frames += interpolate_batch(model, batch)
        print('Batch {}/{} done'.format(i + 1, n_batches))

    print('===> Stitching frames...')
    # Interleave: original frame, generated middle, next original, ...
    output_frames = input_frames[:1]
    iters = len(middle_frames)
    for i in range(iters):
        frame2 = input_frames[i + 1]
        middle = middle_frames[i]
        output_frames += [middle, frame2]
        print('Frame {}/{} done'.format(i + 1, iters))

    if isdir(dest_path):
        print('===> Saving frames...')
        for i, frame in enumerate(output_frames):
            file_name = '{:07d}.jpg'.format(i)
            file_path = join(dest_path, file_name)
            frame.save(file_path)
    else:
        print('===> Saving video...')
        # The output has twice the frames, hence twice the frame-rate.
        write_video(dest_path, output_frames, fps=(input_fps * 2))

    tock_t = timer()
    print("Done. Took ~{}s".format(round(tock_t - tick_t)))
if __name__ == '__main__':
    # Command-line entry point: parse the arguments and run the interpolation.
    parser = argparse.ArgumentParser(description='Video Frame Interpolation')
    parser.add_argument('--src', dest='src_path', type=str, required=True, help='path to the video, either as a single file or as a folder')
    parser.add_argument('--dest', dest='dest_path', type=str, required=True, help='output path of the resulting video, either as a single file or as a folder')
    parser.add_argument('--model', dest='model_path', type=str, required=True, help='path of the trained model')
    parser.add_argument('--inputfps', dest='input_fps', type=int, required=False, default=None, help='frame-rate of the input. Only used if the frames are read from a folder')
    parser.add_argument('--inputlimit', dest='input_limit', type=int, required=False, default=None, help='maximum number of processed input frames')
    parser.add_argument('--batchsize', dest='batch_size', type=int, required=False, default=None, help='number of frames to be processed at the same time (i.e. number of interpolations in parallel +1)')
    # The dest names above match interpolate_video's parameter names exactly,
    # so the parsed namespace can be splatted straight into the call.
    interpolate_video(**vars(parser.parse_args()))
| 40.173077 | 202 | 0.671374 |
7c7069a6a18f5c5b42fde5d91659c44b4caa05b2 | 621 | py | Python | contrib/test_spin2dstar.py | miquelramirez/tulip-control | ce54897c242689f45ad33650f157bf1805b35ed6 | [
"BSD-3-Clause"
] | 1 | 2020-02-13T14:13:50.000Z | 2020-02-13T14:13:50.000Z | contrib/test_spin2dstar.py | miquelramirez/tulip-control | ce54897c242689f45ad33650f157bf1805b35ed6 | [
"BSD-3-Clause"
] | null | null | null | contrib/test_spin2dstar.py | miquelramirez/tulip-control | ce54897c242689f45ad33650f157bf1805b35ed6 | [
"BSD-3-Clause"
] | 1 | 2019-07-09T16:32:39.000Z | 2019-07-09T16:32:39.000Z | #!/usr/bin/env python
"""
Regression tests for spin2dstar.py
SCL; 1 Sep 2013
"""
import sys
import nose
from spin2dstar import spin_to_dstar
# Battery of test cases: SPIN (infix) formula -> expected dstar (prefix) form.
S2D_BATTERY = {
    "[]<> a": "G F a",
    "foo": "foo",
    "p0 && p1": "& p0 p1",
    "([] <> a) -> ([] <> b)": "i G F a G F b",
    "(p U q) || (r U z)": "| U p q U r z"
}
def check_spin_to_dstar(informula, expected_outformula):
    # Assert that one SPIN formula converts to the expected prefix form.
    assert expected_outformula == spin_to_dstar(informula)
def test_spin_to_dstar():
    # nose test generator: yields one check per battery entry.
    for (k,v) in S2D_BATTERY.items():
        yield check_spin_to_dstar, k, v
if __name__ == "__main__":
    # When run directly, execute the suite through nose in verbose mode.
    sys.argv.append("--verbose")
    nose.run()
| 18.264706 | 58 | 0.595813 |
6513d4568ca71136c1e82717c869994a5b7b320f | 3,863 | py | Python | src/primaires/information/commandes/roadmap/__init__.py | stormi/tsunami | bdc853229834b52b2ee8ed54a3161a1a3133d926 | [
"BSD-3-Clause"
] | null | null | null | src/primaires/information/commandes/roadmap/__init__.py | stormi/tsunami | bdc853229834b52b2ee8ed54a3161a1a3133d926 | [
"BSD-3-Clause"
] | null | null | null | src/primaires/information/commandes/roadmap/__init__.py | stormi/tsunami | bdc853229834b52b2ee8ed54a3161a1a3133d926 | [
"BSD-3-Clause"
] | null | null | null | # -*-coding:Utf-8 -*
# Copyright (c) 2015 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'roadmap'."""
from primaires.interpreteur.commande.commande import Commande
from .creer import PrmCreer
from .editer import PrmEditer
from .supprimer import PrmSupprimer
class CmdRoadmap(Commande):
    """The 'roadmap' command.

    Displays the current roadmap: improvements the immortals (builders)
    are working on that are not yet visible to players. Unread entries
    are marked with a red asterisk for the requesting player.
    """
    def __init__(self):
        """Build the command (name, category and help texts)."""
        Commande.__init__(self, "roadmap", "roadmap")
        self.nom_categorie = "info"
        self.aide_courte = "affiche la feuille de route"
        self.aide_longue = \
            "Cette commande permet d'afficher la feuille de route " \
            "actuelle. Cette feuille de route affiche les améliorations " \
            "sur lesquelles les immortels travaillent mais qui ne " \
            "sont pas encore visibles par les joueurs. Cette feuille " \
            "de route est mise à jour régulièrement et permet de " \
            "suivre l'avancement du travail accompli par les " \
            "bâtisseurs. Pour chaque élément de la feuille de route, " \
            "vous le verrez précédé d'un astérisque (*) coloré en " \
            "rouge pour vous indiquer que cette information a été " \
            "mise à jour depuis la dernière fois que vous avez " \
            "consulté cette feuille de route."
    def ajouter_parametres(self):
        """Register the sub-commands (create / edit / delete)."""
        self.ajouter_parametre(PrmCreer())
        self.ajouter_parametre(PrmEditer())
        self.ajouter_parametre(PrmSupprimer())
    def erreur_validation(self, personnage, dic_masques):
        """Fallback when no sub-command matched: render the roadmap list."""
        roadmaps = importeur.information.roadmaps
        if roadmaps:
            msg = "Feuille de route :"
            for roadmap in roadmaps:
                msg += "\n"
                if personnage.est_immortel():
                    # Immortals see the entry number (for editing/deleting).
                    msg += " {:>2}".format(roadmap.no)
                elif personnage in roadmap.joueurs_ayant_lu:
                    # Entry already read by this player: no marker, just padding.
                    msg += "    "
                else:
                    # Unread entry: red asterisk marker, then mark it as read.
                    msg += " |rg|*|ff| "
                    roadmap.joueurs_ayant_lu.append(personnage)
                msg += " " + roadmap.titre.capitalize()
                if roadmap.texte:
                    msg += " : " + roadmap.texte
        else:
            msg = "|att|La feuille de route actuelle est vide.|ff|"
        return msg
| 43.897727 | 79 | 0.664768 |
d4af9d9a7be1a9cebde2f973c523ffae3dd417eb | 23,008 | py | Python | AutotestPlatform/website/models.py | yzypals/AutoTestingPlatform | cfb2c53337406347fad37bd65568b22cdc76fdca | [
"Apache-2.0"
] | null | null | null | AutotestPlatform/website/models.py | yzypals/AutoTestingPlatform | cfb2c53337406347fad37bd65568b22cdc76fdca | [
"Apache-2.0"
] | 2 | 2020-06-06T00:51:32.000Z | 2021-06-10T22:40:50.000Z | AutotestPlatform/website/models.py | yzypals/AutoTestingPlatform | cfb2c53337406347fad37bd65568b22cdc76fdca | [
"Apache-2.0"
] | 1 | 2020-05-31T03:49:24.000Z | 2020-05-31T03:49:24.000Z | from django.db import models
# Create your models here.
# Navigation menu entry (sidebar/menu tree).
class Navigation(models.Model):
    """One entry of the site navigation menu."""
    id = models.IntegerField(primary_key=True) # ID (manually assigned, not auto)
    menu_name = models.CharField(max_length=20) # menu label
    parent_id = models.IntegerField() # parent menu ID
    url = models.CharField(max_length=500) # menu URL
    icon = models.CharField(max_length=15) # menu icon name
    order = models.IntegerField() # sort order
# Test project configuration.
class Test_project_setting(models.Model):
    """A (manual) test project that sprint trees attach to."""
    id = models.AutoField(primary_key=True) # ID, primary key
    project_name = models.CharField(max_length=50) # project name
    valid_flag = models.CharField(max_length=5) # enabled flag (enabled|disabled)
    order = models.IntegerField() # sort order
# Stores the project currently selected in each tree, used as that
# tree's default project.
class Project_chosen(models.Model):
    """Per-tree remembered project selection."""
    id = models.AutoField(primary_key=True)
    project_id = models.IntegerField() # selected project ID
    project_name = models.CharField(max_length=50) # selected project name
    tree_type = models.CharField(max_length=20) # which tree the selection belongs to
# Sprint (development iteration) tree used to organise test tasks.
class Sprint_tree(models.Model):
    """Tree node for the sprint/iteration hierarchy (easyui-tree shaped)."""
    id = models.AutoField(primary_key=True)
    text = models.CharField(max_length=50) # node label
    state = models.CharField(max_length=10) # node state (expanded or not)
    parent_id = models.IntegerField() # parent node id
    iconCls = models.CharField(max_length=20) # node icon name
    attributes = models.CharField(max_length=100) # extra node attributes
    project = models.ForeignKey(Test_project_setting, to_field='id', on_delete=models.CASCADE) # owning project
    order = models.IntegerField() # sort order
# UI automation project configuration.
class UI_project_setting(models.Model):
    """A UI-automation project (browser-driven tests)."""
    id = models.AutoField(primary_key=True) # ID, primary key
    project_name = models.CharField(max_length=50) # project name
    home_page = models.CharField(max_length=500) # project home page URL
    environment = models.CharField(max_length=20) # environment name
    environment_id = models.IntegerField() # environment ID
    valid_flag = models.CharField(max_length=5) # enabled flag (enabled|disabled)
    order = models.IntegerField() # sort order
# API automation project configuration.
class API_project_setting(models.Model):
    """An API-automation project (HTTP-driven tests)."""
    id = models.AutoField(primary_key=True) # ID, primary key
    project_name = models.CharField(max_length=50) # project name
    protocol = models.CharField(max_length=10) # protocol: http or https
    host = models.CharField(max_length=200) # host address
    port = models.IntegerField() # port
    environment = models.CharField(max_length=20) # environment name
    environment_id = models.IntegerField() # environment ID
    valid_flag = models.CharField(max_length=5) # enabled flag (enabled|disabled)
    order = models.IntegerField() # sort order
# Page tree used to organise page elements for UI automation.
class Page_tree(models.Model):
    """Tree node of the page hierarchy (leaves group page elements)."""
    id = models.BigAutoField(primary_key=True)
    text = models.CharField(max_length=50) # node label
    state = models.CharField(max_length=10) # node state (expanded or not)
    parent_id = models.IntegerField() # parent node id
    iconCls = models.CharField(max_length=20) # node icon name
    attributes = models.CharField(max_length=100) # extra node attributes
    project = models.ForeignKey(UI_project_setting, to_field='id', on_delete=models.CASCADE) # owning project
    order = models.IntegerField() # node sort order
# Case tree used to organise UI test cases.
class UI_case_tree(models.Model):
    """Tree node of the UI test-case hierarchy (leaves are test cases)."""
    id = models.BigAutoField(primary_key=True)
    text = models.CharField(max_length=50) # node label
    state = models.CharField(max_length=10) # node state (expanded or not)
    parent_id = models.IntegerField() # parent node id
    iconCls = models.CharField(max_length=20) # node icon name
    attributes = models.CharField(max_length=100) # extra node attributes
    project = models.ForeignKey(UI_project_setting, to_field='id', on_delete=models.CASCADE) # owning project
    order = models.IntegerField() # node sort order
# Case tree used to organise API (interface) test cases.
class API_case_tree(models.Model):
    """Tree node of the API test-case hierarchy (leaves are test cases)."""
    id = models.BigAutoField(primary_key=True)
    text = models.CharField(max_length=50) # node label
    state = models.CharField(max_length=10) # node state (expanded or not)
    parent_id = models.IntegerField() # parent node id
    iconCls = models.CharField(max_length=20) # node icon name
    attributes = models.CharField(max_length=100) # extra node attributes
    project = models.ForeignKey(API_project_setting, to_field='id', on_delete=models.CASCADE) # owning project
    order = models.IntegerField() # node sort order
# Agile-development test task: overview row.
class Test_task_overview(models.Model):
    """Summary of one test task within a sprint node."""
    id = models.BigAutoField(primary_key=True) # task ID
    module = models.CharField(max_length=100) # functional module of the task
    progress = models.CharField(max_length=10) # overall progress
    requirement = models.CharField(max_length=100) # requirement task
    sub_task = models.CharField(max_length=100) # sub-task
    time_for_test = models.CharField(max_length=20) # estimated handover-to-test time
    real_time_for_test = models.CharField(max_length=20) # actual handover-to-test time
    if_delay = models.CharField(max_length=2) # whether handover to test was delayed
    developer_in_charge = models.CharField(max_length=50) # development owner
    tester_in_charge = models.CharField(max_length=20) # test owner
    pm_in_charge = models.CharField(max_length=10) # product owner
    mark = models.CharField(max_length=100) # remarks
    order = models.IntegerField()
    page = models.ForeignKey(Sprint_tree, to_field='id', on_delete=models.PROTECT)
# Agile-development test task: detail row.
class Test_task_detail(models.Model):
    """Detailed breakdown of one test task within a sprint node."""
    id = models.BigAutoField(primary_key=True) # task ID
    module = models.CharField(max_length=100) # functional module of the task
    requirement = models.CharField(max_length=100) # requirement task
    person_in_charge = models.CharField(max_length=20) # test owner
    sub_task = models.CharField(max_length=100) # sub-task
    progress = models.CharField(max_length=10) # task progress
    time_took = models.CharField(max_length=10) # estimated effort
    deadline = models.CharField(max_length=20) # expected deadline
    finish_time = models.CharField(max_length=20) # actual completion time
    if_delay = models.CharField(max_length=4) # whether the task overran
    history_progress = models.CharField(max_length=400) # progress-update history
    remark = models.CharField(max_length=200) # remarks
    order = models.IntegerField()
    page = models.ForeignKey(Sprint_tree, to_field='id', on_delete=models.PROTECT)
# Problem-feedback management.
# NOTE(review): class name is misspelled ("Promble" -> "Problem"); kept
# as-is because renaming a model changes the DB table / migrations.
class Promble_feedback(models.Model):
    """A reported problem and its follow-up state."""
    id = models.BigAutoField(primary_key=True) # problem ID
    desc = models.CharField(max_length=300) # problem description
    status = models.CharField(max_length=10) # problem status
    issuer = models.CharField(max_length=10) # reporter
    tracer = models.CharField(max_length=10) # person following up
    handler = models.CharField(max_length=10) # person handling it
    record_time = models.CharField(max_length=20) # time recorded
    start_trace_time = models.CharField(max_length= 20) # time handling started
    solved_time = models.CharField(max_length= 20) # time resolved
    mark = models.CharField(max_length=100) # remarks
    order = models.IntegerField() # sort order
# Environment configuration.
# NOTE(review): the original comment said "browser configuration" but the
# fields clearly describe an environment entry.
class Env_setting(models.Model):
    """A named test environment."""
    id = models.AutoField(primary_key=True) # ID, primary key
    env = models.CharField(max_length=50) # environment name
    order = models.IntegerField() # sort order
# Browser configuration.
class Browser_setting(models.Model):
    """A browser usable by UI test plans."""
    id = models.AutoField(primary_key=True) # ID, primary key
    browser = models.CharField(max_length=20) # browser name
    order = models.IntegerField() # sort order
# Database connection configuration.
class Database_setting(models.Model):
    """A database connection that test steps may run SQL against."""
    id = models.AutoField(primary_key=True) # ID, primary key
    db_type = models.CharField(max_length=10) # database type
    db_alias = models.CharField(max_length=20) # database alias, unique
    db_name = models.CharField(max_length=20) # database name
    db_host = models.CharField(max_length=200) # ip / host
    db_port = models.IntegerField() # port
    db_user = models.CharField(max_length=20) # database user name
    db_passwd = models.CharField(max_length=20) # database user password (NOTE(review): stored in plaintext)
    environment = models.CharField(max_length=20) # environment name
    environment_id = models.IntegerField() # environment ID
    project_type = models.CharField(max_length=10) # project type: API project / UI project
    project_name = models.CharField(max_length=50) # project name
    project_id = models.CharField(max_length=300) # linked project ID(s)
    order = models.IntegerField() # sort order
# Operations available for each type of object.
class Operation_for_object(models.Model):
    """Maps an object type to one operation it supports."""
    id = models.AutoField(primary_key=True) # ID, primary key
    object_type = models.CharField(max_length=10) # object type
    operation = models.CharField(max_length=50) # operation the object supports
    order = models.IntegerField() # sort order
# Built-in function configuration.
class Function_setting(models.Model):
    """A system function callable from test steps."""
    id = models.AutoField(primary_key=True) # ID, primary key
    function_name = models.CharField(max_length=20) # function name
    param_style = models.CharField(max_length=100) # example of its parameters
    order = models.IntegerField() # sort order
    project_type = models.CharField(max_length=10) # project type
# Assertion type configuration.
class Assertion_type_setting(models.Model):
    """An assertion type available for a given operation category."""
    id = models.AutoField(primary_key=True) # ID, primary key
    op_type = models.CharField(max_length=10) # page op | API request op | DB op | system function call
    assertion_type = models.CharField(max_length=50) # assertion type
    assertion_pattern = models.CharField(max_length=2000) # assertion pattern
    order = models.IntegerField() # sort order
# Global variable configuration (used by the API automation).
class Global_variable_setting(models.Model):
    """A global variable visible to test steps of the given scope."""
    id = models.AutoField(primary_key=True) # ID, primary key
    name = models.CharField(max_length=50) # variable name
    value = models.CharField(max_length=3000) # variable value
    remark = models.CharField(max_length=3000) # remarks
    environment = models.CharField(max_length=20) # environment name
    env_id = models.CharField(max_length=500) # linked environment ID(s)
    project_type = models.CharField(max_length=10) # project type: API project, UI project, all projects
    project_name = models.CharField(max_length=50) # project name
    project_id = models.CharField(max_length=500) # linked project ID(s)
    order = models.IntegerField() # sort order
# UI automation page element.
class Page_element(models.Model):
    """A locatable element of a page (leaf of the page tree)."""
    id = models.BigAutoField(primary_key=True) # ID, primary key
    element_name = models.CharField(max_length=100) # element name
    selector1 = models.CharField(max_length=150) # primary element selector
    selector2 = models.CharField(max_length=150) # fallback element selector
    order = models.IntegerField() # sort order
    page = models.ForeignKey(Page_tree, to_field='id', on_delete=models.PROTECT)
# UI automation test-case step.
class UI_test_case_step(models.Model):
    """One step of a UI test case."""
    id = models.BigAutoField(primary_key=True) # step ID
    order = models.IntegerField() # step sequence number
    object_type = models.CharField(max_length=10) # type of the target object
    page_name = models.CharField(max_length=1000) # owning page
    object = models.CharField(max_length=50) # object to operate on
    exec_operation = models.CharField(max_length=50) # operation to perform
    input_params = models.CharField(max_length=500) # input parameters
    output_params = models.CharField(max_length=100) # output parameters
    assert_type = models.CharField(max_length=20) # expected result - assertion type
    assert_pattern = models.CharField(max_length=1000) # expected result - assertion pattern
    run_times = models.IntegerField() # number of runs
    try_for_failure = models.IntegerField() # retries on failure
    status = models.CharField(max_length=5) # step status: enabled|disabled
    object_id = models.IntegerField() # object ID (page element ID, database ID or function ID)
    case = models.ForeignKey(UI_case_tree, to_field='id', on_delete=models.PROTECT) # tree node ID, i.e. the case ID
# API automation test-case step.
class API_test_case_step(models.Model):
    """One step of an API test case (HTTP request, SQL, or call)."""
    id = models.BigAutoField(primary_key=True) # step ID
    order = models.IntegerField() # step sequence number
    step_type = models.CharField(max_length=10) # step type
    op_object = models.CharField(max_length=5000) # target of the operation
    object_id = models.BigIntegerField() # object ID (database ID or case ID)
    exec_operation = models.CharField(max_length=50) # operation to perform
    request_header = models.CharField(max_length=2000) # request headers
    request_method = models.CharField(max_length=10) # request method
    url_or_sql = models.CharField(max_length=2000) # URL / SQL
    input_params = models.CharField(max_length=3000) # input parameters
    response_to_check = models.CharField(max_length=10) # whether to check the response
    check_rule = models.CharField(max_length=20) # validation rule
    check_pattern = models.CharField(max_length=3000) # validation pattern
    output_params = models.TextField(max_length=6000) # output
    protocol = models.CharField(max_length=10) # protocol: http or https
    host = models.CharField(max_length=200) # host address
    port = models.CharField(max_length=6) # port
    run_times = models.IntegerField() # number of runs
    try_for_failure = models.IntegerField() # retries on failure
    retry_frequency = models.IntegerField() # retry interval on failure
    status = models.CharField(max_length=5) # step status: enabled|disabled
    case = models.ForeignKey(API_case_tree, to_field='id', on_delete=models.PROTECT) # tree node ID, i.e. the case ID
# UI automation test plan.
class UI_test_plan(models.Model):
    """A named selection of UI test cases run on chosen browsers."""
    id = models.AutoField(primary_key=True) # plan ID
    project_name = models.CharField(max_length=100) # name of the linked project
    plan_name = models.CharField(max_length=50) # plan name
    plan_desc = models.CharField(max_length=200) # plan description
    browsers = models.CharField(max_length=20) # browsers to run on
    browser_id = models.CharField(max_length=100) # browser id(s)
    valid_flag = models.CharField(max_length=5) # enabled flag (enabled|disabled)
    order = models.IntegerField() # sort order
    project = models.ForeignKey(UI_project_setting,to_field='id', on_delete=models.PROTECT) # owning project
# API automation test plan.
class API_test_plan(models.Model):
    """A named selection of API test cases."""
    id = models.AutoField(primary_key=True) # plan ID
    project_name = models.CharField(max_length=100) # name of the linked project
    plan_name = models.CharField(max_length=50) # plan name
    plan_desc = models.CharField(max_length=200) # plan description
    valid_flag = models.CharField(max_length=5) # enabled flag (enabled|disabled)
    order = models.IntegerField() # sort order
    project = models.ForeignKey(API_project_setting, to_field='id', on_delete=models.PROTECT) # owning project
# Join table between the UI case tree and UI test plans.
class UI_case_tree_test_plan(models.Model):
    """Links a UI case-tree node to a test plan."""
    id = models.BigAutoField(primary_key=True) # primary key
    plan_id = models.IntegerField() # plan ID
    node_name = models.CharField(max_length=50) # node label
    node_path = models.CharField(max_length=1000) # node "parent path"
    sub_node_num = models.IntegerField() # child-node count; distinguishes folders from cases
    order = models.IntegerField(default=0) # sort order
    node = models.ForeignKey(UI_case_tree, to_field='id', on_delete=models.PROTECT) # tree node
# Join table between the API case tree and API test plans.
class API_case_tree_test_plan(models.Model):
    """Links an API case-tree node to a test plan."""
    id = models.BigAutoField(primary_key=True) # primary key
    plan_id = models.IntegerField() # plan ID
    node_name = models.CharField(max_length=50) # node label
    node_path = models.CharField(max_length=5000) # node "parent path"
    sub_node_num = models.IntegerField() # child-node count; distinguishes folders from cases
    order = models.IntegerField(default=0) # sort order
    node = models.ForeignKey(API_case_tree, to_field='id', on_delete=models.PROTECT) # tree node
# Run-plan management: a scheduled/triggered run of one or more test plans.
class Running_plan(models.Model):
    """A run configuration bundling test plans for execution."""
    id = models.BigAutoField(primary_key=True) # primary key
    running_plan_num = models.BigIntegerField() # run-plan number
    running_plan_name = models.CharField(max_length=50) # run-plan name
    project_type = models.CharField(max_length=10) # project type: API project / UI project
    project_id = models.IntegerField() # project ID (hidden in the UI)
    project_name = models.CharField(max_length=50) # project name
    plan_name = models.CharField(max_length=50) # plan name(s), comma separated if several
    plan_id = models.CharField(max_length=500) # plan ID(s), comma separated (hidden in the UI)
    script_dirpath = models.CharField(max_length=200) # absolute path of the scripts' parent directory
    python_path = models.CharField(max_length=200) # path to the python interpreter
    valid_flag = models.CharField(max_length=5) # enabled flag (enabled|disabled)
    running_status = models.CharField(max_length=10) # run status: not run | running
    remark = models.CharField(max_length=1000) # remarks
    order = models.IntegerField() # sort order
# UI automation test report: run summary.
class UI_test_report_for_summary(models.Model):
    """Aggregate results of one UI plan execution."""
    id = models.AutoField(primary_key=True)
    execution_num = models.CharField(max_length=30) # execution number
    project_id = models.IntegerField() # project ID (hidden in the UI)
    plan_id = models.IntegerField() # plan ID
    project_name = models.CharField(max_length=100) # project name
    plan_name = models.CharField(max_length=50) # plan name
    browser = models.CharField(max_length=20) # browser
    start_time = models.CharField(max_length=30) # run start time
    end_time = models.CharField(max_length=30) # run end time
    time_took = models.CharField(max_length=20) # elapsed time
    case_total_num = models.IntegerField() # total number of cases
    case_pass_num = models.IntegerField() # number of passed cases
    case_fail_num = models.IntegerField() # number of failed cases
    case_block_num = models.IntegerField() # number of blocked cases
    remark = models.CharField(max_length=3000) # remarks, e.g. why the plan run failed
# API automation test report: run summary.
class API_test_report_for_summary(models.Model):
    """Aggregate results of one API plan execution."""
    id = models.AutoField(primary_key=True)
    execution_num = models.CharField(max_length=30) # execution number
    project_id = models.IntegerField() # project ID (hidden in the UI)
    plan_id = models.IntegerField() # plan ID
    project_name = models.CharField(max_length=100) # project name
    plan_name = models.CharField(max_length=50) # plan name
    start_time = models.CharField(max_length=30) # run start time
    end_time = models.CharField(max_length=30) # run end time
    time_took = models.CharField(max_length=20) # elapsed time
    case_total_num = models.IntegerField() # total number of cases
    case_pass_num = models.IntegerField() # number of passed cases
    case_fail_num = models.IntegerField() # number of failed cases
    case_block_num = models.IntegerField() # number of blocked cases
    remark = models.CharField(max_length=3000) # remarks, e.g. why the plan run failed
# UI automation test report: per-case execution detail.
class UI_test_report_for_case(models.Model):
    """Result of one UI test case within a plan execution."""
    id = models.BigAutoField(primary_key=True)
    execution_num = models.CharField(max_length=30) # execution number (matches ui_test_report_for_summary.execution_num)
    plan_id = models.IntegerField() # plan ID (hidden in the UI)
    case_id = models.IntegerField() # case ID
    case_path = models.CharField(max_length=1000) # case path in the tree (original comment wrongly said "plan name")
    case_name = models.CharField(max_length=100) # case name
    run_result = models.CharField(max_length=10) # run result
    run_time = models.CharField(max_length=30) # run time
    remark = models.CharField(max_length=3000) # failure reason / extra notes / remarks
    time_took = models.CharField(max_length=20) # elapsed time
# API automation test report: per-case execution detail.
class API_test_report_for_case(models.Model):
    """Result of one API test case within a plan execution."""
    id = models.BigAutoField(primary_key=True)
    execution_num = models.CharField(max_length=30) # execution number (matches api_test_report_for_summary.execution_num)
    plan_id = models.IntegerField() # plan ID (hidden in the UI)
    case_id = models.IntegerField() # case ID
    case_path = models.CharField(max_length=1000) # case path in the tree (original comment wrongly said "plan name")
    case_name = models.CharField(max_length=100) # case name
    run_result = models.CharField(max_length=10) # run result
    run_time = models.CharField(max_length=30) # run time
    remark = models.CharField(max_length=3000) # failure reason / extra notes / remarks
    time_took = models.CharField(max_length=20) # elapsed time
# UI automation test report: per-step execution detail.
class UI_test_report_for_case_step(models.Model):
    """Result of one step of a UI test case within a plan execution."""
    id = models.BigAutoField(primary_key=True)
    execution_num = models.CharField(max_length=30) # execution number (matches ui_test_report_for_case.execution_num)
    plan_id = models.IntegerField() # plan ID (hidden in the UI)
    case_id = models.IntegerField() # case ID (hidden in the UI)
    step_id = models.IntegerField() # case step ID
    order = models.IntegerField() # step sequence number
    page = models.CharField(max_length=1000) # owning page
    object = models.CharField(max_length=200) # target object
    exec_operation = models.CharField(max_length=10) # operation performed
    input_params = models.CharField(max_length=500) # input parameters
    output_params = models.CharField(max_length=500) # output parameters
    assert_type = models.CharField(max_length=100) # expected result - assertion type
    check_pattern = models.CharField(max_length=500) # expected result - assertion pattern
    run_times = models.IntegerField() # number of runs
    try_for_failure = models.IntegerField() # retries on failure
    run_result = models.CharField(max_length=10) # run result
    remark = models.CharField(max_length=500) # reason / remarks
    run_time = models.CharField(max_length=30) # run time
    run_id = models.BigIntegerField() # run id linking case and steps; value: UI_test_report_for_case.id
# API automation test report: per-step execution detail.
# NOTE(review): the original comment said "UI ... report" but this model
# clearly belongs to the API report tables.
class API_test_report_for_case_step(models.Model):
    """Result of one step of an API test case within a plan execution."""
    id = models.BigAutoField(primary_key=True) # step ID
    execution_num = models.CharField(max_length=30) # execution number (matches api_test_report_for_case.execution_num)
    plan_id = models.IntegerField() # plan ID (hidden in the UI)
    case_id = models.IntegerField() # case ID (hidden in the UI)
    step_id = models.IntegerField() # case step ID
    order = models.IntegerField() # step sequence number
    step_type = models.CharField(max_length=10) # step type
    op_object = models.CharField(max_length=5000) # target of the operation
    object_id = models.BigIntegerField() # object ID (database ID or case ID)
    exec_operation = models.CharField(max_length=50) # operation performed
    protocol = models.CharField(max_length=10) # protocol: http or https
    host = models.CharField(max_length=200) # host address
    port = models.CharField(max_length=6) # port
    request_header = models.CharField(max_length=2000) # request headers
    request_method = models.CharField(max_length=10) # request method
    url_or_sql = models.CharField(max_length=2000) # URL / SQL
    input_params = models.CharField(max_length=3000) # input parameters
    response_to_check = models.CharField(max_length=10) # whether the response was checked
    check_rule = models.CharField(max_length=20) # validation rule
    check_pattern = models.CharField(max_length=3000) # validation pattern
    output_params = models.TextField(max_length=7000) # output
    run_result = models.CharField(max_length=10) # run result
    remark = models.CharField(max_length=3000) # reason / remarks
    run_time = models.CharField(max_length=30) # run time
    run_id = models.BigIntegerField() # run id linking case and steps; value: API_test_report_for_case.id
| 52.172336 | 110 | 0.649426 |
1dd85fbaf49980f483cf4df777c212bc1103cc94 | 10,604 | py | Python | posix-compliance/posix_rst.py | richidubey/rtems-docs | 4476290486ae0258589adbe812de69a024403cbf | [
"BSD-2-Clause"
] | 1 | 2019-08-05T18:06:10.000Z | 2019-08-05T18:06:10.000Z | posix-compliance/posix_rst.py | richidubey/rtems-docs | 4476290486ae0258589adbe812de69a024403cbf | [
"BSD-2-Clause"
] | null | null | null | posix-compliance/posix_rst.py | richidubey/rtems-docs | 4476290486ae0258589adbe812de69a024403cbf | [
"BSD-2-Clause"
] | null | null | null | #! /usr/bin/env python
#
# Convert the CSV compliance data to ReST Format.
#
from __future__ import print_function
import copy
import csv
import os
import sys
# All standards/profiles tracked by the compliance spreadsheet; these are
# also the valid values of the `standard` argument of compliance methods.
standards = [
    'RTEMS',
    'POSIX-2008',
    'POSIX-2003',
    'PSE51',
    'PSE52',
    'PSE53',
    'PSE54',
    'C99',
    'C11',
    'FACE 2.1 Security',
    'FACE 2.1 Safety Base',
    'FACE 2.1 Safety Extended',
    'FACE 2.1 General Purpose',
    'FACE 3.0 Security',
    'FACE 3.0 Safety Base',
    'FACE 3.0 Safety Extended',
    'FACE 3.0 General Purpose',
    'SCA 2.2.2 AEP',
    'SCA 4.1 Ultra Lightweight AEP',
    'SCA 4.1 Lightweight AEP',
    'SCA 4.1 [Full] AEP'
]
# Maps a standard's short key to the human-readable name used in reports.
standard_names = {
    'RTEMS' : 'RTEMS Complete Profile',
    'POSIX-2008' : 'POSIX-2008',
    'POSIX-2003' : 'POSIX-2003',
    'PSE51' : 'POSIX PSE51 - Minimal',
    'PSE52' : 'POSIX PSE52 - Real-Time Controller',
    'PSE53' : 'POSIX PSE53 - Dedicated',
    'PSE54' : 'POSIX PSE54 - Multipurpose',
    'C99' : 'C99 Standard Library',
    'C11' : 'C11 Standard Library',
    'FACE 2.1 Security' : 'FACE 2.1 Security',
    'FACE 2.1 Safety Base' : 'FACE 2.1 Safety Base',
    'FACE 2.1 Safety Extended': 'FACE 2.1 Safety Extended',
    'FACE 2.1 General Purpose': 'FACE 2.1 General Purpose',
    'FACE 3.0 Security' : 'FACE 3.0 Security',
    'FACE 3.0 Safety Base' : 'FACE 3.0 Safety Base',
    'FACE 3.0 Safety Extended': 'FACE 3.0 Safety Extended',
    'FACE 3.0 General Purpose': 'FACE 3.0 General Purpose',
    'SCA 2.2.2 AEP' : 'SCA 2.2.2 AEP',
    'SCA 4.1 Ultra Lightweight AEP' : 'SCA 4.1 Ultra Lightweight AEP',
    'SCA 4.1 Lightweight AEP' : 'SCA 4.1 Lightweight AEP',
    'SCA 4.1 [Full] AEP' : 'SCA 4.1 [Full] AEP'
}
# Maps an internal column key to the exact column title in the CSV file.
col_names = {
    'api' : 'Methods',
    'header' : 'Header File',
    'rtems-net' : 'RTEMS w/ Networking',
    'rtems-impl' : 'RTEMS Impl Note',
    'POSIX-2008' : 'IEEE Std 1003.1-2008',
    'POSIX-2003' : 'IEEE Std 1003.1-2003',
    'PSE51' : 'PSE51',
    'PSE52' : 'PSE52',
    'PSE53' : 'PSE53',
    'PSE54' : 'PSE54',
    'C99' : 'C99',
    'C11' : 'C11',
    'FACE 2.1 Security' : 'FACE 2.1 Security',
    'FACE 2.1 Safety Base' : 'FACE 2.1 Safety Base',
    'FACE 2.1 Safety Extended' : 'FACE 2.1 Safety Extended',
    'FACE 2.1 General Purpose' : 'FACE 2.1 General Purpose',
    'FACE 3.0 Security' : 'FACE 3.0 Security',
    'FACE 3.0 Safety Base' : 'FACE 3.0 Safety Base',
    'FACE 3.0 Safety Extended' : 'FACE 3.0 Safety Extended',
    'FACE 3.0 General Purpose' : 'FACE 3.0 General Purpose',
    'SCA 2.2.2 AEP' : 'SCA 2.2.2 AEP',
    'SCA 4.1 Ultra Lightweight AEP' : 'SCA 4.1 Ultra Lightweight AEP',
    'SCA 4.1 Lightweight AEP' : 'SCA 4.1 Lightweight AEP',
    'SCA 4.1 [Full] AEP' : 'SCA 4.1 [Full] AEP'
}
#
# Result categories, their display order/names, and the ReST boiler-plate
# emitted for each; the '@HEADER@' placeholder is substituted with the
# header file name when the text is rendered.
#
categories = {
    'order': ['supported', 'enosys', 'not-supported'],
    'name' : {
        'supported' : 'Supported',
        'enosys' : 'ENOSYS',
        'not-supported': 'Not supported'
    },
    'supported': ['The following methods and variables in ``<@HEADER@>``',
                  'are supported:',
                  ''],
    'not-supported': ['The following methods and variables in ``<@HEADER@>``',
                      'are not supported:',
                      ''],
    'enosys': ['The following methods in ``<@HEADER@>`` are implemented as',
               'stubs returning ``-1`` and setting ``errno`` to ``ENOSYS``:',
               '']
}
# Classification rules: for each tracked column (in 'order'), a cell value
# moves an API into a category if the API's current state is listed for
# that value (every API starts in the 'invalid' state).
cat_columns = {
    'order': ['rtems-net', 'rtems-impl'],
    'rtems-net': {
        'supported' : {
            'CTS-YES' : ['invalid'],
            'RT-YES' : ['invalid'],
            'HAND-YES': ['invalid']
        },
        'not-supported': {
            'CTS-NO' : ['invalid'],
            'RT-NO' : ['invalid'],
            'HAND-NO': ['invalid']
        }
    },
    'rtems-impl': {
        'enosys': {
            'ENOSYS': ['supported']
        }
    }
}
# ReST preamble emitted at the top of every generated document.
rst_defaults = {
    'header': ['',
               'This chapter has a subsection per header file to detail the methods',
               'provided by RTEMS that are in that header file.',
               '']
}
class error(Exception):
    """Raised for malformed compliance data or bad arguments."""
    pass
class compliance:
    """Loads the compliance spreadsheet (CSV) and renders ReST reports.

    The CSV must contain every column named in ``col_names``.  Each data
    row describes one API (method or variable) and the header file it
    lives in.
    """

    def __init__(self):
        # Populated by load(); maps:
        #   'columns' -> the CSV header row
        #   'headers' -> header file name -> [api, ...]
        #   'apis'    -> api name -> full CSV row
        self.data = None

    def load(self, name):
        """Parse the compliance CSV file *name* into ``self.data``.

        Raises ``error`` if a required column is missing or an API name
        appears more than once.
        """
        with open(name, 'r') as f:
            data = csv.reader(f, delimiter = ',', quotechar = '"')
            hdr = None
            rows = []
            for row in data:
                if hdr is None:
                    hdr = row  # first row is the column header
                else:
                    rows += [row]
        for col in col_names:
            if col_names[col] not in hdr:
                raise error('column not found: %s' % (col_names[col]))
        cdata = { 'columns': hdr, 'headers': {}, 'apis': {} }
        apic = hdr.index(col_names['api'])
        hfc = hdr.index(col_names['header'])
        for row in rows:
            api = row[apic]
            header = row[hfc]
            if len(api) == 0 or len(header) == 0:
                continue  # skip blank/partial rows
            if header not in cdata['headers']:
                cdata['headers'][header] = [api]
            else:
                cdata['headers'][header] += [api]
            if api in cdata['apis']:
                raise error('duplicate api: %s' % (api))
            cdata['apis'][api] = row
        self.data = cdata

    def summary(self, standard = 'RTEMS'):
        """Return the 'Summary' ReST section (list of lines) for *standard*."""
        results = { }
        for header in self.data['headers']:
            hr = self.process_header(header, standard)
            if 'invalid' in hr:
                # Bug fix: the exception was previously constructed but
                # never raised, silently ignoring inconsistent data.
                raise error('header contains "invalid": %s' % (header))
            for cat in hr:
                if cat not in results:
                    results[cat] = len(hr[cat])
                else:
                    results[cat] += len(hr[cat])
        if standard == 'RTEMS':
            std_line = 'The follow table summarizes RTEMS supported' \
                       ' methods for all tracked standards:'
        else:
            std_line = 'The follow table summarizes alignment with ' \
                       'the %s standard:' % (standard_names[standard])
        s = ['Summary',
             '=======',
             '',
             std_line,
             '']
        # Column widths: [category-name column, count column].
        cols = [0, 1]
        for cat in categories['order']:
            if len(categories['name'][cat]) > cols[0]:
                cols[0] = len(categories['name'][cat])
            if cat in results:
                num = '%d' % results[cat]
                if len(num) > cols[1]:
                    cols[1] = len(num)
        table_def = ' %s %s' % ('=' * cols[0], '=' * cols[1])
        s += [table_def]
        for cat in categories['order']:
            if cat in results:
                s += [' %-*s %d' % (cols[0], categories['name'][cat], results[cat])]
            else:
                s += [' %-*s %d' % (cols[0], categories['name'][cat], 0)]
        s += [table_def, '']
        return s

    def output(self, standard = 'RTEMS'):
        """Return the full ReST document (list of lines) for *standard*."""
        def _category_filter(text, patterns):
            # Substitute each placeholder (e.g. '@HEADER@') in the
            # category boiler-plate text, in place.
            for l in range(0, len(text)):
                for pat in patterns:
                    if pat in text[l]:
                        text[l] = text[l].replace(pat, patterns[pat])
            return text
        if standard not in standards:
            # Bug fix: previously the exception was constructed but not
            # raised (and the message contained a stray quote).
            raise error('invalid standard: %s' % (standard))
        s = rst_defaults['header'] + self.summary(standard)
        for header in sorted(self.data['headers'].keys()):
            hr = self.process_header(header, standard)
            if 'invalid' in hr:
                # Bug fix: raise instead of discarding the exception.
                raise error('header contains "invalid": %s' % (header))
            print_heading = True
            for cat in categories['order']:
                if cat in hr:
                    if print_heading:
                        s += ['``<%s>``' % (header),
                              '=' * (len(header) + 2),
                              '']
                        print_heading = False
                    patterns = { '@HEADER@': header }
                    cat_text = copy.copy(categories[cat])
                    _category_filter(cat_text, patterns)
                    s += cat_text
                    for api in hr[cat]:
                        s += ['* ``%s``' % (api)]
                    s += ['']
        return s

    def process_header(self, header, standard = 'RTEMS'):
        """Classify every API of *header* against *standard*.

        Returns a dict mapping a category ('supported', 'enosys',
        'not-supported', or 'invalid' for unclassifiable rows) to the
        list of API names in that category.
        """
        results = { }
        if standard != 'RTEMS':
            std_col = self.data['columns'].index(col_names[standard])
        else:
            std_col = -1  # sentinel: no per-standard filtering
        for api in sorted(self.data['headers'][header]):
            api_row = self.data['apis'][api]
            if std_col >= 0:
                # Bug fix: was 'std_col > 0', which would wrongly skip
                # the filter if the standard's column were column 0.
                if api_row[std_col] != 'INCL':
                    continue  # API is not part of this standard
            state = 'invalid'
            for test in cat_columns['order']:
                col = self.data['columns'].index(col_names[test])
                value = api_row[col]
                for test_state in cat_columns[test]:
                    if value in cat_columns[test][test_state]:
                        if state in cat_columns[test][test_state][value]:
                            state = test_state
            if state not in results:
                results[state] = [api]
            else:
                results[state] += [api]
        return results
if __name__ == "__main__":
try:
import pprint
pp = pprint.PrettyPrinter(indent=2)
if len(sys.argv) != 2:
raise error('not enough arguments')
c = compliance()
c.load(sys.argv[1])
for h in sorted(c.data['headers']):
print('-- %s' % (h), '-' * 50)
hr = c.process_header(h)
if 'invalid' in hr:
error('header contains invalid: %s' % (h))
hr = c.process_header(h, 'PSE51')
if 'invalid' in hr:
error('header contains invalid: %s' % (h))
pp.pprint(hr)
print('=' * 80)
print(os.linesep.join(c.output('PSE51')))
print('=' * 80)
print(os.linesep.join(c.output()))
for s in standards:
print('=-' * 40)
print(os.linesep.join(c.summary(s)))
except error as e:
print('error: %s' % (e), file = sys.stderr)
| 35.703704 | 85 | 0.467654 |
1df1dc4c37d6f637d67ea5f031a6cb478c11b08e | 406 | py | Python | scraper/migrations/0010_auto_20210103_1851.py | gorkemyontem/SWE-573-2020 | 6a9ca57d294066fcc0db640f45d38d7341754a68 | [
"MIT"
] | null | null | null | scraper/migrations/0010_auto_20210103_1851.py | gorkemyontem/SWE-573-2020 | 6a9ca57d294066fcc0db640f45d38d7341754a68 | [
"MIT"
] | 35 | 2020-11-02T17:06:35.000Z | 2021-03-10T07:56:03.000Z | scraper/migrations/0010_auto_20210103_1851.py | gorkemyontem/SWE-573-2020 | 6a9ca57d294066fcc0db640f45d38d7341754a68 | [
"MIT"
] | 1 | 2021-02-02T14:38:27.000Z | 2021-02-02T14:38:27.000Z | # Generated by Django 3.1.4 on 2021-01-03 15:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter ``comments.comment_type`` to a
    nullable ``CharField(max_length=500)``."""
    dependencies = [
        ('scraper', '0009_auto_20210103_1850'),
    ]
    operations = [
        migrations.AlterField(
            model_name='comments',
            name='comment_type',
            field=models.CharField(max_length=500, null=True),
        ),
    ]
| 21.368421 | 62 | 0.610837 |
f9d906007911c3b8eb623c743c9691a6327a497f | 4,298 | py | Python | tensorflow/virtual_root.__init__.py | Faagerholm/tensorflow | 98e30b8748eb018f33836ac9269db67ab60483ab | [
"Apache-2.0"
] | 1 | 2019-07-25T22:57:03.000Z | 2019-07-25T22:57:03.000Z | tensorflow/virtual_root.__init__.py | Faagerholm/tensorflow | 98e30b8748eb018f33836ac9269db67ab60483ab | [
"Apache-2.0"
] | null | null | null | tensorflow/virtual_root.__init__.py | Faagerholm/tensorflow | 98e30b8748eb018f33836ac9269db67ab60483ab | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow root package"""
from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
from __future__ import print_function as _print_function
import sys as _sys
import importlib as _importlib
import types as _types
# Since TensorFlow Python code now resides in tensorflow_core but TensorFlow
# ecosystem code (e.g. estimator, but also even tensorflow) imports tensorflow
# we need to do forwarding between the two. To do so, we use a lazy loader to
# load and forward the top level modules. We cannot use the LazyLoader defined
# by tensorflow at tensorflow/python/util/lazy_loader.py as to use that we would
# already need to import tensorflow. Hence, we define it inline.
class _LazyLoader(_types.ModuleType):
"""Lazily import a module so that we can forward it."""
# The lint error here is incorrect.
def __init__(self, local_name, parent_module_globals, name): # pylint: disable=super-on-old-class
self._local_name = local_name
self._parent_module_globals = parent_module_globals
super(_LazyLoader, self).__init__(name)
def _load(self):
"""Import the target module and insert it into the parent's namespace."""
module = _importlib.import_module(self.__name__)
self._parent_module_globals[self._local_name] = module
self.__dict__.update(module.__dict__)
return module
def __getattr__(self, item):
module = self._load()
return getattr(module, item)
def __dir__(self):
module = self._load()
return dir(module)
# Forwarding a module is as simple as lazy loading the module from the new path
# and then registering it to sys.modules using the old path
def _forward_module(old_name):
  """Alias *old_name* in ``sys.modules`` to its ``tensorflow_core``
  counterpart via a lazy loader, and return the registered module."""
  first, _, rest = old_name.partition(".")
  core_name = first + "_core" + ("." + rest if rest else "")
  short_name = core_name.rsplit(".", 1)[-1]
  proxy = _LazyLoader(short_name, globals(), core_name)
  return _sys.modules.setdefault(old_name, proxy)
# This list should contain all modules _immediately_ under tensorflow
_top_level_modules = [
    "tensorflow._api",
    "tensorflow.python",
    "tensorflow.tools",
    "tensorflow.core",
    "tensorflow.compiler",
    "tensorflow.lite",
    "tensorflow.keras",
    "tensorflow.contrib",
    "tensorflow.compat",
]
# Estimator needs to be handled separately so we can still allow both
# "import tensorflow_estimator" and "import tensorflow.estimator" to work.
# Only in the second case do we actually need to do forwarding; the first
# case already defines most of the hierarchy and eagerly forwarding would
# result in an import loop.
if "tensorflow_estimator" not in _sys.modules:
  _root_estimator = False
  _top_level_modules.append("tensorflow.estimator")
else:
  _root_estimator = True
# Lazy load all of the _top_level_modules, we don't need their names anymore
_top_level_modules = [_forward_module(m) for m in _top_level_modules]
# We still need all the names that are toplevel on tensorflow_core
from tensorflow_core import *
# We also need to bring in keras if available in tensorflow_core
# Above import * doesn't import it as __all__ is updated before keras is hooked
# NOTE(review): the bound name `e` in the except clauses below is unused.
try:
  from tensorflow_core import keras
except ImportError as e:
  pass
# Similarly for estimator, but only if this file is not read via a
# "import tensorflow_estimator" (same reasoning as above when forwarding
# estimator separately from the rest of the top level modules)
if not _root_estimator:
  try:
    from tensorflow_core import estimator
  except ImportError as e:
    pass
# And again for tensorboard
try:
  from tensorflow_core import tensorboard
except ImportError as e:
  pass
| 36.423729 | 100 | 0.750349 |
9eef30c549e035d512514348e2737dec8792bb4a | 38 | py | Python | error.py | Odogwudozilla/Scripts | bd6752777a3e7ea6e00ca99788a6edf4c8efde20 | [
"MIT"
] | null | null | null | error.py | Odogwudozilla/Scripts | bd6752777a3e7ea6e00ca99788a6edf4c8efde20 | [
"MIT"
] | 5 | 2020-02-25T14:51:09.000Z | 2022-02-26T04:29:51.000Z | error.py | Odogwudozilla/Scripts | bd6752777a3e7ea6e00ca99788a6edf4c8efde20 | [
"MIT"
] | null | null | null | print(1)
int(9)
int 9
print(2)
print 3 | 7.6 | 8 | 0.684211 |
19cd3711e12ba0e153770fc5e1996bcc862a8073 | 2,317 | py | Python | tests/ti_deps/deps/test_pool_slots_available_dep.py | ChaseKnowlden/airflow | 6b71eac1997a7c0db3b8e3aed6b4e65d01871440 | [
"Apache-2.0"
] | 15,947 | 2019-01-05T13:51:02.000Z | 2022-03-31T23:33:16.000Z | tests/ti_deps/deps/test_pool_slots_available_dep.py | ChaseKnowlden/airflow | 6b71eac1997a7c0db3b8e3aed6b4e65d01871440 | [
"Apache-2.0"
] | 14,603 | 2019-01-05T09:43:19.000Z | 2022-03-31T23:11:59.000Z | tests/ti_deps/deps/test_pool_slots_available_dep.py | ChaseKnowlden/airflow | 6b71eac1997a7c0db3b8e3aed6b4e65d01871440 | [
"Apache-2.0"
] | 8,429 | 2019-01-05T19:45:47.000Z | 2022-03-31T22:13:01.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest.mock import Mock, patch
from airflow.models import Pool
from airflow.ti_deps.dependencies_states import EXECUTION_STATES
from airflow.ti_deps.deps.pool_slots_available_dep import PoolSlotsAvailableDep
from airflow.utils.session import create_session
from tests.test_utils import db
class TestPoolSlotsAvailableDep(unittest.TestCase):
    """Unit tests for PoolSlotsAvailableDep, the task-instance dependency
    that gates scheduling on free slots in the task's pool."""

    def setUp(self):
        # Start from a clean slate and create the single pool used by the tests.
        db.clear_db_pools()
        with create_session() as session:
            test_pool = Pool(pool='test_pool')
            session.add(test_pool)
            session.commit()

    def tearDown(self):
        db.clear_db_pools()

    @patch('airflow.models.Pool.open_slots', return_value=0)
    def test_pooled_task_reached_concurrency(self, mock_open_slots):
        # No open slots -> the dependency must not be met.
        ti = Mock(pool='test_pool', pool_slots=1)
        assert not PoolSlotsAvailableDep().is_met(ti=ti)

    @patch('airflow.models.Pool.open_slots', return_value=1)
    def test_pooled_task_pass(self, mock_open_slots):
        # One open slot satisfies a task requesting one slot.
        ti = Mock(pool='test_pool', pool_slots=1)
        assert PoolSlotsAvailableDep().is_met(ti=ti)

    @patch('airflow.models.Pool.open_slots', return_value=0)
    def test_running_pooled_task_pass(self, mock_open_slots):
        # A task already in an execution state holds its slot, so the dep is
        # met even though no additional slots are open.
        for state in EXECUTION_STATES:
            ti = Mock(pool='test_pool', state=state, pool_slots=1)
            assert PoolSlotsAvailableDep().is_met(ti=ti)

    def test_task_with_nonexistent_pool(self):
        # A pool that does not exist can never provide slots.
        ti = Mock(pool='nonexistent_pool', pool_slots=1)
        assert not PoolSlotsAvailableDep().is_met(ti=ti)
| 38.616667 | 79 | 0.739318 |
149bde451ad4aa9fe367b20881058714849ebc68 | 20,468 | py | Python | tensorflow/python/ops/nn_grad.py | atfkaka/tensorflow | 5657d0dee8d87f4594b3e5902ed3e3ca8d6dfc0a | [
"Apache-2.0"
] | 101 | 2016-12-03T11:40:52.000Z | 2017-12-23T02:02:03.000Z | tensorflow/python/ops/nn_grad.py | atfkaka/tensorflow | 5657d0dee8d87f4594b3e5902ed3e3ca8d6dfc0a | [
"Apache-2.0"
] | 9 | 2016-12-14T03:27:46.000Z | 2017-09-13T02:29:07.000Z | tensorflow/python/ops/nn_grad.py | atfkaka/tensorflow | 5657d0dee8d87f4594b3e5902ed3e3ca8d6dfc0a | [
"Apache-2.0"
] | 47 | 2016-12-04T12:37:24.000Z | 2018-01-14T18:13:07.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in nn_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gen_math_ops
@ops.RegisterGradient("Conv2DBackpropInput")
def _Conv2DBackpropInputGrad(op, grad):
  """The derivatives for deconvolution.

  Args:
    op: the Deconvolution op.
    grad: the tensor representing the gradient w.r.t. the output

  Returns:
    the gradients w.r.t. the input and the filter
  """
  # inputs are [input_sizes, filter, out_backprop]; input_sizes is an integer
  # shape tensor, so its gradient slot is None.
  return [None,
          nn_ops.conv2d_backprop_filter(grad, array_ops.shape(op.inputs[1]),
                                        op.inputs[2], op.get_attr("strides"),
                                        op.get_attr("padding"),
                                        op.get_attr("use_cudnn_on_gpu"),
                                        op.get_attr("data_format")),
          nn_ops.conv2d(grad, op.inputs[1], op.get_attr("strides"),
                        op.get_attr("padding"), op.get_attr("use_cudnn_on_gpu"),
                        op.get_attr("data_format"))]

@ops.RegisterGradient("Conv2DBackpropFilter")
def _Conv2DBackpropFilterGrad(op, grad):
  """Gradients for Conv2DBackpropFilter: inputs are [input, filter_sizes,
  out_backprop]; the filter_sizes shape tensor gets no gradient."""
  return [
      nn_ops.conv2d_backprop_input(
          array_ops.shape(op.inputs[0]), grad, op.inputs[2],
          op.get_attr("strides"),
          op.get_attr("padding"),
          op.get_attr("use_cudnn_on_gpu"),
          op.get_attr("data_format")),
      None,
      nn_ops.conv2d(
          op.inputs[0], grad,
          op.get_attr("strides"),
          op.get_attr("padding"),
          op.get_attr("use_cudnn_on_gpu"),
          op.get_attr("data_format"))
  ]
@ops.RegisterGradient("Conv3D")
def _Conv3DGrad(op, grad):
  """Gradients for Conv3D w.r.t. its input volume and its filter."""
  return [nn_ops.conv3d_backprop_input_v2(array_ops.shape(op.inputs[0]),
                                          op.inputs[1],
                                          grad,
                                          strides=op.get_attr("strides"),
                                          padding=op.get_attr("padding")),
          nn_ops.conv3d_backprop_filter_v2(op.inputs[0],
                                           array_ops.shape(op.inputs[1]),
                                           grad,
                                           strides=op.get_attr("strides"),
                                           padding=op.get_attr("padding"))]

@ops.RegisterGradient("Conv3DBackpropInputV2")
def _Conv3DBackpropInputGrad(op, grad):
  """Gradients for Conv3DBackpropInputV2; the input_sizes tensor gets None."""
  return [None,
          nn_ops.conv3d_backprop_filter_v2(grad,
                                           array_ops.shape(op.inputs[1]),
                                           op.inputs[2],
                                           strides=op.get_attr("strides"),
                                           padding=op.get_attr("padding")),
          nn_ops.conv3d(grad,
                        op.inputs[1],
                        strides=op.get_attr("strides"),
                        padding=op.get_attr("padding"))]

@ops.RegisterGradient("Conv3DBackpropFilterV2")
def _Conv3DBackpropFilterGrad(op, grad):
  """Gradients for Conv3DBackpropFilterV2; the filter_sizes tensor gets None."""
  return [nn_ops.conv3d_backprop_input_v2(array_ops.shape(op.inputs[0]),
                                          grad,
                                          op.inputs[2],
                                          strides=op.get_attr("strides"),
                                          padding=op.get_attr("padding")),
          None,
          nn_ops.conv3d(op.inputs[0],
                        grad,
                        strides=op.get_attr("strides"),
                        padding=op.get_attr("padding"))]

@ops.RegisterGradient("AvgPool3D")
def _AvgPool3DGrad(op, grad):
  """Gradient for AvgPool3D: spread grad evenly over each pooling window."""
  return nn_ops.avg_pool3d_grad(
      array_ops.shape(op.inputs[0]),
      grad,
      ksize=op.get_attr("ksize"),
      strides=op.get_attr("strides"),
      padding=op.get_attr("padding"))

@ops.RegisterGradient("MaxPool3D")
def _MaxPool3DGrad(op, grad):
  """Gradient for MaxPool3D: route grad back to the max elements only."""
  return nn_ops.max_pool3d_grad(op.inputs[0],
                                op.outputs[0],
                                grad,
                                ksize=op.get_attr("ksize"),
                                strides=op.get_attr("strides"),
                                padding=op.get_attr("padding"))
@ops.RegisterGradient("Softmax")
def _SoftmaxGrad(op, grad_softmax):
  """The derivative of the softmax nonlinearity.

  We assume that probs is of shape [batch_size * dim]
  The formula for dsoftmax / dx = (diag(softmax) - softmax * softmax').
  This matrix is diagonal minus a rank one matrix, so it is easy to implement
  as follows:

    grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax

  Args:
     op: the Softmax op.
     grad_softmax: the tensor representing the gradient w.r.t. the
       softmax output.

  Returns:
     gradient w.r.t the input to the softmax

  """
  # TODO(ilyasu): assert that the tensor has two dimensions at
  # graph-construction time? Alternatively: do different things
  # depending on the dimensionality of the input tensors.
  softmax = op.outputs[0]
  # Subtract the per-row inner product <grad, softmax> before scaling by
  # softmax, which realizes the diag-minus-rank-one Jacobian product above.
  grad_x = ((grad_softmax - array_ops.reshape(
      math_ops.reduce_sum(grad_softmax * softmax, [1]), [-1, 1])) * softmax)
  return grad_x

@ops.RegisterGradient("LogSoftmax")
def _LogSoftmaxGrad(op, grad):
  """The gradient for log_softmax.

      log_softmax = input - log(sum(exp(input))
      dlog_softmax/dinput = diag - softmax(input)

  Args:
    op: The log softmax op.
    grad: The tensor representing the gradient w.r.t. the output.

  Returns:
    The gradients w.r.t. the input.
  """
  # exp(log_softmax) recovers the softmax without recomputing it.
  softmax = math_ops.exp(op.outputs[0])
  return grad - math_ops.reduce_sum(grad, 1, keep_dims=True) * softmax
@ops.RegisterGradient("BiasAdd")
def _BiasAddGrad(op, received_grad):
  """Return the gradients for the 2 inputs of bias_op.

  The first input of the op is the tensor t, and its gradient is
  just the gradient the op received.

  The second input of the op is the bias vector which has one fewer
  dimension than "received_grad" (the batch dimension.)  Its gradient is the
  received gradient Summed on the batch dimension, which is the first dimension.

  Args:
    op: The BiasOp for which we need to generate gradients.
    received_grad: Tensor.  The gradients passed to the BiasOp.

  Returns:
    Two tensors, the first one for the "tensor" input of the BiasOp,
    the second one for the "bias" input of the BiasOp.
  """
  try:
    data_format = op.get_attr("data_format")
  except ValueError:
    # Older graphs may not carry the attribute; let the kernel use its default.
    data_format = None
  return (received_grad, gen_nn_ops.bias_add_grad(out_backprop=received_grad,
                                                  data_format=data_format))

@ops.RegisterGradient("BiasAddGrad")
def _BiasAddGradGrad(op, received_grad):
  """Gradient for the BiasAddGrad op.

  Args:
    op: BiasAddGrad op for which we are calculating gradients.
    received_grad: The gradients passed to the BiasAddGrad op.

  Returns:
    A single gradient Tensor for the input to BiasAddGrad (which
    is the gradient of the bias term in BiasAdd)
  """
  try:
    data_format = op.get_attr("data_format")
  except ValueError:
    data_format = None
  shape = array_ops.shape(op.inputs[0])
  rank = array_ops.rank(op.inputs[0])
  bias_shape = array_ops.shape(received_grad)
  # Broadcast the 1-D bias gradient back up to the shape of the original
  # BiasAdd input: reshape to put the bias on the channel axis, then tile
  # over every other axis.
  if data_format == b"NCHW":
    # Channel axis is third-from-last; leave the trailing spatial dims alone.
    expanded_shape = array_ops.concat(
        0,
        [array_ops.ones_like(shape[:-3]), bias_shape, array_ops.ones_like(shape[-2:])]
    )
    tile_mults = array_ops.concat(0, [shape[:-3], [1], shape[-2:]])
  else:
    # NHWC (default): channel axis is the last one.
    expanded_shape = array_ops.concat(0, [array_ops.ones_like(shape[:-1]), bias_shape])
    tile_mults = array_ops.concat(0, [shape[:-1], [1]])
  expanded_grad = array_ops.reshape(received_grad, expanded_shape)
  return array_ops.tile(expanded_grad, tile_mults)

@ops.RegisterGradient("BiasAddV1")
def _BiasAddGradV1(unused_bias_op, received_grad):
  """Return the gradients for the 2 inputs of bias_op.

  The first input of unused_bias_op is the tensor t, and its gradient is
  just the gradient the unused_bias_op received.

  The second input of unused_bias_op is the bias vector which has one fewer
  dimension than "received_grad" (the batch dimension.)  Its gradient is the
  received gradient Summed on the batch dimension, which is the first dimension.

  Args:
    unused_bias_op: The BiasOp for which we need to generate gradients.
    received_grad: Tensor.  The gradients passed to the BiasOp.

  Returns:
    Two tensors, the first one for the "tensor" input of the BiasOp,
    the second one for the "bias" input of the BiasOp.
  """
  # Sum over every axis except the last (the bias axis).
  reduction_dim_tensor = math_ops.range(array_ops.rank(received_grad) - 1)
  return (received_grad, math_ops.reduce_sum(received_grad,
                                             reduction_dim_tensor))
@ops.RegisterGradient("Relu")
def _ReluGrad(op, grad):
  """Gradient for Relu: pass grad through where the *output* was positive."""
  return gen_nn_ops._relu_grad(grad, op.outputs[0])

@ops.RegisterGradient("EluGrad")
def _EluGradGrad(op, grad):
  """Second-order gradient for Elu (the gradient of EluGrad)."""
  x = op.inputs[1]
  # For x < 0, d/dx elu_grad = elu(x) + 1 scaled by grad; elsewhere it is 0.
  return (gen_nn_ops._elu_grad(grad, op.outputs[0]),
          gen_math_ops.select(x < 0., gen_nn_ops._elu_grad(grad, op.outputs[0] + 1),
                              array_ops.zeros(shape = array_ops.shape(x), dtype = x.dtype)))

@ops.RegisterGradient("Relu6")
def _Relu6Grad(op, grad):
  """Gradient for Relu6; note it takes the op *input* (features), unlike Relu
  above which uses the op output."""
  return gen_nn_ops._relu6_grad(grad, op.inputs[0])

@ops.RegisterGradient("Elu")
def _EluGrad(op, grad):
  """Gradient for Elu, computed from the op output (activations)."""
  return gen_nn_ops._elu_grad(grad, op.outputs[0])

@ops.RegisterGradient("Softplus")
def _SoftplusGrad(op, grad):
  """Gradient for Softplus, computed from the op input (features)."""
  return gen_nn_ops._softplus_grad(grad, op.inputs[0])

@ops.RegisterGradient("Softsign")
def _SoftsignGrad(op, grad):
  """Gradient for Softsign, computed from the op input (features)."""
  return gen_nn_ops._softsign_grad(grad, op.inputs[0])

@ops.RegisterGradient("ReluGrad")
def _ReluGradGrad(op, grad):
  """Second-order gradient for Relu: the grad input passes through the same
  mask; the features input gets a zero gradient."""
  x = op.inputs[1]
  return (gen_nn_ops._relu_grad(grad, x), array_ops.zeros(
      shape=array_ops.shape(x), dtype=x.dtype))
def _BroadcastMul(vec, mat):
  """Multiply after broadcasting vec to match dimensions of mat.

  Args:
    vec: A 1-D tensor of dimension [D0]
    mat: A 2-D tensor of dimension [D0, D1]

  Returns:
    A tensor of dimension [D0, D1], the result of vec * mat
  """
  # Append a trailing unit axis ([D0] -> [D0, 1]) so elementwise
  # multiplication broadcasts across mat's columns.
  return array_ops.expand_dims(vec, -1) * mat
@ops.RegisterGradient("SoftmaxCrossEntropyWithLogits")
def _SoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _):
  """Gradient for softmax cross entropy w.r.t. the logits only."""
  # grad_0 is the backprop for cost, and we multiply it with the gradients
  # (which is output[1])
  # There is no gradient for the labels
  return _BroadcastMul(grad_0, op.outputs[1]), None

@ops.RegisterGradient("SparseSoftmaxCrossEntropyWithLogits")
def _SparseSoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _):
  """Gradient for sparse softmax cross entropy w.r.t. the logits only."""
  # grad_0 is the backprop for cost, and we multiply it with the gradients
  # (which is output[1])
  # There is no gradient for the labels
  return _BroadcastMul(grad_0, op.outputs[1]), None

@ops.RegisterGradient("Conv2D")
def _Conv2DGrad(op, grad):
  """Gradients for Conv2D w.r.t. its input and its filter."""
  return [nn_ops.conv2d_backprop_input(
      array_ops.shape(op.inputs[0]), op.inputs[1], grad, op.get_attr("strides"),
      op.get_attr("padding"), op.get_attr("use_cudnn_on_gpu"),
      op.get_attr("data_format")),
          nn_ops.conv2d_backprop_filter(op.inputs[0],
                                        array_ops.shape(op.inputs[1]), grad,
                                        op.get_attr("strides"),
                                        op.get_attr("padding"),
                                        op.get_attr("use_cudnn_on_gpu"),
                                        op.get_attr("data_format"))]

@ops.RegisterGradient("DepthwiseConv2dNative")
def _DepthwiseConv2dNativeGrad(op, grad):
  """Gradients for DepthwiseConv2dNative w.r.t. input and filter."""
  return [
      nn_ops.depthwise_conv2d_native_backprop_input(
          array_ops.shape(op.inputs[0]), op.inputs[1], grad,
          op.get_attr("strides"), op.get_attr("padding")),
      nn_ops.depthwise_conv2d_native_backprop_filter(
          op.inputs[0], array_ops.shape(op.inputs[1]), grad,
          op.get_attr("strides"), op.get_attr("padding"))
  ]

@ops.RegisterGradient("Dilation2D")
def _Dilation2DGrad(op, grad):
  """Gradients for morphological dilation w.r.t. input and filter."""
  return [nn_ops.dilation2d_backprop_input(op.inputs[0], op.inputs[1], grad,
                                           op.get_attr("strides"),
                                           op.get_attr("rates"),
                                           op.get_attr("padding")),
          nn_ops.dilation2d_backprop_filter(op.inputs[0], op.inputs[1], grad,
                                            op.get_attr("strides"),
                                            op.get_attr("rates"),
                                            op.get_attr("padding"))]
@ops.RegisterGradient("LRN")
def _LRNGrad(op, grad):
  """Gradient for local response normalization, forwarding the op's attrs."""
  depth_radius = op.get_attr("depth_radius")
  bias = op.get_attr("bias")
  alpha = op.get_attr("alpha")
  beta = op.get_attr("beta")
  return [gen_nn_ops._lrn_grad(grad, op.inputs[0], op.outputs[0], depth_radius,
                               bias, alpha, beta)]

@ops.RegisterGradient("AvgPool")
def _AvgPoolGrad(op, grad):
  """Gradient for AvgPool: spread grad evenly over each pooling window."""
  return gen_nn_ops._avg_pool_grad(
      array_ops.shape(op.inputs[0]),
      grad,
      op.get_attr("ksize"),
      op.get_attr("strides"),
      op.get_attr("padding"),
      data_format=op.get_attr("data_format"))

@ops.RegisterGradient("MaxPool")
def _MaxPoolGrad(op, grad):
  """Gradient for MaxPool: route grad back to the max elements only."""
  return gen_nn_ops._max_pool_grad(op.inputs[0],
                                   op.outputs[0],
                                   grad,
                                   op.get_attr("ksize"),
                                   op.get_attr("strides"),
                                   padding=op.get_attr("padding"),
                                   data_format=op.get_attr("data_format"))
@ops.RegisterGradient("FractionalMaxPool")
def _FractionalMaxPoolGrad(op, grad_0, unused_grad_1, unused_grad_2):
  """Returns gradient for FractionalMaxPool.

  Since FractionalMaxPool has three outputs, there are three gradients passed in
  for each of the outputs. Only the first one is useful, the other two gradients
  are empty.

  Args:
    op: The FractionalMaxPoolOp.
    grad_0: Gradient with respect to op.outputs[0]
    unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.
    unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.

  Returns:
    Input backprop for FractionalMaxPool op.
  """
  # pylint: disable=protected-access
  # row_seq/col_seq (outputs[1], outputs[2]) are passed back in so the grad
  # kernel can reconstruct the same pooling regions.
  return gen_nn_ops._fractional_max_pool_grad(op.inputs[0], op.outputs[0],
                                              grad_0, op.outputs[1],
                                              op.outputs[2],
                                              op.get_attr("overlapping"))

@ops.RegisterGradient("FractionalAvgPool")
def _FractionalAvgPoolGrad(op, grad_0, unused_grad_1, unused_grad_2):
  """Returns gradient for FractionalAvgPool.

  Since FractionalAvgPool has three outputs, there are three gradients passed in
  for each of the outputs. Only the first one is useful, the other two gradients
  are empty.

  Args:
    op: The FractionalAvgPoolOp.
    grad_0: Gradient with respect to op.outputs[0]
    unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.
    unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.

  Returns:
    Input backprop for FractionalAvgPool op.
  """
  # pylint: disable=protected-access
  return gen_nn_ops._fractional_avg_pool_grad(op.inputs[0].get_shape(), grad_0,
                                              op.outputs[1], op.outputs[2],
                                              op.get_attr("overlapping"))
@ops.RegisterGradient("BatchNormWithGlobalNormalization")
def _BatchNormWithGlobalNormalizationGrad(op, grad):
  """Return the gradients for the 5 inputs of BatchNormWithGlobalNormalization.

  We do not backprop anything for the mean and var intentionally as they are
  not being trained with backprop in the operation.

  Args:
    op: The BatchNormOp for which we need to generate gradients.
    grad: Tensor.  The gradients passed to the BatchNormOp.

  Returns:
    dx: Backprop for input, which is (grad * (g * rsqrt(v + epsilon)))
    dm: Backprop for mean, which is
        sum_over_rest(grad * g) * (-1 / rsqrt(v + epsilon))
    dv: Backprop for variance, which is
        sum_over_rest(grad * g * (x - m)) * (-1/2) * (v + epsilon) ^ (-3/2)
    db: Backprop for beta, which is grad reduced in all except the
        last dimension.
    dg: Backprop for gamma, which is (grad * ((x - m) * rsqrt(v + epsilon)))
  """
  dx, dm, dv, db, dg = gen_nn_ops._batch_norm_with_global_normalization_grad(
      op.inputs[0], op.inputs[1], op.inputs[2], op.inputs[4], grad,
      op.get_attr("variance_epsilon"), op.get_attr("scale_after_normalization"))
  return dx, dm, dv, db, dg

@ops.RegisterGradient("FusedBatchNorm")
def _FusedBatchNormGrad(op, *grad):
  """Return the gradients for the 3 inputs of BatchNorm.

  Args:
    op: The BatchNormOp for which we need to compute gradients.
    *grad: An argument list for tensors of gradients wrt the outputs
          with grad[0] as grad_y.

  Returns:
    grad_x: gradient for x, which is scale * rsqrt(variance + epsilon) *
            [grad_y - mean(grad_y) - (x - mean(x)) *
            mean(grad_y * (x - mean(x))) / (variance + epsilon)]
    grad_scale: gradient for scale, which is sum(grad_y * (x - mean(x)) *
                rsqrt(variance + epsilon))
    grad_offset: gradient for offset, which is sum(grad_y)
  """
  # outputs[3]/outputs[4] are the saved batch mean/variance from the forward
  # pass; feeding them back avoids recomputing the statistics.
  return gen_nn_ops.fused_batch_norm_grad(
      grad[0],
      op.inputs[0],
      op.inputs[1],
      op.outputs[3],
      op.outputs[4],
      epsilon=op.get_attr("epsilon"),
      data_format=op.get_attr("data_format"),
      is_training=op.get_attr("is_training"))
@ops.RegisterGradient("L2Loss")
def _L2LossGrad(op, grad):
  """Return the gradients for L2Loss.

  Args:
    op: The L2LossOp for which we need to generate gradients.
    grad: Tensor containing a single number.

  Returns:
    The gradient, which is (x * grad).
  """
  return op.inputs[0] * grad

@ops.RegisterGradient("TopK")
@ops.RegisterGradient("TopKV2")
def _TopKGrad(op, grad, _):
  """Return the gradients for TopK.

  Args:
    op: The TopKOp for which we need to generate gradients.
    grad: Tensor. The gradients passed to the TopKOp.

  Returns:
    A list of two tensors, the first being the gradient w.r.t to the input and
    TopK, and the second being the gradient w.r.t. to the indices (all zero).
  """
  in_shape = array_ops.shape(op.inputs[0])
  ind_shape = array_ops.shape(op.outputs[1])
  ind_lastdim = array_ops.gather(ind_shape, array_ops.size(ind_shape) - 1)
  # Flatten indices to 2D.
  ind_2d = array_ops.reshape(op.outputs[1], array_ops.pack([-1, ind_lastdim]))
  in_lastdim = array_ops.gather(in_shape, array_ops.size(in_shape) - 1)
  outerdim = array_ops.shape(ind_2d)[0]
  # Compute linear indices (flattened to 1D).
  ind = array_ops.reshape(ind_2d + array_ops.expand_dims(
      math_ops.range(0, outerdim * in_lastdim, in_lastdim), -1), [-1])
  # Substitute grad to appropriate locations and fill the rest with zeros,
  # finally reshaping it to the original input shape.
  return [array_ops.reshape(
      sparse_ops.sparse_to_dense(ind,
                                 array_ops.reshape(
                                     math_ops.reduce_prod(in_shape), [1]),
                                 array_ops.reshape(grad, [-1]),
                                 validate_indices=False),
      in_shape), array_ops.zeros(
          [], dtype=dtypes.int32)]
6b4e7b1affa4a0e3e3441b4e47e62206e858d633 | 1,845 | py | Python | model/encoder.py | DeepLearnXMU/H-RNNSearch | a28cb555984e920e317fc9376cec12924f422f69 | [
"MIT"
] | 9 | 2018-11-16T12:46:10.000Z | 2022-02-04T16:22:39.000Z | model/encoder.py | DeepLearnXMU/H-RNNSearch | a28cb555984e920e317fc9376cec12924f422f69 | [
"MIT"
] | null | null | null | model/encoder.py | DeepLearnXMU/H-RNNSearch | a28cb555984e920e317fc9376cec12924f422f69 | [
"MIT"
] | null | null | null | import theano
import theano.tensor as T
import nn
import ops
def gru_encoder(cell, inputs, mask, initid, initial_state=None, dtype=None):
if not isinstance(cell, nn.rnn_cell.rnn_cell):
raise ValueError("cell is not an instance of rnn_cell")
if isinstance(inputs, (list, tuple)):
raise ValueError("inputs must be a tensor, not list or tuple")
def loop_fn(inputs, mask, initid, state):
mask = mask[:, None]
state = (1.0 - initid)[:, None] * state
output, next_state = cell(inputs, state)
next_state = (1.0 - mask) * state + mask * next_state
return next_state
if initial_state is None:
batch = inputs.shape[1]
state_size = cell.state_size
initial_state = theano.tensor.zeros([batch, state_size], dtype=dtype)
seq = [inputs, mask, initid]
states = ops.scan(loop_fn, seq, [initial_state])
return states
class Encoder:
    """Bidirectional GRU encoder: runs the same cell forward and backward
    over the input sequence and returns both state sequences."""
    def __init__(self, dim_x, dim_hid):
        # dim_x: input embedding size; dim_hid: hidden state size.
        self.dim_x = dim_x
        self.dim_hid = dim_hid
        self.cell = nn.rnn_cell.gru_cell([dim_x, dim_hid])
    def forward(self, x_embedded, mask, initid, initial_state=None, dtype=None, scope=None):
        """Encode x_embedded in both directions.

        Returns:
            (fd_states, bd_states): forward and backward hidden-state
            sequences, both in original (un-reversed) time order.
        """
        scope = scope or "encoder"
        cell = self.cell
        with ops.variable_scope(scope, dtype=dtype):
            with ops.variable_scope("forward"):
                fd_states = gru_encoder(cell, x_embedded, mask, initid, initial_state, dtype)
            with ops.variable_scope("backward"):
                # roll(-1) presumably re-aligns the restart flags with the
                # time-reversed sequence — TODO confirm against data layout.
                initid = theano.tensor.roll(initid, -1, axis=0)
                x_embedded = x_embedded[::-1]
                mask = mask[::-1]
                initid = initid[::-1]
                bd_states = gru_encoder(cell, x_embedded, mask, initid, initial_state, dtype)
                # Reverse back so both directions share the same time order.
                bd_states = bd_states[::-1]
        return fd_states, bd_states
6c1007147cc91568ddc7fedefb205330ad11b65c | 2,500 | py | Python | python/rbin/generate_etfbeta.py | CodeApprenticeRai/newt | 0e07a87aa6b8d4b238c1a9fd3fef363133866c57 | [
"Apache-2.0"
] | 1 | 2021-06-21T12:23:55.000Z | 2021-06-21T12:23:55.000Z | python/rbin/generate_etfbeta.py | CodeApprenticeRai/newt | 0e07a87aa6b8d4b238c1a9fd3fef363133866c57 | [
"Apache-2.0"
] | null | null | null | python/rbin/generate_etfbeta.py | CodeApprenticeRai/newt | 0e07a87aa6b8d4b238c1a9fd3fef363133866c57 | [
"Apache-2.0"
] | 6 | 2019-10-17T21:16:21.000Z | 2020-10-19T08:27:01.000Z | #!/usr/bin/env python
import os
import sys
import util
from gzip import GzipFile
from data_sources import file_source
import datafiles
def main():
util.check_include()
util.set_log_file()
#get last calcres of previous day
prevDay = util.exchangeTradingOffset(os.environ['PRIMARY_EXCHANGE'],os.environ['DATE'],-1)
fs = file_source.FileSource(os.environ['RUN_DIR'] + '/../' + str(prevDay) + '/calcres')
calcresFiles = fs.list(r'calcres.*\.txt\.gz')
if len(calcresFiles) == 0:
util.error("Failed to locate calcres file")
sys.exit(1)
calcresFiles.sort(key=lambda x: x[0], reverse=True)
lastCalcresFile = os.environ['RUN_DIR'] + '/../' + str(prevDay) + '/calcres/' + calcresFiles[0][0]
secidParams = {}
for line in GzipFile(lastCalcresFile, 'r'):
if line.startswith('FCOV'): continue
secid, name, datatype, datetime, value, currency, born = line.split('|')
if int(secid) not in secidParams:
secidParams[int(secid)] = {}
if name == 'F:BBETA':
secidParams[int(secid)]['BBETA'] = float(value)
elif name == 'F:ASE_BETA90':
secidParams[int(secid)]['ASE_BETA'] = float(value)
elif name == 'CAPITALIZATION':
secidParams[int(secid)]['CAP'] = float(value)
#get tickers
tic2sec, sec2tic = datafiles.load_tickers(os.environ['RUN_DIR'] + '/tickers.txt')
etfbetaFilename = os.environ['RUN_DIR'] + '/exec/etfbetafile'
etfbetaFile = open(etfbetaFilename, 'w')
etfbetaFile.write('#ETF BETA MKT-CAP\n')
tickers = tic2sec.keys()
tickers.sort()
count = 0
for ticker in tickers:
secid = tic2sec.get(ticker, None)
if secid not in secidParams:
continue
bbeta = secidParams[secid].get('BBETA', None)
asebeta = secidParams[secid].get('ASE_BETA', None)
mcap = secidParams[secid].get('CAP', None)
if bbeta is None or asebeta is None or mcap is None:
util.error('Error while getting data for secid {}: ticker={}, bbeta={}, asebeta={}, mcap={}'.format(secid, ticker, bbeta, asebeta, mcap))
continue
beta = 0.5 * (bbeta + asebeta)
etfbetaFile.write('{0},SPY,{1:.3f},{2:.3f}\n'.format(ticker,beta,mcap))
count += 1
etfbetaFile.close()
print 'Finished writing etfbeta file: {} for {} tickers'.format(etfbetaFilename, count)
if __name__ == '__main__':
main()
| 37.313433 | 149 | 0.6108 |
9b5eba7c483fbee78330cf2dc466e675e141f93c | 2,685 | py | Python | example_problems/problems_to_be_done/map/makeInputs-suite.py | romeorizzi/TALight | 2b694cb487f41dd0d36d7aa39f5c9c5a21bfc18e | [
"MIT"
] | 4 | 2021-06-27T13:27:24.000Z | 2022-03-24T10:46:28.000Z | example_problems/problems_to_be_done/map/makeInputs-suite.py | romeorizzi/TALight | 2b694cb487f41dd0d36d7aa39f5c9c5a21bfc18e | [
"MIT"
] | 1 | 2021-01-23T06:50:31.000Z | 2021-03-17T15:35:18.000Z | example_problems/problems_to_be_done/map/makeInputs-suite.py | romeorizzi/TALight | 2b694cb487f41dd0d36d7aa39f5c9c5a21bfc18e | [
"MIT"
] | 5 | 2021-04-01T15:21:57.000Z | 2022-01-29T15:07:38.000Z | #!/usr/bin/env python3
from sys import argv, exit, stderr
import os
INPUT_FOLDER = argv[1]
INPUT_FORMAT = argv[2]
if INPUT_FORMAT == "dat":
GENERATOR="./instance-generators/dat-generator.sh"
if INPUT_FORMAT == "txt":
GENERATOR="./instance-generators/txt-generator.sh"
os.system(f"rm -rf {INPUT_FOLDER}")
os.system(f"mkdir {INPUT_FOLDER}")
def my_system_run(command_string):
print("makeInputs.py:"+command_string)
if os.system(command_string) != 0:
print("\nIl seguente comando lanciato da makeInputs.py ha avuto qualche problema.\nmakeInputs.py:"+command_string+"\nEsecuzione di makeInputs.py interrotta a questo punto.")
exit(1)
# Goal 1 instances:
# A few inputs hard-coded in the generator (could have been in separated files and here we could simply copy them):
my_system_run(f"cat examples/input_1.{INPUT_FORMAT} > {INPUT_FOLDER}/input_1.{INPUT_FORMAT}")
#parameters for generator.cpp:
# <N> <no_mines> <seed>
# Goal 2 instances:
for i in range(2,5):
my_system_run(f"{GENERATOR} {i} 1 {777+i} > {INPUT_FOLDER}/input_{i}.{INPUT_FORMAT}")
# my_system_run(f"./instance-generators/check_is_goal2-instance.py < {INPUT_FOLDER}/input_{i}.{INPUT_FORMAT}")
# Goal 3 instances:
for i in range(5,11):
my_system_run(f"{GENERATOR} {i//2} 0 {777+i} > {INPUT_FOLDER}/input_{i}.{INPUT_FORMAT}")
# my_system_run(f"./instance-generators/check_is_goal3-instance.py < {INPUT_FOLDER}/input_{i}.{INPUT_FORMAT}")
# Goal 4 instances:
for i in range(11,13):
my_system_run(f"{GENERATOR} 10 0 {777+i} > {INPUT_FOLDER}/input_{i}.{INPUT_FORMAT}")
# my_system_run(f"./instance-generators/check_is_goal4-instance.py < {INPUT_FOLDER}/input_{i}.{INPUT_FORMAT}")
# Goal 5 instances:
for i in range(13,16):
my_system_run(f"{GENERATOR} 20 0 {777+i} > {INPUT_FOLDER}/input_{i}.{INPUT_FORMAT}")
# my_system_run(f"./instance-generators/check_is_goal5-instance.py < {INPUT_FOLDER}/input_{i}.{INPUT_FORMAT}")
# Goal 6 instances:
for i in range(16,20):
my_system_run(f"{GENERATOR} 30 0 {777+i} > {INPUT_FOLDER}/input_{i}.{INPUT_FORMAT}")
# my_system_run(f"./instance-generators/check_is_goal6-instance.py < {INPUT_FOLDER}/input_{i}.{INPUT_FORMAT}")
# Goal 7 instances:
for i in range(20,21):
my_system_run(f"{GENERATOR} 50 0 {777+i} > {INPUT_FOLDER}/input_{i}.{INPUT_FORMAT}")
# my_system_run(f"./instance-generators/check_is_goal7-instance.py < {INPUT_FOLDER}/input_{i}.{INPUT_FORMAT}")
# Goal 8 instances:
for i in range(21,22):
my_system_run(f"{GENERATOR} 100 0 {777+i} > {INPUT_FOLDER}/input_{i}.{INPUT_FORMAT}")
# my_system_run(f"./instance-generators/check_is_goal8-instance.py < {INPUT_FOLDER}/input_{i}.{INPUT_FORMAT}")
| 42.619048 | 181 | 0.721415 |
c4be2cf01d3521e30089a4eebf95985d5ddd9f69 | 456 | py | Python | GUI/demo02.py | yueludanfeng/Test | 8ea2bd2a8602ed51379c7a89ea1fdf370b8f1ca7 | [
"Apache-2.0"
] | 10 | 2019-01-20T06:52:30.000Z | 2022-03-22T03:35:47.000Z | GUI/demo02.py | yueludanfeng/Test | 8ea2bd2a8602ed51379c7a89ea1fdf370b8f1ca7 | [
"Apache-2.0"
] | null | null | null | GUI/demo02.py | yueludanfeng/Test | 8ea2bd2a8602ed51379c7a89ea1fdf370b8f1ca7 | [
"Apache-2.0"
] | 11 | 2017-10-17T07:50:21.000Z | 2021-12-13T12:04:58.000Z | # coding=utf-8
import numpy as np
import matplotlib.pyplot as plt
plt.figure(1) # 创建图表1
plt.figure(2) # 创建图表2
ax1 = plt.subplot(211) # 在图表2中创建子图1
ax2 = plt.subplot(212) # 在图表2中创建子图2
x = np.linspace(0, 3, 100)
for i in xrange(5):
plt.figure(1) # ❶ # 选择图表1
plt.plot(x, np.exp(i * x / 3))
plt.sca(ax1) # ❷ # 选择图表2的子图1
plt.plot(x, np.sin(i * x))
plt.sca(ax2) # 选择图表2的子图2
plt.plot(x, np.cos(i * x))
plt.show() | 24 | 37 | 0.589912 |
fc84663a91f8ee7a23eb7ba84e222d14cf94b0ad | 9,570 | py | Python | dymos/examples/vanderpol/vanderpol_ode.py | pgkirsch/dymos | d4e68bd3db13972dcbf9462c37c55814f521d762 | [
"Apache-2.0"
] | null | null | null | dymos/examples/vanderpol/vanderpol_ode.py | pgkirsch/dymos | d4e68bd3db13972dcbf9462c37c55814f521d762 | [
"Apache-2.0"
] | 2 | 2020-02-18T17:16:48.000Z | 2020-02-18T18:26:05.000Z | dymos/examples/vanderpol/vanderpol_ode.py | wright/dymos | 9d253a16ffcc162a84ef1b4a7dddcebeda5522ac | [
"Apache-2.0"
] | null | null | null | import numpy as np
import openmdao.api as om
import time
from openmdao.utils.array_utils import evenly_distrib_idxs
class vanderpol_ode(om.ExplicitComponent):
    """ODE for optimal control of a Van der Pol oscillator

    objective J:
        minimize integral of (x0**2 + x1**2 + u**2) for 0.0 <= t <= 15

    subject to:
        x0dot = (1 - x1^2) * x0 - x1 + u
        x1dot = x0
        -0.75 <= u <= 1.0

    initial conditions:
        x0(0) = 1.0   x1(0) = 1.0

    final conditions:
        x0(15) = 0.0  x1(15) = 0.0
    """

    def initialize(self):
        # Number of collocation nodes at which the ODE is evaluated.
        self.options.declare('num_nodes', types=int)

    def setup(self):
        nn = self.options['num_nodes']
        # inputs: 2 states and a control
        self.add_input('x0', val=np.ones(nn), desc='derivative of Output', units='V/s')
        self.add_input('x1', val=np.ones(nn), desc='Output', units='V')
        self.add_input('u', val=np.ones(nn), desc='control', units=None)
        # outputs: derivative of states
        # the objective function will be treated as a state for computation, so its derivative is an output
        self.add_output('x0dot', val=np.ones(nn), desc='second derivative of Output', units='V/s**2')
        self.add_output('x1dot', val=np.ones(nn), desc='derivative of Output', units='V/s')
        self.add_output('Jdot', val=np.ones(nn), desc='derivative of objective', units='1.0/s')
        # partials: every output at a node depends only on the inputs at that
        # node, so all Jacobians are diagonal; constant ones are declared
        # with 'val' and never recomputed in compute_partials.
        r = c = np.arange(nn)
        self.declare_partials(of='x0dot', wrt='x0', rows=r, cols=c)
        self.declare_partials(of='x0dot', wrt='x1', rows=r, cols=c)
        self.declare_partials(of='x0dot', wrt='u', rows=r, cols=c, val=1.0)
        self.declare_partials(of='x1dot', wrt='x0', rows=r, cols=c, val=1.0)
        self.declare_partials(of='x1dot', wrt='x1', rows=r, cols=c, val=0.0)
        self.declare_partials(of='x1dot', wrt='u', rows=r, cols=c, val=0.0)
        self.declare_partials(of='Jdot', wrt='x0', rows=r, cols=c)
        self.declare_partials(of='Jdot', wrt='x1', rows=r, cols=c)
        self.declare_partials(of='Jdot', wrt='u', rows=r, cols=c)

    def compute(self, inputs, outputs):
        """Evaluate the ODE right-hand side (and dJ/dt) at every node."""
        x0 = inputs['x0']
        x1 = inputs['x1']
        u = inputs['u']
        outputs['x0dot'] = (1.0 - x1**2) * x0 - x1 + u
        outputs['x1dot'] = x0
        outputs['Jdot'] = x0**2 + x1**2 + u**2

    def compute_partials(self, inputs, jacobian):
        """Fill in the non-constant diagonal partial derivatives."""
        # partials declared with 'val' above do not need to be computed
        x0 = inputs['x0']
        x1 = inputs['x1']
        u = inputs['u']
        jacobian['x0dot', 'x0'] = 1.0 - x1 * x1
        jacobian['x0dot', 'x1'] = -2.0 * x1 * x0 - 1.0
        jacobian['Jdot', 'x0'] = 2.0 * x0
        jacobian['Jdot', 'x1'] = 2.0 * x1
        jacobian['Jdot', 'u'] = 2.0 * u
class vanderpol_ode_group(om.Group):
    """Group wiring a pass-through component into the distributed ODE."""

    def initialize(self):
        self.options.declare('num_nodes', types=int)

    def setup(self):
        nn = self.options['num_nodes']

        passthrough_name = 'vanderpol_ode_passthrough'
        delay_name = 'vanderpol_ode_delay'

        self.add_subsystem(name=passthrough_name,
                           subsys=vanderpol_ode_passthrough(num_nodes=nn),
                           promotes_inputs=['x0', 'x1', 'u'])

        self.add_subsystem(name=delay_name,
                           subsys=vanderpol_ode_delay(num_nodes=nn),
                           promotes_outputs=['x0dot', 'x1dot', 'Jdot'])

        # Feed the (non-distributed) pass-through outputs into the
        # distributed ODE inputs.
        for ode_in, pass_out in (('x0', 'x0pass'), ('x1', 'x1pass'), ('u', 'upass')):
            self.connect('%s.%s' % (passthrough_name, pass_out),
                         '%s.%s' % (delay_name, ode_in))
class vanderpol_ode_passthrough(om.ExplicitComponent):
    """Identity component that copies the states and the control to outputs.

    Connecting a plain (non-distributed) pass-through like this one to a
    distributed component's inputs lets the framework perform the MPI
    allgathering automatically.
    """

    def initialize(self):
        self.options.declare('num_nodes', types=int)

    def setup(self):
        nn = self.options['num_nodes']  # total count over all processes

        # (input name, output name, description, units)
        io_specs = (('x0', 'x0pass', 'derivative of Output', 'V/s'),
                    ('x1', 'x1pass', 'Output', 'V'),
                    ('u', 'upass', 'control', None))

        for in_name, _, desc, units in io_specs:
            self.add_input(in_name, val=np.ones(nn), desc=desc, units=units)

        for _, out_name, desc, units in io_specs:
            self.add_output(out_name, val=np.ones(nn), desc=desc, units=units)

        # Each output is an identity copy of its input, so the Jacobian is a
        # constant identity (diagonal of ones).
        diag = np.arange(nn)
        for in_name, out_name, _, _ in io_specs:
            self.declare_partials(of=out_name, wrt=in_name, rows=diag, cols=diag, val=1.0)

    def compute(self, inputs, outputs):
        """Copy every input straight through to its paired output."""
        outputs['x0pass'] = inputs['x0']
        outputs['x1pass'] = inputs['x1']
        outputs['upass'] = inputs['u']
class vanderpol_ode_delay(om.ExplicitComponent):
    """Intentionally slow version of vanderpol_ode demonstrating distributed component calculations.

    MPI can run this component in multiple processes, distributing the calculation of derivatives.
    This code has an artificial delay in it to simulate a longer computation. It should run faster
    with more processes.
    """

    def __init__(self, *args, **kwargs):
        # Artificial per-element delay (seconds) used to simulate expensive work.
        self.delay_time = 0.005
        # When True, print progress information from setup/compute.
        self.progress_prints = False
        super().__init__(*args, **kwargs)

    def initialize(self):
        self.options.declare('num_nodes', types=int)
        # Mark the component as distributed so the framework partitions it over MPI ranks.
        self.options['distributed'] = True
        self.options.declare('size', types=int, default=1, desc="Size of input and output vectors.")

    def setup(self):
        nn = self.options['num_nodes']
        comm = self.comm
        rank = comm.rank

        # Split the nn nodes evenly across the MPI ranks; this rank owns the
        # half-open slice [start, end) of the global vectors.
        sizes, offsets = evenly_distrib_idxs(comm.size, nn)  # (#cpus, #inputs) -> (size array, offset array)
        start = offsets[rank]
        self.io_size = sizes[rank]  # number of inputs and outputs managed by this distributed process
        end = start + self.io_size

        if self.progress_prints:
            print('in vanderpol_ode_delay.setup', self.io_size, self.comm.rank)

        # inputs: 2 states and a control; src_indices selects this rank's slice.
        self.add_input('x0', val=np.ones(self.io_size), desc='derivative of Output', units='V/s',
                       src_indices=np.arange(start, end, dtype=int))
        self.add_input('x1', val=np.ones(self.io_size), desc='Output', units='V',
                       src_indices=np.arange(start, end, dtype=int))
        self.add_input('u', val=np.ones(self.io_size), desc='control', units=None,
                       src_indices=np.arange(start, end, dtype=int))

        # outputs: derivative of states
        # the objective function will be treated as a state for computation, so its derivative is an output
        self.add_output('x0dot', val=np.ones(self.io_size), desc='second derivative of Output', units='V/s**2')
        self.add_output('x1dot', val=np.ones(self.io_size), desc='derivative of Output', units='V/s')
        self.add_output('Jdot', val=np.ones(self.io_size), desc='derivative of objective', units='1.0/s')

        # partials: diagonal Jacobian over this rank's local slice.
        r = c = np.arange(self.io_size)
        self.declare_partials(of='x0dot', wrt='x0', rows=r, cols=c)
        self.declare_partials(of='x0dot', wrt='x1', rows=r, cols=c)
        self.declare_partials(of='x0dot', wrt='u', rows=r, cols=c, val=1.0)

        self.declare_partials(of='x1dot', wrt='x0', rows=r, cols=c, val=1.0)
        self.declare_partials(of='x1dot', wrt='x1', rows=r, cols=c, val=0.0)
        self.declare_partials(of='x1dot', wrt='u', rows=r, cols=c, val=0.0)

        self.declare_partials(of='Jdot', wrt='x0', rows=r, cols=c)
        self.declare_partials(of='Jdot', wrt='x1', rows=r, cols=c)
        self.declare_partials(of='Jdot', wrt='u', rows=r, cols=c)

    def compute(self, inputs, outputs):
        """Evaluate the state rates for this rank's slice (with artificial delay)."""
        if self.progress_prints:
            sizes = (len(inputs['x0']), len(inputs['x1']), len(inputs['u']))
            print('in vanderpol_ode_delay.compute', sizes, self.comm.rank)

        time.sleep(self.delay_time * self.io_size)  # introduce slowness proportional to size of computation

        x0 = inputs['x0']
        x1 = inputs['x1']
        u = inputs['u']

        outputs['x0dot'] = (1.0 - x1**2) * x0 - x1 + u
        outputs['x1dot'] = x0
        outputs['Jdot'] = x0**2 + x1**2 + u**2

    def compute_partials(self, inputs, jacobian):
        """Fill the non-constant Jacobian entries for this rank's slice (with artificial delay)."""
        if self.progress_prints:
            sizes = (len(inputs['x0']), len(inputs['x1']), len(inputs['u']))
            print('in vanderpol_ode_delay.compute_partials', sizes)

        time.sleep(self.delay_time * self.io_size)  # introduce slowness proportional to size of computation

        # partials declared with 'val' above do not need to be computed
        x0 = inputs['x0']
        x1 = inputs['x1']
        u = inputs['u']

        jacobian['x0dot', 'x0'] = 1.0 - x1 * x1
        jacobian['x0dot', 'x1'] = -2.0 * x1 * x0 - 1.0

        jacobian['Jdot', 'x0'] = 2.0 * x0
        jacobian['Jdot', 'x1'] = 2.0 * x1
        jacobian['Jdot', 'u'] = 2.0 * u
| 41.790393 | 117 | 0.611285 |
511c4f585c4e45c444cfdd07c4ed6d740e736c6f | 16,998 | py | Python | charles-university/deep-learning/labs/09/lemmatizer_attn.py | Hyperparticle/lct-master | 8acb0ca8fe14bb86305f235e3fec0a595acae2de | [
"MIT"
] | 3 | 2018-11-08T14:23:45.000Z | 2021-02-08T17:54:59.000Z | charles-university/deep-learning/labs/09/lemmatizer_attn.py | Hyperparticle/lct-master | 8acb0ca8fe14bb86305f235e3fec0a595acae2de | [
"MIT"
] | null | null | null | charles-university/deep-learning/labs/09/lemmatizer_attn.py | Hyperparticle/lct-master | 8acb0ca8fe14bb86305f235e3fec0a595acae2de | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import numpy as np
import tensorflow as tf
import morpho_dataset
class Network:
    """Character-level sequence-to-sequence lemmatizer with attention (TF1 graph API).

    A bidirectional GRU encodes the characters of each word form; a GRU
    decoder with additive attention over the encoder outputs generates the
    lemma character by character, terminated by an <eow> token.
    """

    def __init__(self, threads, seed=42):
        """Create an empty graph and a session restricted to `threads` CPU threads."""
        # Create an empty graph and a session
        graph = tf.Graph()
        graph.seed = seed
        self.session = tf.Session(graph = graph, config=tf.ConfigProto(inter_op_parallelism_threads=threads,
                                                                       intra_op_parallelism_threads=threads))

    def construct(self, args, source_chars, target_chars, bow, eow):
        """Build the training and prediction graphs.

        Args:
            args: parsed command-line options (char_dim, rnn_dim, recodex, logdir).
            source_chars: size of the source (form) character alphabet.
            target_chars: size of the target (lemma) character alphabet.
            bow: id of the beginning-of-word character in the target alphabet.
            eow: id of the end-of-word character in the target alphabet.
        """
        with self.session.graph.as_default():
            if args.recodex:
                tf.get_variable_scope().set_initializer(tf.glorot_uniform_initializer(seed=42))

            # Inputs
            self.sentence_lens = tf.placeholder(tf.int32, [None], name="sentence_lens")
            self.source_ids = tf.placeholder(tf.int32, [None, None], name="source_ids")
            self.source_seqs = tf.placeholder(tf.int32, [None, None], name="source_seqs")
            self.source_seq_lens = tf.placeholder(tf.int32, [None], name="source_seq_lens")
            self.target_ids = tf.placeholder(tf.int32, [None, None], name="target_ids")
            self.target_seqs = tf.placeholder(tf.int32, [None, None], name="target_seqs")
            self.target_seq_lens = tf.placeholder(tf.int32, [None], name="target_seq_lens")

            # Append EOW after target_seqs (reverse, pad at the front, reverse back)
            target_seqs = tf.reverse_sequence(self.target_seqs, self.target_seq_lens, 1)
            target_seqs = tf.pad(target_seqs, [[0, 0], [1, 0]], constant_values=eow)
            target_seq_lens = self.target_seq_lens + 1
            target_seqs = tf.reverse_sequence(target_seqs, target_seq_lens, 1)

            # Encoder
            # Generate source embeddings for source chars, of shape [source_chars, args.char_dim].
            source_embeddings = tf.get_variable("source_embeddings", [source_chars, args.char_dim])

            # Embed the self.source_seqs using the source embeddings.
            embedded_source_seqs = tf.nn.embedding_lookup(source_embeddings, self.source_seqs)

            # Using a GRU with dimension args.rnn_dim, process the embedded self.source_seqs
            # using bidirectional RNN. Store the summed fwd and bwd outputs in `source_encoded`
            # and the summed fwd and bwd states into `source_states`.
            source_encoded, source_states = tf.nn.bidirectional_dynamic_rnn(tf.nn.rnn_cell.GRUCell(args.rnn_dim),
                                                                            tf.nn.rnn_cell.GRUCell(args.rnn_dim),
                                                                            embedded_source_seqs,
                                                                            sequence_length=self.source_seq_lens,
                                                                            dtype=tf.float32)
            source_encoded = tf.reduce_sum(source_encoded, axis=0)
            source_states = tf.reduce_sum(source_states, axis=0)

            # Index the unique words using self.source_ids and self.target_ids.
            sentence_mask = tf.sequence_mask(self.sentence_lens)
            source_encoded = tf.boolean_mask(tf.nn.embedding_lookup(source_encoded, self.source_ids), sentence_mask)
            source_states = tf.boolean_mask(tf.nn.embedding_lookup(source_states, self.source_ids), sentence_mask)
            source_lens = tf.boolean_mask(tf.nn.embedding_lookup(self.source_seq_lens, self.source_ids), sentence_mask)

            target_seqs = tf.boolean_mask(tf.nn.embedding_lookup(target_seqs, self.target_ids), sentence_mask)
            target_lens = tf.boolean_mask(tf.nn.embedding_lookup(target_seq_lens, self.target_ids), sentence_mask)

            # Decoder
            # Generate target embeddings for target chars, of shape [target_chars, args.char_dim].
            target_embeddings = tf.get_variable("target_embeddings", [target_chars, args.char_dim])

            # Embed the target_seqs using the target embeddings.
            embedded_target_seqs = tf.nn.embedding_lookup(target_embeddings, target_seqs)

            # Generate a decoder GRU with dimension args.rnn_dim.
            decoder_rnn = tf.nn.rnn_cell.GRUCell(args.rnn_dim)

            # Create a `decoder_layer` -- a fully connected layer with
            # target_chars neurons used in the decoder to classify into target characters.
            decoder_layer = tf.layers.Dense(target_chars)

            # Attention
            # Generate three fully connected layers without activations:
            # - `source_layer` with args.rnn_dim units
            # - `state_layer` with args.rnn_dim units
            # - `weight_layer` with 1 unit
            source_layer = tf.layers.Dense(args.rnn_dim)
            state_layer = tf.layers.Dense(args.rnn_dim)
            weight_layer = tf.layers.Dense(1)

            def with_attention(inputs, states):
                # Generate the attention (additive attention over the encoder outputs).

                # Project source_encoded using source_layer.
                proj_source = source_layer(source_encoded)

                # Change shape of states from [a, b] to [a, 1, b] and project it using state_layer.
                # tf.expand_dims
                proj_states = state_layer(tf.expand_dims(states, axis=1))

                # Sum the two above projections, apply tf.tanh and project the result using weight_layer.
                # The result has shape [x, y, 1].
                sum_source_states = weight_layer(tf.tanh(proj_source + proj_states))

                # Apply tf.nn.softmax to the latest result, using axis corresponding to source characters.
                weight_vec = tf.nn.softmax(sum_source_states, axis=1)

                # Multiply the source_encoded by the latest result, and sum the results with respect
                # to the axis corresponding to source characters. This is the final attention.
                final_attn = tf.reduce_sum(source_encoded * weight_vec, axis=1)

                # Return concatenation of inputs and the computed attention.
                return tf.concat([inputs, final_attn], axis=1)

            # The DecoderTraining will be used during training. It will output logits for each
            # target character (teacher forcing: gold characters are fed as decoder inputs).
            class DecoderTraining(tf.contrib.seq2seq.Decoder):
                @property
                def batch_size(self): return tf.shape(source_states)[0] # Return size of the batch, using for example source_states size
                @property
                def output_dtype(self): return tf.float32 # Type for logits of target characters
                @property
                def output_size(self): return target_chars # Length of logits for every output

                def initialize(self, name=None):
                    finished = tf.less_equal(target_lens, 0) # False if target_lens > 0, True otherwise
                    states = source_states # Initial decoder state to use
                    inputs = with_attention(tf.nn.embedding_lookup(target_embeddings, tf.fill([self.batch_size], bow)),
                                            states) # Call with_attention on the embedded BOW characters of shape [self.batch_size].
                                                    # You can use tf.fill to generate BOWs of appropriate size.
                    return finished, inputs, states

                def step(self, time, inputs, states, name=None):
                    outputs, states = decoder_rnn(inputs, states) # Run the decoder GRU cell using inputs and states.
                    outputs = decoder_layer(outputs) # Apply the decoder_layer on outputs.
                    next_input = with_attention(embedded_target_seqs[:, time], states) # Next input is with_attention called on words with index `time` in target_embedded.
                    finished = tf.less_equal(target_lens, time + 1) # False if target_lens > time + 1, True otherwise.
                    return outputs, states, next_input, finished
            output_layer, _, _ = tf.contrib.seq2seq.dynamic_decode(DecoderTraining())
            self.predictions_training = tf.argmax(output_layer, axis=2, output_type=tf.int32)

            # The DecoderPrediction will be used during prediction. It will
            # directly output the predicted target characters (its own predictions are fed back).
            class DecoderPrediction(tf.contrib.seq2seq.Decoder):
                @property
                def batch_size(self): return tf.shape(source_states)[0] # Return size of the batch, using for example source_states size
                @property
                def output_dtype(self): return tf.int32 # Type for predicted target characters
                @property
                def output_size(self): return 1 # Will return just one output

                def initialize(self, name=None):
                    finished = tf.fill([self.batch_size], False) # False of shape [self.batch_size].
                    states = source_states # Initial decoder state to use.
                    inputs = with_attention(tf.nn.embedding_lookup(target_embeddings, tf.fill([self.batch_size], bow)),
                                            states) # Call with_attention on the embedded BOW characters of shape [self.batch_size].
                                                    # You can use tf.fill to generate BOWs of appropriate size.
                    return finished, inputs, states

                def step(self, time, inputs, states, name=None):
                    outputs, states = decoder_rnn(inputs, states) # Run the decoder GRU cell using inputs and states.
                    outputs = decoder_layer(outputs) # Apply the decoder_layer on outputs.
                    outputs = tf.argmax(outputs, output_type=tf.int32, axis=1) # Use tf.argmax to choose most probable class (supply parameter `output_type=tf.int32`).
                    next_input = with_attention(tf.nn.embedding_lookup(target_embeddings, outputs), states) # Embed `outputs` using target_embeddings and pass it to with_attention.
                    finished = tf.equal(outputs, eow) # True where outputs==eow, False otherwise
                    return outputs, states, next_input, finished
            self.predictions, _, self.prediction_lens = tf.contrib.seq2seq.dynamic_decode(
                DecoderPrediction(), maximum_iterations=tf.reduce_max(source_lens) + 10)

            # Training: masked cross-entropy over the gold target characters.
            weights = tf.sequence_mask(target_lens, dtype=tf.float32)
            loss = tf.losses.sparse_softmax_cross_entropy(target_seqs, output_layer, weights=weights)
            global_step = tf.train.create_global_step()
            self.training = tf.train.AdamOptimizer().minimize(loss, global_step=global_step, name="training")

            # Summaries: whole-lemma accuracy (all characters must match inside the mask).
            accuracy_training = tf.reduce_all(tf.logical_or(
                tf.equal(self.predictions_training, target_seqs),
                tf.logical_not(tf.sequence_mask(target_lens))), axis=1)
            self.current_accuracy_training, self.update_accuracy_training = tf.metrics.mean(accuracy_training)

            minimum_length = tf.minimum(tf.shape(self.predictions)[1], tf.shape(target_seqs)[1])
            accuracy = tf.logical_and(
                tf.equal(self.prediction_lens, target_lens),
                tf.reduce_all(tf.logical_or(
                    tf.equal(self.predictions[:, :minimum_length], target_seqs[:, :minimum_length]),
                    tf.logical_not(tf.sequence_mask(target_lens, maxlen=minimum_length))), axis=1))
            self.current_accuracy, self.update_accuracy = tf.metrics.mean(accuracy)

            self.current_loss, self.update_loss = tf.metrics.mean(loss, weights=tf.reduce_sum(weights))
            self.reset_metrics = tf.variables_initializer(tf.get_collection(tf.GraphKeys.METRIC_VARIABLES))

            summary_writer = tf.contrib.summary.create_file_writer(args.logdir, flush_millis=10 * 1000)
            self.summaries = {}
            with summary_writer.as_default(), tf.contrib.summary.record_summaries_every_n_global_steps(10):
                self.summaries["train"] = [tf.contrib.summary.scalar("train/loss", self.update_loss),
                                           tf.contrib.summary.scalar("train/accuracy", self.update_accuracy_training)]
            with summary_writer.as_default(), tf.contrib.summary.always_record_summaries():
                for dataset in ["dev", "test"]:
                    self.summaries[dataset] = [tf.contrib.summary.scalar(dataset + "/loss", self.current_loss),
                                               tf.contrib.summary.scalar(dataset + "/accuracy", self.current_accuracy)]

            # Initialize variables
            self.session.run(tf.global_variables_initializer())
            with summary_writer.as_default():
                tf.contrib.summary.initialize(session=self.session, graph=self.session.graph)

    def train_epoch(self, train, batch_size):
        """Run one training epoch, printing the first example of each batch to stderr."""
        import sys

        while not train.epoch_finished():
            sentence_lens, _, charseq_ids, charseqs, charseq_lens = train.next_batch(batch_size, including_charseqs=True)
            self.session.run(self.reset_metrics)
            predictions, _, _ = self.session.run(
                [self.predictions_training, self.training, self.summaries["train"]],
                {self.sentence_lens: sentence_lens,
                 self.source_ids: charseq_ids[train.FORMS], self.target_ids: charseq_ids[train.LEMMAS],
                 self.source_seqs: charseqs[train.FORMS], self.target_seqs: charseqs[train.LEMMAS],
                 self.source_seq_lens: charseq_lens[train.FORMS], self.target_seq_lens: charseq_lens[train.LEMMAS]})

            # Debug print of the first word of the batch.
            form, gold_lemma, system_lemma = "", "", ""
            for i in range(charseq_lens[train.FORMS][0]):
                form += train.factors[train.FORMS].alphabet[charseqs[train.FORMS][0][i]]
            for i in range(charseq_lens[train.LEMMAS][0]):
                gold_lemma += train.factors[train.LEMMAS].alphabet[charseqs[train.LEMMAS][0][i]]
                system_lemma += train.factors[train.LEMMAS].alphabet[predictions[0][i]]
            print("Gold form: {}, gold lemma: {}, predicted lemma: {}".format(form, gold_lemma, system_lemma), file=sys.stderr)

    def evaluate(self, dataset_name, dataset, batch_size):
        """Compute whole-lemma accuracy over `dataset`, logging under `dataset_name`.

        NOTE(review): the feed dict indexes FORMS/LEMMAS via the global `train`
        object rather than `dataset`; presumably these indices are shared
        constants across datasets -- confirm against morpho_dataset.
        """
        self.session.run(self.reset_metrics)
        while not dataset.epoch_finished():
            sentence_lens, _, charseq_ids, charseqs, charseq_lens = dataset.next_batch(batch_size, including_charseqs=True)
            self.session.run([self.update_accuracy, self.update_loss],
                             {self.sentence_lens: sentence_lens,
                              self.source_ids: charseq_ids[train.FORMS], self.target_ids: charseq_ids[train.LEMMAS],
                              self.source_seqs: charseqs[train.FORMS], self.target_seqs: charseqs[train.LEMMAS],
                              self.source_seq_lens: charseq_lens[train.FORMS], self.target_seq_lens: charseq_lens[train.LEMMAS]})
        return self.session.run([self.current_accuracy, self.summaries[dataset_name]])[0]
if __name__ == "__main__":
    import argparse
    import datetime
    import os
    import re

    # Deterministic runs: fix the NumPy RNG seed.
    np.random.seed(42)

    # Command-line options.
    parser = argparse.ArgumentParser()
    for option, kwargs in [
            ("--batch_size", dict(default=10, type=int, help="Batch size.")),
            ("--char_dim", dict(default=64, type=int, help="Character embedding dimension.")),
            ("--epochs", dict(default=10, type=int, help="Number of epochs.")),
            ("--recodex", dict(default=False, action="store_true", help="ReCodEx mode.")),
            ("--rnn_dim", dict(default=64, type=int, help="Dimension of the encoder and the decoder.")),
            ("--threads", dict(default=1, type=int, help="Maximum number of threads to use."))]:
        parser.add_argument(option, **kwargs)
    args = parser.parse_args()

    # Log directory name encodes the script name, a timestamp and the
    # (abbreviated) option values.
    arg_summary = ",".join("{}={}".format(re.sub("(.)[^_]*_?", r"\1", key), value)
                           for key, value in sorted(vars(args).items()))
    args.logdir = "logs/{}-{}-{}".format(
        os.path.basename(__file__),
        datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S"),
        arg_summary)
    if not os.path.exists("logs"):
        os.mkdir("logs")  # TF 1.6 will do this by itself

    # Data.
    train = morpho_dataset.MorphoDataset("czech-cac-train.txt", max_sentences=5000)
    dev = morpho_dataset.MorphoDataset("czech-cac-dev.txt", train=train, shuffle_batches=False)

    # Network.
    network = Network(threads=args.threads)
    network.construct(args,
                      len(train.factors[train.FORMS].alphabet),
                      len(train.factors[train.LEMMAS].alphabet),
                      train.factors[train.LEMMAS].alphabet_map["<bow>"],
                      train.factors[train.LEMMAS].alphabet_map["<eow>"])

    # Training loop: one training epoch followed by a dev evaluation.
    for _ in range(args.epochs):
        network.train_epoch(train, args.batch_size)
        accuracy = network.evaluate("dev", dev, args.batch_size)
        print("{:.2f}".format(100 * accuracy))
| 60.924731 | 181 | 0.633898 |
e3b913f47429c19494890afa5d790183ce6e4e60 | 4,610 | py | Python | gridalert/template/logwatchfine_template.py | tkishimoto/gridalert | 2b69999e7dbb6f3dc4919280a1f5bbceaf9e0c43 | [
"Apache-2.0"
] | 1 | 2019-07-19T10:16:15.000Z | 2019-07-19T10:16:15.000Z | gridalert/template/logwatchfine_template.py | tkishimoto/gridalert | 2b69999e7dbb6f3dc4919280a1f5bbceaf9e0c43 | [
"Apache-2.0"
] | null | null | null | gridalert/template/logwatchfine_template.py | tkishimoto/gridalert | 2b69999e7dbb6f3dc4919280a1f5bbceaf9e0c43 | [
"Apache-2.0"
] | null | null | null | from logging import getLogger
logger = getLogger(__name__)
from .logwatch_template import *
class LogwatchfineTemplate(LogwatchTemplate):
    """Fine-grained Logwatch report parser.

    Splits a Logwatch report into per-service sections and converts the
    dmlite and sshd sections into row lists suitable for database storage.
    Service keys/names and metadata keys/names come from the
    LogwatchTemplate base class.
    """

    def __init__(self, conf):
        super().__init__(conf)

    def initialize(self):
        """Register the begin/end markers for each configured service."""
        for service in self.conf['cl']['services'].split(','):
            if ('dmlite' in service):
                # Register dmlite only once even if listed several times.
                if ('dmlite' in self.service_names):
                    continue
                self.service_keys.append(['- dmlite Begin -',
                                          '- dmlite End -'])
                self.service_names.append('dmlite')
            elif (service == 'login-sshd'):
                self.service_keys.append(['- SSHD Begin -',
                                          '- SSHD End -'])
                self.service_names.append('sshd')
            else:
                logger.info('%s not supported' % (service))

    def execute(self, text):
        """Parse the file at `text`, returning extracted rows for every
        "##### Logwatch ... ##### Logwatch End" report found in it."""
        buffers = []
        buffers_tmp = []
        logwatch = False

        for line in open(text, errors='replace'):
            # A "##### Logwatch" header (without "End") starts a report.
            if ('##### Logwatch' in line) and (not 'End' in line):
                logwatch = True

            if logwatch:
                buffers_tmp.append(line)

            # The end marker closes the report and flushes it to extract().
            if ('##### Logwatch End' in line):
                logwatch = False
                buffers += self.extract(buffers_tmp)
                buffers_tmp = []

        return buffers

    def extract(self, lines):
        # should return lists of 'cluster', 'host', 'date',
        # 'service', 'metadata', 'data', 'label'
        buffers = []
        meta = {}
        unix = ''  # NOTE(review): computed below but never used -- confirm intent

        # Collect one metadata value per configured metadata key; dates are
        # normalized via util_date.
        for ii, key in enumerate(self.metadata_keys):
            data = 'unknown'
            for line in lines:
                data = util_text.extract_sline(line, key[0], key[1])
                if data:
                    if 'date' in self.metadata_names[ii]:
                        unix = util_date.endate_to_unix(data)
                        data = util_date.endate_to_sqdate(data)
                    break
            meta[self.metadata_names[ii]] = data

        # Extract each service section and dispatch it to its handler.
        for ii, key in enumerate(self.service_keys):
            data = util_text.extract_mline(lines, key[0], key[1])
            if data == '':
                data = 'unknown'
                continue

            # cluster, host, date, service, metadata, data, label
            service = self.service_names[ii]

            if service == 'dmlite':
                buffers += self.dmlite(meta, data)

            if service == 'sshd':
                buffers += self.sshd(meta, data)

        return buffers

    def dmlite(self, meta, data):
        """Turn the dmlite section into one row per "time(s)" line."""
        buffers = []
        service = 'dmlite'

        lines = data.split('\n')
        for line in lines:
            # Skip blank lines and lines without a "time(s)" counter.
            if line.replace(' ', '').replace('\n', '') == '':
                continue
            if not 'time(s)' in line:
                continue

            # host, date, service, metadata, data, label
            host = meta['host']
            date = meta['date']
            # assumes 'range' and 'level' are among the configured metadata
            # names -- TODO confirm against LogwatchTemplate
            metadata = 'range=%s,level=%s' % (meta['range'], meta['level'])
            label = '1'

            buffers.append([host, date, service,
                            metadata, line, label])

        return buffers

    def sshd(self, meta, data):
        """Turn the sshd section into one row per login entry.

        A login entry starts on a line indented by exactly 4 spaces; its
        continuation lines are indented by exactly 7 spaces. Every entry is
        prefixed with the "Users logging in through sshd:" header line.
        """
        buffers = []
        service = 'login-sshd'

        lines = data.split('\n')
        header = ''
        login = ''
        login_buffer = []
        for line in lines:
            if 'Users logging in through sshd:' in line:
                header = '\n' + line + '\n'
                continue
            elif '' == header:
                # Header not seen yet: skip.
                # NOTE(review): the assignment below is a no-op (header is
                # already '') -- confirm whether something else was intended.
                header = ''
                continue

            if not header:
                continue

            # 4-space indent (and a non-space 5th column) starts a new login.
            if line[0:4] == '    ' and line[4] != ' ':
                if login == '':
                    login += line + '\n'
                else:
                    login_buffer.append(header + login)
                    login = line + '\n'

            # 7-space indent continues the current login entry.
            if login != '' and line[0:7] == '       ' and line[7] != ' ':
                login += line + '\n'

        # Flush the last (possibly empty) entry.
        # NOTE(review): when no login lines were found this appends the bare
        # header -- confirm that downstream consumers tolerate that.
        login_buffer.append(header + login)

        for login in login_buffer:
            # host, date, service, metadata, data, label
            host = meta['host']
            date = meta['date']
            # assumes 'range' and 'level' are among the configured metadata
            # names -- TODO confirm against LogwatchTemplate
            metadata = 'range=%s,level=%s' % (meta['range'], meta['level'])
            label = '1'

            buffers.append([host, date, service,
                            metadata, login, label])

        return buffers
| 27.939394 | 80 | 0.444685 |
d5d0baa1b454e84698afe7eac7bdd101c9ba0463 | 7,709 | py | Python | src/train_kth.py | mubashirhanif/iclr2017mcnet | 8827cbe9cc83978e7f3fed28a754119d5ee9574f | [
"MIT"
] | null | null | null | src/train_kth.py | mubashirhanif/iclr2017mcnet | 8827cbe9cc83978e7f3fed28a754119d5ee9574f | [
"MIT"
] | null | null | null | src/train_kth.py | mubashirhanif/iclr2017mcnet | 8827cbe9cc83978e7f3fed28a754119d5ee9574f | [
"MIT"
] | null | null | null | import cv2
import sys
import time
import imageio
import tensorflow as tf
import scipy.misc as sm
import numpy as np
import scipy.io as sio
from mcnet import MCNET
from utils import *
from os import listdir, makedirs, system
from os.path import exists
from argparse import ArgumentParser
from joblib import Parallel, delayed
def main(lr, batch_size, alpha, beta, image_size, K,
         T, num_iter, gpu):
    """Train the KTH MCNET video-prediction GAN (TF1 graph API).

    Args:
        lr: base learning rate for both Adam optimizers.
        batch_size: mini-batch size (also number of joblib workers).
        alpha: weight of the image loss L_img in the generator objective.
        beta: weight of the adversarial loss L_GAN in the generator objective.
        image_size: height/width of the (square) input frames.
        K: number of observed past frames.
        T: number of future frames to predict.
        num_iter: total number of training iterations.
        gpu: list of GPU device ids; the graph is placed on the first one.
    """
    data_path = "../data/KTH/"
    # NOTE(review): this file handle is never closed.
    f = open(data_path+"train_data_list_trimmed.txt","r")
    trainfiles = f.readlines()
    # Discriminator/generator loss margin used to alternate D and G updates.
    margin = 0.3
    updateD = True
    updateG = True
    iters = 0
    # Run prefix encoding the hyper-parameters; used for all output dirs.
    prefix = ("KTH_MCNET"
            + "_image_size="+str(image_size)
            + "_K="+str(K)
            + "_T="+str(T)
            + "_batch_size="+str(batch_size)
            + "_alpha="+str(alpha)
            + "_beta="+str(beta)
            + "_lr="+str(lr))

    print("\n"+prefix+"\n")
    checkpoint_dir = "../models/"+prefix+"/"
    samples_dir = "../samples/"+prefix+"/"
    summary_dir = "../logs/"+prefix+"/"

    if not exists(checkpoint_dir):
        makedirs(checkpoint_dir)
    if not exists(samples_dir):
        makedirs(samples_dir)
    if not exists(summary_dir):
        makedirs(summary_dir)

    # Build the model and both optimizers on the first requested GPU.
    with tf.device("/gpu:%d"%gpu[0]):
        model = MCNET(image_size=[image_size,image_size], c_dim=1,
                      K=K, batch_size=batch_size, T=T,
                      checkpoint_dir=checkpoint_dir)
        d_optim = tf.train.AdamOptimizer(lr, beta1=0.5).minimize(
            model.d_loss, var_list=model.d_vars
        )
        g_optim = tf.train.AdamOptimizer(lr, beta1=0.5).minimize(
            alpha*model.L_img+beta*model.L_GAN, var_list=model.g_vars
        )

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=False,
                                          gpu_options=gpu_options)) as sess:

        tf.global_variables_initializer().run()

        # Resume from a checkpoint if one exists.
        if model.load(sess, checkpoint_dir):
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")

        g_sum = tf.summary.merge([model.L_p_sum,
                                  model.L_gdl_sum, model.loss_sum,
                                  model.L_GAN_sum])
        d_sum = tf.summary.merge([model.d_loss_real_sum, model.d_loss_sum,
                                  model.d_loss_fake_sum])
        writer = tf.summary.FileWriter(summary_dir, sess.graph)

        counter = iters+1
        start_time = time.time()

        # Batches are loaded in parallel, one worker per sample.
        with Parallel(n_jobs=batch_size) as parallel:
            while iters < num_iter:
                mini_batches = get_minibatches_idx(len(trainfiles), batch_size, shuffle=True)
                for _, batchidx in mini_batches:
                    # Only full-sized batches are used.
                    if len(batchidx) == batch_size:
                        seq_batch = np.zeros((batch_size, image_size, image_size,
                                              K+T, 1), dtype="float32")
                        diff_batch = np.zeros((batch_size, image_size, image_size,
                                               K-1, 1), dtype="float32")
                        t0 = time.time()  # NOTE(review): unused
                        Ts = np.repeat(np.array([T]),batch_size,axis=0)
                        Ks = np.repeat(np.array([K]),batch_size,axis=0)
                        paths = np.repeat(data_path, batch_size,axis=0)
                        tfiles = np.array(trainfiles)[batchidx]
                        shapes = np.repeat(np.array([image_size]),batch_size,axis=0)
                        output = parallel(delayed(load_kth_data)(f, p,img_sze, k, t)
                                          for f,p,img_sze,k,t in zip(tfiles,
                                                                     paths,
                                                                     shapes,
                                                                     Ks, Ts))
                        for i in range(batch_size):
                            seq_batch[i] = output[i][0]
                            diff_batch[i] = output[i][1]

                        if updateD:
                            _, summary_str = sess.run([d_optim, d_sum],
                                                      feed_dict={model.diff_in: diff_batch,
                                                                 model.xt: seq_batch[:,:,:,K-1],
                                                                 model.target: seq_batch})
                            writer.add_summary(summary_str, counter)

                        if updateG:
                            _, summary_str = sess.run([g_optim, g_sum],
                                                      feed_dict={model.diff_in: diff_batch,
                                                                 model.xt: seq_batch[:,:,:,K-1],
                                                                 model.target: seq_batch})
                            writer.add_summary(summary_str, counter)

                        errD_fake = model.d_loss_fake.eval({model.diff_in: diff_batch,
                                                            model.xt: seq_batch[:,:,:,K-1],
                                                            model.target: seq_batch})
                        errD_real = model.d_loss_real.eval({model.diff_in: diff_batch,
                                                            model.xt: seq_batch[:,:,:,K-1],
                                                            model.target: seq_batch})
                        errG = model.L_GAN.eval({model.diff_in: diff_batch,
                                                 model.xt: seq_batch[:,:,:,K-1],
                                                 model.target: seq_batch})

                        # Balance the adversarial game: pause D when its losses
                        # fall below the margin, pause G when they exceed
                        # 1 - margin, and re-enable both if both got paused.
                        if errD_fake < margin or errD_real < margin:
                            updateD = False
                        if errD_fake > (1.-margin) or errD_real > (1.-margin):
                            updateG = False
                        if not updateD and not updateG:
                            updateD = True
                            updateG = True

                        counter += 1
                        print(
                            "Iters: [%2d] time: %4.4f, d_loss: %.8f, L_GAN: %.8f"
                            % (iters, time.time() - start_time, errD_fake+errD_real,errG)
                        )

                        # Periodically save a predicted-vs-ground-truth sample image.
                        if np.mod(counter, 100) == 1:
                            samples = sess.run([model.G],
                                               feed_dict={model.diff_in: diff_batch,
                                                          model.xt: seq_batch[:,:,:,K-1],
                                                          model.target: seq_batch})[0]
                            samples = samples[0].swapaxes(0,2).swapaxes(1,2)
                            sbatch = seq_batch[0,:,:,K:].swapaxes(0,2).swapaxes(1,2)
                            samples = np.concatenate((samples,sbatch), axis=0)
                            print("Saving sample ...")
                            save_images(samples[:,:,:,::-1], [2, T],
                                        samples_dir+"train_%s.png" % (iters))
                        # Periodically checkpoint the model.
                        if np.mod(counter, 500) == 2:
                            model.save(sess, checkpoint_dir, counter)

                        iters += 1
def build_parser():
    """Build the command-line argument parser for the KTH training script."""
    parser = ArgumentParser()
    parser.add_argument("--lr", type=float, dest="lr",
                        default=0.0001, help="Base Learning Rate")
    parser.add_argument("--batch_size", type=int, dest="batch_size",
                        default=8, help="Mini-batch size")
    parser.add_argument("--alpha", type=float, dest="alpha",
                        default=1.0, help="Image loss weight")
    parser.add_argument("--beta", type=float, dest="beta",
                        default=0.02, help="GAN loss weight")
    # Fix: help text previously said "Mini-batch size" (copy/paste from
    # --batch_size); this option is the square frame resolution.
    parser.add_argument("--image_size", type=int, dest="image_size",
                        default=128, help="Image size")
    parser.add_argument("--K", type=int, dest="K",
                        default=10, help="Number of steps to observe from the past")
    parser.add_argument("--T", type=int, dest="T",
                        default=10, help="Number of steps into the future")
    parser.add_argument("--num_iter", type=int, dest="num_iter",
                        default=100000, help="Number of iterations")
    parser.add_argument("--gpu", type=int, nargs="+", dest="gpu", required=True,
                        help="GPU device id")
    return parser


if __name__ == "__main__":
    args = build_parser().parse_args()
    main(**vars(args))
| 41.896739 | 85 | 0.515501 |
4a67c2950d859abe3d2e7d045856544d50a5e9d7 | 5,649 | py | Python | apps/modules/comments/process/adm_comment.py | doublealread/osroom | c0aee322704f1eeca682a3e7669298882ed3dba5 | [
"BSD-2-Clause"
] | 1 | 2019-05-04T03:17:14.000Z | 2019-05-04T03:17:14.000Z | apps/modules/comments/process/adm_comment.py | doublealread/osroom | c0aee322704f1eeca682a3e7669298882ed3dba5 | [
"BSD-2-Clause"
] | null | null | null | apps/modules/comments/process/adm_comment.py | doublealread/osroom | c0aee322704f1eeca682a3e7669298882ed3dba5 | [
"BSD-2-Clause"
] | null | null | null | # -*-coding:utf-8-*-
from bson import ObjectId
from flask import request
from flask_babel import gettext
from flask_login import current_user
from apps.modules.comments.process.comment import find_comments
from apps.modules.message.process.user_message import insert_user_msg
from apps.utils.format.obj_format import json_to_pyseq
from apps.app import mdb_web, mdb_user
from apps.core.utils.get_config import get_config
__author__ = "Allen Woo"
def adm_comments():
    """
    List comments for the admin panel.

    Reads paging, sorting and filter options from the current request and
    returns the matching page of published (issued=1), non-deleted comments.
    """
    sort_rule = json_to_pyseq(request.argget.all('sort'))
    audit_status = request.argget.all('status', "is_issued")
    search_kw = request.argget.all('keyword', "")
    page_num = int(request.argget.all('page', 1))
    per_page = int(request.argget.all('pre', 10))
    return find_comments(
        query_conditions={'issued': 1, 'is_delete': 0},
        page=page_num,
        pre=per_page,
        sort=sort_rule,
        keyword=search_kw,
        status=audit_status)
def adm_comment_audit():
    """
    Manually audit a batch of comments.

    Marks the given comment ids as audited with the supplied score, then
    notifies users: if the score reaches the configured illegal threshold the
    comment authors are warned; otherwise the commented-on users (and the
    replied-to user, if any) get a normal comment notification.

    Returns a dict with "msg", "msg_type" and "http_status".
    """
    ids = json_to_pyseq(request.argget.all('ids', []))
    score = int(request.argget.all("score", 0))
    for i, tid in enumerate(ids):
        ids[i] = ObjectId(tid)
    r = mdb_web.db.comment.update_many({
        "_id": {"$in": ids}},
        {"$set": {
            "audited": 1,
            "audit_score": score,
            "audit_way": "artificial",
            "audit_user_id": current_user.str_id}})
    if r.modified_count:
        if score >= get_config("content_inspection", "ALLEGED_ILLEGAL_SCORE"):
            # Audit failed: notify each comment author about the violation.
            coms = mdb_web.db.comment.find({"_id": {"$in": ids}}, {
                "user_id": 1, "content": 1, "_id": 1, "audit_score": 1})
            for com in coms:
                msg_content = {"text": com["content"]}
                insert_user_msg(
                    user_id=com["user_id"],
                    ctype="notice",
                    label="comment",
                    title=gettext("Comment on alleged violations"),
                    content=msg_content,
                    target_id=str(
                        com["_id"]),
                    target_type="comment")
        else:
            # Audit passed: notify the target of the comment (and the user
            # being replied to, when the comment is a reply).
            coms = mdb_web.db.comment.find({"_id": {"$in": ids}})
            for com in coms:
                msg_content = {
                    "id": str(com["_id"]),
                    "user_id": str(com["user_id"]),
                    "username": com["username"],
                    "text": com["content"]}
                user_ids = [com["target_user_id"]]
                if "reply_id" in com:
                    # BUGFIX: notify the replied-to *user* (reply_user_id),
                    # not the replied comment's id, and store plain values
                    # instead of the 1-tuples previously created by stray
                    # trailing commas.
                    user_ids.append(com["reply_user_id"])
                    msg_content["reply_id"] = com["reply_id"]
                    msg_content["reply_user_id"] = com["reply_user_id"]
                    msg_content["reply_username"] = com["reply_username"]
                insert_user_msg(
                    user_id=user_ids,
                    ctype="notice",
                    label="comment",
                    title=com["target_brief_info"],
                    content=msg_content,
                    target_id=com["target_id"],
                    target_type=com["type"])
        data = {"msg": gettext("Submitted successfully, {}").format(
            r.modified_count), "msg_type": "s", "http_status": 201}
    else:
        data = {
            "msg": gettext("Submitted failed"),
            "msg_type": "w",
            "http_status": 400}
    return data
def adm_comment_delete():
    """
    Delete comments from the admin panel.

    With a truthy "pending_delete" parameter the comments are only flagged
    (is_delete=2, the "permanently deleted area"); otherwise comments already
    flagged (is_delete 1 or 2) are removed from the database for good, after
    pulling their ids out of users' like lists.

    Returns a dict with "msg", "msg_type" and "http_status".
    """
    obj_ids = [ObjectId(tid)
               for tid in json_to_pyseq(request.argget.all('ids', []))]
    pending_delete = int(request.argget.all("pending_delete", 1))
    if pending_delete:
        # Soft delete: just move the comments to the pending-purge area.
        r = mdb_web.db.comment.update_many(
            {"_id": {"$in": obj_ids}}, {"$set": {"is_delete": 2}})
        if r.modified_count:
            return {"msg": gettext("Move to a permanently deleted area, {}").format(
                r.modified_count), "msg_type": "s", "http_status": 204}
        return {
            "msg": gettext("Does not match the data to be deleted"),
            "msg_type": "w",
            "http_status": 400}
    # Hard delete: first detach the comments from user like records.
    for oid in obj_ids:
        mdb_user.db.user_like.update_many({"type": "comment", "values": str(oid)},
                                          {"$pull": {"values": str(oid)}})
    r = mdb_web.db.comment.delete_many(
        {"_id": {"$in": obj_ids}, "is_delete": {"$in": [1, 2]}})
    if r.deleted_count:
        return {"msg": gettext("Removed from the database, {}").format(
            r.deleted_count), "msg_type": "s", "http_status": 204}
    return {"msg": gettext("No match to relevant data"),
            "msg_type": "w", "http_status": 400}
def adm_comment_restore():
    """
    Restore soft-deleted comments (is_delete 1 or 2) back to is_delete=0.

    Returns a dict with "msg", "msg_type" and "http_status".
    """
    obj_ids = [ObjectId(tid)
               for tid in json_to_pyseq(request.argget.all('ids', []))]
    r = mdb_web.db.comment.update_many({"_id": {"$in": obj_ids},
                                        "is_delete": {"$in": [1, 2]}},
                                       {"$set": {"is_delete": 0}})
    if not r.modified_count:
        return {
            "msg": gettext("No match to relevant data"),
            "msg_type": "w",
            "http_status": 400}
    return {"msg": gettext("Restore success, {}").format(r.modified_count),
            "msg_type": "s", "http_status": 201}
| 36.921569 | 99 | 0.501151 |
eed2fe19df6a3f78a4a1f0ee40d26ccbf50f3349 | 34,433 | py | Python | sdk/cwl/arvados_cwl/executor.py | basharbme/arvados | 1c3c8f7fd2e1268b139e046fbd6a7093dd82222f | [
"Apache-2.0"
] | 1 | 2019-09-08T01:49:09.000Z | 2019-09-08T01:49:09.000Z | sdk/cwl/arvados_cwl/executor.py | basharbme/arvados | 1c3c8f7fd2e1268b139e046fbd6a7093dd82222f | [
"Apache-2.0"
] | null | null | null | sdk/cwl/arvados_cwl/executor.py | basharbme/arvados | 1c3c8f7fd2e1268b139e046fbd6a7093dd82222f | [
"Apache-2.0"
] | null | null | null | # Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
from __future__ import division
from builtins import next
from builtins import object
from builtins import str
from future.utils import viewvalues, viewitems
import argparse
import logging
import os
import sys
import threading
import copy
import json
import re
from functools import partial
import time
from cwltool.errors import WorkflowException
import cwltool.workflow
from schema_salad.sourceline import SourceLine
import schema_salad.validate as validate
import arvados
import arvados.config
from arvados.keep import KeepClient
from arvados.errors import ApiError
import arvados_cwl.util
from .arvcontainer import RunnerContainer
from .runner import Runner, upload_docker, upload_job_order, upload_workflow_deps
from .arvtool import ArvadosCommandTool, validate_cluster_target, ArvadosExpressionTool
from .arvworkflow import ArvadosWorkflow, upload_workflow
from .fsaccess import CollectionFsAccess, CollectionFetcher, collectionResolver, CollectionCache, pdh_size
from .perf import Perf
from .pathmapper import NoFollowPathMapper
from .task_queue import TaskQueue
from .context import ArvLoadingContext, ArvRuntimeContext
from ._version import __version__
from cwltool.process import shortname, UnsupportedRequirement, use_custom_schema
from cwltool.pathmapper import adjustFileObjs, adjustDirObjs, get_listing, visit_class
from cwltool.command_line_tool import compute_checksums
from cwltool.load_tool import load_tool
# Module-level loggers: general activity vs. timing metrics (used via Perf).
logger = logging.getLogger('arvados.cwl-runner')
metrics = logging.getLogger('arvados.cwl-runner.metrics')
# Default container-request priority applied when the caller supplies none.
DEFAULT_PRIORITY = 500
class RuntimeStatusLoggingHandler(logging.Handler):
    """
    logging.Handler that mirrors warning/error log records into the runner
    container's runtime_status via the supplied update callback.
    """
    def __init__(self, runtime_status_update_func):
        super(RuntimeStatusLoggingHandler, self).__init__()
        # Callback taking (kind, message[, detail]).
        self.runtime_status_update = runtime_status_update_func
        # Re-entrancy guard: reporting a status may itself emit log records,
        # which would re-enter emit() and recurse without this flag.
        self.updatingRuntimeStatus = False

    def emit(self, record):
        # Map log level to a runtime_status kind; below WARNING is ignored.
        if record.levelno >= logging.ERROR:
            kind = 'error'
        elif record.levelno >= logging.WARNING:
            kind = 'warning'
        else:
            kind = None
        if kind is None or self.updatingRuntimeStatus is True:
            return
        self.updatingRuntimeStatus = True
        try:
            message = record.getMessage()
            if '\n' not in message:
                self.runtime_status_update(
                    kind,
                    "%s: %s" % (record.name, message)
                )
            else:
                # Multi-line message: first line becomes the status, the
                # remainder becomes the detail.
                summary, detail = message.split('\n', 1)
                self.runtime_status_update(
                    kind,
                    "%s: %s" % (record.name, summary),
                    detail
                )
        finally:
            self.updatingRuntimeStatus = False
class ArvCwlExecutor(object):
"""Execute a CWL tool or workflow, submit work (using containers API),
wait for them to complete, and report output.
"""
    def __init__(self, api_client,
                 arvargs=None,
                 keep_client=None,
                 num_retries=4,
                 thread_count=4):
        """Wire up API/Keep clients, the collection cache, loading/runtime
        contexts and execution state, and validate the requested work API.

        :param api_client: Arvados API client used for all server calls.
        :param arvargs: argparse.Namespace of CLI options; a minimal default
            namespace is synthesized when None.
        :param keep_client: optional Keep client; created from api_client
            when not given.
        :param num_retries: retry count for API/Keep operations.
        :param thread_count: NOTE(review): this parameter is unused here;
            self.thread_count is taken from arvargs.thread_count instead.
        """
        if arvargs is None:
            # Allow programmatic construction without CLI parsing.
            arvargs = argparse.Namespace()
            arvargs.work_api = None
            arvargs.output_name = None
            arvargs.output_tags = None
            arvargs.thread_count = 1
            arvargs.collection_cache_size = None

        self.api = api_client
        # uuid -> in-flight process object; guarded by workflow_eval_lock.
        self.processes = {}
        self.workflow_eval_lock = threading.Condition(threading.RLock())
        self.final_output = None
        self.final_status = None
        self.num_retries = num_retries
        self.uuid = None
        self.stop_polling = threading.Event()
        self.poll_api = None
        self.pipeline = None
        self.final_output_collection = None
        self.output_name = arvargs.output_name
        self.output_tags = arvargs.output_tags
        self.project_uuid = None
        self.intermediate_output_ttl = 0
        self.intermediate_output_collections = []
        self.trash_intermediate = False
        self.thread_count = arvargs.thread_count
        self.poll_interval = 12  # seconds between container state polls
        self.loadingContext = None
        self.should_estimate_cache_size = True
        self.fs_access = None
        self.secret_store = None

        if keep_client is not None:
            self.keep_client = keep_client
        else:
            self.keep_client = arvados.keep.KeepClient(api_client=self.api, num_retries=self.num_retries)

        # Explicit --collection-cache-size wins over the estimate made later
        # in arv_executor (see should_estimate_cache_size).
        if arvargs.collection_cache_size:
            collection_cache_size = arvargs.collection_cache_size*1024*1024
            self.should_estimate_cache_size = False
        else:
            collection_cache_size = 256*1024*1024

        self.collection_cache = CollectionCache(self.api, self.keep_client, self.num_retries,
                                                cap=collection_cache_size)

        self.fetcher_constructor = partial(CollectionFetcher,
                                           api_client=self.api,
                                           fs_access=CollectionFsAccess("", collection_cache=self.collection_cache),
                                           num_retries=self.num_retries)

        # Pick the first supported API the server advertises that matches
        # the requested one (or anything, when no preference was given).
        self.work_api = None
        expected_api = ["containers"]
        for api in expected_api:
            try:
                methods = self.api._rootDesc.get('resources')[api]['methods']
                if ('httpMethod' in methods['create'] and
                    (arvargs.work_api == api or arvargs.work_api is None)):
                    self.work_api = api
                    break
            except KeyError:
                pass

        if not self.work_api:
            if arvargs.work_api is None:
                raise Exception("No supported APIs")
            else:
                raise Exception("Unsupported API '%s', expected one of %s" % (arvargs.work_api, expected_api))

        # NOTE(review): unreachable with expected_api == ["containers"] — a
        # request for "jobs" already raised "Unsupported API" above.
        if self.work_api == "jobs":
            logger.error("""
*******************************
The 'jobs' API is no longer supported.
*******************************""")
            exit(1)

        self.loadingContext = ArvLoadingContext(vars(arvargs))
        self.loadingContext.fetcher_constructor = self.fetcher_constructor
        self.loadingContext.resolver = partial(collectionResolver, self.api, num_retries=self.num_retries)
        self.loadingContext.construct_tool_object = self.arv_make_tool

        # Add a custom logging handler to the root logger for runtime status reporting
        # if running inside a container
        if arvados_cwl.util.get_current_container(self.api, self.num_retries, logger):
            root_logger = logging.getLogger('')

            # Remove existing RuntimeStatusLoggingHandlers if they exist
            handlers = [h for h in root_logger.handlers if not isinstance(h, RuntimeStatusLoggingHandler)]
            root_logger.handlers = handlers

            handler = RuntimeStatusLoggingHandler(self.runtime_status_update)
            root_logger.addHandler(handler)

        self.runtimeContext = ArvRuntimeContext(vars(arvargs))
        self.runtimeContext.make_fs_access = partial(CollectionFsAccess,
                                                     collection_cache=self.collection_cache)

        validate_cluster_target(self, self.runtimeContext)
def arv_make_tool(self, toolpath_object, loadingContext):
if "class" in toolpath_object and toolpath_object["class"] == "CommandLineTool":
return ArvadosCommandTool(self, toolpath_object, loadingContext)
elif "class" in toolpath_object and toolpath_object["class"] == "Workflow":
return ArvadosWorkflow(self, toolpath_object, loadingContext)
elif "class" in toolpath_object and toolpath_object["class"] == "ExpressionTool":
return ArvadosExpressionTool(self, toolpath_object, loadingContext)
else:
raise Exception("Unknown tool %s" % toolpath_object.get("class"))
def output_callback(self, out, processStatus):
with self.workflow_eval_lock:
if processStatus == "success":
logger.info("Overall process status is %s", processStatus)
state = "Complete"
else:
logger.error("Overall process status is %s", processStatus)
state = "Failed"
if self.pipeline:
self.api.pipeline_instances().update(uuid=self.pipeline["uuid"],
body={"state": state}).execute(num_retries=self.num_retries)
self.final_status = processStatus
self.final_output = out
self.workflow_eval_lock.notifyAll()
def start_run(self, runnable, runtimeContext):
self.task_queue.add(partial(runnable.run, runtimeContext),
self.workflow_eval_lock, self.stop_polling)
def process_submitted(self, container):
with self.workflow_eval_lock:
self.processes[container.uuid] = container
def process_done(self, uuid, record):
with self.workflow_eval_lock:
j = self.processes[uuid]
logger.info("%s %s is %s", self.label(j), uuid, record["state"])
self.task_queue.add(partial(j.done, record),
self.workflow_eval_lock, self.stop_polling)
del self.processes[uuid]
    def runtime_status_update(self, kind, message, detail=None):
        """
        Updates the runtime_status field on the runner container.
        Called when there's a need to report errors, warnings or just
        activity statuses, for example in the RuntimeStatusLoggingHandler.

        :param kind: 'error', 'warning' or 'activity'; anything else is
            silently ignored.
        :param message: one-line status text.
        :param detail: optional longer text stored under '<kind>Detail'
            (only for the first error).

        Only the first error message is kept verbatim; subsequent errors just
        bump an "(and N more)" suffix on it. Warnings/activity always
        overwrite the previous value. No-op when not running in a container.
        """
        with self.workflow_eval_lock:
            current = arvados_cwl.util.get_current_container(self.api, self.num_retries, logger)
            if current is None:
                return
            runtime_status = current.get('runtime_status', {})
            # In case of status being an error, only report the first one.
            if kind == 'error':
                if not runtime_status.get('error'):
                    runtime_status.update({
                        'error': message
                    })
                    if detail is not None:
                        runtime_status.update({
                            'errorDetail': detail
                        })
                # Further errors are only mentioned as a count.
                else:
                    # Get anything before an optional 'and N more' string.
                    try:
                        error_msg = re.match(
                            r'^(.*?)(?=\s*\(and \d+ more\)|$)', runtime_status.get('error')).groups()[0]
                        more_failures = re.match(
                            r'.*\(and (\d+) more\)', runtime_status.get('error'))
                    except TypeError:
                        # Ignore tests stubbing errors
                        return
                    if more_failures:
                        # Increment the existing "(and N more)" counter.
                        failure_qty = int(more_failures.groups()[0])
                        runtime_status.update({
                            'error': "%s (and %d more)" % (error_msg, failure_qty+1)
                        })
                    else:
                        runtime_status.update({
                            'error': "%s (and 1 more)" % error_msg
                        })
            elif kind in ['warning', 'activity']:
                # Record the last warning/activity status without regard of
                # previous occurences.
                runtime_status.update({
                    kind: message
                })
                if detail is not None:
                    runtime_status.update({
                        kind+"Detail": detail
                    })
            else:
                # Ignore any other status kind
                return
            try:
                self.api.containers().update(uuid=current['uuid'],
                                            body={
                                                'runtime_status': runtime_status,
                                            }).execute(num_retries=self.num_retries)
            except Exception as e:
                # Best effort: losing a status update must not kill the run.
                logger.info("Couldn't update runtime_status: %s", e)
def wrapped_callback(self, cb, obj, st):
with self.workflow_eval_lock:
cb(obj, st)
self.workflow_eval_lock.notifyAll()
def get_wrapped_callback(self, cb):
return partial(self.wrapped_callback, cb)
def on_message(self, event):
if event.get("object_uuid") in self.processes and event["event_type"] == "update":
uuid = event["object_uuid"]
if event["properties"]["new_attributes"]["state"] == "Running":
with self.workflow_eval_lock:
j = self.processes[uuid]
if j.running is False:
j.running = True
j.update_pipeline_component(event["properties"]["new_attributes"])
logger.info("%s %s is Running", self.label(j), uuid)
elif event["properties"]["new_attributes"]["state"] in ("Complete", "Failed", "Cancelled", "Final"):
self.process_done(uuid, event["properties"]["new_attributes"])
def label(self, obj):
return "[%s %s]" % (self.work_api[0:-1], obj.name)
    def poll_states(self):
        """Poll status of containers listed in the processes dict.

        Runs in a separate thread.

        Loops every poll_interval seconds: snapshots the tracked uuids,
        fetches their container request records in pages, and feeds each
        record to on_message() as a synthetic "update" event. Exits when
        stop_polling is set; on a fatal error it clears all tracked
        processes and wakes the main thread so arv_executor can unblock.
        """
        try:
            remain_wait = self.poll_interval
            while True:
                if remain_wait > 0:
                    self.stop_polling.wait(remain_wait)
                if self.stop_polling.is_set():
                    break
                # Snapshot under the lock; iterate outside it.
                with self.workflow_eval_lock:
                    keys = list(self.processes)
                if not keys:
                    remain_wait = self.poll_interval
                    continue

                begin_poll = time.time()
                if self.work_api == "containers":
                    table = self.poll_api.container_requests()

                pageSize = self.poll_api._rootDesc.get('maxItemsPerResponse', 1000)

                while keys:
                    page = keys[:pageSize]
                    try:
                        proc_states = table.list(filters=[["uuid", "in", page]]).execute(num_retries=self.num_retries)
                    except Exception:
                        # NOTE(review): this retries the same page immediately
                        # (keys is not advanced and remain_wait only applies
                        # after the inner loop) — confirm intended on
                        # persistent API errors.
                        logger.exception("Error checking states on API server: %s")
                        remain_wait = self.poll_interval
                        continue

                    for p in proc_states["items"]:
                        self.on_message({
                            "object_uuid": p["uuid"],
                            "event_type": "update",
                            "properties": {
                                "new_attributes": p
                            }
                        })
                    keys = keys[pageSize:]

                # Subtract time spent polling from the next sleep.
                finish_poll = time.time()
                remain_wait = self.poll_interval - (finish_poll - begin_poll)
        except:
            # Bare except on purpose: this is a daemon-style thread and any
            # exit (including SystemExit) must unblock the main thread.
            logger.exception("Fatal error in state polling thread.")
            with self.workflow_eval_lock:
                self.processes.clear()
                self.workflow_eval_lock.notifyAll()
        finally:
            self.stop_polling.set()
def add_intermediate_output(self, uuid):
if uuid:
self.intermediate_output_collections.append(uuid)
    def trash_intermediate_output(self):
        """Delete every collection recorded by add_intermediate_output().

        Individual failures are logged and skipped; KeyboardInterrupt or
        SystemExit aborts the cleanup loop. (The second except clause is
        reachable because those derive from BaseException, not Exception.)
        """
        logger.info("Cleaning up intermediate output collections")
        for i in self.intermediate_output_collections:
            try:
                self.api.collections().delete(uuid=i).execute(num_retries=self.num_retries)
            except Exception:
                # Best effort: keep going, but include the traceback in debug mode.
                logger.warning("Failed to delete intermediate output: %s", sys.exc_info()[1], exc_info=(sys.exc_info()[1] if self.debug else False))
            except (KeyboardInterrupt, SystemExit):
                break
    def check_features(self, obj, parentfield=""):
        """Recursively validate a tool document for requirements this runner
        cannot honor.

        Rejects DockerRequirement.dockerOutputDirectory values that are not
        absolute paths, and InplaceUpdateRequirement with inplaceUpdate set
        when it appears under "requirements" (keep collections cannot be
        updated in place). Recurses into nested dicts and lists, passing the
        containing key down as parentfield.
        """
        if isinstance(obj, dict):
            if obj.get("class") == "DockerRequirement":
                if obj.get("dockerOutputDirectory"):
                    if not obj.get("dockerOutputDirectory").startswith('/'):
                        raise SourceLine(obj, "dockerOutputDirectory", validate.ValidationException).makeError(
                            "Option 'dockerOutputDirectory' must be an absolute path.")
            if obj.get("class") == "InplaceUpdateRequirement":
                if obj["inplaceUpdate"] and parentfield == "requirements":
                    raise SourceLine(obj, "class", UnsupportedRequirement).makeError("InplaceUpdateRequirement not supported for keep collections.")
            for k,v in viewitems(obj):
                self.check_features(v, parentfield=k)
        elif isinstance(obj, list):
            for i,v in enumerate(obj):
                # SourceLine annotates any raised error with the list index.
                with SourceLine(obj, i, UnsupportedRequirement, logger.isEnabledFor(logging.DEBUG)):
                    self.check_features(v, parentfield=parentfield)
    def make_output_collection(self, name, storage_classes, tagsString, outputObj):
        """Assemble the final output collection from the CWL output object.

        Copies every referenced keep file/directory into a new collection,
        writes the rewritten output object as cwl.output.json, saves the
        collection under *name* with the given storage classes, applies each
        comma-separated tag from *tagsString*, and rewrites the output
        object's locations to point into the new collection.

        :returns: tuple (rewritten output object, saved Collection).
        """
        outputObj = copy.deepcopy(outputObj)

        # Collect every File/Directory entry referenced by the output.
        files = []
        def capture(fileobj):
            files.append(fileobj)

        adjustDirObjs(outputObj, capture)
        adjustFileObjs(outputObj, capture)

        generatemapper = NoFollowPathMapper(files, "", "", separateDirs=False)

        final = arvados.collection.Collection(api_client=self.api,
                                              keep_client=self.keep_client,
                                              num_retries=self.num_retries)

        for k,v in generatemapper.items():
            if v.type == "Directory" and v.resolved.startswith("_:"):
                    continue
            if v.type == "CreateFile" and (k.startswith("_:") or v.resolved.startswith("_:")):
                # Literal file content: write it directly into the collection.
                with final.open(v.target, "wb") as f:
                    f.write(v.resolved.encode("utf-8"))
                continue

            if not v.resolved.startswith("keep:"):
                raise Exception("Output source is not in keep or a literal")
            sp = v.resolved.split("/")
            srccollection = sp[0][5:]
            try:
                reader = self.collection_cache.get(srccollection)
                srcpath = "/".join(sp[1:]) if len(sp) > 1 else "."
                final.copy(srcpath, v.target, source_collection=reader, overwrite=False)
            except arvados.errors.ArgumentError as e:
                logger.error("Creating CollectionReader for '%s' '%s': %s", k, v, e)
                raise
            except IOError as e:
                logger.error("While preparing output collection: %s", e)
                raise

        # Point locations at the in-collection targets and drop derived keys.
        def rewrite(fileobj):
            fileobj["location"] = generatemapper.mapper(fileobj["location"]).target
            for k in ("listing", "contents", "nameext", "nameroot", "dirname"):
                if k in fileobj:
                    del fileobj[k]

        adjustDirObjs(outputObj, rewrite)
        adjustFileObjs(outputObj, rewrite)

        with final.open("cwl.output.json", "w") as f:
            res = str(json.dumps(outputObj, sort_keys=True, indent=4, separators=(',',': '), ensure_ascii=False))
            f.write(res)

        final.save_new(name=name, owner_uuid=self.project_uuid, storage_classes=storage_classes, ensure_unique_name=True)

        logger.info("Final output collection %s \"%s\" (%s)", final.portable_data_hash(),
                    final.api_response()["name"],
                    final.manifest_locator())

        final_uuid = final.manifest_locator()
        tags = tagsString.split(',')
        for tag in tags:
            self.api.links().create(body={
                "head_uuid": final_uuid, "link_class": "tag", "name": tag
                }).execute(num_retries=self.num_retries)

        # Rewrite locations once more to fully-qualified keep: references.
        def finalcollection(fileobj):
            fileobj["location"] = "keep:%s/%s" % (final.portable_data_hash(), fileobj["location"])

        adjustDirObjs(outputObj, finalcollection)
        adjustFileObjs(outputObj, finalcollection)

        return (outputObj, final)
    def set_crunch_output(self):
        """Record the final output on the current container record.

        Only applies when using the containers API and actually running
        inside a container; otherwise a no-op. API failures are logged and
        swallowed.
        """
        if self.work_api == "containers":
            current = arvados_cwl.util.get_current_container(self.api, self.num_retries, logger)
            if current is None:
                return
            try:
                self.api.containers().update(uuid=current['uuid'],
                                             body={
                                                 'output': self.final_output_collection.portable_data_hash(),
                                             }).execute(num_retries=self.num_retries)
                # NOTE(review): the named output collection is trashed here,
                # presumably because the container record above becomes the
                # canonical reference to the output — confirm intended.
                self.api.collections().update(uuid=self.final_output_collection.manifest_locator(),
                                              body={
                                                  'is_trashed': True
                                              }).execute(num_retries=self.num_retries)
            except Exception:
                logger.exception("Setting container output")
                return
def apply_reqs(self, job_order_object, tool):
if "https://w3id.org/cwl/cwl#requirements" in job_order_object:
if tool.metadata.get("http://commonwl.org/cwltool#original_cwlVersion") == 'v1.0':
raise WorkflowException(
"`cwl:requirements` in the input object is not part of CWL "
"v1.0. You can adjust to use `cwltool:overrides` instead; or you "
"can set the cwlVersion to v1.1 or greater and re-run with "
"--enable-dev.")
job_reqs = job_order_object["https://w3id.org/cwl/cwl#requirements"]
for req in job_reqs:
tool.requirements.append(req)
    def arv_executor(self, tool, job_order, runtimeContext, logger=None):
        """Main entry point: run (or submit) a CWL tool/workflow to completion.

        Validates options, uploads dependencies, optionally registers a
        workflow record or submits a runner container, then iterates the
        job generator, dispatching runnable steps and waiting for container
        completion, and finally assembles and returns the output.

        :param tool: loaded cwltool Process object.
        :param job_order: CWL input object.
        :param runtimeContext: ArvRuntimeContext with all CLI options.
        :param logger: unused here; the module-level logger is used
            throughout (this parameter shadows it with None by default —
            NOTE(review): confirm that is intentional).
        :returns: tuple (final output object or uuid, status string).
        """
        self.debug = runtimeContext.debug

        # Reject requirements this runner cannot honor before doing any work.
        tool.visit(self.check_features)

        self.project_uuid = runtimeContext.project_uuid
        self.pipeline = None
        self.fs_access = runtimeContext.make_fs_access(runtimeContext.basedir)
        self.secret_store = runtimeContext.secret_store

        # --- option validation (containers-API-only features) ---
        self.trash_intermediate = runtimeContext.trash_intermediate
        if self.trash_intermediate and self.work_api != "containers":
            raise Exception("--trash-intermediate is only supported with --api=containers.")

        self.intermediate_output_ttl = runtimeContext.intermediate_output_ttl
        if self.intermediate_output_ttl and self.work_api != "containers":
            raise Exception("--intermediate-output-ttl is only supported with --api=containers.")
        if self.intermediate_output_ttl < 0:
            raise Exception("Invalid value %d for --intermediate-output-ttl, cannot be less than zero" % self.intermediate_output_ttl)

        if runtimeContext.submit_request_uuid and self.work_api != "containers":
            raise Exception("--submit-request-uuid requires containers API, but using '{}' api".format(self.work_api))

        if not runtimeContext.name:
            runtimeContext.name = self.name = tool.tool.get("label") or tool.metadata.get("label") or os.path.basename(tool.tool["id"])

        # Upload local file references in the job order.
        job_order = upload_job_order(self, "%s input" % runtimeContext.name,
                                     tool, job_order)

        # We are "submitting" unless this is a single CommandLineTool run
        # with --wait and without --always-submit-runner.
        submitting = (runtimeContext.update_workflow or
                      runtimeContext.create_workflow or
                      (runtimeContext.submit and not
                       (tool.tool["class"] == "CommandLineTool" and
                        runtimeContext.wait and
                        not runtimeContext.always_submit_runner)))

        loadingContext = self.loadingContext.copy()
        loadingContext.do_validate = False
        loadingContext.do_update = False
        if submitting:
            # Document may have been auto-updated. Reload the original
            # document with updating disabled because we want to
            # submit the original document, not the auto-updated one.
            tool = load_tool(tool.tool["id"], loadingContext)

        # Upload direct dependencies of workflow steps, get back mapping of files to keep references.
        # Also uploads docker images.
        merged_map = upload_workflow_deps(self, tool)

        # Recreate process object (ArvadosWorkflow or
        # ArvadosCommandTool) because tool document may have been
        # updated by upload_workflow_deps in ways that modify
        # inheritance of hints or requirements.
        loadingContext.loader = tool.doc_loader
        loadingContext.avsc_names = tool.doc_schema
        loadingContext.metadata = tool.metadata
        tool = load_tool(tool.tool, loadingContext)

        existing_uuid = runtimeContext.update_workflow
        if existing_uuid or runtimeContext.create_workflow:
            # Create a pipeline template or workflow record and exit.
            if self.work_api == "containers":
                return (upload_workflow(self, tool, job_order,
                                        self.project_uuid,
                                        uuid=existing_uuid,
                                        submit_runner_ram=runtimeContext.submit_runner_ram,
                                        name=runtimeContext.name,
                                        merged_map=merged_map),
                        "success")

        self.apply_reqs(job_order, tool)

        self.ignore_docker_for_reuse = runtimeContext.ignore_docker_for_reuse
        self.eval_timeout = runtimeContext.eval_timeout

        # Work on a copy so our container-specific tweaks don't leak out.
        runtimeContext = runtimeContext.copy()
        runtimeContext.use_container = True
        runtimeContext.tmpdir_prefix = "tmp"
        runtimeContext.work_api = self.work_api

        if self.work_api == "containers":
            if self.ignore_docker_for_reuse:
                raise Exception("--ignore-docker-for-reuse not supported with containers API.")
            runtimeContext.outdir = "/var/spool/cwl"
            runtimeContext.docker_outdir = "/var/spool/cwl"
            runtimeContext.tmpdir = "/tmp"
            runtimeContext.docker_tmpdir = "/tmp"

        if runtimeContext.priority < 1 or runtimeContext.priority > 1000:
            raise Exception("--priority must be in the range 1..1000.")

        if self.should_estimate_cache_size:
            # Size the collection cache from the total size of keep inputs
            # (pdh_size extracts the byte count from the portable data hash).
            visited = set()
            estimated_size = [0]
            def estimate_collection_cache(obj):
                if obj.get("location", "").startswith("keep:"):
                    m = pdh_size.match(obj["location"][5:])
                    if m and m.group(1) not in visited:
                        visited.add(m.group(1))
                        estimated_size[0] += int(m.group(2))
            visit_class(job_order, ("File", "Directory"), estimate_collection_cache)
            runtimeContext.collection_cache_size = max(((estimated_size[0]*192) // (1024*1024))+1, 256)
            self.collection_cache.set_cap(runtimeContext.collection_cache_size*1024*1024)
            logger.info("Using collection cache size %s MiB", runtimeContext.collection_cache_size)

        runnerjob = None
        if runtimeContext.submit:
            # Submit a runner job to run the workflow for us.
            if self.work_api == "containers":
                if tool.tool["class"] == "CommandLineTool" and runtimeContext.wait and (not runtimeContext.always_submit_runner):
                    runtimeContext.runnerjob = tool.tool["id"]
                else:
                    # Wrap the tool in a RunnerContainer that executes the
                    # whole workflow inside a single container request.
                    tool = RunnerContainer(self, tool, loadingContext, runtimeContext.enable_reuse,
                                           self.output_name,
                                           self.output_tags,
                                           submit_runner_ram=runtimeContext.submit_runner_ram,
                                           name=runtimeContext.name,
                                           on_error=runtimeContext.on_error,
                                           submit_runner_image=runtimeContext.submit_runner_image,
                                           intermediate_output_ttl=runtimeContext.intermediate_output_ttl,
                                           merged_map=merged_map,
                                           priority=runtimeContext.priority,
                                           secret_store=self.secret_store,
                                           collection_cache_size=runtimeContext.collection_cache_size,
                                           collection_cache_is_default=self.should_estimate_cache_size)

        if runtimeContext.cwl_runner_job is not None:
            self.uuid = runtimeContext.cwl_runner_job.get('uuid')

        jobiter = tool.job(job_order,
                           self.output_callback,
                           runtimeContext)

        if runtimeContext.submit and not runtimeContext.wait:
            # Fire-and-forget: submit the runner and return immediately.
            runnerjob = next(jobiter)
            runnerjob.run(runtimeContext)
            return (runnerjob.uuid, "success")

        current_container = arvados_cwl.util.get_current_container(self.api, self.num_retries, logger)
        if current_container:
            logger.info("Running inside container %s", current_container.get("uuid"))

        # Start the state-polling thread and the task queue workers.
        self.poll_api = arvados.api('v1', timeout=runtimeContext.http_timeout)
        self.polling_thread = threading.Thread(target=self.poll_states)
        self.polling_thread.start()

        self.task_queue = TaskQueue(self.workflow_eval_lock, self.thread_count)

        try:
            self.workflow_eval_lock.acquire()

            # Holds the lock while this code runs and releases it when
            # it is safe to do so in self.workflow_eval_lock.wait(),
            # at which point on_message can update job state and
            # process output callbacks.

            loopperf = Perf(metrics, "jobiter")
            loopperf.__enter__()
            for runnable in jobiter:
                loopperf.__exit__()

                if self.stop_polling.is_set():
                    break

                if self.task_queue.error is not None:
                    raise self.task_queue.error

                if runnable:
                    with Perf(metrics, "run"):
                        self.start_run(runnable, runtimeContext)
                else:
                    # Nothing runnable right now: wait for in-flight work,
                    # or bail out if there is none (deadlock).
                    if (self.task_queue.in_flight + len(self.processes)) > 0:
                        self.workflow_eval_lock.wait(3)
                    else:
                        logger.error("Workflow is deadlocked, no runnable processes and not waiting on any pending processes.")
                        break

                if self.stop_polling.is_set():
                    break

                loopperf.__enter__()
            loopperf.__exit__()

            # All steps dispatched; wait for outstanding work to drain.
            while (self.task_queue.in_flight + len(self.processes)) > 0:
                if self.task_queue.error is not None:
                    raise self.task_queue.error
                self.workflow_eval_lock.wait(3)

        except UnsupportedRequirement:
            raise
        except:
            # Bare except on purpose: KeyboardInterrupt/SystemExit must also
            # trigger cancellation of outstanding container requests below.
            if sys.exc_info()[0] is KeyboardInterrupt or sys.exc_info()[0] is SystemExit:
                logger.error("Interrupted, workflow will be cancelled")
            elif isinstance(sys.exc_info()[1], WorkflowException):
                logger.error("Workflow execution failed:\n%s", sys.exc_info()[1], exc_info=(sys.exc_info()[1] if self.debug else False))
            else:
                logger.exception("Workflow execution failed")

            if self.pipeline:
                self.api.pipeline_instances().update(uuid=self.pipeline["uuid"],
                                                     body={"state": "Failed"}).execute(num_retries=self.num_retries)

            if self.work_api == "containers" and not current_container:
                # Not running in a crunch container, so cancel any outstanding processes.
                for p in self.processes:
                    try:
                        self.api.container_requests().update(uuid=p,
                                                             body={"priority": "0"}
                                                             ).execute(num_retries=self.num_retries)
                    except Exception:
                        pass
        finally:
            self.workflow_eval_lock.release()
            self.task_queue.drain()
            self.stop_polling.set()
            self.polling_thread.join()
            self.task_queue.join()

        if self.final_status == "UnsupportedRequirement":
            raise UnsupportedRequirement("Check log for details.")

        if self.final_output is None:
            raise WorkflowException("Workflow did not return a result.")

        if runtimeContext.submit and isinstance(tool, Runner):
            logger.info("Final output collection %s", tool.final_output)
        else:
            if self.output_name is None:
                self.output_name = "Output of %s" % (shortname(tool.tool["id"]))
            if self.output_tags is None:
                self.output_tags = ""

            storage_classes = runtimeContext.storage_classes.strip().split(",")
            self.final_output, self.final_output_collection = self.make_output_collection(self.output_name, storage_classes, self.output_tags, self.final_output)
            self.set_crunch_output()

        if runtimeContext.compute_checksum:
            adjustDirObjs(self.final_output, partial(get_listing, self.fs_access))
            adjustFileObjs(self.final_output, partial(compute_checksums, self.fs_access))

        if self.trash_intermediate and self.final_status == "success":
            self.trash_intermediate_output()

        return (self.final_output, self.final_status)
| 44.951697 | 161 | 0.577644 |
4796f2eecc16a538a24c657c6103d3fc7c470907 | 118,741 | py | Python | flaskJSONRPCServer/__init__.py | byaka/flaskJSONRPCServer | 328e88c7358e8ce87cd8c56a2db22b0c43e9661d | [
"Apache-2.0"
] | 2 | 2017-04-12T05:54:38.000Z | 2019-10-09T15:56:18.000Z | flaskJSONRPCServer/__init__.py | byaka/flaskJSONRPCServer | 328e88c7358e8ce87cd8c56a2db22b0c43e9661d | [
"Apache-2.0"
] | 160 | 2015-04-24T09:39:08.000Z | 2019-10-13T15:27:29.000Z | flaskJSONRPCServer/__init__.py | byaka/flaskJSONRPCServer | 328e88c7358e8ce87cd8c56a2db22b0c43e9661d | [
"Apache-2.0"
] | 2 | 2016-02-26T13:20:02.000Z | 2021-10-05T04:05:04.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*
# Package version, assembled as "major.minor.patch". Note that __ver_sub__
# (the "dev" suffix) is kept separately and is not part of __version__.
__ver_major__ = 0
__ver_minor__ = 9
__ver_patch__ = 2
__ver_sub__ = "dev"
__version__ = "%d.%d.%d" % (__ver_major__, __ver_minor__, __ver_patch__)
"""
This library is an extended implementation of server for JSON-RPC protocol. It supports only json-rpc 2.0 specification for now, which includes batch submission, keyword arguments, notifications, etc.
:authors: John Byaka
:copyright: Copyright 2018, Buber
:license: Apache License 2.0
:license:
Copyright 2018 Buber
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys, inspect, random, decimal, json, datetime, time, os, imp, hashlib, threading, gc, socket, Cookie, httplib, wsgiref.util
from types import InstanceType, IntType, FloatType, LongType, ComplexType, NoneType, UnicodeType, StringType, BooleanType, LambdaType, DictType, ListType, TupleType, ModuleType, FunctionType
from cStringIO import StringIO
from gzip import GzipFile
# Lazy-import placeholders: populated with the real gevent modules/objects
# only when gevent support is enabled at server construction time.
gevent=None
geventMonkey=None
geventSocket=None
geventFileObjectThread=None
from utils import *
import execBackend as execBackendCollection
import servBackend as servBackendCollection
from postprocess import postprocess
# Optional "experimental" companion package: when importable, let it inject
# extras into this module's globals; otherwise just report and continue.
try:
   import experimental as experimentalPack
   experimentalPack.initGlobal(globals())
except ImportError, e:
   print 'EXPERIMENTAL package not loaded:', e
class MainError(Exception):
   """Base exception raised by flaskJSONRPCServer internals (see _throw())."""
class flaskJSONRPCServer:
   """
   Main class of server.

   :param list|str|None ipAndPort: List or sequence, containing IP and PORT (or SOCKET_PATH and SOCKET), or str with path to UDS socket, or None for fake server.
   :param bool multipleAdress: If True, <ipAndPort> must be list, containing different <ipAndPort>. All of them will be binded to this server.
   :param bool blocking: Switch server to blocking mode (only one request per time will be processed).
   :param bool|dict cors: Add CORS headers to output (Access-Control-Allow-*). If 'dict', can contain values for <origin> and <method>.
   :param bool gevent: Patch Serv-backend and all process with Gevent.
   :param bool debug: Allow log messages from WSGI-backend.
   :param int|bool log: Set log-level or disable log messages about activity of flaskJSONRPCServer. If it's <int>, set log-level. 1 is error, 2 is warning, 3 is info, 4 is debug.
   :param bool fallback: Automatically accept and process JSONP requests.
   :param bool allowCompress: Allowing compression of output.
   :param list ssl: List or sequence, containing KEY and CERT. If passed, WSGI-backend will be switched to SSL protocol.
   :param list(int,int) tweakDescriptors: List containing new soft and hard limits of file-descriptors for current process.
   :param int compressMinSize: Max length of output in bytes that was not compressed.
   :param str|obj jsonBackend: Select JSON-backend for json.loads() and json.dumps(). If this parameter 'str' type, module with passed name will be imported. If this parameter 'obj' type, it must contain methods loads() and dumps().
   :param str|dict|'simple' dispatcherBackend: Select default dispatcher-exec-backend for processing regular requests. Lib include some prepared backends. Variable <execBackendCollection.execBackendMap> contains all of them. If this parameter 'dict' type, it must contain 'add' key as function, that will be called on every notify-request and receive all data about request, and '_id' key, containing unique identifier. Also it must contain 'check' function, that will be called for waiting result of processed requests. Optionally it can contain 'start' and 'stop' keys as functions (it will be called when server starting or stopping).
   :param str|dict|'simple' notifBackend: Select default dispatcher-exec-backend for processing notify-requests. Lib include some prepared backends. Variable <execBackendCollection.execBackendMap> contains all of them. If this parameter 'dict' type, it must contain 'add' key as function, that will be called on every notify-request and receive all data about request, and '_id' key, containing unique identifier. Optionally it can contain 'start' and 'stop' keys as functions (it will be called when server starting or stopping).
   :param func auth: This function will be called on every request to server (before processing it) and must return status as 'bool' type.
   :param bool experimental: If 'True', server will be patched with 'experimental' package.
   :param bool controlGC: If 'True', server will control GarbageCollector and manually call 'gc.collect()' (by default every 5 minutes or 150k requests or 150k dispatcher's calls -- see the <controlGC_*> settings).
   :param str magicVarForDispatcher: Name for variable, that can be passed to every dispatcher and will contain many useful data and methods. For more info see <server>.aboutMagicVarForDispatcher.
   :param str name: Optional name of server. If not passed, it will be generated automatically.
   :param str|dict|'auto' servBackend: Select serving-backend. Lib include some prepared backends. Variable <servBackendCollection.servBackendMap> contains all of them. If this parameter 'dict' type, it must contain 'start' key as function, that will be called on server's start, and '_id' key, containing unique identifier. Optionally it can contain 'stop' key as function (it will be called when server stopping).
   :param obj parentModule: Link to main program module (usually 'sys.modules[__name__]'). If not passed, it will be tried to find automatically.
   """
   def __init__(self, bindAdress, multipleAdress=False, blocking=False, cors=False, gevent=False, debug=False, log=3, fallback=True, allowCompress=False, ssl=False, tweakDescriptors=False, compressMinSize=1*1024*1024, jsonBackend='json', notifBackend='simple', dispatcherBackend='simple', auth=None, experimental=False, controlGC=True, magicVarForDispatcher='_connection', name=None, servBackend='auto', parentModule=None):
      # See the class docstring for full parameter description.
      self.started=False #indicate is server started
      self.exited=False #indicate is server (and main process) wait for terminating, useful for exec-backends
      self._pid=os.getpid()
      # prepare speedStats
      self.speedStats={}
      self.speedStatsMax={}
      # tweak descriptor's limit
      self._tweakLimit(tweakDescriptors)
      # init settings
      self.settings=magicDictCold({
         'multipleAdress':False,
         'fakeListener':False,
         'ip':[],
         'port':[],
         'socketPath':[],
         'socket':[],
         'blocking':blocking,
         'fallback_JSONP':fallback,
         'postprocess':{
            'byStatus':{}
         },
         'CORS':cors,
         'gevent':gevent,
         'servBackend':servBackend,
         'debug':debug,
         'log':log,
         'allowCompress':allowCompress,
         'compressMinSize':compressMinSize,
         'ssl':ssl,
         'sleepTime_checkProcessingCount':0.3,
         'sleepTime_waitLock':0.1,
         'sleepTime_waitDeepLock':0.1,
         'antifreeze_batchMaxTime':0.5*1000,
         'antifreeze_batchSleep':0.5,
         'antifreeze_batchBreak':False,
         'auth':auth,
         'experimental':experimental,
         'controlGC':controlGC,
         'controlGC_everySeconds':5*60, # every 5 minutes
         'controlGC_everyRequestCount':150*1000, # every 150k requests
         'controlGC_everyDispatcherCount':150*1000, # every 150k dispatcher's calls
         'backlog':10*1000,
         'magicVarForDispatcher':magicVarForDispatcher
      })
      self.setts=self.settings #backward compatible
      self.__settings=self.settings #after server starts we copy settings here like dict() for bursting performance and avoid of changing settings
      # set name
      self.name=name or randomEx(10**4, pref='flaskJSONRPCServer_<', suf='>')
      self.version=__version__
      if not bindAdress:
         # fake server without listeners
         self.settings['fakeListener']=True
      else:
         # set adress for real listeners
         self.settings['multipleAdress']=multipleAdress
         bindAdress=bindAdress if multipleAdress else [bindAdress]
         for bind in bindAdress:
            tArr={'ip':None, 'port':None, 'socketPath':None, 'socket':None}
            if isString(bind):
               # a plain string is a UDS socket path
               # if not checkPath(bind):
               #    self._throw('Wrong path for UDS socket: %s'%bind)
               tArr['socketPath']=bind
            else:
               if len(bind)!=2:
                  self._throw('Wrong "bindAdress" parametr: %s'%bind)
               # second item may be an already created socket object (stdlib or gevent)
               isSocket=str(type(bind[1])) in ["<class 'socket._socketobject'>", "<class 'gevent.socket.socket'>", "<class 'gevent._socket2.socket'>"]
               if isSocket:
                  tArr['socketPath']=bind[0]
                  tArr['socket']=bind[1]
               else:
                  tArr['ip']=bind[0]
                  tArr['port']=bind[1]
            for k, v in tArr.iteritems():
               self.settings[k].append(v)
      # other
      self._magicVarForDispatcherOverload=[]
      self.servBackend=None
      self.locked=False
      self._parentModule=parentModule
      if self._parentModule is None:
         self._findParentModule()
      self._reloadBackup={}
      self._gcStats={'lastTime':0, 'processedRequestCount':0, 'processedDispatcherCount':0, 'processing':False}
      self.deepLocked=False
      self.processingRequestCount=0
      self.processingDispatcherCount=0
      self.routes={}
      self.fixJSON=self._fixJSON
      # prepare connPerMinute
      self.connPerMinute={
         'nowMinute':0, 'count':0, 'oldCount':0, 'maxCount':0, 'minCount':0,
         'history':{'minute':deque2([], 9999), 'count':deque2([], 9999)}
      }
      # select JSON-backend
      self.jsonBackend=json
      if isString(jsonBackend):
         try: self.jsonBackend=__import__(jsonBackend)
         except: self._logger(2, 'Cant import JSON-backend "%s", used standart'%(jsonBackend))
      elif jsonBackend: self.jsonBackend=jsonBackend
      self.execBackend={}
      # select Dispatcher-backend
      self.defaultDispatcherBackendId=self._registerExecBackend(dispatcherBackend, notif=False)
      # select Notif-backend
      self.defaultNotifBackendId=self._registerExecBackend(notifBackend, notif=True)
      # enable experimental
      if experimental: experimentalPack.initLocal(locals(), self)
      # call patchServer if existed
      if isFunction(locals().get('_patchServer', None)): locals()['_patchServer'](self)
      # check JSON-backend: round-trip a sample value to verify loads/dumps agree
      try:
         testVal_o=[{'test1':[1, '2', True, None]}]
         testVal_c=self._parseJSON(self._serializeJSON(testVal_o))
         if testVal_o!=testVal_c:
            self._throw('Checking JSONBackend: values not match (%s, %s)'%(testVal_o, testVal_c))
      except Exception, e:
         self._throw('Unsupported JSONBackend %s: %s'%(jsonBackend, e))
      # enable GC manual control
      if self.settings.controlGC: self._controlGC()
def _initListenerUDS(self, path, sockClass=None, backlog=None):
""" Create listeners for UDS socket. """
sockClass=sockClass or self._socketClass()
backlog=backlog or self.__settings['backlog']
if os.path.exists(path): #remove if exist
os.remove(path)
l=sockClass.socket(sockClass.AF_UNIX, sockClass.SOCK_STREAM)
l.bind(path)
l.listen(backlog)
return l
   def _registerExecBackend(self, execBackend, notif):
      """
      Register a new execute-backend in server; the backend will be started when <server>.start() is called.

      Accepts a registered backend name, the literal 'simple' (no backend),
      a dict with 'add'/'_id' keys, or an instance with those attributes.

      :param str|obj execBackend: registered backend name or obj.
      :param bool notif: flag indicating is this backend a notification backend.
      :return: unique identifier of the registered backend.
      """
      if self._inChild():
         self._throw('This method "%s()" can be called only from <main> process'%sys._getframe().f_code.co_name)
      # check if execBackend._id passed. If true, not needed to initialize it
      if execBackend in self.execBackend: return execBackend
      # get _id of backend and prepare
      _id=None
      if execBackend!='simple' and isString(execBackend): #registered exec-backend
         if execBackend not in execBackendCollection.execBackendMap:
            self._throw('Unknown Exec-backend "%s"'%execBackend)
         execBackend=execBackendCollection.execBackendMap[execBackend](notif)
         _id=getattr(execBackend, '_id', None)
      elif execBackend=='simple': #without exec-backend
         return 'simple'
      elif isDict(execBackend):
         _id=execBackend.get('_id', None)
         execBackend=magicDict(execBackend)
      elif isInstance(execBackend):
         _id=getattr(execBackend, '_id', None)
      else:
         self._throw('Unsupported Exec-backend type "%s"'%type(execBackend))
      # a usable backend must expose both an "_id" and an "add()" entry point
      if not _id:
         self._throw('No "_id" in Exec-backend "%s"'%execBackend)
      if not hasattr(execBackend, 'add'):
         self._throw('No "add()" in Exec-backend "%s"'%execBackend)
      if _id not in self.execBackend: # add execBackend to map if not exist
         self.execBackend[_id]=execBackend
      return _id
   def postprocessAdd(self, type, cb, mode=None, status=None):
      """
      Add new postprocess callback of given <type>.

      Must be called before the server starts. Handlers are stored per
      http-status in settings.postprocess['byStatus'].

      :param str type: Type of given <cb>, now supported ('wsgi', 'cb').
         NOTE: parameter name shadows the builtin <type>; kept for API compatibility.
      :param func cb: The handler itself.
      :param str mode: Set mode for this <cb>. If None, default is used.
      :param int|list status: Use this <cb> on this http status (number or list of numbers).
      """
      if self.started:
         self._throw("You can't add new postprocesser while server runned")
      if mode and mode not in postprocess._modeSupported:
         self._throw("Unsupported postprocess-mode, now suppoerted only %s"%list(postprocess._modeSupported))
      if type not in postprocess._typeSupported:
         self._throw("Unsupported postprocess-type, now suppoerted only %s"%list(postprocess._typeSupported))
      if status is not None:
         status=status if isArray(status) else [status]
         for s in status:
            # use by this status
            if not isInt(s):
               raise TypeError('<status> must be number or list of numbers')
            if s not in self.settings.postprocess['byStatus']:
               self.settings.postprocess['byStatus'][s]=[]
            self.settings.postprocess['byStatus'][s].append((type, mode, cb))
      else:
         #! need to test "use always" type
         self._throw("Postprocessers without conditions currently not supported")
def postprocessAdd_wsgi(self, wsgi, mode=None, status=None):
"""
Add new postprocess WSGI.
:param wsgi:
:param str mode: Set mode for this <wsgi>. If None, used default.
:param int status: Use this <wsgi> on this http status.
"""
self.postprocessAdd('wsgi', wsgi, mode, status)
def postprocessAdd_cb(self, cb, mode=None, status=None):
"""
Add new postprocess simple callback.
:param cb:
:param str mode: Set mode for this <cb>. If None, used default.
:param int status: Use this <cb> on this http status.
"""
self.postprocessAdd('cb', cb, mode, status)
   def _tryGevent(self):
      """
      Lazily import gevent and its helpers into module globals.

      :return bool: True if the import happened now, False if already imported.
      :raises MainError: if gevent is not installed.
      """
      global gevent, geventMonkey, geventSocket, geventFileObjectThread
      if gevent and geventMonkey and geventSocket and geventFileObjectThread: return False
      try:
         # the "global" statement above makes these bind at module level
         import gevent
         from gevent import monkey as geventMonkey
         from gevent import socket as geventSocket
         from gevent.fileobject import FileObjectThread as geventFileObjectThread
         return True
      except ImportError, e:
         self._throw('gevent not found: %s'%e)
   def _import(self, modules, forceDelete=True):
      """
      Re-import the given modules, monkey-patching them with gevent if enabled,
      and re-bind references to the old module objects in every other loaded module.

      :param dict modules: Mapping of module name -> gevent patch spec
         (None for no patch, patch name, or [patch name, {patch kwargs}]).
      :param bool forceDelete: If True, drop the modules from sys.modules first.
      """
      #! Add checking with "monkey.is_module_patched()"
      # clean imported modules
      reimportMap={}
      tArr1={}
      for k in modules:
         if k not in sys.modules: continue
         tArr1[__import__(k)]=k
         if forceDelete: del sys.modules[k]
      # scan every other loaded module for attributes that point to the old
      # module objects, so they can be re-pointed after the re-import below
      for m in sys.modules.keys(): #creating copy of keys avoids "dictionary changed size during iteration" error
         # if m.startswith('flaskJSONRPCServer'): continue
         if m=='six.moves': continue #here contained ALL modules for lazy-importing, so we skip it
         if m.startswith('gevent'): continue
         if m in modules: continue
         for k in dir(sys.modules[m]):
            v=getattr(sys.modules[m], k, None)
            if not isModule(v) or v not in tArr1: continue
            if m not in reimportMap: reimportMap[m]={}
            reimportMap[m][k]=tArr1[v]
      # apply patchs
      if self.__settings['gevent']:
         self._tryGevent()
         for k, v in modules.iteritems():
            if not v: continue
            v=v if isArray(v) else [v]
            patchName=v[0]
            if not hasattr(geventMonkey, patchName):
               self._logger(2, 'Warning: unknown patch "%s"'%patchName)
               continue
            patch=getattr(geventMonkey, patchName)
            # pass only kwargs that this gevent version's patch actually accepts
            patchSupported, _, _, _=inspect.getargspec(patch)
            patchArgs_default=v[1] if len(v)>1 else {}
            patchArgs={}
            # NOTE(review): this inner loop rebinds the outer loop's <k>,<v>;
            # harmless with iteritems() here, but fragile if refactored
            for k, v in patchArgs_default.iteritems():
               if k in patchSupported: patchArgs[k]=v
            patch(**patchArgs)
      #reimport modules
      tArr1=dict((k, __import__(k)) for k in modules)
      for m, o in reimportMap.iteritems():
         for k, v in o.iteritems():
            setattr(sys.modules[m], k, tArr1[v])
      self._logger(3, 'Modules "%s" patchet with "%s" (gevent %s)'%(', '.join(reimportMap.keys()), ', '.join(modules.keys()), 'enabled' if self.__settings['gevent'] else 'disabled'))
def _importThreading(self, scope=None, forceDelete=True):
"""
This method import (or patch) module <threading>(and time.sleep) to scope.
:param dict scope:
:param bool forceDelete: if True really delete existed modul before importing.
:return: scope
"""
modules={
'threading':None,
'thread':['patch_thread', {'threading':True, '_threading_local':True, 'Event':False, 'logging':True, 'existing_locks':False}],
'time':'patch_time'
}
return self._import(modules, forceDelete=forceDelete)
def _importSocket(self, scope=None, forceDelete=True):
"""
This method import (or patch) module <socket> (and some others) to scope.
:param dict scope:
:param bool forceDelete: if True really delete existed modul before importing.
:return: scope
"""
modules={
'socket':['patch_socket', {'dns':True, 'aggressive':True}],
'ssl':'patch_ssl',
'select':['patch_select', {'aggressive':True}]
}
return self._import(modules, forceDelete=forceDelete)
def _importAll(self, scope=None, forceDelete=True):
"""
This method call _importThreading() and _importSocket().
"""
self._importThreading(scope=scope, forceDelete=forceDelete)
self._importSocket(scope=scope, forceDelete=forceDelete)
   def _patchWithGevent(self):
      """
      Patch the current process for compatibility with gevent:
      monkey-patch the stdlib (with only the kwargs the installed gevent
      version supports) and re-import this module's patched dependencies.

      :raises MainError: if gevent is not installed.
      """
      try:
         self._tryGevent()
      except Exception:
         self._throw('You switch server to GEVENT mode, but gevent not founded. For switching to DEV mode (without GEVENT) please pass <gevent>=False to constructor')
      # check what patches supports installed gevent version
      monkeyPatchSupported, _, _, _=inspect.getargspec(geventMonkey.patch_all)
      monkeyPatchArgs_default={'socket':False, 'dns':False, 'time':False, 'select':False, 'thread':False, 'os':True, 'ssl':False, 'httplib':False, 'subprocess':True, 'sys':False, 'aggressive':True, 'Event':False, 'builtins':True, 'signal':True}
      monkeyPatchArgs={}
      for k, v in monkeyPatchArgs_default.iteritems():
         if k in monkeyPatchSupported: monkeyPatchArgs[k]=v
      # monkey patching
      geventMonkey.patch_all(**monkeyPatchArgs)
      # socket/threading/time etc. are patched separately via _importAll()
      self._importAll(forceDelete=True, scope=globals())
      self._logger(3, 'Process patched with gevent')
   def _tweakLimit(self, descriptors=(65536, 65536)):
      """
      This method change file descriptor's limit of current process.

      Errors from setrlimit are deliberately swallowed: the Linux and BSD
      resource names are tried in turn, best-effort.

      :param list(int, int)|True|False descriptors: New soft and hard limit.
         True selects the default (65536, 65536); falsy disables tweaking.
      """
      try:
         import resource
      except ImportError:
         # e.g. on Windows the <resource> module does not exist
         self._logger(2, 'WARNING: tweaking file descriptors limit not supported on your platform')
         return None
      if descriptors:
         if descriptors is True:
            descriptors=(65536, 65536)
         try: #for Linux
            if resource.getrlimit(resource.RLIMIT_NOFILE)!=descriptors:
               resource.setrlimit(resource.RLIMIT_NOFILE, descriptors)
         except: pass
         try: #for BSD
            if resource.getrlimit(resource.RLIMIT_OFILE)!=descriptors:
               resource.setrlimit(resource.RLIMIT_OFILE, descriptors)
         except: pass
def _countFileDescriptor(self, pid=None):
"""
This method return number of used file descriptors by process.
:param int|str pid: Process ID if None then pid is ID of current proccess.
:return int:
"""
mytime=getms()
pid=os.getpid() if pid is None else pid
try:
# c=0
# for s in os.listdir('/proc/%s/fd'%pid): c+=1
c=len(os.listdir('/proc/%s/fd'%pid))
self._speedStatsAdd('countFileDescriptor', getms()-mytime)
return c
except Exception, e:
self._speedStatsAdd('countFileDescriptor', getms()-mytime)
self._logger(2, "Can't count File Descriptor for PID %s: %s"%(pid, e))
return None
   def _countMemory(self, pid=None):
      """
      This method return used memory by process in kilobytes
      (reads /proc/<pid>/status, so Linux-style procfs only).

      :param int pid: Process ID if None then pid is ID of current proccess.
      :return dict|None: {'peak': 'max used memory', 'now': 'current used memory'}, or None on error.
      """
      mytime=getms()
      pid=os.getpid() if pid is None else pid
      f=None
      res={}
      try:
         f=open('/proc/%s/status'%pid)
         f=self._fileObj(f)
         for s in f:
            # lines look like "VmPeak:  1234 kB"; parts[0][2:-1] strips the
            # leading "Vm" and the trailing ":" to leave e.g. "peak"/"rss"
            parts=s.split()
            key=parts[0][2:-1].lower()
            if key=='peak': res['peak']=int(parts[1])
            elif key=='rss': res['now']=int(parts[1])
      except Exception, e:
         self._logger(2, "Can't count memory for PID %s: %s"%(pid, e))
         res=None
      if f is not None: f.close()
      self._speedStatsAdd('countMemory', getms()-mytime)
      return res
   def _checkFileDescriptor(self, multiply=1.0):
      """
      This method check if used file descriptors near limit.

      :param float multiply: Multiply factor applied to the current count
         before comparing against the soft limit (headroom check).
      :return bool|None: True if count*multiply >= limit, None if the limit
         could not be determined on this platform.
      """
      try:
         import resource
      except ImportError:
         self._logger(1, 'ERROR: checking file descriptors limit not supported on your platform')
         return None
      limit=None
      try: #for Linux
         limit=resource.getrlimit(resource.RLIMIT_NOFILE)[0]
      except:
         try: #for BSD
            limit=resource.getrlimit(resource.RLIMIT_OFILE)[0]
         except: pass
      if limit is None:
         self._logger(2, "WARNING: Can't get File Descriptor Limit")
         return None
      c=self._countFileDescriptor()
      if c is None: return None
      res=c*multiply>=limit
      if res:
         self._logger(2, 'WARNING: reached file descriptors limit %s(%s)/%s'%(c, c*multiply, limit))
      return res
   def _inChild(self):
      """
      Return True if called from a child process (the current PID differs
      from the PID recorded at server construction), else False.

      :return bool:
      """
      return self._pid!=os.getpid()
   def _fileObj(self, f):
      """
      Wrap a file object for the current mode: in gevent mode return a
      FileObjectThread so blocking file IO runs in a native thread and
      does not freeze the event loop; otherwise return <f> unchanged.
      """
      if self.__settings['gevent']:
         self._tryGevent()
         f=geventFileObjectThread(f)
      return f
   def _fileGet(self, fName, mode='r', silent=True, buffer=-1):
      """
      This method open file and read content in mode <mode>, if file is archive then open it and find file with name <mode>.

      :Attention:
         Auto-unpacking of zip-files temporally disabled.

      :param str fName: Path to file.
      :param str mode: Read-mode or file name.
      :param bool silent: Allow suppress errors (default True: returns None on failure).
      :param int buffer: Same as standart python's buffering.
      :return str|None:
      """
      # NOTE(review): path is coerced to cp1251 bytes here -- presumably for
      # Windows-1251 filesystems; confirm this is still wanted on other platforms
      fName=fName.encode('cp1251')
      if not os.path.isfile(fName): return None
      if self.__settings['gevent']: self._tryGevent()
      try:
         with open(fName, mode, buffer) as f:
            f=self._fileObj(f)
            s=f.read()
      except Exception, e:
         if not silent: raise
         self._logger(1, 'Error fileGet', fName, ',', mode, e)
         s=None
      return s
   def _fileWrite(self, fName, text, mode='w', silent=False, buffer=-1):
      """
      This method write content to file with specific mode.
      If mode is 'a' method append data to the end of file.
      Non-string <text> is serialized to JSON before writing.

      :param str text:
      :param str fName: Path to file.
      :param str mode: Write-mode.
      :param bool silent: Allow suppress errors (default False: re-raises).
      :param int buffer: Same as standart python's buffering.
      :return str:
      """
      if not isString(text):
         text=self._serializeJSON(text)
      if self.__settings['gevent']: self._tryGevent()
      try:
         with open(fName, mode, buffer) as f:
            f=self._fileObj(f)
            f.write(text)
      except Exception, e:
         if not silent: raise
         self._logger(1, 'Error fileWrite', fName, ',', mode, e)
def _sha1(self, text, onError=None):
"""
This method generate hash with sha1.
Length of symbols = 40.
:param str text:
:param func|any onError:
:return str:
"""
mytime=getms()
try:
try: c=hashlib.sha1(text)
except UnicodeEncodeError: c=hashlib.sha1(text.encode('utf8'))
s=c.hexdigest()
self._speedStatsAdd('sha1', getms()-mytime)
return s
except Exception, e:
self._speedStatsAdd('sha1', getms()-mytime)
self._logger(1, 'ERROR in _sha1():', e)
if isFunction(onError): return onError(text)
return onError
   def _sha256(self, text, onError=None):
      """
      This method generate hash with sha256.
      Length of symbols = 64.

      :param str text:
      :param func|any onError: Called with <text> (if callable) or returned as-is on failure.
      :return str:
      """
      mytime=getms()
      try:
         try: c=hashlib.sha256(text)
         except UnicodeEncodeError: c=hashlib.sha256(text.encode('utf8'))
         s=c.hexdigest()
         self._speedStatsAdd('sha256', getms()-mytime)
         return s
      except Exception, e:
         self._speedStatsAdd('sha256', getms()-mytime)
         self._logger(1, 'ERROR in _sha256():', e)
         if isFunction(onError): return onError(text)
         return onError
def _throw(self, data):
"""
This method throw exception of class <MainError:data>.
:param str data: Info about error.
"""
raise MainError(data)
   def _thread(self, target, args=None, kwargs=None, forceNative=False):
      """
      This method is wrapper above threading.Thread() or gevent.spawn(). Method swithing automatically, if <forceNative> is False. If it's True, always use unpatched threading.Thread(). Spawned threads always will be started like daemons.

      :param func target:
      :param list args:
      :param dict kwargs:
      :param bool forceNative:
      :return: Thread or Greenlet object (or raw thread id from gevent._threading).
      """
      args=() if args is None else args
      kwargs={} if kwargs is None else kwargs
      if not self.__settings['gevent']:
         t=threading.Thread(target=target, args=args, kwargs=kwargs)
         t.daemon=True
         t.start()
      else:
         self._tryGevent()
         if forceNative:
            # native (unpatched) thread inside a gevent process
            if hasattr(gevent, '_threading'):
               t=gevent._threading.start_new_thread(target, tuple(args), kwargs)
            else:
               try:
                  thr=geventMonkey.get_original('threading', 'Thread')
               except Exception:
                  self._throw('Cant find nativeThread implementation in gevent')
               # NOTE(review): unlike the non-gevent branch, daemon flag is not
               # set here -- confirm whether that is intentional
               t=thr(target=target, args=args, kwargs=kwargs)
               t.start()
         else:
            t=gevent.Greenlet(target, *args, **kwargs)
            t.start()
      return t
   def callAsync(self, target, args=None, kwargs=None, sleepTime=0.3, sleepMethod=None, returnChecker=False, wait=True, forceNative=True, cb=None, cbData=None, redirectErrorsToLog=False):
      """
      This method allow to run <target> asynchronously (without blocking server) when used with gevent, or simply in threaded-mode without gevent. It can return result of executed function <target>, or function 'checker', that must be called for getting result (or happened errors). If <wait> and <returnChecker> both passed to False, <target> will be executed in background (without blocking current thread or greenlet) and return nothing.

      You also can pass optional callback, that will be called after <target> completed and before results returned to main thread. So, if you only want to use callback and nothing more, set <wait> to False also.

      :Attention:
         It's not a silver bullet! This method increase performance only for limited cases, like executing C-code or performing IO-ops, that not friendly for gevent. It really help with <pymysql> on large responses. In all other cases it not increase performance, but increase responsiveness of server. This mean, while some function executed in this way, server still available and can process other requests.

      :Attention:
         This method use hack, that combine greenlets and native threads. It can really burst responsiveness of your server, but strongly recommended to use it only for functions, that don't do any "complicated" things, like working with shared memory, threads or server's instances.

      :param func target:
      :param list args:
      :param dict kwargs:
      :param float sleepTime:
      :param func(<sleepTime>) sleepMethod: This method will be called, while <target> not complete and <wait> is True. If None, default will be used.
      :param bool returnChecker: If True, return function, that must be called for get result.
      :param bool wait: If True, current thread or greenlet will be blocked until results.
      :param bool forceNative: If this False, method will use greenlets instead of native threads (of course if gevent allowed). This cancels all benefits of this method, but can be useful, if you simply want to run code in greenlet and wait for result.
      :param func(result, error, <cbData>) cb: If passed, this function will be called after <target> completed and before results returned to main thread.
      :param any cbData:
      :param int|bool|func redirectErrorsToLog: If not False, errors are logged (at this level, or via this callback) instead of raised.
      :return any|func: Returns result of executed function or checker.
      """
      sleepMethod=sleepMethod or self._sleep
      args=args or []
      kwargs=kwargs or {}
      mytime=getms()
      # <save> tells the wrapper whether anybody will ever read the result
      save=wait or returnChecker
      # prepare queue
      if not hasattr(self, '_callAsync_queue'): self._callAsync_queue={}
      # generate unique id and workspace
      cId=randomEx(vals=self._callAsync_queue)
      self._callAsync_queue[cId]={'result':None, 'inProgress':True, 'error':None, '_thread':None, 'target':target, 'args':args, 'kwargs':kwargs, 'cb':cb, 'cbData':cbData}
      # init wrapper
      def tFunc_wrapper(self, cId, target, args, kwargs, save, cb, cbData):
         link=self._callAsync_queue[cId]
         try:
            link['result']=target(*args, **kwargs)
         except Exception, e:
            link['error']=e
         if cb:
            cb(link['result'], link['error'], cbData)
         link['inProgress']=False
         if not save:
            # fire-and-forget mode: clean up and handle errors inside the thread
            del self._callAsync_queue[cId]
            if link['error']:
               if redirectErrorsToLog is not False:
                  if isFunction(redirectErrorsToLog): redirectErrorsToLog(link)
                  else:
                     self._logger((redirectErrorsToLog if isInt(redirectErrorsToLog) else 1), str(link['error']))
               else:
                  e=link['error']
                  raise e
      # call in native thread
      self._callAsync_queue[cId]['_thread']=self._thread(tFunc_wrapper, args=[self, cId, target, args, kwargs, save, cb, cbData], forceNative=forceNative)
      if not save: return
      # return checker or get result
      if returnChecker:
         # init checker
         def tFunc_checker(__cId=cId):
            link=self._callAsync_queue[__cId]
            if link['inProgress']: return False, None, None
            else:
               res=link['result']
               err=link['error']
               del link, self._callAsync_queue[__cId]
               return True, res, err
         return tFunc_checker
      else:
         # wait for completion and get result
         link=self._callAsync_queue[cId]
         while link['inProgress']: sleepMethod(sleepTime)
         res=link['result']
         err=link['error']
         del link, self._callAsync_queue[cId]
         self._speedStatsAdd('callAsync', getms()-mytime)
         # raise error or return result
         if err:
            if redirectErrorsToLog is not False:
               self._logger((redirectErrorsToLog if isInt(redirectErrorsToLog) else 1), str(err))
               return
            else:
               raise err
         else: return res
def _socketClass(self):
"""
This method returns correct socket class. For gevent return gevent's implementation.
"""
if not self.__settings['gevent']: return socket
else:
self._tryGevent()
return geventSocket
   def _raw_input(self, msg=None, forceNative=False):
      """
      Non-blocking console input. Without gevent uses native raw_input.

      :param str msg: Optional prompt, written to stdout before reading.
      :param bool forceNative: Always use the blocking builtin raw_input.
      :return str: The entered line.
      """
      if forceNative or not self.__settings['gevent']: return raw_input(msg)
      else:
         if msg:
            sys.stdout.write(msg)
            sys.stdout.flush()
         # cooperatively wait until stdin becomes readable, then read one line
         self._socketClass().wait_read(sys.stdin.fileno())
         s=sys.stdin.readline()
         return s[:-1] # removing trailing linebreak
def _sleep(self, s, forceNative=False):
"""
This method is wrapper around `time.sleep()` and `gevent.sleep()`. Method swithing automatically, if <forceNative> is `False`. If it's `True`, always use unpatched `time.sleep()`.
For more info about `0` value see https://github.com/gevent/gevent/issues/744#issuecomment-185646372.
:param float s: Delay in seconds.
:param bool forceNative:
"""
if not self.__settings['gevent']:
_sleep=time.sleep
else:
self._tryGevent()
if forceNative:
_sleep=geventMonkey.get_original('time', 'sleep')
else:
_sleep=gevent.sleep
_sleep(s)
   def _findParentModule(self):
      """
      This method find parent module and pass him to attr <_parentModule> of server.

      Walks the interpreter stack from the outermost frame and picks the
      first frame whose file path equals the main script's path.
      """
      #! check method by sys.modules['__main__']
      m=None
      mainPath=getScriptPath(True, False)
      for stk in reversed(inspect.stack()):
         # find frame of parent by module's path
         if mainPath!=stk[1]: continue
         m=inspect.getmodule(stk[0])
         break
      if m is None:
         return self._logger(1, "Cant find parent's module")
      self._parentModule=m
   def _importGlobalsFromParent(self, scope=None, typeOf=None, filterByName=None, filterByNameReversed=False):
      """
      This function import global attributes from parent module (main program) to given scope.
      Imported attributes can be filtered by type, by name or by callback.
      Source based on http://stackoverflow.com/a/9493520/5360266

      :param None|dict scope: Scope for add or change resulting variables. If not passed, new will be created.
      :param None|True|func(name,value)|list typeOf: Filtering by type or callback. If None, filtering disabled. If True, auto filtering by types [Int, Float, Long, Complex, None, Unicode, String, Boolean, Lambda, Dict, List, Tuple, Module, Function].
      :param list filterByName: If passed, only variables with this names will be imported.
      :param bool filterByNameReversed: If True, <filterByName> is treated as an exclusion list instead.
      :return dict: The populated scope.
      """
      if self._inChild():
         self._throw('This method "%s()" can be called only from <main> process'%sys._getframe().f_code.co_name)
      if self._parentModule is None:
         self._throw("Parent module not founded")
      if typeOf is True: # check by standart types
         typeOf=[IntType, FloatType, LongType, ComplexType, NoneType, UnicodeType, StringType, BooleanType, LambdaType, DictType, ListType, TupleType, ModuleType, FunctionType]
      try:
         mytime=getms()
         scope=scope if scope is not None else {}
         if not isFunction(typeOf):
            typeOf=None if typeOf is None else tuple(typeOf)
         if filterByName is None:
            tArr1=dir(self._parentModule)
         elif filterByNameReversed: #exclude specific names
            tArr1=filterByName if isArray(filterByName) else [filterByName]
            tArr1=[k for k in dir(self._parentModule) if k not in tArr1]
         else: #include only specific names
            tArr1=filterByName if isArray(filterByName) else [filterByName]
         for k in tArr1:
            v=getattr(self._parentModule, k)
            # check type if needed or use callback
            if typeOf is None: pass
            elif isFunction(typeOf) and not typeOf(k, v): continue
            elif not isinstance(v, typeOf): continue
            # importing
            scope[k]=v
         self._speedStatsAdd('importGlobalsFromParent', getms()-mytime)
         return scope
      except Exception, e:
         self._throw("Cant import parent's globals: %s"%e)
   def _mergeGlobalsToParent(self, scope, typeOf=None, filterByName=None, filterByNameReversed=False):
      """
      This function merge given scope with global attributes from parent module (main program).
      Merged attributes can be filtered by type, by name or by callback.

      :param dict scope: Scope that will be merged.
      :param None|True|func(name,value)|list typeOf: Filtering by type or callback. If None, filtering disabled. If True, auto filtering by types [Int, Float, Long, Complex, None, Unicode, String, Boolean, Lambda, Dict, List, Tuple, Function].
      :param list filterByName: If passed, only variables with this names will be merged.
      :param bool filterByNameReversed: If True, <filterByName> works as an exclude-list instead of an include-list.
      :return dict: The same <scope> that was passed.
      """
      if self._inChild():
         self._throw('This method "%s()" can be called only from <main> process'%sys._getframe().f_code.co_name)
      if self._parentModule is None:
         self._throw("Parent module not founded")
      if not isDict(scope):
         self._throw("Incorrect scope type: %s"%type(scope))
      if typeOf is True: # check by standart types
         # NOTE(review): unlike _importGlobalsFromParent(), ModuleType is not in this list — presumably intentional (don't overwrite modules in parent); confirm
         typeOf=[IntType, FloatType, LongType, ComplexType, NoneType, UnicodeType, StringType, BooleanType, LambdaType, DictType, ListType, TupleType, FunctionType]
      try:
         mytime=getms()
         if not isFunction(typeOf):
            # isinstance() below requires a tuple of types, not a list
            typeOf=None if typeOf is None else tuple(typeOf)
         if filterByName is None:
            tArr1=scope.keys()
         elif filterByNameReversed: #exclude specific names
            tArr1=filterByName if isArray(filterByName) else [filterByName]
            tArr1=[k for k in scope.iterkeys() if k not in tArr1]
         else: #include only specific names
            tArr1=filterByName if isArray(filterByName) else [filterByName]
         for k in tArr1:
            v=scope.get(k, None)
            # check type if needed or use callback
            if typeOf is None: pass
            elif isFunction(typeOf) and not typeOf(k, v): continue
            elif not isinstance(v, typeOf): continue
            # merging
            setattr(self._parentModule, k, v)
         self._speedStatsAdd('mergeGlobalsToParent', getms()-mytime)
         return scope
      except Exception, e:
         self._throw("Cant merge parent's globals: %s"%e)
def _speedStatsAdd(self, name, val):
"""
This methos write stats about passed <name>. You also can pass multiple names and values like list.
:param str|list name:
:param float|list val: time in milliseconds, that will be writed to stats.
"""
if isArray(name) and isArray(val):
names, vals=name, val
if len(names)!=len(vals):
self._throw('Wrong length')
for i, name in enumerate(names):
val=vals[i]
if name not in self.speedStats:
self.speedStats[name]=deque2([], 99999)
self.speedStats[name].append(val)
if name not in self.speedStatsMax: self.speedStatsMax[name]=val
elif val>self.speedStatsMax[name]: self.speedStatsMax[name]=val
else:
if name not in self.speedStats:
self.speedStats[name]=deque2([], 99999)
self.speedStats[name].append(val)
if name not in self.speedStatsMax: self.speedStatsMax[name]=val
elif val>self.speedStatsMax[name]: self.speedStatsMax[name]=val
   def registerInstance(self, dispatcher, path='/', fallback=None, dispatcherBackend=None, notifBackend=None, includePrivate=None, filter=None):
      """
      This method Create dispatcher for methods of given class's instance.
      If methods has attribute _alias(List or String), it used as aliases of name.

      :param instance dispatcher: Class's instance.
      :param str path: Optional string that contain path-prefix.
      :param bool|string fallback: Switch JSONP-mode fot this dispatchers.
      :param str|obj dispatcherBackend: Set specific backend for this dispatchers.
      :param str|obj notifBackend: Set specific backend for this dispatchers.
      :param list|None includePrivate: By default this method ignore private and special methods of instance. If you want to include some of them, pass theirs names.
      :param func filter: If this param passed, this function will be called for every method of instance. It must return tuple(<use>, <name>, <link>), and only methods with <use> is True will be registered. This param also disable ignoring private and special methods of instance, so param <includePrivate> also will be disabled.
      """
      if self._inChild():
         self._throw('This method "%s()" can be called only from <main> process'%sys._getframe().f_code.co_name)
      # if not isInstance(dispatcher):
      #    self._throw('Bad dispatcher type: %s'%type(dispatcher))
      includePrivate=includePrivate or tuple()
      fallback=self.__settings['fallback_JSONP'] if fallback is None else fallback
      path=formatPath(path)
      if path not in self.routes: self.routes[path]={}
      # select Dispatcher-backend
      if dispatcherBackend is None: dBckId=self.defaultDispatcherBackendId
      else: dBckId=self._registerExecBackend(dispatcherBackend, notif=False)
      # select Notif-backend
      if notifBackend is None: nBckId=self.defaultNotifBackendId
      else: nBckId=self._registerExecBackend(notifBackend, notif=True)
      # add dispatcher to routes
      for name in dir(dispatcher):
         link=getattr(dispatcher, name)
         if filter:
            # custom filter decides inclusion and may rename or replace the method
            s, name, link=filter(dispatcher, name, link)
            if not s: continue
         elif name[0]=='_' and name not in includePrivate: continue #skip private and special methods
         if isFunction(link):
            # extract arguments
            _args, _, _, _=inspect.getargspec(link)
            _args=tuple([s for i, s in enumerate(_args) if not(i==0 and s=='self')])
            # add dispatcher to routes
            self.routes[path][name]={'allowJSONP':fallback, 'link':link, 'dispatcherBackendId':dBckId, 'notifBackendId':nBckId, 'args':_args}
            link.__func__._id={'path':path, 'name':name} #save path for dispatcher in routes
            if hasattr(link, '_alias'):
               tArr1=link._alias if isArray(link._alias) else [link._alias]
               for alias in tArr1:
                  if isString(alias):
                     self.routes[path][alias]={'allowJSONP':fallback, 'link':link, 'dispatcherBackendId':dBckId, 'notifBackendId':nBckId, 'args':_args}
def registerFunction(self, dispatcher, path='/', fallback=None, name=None, dispatcherBackend=None, notifBackend=None):
"""
This method reate dispatcher for given function.
If methods has attribute _alias(List or String), it used as aliases of name.
:param instance dispatcher: Class's instance.
:param str path: Optional string that contain path-prefix.
:param bool|string fallback: Switch JSONP-mode fot this dispatcher.
:param str name: Alternative name for this dispatcher.
:param str|obj dispatcherBackend: Set specific backend for this dispatcher.
:param str|obj notifBackend: Set specific backend for this dispatchers.
"""
if self._inChild():
self._throw('This method "%s()" can be called only from <main> process'%sys._getframe().f_code.co_name)
if not isFunction(dispatcher): self._throw('Bad dispatcher type: %s'%type(dispatcher))
fallback=self.__settings['fallback_JSONP'] if fallback is None else fallback
name=name or dispatcher.__name__
path=formatPath(path)
if path not in self.routes: self.routes[path]={}
# select Dispatcher-backend
if dispatcherBackend is None: dBckId=self.defaultDispatcherBackendId
else: dBckId=self._registerExecBackend(dispatcherBackend, notif=False)
# select Notif-backend
if notifBackend is None: nBckId=self.defaultNotifBackendId
else: nBckId=self._registerExecBackend(notifBackend, notif=True)
# extract arguments
_args, _, _, _=inspect.getargspec(dispatcher)
_args=tuple(s for i, s in enumerate(_args) if not(i==0 and (s=='self' or s=='cls')))
# add dispatcher to routes
self.routes[path][name]={'allowJSONP':fallback, 'link':dispatcher, 'dispatcherBackendId':dBckId, 'notifBackendId':nBckId, 'args':_args}
if hasattr(dispatcher, '__func__'):
dispatcher.__func__._id={'path':path, 'name':name} #save path for dispatcher in routes
else:
dispatcher._id={'path':path, 'name':name} #save path for dispatcher in routes
if hasattr(dispatcher, '_alias'):
tArr1=dispatcher._alias if isArray(dispatcher._alias) else [dispatcher._alias]
for alias in tArr1:
if isString(alias):
self.routes[path][alias]={'allowJSONP':fallback, 'link':dispatcher, 'dispatcherBackendId':dBckId, 'notifBackendId':nBckId}
def _parseJSON(self, data):
"""
This method parse JSON-data to native object.
:param str data:
:return native:
"""
mytime=getms()
s=self.jsonBackend.loads(data)
self._speedStatsAdd('parseJSON', getms()-mytime)
return s
def _parseRequest(self, data):
"""
This method parse reguest's data and validate.
:param str|list|dict data:
:return set(bool, list): First argument is validation status.
"""
try:
mytime=getms()
if not data:
raise ValueError('Empty request')
tArr1=self._parseJSON(data) if isString(data) else data
tArr2=[]
tArr1=tArr1 if isArray(tArr1) else (tArr1, ) #support for batch requests
for r in tArr1:
correctId=None
if 'id' in r:
# if in request exists key "id" but it's "null", we process him like correct request, not notify-request
correctId=0 if r['id'] is None else r['id']
tArr2.append({
'jsonrpc': r['jsonrpc'] if 'jsonrpc' in r else None,
'method': r['method'] if 'method' in r else None,
'params': r['params'] if 'params' in r else None,
'id':correctId
})
self._speedStatsAdd('parseRequest', getms()-mytime)
return True, tArr2
except Exception, e:
self._speedStatsAdd('parseRequest', getms()-mytime)
self._logger(1, 'Error parseRequest', e)
return False, e
def _prepResponse(self, data, isError=False):
"""
This method prepare data for responce.
:param dict data:
:param bool isError: Switch response's format.
:return dict:
"""
if 'id' in data:
_id=data['id']
del data['id']
else:
_id=None
if isError:
s={"jsonrpc": "2.0", "error": data, "id": _id}
elif _id:
s={"jsonrpc": "2.0", "result": data['data'], "id": _id}
else: s=None
return s
def _fixJSON(self, o):
"""
This method can be called by JSON-backend and process special types.
"""
if isinstance(o, decimal.Decimal): return str(o) #fix Decimal conversion
elif isinstance(o, (datetime.datetime, datetime.date, datetime.time)): return o.isoformat() #fix DateTime conversion
# elif isNum(o) and o>=sys.maxint: return str(o) #? fix LONG
def _serializeJSON(self, data):
"""
This method convert native python object to JSON.
:param native data:
:return str:
"""
def _fixJSON(o):
if isFunction(self.fixJSON): return self.fixJSON(o)
mytime=getms()
#! без экранирования кавычек такой хак поломает парсер, кроме того он мешает поддерживать не-JSON парсеры
# if isString(data) and not getattr(self.jsonBackend, '_skipFastStringSerialize', False): data='"'+data+'"'
data=self.jsonBackend.dumps(data, indent=None, separators=(',',':'), ensure_ascii=True, sort_keys=False, default=_fixJSON)
self._speedStatsAdd('serializeJSON', getms()-mytime)
return data
   def stats(self, inMS=False, history=10):
      """
      This method return statistics of server.

      :param bool inMS: If True, all speed-stats will be in milliseconds, else in seconds.
      :param bool|int history: Size of sliced connPerSec history. If truthy but non-numeric, full history is returned.
      :return dict: Collected perfomance stats
      """
      res={'connPerSec_now':round(self.connPerMinute['count']/60.0, 2), 'connPerSec_old':round(self.connPerMinute['oldCount']/60.0, 2), 'connPerSec_max':round(self.connPerMinute['maxCount']/60.0, 2), 'speedStats':{}, 'processingRequestCount':self.processingRequestCount, 'processingDispatcherCount':self.processingDispatcherCount}
      if history and isNum(history):
         # slice off only the last <history> entries
         l=history*-1
         res['connPerSec_history']={
            'minute':list(self.connPerMinute['history']['minute'])[l:],
            'count':list(self.connPerMinute['history']['count'])[l:]
         }
      elif history:
         # truthy but non-numeric <history>: return full history
         res['connPerSec_history']={
            'minute':list(self.connPerMinute['history']['minute']),
            'count':list(self.connPerMinute['history']['count'])
         }
      #calculate speed stats
      for k, v in self.speedStats.iteritems():
         v1=max(v)   # max over bounded history window
         v2=float(sum(v))/len(v)   # average over bounded history window
         v3=self.speedStatsMax[k]   # all-time max, tracked separately in _speedStatsAdd()
         res['speedStats'][k+'_max']=round(v1/1000.0, 1) if not inMS else round(v1, 1)
         res['speedStats'][k+'_average']=round(v2/1000.0, 1) if not inMS else round(v2, 1)
         res['speedStats'][k+'_max2']=round(v3/1000.0, 1) if not inMS else round(v3, 1)
      #get backend's stats
      for _id, backend in self.execBackend.iteritems():
         # backend may be an object with stats() method or a dict with 'stats' callable
         if hasattr(backend, 'stats'):
            r=backend.stats(inMS=inMS, history=history)
         elif 'stats' in backend:
            r=backend['stats'](inMS=inMS, history=history)
         else: continue
         res.update(r)
      return res
   def _logger(self, level, *args):
      """
      This method is wrapper for logger. First parametr <level> is optional, if it not setted, message is interpreted as "critical" and will be shown also if logging disabled.

      :param int level: Info-level of message. 0 is critical (and visible always), 1 is error, 2 is warning, 3 is info, 4 is debug. If is not number, it passed as first part of message.
      """
      level, args=prepDataForLogger(level, args)
      if level!=0 and level is not None: #non-critical or non-fallback msg
         loglevel=self.__settings['log']
         if not loglevel: return
         elif loglevel is True: pass   # logging fully enabled, no level filtering
         elif level>loglevel: return   # message too verbose for current log-level
      levelPrefix=('', 'ERROR:', 'WARNING:', 'INFO:', 'DEBUG:')
      # bind hot lookups to locals
      _write=sys.stdout.write
      _repr=self._serializeJSON
      for i, s in enumerate(args):
         # auto-prefix
         if not i and level and level<len(levelPrefix):
            s2=levelPrefix[level]
            if not isString(s) or not s.startswith(s2): _write(s2+' ')
         # try to printing
         try: _write(s)
         except:
            # non-string data: fall back to JSON representation, then to placeholder
            try:
               s=_repr(s)
               try:
                  if s: _write(s)
               except UnicodeEncodeError:
                  _write(s.encode('utf8'))
            except Exception, e:
               _write('<UNPRINTABLE_DATA: %s>'%e)
         if i<len(args)-1: _write(' ')
      _write('\n')
      sys.stdout.flush()
def lock(self, dispatcher=None):
"""
This method locking server or specific <dispatcher>.
:param func dispatcher:
"""
if self._inChild():
self._throw('This method "%s()" can be called only from <main> process'%sys._getframe().f_code.co_name)
if dispatcher is None: self.locked=True #global lock
else: #local lock
if isFunction(dispatcher):
if hasattr(dispatcher, '__func__'):
setattr(dispatcher.__func__, '__locked', True)
else:
setattr(dispatcher, '__locked', True)
def unlock(self, dispatcher=None, exclusive=False):
"""
This method unlocking server or specific <dispatcher>.
If all server locked, you can unlock specific <dispatcher> by pass <exclusive> to True.
:Attention:
If you use exclusive unlocking, don't forget to switch locking-status to normal state after all done. For doing this simply call this method for exclusivelly locked dispatcher with <exclusive=False> flag (or without passing <exclusive> flag). This will automatically reset status of exclusive unlocking for given <dispatcher>.
:param func dispatcher:
:param bool exclusive:
"""
if self._inChild():
self._throw('This method "%s()" can be called only from <main> process'%sys._getframe().f_code.co_name)
#exclusive=True unlock dispatcher also if global lock=True
if dispatcher is None: self.locked=False #global lock
else: #local lock
if isFunction(dispatcher):
if hasattr(dispatcher, '__func__'):
setattr(dispatcher.__func__, '__locked', False if exclusive else None)
else:
setattr(dispatcher, '__locked', False if exclusive else None)
   def wait(self, dispatcher=None, sleepMethod=None, returnStatus=False):
      """
      This method wait while server or specific <dispatcher> locked or return locking status.
      If <returnStatus> is True, method only return locking status.
      If <returnStatus> is False, method cyclically call <sleepMethod> until server or <dispatcher> locked. If <sleepMethod> not passed, it will be automatically selected.

      :param func dispatcher:
      :param func sleepMethod:
      :param bool returnStatus:
      :return bool|None: Locking status if <returnStatus> is True, otherwise None.
      """
      if self._inChild():
         self._throw('This method "%s()" can be called only from <main> process'%sys._getframe().f_code.co_name)
      mytime=getms()
      sleepMethod=sleepMethod or self._sleep
      if dispatcher is None: #global lock
         while self.locked:
            if returnStatus:
               self._speedStatsAdd('wait', getms()-mytime)
               return True
            sleepMethod(self.__settings['sleepTime_waitLock']) #global lock
      else: #local and global lock
         # lock-flag lives on the underlying function object
         if hasattr(dispatcher, '__func__'): dispatcher=dispatcher.__func__
         while True:
            # local
            locked=getattr(dispatcher, '__locked', None)
            # global
            if locked is False: break #exclusive unlock
            elif not locked and not self.locked: break
            if returnStatus:
               self._speedStatsAdd('wait', getms()-mytime)
               return True
            sleepMethod(self.__settings['sleepTime_waitLock'])
      self._speedStatsAdd('wait', getms()-mytime)
      if returnStatus: return False
def _deepLock(self):
"""
This method locks the server completely.
While locked, server doesn't process requests, but receives them. All request will wait, until server unlocked.
"""
if self._inChild():
self._throw('This method "%s()" can be called only from <main> process'%sys._getframe().f_code.co_name)
self.deepLocked=True
def _deepUnlock(self):
"""
This method unlocks the server completely.
"""
if self._inChild():
self._throw('This method "%s()" can be called only from <main> process'%sys._getframe().f_code.co_name)
self.deepLocked=False
def _deepWait(self):
"""
This method waits while server be locked.
"""
if self._inChild():
self._throw('This method "%s()" can be called only from <main> process'%sys._getframe().f_code.co_name)
while self.deepLocked:
self._sleep(self.__settings['sleepTime_waitDeepLock'])
def _reload(self, api, clearOld=False, timeout=60, processingDispatcherCountMax=0, safely=True):
"""
This method overload server's source without stopping.
:param list|dict api:
:param bool clearOld:
:param int timeout:
:param int processingDispatcherCountMax:
:param bool safely:
"""
if self._inChild():
self._throw('This method "%s()" can be called only from <main> process'%sys._getframe().f_code.co_name)
self._deepLock()
mytime=getms()
self._waitProcessingDispatchers(timeout=timeout, processingDispatcherCountMax=processingDispatcherCountMax)
# stop execBackends
self._stopExecBackends(timeout=timeout-(getms()-mytime)/1000.0, processingDispatcherCountMax=processingDispatcherCountMax)
# reloading
oldRoutes=self.routes
if clearOld: self.routes={}
api=api if isArray(api) else [api]
try:
for o in api:
if not isDict(o): continue
path=o.get('path', '')
dispatcher=o.get('dispatcher')
if not dispatcher:
self._deepUnlock()
self._throw('Empty dispatcher"')
if o.get('isInstance', False):
self.registerInstance(dispatcher, path, fallback=o.get('fallback', None), dispatcherBackend=o.get('dispatcherBackend', None), notifBackend=o.get('notifBackend', None))
else:
self.registerFunction(dispatcher, path, name=o.get('name', None), fallback=o.get('fallback', None), dispatcherBackend=o.get('dispatcherBackend', None), notifBackend=o.get('notifBackend', None))
except Exception, e:
msg='Cant reload server: %s'%e
if safely:
self.routes=oldRoutes
self._logger(1, msg)
self._logger(3, 'Server is reloaded in safe-mode, so all dispatchers was restored. But if you overloaded some globals in callback, they can not be restored!')
else: self._throw(msg)
# start execBackends
self._startExecBackends()
self._deepUnlock()
   def reload(self, api, clearOld=False, timeout=60, processingDispatcherCountMax=0, safely=True):
      """
      This method is wrapper above <server>._reload(). It overload server's source without stopping.

      :Example:
         # example of passed <api>
         api={
            'dispatcher':str() or "function" or "class's instance", # dispatcher's name (replace next param) or link to function (that will be loaded)
            'name':str(), # name of dispatcher that will overloaded
            'dispatcherName':str(), # same as <name>, for backward compatibility
            'scriptPath':str(), # path to source, that must be loaded. If not passed, main program's path will be used
            'scriptName':str(), # don't use it
            'isInstance':bool(), # is passed dispatcher instance of class
            'overload':list(), # overload this attrs in source or call this function
            'path':str() # API path for dispatcher
         }

      :param list(dict)|dict api: see example
      :param bool clearOld:
      :param int timeout:
      :param int processingDispatcherCountMax:
      :param bool safely:
      """
      if self._inChild():
         self._throw('This method "%s()" can be called only from <main> process'%sys._getframe().f_code.co_name)
      api2=api if isArray(api) else [api]
      mArr={}   # cache of already-imported modules and dispatchers, keyed by "<scriptName>_<scriptPath>"
      api=[]   # normalized route-descriptions collected for _reload()
      for o in api2:
         if not isDict(o): continue
         scriptPath=o.get('scriptPath', getScriptPath(True))
         scriptName=o.get('scriptName', '')
         dispatcherName=o.get('dispatcher', '')
         isInstance=o.get('isInstance', False)
         exclusiveModule=o.get('exclusiveModule', False)
         exclusiveDispatcher=o.get('exclusiveDispatcher', False)
         overload=o.get('overload', None)
         path=o.get('path', '')
         name=o.get('name', o.get('dispatcherName', None))
         # start importing new code
         if scriptPath and dispatcherName and isString(dispatcherName):
            # passed path to module and name of dispatcher
            if exclusiveModule:
               # always load a fresh copy of the module, bypassing the cache
               module=imp.load_source(scriptName, scriptPath)
            else:
               if '%s_%s'%(scriptName, scriptPath) not in mArr:
                  mArr['%s_%s'%(scriptName, scriptPath)]={'module':imp.load_source(scriptName, scriptPath), 'dArr':{}}
                  if scriptPath==getScriptPath(True) or scriptPath==getScriptPath(True, False):
                     # passed parent module
                     self._parentModule=mArr['%s_%s'%(scriptName, scriptPath)]['module']
               module=mArr['%s_%s'%(scriptName, scriptPath)]['module']
            # NOTE(review): at this point the key was just added to mArr above (non-exclusive branch),
            # so the "not in mArr" part of this condition looks always False — confirm intended logic
            if exclusiveDispatcher or '%s_%s'%(scriptName, scriptPath) not in mArr:
               # module not imported yet
               dispatcher=getattr(module, dispatcherName)
            else:
               # imported module, use it
               if dispatcherName not in mArr['%s_%s'%(scriptName, scriptPath)]['dArr']:
                  # dispatcher not imported yet
                  mArr['%s_%s'%(scriptName, scriptPath)]['dArr'][dispatcherName]=getattr(module, dispatcherName)
                  if isInstance:
                     # instantiate the class once and cache the instance
                     mArr['%s_%s'%(scriptName, scriptPath)]['dArr'][dispatcherName]=mArr['%s_%s'%(scriptName, scriptPath)]['dArr'][dispatcherName]()
               dispatcher=mArr['%s_%s'%(scriptName, scriptPath)]['dArr'][dispatcherName]
         elif name and isFunction(dispatcherName):
            # passed function as new (or existed) dispatcher
            dispatcher=dispatcherName
            module=dispatcherName
         else:
            self._throw('Incorrect data for "reload()"')
         # overloading with passed objects or via callback
         overload=overload if isArray(overload) else [overload]
         for oo in overload:
            if not oo: continue
            elif isDict(oo):
               # dict form: set given attributes on the freshly loaded module
               for k, v in oo.iteritems(): setattr(module, k, v)
            elif isFunction(oo): oo(self, module, dispatcher)
         # additional settings
         allowJSONP=o.get('fallback', None)
         dispatcherBackend=o.get('dispatcherBackend', None)
         notifBackend=o.get('notifBackend', None)
         # get additional settings from original dispatcher
         if isInstance:
            if (allowJSONP is None) or (dispatcherBackend is None) or (notifBackend is None):
               # dispatcher's settings stored for Class methods, not for Class instance
               # so we need to find at least one Class method, stored previosly
               d=None
               p=formatPath(path)
               if p in self.routes:
                  for n in dir(dispatcher):
                     link=getattr(dispatcher, n)
                     if not isFunction(link): continue
                     if n not in self.routes[p]: continue
                     d=self.routes[p][n]
                     break
               if d:
                  if allowJSONP is None: allowJSONP=d['allowJSONP']
                  if dispatcherBackend is None: dispatcherBackend=d['dispatcherBackendId']
                  if notifBackend is None: notifBackend=d['notifBackendId']
         else:
            # inherit settings from previously registered dispatcher with same name, if any
            n=(name or dispatcherName)
            p=formatPath(path)
            if p in self.routes and n in self.routes[p]:
               if allowJSONP is None: allowJSONP=self.routes[p][n]['allowJSONP']
               if dispatcherBackend is None: dispatcherBackend=self.routes[p][n]['dispatcherBackendId']
               if notifBackend is None: notifBackend=self.routes[p][n]['notifBackendId']
         # add result
         api.append({'dispatcher':dispatcher, 'path':path, 'isInstance':isInstance, 'name':name, 'fallback':allowJSONP, 'dispatcherBackend':dispatcherBackend, 'notifBackend':notifBackend})
      # actual swap happens in background thread, because _reload() deep-locks and waits
      self._thread(target=self._reload, kwargs={'api':api, 'clearOld':clearOld, 'timeout':timeout, 'processingDispatcherCountMax':processingDispatcherCountMax, 'safely':safely})
aboutMagicVarForDispatcher="""
This variable passed to dispatcher, if required. You can set name for this var by passing <magicVarForDispatcher> parametr to server's constructor. By default it named "_connection".
:param dict headers: Request's headers.
:param list cookies: Request's cookies.
:param str ip: Request's IP (ip of client).
:param dict headersOut: You can set headers, that will be passed to response.
:param list cookiesOut: You can set cookies, that will be passed to response.
:param str uniqueId: Unique identificator of request.
:param bool|str jsonp: If this requests is JSONP fallback, the output format string passed here, otherwise False. You can change this for JSONP fallback requests.
:param str mimeType: Request's MimeType. You can change this for setting MimeType of Response.
:param bool allowCompress: Is compressing allowed for this request. You can change it for forcing compression.
:param instance server: Link to server's instance.
:param str serverName: Name of server.
:param set(isRawSocket,(ip|socketPath),(port|socket)) servedBy: Info about server's adress. Usefull if you use multiple adresses for one server.
:param dict('lock','unlock','wait','sleep','log',..) call: Some useful server's methods, you can call them.
:param bool nativeThread: Is this request and dispatcher executed in native python thread.
:param bool notify: Is this request is Notify-request.
:param func dispatcher: Link to dispatcher.
:param str path: Server's path, that client used for sending request. Useful if you bind one dispatcher to different paths.
:param str dispatcherName: Name of dispatcher, that passed with request. Useful if you bind one dispatcher to different names.
:param str parallelType: Optional parametr. Can be passed by ExecBackend and contain name of it.
:param int parallelPoolSize: Optional parametr. Can be passed by ExecBackend and contain size of exec's pool.
:param str parallelId: Optional parametr. Can be passed by ExecBackend and contain identificator of process or thread, that processing this request.
"""
def _addMagicVarOverloader(self, f):
if not isFunction(f):
self._throw('You must pass function, passed %s'%type(f))
self._magicVarForDispatcherOverload.append(f)
   def _callDispatcher(self, uniqueId, data, request, isJSONP=False, nativeThread=None, overload=None, ignoreLocking=False):
      """
      This method call dispatcher, requested by client.

      :param str uniqueId: Unique ID of current request.
      :param list|dict data: Request's params.
      :param dict request: Reuest's and Enveronment's variables of WSGI or another backend.
      :param bool|str isJSONP:
      :param bool nativeThread:
      :param dict|func overload: Overload magicVarForDispatcher param.
      :param bool ignoreLocking: If True, dispatcher's lock-status will not be waited for.
      :return tuple(status, params, result): <status> is False on error and <result> then contains error info.
      """
      argsPassed=locals()
      params={}
      try:
         mytime=getms()
         self.processingDispatcherCount+=1
         self._gcStats['processedDispatcherCount']+=1
         if nativeThread:
            # in native thread sleeping must also be forced-native
            def tFunc_sleep(s, forceNative=True):
               self._sleep(s, forceNative=forceNative)
         else:
            tFunc_sleep=self._sleep
         currentDispatcher=self.routes[request['path']][data['method']]
         currentDispatcherLink=currentDispatcher['link']
         _args=self.routes[request['path']][data['method']]['args']
         if isDict(data['params']):
            params=data['params'].copy()
         elif isArray(data['params']): #convert *args to **kwargs
            if len(data['params'])>len(_args):
               self.processingDispatcherCount-=1
               return False, params, 'Too many arguments'
            for i, v in enumerate(data['params']): params[_args[i]]=v
         magicVarForDispatcher=self.__settings['magicVarForDispatcher']
         if magicVarForDispatcher in _args: #add magicVarForDispatcher if requested
            # link to current execBackend
            if data['id'] is None:
               currentExecBackendId=currentDispatcher['notifBackendId']
            else:
               currentExecBackendId=currentDispatcher['dispatcherBackendId']
            if currentExecBackendId in self.execBackend:
               # NOTE(review): lookup uses dispatcherBackendId even when currentExecBackendId
               # was taken from notifBackendId (notify branch above) — looks like a bug; confirm
               currentExecBackend=self.execBackend[currentDispatcher['dispatcherBackendId']]
            else:
               currentExecBackend=None
               currentExecBackendId='simple'
            # create magicVarForDispatcher
            params[magicVarForDispatcher]={
               'headers':request['headers'],
               'cookies':request['cookies'],
               'ip':request['ip'],
               'cookiesOut':[],
               'headersOut':{},
               'uniqueId':uniqueId,
               'jsonp':isJSONP,
               'mimeType':calcMimeType(request),
               'allowCompress':self.__settings['allowCompress'],
               'server':self,
               'serverName':self.name,
               'servedBy':request['servedBy'],
               'call':magicDict({
                  'lockThis':lambda: self.lock(dispatcher=currentDispatcherLink),
                  'unlockThis':lambda exclusive=False: self.unlock(dispatcher=currentDispatcherLink, exclusive=exclusive),
                  'waitThis':lambda returnStatus=False: self.wait(dispatcher=currentDispatcherLink, sleepMethod=tFunc_sleep, returnStatus=returnStatus),
                  'lock':self.lock,
                  'unlock':self.unlock,
                  'wait':lambda dispatcher=None, returnStatus=False: self.wait(dispatcher=dispatcher, sleepMethod=tFunc_sleep, returnStatus=returnStatus),
                  'sleep':tFunc_sleep,
                  'log':self._logger
               }),
               'nativeThread':nativeThread if nativeThread is not None else not(self.__settings['gevent']),
               'notify':data['id'] is None,
               'request':data,
               'dispatcher':currentDispatcherLink,
               'path':request['path'],
               'dispatcherName':data['method'],
               'execBackend':currentExecBackend,
               'execBackendId':currentExecBackendId
            }
            # overload magicVarForDispatcher (passed)
            if overload and isDict(overload):
               for k, v in overload.iteritems(): params[magicVarForDispatcher][k]=v
            elif overload and isFunction(overload):
               params[magicVarForDispatcher]=overload(params[magicVarForDispatcher])
            # overload magicVarForDispatcher (global)
            if self._magicVarForDispatcherOverload:
               for f in self._magicVarForDispatcherOverload:
                  params[magicVarForDispatcher]=f(params[magicVarForDispatcher], argsPassed, currentExecBackendId, currentExecBackend)
            params[magicVarForDispatcher]=magicDict(params[magicVarForDispatcher])
         self._speedStatsAdd('callDispatcherPrepare', getms()-mytime)
         if not ignoreLocking: #locking
            self.wait(dispatcher=currentDispatcherLink, sleepMethod=tFunc_sleep)
         # call dispatcher
         mytime=getms()
         result=currentDispatcherLink(**params)
         self._speedStatsAdd('callDispatcher', getms()-mytime)
         self.processingDispatcherCount-=1
         return True, params, result
      except Exception:
         self.processingDispatcherCount-=1
         return False, params, getErrorInfo()
   def _controlGC(self, force=False):
      """
      This method collects garbage if one off specific conditions is True or if <force> is True.

      :param bool force: If True, collection runs regardless of time/request/dispatcher thresholds.
      """
      if (gc.isenabled() or not self._gcStats['lastTime']) and self.__settings['controlGC']:
         # first call (or GC was re-enabled): take over GC control manually
         # gc.set_threshold(0)
         gc.disable()
         self._logger(3, 'GC disabled by manual control')
         self._gcStats['lastTime']=getms(False)
         self._gcStats['processedRequestCount']=0
         self._gcStats['processedDispatcherCount']=0
         if not force: return
      # check
      if self._gcStats['processing']: return
      mytime=getms(False)
      # self._logger('controlGC', self._gcStats['lastTime'], mytime-self._gcStats['lastTime'])
      if not force:
         # skip collection until one of the thresholds (time, requests, dispatchers) is reached
         if not self._gcStats['lastTime']: return
         if mytime-self._gcStats['lastTime']<self.__settings['controlGC_everySeconds'] and \
            self._gcStats['processedRequestCount']<self.__settings['controlGC_everyRequestCount'] and \
            self._gcStats['processedDispatcherCount']<self.__settings['controlGC_everyDispatcherCount']: return
      # collect garbage and reset stats
      self._gcStats['processing']=True
      mytime=getms()
      m1=self._countMemory()
      thr=gc.get_threshold()
      l=gc.get_count()
      c=0
      # collect generations 0..2, stopping at the first one below its threshold
      for i in xrange(3):
         if l[i]<thr[i]: break
         c+=gc.collect(i)
      m2=self._countMemory()
      if c and m1 and m2:
         self._logger(3, 'GC executed manually: collected %s objects, memory freed %smb, used %smb, peak %smb'%(c, round((m1['now']-m2['now'])/1024.0, 1), round(m2['now']/1024.0, 1), round(m2['peak']/1024.0, 1)))
      self._speedStatsAdd('controlGC', getms()-mytime)
      self._gcStats['lastTime']=getms(False)
      self._gcStats['processedRequestCount']=0
      self._gcStats['processedDispatcherCount']=0
      self._gcStats['processing']=False
def _compressResponse(self, headers, data):
"""
This method compress responce and add compression-headers.
:param list headers: List of headers of response, that can be modified.
:param str data: Response data, that will be compressed.
:return tuple(headers, data):
"""
data=self._compressGZIP(data)
headers.append(('Content-Encoding', 'gzip'))
headers.append(('Vary', 'Accept-Encoding'))
# headers.append(('Content-Length', len(data))) # serv-backend set it automatically
return (headers, data)
def _compressGZIP(self, data):
"""
This method compress input data with gzip.
:param str data:
:return str:
"""
mytime=getms()
gzip_buffer=StringIO()
l=len(data)
f=GzipFile(mode='wb', fileobj=gzip_buffer, compresslevel=3)
f.write(data)
f.close()
res=gzip_buffer.getvalue()
self._logger(4, '>> compression %s%%, original size %smb'%(round((1-len(res)/float(l))*100.0, 1), round(l/1024.0/1024.0, 2)))
self._speedStatsAdd('compressResponse', getms()-mytime)
return res
def _uncompressGZIP(self, data):
"""
This method uncompress input data with gzip.
:param str data:
:return str:
"""
mytime=getms()
gzip_buffer=StringIO(data)
f=GzipFile('', 'r', 0, gzip_buffer)
res=f.read()
f.close()
self._speedStatsAdd('uncompressResponse', getms()-mytime)
return res
def _loadPostData(self, request):
"""
Load POST data for given <request>.
:param dict request:
:return dict:
"""
if request['data'] is None:
size=int(request['environ']['CONTENT_LENGTH']) if 'CONTENT_LENGTH' in request['environ'] else 0
request['data']=request['environ']['wsgi.input'].read(size) if size else ''
# decompress if needed
if 'gzip' in request['headers'].get('Content-Encoding', '').lower():
self._logger(4, 'COMPRESSED_REQUEST: %skb'%round(sys.getsizeof(request['data'])/1024.0, 2))
request['data']=self._uncompressGZIP(request['data'])
# prep for printing
if len(request['data'])>128:
request['dataPrint']=request['data'][:50]+' <..> '+request['data'][-50:]
return request['data']
   def _prepRequestContext(self, env):
      """
      Prepare request's context from given <env>.

      Converts a raw WSGI environ into the internal request dict (query args,
      headers, cookies, client ip, etc.) used by the rest of the server.
      :param dict env: WSGI environ of the incoming request.
      :return dict: Request context.
      """
      # parsing query string. we need clean dict, so can't use urlparse.parse_qs()
      mytime=getms()
      args={}
      if 'QUERY_STRING' in env and env['QUERY_STRING']:
         for s in env['QUERY_STRING'].split('&'):
            if not s: continue
            if '=' in s:
               # split only on the first '=' so values may contain '='
               i=s.find('=')
               args[s[:i]]=s[i+1:]
            else: args[s]=''
      # prep headers: "HTTP_X_SOME_HEADER" --> "X-Some-Header"
      headers=dict((k[5:].replace('_', '-').title(), v) for k, v in env.iteritems() if k[:5]=='HTTP_')
      # parsing cookies
      cookies={}
      if 'HTTP_COOKIE' in env and env['HTTP_COOKIE']:
         for s in env['HTTP_COOKIE'].split(';'):
            s=s.strip()
            if not s: continue
            if '=' in s:
               i=s.find('=')
               cookies[s[:i]]=s[i+1:]
            else: cookies[s]=''
      # gen request; 'data*' fields are filled lazily by _loadPostData()/_requestProcess()
      request={
         'path':formatPath(env['PATH_INFO'] if 'PATH_INFO' in env else ''),
         'fileName':'', # for JSONP
         'headers':headers,
         'cookies':cookies,
         'environ':env,
         'remote_addr':env['REMOTE_ADDR'] if 'REMOTE_ADDR' in env else '',
         'method':env['REQUEST_METHOD'] if 'REQUEST_METHOD' in env else '',
         'url':wsgiref.util.request_uri(env, include_query=True),
         'data':None,
         'dataPrint':None,
         'dataParsed':None,
         'dataStatus':None,
         'args':args,
         'servedBy':env['flaskJSONRPCServer_binded'] if 'flaskJSONRPCServer_binded' in env else (None, None, None)
      }
      # prepare client's ip; prefer proxy-provided "X-Real-Ip" over the socket peer
      request['ip']=env['HTTP_X_REAL_IP'] if 'HTTP_X_REAL_IP' in env else request['remote_addr']
      self._speedStatsAdd('prepRequestContext', getms()-mytime)
      return request
def _copyRequestContext(self, request, typeOf=True):
"""
This method create shallow copy of <request> and skip some not-serializable keys.
:param dict request:
:return dict:
"""
mytime=getms()
typeOf=(IntType, FloatType, LongType, ComplexType, NoneType, UnicodeType, StringType, BooleanType, DictType, ListType)
requestCopy=request.copy()
requestCopy['environ']=dict((k, v) for k, v in request['environ'].iteritems() if isinstance(v, typeOf))
self._speedStatsAdd('copyRequestContext', getms()-mytime)
return requestCopy
def _toHttpStatus(self, code):
"""
Complete HTTP status from given <code>.
:param int code:
:return str:
"""
if code in httplib.responses:
return str(code)+' '+httplib.responses[code]
else:
self._throw('Unknow http code: %s'%code)
def _fromHttpStatus(self, status):
"""
Convert given HTTP status to code.
:param str status:
:return int:
"""
try:
return int(status[:3])
except:
self._throw('Unknow http status: %s'%status)
   def simulateRequest(self, dispatcherName, args=None, path='/', headers=None, cookies=None, notify=False):
      """
      This method simulate request to server and return result.

      A fake WSGI environ is constructed and pushed through the normal
      request-handling chain in a separate thread, so dispatchers see it
      like a real POST request.
      :param str dispatcherName:
      :param list|dict args:
      :param str path: Simulated api path.
      :param dict headers: Simulated headers. Don't pass cookies here, they will not been processed.
      :param dict cookies: Pass cookies here if needed.
      :param bool notify: Is this a notify-request.
      :return tuple(result, headers): Dispatcher's result and response headers.
      :raises: Via self._throw() on rpc-error or non-200 http status.
      """
      #! simulation does not support postprocessors
      if self._inChild():
         self._throw('This method "%s()" can be called only from <main> process'%sys._getframe().f_code.co_name)
      # jsonrpc payload; notify-requests carry no <id>
      d={"jsonrpc": "2.0", "method": dispatcherName, "params": args or []}
      if not notify: d['id']=1
      request={
         'path':path,
         'fileName':'',
         'headers':headers or {},
         'cookies':cookies or {},
         'environ':{
            'wsgi.multiprocess': False,
            'CONTENT_TYPE': 'application/json-rpc',
            'wsgi.multithread': True,
            'SERVER_SOFTWARE': 'FAKE_REQUEST',
            'SCRIPT_NAME': '',
            'wsgi.input': True,
            'REQUEST_METHOD': 'POST',
            'HTTP_HOST': 'localhost',
            'PATH_INFO': path,
            'SERVER_PROTOCOL': 'HTTP/1.1',
            'QUERY_STRING': '',
            'CONTENT_LENGTH': '9999', #! we don't know real length, but it needed for postprocessing
            'wsgi.version': (1, 0),
            'SERVER_NAME': 'FAKE_REQUEST',
            'GATEWAY_INTERFACE': 'CGI/1.1',
            'wsgi.run_once': False,
            'wsgi.errors': None,
            'REMOTE_ADDR': 'localhost',
            'wsgi.url_scheme': 'fake',
            'SERVER_PORT': '',
            'REMOTE_HOST': 'localhost',
            'HTTP_ACCEPT_ENCODING': 'identity'
         },
         'remote_addr':'localhost',
         'method':'POST',
         'url':'localhost'+path,
         'args':{},
         'servedBy':(None, None, None),
         'ip':'localhost',
         'data':[d],
         'dataPrint':'', #! add correct
         'fake':True
      }
      # add headers to environ
      if headers:
         for k, v in headers.iteritems():
            request['environ']['HTTP_'+k.replace('-', '_').upper()]=v
      # add cookies to environ
      if cookies:
         request['environ']['HTTP_COOKIE']=' '.join('%s=%s;'%(k, v) for k, v in cookies.iteritems())
      # start processing request in thread
      out={'status':None, 'headers':None, 'result':None}
      def tFunc_start_response(status, headers):
         # fake WSGI start_response() that just captures status and headers
         out['status']=status
         out['headers']=headers
      def tFunc_wrapper(*args, **kwargs):
         out['result']=self._requestHandler(*args, **kwargs)
      # join() makes the simulated request synchronous
      self._thread(target=tFunc_wrapper, args=(request, tFunc_start_response), kwargs={'prepRequest':False, 'returnRaw':True}).join()
      if out['status']=='200 OK' and 'result' in out['result']:
         return out['result']['result'], out['headers']
      elif 'error' in out['result']:
         self._throw(out['result']['error']['message'])
      else:
         self._throw('%s: %s'%(out['status'], out['result']))
   def _requestHandler(self, env, start_response, prepRequest=True, returnRaw=False):
      """
      This method is callback, that will be called by WSGI or another backend for every request.

      It implement error's handling of <server>._requestProcess() and some additional funtionality:
      requests-per-minute stats, deep-locking, postprocessing and manual GC.
      :param dict env: WSGI environ (or an already prepared request context).
      :param func start_response: WSGI start_response callable.
      :param bool prepRequest: If this True, environ will be prepared via <server>._prepRequestContext().
      :param bool returnRaw: If True, return the raw (not serialized) response object; used by simulateRequest().
      :return tuple: Response as iterator.
      """
      if self._inChild():
         self._throw('This method "%s()" can be called only from <main> process'%sys._getframe().f_code.co_name)
      mytime=getms()
      self.processingRequestCount+=1
      self._gcStats['processedRequestCount']+=1
      # calculate connections per second
      nowMinute=int(time.time())/60
      if nowMinute!=self.connPerMinute['nowMinute']:
         # add to history
         self.connPerMinute['history']['minute'].append(self.connPerMinute['nowMinute'])
         self.connPerMinute['history']['count'].append(round(self.connPerMinute['count']/60.0, 2))
         # replace old values
         self.connPerMinute['nowMinute']=nowMinute
         if self.connPerMinute['count']:
            self.connPerMinute['oldCount']=self.connPerMinute['count']
         if self.connPerMinute['count']>self.connPerMinute['maxCount']:
            self.connPerMinute['maxCount']=self.connPerMinute['count']
         if self.connPerMinute['count']<self.connPerMinute['minCount'] or not self.connPerMinute['minCount']:
            self.connPerMinute['minCount']=self.connPerMinute['count']
         self.connPerMinute['count']=0
      self.connPerMinute['count']+=1
      # DeepLocking: wait while server is locked; in blocking mode lock for the whole request
      self._deepWait()
      if self.__settings['blocking']: self._deepLock()
      # processing request; any error becomes an HTTP 500
      try:
         request=self._prepRequestContext(env) if prepRequest else env
         status, headers, data, dataRaw=self._requestProcess(request)
      except Exception:
         e=getErrorInfo()
         self._logger(1, 'ERROR processing request: %s'%e)
         status, headers, data, dataRaw=(500, {}, e, None)
      if self.__settings['postprocess']:
         # call postprocessers
         try:
            pp=postprocess(self, self.__settings['postprocess'])
            status, headers, dataRaw=pp(request, status, headers, data, dataRaw)
            data=dataRaw if isString(dataRaw) else self._serializeJSON(dataRaw)
         except Exception:
            e=getErrorInfo()
            self._logger(1, 'ERROR in postprocesser: %s'%e)
            status, headers, dataRaw=(500, {}, e)
            data=dataRaw
      # convert status to http-status
      if not isString(status): status=self._toHttpStatus(status)
      # convert string-data to iterator (WSGI expects an iterable body)
      if isString(data): data=(data,)
      # finalize
      self.processingRequestCount-=1
      self._logger(4, 'GENERATE_TIME:', round(getms()-mytime, 1))
      self._speedStatsAdd('generateResponse', getms()-mytime)
      if self.__settings['blocking']: self._deepUnlock()
      if self.__settings['controlGC']: self._controlGC() # call GC manually
      start_response(status, headers)
      return dataRaw if returnRaw else data
   def _requestProcess(self, request):
      """
      This method implement all logic of proccessing requests.

      Handles OPTIONS (CORS preflight), POST (JSON-RPC, incl. batch) and GET
      (JSONP fallback). Dispatchers are executed either directly or through
      the exec-backend bound to them.
      :param dict request: Request context from _prepRequestContext().
      :return tuple(int(code), dict(headers), str(data), object(dataRaw)):
      """
      if self._inChild():
         self._throw('This method "%s()" can be called only from <main> process'%sys._getframe().f_code.co_name)
      # process only (GET, POST, OPTIONS) requests
      if request['method'] not in ('GET', 'POST', 'OPTIONS'):
         self._logger(3, 'Unsupported method:', request['method'])
         return (405, {}, 'Method Not Allowed', None)
      # to local vars
      isFake='fake' in request and request['fake']
      request_method=request['method']
      request_path=request['path']
      request_fileName=request['fileName']
      request_data=None
      request_dataPrint=None
      request_ip=request['ip']
      request_url=request['url']
      request_args=request['args']
      # extract <method> for JSONP fallback: last path component is the method name
      if request_method=='GET':
         tArr=request_path[1:-1].split('/')
         if not tArr[-1]:
            self._throw('Incorrect path for GET request: %s'%request_path)
         request_fileName=request['fileName']=tArr[-1]
         request_path=request['path']=formatPath('/'.join(tArr[:-1]))
      # don't process request with unknown path
      if request_path not in self.routes:
         if request_method!='GET':
            # special checking of previous level, allows to migrate from REST
            tArr=request_path[1:-1].split('/')
            if tArr[-1]:
               tmp_path=formatPath('/'.join(tArr[:-1]))
               if tmp_path in self.routes:
                  self._logger(3, 'Unknown path (but known previous level):', request_path)
                  return (404, {}, 'Not found, but known previous level: %s'%(tmp_path), None)
         self._logger(3, 'Unknown path:', request_path)
         return (404, {}, 'Not found', None)
      # start processing request
      outHeaders={}
      outCookies=[]
      dataOut=[]
      # dataRaw mirrors the response in raw (not serialized) form, for postprocessers
      dataRaw={'rpcError':None, 'result':[]}
      allowCompress=self.__settings['allowCompress']
      mimeType=None
      # get post data
      if request_method=='POST':
         request_data=self._loadPostData(request)
         request_dataPrint=request['dataPrint']
      self._logger(4, 'RAW_REQUEST:', request_url, request_method, request_dataPrint or request_data)
      # AUTH: any return value other than True denies the request
      if self.__settings['auth'] and isFunction(self.__settings['auth']):
         if self.__settings['auth'](self, request) is not True:
            self._logger(3, 'Access denied:', request_url, request_method, request_dataPrint or request_data)
            return (403, {}, 'Forbidden', None)
      # CORS
      if self.__settings['CORS']:
         outHeaders['Access-Control-Allow-Headers']='Origin, Authorization, X-Requested-With, Content-Type, Accept'
         outHeaders['Access-Control-Max-Age']='0'
         if isDict(self.__settings['CORS']):
            outHeaders['Access-Control-Allow-Origin']=self.__settings['CORS'].get('origin', '*')
            outHeaders['Access-Control-Allow-Methods']=self.__settings['CORS'].get('methods', 'GET, POST, OPTIONS')
         else:
            outHeaders['Access-Control-Allow-Origin']='*'
            outHeaders['Access-Control-Allow-Methods']='GET, POST, OPTIONS'
      # Look at request's type
      if request_method=='OPTIONS':
         # CORS preflight: headers above are the whole answer
         self._logger(4, 'REQUEST_TYPE == OPTIONS')
      elif request_method=='POST': #JSONRPC
         error=[]
         out=[]
         self._logger(4, 'REQUEST:', request_dataPrint or request_data)
         if isFake:
            status, dataInList=(True, request_data)
            request_data='<fakeRequest>' #for generating uniqueId
         else:
            status, dataInList=self._parseRequest(request_data)
            request['dataParsed']=dataInList
            request['dataStatus']=status
         if not status: #error of parsing
            error={"code": -32700, "message": "Parse error"}
         else:
            procTime=getms()
            for dataIn in dataInList:
               # protect from freezes at long batch-requests
               if self.__settings['antifreeze_batchMaxTime'] and getms()-procTime>=self.__settings['antifreeze_batchMaxTime']:
                  self._logger(3, 'ANTIFREEZE_PROTECTION', len(dataInList), getms()-procTime)
                  if self.__settings['antifreeze_batchBreak']: break
                  else: self._sleep(self.__settings['antifreeze_batchSleep'])
                  procTime=getms()
               # try to process request
               if not dataIn['jsonrpc'] or not dataIn['method'] or (dataIn['params'] and not(isDict(dataIn['params'])) and not(isArray(dataIn['params']))): #syntax error in request
                  error.append({"code": -32600, "message": "Invalid Request"})
               elif dataIn['method'] not in self.routes[request_path]: #call of unknown method
                  error.append({"code": -32601, "message": "Method not found", "id":dataIn['id']})
               else: #process correct request
                  # generate unique id
                  uniqueId='%s--%s--%s--%s--%s--%i--%i'%(dataIn['method'], dataIn['id'], request_ip, self._sha1(request_data), getms(), random.random()*sys.maxint, random.random()*sys.maxint)
                  # select dispatcher
                  dispatcher=self.routes[request_path][dataIn['method']]
                  #select backend for executing
                  if dataIn['id'] is None:
                     # notification request
                     execBackend=self.execBackend[dispatcher['notifBackendId']] if dispatcher['notifBackendId'] in self.execBackend else None
                     if execBackend and hasattr(execBackend, 'add'):
                        #! the output format needs to be changed for compatibility with light-backend
                        status, m, dataForBackend=execBackend.add(uniqueId, dataIn, request)
                        if not status:
                           self._logger(1, 'Error in notifBackend.add(): %s'%m)
                     else:
                        status, _, _=self._callDispatcher(uniqueId, dataIn, request)
                  else:
                     # simple request
                     execBackend=self.execBackend[dispatcher['dispatcherBackendId']] if dispatcher['dispatcherBackendId'] in self.execBackend else None
                     if execBackend:
                        if hasattr(execBackend, 'check'):
                           # full-backend with 'add' and 'check'
                           status, m, dataForBackend=execBackend.add(uniqueId, dataIn, request)
                           if not status:
                              self._logger(1, 'Error in dispatcherBackend.add(): %s'%m)
                              result='Error in dispatcherBackend.add(): %s'%m
                           else:
                              status, params, result=execBackend.check(uniqueId, dataForBackend)
                        else:
                           # light-backend with 'add' only
                           status, params, result=execBackend.add(uniqueId, dataIn, request)
                           if not status:
                              self._logger(1, 'Error in dispatcherBackend.add(): %s'%result)
                              result='Error in dispatcherBackend.add(): %s'%result
                     else:
                        status, params, result=self._callDispatcher(uniqueId, dataIn, request)
                     if status:
                        if self.__settings['magicVarForDispatcher'] in params:
                           # get additional headers and cookies
                           magicVar=dict(params[self.__settings['magicVarForDispatcher']])
                           if magicVar['headersOut']:
                              outHeaders.update(magicVar['headersOut'])
                           if magicVar['cookiesOut']:
                              outCookies+=magicVar['cookiesOut']
                           # per-dispatcher compression override in either direction
                           if self.__settings['allowCompress'] and magicVar['allowCompress'] is False:
                              allowCompress=False
                           elif self.__settings['allowCompress'] is False and magicVar['allowCompress']: allowCompress=True
                           mimeType=str(magicVar['mimeType']) #avoid unicode
                        out.append({"id":dataIn['id'], "data":result})
                     else:
                        error.append({"code": -32603, "message": result, "id":dataIn['id']})
         # prepare output for response
         if error: self._logger(4, 'ERRORS:', error)
         self._logger(4, 'OUT:', out)
         if len(error):
            if isDict(error): #error of parsing
               dataRaw['result'].append(error)
               dataRaw['rpcError']=[error['code']]
               dataOut=self._prepResponse(error, isError=True)
            elif len(dataInList)>1: #error for batch request
               # NOTE(review): errors are added to dataRaw['result'] twice here
               # (the "+=" below and again inside the loop) — looks unintended; confirm
               dataRaw['result']+=error
               dataRaw['rpcError']=[]
               for d in error:
                  dataRaw['result'].append(d)
                  if d['code'] not in dataRaw['rpcError']:
                     dataRaw['rpcError'].append(d['code'])
                  dataOut.append(self._prepResponse(d, isError=True))
            else: #error for simple request
               dataRaw['result'].append(error[0])
               dataOut=self._prepResponse(error[0], isError=True)
               dataRaw['rpcError']=[error[0]['code']]
         if len(out):
            dataRaw['result']+=out
            if len(dataInList)>1: #response for batch request
               for d in out:
                  dataOut.append(self._prepResponse(d, isError=False))
            else: #response for simple request
               dataOut=self._prepResponse(out[0], isError=False)
         if not isFake:
            dataOut=self._serializeJSON(dataOut)
      elif request_method=='GET': #JSONP fallback
         out=None
         jsonpCB='%s;'
         self._logger(4, 'REQUEST:', request_fileName, request_args)
         if request_fileName not in self.routes[request_path]: #call of unknown method
            out={"error":{"code": -32601, "message": "Method not found"}}
         elif not self.routes[request_path][request_fileName]['allowJSONP']: #fallback to JSONP denied
            self._logger(2, 'JSONP_DENIED:', request_path, request_fileName)
            return (403, {}, 'JSONP_DENIED', None)
         else: #process correct request
            # generate unique id
            uniqueId='%s--%s--%s--%s--%i--%i'%(request_fileName, request_ip, self._sha1(self._serializeJSON(request_args)), getms(), random.random()*sys.maxint, random.random()*sys.maxint)
            request['dataParsed']=[{'method':request_fileName, 'params':request_args, 'id':uniqueId}]
            request['dataStatus']=True
            _jsonpCB=request_args.pop('jsonp', False)
            if _jsonpCB:
               jsonpCB='%s(%%s);'%(_jsonpCB)
            # <id> passed like for normal request
            dataIn=request['dataParsed'][0]
            # select dispatcher
            dispatcher=self.routes[request_path][dataIn['method']]
            # select backend for executing
            execBackend=self.execBackend[dispatcher['dispatcherBackendId']] if dispatcher['dispatcherBackendId'] in self.execBackend else None
            if execBackend:
               if hasattr(execBackend, 'check'):
                  # full backend with 'add' and 'check'
                  status, m, dataForBackend=execBackend.add(uniqueId, dataIn, request, isJSONP=jsonpCB)
                  if not status:
                     self._logger(1, 'Error in dispatcherBackend.add(): %s'%m)
                     result='Error in dispatcherBackend.add(): %s'%m
                  else:
                     status, params, result=execBackend.check(uniqueId, dataForBackend)
               else:
                  # light backend with 'add' only
                  status, params, result=execBackend.add(uniqueId, dataIn, request, isJSONP=jsonpCB)
                  if not status:
                     self._logger(1, 'Error in dispatcherBackend.add(): %s'%result)
                     result='Error in dispatcherBackend.add(): %s'%result
            else:
               status, params, result=self._callDispatcher(uniqueId, dataIn, request, isJSONP=jsonpCB)
            if status:
               if self.__settings['magicVarForDispatcher'] in params: #get additional headers and cookies
                  magicVar=dict(params[self.__settings['magicVarForDispatcher']])
                  if magicVar['headersOut']:
                     outHeaders.update(magicVar['headersOut'])
                  if magicVar['cookiesOut']:
                     outCookies+=magicVar['cookiesOut']
                  if self.__settings['allowCompress'] and magicVar['allowCompress'] is False: allowCompress=False
                  elif self.__settings['allowCompress'] is False and magicVar['allowCompress']: allowCompress=True
                  mimeType=str(magicVar['mimeType'])
                  jsonpCB=magicVar['jsonp']
               out=result
            else:
               out={"error":{"code": -32603, "message": result}}
               dataRaw['rpcError']=[-32603]
         dataRaw['result'].append(out)
         # prepare output for response: wrap serialized result into the JSONP callback
         self._logger(4, 'OUT:', out)
         out=self._serializeJSON(out)
         dataOut=jsonpCB%(out)
      self._logger(4, 'RESPONSE:', dataOut)
      # compress only real (not simulated) responses that are big enough and accepted by the client
      mimeType=mimeType or calcMimeType(request)
      allowCompress=not isFake and allowCompress and len(dataOut)>=self.__settings['compressMinSize'] and 'gzip' in request['headers'].get('Accept-Encoding', '').lower()
      dataRaw['mimeType']=mimeType
      dataRaw['allowCompress']=allowCompress
      dataRaw['isFake']=isFake
      dataRaw['outHeaders']=outHeaders
      dataRaw['outCookies']=outCookies
      if isDict(outHeaders):
         if 'Content-Type' not in outHeaders: outHeaders['Content-Type']=mimeType
         outHeaders=outHeaders.items()
      else: #for future use, now only dict supported
         outHeaders.append(('Content-Type', mimeType))
      if outCookies:
         # set cookies
         cookies=Cookie.SimpleCookie()
         for c in outCookies:
            if not isDict(c) or 'name' not in c or 'value' not in c: continue
            cookies[c['name']]=c['value']
            cookies[c['name']]['expires']=c.get('expires', 2147483647)
            cookies[c['name']]['path']=c.get('domain', '*')
            outHeaders.append(('Set-Cookie', cookies[c['name']].OutputString()))
      if allowCompress:
         # with compression
         mytime=getms()
         try:
            outHeaders, dataOut=self._compressResponse(outHeaders, dataOut)
         except Exception, e:
            self._logger(1, "Can't compress response:", e)
         self._logger(4, 'COMPRESSION TIME:', round(getms()-mytime, 1))
      return (200, outHeaders, dataOut, dataRaw)
   def serveForever(self, restartOn=False, sleep=10):
      """
      This method is wrapper above <server>.start().

      It implement logic, when this method block executing of source, placed below.
      :Example:
      server=flaskJSONRPCServer(["127.0.0.1", "8080"])
      print "before serveForever"
      server.serveForever()
      print "after serveForever" # you never see this message, while server runned
      :param bool|func|list restartOn: Falsy to never restart; a callback (or a list
         of callbacks) returning True to request a restart; or the string
         'checkFileDescriptor' to restart on file-descriptor exhaustion.
      :param int sleep: Poll interval (seconds) of the watch-loop.
      """
      if self._inChild():
         self._throw('This method "%s()" can be called only from <main> process'%sys._getframe().f_code.co_name)
      # fast path: nothing to watch, just block in the serving loop
      if not restartOn and not self.__settings['controlGC']: self.start(joinLoop=True)
      else:
         if restartOn:
            restartOn=restartOn if isArray(restartOn) else [restartOn]
         self.start(joinLoop=False)
         try:
            while True:
               self._sleep(sleep)
               if self.__settings['controlGC']: self._controlGC() # call GC manually
               if not restartOn: continue
               for cb in restartOn:
                  # any check that "fires" triggers a restart; others are skipped via continue
                  if not cb: continue
                  elif isFunction(cb) and cb(self) is not True: continue
                  elif cb=='checkFileDescriptor' and not self._checkFileDescriptor(multiply=1.25): continue
                  self._logger(3, 'Restarting server..')
                  self.restart(joinLoop=False)
                  break
         except KeyboardInterrupt: self.exited=True
def _startExecBackends(self):
"""
This merhod run all execute backends of server.
"""
if self._inChild():
self._throw('This method "%s()" can be called only from <main> process'%sys._getframe().f_code.co_name)
# start execBackends
for _id, bck in self.execBackend.iteritems():
if hasattr(bck, 'start'): bck.start(self)
def getWSGI(self):
"""
This method return WSGI-app of server.
"""
if self._inChild():
self._throw('This method "%s()" can be called only from <main> process'%sys._getframe().f_code.co_name)
# return self.flaskApp.wsgi_app
return self._requestHandler
def _registerServBackend(self, servBackend):
"""
This merhod register new serving backend in server, backend will be start when <server>.start() called.
:param str|obj servBackend: registered backend name or obj.
:return: unique identification.
"""
if self._inChild():
self._throw('This method "%s()" can be called only from <main> process'%sys._getframe().f_code.co_name)
# get _id of backend and prepare
_id=None
if isString(servBackend): #registered serv-backend
if servBackend not in servBackendCollection.servBackendMap:
self._throw('Unknown Serv-backend "%s"'%servBackend)
servBackend=servBackendCollection.servBackendMap[servBackend]()
_id=getattr(servBackend, '_id', None)
elif isDict(servBackend):
_id=servBackend.get('_id', None)
servBackend=magicDict(servBackend)
elif isInstance(servBackend):
_id=getattr(servBackend, '_id', None)
else:
self._throw('Unsupported Serv-backend type "%s"'%type(servBackend))
# try to use servBackend
if not _id:
self._throw('No "_id" in Serv-backend "%s"'%servBackend)
if not hasattr(servBackend, 'start'):
self._throw('No "start" method in Serv-backend "%s"'%servBackend)
self.servBackend=servBackend
return _id
def start(self, joinLoop=False):
"""
This method start all execute backends of server, then start serving backend (werkzeug or pywsgi for now).
If <joinLoop> is True, current thread join serving-thread.
:param bool joinLoop:
"""
if self._inChild():
self._throw('This method "%s()" can be called only from <main> process'%sys._getframe().f_code.co_name)
# freeze settings
self.__settings=dict(self.settings)
self.settings._MagicDictCold__freeze()
# start execBackends
self._startExecBackends()
if self.settings.allowCompress and not(self.settings.experimental and experimentalPack.use_moreAsync and '_compressGZIP' in experimentalPack.moreAsync_methods):
self._logger(2, 'Included compression is slow')
if self.settings.blocking:
self._logger(2, 'Server work in blocking mode')
if self.settings.gevent:
self._patchWithGevent()
# select serving backend
if not self.servBackend:
servBackend=self.settings.servBackend
if servBackend=='auto':
if self.settings.fakeListener: servBackend='fake'
elif self.settings.gevent: servBackend='pywsgi'
else: servBackend='wsgiex'
_id=self._registerServBackend(servBackend)
# checking compatibility
if self.settings.fakeListener and not getattr(self.servBackend, '_supportNoListener', False):
self._throw('Serving without listeners not supported with selected Serv-backend "%s"'%_id)
if self.settings.gevent and not getattr(self.servBackend, '_supportGevent', False):
self._throw('Using Gevent not supported with selected Serv-backend "%s"'%_id)
if not self.settings.gevent and not getattr(self.servBackend, '_supportNative', False):
self._throw('Working without Gevent not supported with selected Serv-backend "%s"'%_id)
if any(self.settings.socketPath) and not getattr(self.servBackend, '_supportRawSocket', False):
self._throw('Serving on raw-socket not supported with selected Serv-backend "%s"'%_id)
# run serving backend
self.started=True
if not self.settings.postprocess['byStatus']:
# for fast checking, if no any postprocess WSGI
self.__settings['postprocess']=None
self._logger(3, 'Server "%s" running with "%s" serv-backend..'%(self.name, _id))
self._server=[]
wsgi=self.getWSGI()
for i in xrange(len(self.settings.ip)):
# if passed only UDS path, create listener for it
if self.settings.socketPath[i] and not self.settings.socket[i]:
self.settings.socket[i]=self._initListenerUDS(self.settings.socketPath[i])
# select bindAdress
bindAdress=self.settings.socket[i] if self.settings.socket[i] else (self.settings.ip[i], self.settings.port[i])
# wrapping WSGI for detecting adress
if self.settings.socket[i]:
s=(True, self.settings.socketPath[i], self.settings.socket[i])
else:
s=(False, self.settings.ip[i], self.settings.port[i])
def wsgiWrapped(environ, start_response, __flaskJSONRPCServer_binded=s, __wsgi=wsgi):
environ['flaskJSONRPCServer_binded']=__flaskJSONRPCServer_binded
return __wsgi(environ, start_response)
# start
if getattr(self.servBackend, '_supportMultiple', False) and len(self.settings.ip)>1:
self.servBackend.startMultiple(bindAdress, wsgiWrapped, self, joinLoop=joinLoop, isLast=(i+1==len(self.settings.ip)))
else:
self.servBackend.start(bindAdress, wsgiWrapped, self, joinLoop=(joinLoop and i+1==len(self.settings.ip)))
def _stopExecBackends(self, timeout=20, processingDispatcherCountMax=0):
"""
This merhod stop all execute backends of server.
For more info see <server>._waitProcessingDispatchers().
:param int timeout:
:param int processingDispatcherCountMax:
"""
if self._inChild():
self._throw('This method "%s()" can be called only from <main> process'%sys._getframe().f_code.co_name)
mytime=getms()
for _id, bck in self.execBackend.iteritems():
if hasattr(bck, 'stop'):
bck.stop(self, timeout=timeout-(getms()-mytime)/1000.0, processingDispatcherCountMax=processingDispatcherCountMax)
def _waitProcessingDispatchers(self, timeout=20, processingDispatcherCountMax=0):
"""
This method try to wait (for <timeout> seconds), while currently runed dispatchers will be done.
If <processingDispatcherCountMax> is not 0, this check skip this nuber of runned dispatchers.
:param int timeout:
:param int processingDispatcherCountMax:
"""
if self._inChild():
self._throw('This method "%s()" can be called only from <main> process'%sys._getframe().f_code.co_name)
mytime=getms()
if processingDispatcherCountMax is not False:
while self.processingDispatcherCount>processingDispatcherCountMax:
if timeout and getms()-mytime>=timeout*1000:
self._logger(2, 'Warning: So long wait for completing dispatchers(%s)'%(self.processingDispatcherCount))
break
self._sleep(self.__settings['sleepTime_checkProcessingCount'])
def stop(self, timeout=20, processingDispatcherCountMax=0):
"""
This method stop all execute backends of server, then stop serving backend (werkzeug or gevent.pywsgi for now). For more info see <server>._waitProcessingDispatchers().
Dont call this method from dispatchers!
:param int timeout:
:param int processingDispatcherCountMax:
"""
if self._inChild():
self._throw('This method "%s()" can be called only from <main> process'%sys._getframe().f_code.co_name)
if not hasattr(self.servBackend, 'stop'):
self._throw('Stopping not provided by Serv-backend "%s"'%self.servBackend)
self._deepLock()
mytime=getms()
self._waitProcessingDispatchers(timeout=timeout, processingDispatcherCountMax=processingDispatcherCountMax)
# stop execBackends
self._stopExecBackends(timeout=timeout-(getms()-mytime)/1000.0, processingDispatcherCountMax=processingDispatcherCountMax)
withError=[]
# stop serving backend
for i, s in enumerate(self._server):
try:
self.servBackend.stop(s, i, self, timeout=timeout-(getms()-mytime)/1000.0)
except Exception, e: withError.append(e)
# un-freeze settings
self.__settings=self.settings
self.settings._magicDictCold__unfreeze()
self.started=False
# process happened errors
if withError:
self._deepUnlock()
self._throw('\n'.join(withError))
self._logger(3, 'SERVER STOPPED')
self._logger(0, 'INFO: Ignore all errors about "Unhandled exception in thread..."')
self.processingRequestCount=0
self._deepUnlock()
def restart(self, timeout=20, processingDispatcherCountMax=0, joinLoop=False):
"""
This method call <server>.stop() and then <server>.start().
For more info see <server>._waitProcessingDispatchers().
:param int timeout:
:param int processingDispatcherCountMax:
:param bool joinLoop:
"""
if self._inChild():
self._throw('This method "%s()" can be called only from <main> process'%sys._getframe().f_code.co_name)
self.stop(timeout=timeout, processingDispatcherCountMax=processingDispatcherCountMax)
self.start(joinLoop=joinLoop)
# def createSSLTunnel(self, port_https, port_http, sslCert='', sslKey='', stunnel_configPath='/home/sslCert/', stunnel_exec='stunnel4', stunnel_configSample=None, stunnel_sslAllow='all', stunnel_sslOptions='-NO_SSLv2 -NO_SSLv3', stunnel_logLevel=4, stunnel_logFile='/home/python/logs/stunnel_%s.log'):
# import atexit, subprocess
# print 'Creating tunnel (localhost:%s --> localhost:%s)..'%(port_https, port_http)
# configSample=self._fileGet(stunnel_configSample) if stunnel_configSample else """
# debug = %(logLevel)s
# output = /dev/null
# foreground = yes
# socket = l:TCP_NODELAY=1
# socket = r:TCP_NODELAY=1
# [myservice_%(name)s]
# sslVersion = %(sslAllow)s
# %(sslOptions)s
# cert = %(sslCert)s
# key = %(sslKey)s
# accept = %(portHttps)s
# connect = %(portHttp)s
# TIMEOUTclose = 10
# TIMEOUTbusy = 30
# TIMEOUTconnect = 10
# TIMEOUTidle = 10
# sessionCacheTimeout = 60
# """
# name=os.path.splitext(os.path.basename(sys.argv[0]))[0]
# stunnel_sslOptions='\n'.join(['options = '+s for s in stunnel_sslOptions.split(' ') if s])
# config={'name':name, 'logLevel':stunnel_logLevel, 'sslAllow':stunnel_sslAllow, 'sslOptions':stunnel_sslOptions, 'sslCert':sslCert, 'sslKey':sslKey, 'portHttps':port_https, 'portHttp':port_http}
# config=configSample%config
# configPath=stunnel_configPath+('stunnel_%s.conf'%name)
# logPath=stunnel_logFile%name
# self._fileWrite(configPath, config)
# stunnel=subprocess.Popen([stunnel_exec, configPath], stderr=open(logPath, "w"))
# self._sleep(1)
# if stunnel.poll(): #error
# s=self._fileGet(logPath)
# s='[!] '+strGet(s, '[!]', '')
# print '!!! ERROR creating tunnel\n', s
# return False
# def closeSSLTunnel():
# try: os.system('pkill -f "%s %s"'%(stunnel_exec, configPath))
# except: pass
# atexit.register(closeSSLTunnel)
# # def checkSSLTunnel():
# # badPatterns=['Connection rejected: too many clients']
# # while True:
# # self._sleep(3)
# # #! Здесь нужно проверять лог на наличие критических ошибок
# # stunnelLog=self._fileGet(logPath)
# # thread_checkSSLTunnel=threading.Thread(target=checkSSLTunnel).start()
# return stunnel
"""REQUEST-RESPONSE SAMPLES
--> {"jsonrpc": "2.0", "method": "subtract", "params": [42, 23], "id": 1}
<-- {"jsonrpc": "2.0", "result": 19, "id": 1}
--> {"jsonrpc": "2.0", "method": "subtract", "params": {"subtrahend": 23, "minuend": 42}, "id": 3}
<-- {"jsonrpc": "2.0", "result": 19, "id": 3}
--> {"jsonrpc": "2.0", "method": "update", "params": [1,2,3,4,5]}
--> {"jsonrpc": "2.0", "method": "foobar"}
--> {"jsonrpc": "2.0", "method": "foobar", "id": "1"}
<-- {"jsonrpc": "2.0", "error": {"code": -32601, "message": "Method not found"}, "id": "1"}
--> {"jsonrpc": "2.0", "method": "foobar, "params": "bar", "baz]
<-- {"jsonrpc": "2.0", "error": {"code": -32700, "message": "Parse error"}, "id": null}
--> {"jsonrpc": "2.0", "method": 1, "params": "bar"}
<-- {"jsonrpc": "2.0", "error": {"code": -32600, "message": "Invalid Request"}, "id": null}
--> [1,2,3]
<-- [
{"jsonrpc": "2.0", "error": {"code": -32600, "message": "Invalid Request"}, "id": null},
{"jsonrpc": "2.0", "error": {"code": -32600, "message": "Invalid Request"}, "id": null},
{"jsonrpc": "2.0", "error": {"code": -32600, "message": "Invalid Request"}, "id": null}
]
--> [
{"jsonrpc": "2.0", "method": "sum", "params": [1,2,4], "id": "1"},
{"jsonrpc": "2.0", "method"
]
<-- {"jsonrpc": "2.0", "error": {"code": -32700, "message": "Parse error"}, "id": null}
--> [
{"jsonrpc": "2.0", "method": "sum", "params": [1,2,4], "id": "1"},
{"jsonrpc": "2.0", "method": "notify_hello", "params": [7]},
{"jsonrpc": "2.0", "method": "subtract", "params": [42,23], "id": "2"},
{"foo": "boo"},
{"jsonrpc": "2.0", "method": "foo.get", "params": {"name": "myself"}, "id": "5"},
{"jsonrpc": "2.0", "method": "get_data", "id": "9"}
]
<-- [
{"jsonrpc": "2.0", "result": 7, "id": "1"},
{"jsonrpc": "2.0", "result": 19, "id": "2"},
{"jsonrpc": "2.0", "error": {"code": -32600, "message": "Invalid Request"}, "id": null},
{"jsonrpc": "2.0", "error": {"code": -32601, "message": "Method not found"}, "id": "5"},
{"jsonrpc": "2.0", "result": ["hello", 5], "id": "9"}
]
"""
# this code allows to profelling greenlet-switching time
"""
if environ.get('LOG_GREENLET_RUN_DURATIONS') and environ.get('LOG_GREENLET_RUN_DURATIONS').lower() == 'true':
import time, greenlet, gevent.hub, threading
MIN_DURATION_TO_LOG = float(environ.get('MIN_GREENLET_RUN_DURATION_TO_LOG', 0.5)) # seconds
def log_greenlet_run_duration(what, (origin, target)):
global _last_switch_time
then = _last_switch_time
now = _last_switch_time = time.time()
if then is not None:
blocking_time = now - then
if origin is not gevent.hub.get_hub() and blocking_time > MIN_DURATION_TO_LOG:
msg = "Greenlet ran for %.4f seconds (%s from %s %s to %s %s).\n"
msg = msg % (blocking_time, what, type(origin), id(origin), type(target), id(target))
print msg
greenlet.settrace(log_greenlet_run_duration)
""" | 47.307171 | 640 | 0.630111 |
a7d08810a4a8509de953ac470523cc2958616a35 | 3,375 | py | Python | ask-smapi-model/ask_smapi_model/v1/skill/nlu/evaluations/pagination_context.py | alexa-labs/alexa-apis-for-python | 52838be4f57ee1a2479402ea78b1247b56017942 | [
"Apache-2.0"
] | 90 | 2018-09-19T21:56:42.000Z | 2022-03-30T11:25:21.000Z | ask-smapi-model/ask_smapi_model/v1/skill/nlu/evaluations/pagination_context.py | ishitaojha/alexa-apis-for-python | a68f94b7a0e41f819595d6fe56e800403e8a4194 | [
"Apache-2.0"
] | 11 | 2018-09-23T12:16:48.000Z | 2021-06-10T19:49:45.000Z | ask-smapi-model/ask_smapi_model/v1/skill/nlu/evaluations/pagination_context.py | ishitaojha/alexa-apis-for-python | a68f94b7a0e41f819595d6fe56e800403e8a4194 | [
"Apache-2.0"
] | 28 | 2018-09-19T22:30:38.000Z | 2022-02-22T22:57:07.000Z | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union, Any
from datetime import datetime
class PaginationContext(object):
"""
:param next_token: opaque token returned if there are more results for the given inputs than `maxResults` from the request.
:type next_token: (optional) str
"""
deserialized_types = {
'next_token': 'str'
} # type: Dict
attribute_map = {
'next_token': 'nextToken'
} # type: Dict
supports_multiple_types = False
def __init__(self, next_token=None):
# type: (Optional[str]) -> None
"""
:param next_token: opaque token returned if there are more results for the given inputs than `maxResults` from the request.
:type next_token: (optional) str
"""
self.__discriminator_value = None # type: str
self.next_token = next_token
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, PaginationContext):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
| 31.542056 | 141 | 0.586667 |
126d87a0ec738ce81406d6525efa08bf3e495896 | 261 | py | Python | Cycle_1/Week_2/Session_8/conditionals_exercise.py | htrismicristo/MisionTIC_2022 | 62837503ac33b3fb4e4d2d23bd7b8388bbecc02d | [
"MIT"
] | null | null | null | Cycle_1/Week_2/Session_8/conditionals_exercise.py | htrismicristo/MisionTIC_2022 | 62837503ac33b3fb4e4d2d23bd7b8388bbecc02d | [
"MIT"
] | null | null | null | Cycle_1/Week_2/Session_8/conditionals_exercise.py | htrismicristo/MisionTIC_2022 | 62837503ac33b3fb4e4d2d23bd7b8388bbecc02d | [
"MIT"
] | null | null | null |
# Definiendo variables
A = int(input())
B = int(input())
C = int(input())
D = int(input())
# Comparando variables
if B > C and D > A and (C+D) > (A+B) and D > 0 and C > 0 and (not A%2):
print('Valores aceptados')
else:
print('Valores no aceptados') | 21.75 | 71 | 0.597701 |
2b6ef8881db4fa004b46cba32ab9d6523281ce65 | 4,963 | py | Python | compiler/base/utils.py | ycyang0508/OpenRAM | 54c6043cb81c51f5f4a2f77e91145545ce0ed6d6 | [
"BSD-3-Clause"
] | 1 | 2022-02-17T22:12:46.000Z | 2022-02-17T22:12:46.000Z | compiler/base/utils.py | ycyang0508/OpenRAM | 54c6043cb81c51f5f4a2f77e91145545ce0ed6d6 | [
"BSD-3-Clause"
] | null | null | null | compiler/base/utils.py | ycyang0508/OpenRAM | 54c6043cb81c51f5f4a2f77e91145545ce0ed6d6 | [
"BSD-3-Clause"
] | null | null | null | # See LICENSE for licensing information.
#
# Copyright (c) 2016-2021 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
import os
import math
import gdsMill
import tech
import globals
import debug
from vector import vector
from pin_layout import pin_layout
OPTS = globals.OPTS
def ceil(decimal):
"""
Performs a ceiling function on the decimal place specified by the DRC grid.
"""
grid = tech.drc["grid"]
return math.ceil(decimal * 1 / grid) / (1 / grid)
def round_to_grid(number):
"""
Rounds an arbitrary number to the grid.
"""
grid = tech.drc["grid"]
# this gets the nearest integer value
number_grid = int(round(round((number / grid), 2), 0))
number_off = number_grid * grid
return number_off
def snap_to_grid(offset):
"""
Changes the coodrinate to match the grid settings
"""
return [round_to_grid(offset[0]),
round_to_grid(offset[1])]
def pin_center(boundary):
"""
This returns the center of a pin shape in the vlsiLayout border format.
"""
return [0.5 * (boundary[0] + boundary[2]),
0.5 * (boundary[1] + boundary[3])]
def auto_measure_libcell(pin_list, name, units, lpp):
"""
Open a GDS file and find the pins in pin_list as text on a given layer.
Return these as a set of properties including the cell width/height too.
"""
cell_gds = OPTS.openram_tech + "gds_lib/" + str(name) + ".gds"
cell_vlsi = _get_gds_reader(units, cell_gds)
# FIXME: This duplicates a lot of functionality of get_gds_size and
# get_gds_pins, it should probably just call those functions?
cell = {}
measure_result = cell_vlsi.getLayoutBorder(lpp[0])
if measure_result:
measure_result = cell_vlsi.measureSize(name)
[cell["width"], cell["height"]] = measure_result
for pin in pin_list:
(name, lpp, boundary) = cell_vlsi.getPinShapeByLabel(str(pin))
cell[str(pin)] = pin_center(boundary)
return cell
_GDS_READER_CACHE = {}
def _get_gds_reader(units, gds_filename):
gds_absname = os.path.realpath(gds_filename)
k = (units, gds_absname)
try:
return _GDS_READER_CACHE[k]
except KeyError:
debug.info(4, "Creating VLSI layout from {}".format(gds_absname))
cell_vlsi = gdsMill.VlsiLayout(units=units)
reader = gdsMill.Gds2reader(cell_vlsi)
reader.loadFromFile(gds_absname)
_GDS_READER_CACHE[k] = cell_vlsi
return cell_vlsi
_GDS_SIZE_CACHE = {}
def get_gds_size(name, gds_filename, units, lpp):
"""
Open a GDS file and return the size from either the
bounding box or a border layer.
"""
k = (name, os.path.realpath(gds_filename), units, lpp)
try:
return _GDS_SIZE_CACHE[k]
except KeyError:
cell_vlsi = _get_gds_reader(units, gds_filename)
measure_result = cell_vlsi.getLayoutBorder(lpp)
if not measure_result:
debug.info(2, "Layout border failed. Trying to measure size for {}".format(name))
measure_result = cell_vlsi.measureSize(name)
_GDS_SIZE_CACHE[k] = measure_result
# returns width,height
return measure_result
def get_libcell_size(name, units, lpp):
"""
Open a GDS file and return the library cell size from either the
bounding box or a border layer.
"""
cell_gds = OPTS.openram_tech + "gds_lib/" + str(name) + ".gds"
return(get_gds_size(name, cell_gds, units, lpp))
_GDS_PINS_CACHE = {}
def get_gds_pins(pin_names, name, gds_filename, units):
"""
Open a GDS file and find the pins in pin_names as text on a given layer.
Return these as a rectangle layer pair for each pin.
"""
k = (tuple(pin_names), name, os.path.realpath(gds_filename), units)
try:
return dict(_GDS_PINS_CACHE[k])
except KeyError:
cell_vlsi = _get_gds_reader(units, gds_filename)
cell = {}
for pin_name in pin_names:
cell[str(pin_name)] = []
pin_list = cell_vlsi.getPinShape(str(pin_name))
for pin_shape in pin_list:
(lpp, boundary) = pin_shape
rect = [vector(boundary[0], boundary[1]),
vector(boundary[2], boundary[3])]
# this is a list because other cells/designs
# may have must-connect pins
cell[str(pin_name)].append(pin_layout(pin_name, rect, lpp))
_GDS_PINS_CACHE[k] = cell
return dict(cell)
def get_libcell_pins(pin_list, name, units):
"""
Open a GDS file and find the pins in pin_list as text on a given layer.
Return these as a rectangle layer pair for each pin.
"""
cell_gds = OPTS.openram_tech + "gds_lib/" + str(name) + ".gds"
return(get_gds_pins(pin_list, name, cell_gds, units))
| 29.194118 | 93 | 0.656659 |
98d24c51a88e1470b4dde363bc1502e40c76b016 | 10,625 | py | Python | 3 calcGUI/CalcGUIModified.py | Blade24-byte/pythonteachingcode | 3fa7f7e5b459873ad6c0b921c0760d11e97db054 | [
"MIT"
] | null | null | null | 3 calcGUI/CalcGUIModified.py | Blade24-byte/pythonteachingcode | 3fa7f7e5b459873ad6c0b921c0760d11e97db054 | [
"MIT"
] | null | null | null | 3 calcGUI/CalcGUIModified.py | Blade24-byte/pythonteachingcode | 3fa7f7e5b459873ad6c0b921c0760d11e97db054 | [
"MIT"
] | 19 | 2019-05-31T19:29:18.000Z | 2020-07-20T05:23:56.000Z | '''
Modified calcGUI
This example helps show how the tkinter library works in Python.
Run it and press the 1 key on your keyboard. Why does "dominic" appear in the entry bar?
Why does one key have "DT" on it? When you click it, why does it make "Thomas" show in the entry bar?
Why is the font in the entry bar now fancy?
How would we add more buttons?
'''
from tkinter import *
from math import sqrt as sqr
class Application(Frame):
"""
An example of a calculator app developed using the
Tkinter GUI.
"""
def __init__(self, master):
"""
Initializes the frame.
:param master: root.Tk()
"""
Frame.__init__(self, master)
self.entry = Entry(master, width=32, font=("Brush Script MT",25))
self.entry.grid(row=0, column=0, columnspan=6, sticky="w")
self.entry.focus_set()
self.entry.configure(state="disabled", disabledbackground="white", disabledforeground="black")
self.create_widgets()
self.bind_buttons(master)
self.grid()
def add_chr(self, char, btn=None):
"""
Concatenates a character passed from a button press (or key type)
to a string.
:param char: string to add passed from a button
:param btn: button name to use if key is pressed (to flash)
:return: None
"""
self.entry.configure(state="normal")
self.flash(btn) # Flash a button correspond to keystroke
if self.entry.get() == "Invalid Input":
self.entry.delete(0,END)
self.entry.insert(END, char)
self.entry.configure(state="disabled")
def clear(self):
"""
Allows user to backspace their entry.
:return: None
"""
self.entry.configure(state="normal")
if self.entry.get() != "Invalid Input":
# Clears full entry when "Invalid Input"
text = self.entry.get()[:-1]
self.entry.delete(0,END)
self.entry.insert(0,text)
else:
self.entry.delete(0, END)
self.entry.configure(state="disabled")
def clear_all(self):
"""
Allows user to clear the full entry.
:return: None
"""
self.entry.configure(state="normal")
self.entry.delete(0, END)
self.entry.configure(state="disabled")
def calculate(self):
"""
Changes the operation symbols to their mathematical representation used in
the eval() method.
:return: None
"""
self.entry.configure(state="normal")
e = self.entry.get()
e = e.replace("√","sqr")
e = e.replace("×", "*")
e = e.replace("²", "**2")
e = e.replace("^", "**")
e = e.replace("÷", "/")
try:
ans = eval(e)
except Exception as ex:
self.entry.delete(0,END)
self.entry.insert(0, "Invalid Input")
else:
self.entry.delete(0,END)
if len(str(ans)) > 20: # Alleviates problem of large numbers
self.entry.insert(0, '{:.10e}'.format(ans))
else:
self.entry.insert(0, ans)
self.entry.configure(state="disabled")
def flash(self,btn):
"""
Flashes a corresponding button when key is pressed.
:param btn: button
:return: None
"""
if btn != None:
btn.config(bg="yellow")
if btn == self.c_bttn:
self.clear()
self.master.after(100, lambda: btn.config(bg="SystemButtonFace"))
elif btn == self.eq_bttn:
self.master.after(100, lambda: btn.config(bg="lightgrey"))
self.calculate()
elif btn == self.ac_bttn:
self.clear_all()
self.master.after(100, lambda: btn.config(bg="SystemButtonFace"))
else:
self.master.after(100, lambda: btn.config(bg="SystemButtonFace"))
else:
pass
def bind_buttons(self, master):
"""
Binds keys to their appropriate input
:param master: root.Tk()
:return: None
"""
master.bind("<Return>", lambda event, btn=self.eq_bttn: self.flash(btn))
master.bind("<BackSpace>", lambda event, btn=self.c_bttn: self.flash(btn))
master.bind("9", lambda event, char="9", btn=self.nine_bttn: self.add_chr(char, btn))
master.bind("8", lambda event, char="8", btn=self.eight_bttn: self.add_chr(char, btn))
master.bind("7", lambda event, char="7", btn=self.seven_bttn: self.add_chr(char, btn))
master.bind("6", lambda event, char="6", btn=self.six_bttn: self.add_chr(char, btn))
master.bind("5", lambda event, char="5", btn=self.five_bttn: self.add_chr(char, btn))
master.bind("4", lambda event, char="4", btn=self.four_bttn: self.add_chr(char, btn))
master.bind("3", lambda event, char="3", btn=self.three_bttn: self.add_chr(char, btn))
master.bind("2", lambda event, char="2", btn=self.two_bttn: self.add_chr(char, btn))
master.bind("1", lambda event, char="Dominic", btn=self.one_bttn: self.add_chr(char, btn))
master.bind("0", lambda event, char="0", btn=self.zero_bttn: self.add_chr(char, btn))
master.bind("*", lambda event, char="×", btn=self.mult_bttn: self.add_chr(char, btn))
master.bind("/", lambda event, char="÷", btn=self.div_bttn: self.add_chr(char, btn))
master.bind("^", lambda event, char="^", btn=self.sqr_bttn: self.add_chr(char, btn))
master.bind("%", lambda event, char="%", btn=self.mod_bttn: self.add_chr(char, btn))
master.bind(".", lambda event, char=".", btn=self.dec_bttn: self.add_chr(char, btn))
master.bind("-", lambda event, char="-", btn=self.sub_bttn: self.add_chr(char, btn))
master.bind("+", lambda event, char="+", btn=self.add_bttn: self.add_chr(char, btn))
master.bind("(", lambda event, char="(", btn=self.lpar_bttn: self.add_chr(char, btn))
master.bind(")", lambda event, char=")", btn=self.rpar_bttn: self.add_chr(char, btn))
master.bind("c", lambda event, btn=self.ac_bttn: self.flash(btn), self.clear_all)
def create_widgets(self):
"""
Creates the widgets to be used in the grid.
:return: None
"""
self.eq_bttn = Button(self, text="=", width=20, height=3, bg="Orange", command=lambda: self.calculate())
self.eq_bttn.grid(row=4, column=4, columnspan=2)
self.ac_bttn = Button(self, text='CE', width=9, height=3, bg='LightBlue', fg='red',command=lambda: self.clear_all())
self.ac_bttn.grid(row=1, column=4)
self.c_bttn = Button(self, text='←', width=9, height=3, bg='LightBlue', fg='red',command=lambda: self.clear())
self.c_bttn.grid(row=1, column=5 )
self.add_bttn = Button(self, text="+", width=9, height=3,bg='LightBlue', fg='red', command=lambda: self.add_chr('+'))
self.add_bttn.grid(row=4, column=3)
self.mult_bttn = Button(self, text="×", width=9, height=3,bg='LightBlue', fg='red', command=lambda: self.add_chr('×'))
self.mult_bttn.grid(row=2, column=3)
self.sub_bttn = Button(self, text="-", width=9, height=3, bg='LightBlue', fg='red',command=lambda: self.add_chr('-'))
self.sub_bttn.grid(row=3, column=3)
self.div_bttn = Button(self, text="÷", width=9, height=3,bg='LightBlue', fg='red', command=lambda: self.add_chr('/'))
self.div_bttn.grid(row=1, column=3)
self.mod_bttn = Button(self, text="%", width=9, height=3,bg='LightBlue', fg='red', command=lambda: self.add_chr('%'))
self.mod_bttn.grid(row=4, column=2)
self.seven_bttn = Button(self, text="7", width=9, height=3, bg='LightBlue', fg='red',command=lambda: self.add_chr("7"))
self.seven_bttn.grid(row=1, column=0)
self.eight_bttn = Button(self, text="8", width=9, height=3,bg='LightBlue', fg='red', command=lambda: self.add_chr(8))
self.eight_bttn.grid(row=1, column=1)
self.nine_bttn = Button(self, text="9", width=9, height=3,bg='LightBlue', fg='red', command=lambda: self.add_chr(9))
self.nine_bttn.grid(row=1, column=2)
self.four_bttn = Button(self, text="4", width=9, height=3,bg='LightBlue', fg='red', command=lambda: self.add_chr(4))
self.four_bttn.grid(row=2, column=0)
self.five_bttn = Button(self, text="5", width=9, height=3,bg='LightBlue', fg='red', command=lambda: self.add_chr(5))
self.five_bttn.grid(row=2, column=1)
self.six_bttn = Button(self, text="6", width=9, height=3,bg='LightBlue', fg='red', command=lambda: self.add_chr(6))
self.six_bttn.grid(row=2, column=2)
self.one_bttn = Button(self, text="1", width=9, height=3,bg='LightBlue', fg='red', command=lambda: self.add_chr(1))
self.one_bttn.grid(row=3, column=0)
self.two_bttn = Button(self, text="DT", width=9, height=3, bg='LightBlue', fg='red',command=lambda: self.add_chr("Thomas"))
self.two_bttn.grid(row=3, column=1)
self.three_bttn = Button(self, text="3", width=9, height=3,bg='LightBlue', fg='red', command=lambda: self.add_chr(3))
self.three_bttn.grid(row=3, column=2)
self.zero_bttn = Button(self, text="0", width=9, height=3,bg='LightBlue', fg='red', command=lambda: self.add_chr(0))
self.zero_bttn.grid(row=4, column=0)
self.dec_bttn = Button(self, text=".", width=9, height=3,bg='LightBlue', fg='red', command=lambda: self.add_chr('.'))
self.dec_bttn.grid(row=4, column=1)
self.lpar_bttn = Button(self, text="(", width=9, height=3,bg='LightBlue', fg='red', command=lambda: self.add_chr('('))
self.lpar_bttn.grid(row=2, column=4)
self.rpar_bttn = Button(self, text=")", width=9, height=3,bg='LightBlue', fg='red', command=lambda: self.add_chr(')'))
self.rpar_bttn.grid(row=2, column=5)
self.sq_bttn = Button(self, text="√", width=9, height=3,bg='LightBlue', fg='red', command=lambda: self.add_chr('√('))
self.sq_bttn.grid(row=3, column=4)
self.sqr_bttn = Button(self, text="^", width=9, height=3,bg='LightBlue', fg='red', command=lambda: self.add_chr('^'))
self.sqr_bttn.grid(row=3, column=5)
root = Tk()
root.geometry()
root.title("Modified GUI Calculator")
app = Application(root)
root.mainloop() | 46.39738 | 132 | 0.590306 |
4c69f920f6f9e3a07b6ee6417b6c86ee16615000 | 3,245 | py | Python | sexpr.py | lispparser/sexp-python | 588ba16c0e846486295e5e699f7c714d1391560a | [
"Zlib"
] | null | null | null | sexpr.py | lispparser/sexp-python | 588ba16c0e846486295e5e699f7c714d1391560a | [
"Zlib"
] | null | null | null | sexpr.py | lispparser/sexp-python | 588ba16c0e846486295e5e699f7c714d1391560a | [
"Zlib"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2014 Ingo Ruhnke <grumbel@gmail.com>
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import re
import codecs
def parse(text):
stack = [[]]
state = 'list'
i = 0
line = 1
column = 0
while i < len(text):
c = text[i]
if c == '\n':
line += 1
column = 0
else:
column += 1
if state == 'list':
if c == '(':
stack.append([])
elif c == ')':
stack[-2].append(stack.pop())
elif c == "\"":
state = 'string'
atom = ""
elif c == ";":
state = 'comment'
elif c.isalpha():
state = 'symbol'
atom = c
elif c.isdigit():
state = 'number'
atom = c
elif c.isspace():
pass
else:
raise Exception("%d:%d: error: unexpected character: '%s'" % (line, column, c))
elif state == 'comment':
if c == '\n':
state = 'list'
else:
pass
elif state == 'string':
if c == "\\":
i += 1
atom += text[i]
elif c == "\"":
stack[-1].append(atom)
state = 'list'
else:
atom += c
elif state == 'number':
if not c.isdigit() or c != ".":
stack[-1].append(int(atom))
state = 'list'
i -= 1
else:
atom += c
elif state == 'symbol':
if c.isspace() or c == '(' or c == ')':
stack[-1].append(atom)
state = 'list'
i -= 1
else:
atom += c
# print c, stack
i += 1
if len(stack) == 1:
return stack[0]
else:
raise Exception("error: list not closed")
if __name__ == "__main__":
print "parsing..."
result = parse(r'(() ("bar" foo) ()) () bar ')
print "1.", result
print "2.", parse(""";;comment
("Hello World" 5 1 123) ("Hello" 123 123 "foobar") ;; comment""")
print "3.", parse(r'(8(8)8)')
print "4.", parse(r'')
print "5.", parse(r' ')
with codecs.open("white.stf", encoding='utf-8') as fin:
print "6.", parse(fin.read())
# EOF #
| 28.716814 | 95 | 0.48074 |
0fed2853f893c8c80ee2fdf83d522c6a9cc8fe30 | 298 | py | Python | Player.py | THEToilet/kamisama | 5a74819d2c768cf7cf44f184c26428d72bbcfe80 | [
"MIT"
] | null | null | null | Player.py | THEToilet/kamisama | 5a74819d2c768cf7cf44f184c26428d72bbcfe80 | [
"MIT"
] | null | null | null | Player.py | THEToilet/kamisama | 5a74819d2c768cf7cf44f184c26428d72bbcfe80 | [
"MIT"
] | null | null | null | import Vector2 as vec2
class pc:
def __init__(self):
self.pos = vec2.Vec2(10, 95)
self.vec = 0
self.direction = 0
self.is_floating = False
self.vy = 0
def update(self, x, y, dx):
self.pos.x += x
self.pos.y += y
self.vec = dx | 21.285714 | 36 | 0.513423 |
e1b59f55eebe53b762d0b76374429e354e926afa | 3,445 | py | Python | backend/tests/test_entry.py | acifani/reviso-timetracking | f7ee89fb8ea6e39960af8c072633c7cf73c849e6 | [
"MIT"
] | null | null | null | backend/tests/test_entry.py | acifani/reviso-timetracking | f7ee89fb8ea6e39960af8c072633c7cf73c849e6 | [
"MIT"
] | null | null | null | backend/tests/test_entry.py | acifani/reviso-timetracking | f7ee89fb8ea6e39960af8c072633c7cf73c849e6 | [
"MIT"
] | null | null | null | import unittest
import json
from api import create_app, db
BASE_URL = '/api/v0.1/entries'
GOOD_ENTRY = {
'id': 1,
'customer': 'good_customer',
'hourly_rate': 50,
'length': 60
}
BAD_ENTRY = {
'id': 1,
'customer': 'bad_customer',
'hourly_rate': 'twenty',
'length': 60
}
class EntryTestCase(unittest.TestCase):
"""Test cases for Entry api resource"""
def setUp(self):
self.app = create_app('testing')
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
self.client = self.app.test_client()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def test_get_all(self):
self.client.post(BASE_URL, data=GOOD_ENTRY)
rv = self.client.get(BASE_URL)
json_rv = json.loads(rv.data.decode('utf-8'))
self.assertEqual(rv.status_code, 200)
self.assertEqual(len(json_rv), 1)
self.assertEqual(GOOD_ENTRY, json_rv[0])
def test_get_one(self):
self.client.post(BASE_URL, data=GOOD_ENTRY)
rv = self.client.get(BASE_URL+'/1')
json_rv = json.loads(rv.data.decode('utf-8'))
self.assertEqual(rv.status_code, 200)
self.assertEqual(GOOD_ENTRY, json_rv)
def test_get_all_is_empty(self):
rv = self.client.get(BASE_URL)
json_rv = json.loads(rv.data.decode('utf-8'))
self.assertEqual(rv.status_code, 200)
self.assertEqual(json_rv, [])
def test_get_one_not_exist(self):
rv = self.client.get(BASE_URL+'/1')
self.assertEqual(rv.status_code, 404)
def test_post(self):
rv = self.client.post(BASE_URL, data=GOOD_ENTRY)
json_rv = json.loads(rv.data.decode('utf-8'))
self.assertEqual(rv.status_code, 201)
self.assertEqual(GOOD_ENTRY, json_rv)
def test_post_invalid_params(self):
rv = self.client.post(BASE_URL, data=BAD_ENTRY)
self.assertEqual(rv.status_code, 400)
def test_put(self):
rv = self.client.post(BASE_URL, data=GOOD_ENTRY)
entry = json.loads(rv.data.decode('utf-8'))
entry['customer'] = 'modified_customer'
rv = self.client.put(BASE_URL+'/1', data=entry)
json_rv = json.loads(rv.data.decode('utf-8'))
self.assertEqual(rv.status_code, 201)
self.assertEqual(entry['customer'], json_rv['customer'])
def test_put_invalid_params(self):
self.client.post(BASE_URL, data=GOOD_ENTRY)
rv = self.client.put(BASE_URL+'/1', data=BAD_ENTRY)
self.assertEqual(rv.status_code, 400)
def test_put_not_exist(self):
rv = self.client.post(BASE_URL)
def test_delete(self):
self.client.post(BASE_URL, data=GOOD_ENTRY)
rv = self.client.delete(BASE_URL+'/1')
self.assertEqual(rv.status_code, 204)
def test_delete_not_exist(self):
rv = self.client.delete(BASE_URL + '/1')
self.assertEqual(rv.status_code, 404)
def test_overview(self):
self.client.post(BASE_URL, data=GOOD_ENTRY)
rv = self.client.get(BASE_URL+'/overview')
json_rv = json.loads(rv.data.decode('utf-8'))
self.assertEqual(rv.status_code, 200)
self.assertEqual(json_rv[0]['customer'], GOOD_ENTRY['customer'])
self.assertEqual(json_rv[0]['total_length'], GOOD_ENTRY['length'])
self.assertEqual(json_rv[0]['total_due'], GOOD_ENTRY['hourly_rate'])
| 33.125 | 76 | 0.640929 |
b297bf104c1010ccfd8dfd09abd42f129775093f | 23,409 | py | Python | 3.py | znuxor/adventofcode2016 | 43c1da30cf3b0bca0d9daeddd5692ce2787b544a | [
"BSD-3-Clause"
] | null | null | null | 3.py | znuxor/adventofcode2016 | 43c1da30cf3b0bca0d9daeddd5692ce2787b544a | [
"BSD-3-Clause"
] | null | null | null | 3.py | znuxor/adventofcode2016 | 43c1da30cf3b0bca0d9daeddd5692ce2787b544a | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
theInput = """785 516 744
272 511 358
801 791 693
572 150 74
644 534 138
191 396 196
860 92 399
233 321 823
720 333 570
308 427 572
246 206 66
156 261 595
336 810 505
810 210 938
615 987 820
117 22 519
412 990 256
405 996 423
55 366 418
290 402 810
313 608 755
740 421 321
255 322 582
990 174 658
609 818 360
565 831 87
146 94 313
895 439 866
673 3 211
517 439 733
281 651 582
601 711 257
467 262 375
33 52 584
281 418 395
278 438 917
397 413 991
495 306 757
232 542 800
686 574 729
101 642 506
785 898 932
975 924 106
889 792 114
287 901 144
586 399 529
619 307 456
287 508 88
159 175 190
195 261 148
348 195 270
905 600 686
847 396 680
59 421 879
969 343 600
969 361 585
95 115 209
512 831 395
172 774 662
372 396 290
957 281 445
745 525 297
489 630 225
81 138 18
694 114 404
764 196 383
607 861 94
896 92 140
786 862 123
389 449 298
795 339 780
863 507 892
589 850 759
273 645 371
368 884 486
637 553 423
391 630 950
442 950 581
383 650 712
538 844 405
353 261 544
682 60 336
750 308 698
177 369 643
479 919 137
482 598 184
275 726 55
139 874 850
456 195 839
385 766 205
561 751 249
397 764 714
508 856 876
478 410 12
686 230 267
876 247 272
160 436 673
466 798 278
487 839 773
754 780 900
45 983 801
800 595 188
523 408 239
269 609 216
745 692 237
15 588 840
702 583 298
707 150 859
835 750 375
211 754 368
892 434 152
521 659 592
683 573 904
902 544 412
718 218 502
379 227 292
482 87 780
903 433 382
223 196 369
824 588 734
342 396 279
164 561 918
409 841 918
893 409 204
33 435 169
858 423 74
134 797 255
517 881 109
466 373 193
379 180 973
620 467 941
260 512 298
993 461 89
111 986 990
946 668 987
26 65 110
223 55 372
235 103 473
288 244 964
343 199 25
62 213 984
602 117 311
624 142 356
65 130 248
709 95 376
316 897 723
420 840 349
159 460 208
385 445 929
408 13 791
149 92 682
791 253 440
870 196 395
651 347 49
738 362 536
392 226 485
683 642 938
332 890 393
954 394 971
279 217 309
610 429 747
588 219 959
840 565 791
671 624 380
384 426 485
407 323 226
780 290 428
539 41 571
455 267 306
48 607 250
432 567 400
851 507 477
853 456 923
615 416 838
245 496 353
253 325 926
159 716 989
488 216 473
808 222 742
395 178 798
514 383 732
478 845 728
508 486 4
230 643 35
151 298 584
123 906 576
583 682 294
580 605 784
624 517 984
911 778 745
9 897 325
913 357 501
27 221 249
798 669 614
824 777 397
749 461 304
734 769 1
447 543 306
454 200 19
551 134 674
562 329 665
352 188 281
808 151 622
834 255 648
352 199 340
429 182 121
585 223 382
524 977 225
520 156 532
827 929 419
429 175 759
284 376 877
312 548 751
571 507 529
390 503 483
710 1 146
938 421 582
975 981 186
118 771 531
328 490 638
452 743 750
511 772 242
957 850 177
669 750 665
975 296 664
228 35 159
763 347 650
752 315 557
366 530 294
828 154 645
730 388 763
744 298 774
459 508 375
449 485 748
537 819 907
526 259 551
773 890 650
523 839 473
645 928 485
333 109 115
403 952 399
229 50 606
377 900 212
693 731 399
682 103 579
441 764 471
481 114 267
196 567 591
353 495 798
436 348 30
794 88 526
926 411 524
1 862 754
839 440 848
839 458 109
961 799 930
944 692 853
168 520 788
579 920 687
32 930 283
575 759 747
857 705 926
842 674 925
233 163 29
544 409 719
266 643 767
315 323 56
754 135 658
99 757 569
818 832 207
296 602 519
316 371 301
409 879 747
765 696 151
960 836 689
526 564 790
33 954 343
548 203 379
545 797 622
550 122 105
606 538 12
686 434 102
595 820 249
642 215 221
120 703 124
972 440 214
444 544 447
963 225 373
904 628 271
733 109 374
193 673 588
446 724 945
246 771 901
389 900 339
331 323 756
245 428 969
565 457 539
977 743 742
26 199 543
960 804 405
795 914 721
454 695 816
984 422 849
437 495 803
237 106 58
221 442 834
638 278 21
697 880 830
818 953 849
276 335 944
152 650 953
232 972 23
675 991 179
741 579 408
164 741 285
682 156 113
71 607 759
740 692 644
284 229 308
681 114 133
961 232 394
214 653 533
240 863 332
115 651 664
396 356 477
308 220 134
283 505 569
286 400 234
413 830 734
534 877 619
293 562 171
862 216 186
819 427 63
491 121 321
139 108 142
438 39 219
345 120 486
367 91 482
400 61 605
780 858 434
854 188 478
141 726 62
600 904 292
312 328 103
648 896 200
304 299 382
372 325 229
625 114 513
95 742 875
432 99 818
510 731 863
353 520 495
501 335 400
411 187 358
612 274 381
658 586 774
908 858 876
162 722 881
604 277 772
677 484 369
964 772 239
973 618 388
463 799 264
262 49 691
800 816 875
827 820 394
828 682 576
571 670 724
322 910 202
12 72 856
529 771 829
520 830 38
796 154 681
662 160 750
193 314 633
772 925 453
769 769 427
318 182 338
552 366 505
82 205 468
486 218 352
542 633 640
612 625 879
69 715 867
233 571 479
818 703 639
866 989 856
285 504 265
981 758 773
920 716 904
698 390 977
336 1 838
563 391 169
692 87 692
17 75 754
691 100 143
605 754 711
844 724 864
261 457 167
640 655 371
554 294 874
777 541 528
902 595 406
774 309 254
322 721 257
638 883 617
278 793 525
779 669 120
144 539 722
106 533 242
187 925 743
221 863 490
284 899 481
186 82 103
102 143 562
306 494 540
352 574 239
885 218 247
551 750 123
859 634 206
391 513 363
361 608 410
390 303 93
353 111 592
472 450 724
395 507 621
494 19 266
184 416 881
330 402 821
999 82 370
613 165 722
572 141 978
361 202 671
975 376 474
878 445 216
925 529 713
499 522 338
891 315 749
712 539 290
382 388 479
806 394 342
273 56 594
213 3 226
359 52 693
637 612 601
792 336 253
223 380 699
189 101 265
812 297 699
635 255 739
885 653 957
165 873 646
883 444 400
982 789 89
6 922 192
990 310 109
159 595 656
884 640 514
876 44 671
288 569 864
108 255 977
237 819 178
417 923 144
231 444 375
452 951 241
947 724 475
569 243 481
646 678 7
282 474 921
830 520 36
961 461 957
333 955 876
359 778 909
128 276 70
914 961 185
606 942 453
373 323 614
270 170 447
745 480 454
499 649 95
468 127 922
436 722 121
202 773 971
307 127 21
11 122 90
305 54 93
266 543 113
931 735 706
931 480 683
306 433 158
155 35 379
343 401 321
880 477 516
226 996 282
778 531 528
722 313 162
975 489 594
406 312 635
106 191 147
180 731 20
249 869 140
336 359 426
266 580 403
569 702 587
740 913 549
197 372 292
585 964 683
340 532 249
592 588 910
280 78 824
675 892 101
642 718 222
393 359 157
714 442 999
851 425 954
487 545 408
504 759 191
509 179 626
774 859 455
335 476 523
573 622 288
518 561 504
812 100 602
433 455 676
565 453 112
282 266 523
642 508 440
558 512 102
109 685 128
291 903 221
254 370 275
300 398 431
341 809 383
622 948 79
813 961 308
972 451 601
390 877 719
988 448 275
184 229 542
902 307 761
587 575 909
442 648 331
424 98 620
512 106 578
411 219 614
577 294 104
81 916 468
84 842 287
96 261 678
34 323 226
943 321 29
906 619 258
924 503 215
929 149 431
56 505 511
876 769 999
994 714 980
416 495 355
79 265 420
37 917 286
53 782 558
868 327 59
926 27 398
704 348 370
773 909 356
969 799 551
282 138 448
808 51 437
417 277 372
806 291 537
818 510 460
945 372 38
127 191 422
100 287 753
341 510 391
317 252 884
629 201 567
164 10 560
632 205 370
353 891 990
609 391 12
889 564 990
74 820 241
356 636 389
309 232 292
654 294 199
45 226 362
645 308 329
955 891 186
180 78 115
842 938 141
141 179 159
401 227 573
372 73 681
562 216 682
184 526 998
530 450 357
296 812 233
398 287 530
613 539 372
523 719 554
377 735 429
854 319 362
445 828 221
506 485 402
519 603 250
490 421 819
638 204 983
664 585 407
434 503 124
512 551 153
135 449 30
673 10 513
682 45 265
32 44 498
168 415 698
151 821 711
179 682 145
800 471 326
376 893 698
885 523 390
992 49 159
949 8 59
83 47 107
871 46 660
610 954 892
352 956 637
12 139 444
517 748 733
502 731 354
368 754 687
197 759 584
292 25 928
197 319 514
359 824 99
458 827 546
681 543 197
160 492 603
634 82 455
456 96 53
399 94 836
702 2 814
614 422 467
161 290 252
506 605 591
8 454 407
46 544 489
42 491 477
772 602 767
359 465 769
970 360 114
959 552 83
945 581 284
26 314 286
153 708 707
444 681 830
400 65 430
22 993 185
327 525 125
321 665 106
538 632 959
552 220 966
17 787 5
561 309 865
997 652 785
678 924 297
772 290 460
322 347 473
811 393 92
283 398 625
349 50 528
385 403 544
404 671 204
430 214 286
798 480 219
430 440 811
240 249 442
223 510 411
590 18 592
468 166 556
542 165 708
93 12 480
893 355 601
822 348 850
431 606 256
367 819 690
188 247 644
766 199 514
384 469 416
412 520 459
261 326 646
746 533 31
972 788 664
465 548 470
257 371 412
633 703 817
525 26 466
6 667 539
532 692 356
891 468 602
709 24 599
275 449 2
674 471 289
683 549 57
177 917 270
954 311 715
991 921 707
115 946 6
745 615 446
646 288 148
725 333 588
933 915 326
828 947 286
350 59 117
598 98 286
436 127 91
461 223 198
334 167 679
506 86 803
254 237 989
878 248 371
416 757 398
721 841 757
761 303 973
24 76 928
749 280 886
194 695 42
134 261 752
134 557 727
345 367 861
380 87 425
685 424 723
17 738 451
902 886 569
920 272 125
239 222 797
361 951 767
273 835 197
696 235 427
247 212 922
706 389 739
480 893 290
877 177 494
450 864 281
392 164 313
799 233 293
416 168 35
860 290 4
989 284 124
710 88 120
431 307 526
515 417 528
442 400 566
108 858 371
47 472 519
147 627 386
644 481 315
168 838 337
675 409 29
130 117 449
959 401 512
963 416 667
729 166 375
843 452 322
749 325 88
978 850 511
91 789 818
993 552 510
741 512 45
836 644 865
136 851 903
711 818 984
933 760 333
461 66 945
285 198 321
726 577 317
952 421 2
278 961 835
995 134 148
805 999 760
542 731 575
657 754 721
135 43 343
755 179 318
372 24 646
577 194 595
277 7 440
530 48 416
257 54 634
772 302 492
789 397 21
532 270 499
145 511 583
600 286 402
628 449 621
577 588 199
485 965 239
765 760 422
709 284 676
962 672 786
760 716 362
511 254 53
626 96 383
488 316 340
19 256 733
680 798 260
693 578 908
810 216 783
485 703 650
965 741 152
44 544 334
880 702 451
887 581 132
476 77 741
661 24 435
858 68 607
943 416 836
936 334 662
5 397 348
452 838 182
801 89 369
781 853 284
969 23 717
482 493 611
560 483 394
221 642 492
641 393 428
491 752 98
710 791 437
615 198 656
146 646 943
218 385 132
934 209 589
863 299 513
941 624 167
648 514 553
724 157 441
389 733 241
236 109 421
607 816 536
363 877 317
508 493 332
782 929 79
535 607 463
877 32 399
637 626 172
511 865 972
560 916 928
599 325 80
809 477 224
724 60 279
524 454 262
960 517 994
216 42 880
969 487 190
977 329 652
916 539 696
271 581 76
660 74 681
768 761 323
108 821 440
224 478 560
373 567 614
417 716 566
178 155 529
994 670 562
987 621 375
161 498 922
527 843 478
495 975 788
528 11 567
713 744 575
268 746 35
802 53 869
789 717 381
437 703 871
177 220 104
638 684 79
807 535 71
525 978 321
576 696 351
928 572 83
414 437 25
75 371 320
338 89 327
376 90 239
363 330 126
12 260 210
284 21 356
403 54 748
551 49 530
530 461 249
640 450 399
153 754 393
548 774 958
602 773 906
417 11 377
188 879 740
486 105 649
426 929 107
848 677 563
913 728 646
700 116 390
148 425 782
564 335 839
584 652 155
707 887 518
489 250 857
979 726 399
113 305 420
402 396 742
479 99 950
753 425 677
88 533 246
804 138 554
76 734 294
472 550 372
415 621 525
76 617 903
821 145 901
876 539 35
91 745 637
871 604 106
811 466 729
694 153 573
100 735 306
660 640 817
927 55 814
852 30 289
741 33 898
193 57 636
260 208 711
172 215 152
790 262 520
92 511 437
726 622 89
709 848 318
269 960 557
940 814 793
286 59 993
529 6 870
415 58 850
578 13 524
261 258 423
695 247 290
512 229 270
485 271 272
118 461 3
757 679 808
830 886 324
913 315 870
414 229 764
386 567 738
32 657 59
336 169 14
821 494 667
815 606 674
20 654 529
482 674 49
476 321 512
661 466 229
869 974 565
205 686 438
466 218 494
567 519 761
257 658 648
546 491 467
102 526 542
542 949 126
608 999 976
867 666 798
421 801 941
825 589 335
871 93 179
491 670 303
464 256 249
318 650 322
168 807 391
513 5 179
770 8 127
960 9 82
561 661 885
176 670 865
468 382 20
811 732 457
709 856 356
713 378 649
306 510 409
963 269 649
988 749 782
208 173 181
679 734 178
884 870 45
763 290 80
228 495 689
736 653 771
325 948 972
985 132 914
770 859 360
382 859 755
781 866 681
922 20 119
628 584 547
584 262 320
62 407 277
831 531 304
979 31 842
194 538 646
77 61 758
245 247 620
175 298 876
315 121 893
185 404 558
222 359 367
901 873 23
109 560 553
819 848 567
509 184 809
188 194 46
405 255 773
333 734 547
283 750 154
115 220 406
551 373 358
851 505 478
961 847 160
661 295 417
489 136 814
192 307 866
976 763 437
255 964 24
786 900 454
727 560 520
814 169 504
882 573 524
550 409 236
567 647 258
155 576 474
508 455 921
718 197 9
331 356 917
344 78 748
204 6 937
187 83 648
138 81 913
314 972 914
286 971 4
677 344 702
326 452 163
407 131 576
560 351 137
701 839 354
475 503 263
606 504 651
919 601 112
709 224 732
714 184 103
261 554 192
766 381 290
388 784 853
447 869 923
504 124 571
923 643 251
323 679 152
847 477 171
796 368 649
80 716 799
771 677 294
270 364 957
253 591 959
17 756 22
121 466 617
401 838 752
350 604 913
393 811 828
646 949 940
328 230 516
794 443 695
136 429 579
657 140 613
803 177 821
829 564 440
560 469 853
961 693 979
382 661 84
630 180 995
626 353 575
616 502 687
264 223 764
64 507 569
575 427 662
619 807 506
663 203 959
978 775 783
317 749 481
3 581 875
320 828 793
317 838 107
671 603 282
524 581 326
619 728 57
91 937 198
182 353 260
226 759 244
140 153 149
387 732 239
427 761 138
339 447 421
278 439 647
82 135 839
824 513 865
117 310 825
838 670 58
183 82 130
212 209 749
118 151 861
978 275 262
273 747 689
916 739 878
689 270 339
358 268 750
966 97 753
161 685 813
174 396 866
70 861 132
866 117 790
737 201 723
209 85 468
821 948 557
182 374 327
912 671 412
444 592 746
567 613 415
561 75 393
631 428 740
976 362 326
504 171 911
753 886 430
738 680 494
839 371 481
979 537 330
333 886 216
669 357 476
107 186 484
302 327 78
400 231 541
159 873 75
744 684 46
592 363 80
944 670 496
811 292 699
545 959 949
299 552 632
683 94 14
418 603 646
370 781 758
364 236 619
107 837 860
106 409 344
492 713 36
398 460 375
730 569 497
733 409 499
577 349 19
652 182 824
768 822 363
207 862 535
911 344 372
868 814 640
68 792 781
674 787 205
182 852 241
725 665 43
187 852 838
615 856 418
632 277 593
654 386 27
805 801 218
328 416 226
76 206 209
81 209 660
31 231 523
569 910 110
815 106 675
739 830 604
534 724 869
379 460 782
549 270 934
324 105 218
841 218 205
739 259 232
572 504 356
66 459 486
504 66 344
873 117 119
261 245 916
621 157 915
220 648 409
630 192 549
440 773 415
816 468 543
475 374 845
446 219 487
999 434 835
304 444 775
698 203 348
715 544 424
206 628 403
760 782 86
651 599 486
973 404 562
614 229 172
396 460 782
434 339 349
88 790 818
925 685 952
922 381 967
723 870 704
94 145 400
308 686 530
288 716 629
867 678 982
554 414 584
942 429 931
608 828 977
599 663 620
867 330 419
200 740 588
225 213 673
146 675 372
302 792 589
299 948 809
16 942 797
262 796 418
591 828 555
532 403 619
694 289 960
801 532 203
918 746 870
127 617 829
350 179 938
326 510 128
432 714 226
948 786 102
866 664 162
302 115 584
714 623 211
829 582 543
173 321 260
47 284 919
133 35 880
614 25 827
768 490 998
825 502 252
275 750 219
716 140 453
758 864 541
563 352 768
197 800 911
670 540 302
307 237 726
76 667 665
322 617 207
118 298 820
283 548 228
381 502 797
990 491 579
250 474 670
784 55 283
729 933 464
255 765 347
807 818 198
594 601 446
374 725 121
591 760 424
480 456 809
974 408 234
876 153 811
540 263 238
535 68 556
21 293 527
613 39 765
761 255 406
596 279 414
772 451 527
258 554 169
958 697 445
127 9 107
607 445 305
695 435 396
487 224 873
671 199 792
739 37 85
859 744 284
947 299 230
755 817 226
827 207 658
882 709 567
303 509 790
73 262 270
917 112 21
949 277 281
559 557 918
668 875 906
308 669 543
479 563 879
311 317 834
534 751 50
275 774 278
200 642 690
293 196 466
780 804 135
866 162 122
916 783 58
631 477 70
878 375 67
425 621 4
826 161 926
147 884 139
717 936 799
140 703 405
284 168 89
144 738 315
418 417 564
439 357 820
73 113 702
163 550 647
144 780 984
34 592 770
696 167 452
666 541 973
314 622 567
986 92 636
301 171 1
812 146 637
673 395 895
583 283 510
380 482 907
953 189 148
513 372 455
923 505 387
525 45 877
630 816 797
119 776 276
540 139 396
560 62 596
502 97 876
431 977 533
867 782 484
844 409 190
46 63 700
102 972 421
110 987 312
58 543 365
657 248 64
613 658 340
605 875 408
746 653 401
898 980 5
449 371 108
496 690 91
672 657 184
816 48 744
121 109 689
849 88 201
982 268 418
569 193 589
630 267 676
690 453 47
496 369 792
677 412 833
95 316 802
957 774 647
966 842 861
233 737 194
260 605 424
266 274 310
874 365 762
411 87 704
477 356 739
554 598 454
107 540 64
641 631 470
444 387 133
277 704 401
226 869 475
299 986 127
831 706 60
899 442 111
414 281 804
579 702 597
587 807 932
755 649 537
844 439 295
979 235 417
821 852 719
546 59 716
607 889 8
851 534 334
926 234 50
184 710 286
152 872 638
132 517 712
21 970 152
801 701 104
438 845 30
966 454 106
37 894 741
276 896 923
274 6 535
339 346 129
141 566 488
386 418 551
160 69 822
586 589 634
443 633 319
466 944 856
704 6 944
438 937 229
47 201 738
283 102 389
305 168 844
760 854 880
827 903 750
612 138 163
658 57 491
622 91 900
233 144 773
113 85 645
399 129 190
497 49 481
85 698 906
604 146 968
653 767 92
130 260 706
288 396 267
268 625 621
6 283 805
992 917 363
985 716 887
900 677 593
892 668 406
40 259 733
572 860 510
154 225 479
575 750 809
938 312 243
36 294 461
973 150 452
226 270 159
66 81 520
247 346 496
58 864 207
395 140 524
438 901 717
491 838 807
85 203 859
541 931 704
764 26 272
912 250 107
512 278 182
910 89 345
242 826 85
687 889 267
112 610 93
445 882 337
532 746 381
689 526 854
696 858 351
778 798 801
255 8 362
200 45 44
203 50 342
520 236 135
228 35 196
421 236 120
689 653 418
692 773 233
898 438 334
32 821 511
419 55 31
449 776 496
617 857 815
691 530 996
105 959 469
403 371 317
309 394 366
207 449 84
902 419 633
361 480 733
987 318 213
722 531 649
600 600 12
954 968 654
436 429 111
169 205 606
331 227 610
943 543 304
146 666 412
998 544 402
459 475 58
269 455 55
388 98 38
243 675 858
172 732 707
188 120 313
959 887 640
719 968 101
752 83 547
477 517 337
908 620 289
869 878 321
738 33 20
817 227 913
469 260 898
138 329 593
23 459 967
159 339 524
681 669 674
216 619 673
740 360 420
302 875 950
539 759 635
430 548 612
239 841 169
323 702 113
374 615 255
457 851 958
721 40 270
495 842 808
745 939 343
484 408 610
554 739 576
539 695 49
535 745 493
117 88 444
554 939 3
665 470 581
133 876 580
268 430 703
436 883 249
448 823 862
3 218 505
85 944 264
81 994 367
673 488 484
506 901 694
847 914 612
426 423 29
971 214 741
589 221 732
20 853 541
995 783 448
983 854 858
446 523 27
418 52 118
73 566 122
438 74 361
354 136 981
399 183 794
888 816 366
863 586 878
388 254 979
430 735 19
922 536 47
750 686 60
545 836 683
828 748 301
678 297 546
493 567 351
514 643 523
58 191 768
418 778 387
273 925 613
651 160 330
859 215 624
750 876 36
138 836 637
906 550 568
46 520 876
928 79 632
400 610 906
380 471 22
163 624 931
822 507 661
49 89 414
874 593 476
958 895 660
910 783 691
341 147 325
751 767 297
194 81 335
633 808 345
726 290 602
550 102 207
345 194 542
217 68 103
290 441 451
239 464 407
987 401 195
300 341 313
797 409 430
471 607 441
82 153 439
511 578 399
634 593 414
630 113 776
448 679 413
346 784 577
320 851 645
584 584 73
603 742 196
165 758 361
624 23 262
626 90 435
943 647 702
446 598 392
993 579 904
41 608 924
979 209 371
654 642 136
776 518 520
787 369 444
518 543 529
824 974 110
415 582 629
651 356 869
903 347 977
345 269 581
549 840 613
433 209 891
407 630 900
509 95 409
510 103 362
194 69 754"""
# Split the puzzle input into rows, then each row into its three fields.
theInput = theInput.split('\n')
theInput2 = [row.split(' ') for row in theInput]
totalPossible = 0
for triangle in theInput2:
    triangle.sort()
    print(triangle)
    # Rows that began with a space split into four fields, the first empty.
    sides = triangle if len(triangle) == 3 else triangle[1:]
    a, b, c = (int(s) for s in sides)
    # A valid triangle requires every pair of sides to exceed the third.
    if a + b > c and a + c > b and b + c > a:
        totalPossible += 1
print(totalPossible)
| 14.127339 | 47 | 0.622795 |
922f343c6340915633952061b4a90f41e3b900cc | 2,986 | py | Python | utils/TrainGAN.py | ynakaDream/Deep-Learning-GANs | 2e00405079c131245f4dd23eb494a27a2b12598d | [
"MIT"
] | 4 | 2019-01-14T04:38:51.000Z | 2020-02-13T20:38:10.000Z | utils/TrainGAN.py | ynakaDream/Deep-Learning-GANs | 2e00405079c131245f4dd23eb494a27a2b12598d | [
"MIT"
] | null | null | null | utils/TrainGAN.py | ynakaDream/Deep-Learning-GANs | 2e00405079c131245f4dd23eb494a27a2b12598d | [
"MIT"
] | null | null | null | import torch
class TrainGAN:
    """Training harness for a GAN.

    Wires up the discriminator/generator networks, their optimizers and the
    loss function, then performs the alternating update steps.
    """

    def __init__(self, batch_size):
        # Train on GPU when available, otherwise fall back to CPU.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        print('DEVICE:', self.device)
        self.batch_size = batch_size
        self.noise = None
        self.real_target = None
        self.fake_target = None
        self.fake_image = None  # kept for backward compatibility
        # Bug fix: the update methods use `self.fake_img`, but __init__ only
        # created `self.fake_image` (typo), so the attribute did not exist
        # before the first discriminator update.
        self.fake_img = None
        self.optimizerD = None
        self.optimizerG = None
        self.criterion = None
        self.netD = None
        self.netG = None
        # Fixed latent vectors so output_images() tracks progress on the
        # same noise samples across epochs.
        self.fixed_noise = torch.randn(self.batch_size, 100, 1, 1, device=self.device)
        self.target_setting()

    def target_setting(self):
        '''
        Define real(=1) and fake(=0) target tensors used by the loss.
        :return: None
        '''
        self.real_target = torch.full((self.batch_size,), 1., device=self.device)
        self.fake_target = torch.full((self.batch_size,), 0., device=self.device)

    def networks(self, netD, netG):
        '''
        Define neural networks for discriminator and generator
        :param netD: neural network for the discriminator
        :param netG: neural network for the generator
        :return: None
        '''
        self.netD = netD.to(self.device)
        self.netG = netG.to(self.device)

    def optimizers(self, optD, optG):
        '''
        Define optimizers for discriminator and generator
        :param optD: optimizer for the discriminator
        :param optG: optimizer for the generator
        :return: None
        '''
        self.optimizerD = optD
        self.optimizerG = optG

    def loss(self, criterion):
        '''
        Define loss function
        :param criterion: loss function
        :return: None
        '''
        self.criterion = criterion

    def update_discriminator(self, real_img):
        '''
        Update the discriminator on one batch of real and one batch of fake
        images.
        :param real_img: batch of real training images
        :return: errD, D_x, D_G_z1 -- total discriminator loss, mean output
                 on the real batch, and mean output on the fake batch
        '''
        real_img = real_img.to(self.device)
        noise = torch.randn(self.batch_size, 100, 1, 1, device=self.device)
        self.netD.zero_grad()
        output = self.netD(real_img)
        errD_real = self.criterion(output, self.real_target)
        errD_real.backward()
        # Bug fix: D(x) must be measured on the *real* batch; previously it
        # was computed after `output` had been overwritten with the
        # fake-batch predictions, so D_x duplicated D_G_z1.
        D_x = output.mean().item()
        self.fake_img = self.netG(noise)
        # detach() keeps this backward pass from reaching the generator.
        output = self.netD(self.fake_img.detach())
        errD_fake = self.criterion(output, self.fake_target)
        errD_fake.backward()
        errD = errD_real + errD_fake
        D_G_z1 = output.mean().item()
        self.optimizerD.step()
        return errD, D_x, D_G_z1

    def update_generator(self):
        '''
        Update the generator using the fake batch from the last
        discriminator step.
        :return: errG, D_G_z2 -- generator loss and mean discriminator
                 output on the fake batch after the D update
        '''
        self.netG.zero_grad()
        output = self.netD(self.fake_img)
        # The generator is rewarded when the discriminator labels its
        # output as real.
        errG = self.criterion(output, self.real_target)
        errG.backward()
        D_G_z2 = output.mean().item()
        self.optimizerG.step()
        return errG, D_G_z2

    def output_images(self):
        '''
        Generate images from the fixed noise vectors (for visualization).
        :return: batch of generated images
        '''
        # No gradients are needed for visualization output.
        with torch.no_grad():
            fake_image = self.netG(self.fixed_noise)
        return fake_image
| 27.648148 | 86 | 0.593101 |
f242fb6989fed1087788efa2c6a97dfc7b99679a | 349 | py | Python | tests/conftest.py | City-of-Helsinki/helsinki-profile-gdpr-api | c2c2aca8847b32754bda49c1c8c03052ec40703d | [
"MIT"
] | 2 | 2021-03-23T08:19:58.000Z | 2021-04-07T13:31:55.000Z | tests/conftest.py | City-of-Helsinki/helsinki-profile-gdpr-api | c2c2aca8847b32754bda49c1c8c03052ec40703d | [
"MIT"
] | 2 | 2021-03-10T22:03:40.000Z | 2021-03-15T14:07:27.000Z | tests/conftest.py | City-of-Helsinki/helsinki-profile-gdpr-api | c2c2aca8847b32754bda49c1c8c03052ec40703d | [
"MIT"
] | null | null | null | import pytest
from rest_framework.test import APIClient
from tests.factories import ProfileFactory, UserFactory
@pytest.fixture(autouse=True)
def autouse_db(db):
    """Automatically request the `db` fixture for every test.

    Depending on `db` (presumably pytest-django's database fixture — TODO
    confirm) grants all tests database access without declaring it
    explicitly.
    """
    pass
@pytest.fixture
def api_client():
    """Provide a fresh DRF test client (rest_framework APIClient)."""
    client = APIClient()
    return client
@pytest.fixture
def user():
    """Create and return a user instance via its factory."""
    new_user = UserFactory()
    return new_user
@pytest.fixture
def profile():
    """Create and return a profile instance via its factory."""
    new_profile = ProfileFactory()
    return new_profile
| 13.96 | 55 | 0.750716 |
614d6531c2bf98e31b3bee63514302b28448362f | 8,645 | py | Python | large_cohort/flag_utils_test.py | amitport/google_research_federated | b0dea245eb484f731f28d80e460795816d938b16 | [
"BSD-3-Clause"
] | null | null | null | large_cohort/flag_utils_test.py | amitport/google_research_federated | b0dea245eb484f731f28d80e460795816d938b16 | [
"BSD-3-Clause"
] | null | null | null | large_cohort/flag_utils_test.py | amitport/google_research_federated | b0dea245eb484f731f28d80e460795816d938b16 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2018, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
from absl import flags
from absl.testing import parameterized
import tensorflow as tf
from large_cohort import flag_utils
FLAGS = flags.FLAGS
TEST_CLIENT_FLAG_PREFIX = 'test_client'
TEST_SERVER_FLAG_PREFIX = 'test_server'
@contextlib.contextmanager
def flag_sandbox(flag_value_dict):
  """Context manager that temporarily overrides absl flag values.

  Args:
    flag_value_dict: A dict mapping flag names to temporary values.

  Yields:
    None. On exit, the previous flag values are restored and the flags are
    marked unparsed again — even if the managed block raises.
  """

  def _set_flags(flag_dict):
    for name, value in flag_dict.items():
      FLAGS[name].value = value

  # Store the current values and override with the new.
  preserved_value_dict = {
      name: FLAGS[name].value for name in flag_value_dict
  }
  _set_flags(flag_value_dict)
  # Bug fix: restoration previously ran only on normal exit, so an
  # exception inside the `with` body leaked the overridden flag values
  # into subsequent tests.
  try:
    yield
  finally:
    # Restore the saved values.
    for name in preserved_value_dict:
      FLAGS[name].unparse()
    _set_flags(preserved_value_dict)
def setUpModule():
  """Define the optimizer flags once for the whole test module."""
  # Create flags here to ensure duplicate flags are not created.
  flag_utils.define_optimizer_flags(TEST_SERVER_FLAG_PREFIX)
  flag_utils.define_optimizer_flags(TEST_CLIENT_FLAG_PREFIX)
# Create a list of `(test name, optimizer name flag value, optimizer class)`
# for parameterized tests.
_OPTIMIZERS_TO_TEST = [
    (opt_name, opt_name, opt_cls)
    for opt_name, opt_cls in flag_utils._SUPPORTED_OPTIMIZERS.items()
]
class FlagUtilsTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for optimizer-flag creation and hparam-dict pruning.

  Note: these tests mutate the global absl FLAGS object; `flag_sandbox`
  is used to scope most overrides, and order of flag operations matters.
  """

  def test_create_optimizer_fn_from_flags_invalid_optimizer(self):
    FLAGS['{}_optimizer'.format(TEST_CLIENT_FLAG_PREFIX)].value = 'foo'
    with self.assertRaisesRegex(ValueError, 'not a valid optimizer'):
      flag_utils.create_optimizer_fn_from_flags(TEST_CLIENT_FLAG_PREFIX)

  def test_create_optimizer_fn_with_no_learning_rate(self):
    with flag_sandbox({
        '{}_optimizer'.format(TEST_CLIENT_FLAG_PREFIX): 'sgd',
        '{}_learning_rate'.format(TEST_CLIENT_FLAG_PREFIX): None
    }):
      with self.assertRaisesRegex(ValueError, 'Learning rate'):
        flag_utils.create_optimizer_fn_from_flags(TEST_CLIENT_FLAG_PREFIX)

  def test_create_optimizer_fn_from_flags_flags_set_not_for_optimizer(self):
    with flag_sandbox({'{}_optimizer'.format(TEST_CLIENT_FLAG_PREFIX): 'sgd'}):
      # Set an Adam flag that isn't used in SGD.
      # We need to use `_parse_args` because that is the only way FLAGS is
      # notified that a non-default value is being used.
      bad_adam_flag = '{}_adam_beta_1'.format(TEST_CLIENT_FLAG_PREFIX)
      FLAGS._parse_args(
          args=['--{}=0.5'.format(bad_adam_flag)], known_only=True)
      with self.assertRaisesRegex(
          ValueError,
          r'Commandline flags for .*\[sgd\].*\'test_client_adam_beta_1\'.*'):
        flag_utils.create_optimizer_fn_from_flags(TEST_CLIENT_FLAG_PREFIX)
      # Undo the manual parse so later tests see default flag state.
      FLAGS[bad_adam_flag].unparse()

  @parameterized.named_parameters(_OPTIMIZERS_TO_TEST)
  def test_create_client_optimizer_from_flags(self, optimizer_name,
                                              optimizer_cls):
    commandline_set_learning_rate = 100.0
    with flag_sandbox({
        '{}_optimizer'.format(TEST_CLIENT_FLAG_PREFIX):
            optimizer_name,
        '{}_learning_rate'.format(TEST_CLIENT_FLAG_PREFIX):
            commandline_set_learning_rate
    }):
      custom_optimizer_fn = flag_utils.create_optimizer_fn_from_flags(
          TEST_CLIENT_FLAG_PREFIX)
      # Without an argument, the flag-set learning rate is used.
      custom_optimizer = custom_optimizer_fn()
      self.assertIsInstance(custom_optimizer, optimizer_cls)
      self.assertEqual(custom_optimizer.get_config()['learning_rate'],
                       commandline_set_learning_rate)
      # An explicit argument overrides the flag-set learning rate.
      custom_optimizer_with_arg = custom_optimizer_fn(11.0)
      self.assertIsInstance(custom_optimizer_with_arg, optimizer_cls)
      self.assertEqual(
          custom_optimizer_with_arg.get_config()['learning_rate'], 11.0)

  @parameterized.named_parameters(_OPTIMIZERS_TO_TEST)
  def test_create_server_optimizer_from_flags(self, optimizer_name,
                                              optimizer_cls):
    commandline_set_learning_rate = 100.0
    with flag_sandbox({
        '{}_optimizer'.format(TEST_SERVER_FLAG_PREFIX):
            optimizer_name,
        '{}_learning_rate'.format(TEST_SERVER_FLAG_PREFIX):
            commandline_set_learning_rate
    }):
      custom_optimizer_fn = flag_utils.create_optimizer_fn_from_flags(
          TEST_SERVER_FLAG_PREFIX)
      custom_optimizer = custom_optimizer_fn()
      self.assertIsInstance(custom_optimizer, optimizer_cls)
      self.assertEqual(custom_optimizer.get_config()['learning_rate'],
                       commandline_set_learning_rate)
      custom_optimizer_with_arg = custom_optimizer_fn(11.0)
      self.assertIsInstance(custom_optimizer_with_arg, optimizer_cls)
      self.assertEqual(custom_optimizer_with_arg.get_config()['learning_rate'],
                       11.0)

  def test_remove_unused_optimizer_flags_without_optimizer_flag(self):
    hparam_dict = collections.OrderedDict([('client_opt_fn', 'sgd'),
                                           ('client_sgd_momentum', 0.3)])
    with self.assertRaisesRegex(ValueError,
                                'The flag client_optimizer was not defined.'):
      _ = flag_utils.remove_unused_optimizer_flags('client', hparam_dict)

  def test_remove_unused_optimizer_flags_with_empty_optimizer(self):
    hparam_dict = collections.OrderedDict([('optimizer', '')])
    with self.assertRaisesRegex(
        ValueError, 'The flag optimizer was not set. '
        'Unable to determine the relevant optimizer.'):
      _ = flag_utils.remove_unused_optimizer_flags(
          prefix=None, hparam_dict=hparam_dict)

  def test_remove_unused_optimizer_flags_with_prefix(self):
    hparam_dict = collections.OrderedDict([('client_optimizer', 'sgd'),
                                           ('non_client_value', 0.1),
                                           ('client_sgd_momentum', 0.3),
                                           ('client_adam_momentum', 0.5)])
    relevant_hparam_dict = flag_utils.remove_unused_optimizer_flags(
        'client', hparam_dict)
    expected_flag_names = [
        'client_optimizer', 'non_client_value', 'client_sgd_momentum'
    ]
    self.assertCountEqual(relevant_hparam_dict.keys(), expected_flag_names)
    self.assertEqual(relevant_hparam_dict['client_optimizer'], 'sgd')
    self.assertEqual(relevant_hparam_dict['non_client_value'], 0.1)
    self.assertEqual(relevant_hparam_dict['client_sgd_momentum'], 0.3)

  def test_remove_unused_optimizer_flags_without_prefix(self):
    hparam_dict = collections.OrderedDict([('optimizer', 'sgd'), ('value', 0.1),
                                           ('sgd_momentum', 0.3),
                                           ('adam_momentum', 0.5)])
    relevant_hparam_dict = flag_utils.remove_unused_optimizer_flags(
        prefix=None, hparam_dict=hparam_dict)
    expected_flag_names = ['optimizer', 'value', 'sgd_momentum']
    self.assertCountEqual(relevant_hparam_dict.keys(), expected_flag_names)
    self.assertEqual(relevant_hparam_dict['optimizer'], 'sgd')
    self.assertEqual(relevant_hparam_dict['value'], 0.1)
    self.assertEqual(relevant_hparam_dict['sgd_momentum'], 0.3)

  def test_removal_with_standard_default_values(self):
    hparam_dict = collections.OrderedDict([('client_optimizer', 'adam'),
                                           ('non_client_value', 0),
                                           ('client_sgd_momentum', 0),
                                           ('client_adam_param1', None),
                                           ('client_adam_param2', False)])
    relevant_hparam_dict = flag_utils.remove_unused_optimizer_flags(
        'client', hparam_dict)
    expected_flag_names = [
        'client_optimizer', 'non_client_value', 'client_adam_param1',
        'client_adam_param2'
    ]
    self.assertCountEqual(relevant_hparam_dict.keys(), expected_flag_names)
    self.assertEqual(relevant_hparam_dict['client_optimizer'], 'adam')
    self.assertEqual(relevant_hparam_dict['non_client_value'], 0)
    self.assertIsNone(relevant_hparam_dict['client_adam_param1'])
    self.assertEqual(relevant_hparam_dict['client_adam_param2'], False)
if __name__ == '__main__':
  # Run the test suite when executed directly.
  tf.test.main()
| 43.442211 | 80 | 0.700868 |
804adf1c235213b9eec8c6b22d376b19b467f7d3 | 663 | py | Python | RegAndLogin/migrations/0001_initial.py | Trevelyn/steganography | 41fb3cedd56464d664f91a6c95f531da937e4e15 | [
"MIT"
] | null | null | null | RegAndLogin/migrations/0001_initial.py | Trevelyn/steganography | 41fb3cedd56464d664f91a6c95f531da937e4e15 | [
"MIT"
] | null | null | null | RegAndLogin/migrations/0001_initial.py | Trevelyn/steganography | 41fb3cedd56464d664f91a6c95f531da937e4e15 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-31 16:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial auto-generated migration: creates the HelpInstruction model."""

    # First migration of this app.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='HelpInstruction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255, null=True)),
                ('instruction', models.TextField(blank=True, max_length=1255, null=True)),
            ],
        ),
    ]
| 26.52 | 114 | 0.604827 |
d863d893cae128d95ee57fdb02f253b1afcbcee5 | 36,352 | py | Python | Ideas/SearchSpaceOptions/Segmentors_MinParams.py | genster6/Simple-Evolutionary-Exploration | 063a64b3903d560b2622d63e049939537dc0c7b1 | [
"MIT"
] | 2 | 2020-04-25T16:32:51.000Z | 2022-03-07T20:38:08.000Z | Ideas/SearchSpaceOptions/Segmentors_MinParams.py | genster6/Simple-Evolutionary-Exploration | 063a64b3903d560b2622d63e049939537dc0c7b1 | [
"MIT"
] | null | null | null | Ideas/SearchSpaceOptions/Segmentors_MinParams.py | genster6/Simple-Evolutionary-Exploration | 063a64b3903d560b2622d63e049939537dc0c7b1 | [
"MIT"
] | 1 | 2020-03-20T19:36:36.000Z | 2020-03-20T19:36:36.000Z | """ Segmentor library designed to learn how to segment images using GAs.
This library actually does not encode the GA itself; instead it just defines
the search parameters, the evaluation functions, and the fitness function (coming soon).
"""
# TODO: Research project-clean up the parameters class to reduce the search space
# TODO: Change the seed from a number to a fraction 0-1 which is scaled to image rows and columns
# TODO: Enumerate the word-based measures.
from collections import OrderedDict
import sys
import numpy as np
import skimage
from skimage import segmentation
from skimage import color
from PIL import Image
import pandas as pd # used in fitness? Can it be removed?
import logging
# List of all algorithms
algorithmspace = dict()
def runAlgo(img, groundImg, individual, returnMask=False):
    """Build the segmentor described by `individual`, run it on `img`,
    and score the resulting mask against `groundImg`.

    Returns the fitness value, or `[fitness, mask]` when returnMask=True.
    """
    logging.getLogger().info(f"Running Algorithm {individual[0]}")
    segmenter = algoFromParams(individual)
    segmentation_mask = segmenter.evaluate(img)
    logging.getLogger().info("Calculating Fitness")
    fitness = FitnessFunction(segmentation_mask, groundImg)
    return [fitness, segmentation_mask] if returnMask else fitness
def algoFromParams(individual):
    """Convert a parameter list into a segmentor instance.

    Assumes the ordering defined in the `parameters` class: the first
    entry is the algorithm code used to look up the class.
    """
    algo_code = individual[0]
    if algo_code not in algorithmspace:
        raise ValueError("Algorithm not avaliable")
    return algorithmspace[algo_code](individual)
class parameters(OrderedDict):
    """Ordered parameter vector for a segmentation algorithm.

    The instance itself is the key->value mapping; the class-level
    `descriptions` and `ranges` dicts describe the GA search space.
    """

    descriptions = dict()
    ranges = dict()
    pkeys = []

    ranges["algorithm"] = "['CT','FB','SC','WS','CV','MCV','AC']"
    descriptions["algorithm"] = "string code for the algorithm"

    descriptions["numerical_1"] = "The first multi-used variable for the numerical range 0-10000"
    ranges["numerical_1"] = "[i for i in range(0,10000)]"

    descriptions["numerical_2"] = "The second multi-used variable for the numerical range 0-10000"
    ranges["numerical_2"] = "[i for i in range(0,10000)]"

    descriptions["numerical_3"] = "The third multi-used variable for the numerical range 0-10000"
    ranges["numerical_3"] = "[i for i in range(0,10000)]"

    descriptions["numerical_4"] = "The fourth multi-used variable for the numerical range 0-10000"
    ranges["numerical_4"] = "[i for i in range(0,10000)]"

    descriptions["numerical_5"] = "The fifth multi-used variable for the numerical range 0-10000"
    ranges["numerical_5"] = "[i for i in range(0,10000)]"

    # Try to set defaults only once.
    # Current method may cause all kinds of weird problems.
    # @staticmethod
    # def __Set_Defaults__()

    def __init__(self):
        """Initialize every parameter to its default value."""
        self["algorithm"] = "None"
        self["numerical_1"] = 0.0
        self["numerical_2"] = 0.0
        self["numerical_3"] = 0.0
        self["numerical_4"] = 0.0
        self["numerical_5"] = 0.0
        self.pkeys = list(self.keys())

    def printparam(self, key):
        """Return a one-parameter summary: value, description and range."""
        return f"{key}={self[key]}\n\t{self.descriptions[key]}\n\t{self.ranges[key]}\n"

    def __str__(self):
        """Return an indexed, human-readable dump of all parameters."""
        out = ""
        for index, k in enumerate(self.pkeys):
            out += f"{index} " + self.printparam(k)
        return out

    def tolist(self):
        """Return the parameter values as a list in `pkeys` order.

        Bug fix: this previously read `self.params[key]`, but this class
        *is* the mapping (no `params` attribute), so it always raised
        AttributeError.
        """
        return [self[key] for key in self.pkeys]

    def fromlist(self, individual):
        """Load parameter values from a list ordered like `pkeys`."""
        logging.getLogger().info(f"Parsing Parameter List for {len(individual)} parameters")
        for index, key in enumerate(self.pkeys):
            self[key] = individual[index]
class segmentor(object):
    """Base class for all segmentation algorithm wrappers."""

    # String code identifying the algorithm; overridden via params by
    # subclasses.
    algorithm = ""

    def __init__(self, paramlist=None):
        # Start from default parameters, then overwrite from the supplied
        # individual (GA gene list) if one is given.
        self.params = parameters()
        if paramlist:
            self.params.fromlist(paramlist)

    def evaluate(self, im):
        # Placeholder: returns an all-zero "mask".
        # NOTE(review): shape[0:1] yields a 1-D array of length rows, while
        # subclasses return 2-D masks — possibly shape[0:2] was intended;
        # TODO confirm before changing.
        return np.zeros(im.shape[0:1])

    def __str__(self):
        # NOTE(review): relies on `self.paramindexes`, which only the
        # subclasses define; calling this on the base class itself would
        # raise AttributeError.
        mystring = f"{self.params['algorithm']} -- \n"
        for p in self.paramindexes:
            mystring += f"\t{p} = {self.params[p]}\n"
        return mystring
class ColorThreshold(segmentor):
    """Threshold one color channel between a lower and an upper bound.

    Pixels whose channel value lies inside [mn, mx] become 1; all others 0.
    """

    def __init__(self, paramlist=None):
        super(ColorThreshold, self).__init__(paramlist)
        if not paramlist:
            self.params["algorithm"] = "CT"
            self.params["numerical_1"] = 0.4
            self.params["numerical_2"] = 0.6
        self.paramindexes = ["numerical_1", "numerical_2"]

    def evaluate(self, img):
        channel_num = 1  # TODO: Need to make this a searchable parameter.
        # Pick the requested channel from color input; use gray input as-is.
        if len(img.shape) > 2:
            channel = (img[:, :, channel_num]
                       if channel_num < img.shape[2] else img[:, :, 0])
        else:
            channel = img
        pscale = np.max(channel)
        # NOTE(review): the two divisors differ (100000 vs 10000) —
        # presumably deliberate scaling of the GA search space; confirm.
        mx = round(self.params["numerical_1"] / 100000, 2) * pscale
        mn = round(self.params["numerical_2"] / 10000, 2) * pscale
        if mx < mn:
            mn, mx = mx, mn
        output = np.ones(channel.shape)
        output[channel < mn] = 0
        output[channel > mx] = 0
        return output


algorithmspace["CT"] = ColorThreshold
class TripleA (segmentor):
    # NOTE(review): this class is a verbatim duplicate of ColorThreshold,
    # registered under the code "AAA"; consider deduplicating.

    def __init__(self, paramlist=None):
        super(TripleA, self).__init__(paramlist)
        if not paramlist:
            self.params["algorithm"] = "AAA"
            self.params["numerical_1"] = 0.4
            self.params["numerical_2"] = 0.6
        self.paramindexes = ["numerical_1", "numerical_2"]

    def evaluate(self, img): #XX
        channel_num = 1 # TODO: Need to make this a searchable parameter.
        # Pick the requested channel from color input; use gray input as-is.
        if len(img.shape) > 2:
            if channel_num < img.shape[2]:
                channel = img[:, :, channel_num]
            else:
                channel = img[:, :, 0]
        else:
            channel = img
        pscale = np.max(channel)
        # NOTE(review): the two divisors differ (100000 vs 10000) —
        # presumably deliberate scaling of the GA search space; confirm.
        mx = round(self.params["numerical_1"]/100000, 2) * pscale
        mn = round(self.params["numerical_2"]/10000, 2) * pscale
        if mx < mn:
            temp = mx
            mx = mn
            mn = temp
        # 1 inside [mn, mx], 0 outside.
        output = np.ones(channel.shape)
        output[channel < mn] = 0
        output[channel > mx] = 0
        return output

algorithmspace["AAA"] = TripleA
"""
#felzenszwalb
#ONLY WORKS FOR RGB
https://scikit-image.org/docs/stable/auto_examples/segmentation/plot_segmentations.html
The felzenszwalb algorithms computes a graph based on the segmentation
Produces an oversegmentation of the multichannel using min-span tree.
Returns an integer mask indicating the segment labels
#Variables
scale: float, higher meanse larger clusters
sigma: float, std. dev of Gaussian kernel for preprocessing
min_size: int, minimum component size. For postprocessing
mulitchannel: bool, Whether the image is 2D or 3D. 2D images
are not supported at all
"""
class Felzenszwalb(segmentor):
    """Wrapper for ``skimage.segmentation.felzenszwalb`` ("FB").

    Graph-based oversegmentation via a minimum-spanning tree.
    Parameters (scaled search-space integers):
      numerical_1 -> scale, numerical_2 -> sigma, numerical_3 -> min_size.
    """

    def __doc__(self):
        myhelp = "Wrapper function for the scikit-image Felzenszwalb segmentor:"
        # Bug fix: the help text used to append random_walker's docstring
        # (copy/paste from another wrapper); append felzenszwalb's instead.
        myhelp += f" xx {skimage.segmentation.felzenszwalb.__doc__}"
        return myhelp

    def __init__(self, paramlist=None):
        super(Felzenszwalb, self).__init__(paramlist)
        if not paramlist:
            self.params["algorithm"] = "FB"
            self.params["numerical_1"] = 984
            self.params["numerical_2"] = 0.09
            self.params["numerical_3"] = 92
        self.paramindexes = ["numerical_1", "numerical_2", "numerical_3"]

    def evaluate(self, img):
        """Return the integer label mask produced by felzenszwalb."""
        multichannel = len(img.shape) > 2
        output = skimage.segmentation.felzenszwalb(
            img,
            scale=self.params["numerical_1"],
            sigma=round(self.params["numerical_2"] / 100000, 2),
            min_size=self.params["numerical_3"],
            # Bug fix: multichannel was computed but then hardcoded to True;
            # pass the detected value so grayscale inputs are handled correctly.
            multichannel=multichannel,
        )
        return output
algorithmspace["FB"] = Felzenszwalb
class Slic(segmentor):
    """Wrapper for ``skimage.segmentation.slic`` ("SC").

    k-means clustering in (color, x, y) space producing superpixel labels.
    Parameters (scaled search-space integers):
      numerical_1 -> n_segments (offset by 2),
      numerical_2 -> compactness (log scale),
      numerical_3 -> sigma (Gaussian pre-smoothing width).
    Returns a 2D/3D integer label array.
    """

    def __init__(self, paramlist=None):
        super(Slic, self).__init__(paramlist)
        if not paramlist:
            self.params["algorithm"] = "SC"
            self.params["numerical_1"] = 5
            self.params["numerical_2"] = 5
            self.params["numerical_3"] = 3
        self.paramindexes = ["numerical_1", "numerical_2", "numerical_3"]

    def evaluate(self, img):
        """Return the superpixel label mask for ``img``."""
        is_color = len(img.shape) > 2
        labels = skimage.segmentation.slic(
            img,
            n_segments=self.params["numerical_1"] + 2,
            compactness=10 ** (round(self.params["numerical_2"] / 1000, 0) - 5),
            max_iter=10,
            sigma=round(self.params["numerical_3"] / 100000, 2),
            convert2lab=True,
            multichannel=is_color,
        )
        return labels
algorithmspace["SC"] = Slic
class QuickShift(segmentor):
    """Wrapper for ``skimage.segmentation.quickshift`` ("QS").

    Quickshift clustering in (color, x, y) space.
    Parameters (scaled search-space integers):
      numerical_1 -> ratio (color- vs image-space weight),
      numerical_2 -> kernel_size, numerical_3 -> max_dist,
      numerical_4 -> sigma (pre-smoothing width).
    Returns an ndarray of segment labels.
    """

    def __init__(self, paramlist=None):
        super(QuickShift, self).__init__(paramlist)
        if not paramlist:
            self.params["algorithm"] = "QS"
            self.params["numerical_1"] = 5
            self.params["numerical_2"] = 5
            self.params["numerical_3"] = 60
            self.params["numerical_4"] = 3
        self.paramindexes = ["numerical_1", "numerical_2", "numerical_3", "numerical_4"]

    def evaluate(self, img):
        """Return the quickshift label mask for ``img``."""
        labels = skimage.segmentation.quickshift(
            img,
            ratio=round(self.params["numerical_1"] / 10000, 2),
            kernel_size=self.params["numerical_2"],
            max_dist=self.params["numerical_3"],
            sigma=round(self.params["numerical_4"] / 100000, 2),
            random_seed=134,  # fixed seed for deterministic tie-breaking
        )
        return labels
algorithmspace["QS"] = QuickShift
class Watershed(segmentor):
    """Wrapper for ``skimage.segmentation.watershed`` ("WS").

    Flood-fills intensity basins; good for overlapping objects.
    numerical_1 controls ``compactness`` on a log scale.  Connectivity,
    markers, and offset are left at their defaults because array-valued
    parameters would blow up the search space.
    Returns a labeled image ndarray.
    """

    def __init__(self, paramlist=None):
        super(Watershed, self).__init__(paramlist)
        if not paramlist:
            self.params["algorithm"] = "WS"
            self.params["numerical_1"] = 2.0
        self.paramindexes = ["numerical_1"]

    def evaluate(self, img):
        """Return the watershed label mask for ``img``."""
        compact = 10 ** (round(self.params["numerical_1"] / 1000, 0) - 5)
        return skimage.segmentation.watershed(img, markers=None, compactness=compact)
algorithmspace["WS"] = Watershed
class Chan_Vese(segmentor):
    """Wrapper for ``skimage.segmentation.chan_vese`` ("CV").

    Grayscale-only active-contour segmentation for objects without
    clear boundaries.  Parameters (scaled search-space integers):
      numerical_1 -> mu (edge-length weight),
      numerical_2 -> lambda1 (power of 2),
      numerical_5 / numerical_2 -> lambda2,
      numerical_3 -> tol, numerical_4 -> dt.
    Typical mu is 0-1; lambda1/lambda2 are usually 1 unless the
    background distribution differs strongly from the foreground.
    """

    def __init__(self, paramlist=None):
        super(Chan_Vese, self).__init__(paramlist)
        if not paramlist:
            self.params["algorithm"] = "CV"
            self.params["numerical_1"] = 2.0
            self.params["numerical_2"] = 10
            self.params["numerical_3"] = 10
            # Bug fix: the default used to be the string "small disk"
            # (a leftover init_level_set value), but evaluate() divides
            # numerical_4 by 1000 to obtain dt, so the string default
            # raised TypeError.  500 -> dt = 0.5, skimage's own default.
            self.params["numerical_4"] = 500
            self.params["numerical_5"] = 20
        self.paramindexes = ["numerical_1", "numerical_2", "numerical_3", "numerical_4", "numerical_5"]

    def evaluate(self, img):
        """Return the chan_vese segmentation of ``img`` (converted to grayscale)."""
        if len(img.shape) == 3:
            img = skimage.color.rgb2gray(img)
        output = skimage.segmentation.chan_vese(
            img,
            mu=round(self.params["numerical_1"] / 10000, 2),
            lambda1=2 ** round(self.params["numerical_2"] / 10000, 0),
            lambda2=2 ** round(self.params["numerical_5"] / 10000, 0)
            / 2 ** round(self.params["numerical_2"] / 10000, 0),
            tol=round(self.params["numerical_3"] / 10000, 3),
            max_iter=10,
            dt=round(self.params["numerical_4"] / 1000, 1),
        )
        return output
algorithmspace["CV"] = Chan_Vese
class Morphological_Chan_Vese(segmentor):
    """Wrapper for ``skimage.segmentation.morphological_chan_vese`` ("MCV").

    Grayscale-only active contours without edges: segments objects whose
    interior looks different from the exterior even without good borders.
    Parameters (scaled search-space integers):
      numerical_1 -> index into the init_level_set options,
      numerical_2 -> smoothing passes per iteration (1-4 typical),
      numerical_3 -> lambda1 (power of 2),
      numerical_4 / numerical_3 -> lambda2.
    Returns the final segmentation mask.
    """

    def __init__(self, paramlist=None):
        super(Morphological_Chan_Vese, self).__init__(paramlist)
        if not paramlist:
            self.params["algorithm"] = "MCV"
            # Bug fix: the default used to be the string "checkerboard",
            # but evaluate() divides numerical_1 by 10000 to index the
            # init_level_set option list, so the string default raised
            # TypeError.  0 maps to index 0, i.e. 'checkerboard', which
            # preserves the original intent.
            self.params["numerical_1"] = 0
            self.params["numerical_2"] = 10
            self.params["numerical_3"] = 10
            self.params["numerical_4"] = 20
        self.paramindexes = ["numerical_1", "numerical_2", "numerical_3", "numerical_4"]

    def evaluate(self, img):
        """Return the morphological chan-vese segmentation of ``img``."""
        init_level_set_options = ['checkerboard', 'disk', 'small disk', 'circle']
        if len(img.shape) == 3:
            img = skimage.color.rgb2gray(img)
        output = skimage.segmentation.morphological_chan_vese(
            img,
            iterations=10,
            init_level_set=init_level_set_options[int(round(self.params["numerical_1"] / 10000, 0) * 3)],
            smoothing=int(round(self.params["numerical_2"] / 3000, 0) + 1),
            lambda1=2 ** round(self.params["numerical_3"] / 10000, 0),
            lambda2=2 ** round(self.params["numerical_4"] / 10000, 0)
            / 2 ** round(self.params["numerical_3"] / 10000, 0),
        )
        return output
algorithmspace["MCV"] = Morphological_Chan_Vese
class MorphGeodesicActiveContour(segmentor):
    """Wrapper for ``skimage.segmentation.morphological_geodesic_active_contour`` ("AC").

    Preprocesses the image with ``inverse_gaussian_gradient`` (gradient
    magnitude), then segments objects with visible but noisy/broken
    borders.  Parameters (scaled search-space integers):
      numerical_4, numerical_5 -> alpha/sigma for the gradient preprocessing,
      numerical_1 -> index into the init_level_set options,
      numerical_2 -> smoothing passes per iteration,
      numerical_3 -> balloon force (offset by -50).
    Returns an ndarray of the segmented image.
    """

    def __init__(self, paramlist=None):
        super(MorphGeodesicActiveContour, self).__init__(paramlist)
        if not paramlist:
            self.params["algorithm"] = "AC"
            self.params["numerical_4"] = 0.2
            self.params["numerical_5"] = 0.3
            # Bug fix: the default used to be the string "checkerboard",
            # but evaluate() divides numerical_1 by 10000 to index the
            # init_level_set option list, so the string default raised
            # TypeError.  0 maps to index 0, i.e. 'checkerboard'.
            self.params["numerical_1"] = 0
            self.params["numerical_2"] = 5
            self.params["numerical_3"] = 10
        # Bug fix: paramindexes used to list names ("alpha", "sigma", ...)
        # that do not exist in self.params, so segmentor.__str__ raised
        # KeyError; list the numerical_* keys actually stored, as every
        # sibling class does.
        self.paramindexes = [
            "numerical_1",
            "numerical_2",
            "numerical_3",
            "numerical_4",
            "numerical_5",
        ]

    def evaluate(self, img):
        """Return the geodesic active-contour segmentation of ``img``."""
        # Run inverse_gaussian_gradient to get the preprocessed image.
        init_level_set_options = ['checkerboard', 'disk', 'small disk', 'circle']
        gimage = skimage.segmentation.inverse_gaussian_gradient(
            img, self.params["numerical_4"], round(self.params["numerical_5"] / 100000, 2)
        )
        output = skimage.segmentation.morphological_geodesic_active_contour(
            gimage,
            iterations=10,
            init_level_set=init_level_set_options[int(round(self.params["numerical_1"] / 10000, 0) * 3)],
            smoothing=int(round(self.params["numerical_2"] / 3000, 0) + 1),
            threshold="auto",
            balloon=round(self.params["numerical_3"] / 100, 0) - 50,
        )
        return output
algorithmspace["AC"] = MorphGeodesicActiveContour
# class Flood(segmentor):
# '''
# #flood
# #DOES NOT SUPPORT MULTICHANNEL IMAGES
# https://scikit-image.org/docs/dev/auto_examples/segmentation/plot_floodfill.html
# Uses a seed point and to fill all connected points within/equal to
# a tolerance around the seed point
# #Returns a boolean array with 'flooded' areas being true
# #Variables
# image: ndarray, input image
# seed_point: tuple/int, x,y,z referring to starting point for flood
# fill
# selem: ndarray of 1's and 0's, Used to determine neighborhood of
# each pixel
# connectivity: int, Used to find neighborhood of each pixel. Can use
# this or selem.
# tolerance: float or int, If none, adjacent values must be equal to
# seed_point. Otherwise, how likely adjacent values are flooded.
# '''
# #Abbreviation for algorithm = FD
# def __init__(self, paramlist=None):
# super(Flood, self).__init__(paramlist)
# self.params['algorithm'] = 'AC'
# self.params['seed_pointX'] = 10
# self.params['seed_pointY'] = 20
# self.params['seed_pointZ'] = 0
# self.params['connect'] = 4
# self.params['tolerance'] = 0.5
# self.paramindexes = ['seed', 'connect', 'tolerance']
# def evaluate(self, img):
# output = skimage.segmentation.flood(
# img,
# (self.params['seed_pointX'],
# self.params['seed_pointY'],
# self.params['seed_pointZ']),
# connectivity=self.params['connect'],
# tolerance=self.params['tolerance'])
# return output
# algorithmspace['FD'] = Flood
# class FloodFill(segmentor):
# '''
# #flood_fill
# https://scikit-image.org/docs/dev/auto_examples/segmentation/plot_floodfill.html
# Like a paint-bucket tool in paint. Like flood, but changes the
# color equal to new_type
# #Returns A filled array of same shape as the image
# #Variables
# image: ndarray, input image
# seed_point: tuple or int, starting point for filling (x,y,z)
# new_value: new value to set the fill to (e.g. color). Must agree
# with image type
# selem: ndarray, Used to find neighborhood of filling
# connectivity: Also used to find neighborhood of filling if selem is
# None
# tolerance: float or int, If none, adjacent values must be equal to
# seed_point. Otherwise, how likely adjacent values are flooded.
# inplace: bool, If true, the flood filling is applied to the image,
# if False, the image is not modified. Default False, don't
# change
# '''
# #Abbreviation for algorithm == FF
# def __init__(self, paramlist=None):
# super(FloodFill, self).__init__(paramlist)
# self.params['algorithm'] = 'AC'
# self.params['seed_pointX'] = 10
# self.params['seed_pointY'] = 20
# self.params['seed_pointZ'] = 0
# self.params['connect'] = 4
# self.params['tolerance'] = 0.5
# self.paramindexes = ['seed', 'connect', 'tolerance']
# def evaluate(self, img):
# output = skimage.segmentation.flood_fill(
# img,
# (self.params['seed_pointX'],
# self.params['seed_pointY'],
# self.params['seed_pointZ']),
# 134, #TODO: Had coded value
# connectivity= self.params['connect'],
# tolerance=self.params['tolerance'])
# try:
# #I'm not sure if this will work on grayscale
# image = Image.fromarray(output.astype('uint8'), '1')
# except ValueError:
# image = Image.fromarray(output.astype('uint8'), 'RGB')
# width = image.width
# height = image.width
# #Converting the background to black
# for x in range(0, width):
# for y in range(0, height):
# #First check for grayscale
# pixel = image.getpixel((x,y))
# if pixel[0] == 134:
# image.putpixel((x,y), 134)
# continue
# else:
# image.putpixel((x,y), 0)
# #print(image.getpixel((x,y)))
# #image.convert(mode='L')
# pic = np.array(image)
# return pic
# algorithmspace['FF'] = FloodFill
# TODO: Figure out the mask part?
# class RandomWalker(segmentor):
# algorithm = 'RW'
# paramindexes = [1, 2]
# def __doc__(self):
# myhelp = "Wrapper function for the scikit-image random_walker segmentor:"
# myhelp += f" xx {skimage.segmentation.random_walker.__doc__}"
# return myhelp
# def __init__(self, beta = 0.5, tolerance = 0.4):
# self.beta = beta
# self.tolerance = tolerance
# def evaluate(self, img):
# #Let's deterime what mode to use
# mode = "bf"
# if len(img) < 512 :
# mode = "cg_mg"
# #If data is 2D, then this is a grayscale, so multichannel is
# output = skimage.segmentation.random_walker(
# img, labels=mask,
# beta=self.beta,
# tol=self.tolerance, copy=True,
# multichannel=True, return_full_prob=False)
# return output
"""
function to calculate number of sets in our test image
that map to more than one set in our truth image, and how many
pixels are in those sets. Used in fitness function below.
INPUTS: truth image, infer image
RETURNS: number of repeated sets, number of pixels in repeated sets
"""
def set_fitness_func(a_test, b_test, include_L=False):
    """Count inferred sets that map to more than one truth set, and the
    pixels involved.  Used by the fitness function.

    INPUTS:
      a_test -- truth-image label array
      b_test -- inferred-image label array
      include_L -- when True, also return the set of truth labels that
                   "won" a mapping (used to compute L).
    RETURNS: (num_repeats, repeat_count[, used_sets])
    """
    # TODO: This is redundant. We just pass in the raveled vector from fitnessfunciton.
    a_test_int = a_test.ravel().astype(int)  # turn float array into int array
    b_test_int = b_test.ravel().astype(int)  # turn float array into int array
    # Bug fix: the original asserted len(a_test_int == len(b_test_int)),
    # which takes the length of an elementwise comparison and is truthy
    # for any non-empty input; compare the two lengths instead.
    assert len(a_test_int) == len(b_test_int)
    # Build "a:b" pair strings so each pixel's (truth, inferred) mapping
    # can be compared as a single token.
    filler = np.chararray((len(a_test_int)))
    filler[:] = ":"
    matched = np.core.defchararray.add(a_test_int.astype(str), filler.astype(str))
    matched = np.core.defchararray.add(matched, b_test_int.astype(str))
    # Collect unique set pairings and count pixels per pairing.
    unique_sets = np.unique(matched)
    set_counts = {}
    for pair in unique_sets:
        set_counts[pair] = sum(matched[:] == pair)
    # Count every repeated inferred set. EX: (A, A, B, B, B, C) -> 5 repeated.
    sets = set()          # all inferred sets seen so far
    repeats = []          # inferred sets that appeared more than once
    b_set_counts = {}     # pixel counts per inferred set, one entry per pairing
    for pair in unique_sets:
        current_set = pair[pair.find(":") + 1:]  # inferred set of this pairing
        if current_set in sets:
            repeats.append(current_set)
            b_set_counts[current_set].append(set_counts[pair])
        else:
            b_set_counts[current_set] = [set_counts[pair]]
            sets.add(current_set)
    num_repeats = len(np.unique(repeats)) + len(repeats)
    # Count pixels in all repeated sets; the pairing with the maximum
    # pixel count is assumed to be the correct (non-error) mapping.
    repeat_count = 0
    used_sets = set()
    for current_set in b_set_counts:
        best = max(b_set_counts[current_set])
        repeat_count += sum(b_set_counts[current_set]) - best
        for pair in unique_sets:
            if pair[pair.find(":") + 1:] == current_set and set_counts[pair] == best:
                used_sets.add(pair[: pair.find(":")])
    if include_L:
        return num_repeats, repeat_count, used_sets
    return num_repeats, repeat_count
"""Takes in two ImageData obects and compares them according to
skimage's Structual Similarity Index and the mean squared error
Variables:
img1 is an image array segmented by the algorithm.
img2 is the validation image
imgDim is the number of dimensions of the image.
"""
def FitnessFunction_old(mask1, mask2):
    """Deprecated fitness function based on ``set_fitness_func``.

    mask1 -- segmentation mask produced by the algorithm
    mask2 -- validation (ground-truth) mask
    Returns a single-element list [error].
    """
    # Make sure images are in grayscale.
    if len(mask1.shape) > 2:
        # Bug fix: this line used to call `llogging.getLogger()` (typo),
        # which raised NameError whenever mask1 was a color image.
        logging.getLogger().info("mask1 not in grayscale")
        mask1 = color.rgb2gray(mask1)
    if len(mask2.shape) > 2:
        logging.getLogger().info("img2 not in grayscale")
        mask2 = color.rgb2gray(mask2)
    # Re-label both masks with small consecutive integers so they can be
    # compared as segmentation labels.
    mask1 = pd.factorize(mask1.ravel())[0].reshape(mask1.shape)
    mask2 = pd.factorize(mask2.ravel())[0].reshape(mask2.shape)
    # p - number of pixels not correctly mapped
    # L - number of correctly mapped sets
    num_repeats, p, used_sets = set_fitness_func(mask2, mask1, True)
    m = len(np.unique(mask1))  # number of unique labels in mask1
    n = len(np.unique(mask2))  # number of unique labels in mask2
    L = len(used_sets)         # number of true sets (i.e. used)
    logging.getLogger().info(f"p={p}, m={m}, n={n}, L={L}")
    error = (p + 2) ** np.log(abs(m - n) + 2)  # / (L >= n)
    print(f"TESTING - L={L} < n={n} p={p} m={m} error = {error} ")
    # Bug fix: `error == np.nan` is always False (NaN != NaN), so NaN
    # errors escaped the bounds check; np.isfinite covers inf and NaN.
    if (L < n) or error <= 0 or not np.isfinite(error):
        print(
            f"WARNING: Fitness bounds exceeded, using Maxsize - {L} < {n} or {error} <= 0 or {error} == np.inf or {error} == np.nan:"
        )
        error = sys.maxsize
    return [
        error,
    ]
def countMatches(inferred, groundTruth):
    """Tabulate, per inferred label, how many pixels fall in each
    ground-truth label.

    Returns (setcounts, m, n) where setcounts[i_key][g_key] is a pixel
    count, m is the number of distinct inferred labels, and n the number
    of distinct ground-truth labels.
    """
    assert (inferred.shape == groundTruth.shape)
    inferred_labels = set()
    truth_labels = set()
    setcounts = dict()
    rows, cols = inferred.shape[0], inferred.shape[1]
    for r in range(rows):
        for c in range(cols):
            i_key = inferred[r, c]
            g_key = groundTruth[r, c]
            inferred_labels.add(i_key)
            truth_labels.add(g_key)
            inner = setcounts.setdefault(i_key, dict())
            inner[g_key] = inner.get(g_key, 0) + 1
    return setcounts, len(inferred_labels), len(truth_labels)
'''
For each inferred set, find the ground truth set which it maps the most
pixels to. So we start from the inferred image, and map towards the
ground truth image. For each i_key, the g_key that it maps the most
pixels to is considered True. In order to see what ground truth sets
have a corresponding set(s) in the inferred image, we record these "true" g_keys.
This number of true g_keys is the value for L in our fitness function.
'''
def countsets(setcounts):
    """For each inferred set, find the ground-truth set receiving the
    most of its pixels and treat that mapping as "true".

    Returns (p, L, best): p is the number of pixels outside the winning
    mappings (mis-mapped), L the number of distinct ground-truth labels
    that won a mapping, and best the inferred->truth label map.
    """
    total = 0          # total pixels tallied
    matched = 0        # pixels inside the winning mapping of each inferred set
    true_keys = set()  # ground-truth labels considered correct
    best = dict()      # inferred label -> winning ground-truth label
    for i_key in setcounts:
        top_count = 0
        top_key = ''
        for g_key, count in setcounts[i_key].items():
            total += count
            if count > top_count:
                top_count = count
                top_key = g_key  # mapping with the greatest pixel count
        matched += top_count
        true_keys.add(top_key)
        best[i_key] = top_key
    return total - matched, len(true_keys), best
"""Takes in two ImageData obects and compares them according to
skimage's Structual Similarity Index and the mean squared error
Variables:
img1 is the validation image
img2 is an image array segmented by the algorithm.
imgDim is the number of dimensions of the image.
"""
def FitnessFunction(inferred, groundTruth):
    """Fitness of an inferred segmentation mask against ground truth.

    inferred    -- mask produced by the algorithm
    groundTruth -- validation (ground-truth) mask
    Returns [error, best], where best maps inferred labels to the
    ground-truth labels they most overlap (from ``countsets``).
    """
    # Make sure images are in grayscale.
    if len(inferred.shape) > 2:
        logging.getLogger().info("inferred not in grayscale")
        inferred = color.rgb2gray(inferred)
    if len(groundTruth.shape) > 2:
        logging.getLogger().info("img2 not in grayscale")
        groundTruth = color.rgb2gray(groundTruth)
    # p - number of pixels not correctly mapped
    # L - number of correctly mapped sets
    setcounts, m, n = countMatches(inferred, groundTruth)
    p, L, best = countsets(setcounts)
    logging.getLogger().info(f"p={p}, m={m}, n={n}, L={L}")
    error = (p + 2) ** np.log(abs(m - n) + 2)  # / (L >= n)
    # Bug fix: `error == np.nan` is always False (NaN != NaN), so NaN
    # errors slipped through this check; np.isfinite covers inf and NaN.
    if (L < n) or error <= 0 or not np.isfinite(error):
        logging.warning(
            f"WARNING: Fitness bounds exceeded, using Maxsize - {L} < {n} or {error} <= 0 or {error} == np.inf or {error} == np.nan:"
        )
        error = sys.maxsize
    return [error, best]
| 38.144806 | 133 | 0.633913 |
33b50e962b3aac2b681d03eee012946ef27465d5 | 383 | py | Python | backend/tasks/admin.py | Newinacode/Daily_Tracker | c5ce28832039dfbca977826be6409586311d107d | [
"MIT"
] | 2 | 2020-10-11T12:36:49.000Z | 2020-10-12T03:10:00.000Z | backend/tasks/admin.py | Newinacode/Daily_Tracker | c5ce28832039dfbca977826be6409586311d107d | [
"MIT"
] | null | null | null | backend/tasks/admin.py | Newinacode/Daily_Tracker | c5ce28832039dfbca977826be6409586311d107d | [
"MIT"
] | 2 | 2020-10-11T12:21:31.000Z | 2020-10-12T11:27:13.000Z | from django.contrib import admin
from .models import Task
# Register your models here.
# Django admin configuration for the Task model.
class TaskAdmin(admin.ModelAdmin):
# Columns shown in the change-list table.
list_display = ('id', 'title', 'start_date', 'end_date', 'status', 'user')
# Columns that link through to the edit page.
list_display_links = ('id', 'title')
# 'status' is editable directly from the list view.
list_editable = ('status',)
# Fields searched by the admin search box.
search_fields = ('title', 'start_date')
# Rows per page in the change list.
list_per_page = 25
admin.site.register(Task, TaskAdmin)
30db225ddee8472b0d8ffa4465898e2e0d6cfd49 | 2,297 | py | Python | metflix_remake/movies/migrations/0001_initial.py | luismoa/yummy_metflix_remake | bf6e108f9229c12b44e2dc1ea95e6c3d043cb465 | [
"MIT"
] | 1 | 2021-04-11T15:56:34.000Z | 2021-04-11T15:56:34.000Z | metflix_remake/movies/migrations/0001_initial.py | luismoa/yummy_metflix_remake | bf6e108f9229c12b44e2dc1ea95e6c3d043cb465 | [
"MIT"
] | 16 | 2021-04-10T22:46:43.000Z | 2021-06-08T22:21:00.000Z | metflix_remake/movies/migrations/0001_initial.py | luismoa/yummy_metflix_remake | bf6e108f9229c12b44e2dc1ea95e6c3d043cb465 | [
"MIT"
] | null | null | null | # Generated by Django 3.2 on 2021-04-18 12:13
from django.db import migrations, models
import django.db.models.deletion
# Initial schema migration: creates the Actor, Director, Movie, and
# MovieActor (many-to-many through) tables.
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
# Actor: basic biographical data.
migrations.CreateModel(
name='Actor',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('_name', models.CharField(max_length=35)),
('nationality', models.CharField(max_length=40)),
('born', models.DateField()),
('sex', models.CharField(max_length=1)),
],
),
# Director: referenced by Movie via a foreign key.
migrations.CreateModel(
name='Director',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('_name', models.CharField(max_length=35)),
('nationality', models.CharField(max_length=40)),
],
),
# Movie: production metadata plus a FK to its director.
migrations.CreateModel(
name='Movie',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('title', models.CharField(max_length=60)),
('nationality', models.CharField(max_length=40)),
('production_company', models.CharField(max_length=35)),
('year_release', models.PositiveIntegerField()),
('budget', models.PositiveBigIntegerField()),
('box_office', models.PositiveBigIntegerField()),
('running_time', models.PositiveSmallIntegerField()),
('id_director', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='movies.director')),
],
),
# MovieActor: join table recording each actor's role in each movie.
migrations.CreateModel(
name='MovieActor',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('_role', models.CharField(max_length=35)),
('is_main_character', models.BooleanField()),
('id_actor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='movies.actor')),
('id_movie', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='movies.movie')),
],
),
]
| 39.603448 | 118 | 0.559861 |
7450d9d05b981f5094edda40dee5dc314d6216ce | 56,036 | py | Python | src/sage/schemes/elliptic_curves/gal_reps_number_field.py | ChamanAgrawal/sage | 5f6d56ba247b352d7d46442e88fa3a027e9f222d | [
"BSL-1.0"
] | 2 | 2019-06-02T03:16:59.000Z | 2019-06-15T10:17:18.000Z | src/sage/schemes/elliptic_curves/gal_reps_number_field.py | ChamanAgrawal/sage | 5f6d56ba247b352d7d46442e88fa3a027e9f222d | [
"BSL-1.0"
] | null | null | null | src/sage/schemes/elliptic_curves/gal_reps_number_field.py | ChamanAgrawal/sage | 5f6d56ba247b352d7d46442e88fa3a027e9f222d | [
"BSL-1.0"
] | 1 | 2019-06-02T03:16:55.000Z | 2019-06-02T03:16:55.000Z | # -*- coding: utf-8 -*-
r"""
Galois representations for elliptic curves over number fields
This file contains the code to compute for which primes the Galois
representation attached to an elliptic curve (over an arbitrary number field)
is surjective. The functions in this file are called by the ``is_surjective``
and ``non_surjective`` methods of an elliptic curve over a number field.
EXAMPLES::
sage: K = NumberField(x**2 - 29, 'a'); a = K.gen()
sage: E = EllipticCurve([1, 0, ((5 + a)/2)**2, 0, 0])
sage: rho = E.galois_representation()
sage: rho.is_surjective(29) # Cyclotomic character not surjective.
False
sage: rho.is_surjective(31) # See Section 5.10 of [Serre72].
True
sage: rho.non_surjective() # long time (4s on sage.math, 2014)
[3, 5, 29]
sage: E = EllipticCurve_from_j(1728).change_ring(K) # CM
sage: E.galois_representation().non_surjective() # long time (2s on sage.math, 2014)
[0]
AUTHORS:
- Eric Larson (2012-05-28): initial version.
- Eric Larson (2014-08-13): added isogeny_bound function.
- John Cremona (2016, 2017): various efficiency improvements to _semistable_reducible_primes
- John Cremona (2017): implementation of Billerey's algorithm to find all reducible primes
REFERENCES:
.. [Serre72] Jean-Pierre Serre. *Propriétés galoisiennes des points d'ordre
fini des courbes elliptiques*. Inventiones mathematicae, 1972.
.. [Sutherland12] Sutherland. A local-global principle for rational
isogenies of prime degree. Journal de Théorie des Nombres de Bordeaux,
2012.
"""
# ****************************************************************************
# Copyright (C) 2012 Eric Larson <elarson3@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from __future__ import division
from six.moves import range
from sage.structure.sage_object import SageObject
from sage.rings.number_field.number_field import NumberField
from sage.modules.free_module import VectorSpace
from sage.rings.finite_rings.finite_field_constructor import GF
from sage.misc.functional import cyclotomic_polynomial
from sage.arith.all import legendre_symbol, primes
from sage.sets.set import Set
from sage.rings.all import Integer, ZZ, QQ, Infinity
class GaloisRepresentation(SageObject):
    r"""
    The compatible family of Galois representations
    attached to an elliptic curve over a number field.

    Given an elliptic curve `E` over a number field `K`
    and a rational prime number `p`, the `p^n`-torsion
    `E[p^n]` points of `E` is a representation of the
    absolute Galois group `G_K` of `K`. As `n` varies
    we obtain the Tate module `T_p E` which is
    a representation of `G_K` on a free `\ZZ_p`-module
    of rank `2`. As `p` varies the representations
    are compatible.

    EXAMPLES::

        sage: K = NumberField(x**2 + 1, 'a')
        sage: E = EllipticCurve('11a1').change_ring(K)
        sage: rho = E.galois_representation()
        sage: rho
        Compatible family of Galois representations associated to the Elliptic Curve defined by y^2 + y = x^3 + (-1)*x^2 + (-10)*x + (-20) over Number Field in a with defining polynomial x^2 + 1
    """

    def __init__(self, E):
        r"""
        See ``GaloisRepresentation`` for documentation.

        EXAMPLES::

            sage: K = NumberField(x**2 + 1, 'a')
            sage: E = EllipticCurve('11a1').change_ring(K)
            sage: rho = E.galois_representation()
            sage: rho
            Compatible family of Galois representations associated to the Elliptic Curve defined by y^2 + y = x^3 + (-1)*x^2 + (-10)*x + (-20) over Number Field in a with defining polynomial x^2 + 1
            sage: loads(rho.dumps()) == rho
            True
        """
        # The family is determined entirely by the curve.
        self.E = E

    def __repr__(self):
        r"""
        Return a string representation of the class.

        EXAMPLES::

            sage: K = NumberField(x**2 + 1, 'a')
            sage: E = EllipticCurve('11a1').change_ring(K)
            sage: rho = E.galois_representation()
            sage: rho
            Compatible family of Galois representations associated to the Elliptic Curve defined by y^2 + y = x^3 + (-1)*x^2 + (-10)*x + (-20) over Number Field in a with defining polynomial x^2 + 1
            sage: K.<a> = NumberField(x^2-x+1)
            sage: E = EllipticCurve([0,0,0,a,0])
            sage: E.galois_representation()
            Compatible family of Galois representations associated to the CM Elliptic Curve defined by y^2 = x^3 + a*x over Number Field in a with defining polynomial x^2 - x + 1
        """
        if self.E.has_cm():
            return "Compatible family of Galois representations associated to the CM " + repr(self.E)
        else:
            return "Compatible family of Galois representations associated to the " + repr(self.E)

    def __eq__(self, other):
        r"""
        Compares two Galois representations.

        We define two compatible families of representations attached
        to elliptic curves to be equal if the curves are isomorphic.

        EXAMPLES::

            sage: K = NumberField(x**2 + 1, 'a'); a = K.gen()
            sage: rho1 = EllipticCurve_from_j(1 + a).galois_representation()
            sage: rho2 = EllipticCurve_from_j(2 + a).galois_representation()
            sage: rho1 == rho1
            True
            sage: rho1 == rho2
            False
            sage: rho1 == 42
            False
        """
        if type(self) is not type(other):
            return False
        return self.E.is_isomorphic(other.E)

    def __ne__(self, other):
        r"""
        Check inequality; the negation of ``__eq__``.

        This is needed explicitly because this module still supports
        Python 2 (see the ``six.moves`` import at the top of the file),
        where ``!=`` does not automatically fall back to the negation of
        ``__eq__``; without it ``!=`` compared by identity.

        EXAMPLES::

            sage: K = NumberField(x**2 + 1, 'a'); a = K.gen()
            sage: rho1 = EllipticCurve_from_j(1 + a).galois_representation()
            sage: rho2 = EllipticCurve_from_j(2 + a).galois_representation()
            sage: rho1 != rho1
            False
            sage: rho1 != rho2
            True
        """
        return not (self == other)

    def elliptic_curve(self):
        r"""
        Return the elliptic curve associated to this representation.

        EXAMPLES::

            sage: K = NumberField(x**2 + 1, 'a'); a = K.gen()
            sage: E = EllipticCurve_from_j(a)
            sage: rho = E.galois_representation()
            sage: rho.elliptic_curve() == E
            True
        """
        return self.E

    def non_surjective(self, A=100):
        r"""
        Return a list of primes `p` including all primes for which the mod-`p`
        representation might not be surjective.

        INPUT:

        - ``A`` -- int (a bound on the number of traces of Frobenius to use
          while trying to prove surjectivity).

        OUTPUT:

        - ``list`` -- A list of primes where mod-`p` representation is
          very likely not surjective. At any prime not in this list,
          the representation is definitely surjective. If `E` has CM,
          the list [0] is returned.

        EXAMPLES::

            sage: K = NumberField(x**2 - 29, 'a'); a = K.gen()
            sage: E = EllipticCurve([1, 0, ((5 + a)/2)**2, 0, 0])
            sage: rho = E.galois_representation()
            sage: rho.non_surjective() # See Section 5.10 of [Serre72].
            [3, 5, 29]
            sage: K = NumberField(x**2 + 3, 'a'); a = K.gen()
            sage: E = EllipticCurve([0, -1, 1, -10, -20]).change_ring(K) # X_0(11)
            sage: rho = E.galois_representation()
            sage: rho.non_surjective()  # long time (4s on sage.math, 2014)
            [3, 5]
            sage: K = NumberField(x**2 + 1, 'a'); a = K.gen()
            sage: E = EllipticCurve_from_j(1728).change_ring(K) # CM
            sage: rho = E.galois_representation()
            sage: rho.non_surjective()
            [0]
            sage: K = NumberField(x**2 - 5, 'a'); a = K.gen()
            sage: E = EllipticCurve_from_j(146329141248*a - 327201914880) # CM
            sage: rho = E.galois_representation()
            sage: rho.non_surjective() # long time (3s on sage.math, 2014)
            [0]

        TESTS:

        An example which failed until fixed at :trac:`19229`::

            sage: K.<a> = NumberField(x^2-x+1)
            sage: E = EllipticCurve([a+1,1,1,0,0])
            sage: rho = E.galois_representation()
            sage: rho.non_surjective()
            [2, 3]
        """
        # In the CM case the mod-p image is never surjective; [0] is the
        # conventional return value for "all primes".
        if self.E.has_cm():
            return [0]
        return _non_surjective(self.E, A)

    def is_surjective(self, p, A=100):
        r"""
        Return ``True`` if the mod-p representation is (provably)
        surjective onto `Aut(E[p]) = GL_2(\GF{p})`.  Return
        ``False`` if it is (probably) not.

        INPUT:

        * ``p`` - int - a prime number.

        * ``A`` - int - a bound on the number of traces of Frobenius to use
          while trying to prove surjectivity.

        EXAMPLES::

            sage: K = NumberField(x**2 - 29, 'a'); a = K.gen()
            sage: E = EllipticCurve([1, 0, ((5 + a)/2)**2, 0, 0])
            sage: rho = E.galois_representation()
            sage: rho.is_surjective(29) # Cyclotomic character not surjective.
            False
            sage: rho.is_surjective(7) # See Section 5.10 of [Serre72].
            True

        If `E` is defined over `\QQ`, then the exceptional primes for `E_{/K}`
        are the same as the exceptional primes for `E`, except for those primes
        that are ramified in `K/\QQ` or are less than `[K:\QQ]`::

            sage: K = NumberField(x**2 + 11, 'a')
            sage: E = EllipticCurve([2, 14])
            sage: rhoQQ = E.galois_representation()
            sage: rhoK = E.change_ring(K).galois_representation()
            sage: rhoQQ.is_surjective(2) == rhoK.is_surjective(2)
            False
            sage: rhoQQ.is_surjective(3) == rhoK.is_surjective(3)
            True
            sage: rhoQQ.is_surjective(5) == rhoK.is_surjective(5)
            True

        For CM curves, the mod-p representation is never surjective::

            sage: K.<a> = NumberField(x^2-x+1)
            sage: E = EllipticCurve([0,0,0,0,a])
            sage: E.has_cm()
            True
            sage: rho = E.galois_representation()
            sage: any(rho.is_surjective(p) for p in [2,3,5,7])
            False
        """
        if self.E.has_cm():
            return False
        # p is surjective iff it is not in the list of possible exceptions.
        return (_exceptionals(self.E, [p], A) == [])

    def isogeny_bound(self, A=100):
        r"""
        Return a list of primes `p` including all primes for which
        the image of the mod-`p` representation is contained in a
        Borel.

        .. NOTE::

            For the actual list of primes `p` at which the
            representation is reducible see :meth:`reducible_primes()`.

        INPUT:

        - ``A`` -- int (a bound on the number of traces of Frobenius to
          use while trying to prove the mod-`p`
          representation is not contained in a Borel).

        OUTPUT:

        - ``list`` - A list of primes which contains (but may not be
          equal to) all `p` for which the image of the mod-`p`
          representation is contained in a Borel subgroup.  At any
          prime not in this list, the image is definitely not
          contained in a Borel. If E has `CM` defined over `K`, the list
          [0] is returned.

        EXAMPLES::

            sage: K = NumberField(x**2 - 29, 'a'); a = K.gen()
            sage: E = EllipticCurve([1, 0, ((5 + a)/2)**2, 0, 0])
            sage: rho = E.galois_representation()
            sage: rho.isogeny_bound() # See Section 5.10 of [Serre72].
            [3, 5]
            sage: K = NumberField(x**2 + 1, 'a')
            sage: EllipticCurve_from_j(K(1728)).galois_representation().isogeny_bound() # CM over K
            [0]
            sage: EllipticCurve_from_j(K(0)).galois_representation().isogeny_bound() # CM NOT over K
            [2, 3]
            sage: E = EllipticCurve_from_j(K(2268945/128)) # c.f. [Sutherland12]
            sage: E.galois_representation().isogeny_bound() # No 7-isogeny, but...
            [7]

        For curves with rational CM, there are infinitely many primes
        `p` for which the mod-`p` representation is reducible, and [0]
        is returned::

            sage: K.<a> = NumberField(x^2-x+1)
            sage: E = EllipticCurve([0,0,0,0,a])
            sage: E.has_rational_cm()
            True
            sage: rho = E.galois_representation()
            sage: rho.isogeny_bound()
            [0]

        An example (an elliptic curve with everywhere good reduction
        over an imaginary quadratic field with quite large
        discriminant), which failed until fixed at :trac:`21776`::

            sage: K.<a> = NumberField(x^2 - x + 112941801)
            sage: E = EllipticCurve([a+1,a-1,a,-23163076*a + 266044005933275,57560769602038*a - 836483958630700313803])
            sage: E.conductor().norm()
            1
            sage: GR = E.galois_representation()
            sage: GR.isogeny_bound()
            []
        """
        if self.E.has_rational_cm():
            return [0]

        E = _over_numberfield(self.E)
        K = E.base_field()

        char = lambda P: P.smallest_integer() # cheaper than constructing the residue field

        # semistable reducible primes (we are now not in the CM case)
        bad_primes = _semistable_reducible_primes(E)

        # primes of additive reduction
        bad_primesK = (K.ideal(E.c4()) + K.ideal(E.discriminant())).prime_factors()
        bad_primes += [char(P) for P in bad_primesK]

        # ramified primes
        bad_primes += K.absolute_discriminant().prime_factors()

        # remove repeats:
        bad_primes = list(Set(bad_primes))

        # Keep only the primes surviving the Frobenius-trace sieve.
        return Frobenius_filter(E, bad_primes, A)

    def reducible_primes(self):
        r"""
        Return a list of primes `p` for which the mod-`p`
        representation is reducible, or [0] for CM curves.

        OUTPUT:

        - ``list`` - A list of those primes `p` for which the mod-`p`
          representation is contained in a Borel subgroup, i.e. is
          reducible.  If E has CM *defined over K*, the list [0] is
          returned (in this case the representation is reducible for
          infinitely many primes).

        EXAMPLES::

            sage: K = NumberField(x**2 - 29, 'a'); a = K.gen()
            sage: E = EllipticCurve([1, 0, ((5 + a)/2)**2, 0, 0])
            sage: rho = E.galois_representation()
            sage: rho.isogeny_bound() # See Section 5.10 of [Serre72].
            [3, 5]
            sage: rho.reducible_primes()
            [3, 5]
            sage: K = NumberField(x**2 + 1, 'a')
            sage: EllipticCurve_from_j(K(1728)).galois_representation().isogeny_bound() # CM over K
            [0]
            sage: EllipticCurve_from_j(K(0)).galois_representation().reducible_primes() # CM but NOT over K
            [2, 3]
            sage: E = EllipticCurve_from_j(K(2268945/128)) # c.f. [Sutherland12]
            sage: rho = E.galois_representation()
            sage: rho.isogeny_bound() # ... but there is no 7-isogeny ...
            [7]
            sage: rho.reducible_primes()
            []

        For curves with rational CM, there are infinitely many primes
        `p` for which the mod-`p` representation is reducible, and [0]
        is returned::

            sage: K.<a> = NumberField(x^2-x+1)
            sage: E = EllipticCurve([0,0,0,0,a])
            sage: E.has_rational_cm()
            True
            sage: rho = E.galois_representation()
            sage: rho.reducible_primes()
            [0]
        """
        if self.E.has_rational_cm():
            return [0]

        # The bound from isogeny_bound() may include false positives; an
        # actual l-isogeny certifies reducibility at l.
        return [l for l in self.isogeny_bound() if self.E.isogenies_prime_degree(l)]
def _non_surjective(E, patience=100):
    r"""
    Return a list of primes `p` including all primes for which the mod-`p`
    representation might not be surjective.

    INPUT:

    - ``E`` - EllipticCurve (over a number field).

    - ``patience`` - int (a bound on the number of traces of Frobenius to
      use while trying to prove surjectivity).

    OUTPUT:

    - ``list`` - A list of primes where mod-`p` representation is very likely
      not surjective. At any prime not in this list, the representation is
      definitely surjective. If E has CM, a ValueError is raised.

    EXAMPLES::

        sage: K = NumberField(x**2 - 29, 'a'); a = K.gen()
        sage: E = EllipticCurve([1, 0, ((5 + a)/2)**2, 0, 0])
        sage: sage.schemes.elliptic_curves.gal_reps_number_field._non_surjective(E) # See Section 5.10 of [Serre72].
        [3, 5, 29]
        sage: E = EllipticCurve_from_j(1728).change_ring(K) # CM
        sage: sage.schemes.elliptic_curves.gal_reps_number_field._non_surjective(E)
        Traceback (most recent call last):
        ...
        ValueError: The curve E should not have CM.
    """
    if E.has_cm():
        raise ValueError("The curve E should not have CM.")
    E = _over_numberfield(E)
    K = E.base_field()

    # The primes l <= 19 unramified in K/QQ are the only candidates for the
    # mod-l image being contained in an exceptional subgroup of GL_2(F_l).
    candidates = [2, 3, 5, 7, 11, 13, 17, 19]

    # Places of additive reduction.  Any superset of these would also work;
    # a longer list only slows down the rest of the computation, so it is
    # unclear that a cheaper over-approximation would help.
    additive_places = [P for P, n in E.conductor().factor() if n > 1]

    # Residue characteristic -- cheaper than building the residue field.
    residue_char = lambda P: P.smallest_integer()

    candidates += [residue_char(P) for P in additive_places]
    candidates += K.discriminant().prime_factors()
    candidates += _semistable_reducible_primes(E)
    candidates += _possible_normalizers(E, additive_places)

    # Deduplicate, then sieve with traces of Frobenius.
    candidates = list(Set(candidates))
    return _exceptionals(E, candidates, patience)
def Frobenius_filter(E, L, patience=100):
    r"""
    Determine which primes in L might have an image contained in a
    Borel subgroup, by checking of traces of Frobenius.

    .. NOTE::

        This function will sometimes return primes for which the image
        is not contained in a Borel subgroup.  This issue cannot always
        be fixed by increasing patience as it may be a result of a
        failure of a local-global principle for isogenies.

    INPUT:

    - ``E`` -- EllipticCurve - over a number field.

    - ``L`` -- list - a list of prime numbers.

    - ``patience`` (int), default 100 -- a positive integer bounding
      the number of traces of Frobenius to use while trying to prove
      irreducibility.

    OUTPUT:

    - list -- The list of all primes `\ell` in L for which the mod
      `\ell` image might be contained in a Borel subgroup of
      `GL_2(\mathbf{F}_{\ell})`.

    EXAMPLES::

        sage: E = EllipticCurve('11a1') # has a 5-isogeny
        sage: sage.schemes.elliptic_curves.gal_reps_number_field.Frobenius_filter(E,primes(40))
        [5]

    Example to show that the output may contain primes where the
    representation is in fact reducible.  Over `\QQ` the following is
    essentially the unique such example by [Sutherland12]_::

        sage: E = EllipticCurve_from_j(2268945/128)
        sage: sage.schemes.elliptic_curves.gal_reps_number_field.Frobenius_filter(E, [7, 11])
        [7]

    This curve does possess a 7-isogeny modulo every prime of good
    reduction, but has no rational 7-isogeny::

        sage: E.isogenies_prime_degree(7)
        []

    A number field example::

        sage: K.<i> = QuadraticField(-1)
        sage: E = EllipticCurve([1+i, -i, i, -399-240*i, 2627+2869*i])
        sage: sage.schemes.elliptic_curves.gal_reps_number_field.Frobenius_filter(E, primes(20))
        [2, 3]

    Here the curve really does possess isogenies of degrees 2 and 3::

        sage: [len(E.isogenies_prime_degree(l)) for l in [2,3]]
        [1, 1]
    """
    E = _over_numberfield(E)
    K = E.base_field()

    # Work on a sorted, duplicate-free copy of L.
    candidates = sorted(set(L))

    # l = 2 is decided directly (c.f. Section 5.3(a) of [Serre72]): the
    # mod-2 image is reducible iff the 2-division polynomial has a root.
    keep_two = False
    if 2 in candidates:
        candidates.remove(2)
        keep_two = not E.division_polynomial(2).is_irreducible()

    base_is_QQ = (K == QQ)
    from sage.arith.misc import primes
    from sage.rings.infinity import infinity

    def good_reduction_primes():
        # Yield primes (as ideals) of good reduction, ordered by
        # residue characteristic.
        for p in primes(start=2, stop=infinity):
            if base_is_QQ:
                if E.has_good_reduction(p):
                    yield ZZ.ideal(p)
            else:
                for P in K.primes_above(p):
                    if E.has_good_reduction(P):
                        yield P

    used = 0
    for P in good_reduction_primes():
        # Stop once no candidates remain or patience is exhausted.
        if not candidates or used == patience:
            break
        used += 1

        # A prime l survives only if the Frobenius polynomial at P is
        # reducible modulo l, i.e. its discriminant is a square mod l.
        disc = E.reduction(P).frobenius_polynomial().discriminant()
        candidates = [l for l in candidates if legendre_symbol(disc, l) != -1]

    return [2] + candidates if keep_two else candidates
def _exceptionals(E, L, patience=1000):
    r"""
    Determine which primes in L are exceptional for E, using Proposition 19
    of Section 2.8 of Serre's ``Propriétés Galoisiennes des Points d'Ordre
    Fini des Courbes Elliptiques'' [Serre72]_.

    INPUT:

    - ``E`` - EllipticCurve - over a number field.

    - ``L`` - list - a list of prime numbers.

    - ``patience`` - int (a bound on the number of traces of Frobenius to
      use while trying to prove surjectivity).

    OUTPUT:

    - list -- The list of all primes l in L for which the mod l image
      might fail to be surjective.

    EXAMPLES::

        sage: K = NumberField(x**2 - 29, 'a'); a = K.gen()
        sage: E = EllipticCurve([1, 0, ((5 + a)/2)**2, 0, 0])
        sage: sage.schemes.elliptic_curves.gal_reps_number_field._exceptionals(E, [29, 31])
        [29]

    For CM curves an error is raised::

        sage: E = EllipticCurve_from_j(1728).change_ring(K) # CM
        sage: sage.schemes.elliptic_curves.gal_reps_number_field._exceptionals(E,[2,3,5])
        Traceback (most recent call last):
        ...
        ValueError: The curve E should not have CM.
    """
    if E.has_cm():
        raise ValueError("The curve E should not have CM.")
    E = _over_numberfield(E)
    K = E.base_field()

    output = []

    L = list(set(L)) # Remove duplicates from L.

    # The primes 2, 3 and the primes ramified in K/QQ are decided by the
    # explicit criteria of [Serre72] Section 5.3 and moved straight to
    # the output; the Frobenius sieve below only handles the rest.
    for l in L:
        if l == 2: # c.f. Section 5.3(a) of [Serre72].
            if (E.j_invariant() - 1728).is_square():
                output.append(2)
            elif not E.division_polynomial(2).is_irreducible():
                output.append(2)
        elif l == 3: # c.f. Section 5.3(b) of [Serre72].
            if K(-3).is_square():
                output.append(3)
            elif not (K['x'].gen()**3 - E.j_invariant()).is_irreducible():
                output.append(3)
            elif not E.division_polynomial(3).is_irreducible():
                output.append(3)
        elif (K.discriminant() % l) == 0:
            if not K['x'](cyclotomic_polynomial(l)).is_irreducible():
                # I.E. if the action on lth roots of unity is not surjective
                # (We want this since as a Galois module, \wedge^2 E[l]
                # is isomorphic to the lth roots of unity.)
                output.append(l)

    # Primes already decided above are removed from the sieve; 2 and 3
    # are always removed since the criteria above settle them fully.
    for l in output:
        L.remove(l)
    if 2 in L:
        L.remove(2)
    if 3 in L:
        L.remove(3)

    # If the image is not surjective, then it is contained in one of the
    # maximal subgroups. So, we start by creating a dictionary between primes
    # l in L and possible maximal subgroups in which the mod l image could
    # be contained. This information is stored as a triple whose elements
    # are True/False according to whether the mod l image could be contained
    # in:
    #      0. A Borel or normalizer of split Cartan subgroup.
    #      1. A nonsplit Cartan subgroup or its normalizer.
    #      2. An exceptional subgroup of GL_2.

    D = {}
    for l in L:
        D[l] = [True, True, True]

    # Sieve: each Frobenius trace/determinant pair rules out possibilities
    # in D until every flag for a prime is False (then l is unexceptional).
    for P in deg_one_primes_iter(K):
        try:
            trace = E.change_ring(P.residue_field()).trace_of_frobenius()
        except ArithmeticError: # Bad reduction at P.
            continue

        patience -= 1

        determinant = P.norm()
        discriminant = trace**2 - 4 * determinant

        unexc = [] # Primes we discover are unexceptional go here.

        for l in D:
            tr = GF(l)(trace)
            det = GF(l)(determinant)
            disc = GF(l)(discriminant)

            if tr == 0:
                # I.E. if Frob_P could be contained in the normalizer of
                # a Cartan subgroup, but not in the Cartan subgroup.
                continue

            if disc == 0:
                # I.E. If the matrix might be non-diagonalizable over F_{p^2}.
                continue

            if legendre_symbol(disc, l) == 1:
                # If the matrix is diagonalizable over F_p, it can't be
                # contained in a non-split Cartan subgroup. Since we've
                # gotten rid of the case where it is contained in the
                # of a nonsplit Cartan subgroup but not the Cartan subgroup,
                D[l][1] = False
            else:
                # If the matrix is not diagonalizable over F_p, it can't
                # be contained Borel subgroup.
                D[l][0] = False

            if det != 0: # c.f. [Serre72], Section 2.8, Prop. 19
                u = trace**2 / det
                if u not in (1, 2, 4) and u**2 - 3 * u + 1 != 0:
                    D[l][2] = False

            if D[l] == [False, False, False]:
                unexc.append(l)

        for l in unexc:
            D.pop(l)
        unexc = []

        # Stop when every prime has been ruled out or patience is spent.
        if (D == {}) or (patience == 0):
            break

    # Whatever primes remain undecided are reported as possibly exceptional.
    for l in D:
        output.append(l)

    output.sort()
    return output
def _over_numberfield(E):
    r"""
    Return `E`, defined over a NumberField object.  This is necessary
    since if `E` is defined over `\QQ`, then we cannot use SAGE commands
    available for number fields.

    INPUT:

    - ``E`` - EllipticCurve - over a number field.

    OUTPUT:

    - If `E` is defined over a NumberField, returns E.

    - If `E` is defined over QQ, returns E defined over the NumberField QQ.

    EXAMPLES::

        sage: E = EllipticCurve([1, 2])
        sage: sage.schemes.elliptic_curves.gal_reps_number_field._over_numberfield(E)
        Elliptic Curve defined by y^2 = x^3 + x + 2 over Number Field in a with defining polynomial x
    """
    field = E.base_field()
    if field != QQ:
        return E
    # Re-base the curve over the degree-1 number field QQ[x]/(x) so that
    # number-field-only methods (ideals, primes_above, ...) are available.
    gen = QQ['x'].gen()
    return E.change_ring(NumberField(gen, 'a'))
def deg_one_primes_iter(K, principal_only=False):
    r"""
    Return an iterator over degree 1 primes of ``K``.

    INPUT:

    - ``K`` -- a number field

    - ``principal_only`` -- bool; if ``True``, only yield principal primes

    OUTPUT:

    An iterator over degree 1 primes of `K` up to the given norm,
    optionally yielding only principal primes.

    EXAMPLES::

        sage: K.<a> = QuadraticField(-5)
        sage: from sage.schemes.elliptic_curves.gal_reps_number_field import deg_one_primes_iter
        sage: it = deg_one_primes_iter(K)
        sage: [next(it) for _ in range(6)]
        [Fractional ideal (2, a + 1),
         Fractional ideal (3, a + 1),
         Fractional ideal (3, a + 2),
         Fractional ideal (-a),
         Fractional ideal (7, a + 3),
         Fractional ideal (7, a + 4)]
        sage: it = deg_one_primes_iter(K, True)
        sage: [next(it) for _ in range(6)]
        [Fractional ideal (-a),
         Fractional ideal (-2*a + 3),
         Fractional ideal (2*a + 3),
         Fractional ideal (a + 6),
         Fractional ideal (a - 6),
         Fractional ideal (-3*a + 4)]
    """
    if principal_only and K.signature() == (0, 1):
        # An imaginary quadratic field has no principal primes of norm
        # below |disc|/4, so the search may safely start there.
        start = K.discriminant().abs() // 4
    else:
        start = 2
    over_QQ = (K == QQ)
    for p in primes(start=start, stop=Infinity):
        if over_QQ:
            yield ZZ.ideal(p)
            continue
        for P in K.primes_above(p, degree=1):
            if not principal_only or P.is_principal():
                yield P
def _semistable_reducible_primes(E, verbose=False):
    r"""Find a list containing all semistable primes l unramified in K/QQ
    for which the Galois image for E could be reducible.

    INPUT:

    - ``E`` - EllipticCurve - over a number field.

    - ``verbose`` - bool (default ``False``) - print progress information.

    OUTPUT:

    A list of primes, which contains all primes `l` unramified in
    `K/\mathbb{QQ}`, such that `E` is semistable at all primes lying
    over `l`, and the Galois image at `l` is reducible. If `E` has CM
    defined over its ground field, a ``ValueError`` is raised.

    EXAMPLES::

        sage: E = EllipticCurve([0, -1, 1, -10, -20]) # X_0(11)
        sage: 5 in sage.schemes.elliptic_curves.gal_reps_number_field._semistable_reducible_primes(E)
        True

    This example, over a quintic field with Galois group `S_5`, took a
    very long time before :trac:`22343`::

        sage: K.<a> = NumberField(x^5 - 6*x^3 + 8*x - 1)
        sage: E = EllipticCurve(K, [a^3 - 2*a, a^4 - 2*a^3 - 4*a^2 + 6*a + 1, a + 1, -a^3 + a + 1, -a])
        sage: from sage.schemes.elliptic_curves.gal_reps_number_field import _semistable_reducible_primes
        sage: _semistable_reducible_primes(E)
        [2, 5, 53, 1117]
    """
    if verbose: print("In _semistable_reducible_primes with E={}".format(E.ainvs()))
    K = E.base_field()
    d = K.degree()

    deg_one_primes = deg_one_primes_iter(K, principal_only=True)

    bad_primes = set([]) # This will store the output.

    # We find two primes (of distinct residue characteristics) which are
    # of degree 1, unramified in K/Q, and at which E has good reduction.
    # Each of these primes will give us a nontrivial divisibility constraint
    # on the exceptional primes l. For both of these primes P, we precompute
    # a generator and the characteristic polynomial of Frob_P^12.

    precomp = []
    last_p = 0 # The residue characteristic of the most recent prime.

    while len(precomp) < 2:
        P = next(deg_one_primes)
        p = P.norm()
        if p != last_p and (d==1 or P.ramification_index() == 1) and E.has_good_reduction(P):
            precomp.append(P)
            last_p = p

    Px, Py = precomp
    x, y = [PP.gens_reduced()[0] for PP in precomp]
    EmodPx = E.reduction(Px) if d>1 else E.reduction(x)
    EmodPy = E.reduction(Py) if d>1 else E.reduction(y)
    fxpol = EmodPx.frobenius_polynomial()
    fypol = EmodPy.frobenius_polynomial()
    fx12pol = fxpol.adams_operator(12) # roots are 12th powers of those of fxpol
    fy12pol = fypol.adams_operator(12)
    px = x.norm() if d>1 else x
    # BUG FIX: this previously read ``y.norm() if d>1 else x``; the d==1
    # fallback must be ``y``.  (px/py only feed the verbose output below,
    # so the wrong value was printed for the second prime when d == 1.)
    py = y.norm() if d>1 else y
    Zx = fxpol.parent()
    xpol = x.charpoly() if d>1 else Zx([-x,1])
    ypol = y.charpoly() if d>1 else Zx([-y,1])

    if verbose: print("Finished precomp, x={} (p={}), y={} (p={})".format(x,px,y,py))

    # For each possible weight w of the reducible character, combine the
    # two Frobenius constraints into an integer divisibility condition on l.
    for w in range(1 + d // 2):
        if verbose: print("w = {}".format(w))
        gx = xpol.symmetric_power(w).adams_operator(12).resultant(fx12pol)
        gy = ypol.symmetric_power(w).adams_operator(12).resultant(fy12pol)
        if verbose: print("computed gx and gy")

        gxn = Integer(gx.absolute_norm()) if d > 1 else gx
        gyn = Integer(gy.absolute_norm()) if d > 1 else gy
        gxyn = gxn.gcd(gyn)
        if gxyn:
            # Nontrivial constraint: l must divide gxyn.
            xprimes = gxyn.prime_factors()
            if verbose: print("adding prime factors {} of {} to {}".format(xprimes, gxyn, sorted(bad_primes)))
            bad_primes.update(xprimes)
            if verbose: print("...done, bad_primes now {}".format(sorted(bad_primes)))
            continue
        else:
            if verbose: print("gx and gy both 0!")

        ## It is possible that our curve has CM. ##

        # Our character must be of the form Nm^K_F for an imaginary
        # quadratic subfield F of K (which is the CM field if E has CM).
        # Note that this can only happen when d is even, w=d/2, and K
        # contains (or the Galois closure of K contains?) the
        # imaginary quadratic field F = Q(sqrt(a)) which is the
        # splitting field of both fx12pol and fy12pol. We compute a
        # and relativise K over F:

        a = fx12pol.discriminant().squarefree_part()

        # Construct a field isomorphic to K but a relative extension over QQ(sqrt(a)).

        # See #19229: the names given here, which are not used, should
        # not be the name of the generator of the base field.

        rootsa = K(a).sqrt(all=True) # otherwise if a is not a square the
                                     # returned result is in the symbolic ring!
        try:
            roota = rootsa[0]
        except IndexError:
            raise RuntimeError("error in _semistable_reducible_primes: K={} does not contain sqrt({})".format(K,a))
        K_rel = K.relativize(roota, ['name1','name2'])
        iso = K_rel.structure()[1] # an isomorphism from K to K_rel

        ## We try again to find a nontrivial divisibility condition. ##

        div = 0
        patience = 5 * K.absolute_degree()
        # Number of Frobenius elements to check before suspecting that E
        # has CM and computing the set of CM j-invariants of K to check.
        # TODO: Is this the best value for this parameter?

        while div==0 and patience>0:
            P = next(deg_one_primes) # a prime of K not K_rel
            while E.has_bad_reduction(P):
                P = next(deg_one_primes)
            if verbose: print("trying P = {}...".format(P))
            EmodP = E.reduction(P)
            fpol = EmodP.frobenius_polynomial()
            if verbose: print("...good reduction, frobenius poly = {}".format(fpol))
            x = iso(P.gens_reduced()[0]).relative_norm()
            xpol = x.charpoly().adams_operator(12)
            div2 = Integer(xpol.resultant(fpol.adams_operator(12)) // x.norm()**12)
            if div2:
                div = div2.isqrt()
                assert div2==div**2
                if verbose: print("...div = {}".format(div))
            else:
                if verbose: print("...div = 0, continuing")
                patience -= 1

        if patience == 0:
            # We suspect that E has CM, so we check:
            if E.has_cm():
                raise ValueError("In _semistable_reducible_primes, the curve E should not have CM.")

        assert div != 0
        # We found our divisibility constraint.

        xprimes = div.prime_factors()
        if verbose: print("...adding prime factors {} of {} to {}...".format(xprimes,div, sorted(bad_primes)))
        bad_primes.update(xprimes)
        if verbose: print("...done, bad_primes now {}".format(sorted(bad_primes)))

    L = sorted(bad_primes)
    return L
def _possible_normalizers(E, SA):
    r"""Find a list containing all primes `l` such that the Galois image at `l`
    is contained in the normalizer of a Cartan subgroup, such that the
    corresponding quadratic character is ramified only at the given primes.

    INPUT:

    - ``E`` - EllipticCurve - over a number field K.

    - ``SA`` - list - a list of primes of K.

    OUTPUT:

    - list -- A list of primes, which contains all primes `l` such that the
      Galois image at `l` is contained in the normalizer of a Cartan
      subgroup, such that the corresponding quadratic character is
      ramified only at primes in SA.

    - If `E` has geometric CM that is not defined over its ground field, a
      ValueError is raised.

    EXAMPLES::

        sage: E = EllipticCurve([0,0,0,-56,4848])
        sage: 5 in sage.schemes.elliptic_curves.gal_reps_number_field._possible_normalizers(E, [ZZ.ideal(2)])
        True

    For CM curves, an error is raised::

        sage: K.<i> = QuadraticField(-1)
        sage: E = EllipticCurve_from_j(1728).change_ring(K) # CM
        sage: sage.schemes.elliptic_curves.gal_reps_number_field._possible_normalizers(E, [])
        Traceback (most recent call last):
        ...
        ValueError: The curve E should not have CM.
    """
    if E.has_cm():
        raise ValueError("The curve E should not have CM.")
    E = _over_numberfield(E)
    K = E.base_field()
    SA = [K.ideal(I.gens()) for I in SA]
    selmer_group = K.selmer_group(SA, 2) # Generators of the selmer group.
    if selmer_group == []:
        return []
    V = VectorSpace(GF(2), len(selmer_group))
    # We think of this as the character group of the selmer group.
    traces_list = []
    W = V.zero_subspace()
    # Accumulate characters (as F_2-vectors) induced by degree-1 primes P
    # until they span a hyperplane W of V; each valid P also contributes a
    # trace of Frobenius whose prime factors are recorded later.
    deg_one_primes = deg_one_primes_iter(K)
    while W.dimension() < V.dimension() - 1:
        P = next(deg_one_primes)
        k = P.residue_field()
        defines_valid_character = True
        # A prime P defines a quadratic residue character
        # on the Selmer group. This variable will be set
        # to zero if any elements of the selmer group are
        # zero mod P (i.e. the character is ramified).
        splitting_vector = [] # This will be the values of this
        # character on the generators of the Selmer group.
        for a in selmer_group:
            abar = k(a)
            if abar == 0:
                # Ramification.
                defines_valid_character = False
                break
            if abar.is_square():
                splitting_vector.append(GF(2)(0))
            else:
                splitting_vector.append(GF(2)(1))
        if not defines_valid_character:
            continue
        if splitting_vector in W:
            continue
        try:
            Etilde = E.change_ring(k)
        except ArithmeticError: # Bad reduction.
            continue
        tr = Etilde.trace_of_frobenius()
        if tr == 0:
            continue
        traces_list.append(tr)
        W = W + V.span([splitting_vector])
    bad_primes = set([])
    for i in traces_list:
        for p in i.prime_factors():
            bad_primes.add(p)
    # We find the unique vector v in V orthogonal to W:
    v = W.matrix().transpose().kernel().basis()[0]
    # We find the element a of the selmer group corresponding to v:
    a = 1
    for i in range(len(selmer_group)):
        if v[i] == 1:
            a *= selmer_group[i]
    # Since we've already included the above bad primes, we can assume
    # that the quadratic character corresponding to the exceptional primes
    # we're looking for is given by mapping into Gal(K[\sqrt{a}]/K).
    patience = 5 * K.degree()
    # Number of Frobenius elements to check before suspecting that E
    # has CM and computing the set of CM j-invariants of K to check.
    # TODO: Is this the best value for this parameter?
    # NOTE(review): ``patience`` is decremented below but never consulted;
    # the loop relies on a nonzero trace eventually appearing (guaranteed
    # here since E has no CM) -- confirm this is the intended behaviour.
    while True:
        P = next(deg_one_primes)
        k = P.residue_field()
        if not k(a).is_square():
            try:
                tr = E.change_ring(k).trace_of_frobenius()
            except ArithmeticError: # Bad reduction.
                continue
            if tr == 0:
                patience -= 1
            else:
                for p in tr.prime_factors():
                    bad_primes.add(p)
                bad_primes = sorted(bad_primes)
                return bad_primes
#
# Code for Billerey's algorithm to find reducible primes
#
# See "Critères d'irréductibilité pour les représentations des courbes
# elliptiques", Nicolas Billerey, https://arxiv.org/abs/0908.1084
#
def Billerey_P_l(E, l):
    r"""
    Return Billerey's `P_l^*` as defined in [Bil2011]_, equation (9).

    INPUT:

    - ``E`` -- an elliptic curve over a number field `K`

    - ``l`` -- a rational prime

    EXAMPLES::

        sage: K = NumberField(x**2 - 29, 'a'); a = K.gen()
        sage: E = EllipticCurve([1, 0, ((5 + a)/2)**2, 0, 0])
        sage: from sage.schemes.elliptic_curves.gal_reps_number_field import Billerey_P_l
        sage: [Billerey_P_l(E,l) for l in primes(10)]
        [x^2 + 8143*x + 16777216,
         x^2 + 451358*x + 282429536481,
         x^4 - 664299076*x^3 + 205155493652343750*x^2 - 39595310449600219726562500*x + 3552713678800500929355621337890625,
         x^4 - 207302404*x^3 - 377423798538689366394*x^2 - 39715249826471656586987520004*x + 36703368217294125441230211032033660188801]
    """
    K = E.base_field()
    from sage.rings.polynomial.polynomial_ring import polygen
    from operator import mul

    # Starting from x - 1, fold in (via composed_op with multiplication)
    # the 12e-th Adams operation of the Frobenius polynomial at each
    # prime q of K above l, where e is the valuation of l at q.
    result = polygen(ZZ) - 1
    for q in K.primes_above(l):
        e = K(l).valuation(q)
        frob12 = E.reduction(q).frobenius_polynomial().adams_operator(12*e)
        result = result.composed_op(frob12, mul, monic=True)
    return result
def Billerey_B_l(E,l,B=0):
    r"""
    Return Billerey's `B_l`, adapted from the definition in [Bil2011]_, after (9).
    INPUT:
    - ``E`` -- an elliptic curve over a number field `K`
    - ``l`` (int) -- a rational prime
    - ``B`` (int) -- 0 or LCM of previous `B_l`: the prime-to-B part of this `B_l` is ignored.
    EXAMPLES::
        sage: K = NumberField(x**2 - 29, 'a'); a = K.gen()
        sage: E = EllipticCurve([1, 0, ((5 + a)/2)**2, 0, 0])
        sage: from sage.schemes.elliptic_curves.gal_reps_number_field import Billerey_B_l
        sage: [Billerey_B_l(E,l) for l in primes(15)]
        [1123077552537600,
        227279663773903886745600,
        0,
        0,
        269247154818492941287713746693964214802283882086400,
        0]
    """
    d = E.base_field().absolute_degree()
    P = Billerey_P_l(E, l)
    # Billerey_P_l may return None (see the commented-out branch there)
    # when every prime above l is unramified of degree one.
    if P is None:
        return ZZ.zero()
    # We compute the factors one at a time since if any is 0 we quit:
    B_l = ZZ(1)
    for k in range(1 + d // 2):
        # evaluate P_l^* at l^{12k}; a zero value makes the whole product 0
        factor = ZZ(P(l**(12*k)))
        if factor:
            # keep only the part dividing the running bound B (B=0 keeps all)
            B_l *= factor.gcd(B)
        else:
            return ZZ(0)
    return B_l
def Billerey_R_q(E, q, B=0):
    r"""
    Return Billerey's `R_q`, adapted from the definition in [Bil2011]_, Theorem 2.8.
    INPUT:
    - ``E`` -- an elliptic curve over a number field `K`
    - ``q`` -- a prime ideal of `K`
    - ``B`` (int) -- 0 or LCM of previous `R_q`: the prime-to-B part of this `R_q` is ignored.
    EXAMPLES::
        sage: K = NumberField(x**2 - 29, 'a'); a = K.gen()
        sage: E = EllipticCurve([1, 0, ((5 + a)/2)**2, 0, 0])
        sage: from sage.schemes.elliptic_curves.gal_reps_number_field import Billerey_R_q
        sage: [Billerey_R_q(E,K.prime_above(l)) for l in primes(10)]
        [1123077552537600,
        227279663773903886745600,
        51956919562116960000000000000000,
        252485933820556361829926400000000]
    """
    K = E.base_field()
    d = K.absolute_degree()
    h = K.class_number()
    # Frobenius polynomial at q, raised via the 12h-th Adams operation
    # (q^h is principal since h is the class number).
    P = E.reduction(q).frobenius_polynomial().adams_operator(12*h)
    # Minimal polynomial of a generator of q^h, with the 12th Adams operation.
    Q = ((q**h).gens_reduced()[0]).absolute_minpoly().adams_operator(12)
    # We compute the factors one at a time since if any is 0 we quit:
    R_q = ZZ(1)
    for k in range(1 + d // 2):
        # the following would be in QQ if we did not coerce
        factor = ZZ(P.resultant(Q.compose_power(k)))
        if factor:
            # keep only the part dividing the running bound B (B=0 keeps all)
            R_q *= factor.gcd(B)
        else:
            return ZZ(0)
    return R_q
def Billerey_B_bound(E, max_l=200, num_l=8, small_prime_bound=0, debug=False):
    """
    Compute Billerey's bound `B`.

    We compute `B_l` for `l` up to ``max_l`` (at most) until ``num_l``
    nonzero values are found (at most). Return the list of primes
    dividing all `B_l` computed, excluding those dividing 6 or
    ramified or of bad reduction or less than small_prime_bound. If
    no non-zero values are found return [0].

    INPUT:
    - ``E`` -- an elliptic curve over a number field `K`.
    - ``max_l`` (int, default 200) -- maximum size of primes l to check.
    - ``num_l`` (int, default 8) -- maximum number of primes l to check.
    - ``small_prime_bound`` (int, default 0) -- remove primes less
      than this from the output.
    - ``debug`` (bool, default ``False``) -- if ``True`` prints details.

    .. note::
        The purpose of the small_prime_bound is that it is faster to
        deal with these using the local test; by ignoring them here,
        we enable the algorithm to terminate sooner when there are no
        large reducible primes, which is always the case in practice.

    EXAMPLES::
        sage: K = NumberField(x**2 - 29, 'a'); a = K.gen()
        sage: E = EllipticCurve([1, 0, ((5 + a)/2)**2, 0, 0])
        sage: from sage.schemes.elliptic_curves.gal_reps_number_field import Billerey_B_bound
        sage: Billerey_B_bound(E)
        [5]
    If we do not use enough primes `l`, extraneous primes will be
    included which are not reducible primes::
        sage: Billerey_B_bound(E, num_l=6)
        [5, 7]
    Similarly if we do not use large enough primes `l`::
        sage: Billerey_B_bound(E, max_l=50, num_l=8)
        [5, 7]
        sage: Billerey_B_bound(E, max_l=100, num_l=8)
        [5]
    This curve does have a rational 5-isogeny::
        sage: len(E.isogenies_prime_degree(5))
        1
    """
    if debug:
        # BUGFIX: the original used an unparenthesized conditional expression,
        # "print(A + B if small_prime_bound else '')", which printed nothing
        # at all when small_prime_bound was falsy.  Build the message so the
        # base text is always printed.
        msg = "Computing B-bound for {} with max_l={}, num_l={}".format(E.ainvs(), max_l, num_l)
        if small_prime_bound:
            msg += " (ignoring primes under {})".format(small_prime_bound)
        print(msg)
    B = ZZ.zero()   # running gcd of the nonzero B_l values (0 = uninitialized)
    ells = []       # primes l which contributed a nonzero B_l
    K = E.base_field()
    DK = K.discriminant()
    ED = E.discriminant().norm()
    # Primes dividing B0 (i.e. 6, ramified primes and primes of bad
    # reduction) are excluded from the bound by construction.
    B0 = ZZ(6*DK*ED)

    def remove_primes(B):
        # Strip off primes dividing B0 as well as the small primes which
        # the caller will handle with the (faster) local test.
        B1 = B.prime_to_m_part(B0)
        for p in primes(small_prime_bound):
            B1 = B1.prime_to_m_part(p)
        return B1
    ll = primes(5, max_l)  # iterator over candidate primes l
    while B != 1 and len(ells) < num_l:
        try:
            l = next(ll)
            while B0.valuation(l):  # skip primes dividing B0
                l = next(ll)
        except StopIteration:
            break
        if debug:
            print("..trying l={}".format(l))
        b = Billerey_B_l(E, l, B)
        if b:
            if debug:
                print("..ok, B_l = {}".format(b))
            if B:
                B = B.gcd(b)
            else:
                # first nonzero value: clean it before using it as the bound
                B = remove_primes(b)
            ells.append(l)
            if debug:
                print("..so far, B = {} using l in {}".format(B, ells))
        else:
            if debug:
                print("..B_l=0 for l={}".format(l))
    if B:
        res = [p for p, e in B.factor()]
        if debug:
            print("..returning {}".format(res))
        return res
    # or we failed to find any nonzero values...
    if debug:
        print("..failed to find a bound")
    return [0]
def Billerey_R_bound(E, max_l=200, num_l=8, small_prime_bound=None, debug=False):
    r"""
    Compute Billerey's bound `R`.

    We compute `R_q` for `q` dividing primes `\ell` up to ``max_l``
    (at most) until ``num_l`` nonzero values are found (at most).
    Return the list of primes dividing all ``R_q`` computed, excluding
    those dividing 6 or ramified or of bad reduction or less than
    small_prime_bound. If no non-zero values are found return [0].

    INPUT:
    - ``E`` -- an elliptic curve over a number field `K`.
    - ``max_l`` (int, default 200) -- maximum size of rational primes
      l for which the primes q above l are checked.
    - ``num_l`` (int, default 8) -- maximum number of rational primes
      l for which the primes q above l are checked.
    - ``small_prime_bound`` (int, default 0) -- remove primes less
      than this from the output.
    - ``debug`` (bool, default ``False``) -- if ``True`` prints details.

    .. note::
        The purpose of the small_prime_bound is that it is faster to
        deal with these using the local test; by ignoring them here,
        we enable the algorithm to terminate sooner when there are no
        large reducible primes, which is always the case in practice.

    EXAMPLES::
        sage: K = NumberField(x**2 - 29, 'a'); a = K.gen()
        sage: E = EllipticCurve([1, 0, ((5 + a)/2)**2, 0, 0])
        sage: from sage.schemes.elliptic_curves.gal_reps_number_field import Billerey_R_bound
        sage: Billerey_R_bound(E)
        [5]
    We may get no bound at all if we do not use enough primes::
        sage: Billerey_R_bound(E, max_l=2, debug=False)
        [0]
    Or we may get a bound but not a good one if we do not use enough primes::
        sage: Billerey_R_bound(E, num_l=1, debug=False)
        [5, 17, 67, 157]
    In this case two primes is enough to restrict the set of possible
    reducible primes to just `\{5\}`. This curve does have a rational 5-isogeny::
        sage: Billerey_R_bound(E, num_l=2, debug=False)
        [5]
        sage: len(E.isogenies_prime_degree(5))
        1
    """
    if small_prime_bound is None:
        # Normalize to 0 (matching Billerey_B_bound's default) so that
        # remove_primes below can safely iterate primes(small_prime_bound).
        small_prime_bound = 0
    if debug:
        # BUGFIX: same conditional-expression precedence bug as in
        # Billerey_B_bound -- the base message was swallowed whenever
        # small_prime_bound was falsy.
        msg = "Computing R-bound for {} with max_l={}, num_l={}".format(E.ainvs(), max_l, num_l)
        if small_prime_bound:
            msg += " (ignoring primes under {})".format(small_prime_bound)
        print(msg)
    B = ZZ.zero()   # running gcd of the nonzero R_q values (0 = uninitialized)
    ells = []       # rational primes l which contributed a nonzero R_q
    K = E.base_field()
    DK = K.discriminant()
    ED = E.discriminant().norm()
    # Primes dividing B0 (6, ramified, or bad reduction) are excluded.
    B0 = ZZ(6*DK*ED)

    def remove_primes(B):
        # Strip off primes dividing B0 and the small primes left to the
        # caller's local test.
        B1 = B.prime_to_m_part(B0)
        for p in primes(small_prime_bound):
            B1 = B1.prime_to_m_part(p)
        return B1
    ll = primes(5, max_l)  # iterator over candidate rational primes l
    while len(ells) < num_l and B != 1:
        try:
            l = next(ll)
            while B0.valuation(l):  # skip primes dividing B0
                l = next(ll)
        except StopIteration:
            break
        q = K.prime_above(l)
        if debug:
            print("..trying q={} above l={}".format(q,l))
        b = Billerey_R_q(E,q,B)
        if b:
            if debug:
                print("..ok, R_q = {}, type={}".format(b,type(b)))
            if B:
                B = B.gcd(b)
            else:
                B = remove_primes(b)
            ells.append(l)
            if debug:
                print("..so far, B = {} using l in {}".format(B,ells))
    if B:
        res = B.support()
        if debug:
            print("..returning {}".format(res))
        return res
    # or we failed to find any nonzero values...
    if debug:
        print("..failed to find a bound")
    return [0]
def reducible_primes_Billerey(E, num_l=None, max_l=None, verbose=False):
    r"""
    Return a finite set of primes `\ell` containing all those for which
    `E` has a `K`-rational ell-isogeny, where `K` is the base field of
    `E`: i.e., the mod-`\ell` representation is irreducible for all
    `\ell` outside the set returned.

    INPUT:
    - ``E`` -- an elliptic curve defined over a number field `K`.
    - ``max_l`` (int or ``None`` (default)) -- the maximum prime
      `\ell` to use for the B-bound and R-bound. If ``None``, a
      default value will be used.
    - ``num_l`` (int or ``None`` (default)) -- the number of primes
      `\ell` to use for the B-bound and R-bound. If ``None``, a
      default value will be used.

    .. note::
        If ``E`` has CM then [0] is returned. In this case use the
        function
        sage.schemes.elliptic_curves.isogeny_class.possible_isogeny_degrees

    We first compute Billeray's B_bound using at most ``num_l`` primes
    of size up to ``max_l``. If that fails we compute Billeray's
    R_bound using at most ``num_q`` primes of size up to ``max_q``.
    Provided that one of these methods succeeds in producing a finite
    list of primes we check these using a local condition, and finally
    test that the primes returned actually are reducible. Otherwise
    we return [0].

    EXAMPLES::
        sage: from sage.schemes.elliptic_curves.gal_reps_number_field import reducible_primes_Billerey
        sage: K = NumberField(x**2 - 29, 'a'); a = K.gen()
        sage: E = EllipticCurve([1, 0, ((5 + a)/2)**2, 0, 0])
        sage: reducible_primes_Billerey(E)
        [3, 5]
        sage: K = NumberField(x**2 + 1, 'a')
        sage: E = EllipticCurve_from_j(K(1728)) # CM over K
        sage: reducible_primes_Billerey(E)
        [0]
        sage: E = EllipticCurve_from_j(K(0)) # CM but NOT over K
        sage: reducible_primes_Billerey(E)
        [2, 3]
    An example where a prime is not reducible but passes the test::
        sage: E = EllipticCurve_from_j(K(2268945/128)).global_minimal_model() # c.f. [Sutherland12]
        sage: reducible_primes_Billerey(E)
        [7]
    """
    if verbose:
        print("E = {}, finding reducible primes using Billerey's algorithm".format(E.ainvs()))
    # Set parameters to default values if not given:
    if max_l is None:
        max_l = 200
    if num_l is None:
        num_l = 8
    K = E.base_field()
    DK = K.discriminant()
    ED = E.discriminant().norm()
    # Primes dividing 6, the field discriminant or the norm of the curve
    # discriminant are always included in the candidate list.
    B0 = ZZ(6*DK*ED).prime_divisors() # TODO: only works if discriminant is integral
    # Billeray's algorithm will be faster if we tell it to ignore
    # small primes; these can be tested using the naive algorithm.
    if verbose:
        print("First doing naive test of primes up to {}...".format(max_l))
    max_small_prime = 200
    OK_small_primes = reducible_primes_naive(E, max_l=max_small_prime, num_P=200, verbose=verbose)
    if verbose:
        print("Naive test of primes up to {} returns {}.".format(max_small_prime, OK_small_primes))
    B1 = Billerey_B_bound(E, max_l, num_l, max_small_prime, verbose)
    if B1 == [0]:
        if verbose:
            print("... B_bound ineffective using max_l={}, moving on to R-bound".format(max_l))
        B1 = Billerey_R_bound(E, max_l, num_l, max_small_prime, verbose)
        if B1 == [0]:
            if verbose:
                # BUGFIX: was '"...{}",format(max_l)' -- the comma made this a
                # two-argument print (the builtin format()), so the '{}' was
                # never interpolated into the message.
                print("... R_bound ineffective using max_l={}".format(max_l))
            return [0]
        if verbose:
            print("... R_bound = {}".format(B1))
    else:
        if verbose:
            print("... B_bound = {}".format(B1))
    # Combine the always-included primes, the bound and the locally
    # reducible small primes, then filter with a local Frobenius test.
    B = sorted(set(B0 + B1 + OK_small_primes))
    if verbose:
        print("... combined bound = {}".format(B))
    num_p = 100
    B = Frobenius_filter(E, B, num_p)
    if verbose:
        print("... after Frobenius filter = {}".format(B))
    return B
def reducible_primes_naive(E, max_l=None, num_P=None, verbose=False):
    r"""
    Return locally reducible primes `\ell` up to ``max_l``.

    The list of primes `\ell` returned consists of all those up to
    ``max_l`` such that `E` mod `P` has an `\ell`-isogeny, where `K`
    is the base field of `E`, for ``num_P`` primes `P` of `K`. In
    most cases `E` then has a `K`-rational `\ell`-isogeny, but there
    are rare exceptions.

    INPUT:
    - ``E`` -- an elliptic curve defined over a number field `K`
    - ``max_l`` (int or ``None`` (default)) -- the maximum prime
      `\ell` to test.
    - ``num_P`` (int or ``None`` (default)) -- the number of primes
      `P` of `K` to use in testing each `\ell`.

    EXAMPLES::
        sage: from sage.schemes.elliptic_curves.gal_reps_number_field import reducible_primes_naive
        sage: K.<a> = NumberField(x^4 - 5*x^2 + 3)
        sage: E = EllipticCurve(K, [a^2 - 2, -a^2 + 3, a^2 - 2, -50*a^2 + 35, 95*a^2 - 67])
        sage: reducible_primes_naive(E,num_P=10)
        [2, 5, 53, 173, 197, 241, 293, 317, 409, 557, 601, 653, 677, 769, 773, 797]
        sage: reducible_primes_naive(E,num_P=15)
        [2, 5, 197, 557, 653, 769]
        sage: reducible_primes_naive(E,num_P=20)
        [2, 5]
        sage: reducible_primes_naive(E)
        [2, 5]
        sage: [phi.degree() for phi in E.isogenies_prime_degree()]
        [2, 2, 2, 5]
    """
    # Fill in the default search parameters where not supplied.
    max_l = 1000 if max_l is None else max_l
    num_P = 100 if num_P is None else num_P
    if verbose:
        print("E = {}, finding reducible primes up to {} using Frobenius filter with {} primes".format(E.ainvs(), max_l, num_P))
    # Keep only those primes below max_l that pass the Frobenius test at
    # num_P primes of the base field.
    candidates = Frobenius_filter(E, primes(max_l), num_P)
    if verbose:
        print("... returning {}".format(candidates))
    return candidates
| 34.804969 | 198 | 0.589996 |
e22fcccb7a3ee9ba1f9fcbc2a3fe6fb9cd44d86e | 1,521 | py | Python | examples/multidimensionalexample.py | dish59742/amplpy | 9309a947b74dcc524a07809a68bf32d93e9f0a48 | [
"BSD-3-Clause"
] | 47 | 2017-08-11T16:38:26.000Z | 2022-03-24T08:37:40.000Z | examples/multidimensionalexample.py | Seanpm2001-Python/amplpy | 26c04134b6c4135a541d54c7873d9b2933df039a | [
"BSD-3-Clause"
] | 41 | 2017-08-05T00:54:27.000Z | 2022-03-08T21:56:19.000Z | examples/multidimensionalexample.py | Seanpm2001-Python/amplpy | 26c04134b6c4135a541d54c7873d9b2933df039a | [
"BSD-3-Clause"
] | 22 | 2017-08-05T00:38:43.000Z | 2022-02-02T20:22:10.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, division
from builtins import map, range, object, zip, sorted
import sys
import os
def main(argc, argv):
    """Build a multi-dimensional AMPL set (LINKS) from a DataFrame.

    :param argc: argument count (unused, mirrors a C-style entry point)
    :param argv: argument vector (unused)
    """
    from amplpy import AMPL, DataFrame
    os.chdir(os.path.dirname(__file__) or os.curdir)
    try:
        # Create an AMPL instance
        ampl = AMPL()
        # If the AMPL installation directory is not in the system search path:
        #     from amplpy import Environment
        #     ampl = AMPL(
        #         Environment('full path to the AMPL installation directory'))
        ampl.eval('set CITIES; set LINKS within (CITIES cross CITIES);')
        ampl.eval('param cost {LINKS} >= 0; param capacity {LINKS} >= 0;')
        ampl.eval('data; set CITIES := PITT NE SE BOS EWR BWI ATL MCO;')
        # Parallel arrays describing each link (from-city, to-city) and its data.
        link_from = ['PITT', 'PITT', 'NE', 'NE', 'NE', 'SE', 'SE', 'SE', 'SE']
        link_to = ['NE', 'SE', 'BOS', 'EWR', 'BWI', 'EWR', 'BWI', 'ATL', 'MCO']
        link_cost = [2.5, 3.5, 1.7, 0.7, 1.3, 1.3, 0.8, 0.2, 2.1]
        link_capacity = [250, 250, 100, 100, 100, 100, 100, 100, 100]
        frame = DataFrame(('LINKSFrom', 'LINKSTo'), ('cost', 'capacity'))
        for column, values in (('LINKSFrom', link_from),
                               ('LINKSTo', link_to),
                               ('cost', link_cost),
                               ('capacity', link_capacity)):
            frame.setColumn(column, values)
        print(frame)
        ampl.setData(frame, 'LINKS')
    except Exception as e:
        print(e)
        raise
# Script entry point: forward a C-style (argc, argv) pair to main().
if __name__ == '__main__':
    main(len(sys.argv), sys.argv)
| 32.361702 | 79 | 0.577909 |
41b73a620ca0ece5113da2c3674e0a857f556547 | 833 | py | Python | meiduo_mall/celery_tasks/email/tasks.py | Wang-TaoTao/meiduo_project | f95f097c2a85f500d0fd264a58e2f0d92771fff6 | [
"MIT"
] | null | null | null | meiduo_mall/celery_tasks/email/tasks.py | Wang-TaoTao/meiduo_project | f95f097c2a85f500d0fd264a58e2f0d92771fff6 | [
"MIT"
] | null | null | null | meiduo_mall/celery_tasks/email/tasks.py | Wang-TaoTao/meiduo_project | f95f097c2a85f500d0fd264a58e2f0d92771fff6 | [
"MIT"
] | null | null | null | from django.conf import settings
from django.core.mail import send_mail
from celery_tasks.main import app
from meiduo_mall.settings.dev import logger
@app.task(bind=True, name='send_verify_email', retry_backoff=3)
def send_verify_email(self, to_email, verify_url):
    """
    Celery task: send an email-address verification email.
    :param to_email: recipient email address
    :param verify_url: verification link embedded in the message body
    :return: None
    """
    # NOTE: subject and body are user-facing Chinese strings; they are part
    # of runtime behavior and must not be altered here.
    subject = "美多商城邮箱验证"
    html_message = '<p>尊敬的用户您好!</p>' \
                   '<p>感谢您使用美多商城。</p>' \
                   '<p>您的邮箱为:%s 。请点击此链接激活您的邮箱:</p>' \
                   '<p><a href="%s">%s<a></p>' % (to_email, verify_url, verify_url)
    try:
        send_mail(subject, "", settings.EMAIL_FROM, [to_email], html_message=html_message)
    except Exception as e:
        logger.error(e)
        # On failure, automatically retry the task up to 3 times.
        raise self.retry(exc=e, max_retries=3)
| 28.724138 | 90 | 0.621849 |
96eadf00b79bbb62281b9145d4250247afd772af | 1,622 | py | Python | linter.py | corvisa/SummitLinter | d8dd68e212cff9d4f8184c9f3c20a377af72ed56 | [
"MIT"
] | 22 | 2015-03-11T16:37:14.000Z | 2021-08-03T09:09:12.000Z | linter.py | corvisa/SummitLinter | d8dd68e212cff9d4f8184c9f3c20a377af72ed56 | [
"MIT"
] | null | null | null | linter.py | corvisa/SummitLinter | d8dd68e212cff9d4f8184c9f3c20a377af72ed56 | [
"MIT"
] | 1 | 2018-03-03T16:10:01.000Z | 2018-03-03T16:10:01.000Z | #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
#
# Copyright (c) 2014 CorvisaCloud, LLC
#
# License: MIT
#
"""This module exports the LuaSummit plugin class."""
from SublimeLinter.lint import Linter, util
class SummitLinter(Linter):

    """Provides an interface to luacheck for the SummitEditor syntax."""

    syntax = 'summit'
    tempfile_suffix = 'lua'
    defaults = {
        '--ignore:,': 'channel',
        '--only:,': '',
        '--limit=': None,
        '--globals: ': '',
    }
    comment_re = r'\s*-[-*]'
    inline_settings = ('ignore', 'limit', 'only', 'globals')
    cmd = 'luacheck @ *'
    regex = r'^(?P<filename>[^:]+):(?P<line>\d+):(?P<col>\d+): (?P<message>.*)$'

    # Options whose comma-separated value must be expanded into separate
    # command-line arguments for luacheck.
    _LIST_OPTIONS = ('--ignore', '--only', '--globals')

    def build_args(self, settings):
        """Return args, transforming --ignore, --only, and --globals args into a format luacheck understands."""
        args = super().build_args(settings)
        # The original duplicated an identical try/except block for each
        # option; loop over them instead (same behavior, DRY).
        for option in self._LIST_OPTIONS:
            try:
                index = args.index(option)
            except ValueError:
                # Option not present in the generated args; nothing to expand.
                continue
            # Split the comma-separated arg after the option into separate elements
            args[index + 1:index + 2] = args[index + 1].split(',')
        return args
| 28.964286 | 112 | 0.532676 |
b1d476b2145a7c20c5093bbb3594fb263f1a7766 | 58,848 | py | Python | simtbx/diffBragg/hopper_utils.py | ReliaSolve/cctbx_project | 4a0eebc1f19af94edde170553e1c4be3cffac1b8 | [
"BSD-3-Clause-LBNL"
] | null | null | null | simtbx/diffBragg/hopper_utils.py | ReliaSolve/cctbx_project | 4a0eebc1f19af94edde170553e1c4be3cffac1b8 | [
"BSD-3-Clause-LBNL"
] | 170 | 2020-09-26T19:17:07.000Z | 2022-03-31T21:32:41.000Z | simtbx/diffBragg/hopper_utils.py | ReliaSolve/cctbx_project | 4a0eebc1f19af94edde170553e1c4be3cffac1b8 | [
"BSD-3-Clause-LBNL"
] | null | null | null | from __future__ import absolute_import, division, print_function
import os
from dials.algorithms.shoebox import MaskCode
from copy import deepcopy
from dials.model.data import Shoebox
import numpy as np
from scipy.optimize import dual_annealing, basinhopping
from collections import Counter
from scitbx.matrix import sqr, col
from dxtbx.model.experiment_list import ExperimentListFactory
from simtbx.nanoBragg.utils import downsample_spectrum
from dials.array_family import flex
from simtbx.diffBragg import utils
from simtbx.diffBragg.refiners.parameters import RangedParameter, Parameters
from simtbx.diffBragg.attr_list import NB_BEAM_ATTRS, NB_CRYST_ATTRS, DIFFBRAGG_ATTRS
try:
from line_profiler import LineProfiler
except ImportError:
LineProfiler = None
import logging
MAIN_LOGGER = logging.getLogger("diffBragg.main")
PROFILE_LOGGER = logging.getLogger("diffBragg.profile")
ROTX_ID = 0
ROTY_ID = 1
ROTZ_ID = 2
ROTXYZ_IDS = ROTX_ID, ROTY_ID, ROTZ_ID
NCELLS_ID = 9
UCELL_ID_OFFSET = 3
DETZ_ID = 10
FHKL_ID = 11
ETA_ID = 19
DIFFUSE_ID = 23
DEG = 180 / np.pi
def write_SIM_logs(SIM, log=None, lam=None):
    """
    Record properties of SIM.D (diffBragg instance), SIM.beam and SIM.crystal
    (nanoBragg beam and crystal). These are important for reproducing results.
    :param SIM: sim_data instance used during hopper refinement, member of the data modeler
    :param log: optional log file to dump attributes of SIM
    :param lam: optional lambda file to dump the spectra to disk, can be read using diffBragg.utils.load_spectra_file
    """
    if log is not None:
        # (banner, section title, object, attribute names) for each section
        sections = (
            ("<><><><><>", "DIFFBRAGG", SIM.D, DIFFBRAGG_ATTRS),
            ("<><><>", "BEAM", SIM.beam, NB_BEAM_ATTRS),
            ("<><><><>", "CRYSTAL", SIM.crystal, NB_CRYST_ATTRS),
        )
        with open(log, "w") as log_file:
            for i_sec, (banner, title, obj, attr_names) in enumerate(sections):
                # a blank line separates sections (none before the first)
                lead = "" if i_sec == 0 else "\n"
                print(lead + banner, file=log_file)
                print(title, file=log_file)
                print(banner, file=log_file)
                for attr in attr_names:
                    print(attr + ": ", getattr(obj, attr), file=log_file)
    if lam is not None:
        wavelens, weights = zip(*SIM.beam.spectrum)
        utils.save_spectra_file(lam, wavelens, weights)
def free_SIM_mem(SIM):
    """
    Frees memory allocated to host CPU (and GPU device, if applicable).
    Using this method is critical for serial applications!
    :param SIM: sim_data instance used during hopper refinement, member of the data modeler
    """
    SIM.D.free_all()  # release the main diffBragg allocations
    SIM.D.free_Fhkl2()  # release the secondary structure-factor array
    try:
        SIM.D.gpu_free()
    except TypeError:
        pass # occurs on CPU-only builds
def finalize_SIM(SIM, log=None, lam=None):
    """
    thin wrapper to free_SIM_mem and write_SIM_logs
    :param SIM: sim_data instance used during hopper refinement, member of the data modeler
    :param log: optional log file to dump attributes of SIM
    :param lam: optional lambda file to dump the spectra to disk, can be read using diffBragg.utils.load_spectra_file
    """
    # NOTE: logs must be written before freeing, since write_SIM_logs reads SIM attributes
    write_SIM_logs(SIM, log, lam)
    free_SIM_mem(SIM)
class DataModeler:
"""
The data modeler stores information in two ways:
1- lists whose length is the number of pixels being modeled
2- lists whose length is the number of shoeboxes being modeled
for example if one is modeling 3 shoeboxes whose dimensions are 10x10, then
the objects below like self.all_data will have length 300, and other objects like self.selection_flags
will have length 3
"""
def __init__(self, params):
""" params is a simtbx.diffBragg.hopper phil"""
self.no_rlp_info = False # whether rlps are stored in the refls table
self.params = params # phil params (see diffBragg/phil.py)
self._abs_path_params()
self.SIM = None # simulator object (instance of nanoBragg.sim_data.SimData
self.E = None # placeholder for the experiment
self.pan_fast_slow =None # (pid, fast, slow) per pixel
self.all_background =None # background model per pixel (photon units)
self.roi_id =None # region of interest ID per pixel
self.u_id = None # set of unique region of interest ids
self.all_freq = None # flag for the h,k,l frequency of the observed pixel
self.best_model = None # best model value at each pixel
self.all_data =None # data at each pixel (photon units)
self.all_gain = None # gain value per pixel (used during diffBragg/refiners/stage_two_refiner)
self.all_sigmas =None # error model for each pixel (photon units)
self.all_trusted =None # trusted pixel flags (True is trusted and therefore used during refinement)
self.npix_total =None # total number of pixels
self.all_fast =None # fast-scan coordinate per pixel
self.all_slow =None # slow-scan coordinate per pixel
self.all_pid = None # panel id per pixel
self.rois=None # region of interest (per spot)
self.pids=None # panel id (per spot)
self.tilt_abc=None # background plane constants (per spot), a,b are fast,slow scan components, c is offset
self.selection_flags=None # whether the spot was selected for refinement (sometimes poorly conditioned spots are rejected)
self.background=None # background for entire image (same shape as the detector)
self.tilt_cov = None # covariance estimates from background fitting (not used)
self.simple_weights = None # not used
self.refls_idx = None # position of modeled spot in original refl array
self.refls = None # reflection table
self.sigma_rdout = None # the value of the readout noise in photon units
self.Hi = None # miller index (P1)
self.Hi_asu = None # miller index (high symmetry)
# which attributes to save when pickling a data modeler
self.saves = ["all_data", "all_background", "all_trusted", "best_model", "sigma_rdout",
"rois", "pids", "tilt_abc", "selection_flags", "refls_idx", "pan_fast_slow",
"Hi", "Hi_asu", "roi_id", "params", "all_pid", "all_fast", "all_slow"]
def _abs_path_params(self):
"""adds absolute path to certain params"""
self.params.simulator.structure_factors.mtz_name = os.path.abspath(self.params.simulator.structure_factors.mtz_name)
def __getstate__(self):
# TODO cleanup/compress
return {name: getattr(self, name) for name in self.saves}
def __setstate__(self, state):
for name in state:
setattr(self, name, state[name])
    def clean_up(self):
        """Release all host (and GPU, if present) memory held by the simulator."""
        free_SIM_mem(self.SIM)
def set_experiment(self, exp, load_imageset=True):
if isinstance(exp, str):
self.E = ExperimentListFactory.from_json_file(exp, load_imageset)[0]
else:
self.E = exp
if self.params.opt_det is not None:
opt_det_E = ExperimentListFactory.from_json_file(self.params.opt_det, False)[0]
self.E.detector = opt_det_E.detector
MAIN_LOGGER.info("Set the optimal detector from %s" % self.params.opt_det)
if self.params.opt_beam is not None:
opt_beam_E = ExperimentListFactory.from_json_file(self.params.opt_beam, False)[0]
self.E.beam = opt_beam_E.beam
MAIN_LOGGER.info("Set the optimal beam from %s" % self.params.opt_beam)
def load_refls(self, ref):
if isinstance(ref, str):
refls = flex.reflection_table.from_file(ref)
else:
# assert is a reflection table. ..
refls = ref
return refls
def is_duplicate_hkl(self, refls):
nref = len(refls)
is_duplicate = np.zeros(nref, bool)
if len(set(refls['miller_index'])) < nref:
hkls = refls['miller_index']
dupe_hkl = {h for h, count in Counter(hkls).items() if count > 1}
for i_ref in range(nref):
hh = refls[i_ref]['miller_index']
is_duplicate[i_ref] = hh in dupe_hkl
return is_duplicate
    def GatherFromReflectionTable(self, exp, ref, sg_symbol=None):
        """
        Populate the modeler from shoeboxes stored in a reflection table
        (no imageset is loaded; pixel data come from the shoeboxes).

        :param exp: experiment object or path to a .expt file
        :param ref: reflection table (or path to .refl) containing shoeboxes
        :param sg_symbol: optional space-group lookup symbol; if given, Miller
            indices are mapped to the ASU for Hi_asu
        :return: True if data were gathered, False if the table was empty
        """
        self.set_experiment(exp, load_imageset=False)
        self.refls = self.load_refls(ref)
        nref = len(self.refls)
        if nref ==0:
            return False
        self.refls_idx = list(range(nref))
        self.rois = [(x1, x2, y1, y2) for x1,x2,y1,y2,_,_ in self.refls["shoebox"].bounding_boxes()]
        self.pids = list(self.refls["panel"])
        npan = len(self.E.detector)
        nfast, nslow = self.E.detector[0].get_image_size() # NOTE assumes all panels same shape
        # full-detector arrays filled from each shoebox below
        img_data = np.zeros((npan, nslow, nfast))
        background = np.zeros_like(img_data)
        is_trusted = np.zeros((npan, nslow, nfast), bool)
        for i_ref in range(nref):
            ref = self.refls[i_ref]
            pid = ref['panel']
            x1, x2, y1, y2 = self.rois[i_ref]
            # these are the in-bounds limits (on the panel)
            x1_onPanel = max(x1,0)
            x2_onPanel = min(x2,nfast)
            y1_onPanel = max(y1,0)
            y2_onPanel = min(y2,nslow)
            xdim = x2_onPanel-x1_onPanel
            ydim = y2_onPanel-y1_onPanel
            sb = ref['shoebox']
            # offsets into the shoebox arrays corresponding to the clipped ROI
            sb_ystart = y1_onPanel - y1
            sb_xstart = x1_onPanel - x1
            sb_sliceY = slice(sb_ystart, sb_ystart+ydim,1)
            sb_sliceX = slice(sb_xstart, sb_xstart+xdim,1)
            dat_sliceY = slice(y1_onPanel, y1_onPanel+ydim,1)
            dat_sliceX = slice(x1_onPanel, x1_onPanel+xdim,1)
            # copy shoebox data/background into the detector-shaped arrays
            img_data[pid, dat_sliceY, dat_sliceX] = sb.data.as_numpy_array()[0,sb_sliceY,sb_sliceX]
            sb_bkgrnd = sb.background.as_numpy_array()[0,sb_sliceY,sb_sliceX]
            background[pid, dat_sliceY, dat_sliceX] = sb_bkgrnd
            fg_code = MaskCode.Valid + MaskCode.Foreground # 5
            bg_code = MaskCode.Valid + MaskCode.Background + MaskCode.BackgroundUsed # 19
            mask = sb.mask.as_numpy_array()[0,sb_sliceY,sb_sliceX]
            # select trusted pixels according to the configured policy
            if self.params.refiner.refldata_trusted=="allValid":
                sb_trust = mask > 0
            elif self.params.refiner.refldata_trusted=="fg":
                sb_trust = mask==fg_code
            else:
                sb_trust = np.logical_or(mask==fg_code, mask==bg_code)
            below_zero = sb_bkgrnd <= 0
            if np.any(below_zero):
                nbelow = np.sum(below_zero)
                ntot = sb_bkgrnd.size
                MAIN_LOGGER.debug("background <= zero in %d/%d pixels from shoebox %d! Marking those pixels as untrusted!" % ( nbelow, ntot, i_ref ))
                sb_trust[below_zero] = False
            is_trusted[pid, dat_sliceY,dat_sliceX] = sb_trust
            # record the clipped (in-bounds) ROI for downstream use
            self.rois[i_ref] = x1_onPanel, x2_onPanel, y1_onPanel, y2_onPanel
        if self.params.refiner.refldata_to_photons:
            MAIN_LOGGER.debug("Re-scaling reflection data to photon units: conversion factor=%f" % self.params.refiner.adu_per_photon)
            img_data /= self.params.refiner.adu_per_photon
            background /= self.params.refiner.adu_per_photon
        # can be used for Bfactor modeling
        self.Q = np.linalg.norm(self.refls["rlp"], axis=1)
        self.sigma_rdout = self.params.refiner.sigma_r / self.params.refiner.adu_per_photon
        self.Hi = list(self.refls["miller_index"])
        if sg_symbol is not None:
            self.Hi_asu = utils.map_hkl_list(self.Hi, True, sg_symbol)
        else:
            self.Hi_asu = self.Hi
        self.data_to_one_dim(img_data, is_trusted, background)
        return True
    def GatherFromExperiment(self, exp, ref, remove_duplicate_hkl=True, sg_symbol=None):
        """
        Populate the modeler from an experiment's imageset plus a reflection
        table: load pixel data, fit backgrounds, and select refinable spots.

        :param exp: experiment object or path to a .expt file (imageset is loaded)
        :param ref: reflection table or path to a .refl file
        :param remove_duplicate_hkl: if True, deselect spots whose Miller index
            appears more than once (requires rlp info)
        :param sg_symbol: optional space-group lookup symbol used to map Miller
            indices to the ASU
        :return: True if data were gathered, False otherwise (empty table, no
            usable ROIs, or no spots surviving selection)
        """
        self.set_experiment(exp, load_imageset=True)
        refls = self.load_refls(ref)
        if len(refls)==0:
            MAIN_LOGGER.warning("no refls loaded!")
            return False
        # ensure the table has reciprocal-lattice-point info, if possible
        if "rlp" not in list(refls[0].keys()):
            try:
                utils.add_rlp_column(refls, self.E)
                assert "rlp" in list(refls[0].keys())
            except KeyError:
                self.no_rlp_info = True
        img_data = utils.image_data_from_expt(self.E)
        img_data /= self.params.refiner.adu_per_photon
        is_trusted = np.ones(img_data.shape, bool)
        hotpix_mask = None
        if self.params.roi.hotpixel_mask is not None:
            is_trusted = utils.load_mask(self.params.roi.hotpixel_mask)
            hotpix_mask = ~is_trusted
        self.sigma_rdout = self.params.refiner.sigma_r / self.params.refiner.adu_per_photon
        # compute ROIs, background plane fits and per-spot selection flags
        roi_packet = utils.get_roi_background_and_selection_flags(
            refls, img_data, shoebox_sz=self.params.roi.shoebox_size,
            reject_edge_reflections=self.params.roi.reject_edge_reflections,
            reject_roi_with_hotpix=self.params.roi.reject_roi_with_hotpix,
            background_mask=None, hotpix_mask=hotpix_mask,
            bg_thresh=self.params.roi.background_threshold,
            use_robust_estimation=not self.params.roi.fit_tilt,
            set_negative_bg_to_zero=self.params.roi.force_negative_background_to_zero,
            pad_for_background_estimation=self.params.roi.pad_shoebox_for_background_estimation,
            sigma_rdout=self.sigma_rdout, deltaQ=self.params.roi.deltaQ, experiment=self.E,
            weighted_fit=self.params.roi.fit_tilt_using_weights,
            tilt_relative_to_corner=self.params.relative_tilt, ret_cov=True)
        if roi_packet is None:
            return False
        self.rois, self.pids, self.tilt_abc, self.selection_flags, self.background, self.tilt_cov = roi_packet
        if remove_duplicate_hkl and not self.no_rlp_info:
            is_not_a_duplicate = ~self.is_duplicate_hkl(refls)
            self.selection_flags = np.logical_and( self.selection_flags, is_not_a_duplicate)
        if self.params.refiner.res_ranges is not None:
            # TODO add res ranges support for GatherFromReflectionTable
            if self.no_rlp_info:
                raise NotImplementedError("Cannot set resolution limits when processing refls that are missing the RLP column")
            res_flags = np.zeros(len(refls)).astype(bool)
            res = 1. / np.linalg.norm(refls["rlp"], axis=1)
            for dmin,dmax in utils.parse_reso_string(self.params.refiner.res_ranges):
                MAIN_LOGGER.debug("Parsing res range %.3f - %.3f Angstrom" % (dmin, dmax))
                in_resShell = np.logical_and(res >= dmin, res <= dmax)
                res_flags[in_resShell] = True
            MAIN_LOGGER.info("Resolution filter removed %d/%d refls outside of all resolution ranges " \
                             % (sum(~res_flags), len(refls)))
            self.selection_flags[~res_flags] = False
        if sum(self.selection_flags) == 0:
            # NOTE(review): "slected" typo in this log message; left as-is
            # since runtime strings are not altered here.
            MAIN_LOGGER.info("No pixels slected, continuing")
            return False
        # keep only the selected spots in the per-spot bookkeeping lists
        self.refls = refls
        self.refls_idx = [i_roi for i_roi in range(len(refls)) if self.selection_flags[i_roi]]
        self.rois = [roi for i_roi, roi in enumerate(self.rois) if self.selection_flags[i_roi]]
        self.tilt_abc = [abc for i_roi, abc in enumerate(self.tilt_abc) if self.selection_flags[i_roi]]
        self.pids = [pid for i_roi, pid in enumerate(self.pids) if self.selection_flags[i_roi]]
        self.tilt_cov = [cov for i_roi, cov in enumerate(self.tilt_cov) if self.selection_flags[i_roi]]
        if not self.no_rlp_info:
            self.Q = [np.linalg.norm(refls[i_roi]["rlp"]) for i_roi in range(len(refls)) if self.selection_flags[i_roi]]
        refls = refls.select(flex.bool(self.selection_flags))
        if "miller_index" in list(refls.keys()):
            self.Hi = list(refls["miller_index"])
            if sg_symbol is not None:
                self.Hi_asu = utils.map_hkl_list(self.Hi, True, sg_symbol)
            else:
                self.Hi_asu = self.Hi
        self.data_to_one_dim(img_data, is_trusted, self.background)
        return True
def data_to_one_dim(self, img_data, is_trusted, background):
    """
    Flatten all per-ROI pixel data into the 1-D arrays consumed by the refiner.

    :param img_data: 3-D array of image pixels indexed (panel_id, slow, fast)
    :param is_trusted: 3-D bool array, same shape as img_data (trusted-pixel mask)
    :param background: 3-D array, same shape as img_data, per-pixel background estimate

    Side effects: sets the flattened per-pixel attributes used by Minimize/model:
    all_data, all_sigmas, all_trusted, all_background, all_pid, all_fast, all_slow,
    roi_id, pan_fast_slow, all_refls_idx, all_freq, npix_total, u_id, all_q_perpix,
    and (when HKLs are known) all_nominal_hkl / hi_asu_perpix.
    :raises ValueError: if ROIs overlap and params.roi.allow_overlapping_spots is False
    """
    all_data = []
    all_pid = []
    all_fast = []
    all_slow = []
    all_fast_relative = []
    all_slow_relative = []
    all_trusted = []
    all_sigmas = []
    all_background = []
    roi_id = []
    all_q_perpix = []
    all_refls_idx = []
    # counts how many ROIs touch each pixel; used below for overlap detection
    pixel_counter = np.zeros_like(img_data)
    self.all_nominal_hkl = []
    self.hi_asu_perpix = []
    for i_roi in range(len(self.rois)):
        pid = self.pids[i_roi]
        x1, x2, y1, y2 = self.rois[i_roi]
        Y, X = np.indices((y2 - y1, x2 - x1))
        data = img_data[pid, y1:y2, x1:x2].copy()
        pixel_counter[pid, y1:y2, x1:x2] += 1
        data = data.ravel()
        all_background += list(background[pid, y1:y2, x1:x2].ravel())
        trusted = is_trusted[pid, y1:y2, x1:x2].ravel()
        # TODO implement per-shot masking here
        #lower_cut = np.percentile(data, 20)
        #trusted[data < lower_cut] = False
        #d_strong_order = np.argsort(data)
        #trusted[d_strong_order[-1:]] = False
        all_trusted += list(trusted)
        #TODO ignore invalid value warning (handled below), or else mitigate it!
        # Poisson + readout-noise error model; NaNs can appear where the sqrt
        # argument is negative -- those pixels are masked out further down
        all_sigmas += list(np.sqrt(data + self.sigma_rdout ** 2))
        all_fast += list(X.ravel() + x1)
        all_fast_relative += list(X.ravel())
        all_slow += list(Y.ravel() + y1)
        all_slow_relative += list(Y.ravel())
        all_data += list(data)
        npix = len(data) # np.sum(trusted)
        all_pid += [pid] * npix
        roi_id += [i_roi] * npix
        all_refls_idx += [self.refls_idx[i_roi]] * npix
        if not self.no_rlp_info:
            all_q_perpix += [self.Q[i_roi]]*npix
        if self.Hi is not None:
            self.all_nominal_hkl += [tuple(self.Hi[i_roi])]*npix
            self.hi_asu_perpix += [self.Hi_asu[i_roi]] * npix
    # second pass: record, for every pixel, how many ROIs contain it
    all_freq = []
    for i_roi in range(len(self.rois)):
        pid = self.pids[i_roi]
        x1, x2, y1, y2 = self.rois[i_roi]
        freq = pixel_counter[pid, y1:y2, x1:x2].ravel()
        all_freq += list(freq)
    self.all_freq = np.array(all_freq) # if no overlapping pixels, this should be an array of 1's
    if not self.params.roi.allow_overlapping_spots:
        if not np.all(self.all_freq==1):
            print(set(self.all_freq))
            raise ValueError("There are overlapping regions of interest, despite the command to not allow overlaps")
    self.all_q_perpix = np.array(all_q_perpix)
    # interleaved (panel, fast, slow) triplets -- the flat layout passed to
    # diffBragg's add_diffBragg_spots in model()
    pan_fast_slow = np.ascontiguousarray((np.vstack([all_pid, all_fast, all_slow]).T).ravel())
    self.pan_fast_slow = flex.size_t(pan_fast_slow)
    self.all_background = np.array(all_background)
    self.roi_id = np.array(roi_id)
    self.all_data = np.array(all_data)
    self.all_sigmas = np.array(all_sigmas)
    # note rare chance for sigmas to be nan if the args of sqrt is below 0
    self.all_trusted = np.logical_and(np.array(all_trusted), ~np.isnan(all_sigmas))
    self.npix_total = len(all_data)
    self.all_fast = np.array(all_fast)
    self.all_slow = np.array(all_slow)
    self.all_pid = np.array(all_pid)
    #self.simple_weights = 1/self.all_sigmas**2
    self.u_id = set(self.roi_id)
    self.all_refls_idx = np.array(all_refls_idx)
    MAIN_LOGGER.debug("Modeler has %d/ %d trusted pixels" % (self.all_trusted.sum() , self.npix_total))
def dump_gathered_to_refl(self, output_name, do_xyobs_sanity_check=False):
    """after running GatherFromExperiment, dump the gathered results
    (data, background etc) to a new reflection file which can then be used to run
    diffBragg without the raw data in the experiment (this exists mainly for portability, and
    unit tests)

    :param output_name: path of the reflection file to write
    :param do_xyobs_sanity_check: if True, assert each reflection's observed
        centroid falls inside its ROI bounding box before writing
    """
    shoeboxes = []
    R = flex.reflection_table()
    for i_roi, i_ref in enumerate(self.refls_idx):
        # pull this ROI's flattened pixels back into a 2-D image
        roi_sel = self.roi_id==i_roi
        x1, x2, y1, y2 = self.rois[i_roi]
        roi_shape = y2-y1, x2-x1
        roi_img = self.all_data[roi_sel].reshape(roi_shape).astype(np.float32)  #NOTE this has already been converted to photon units
        roi_bg = self.all_background[roi_sel].reshape(roi_shape).astype(np.float32)
        # store data/background/mask in a dials Shoebox (bbox has unit depth in z)
        sb = Shoebox((x1, x2, y1, y2, 0, 1))
        sb.allocate()
        sb.data = flex.float(np.ascontiguousarray(roi_img[None]))
        sb.background = flex.float(np.ascontiguousarray(roi_bg[None]))
        dials_mask = np.zeros(roi_img.shape).astype(np.int32)
        mask = self.all_trusted[roi_sel].reshape(roi_shape)
        dials_mask[mask] = dials_mask[mask] + MaskCode.Valid
        sb.mask = flex.int(np.ascontiguousarray(dials_mask[None]))
        # quick sanity test
        if do_xyobs_sanity_check:
            ref = self.refls[i_ref]
            x,y,_ = ref['xyzobs.px.value']
            assert x1 <= x <= x2, "exp %s; refl %d, %f %f %f" % (output_name, i_ref, x1,x,x2)
            assert y1 <= y <= y2, "exp %s; refl %d, %f %f %f" % (output_name, i_ref, y1,y,y2)
        R.extend(self.refls[i_ref: i_ref+1])
        shoeboxes.append(sb)
    R['shoebox'] = flex.shoebox(shoeboxes)
    R.as_file(output_name)
def SimulatorFromExperiment(self, best=None):
    """
    Build self.SIM (the diffBragg simulator) from self.E and self.params, and
    populate self.SIM.P with RangedParameter objects for every refinable model
    component (diffuse gamma/sigma, Ncells, rotations, eta, scale, unit cell,
    detector z shift).

    :param best: optional single row of a pandas dataframe containing the starting
        models (scale, rotations, unit cell, ncells, detz shift), presumably
        optimized from a previous minimization using this program
    """
    ParameterType = RangedParameter
    if best is not None:
        # set the crystal Umat (rotational displacement) and Bmat (unit cell)
        # Umatrix
        # NOTE: just set the best Amatrix here
        if self.params.apply_best_crystal_model:
            xax = col((-1, 0, 0))
            yax = col((0, -1, 0))
            zax = col((0, 0, -1))
            rotX,rotY,rotZ = best[["rotX", "rotY", "rotZ"]].values[0]
            RX = xax.axis_and_angle_as_r3_rotation_matrix(rotX, deg=False)
            RY = yax.axis_and_angle_as_r3_rotation_matrix(rotY, deg=False)
            RZ = zax.axis_and_angle_as_r3_rotation_matrix(rotZ, deg=False)
            M = RX * RY * RZ
            U = M * sqr(self.E.crystal.get_U())
            self.E.crystal.set_U(U)
            # Bmatrix:
            ucparam = best[["a","b","c","al","be","ga"]].values[0]
            ucman = utils.manager_from_params(ucparam)
            self.E.crystal.set_B(ucman.B_recipspace)
        ## TODO , currently need this anyway
        ucparam = best[["a","b","c","al","be","ga"]].values[0]
        ucman = utils.manager_from_params(ucparam)
        self.E.crystal.set_B(ucman.B_recipspace)
        # mosaic block
        self.params.init.Nabc = tuple(best.ncells.values[0])
        # scale factor
        self.params.init.G = best.spot_scales.values[0]
        if "detz_shift_mm" in list(best):
            self.params.init.detz_shift = best.detz_shift_mm.values[0]
    # TODO: set best eta_abc params
    # TODO: choose a phil param and remove support for the other: crystal.anositropic_mosaicity, or init.eta_abc
    MAIN_LOGGER.info("Setting initial anisotropic mosaicity from params.init.eta_abc: %f %f %f" % tuple(self.params.init.eta_abc))
    if self.params.simulator.crystal.has_isotropic_mosaicity:
        self.params.simulator.crystal.anisotropic_mosaicity = None
    else:
        self.params.simulator.crystal.anisotropic_mosaicity = self.params.init.eta_abc
    MAIN_LOGGER.info("Number of mosaic domains from params: %d" % self.params.simulator.crystal.num_mosaicity_samples)
    self.SIM = utils.simulator_from_expt_and_params(self.E, self.params)
    if self.SIM.D.mosaic_domains > 1:
        MAIN_LOGGER.info("Will use mosaic models: %d domains" % self.SIM.D.mosaic_domains)
    else:
        MAIN_LOGGER.info("Will not use mosaic models, as simulator.crystal.num_mosaicity_samples=1")
    if not self.params.fix.diffuse_gamma or not self.params.fix.diffuse_sigma:
        assert self.params.use_diffuse_models
    self.SIM.D.use_diffuse = self.params.use_diffuse_models
    self.SIM.D.gamma_miller_units = self.params.gamma_miller_units
    self.SIM.isotropic_diffuse_gamma = self.params.isotropic.diffuse_gamma
    self.SIM.isotropic_diffuse_sigma = self.params.isotropic.diffuse_sigma
    if self.params.spectrum_from_imageset:
        downsamp_spec(self.SIM, self.params, self.E)
    self.SIM.D.no_Nabc_scale = self.params.no_Nabc_scale  # TODO check gradients for this setting
    self.SIM.D.update_oversample_during_refinement = False
    self.SIM.num_xtals = self.params.number_of_xtals
    # shorthand handles into the phil scope
    init = self.params.init
    sigma = self.params.sigmas
    mins = self.params.mins
    maxs = self.params.maxs
    centers = self.params.centers
    betas = self.params.betas
    fix = self.params.fix
    P = Parameters()
    for i_xtal in range(self.SIM.num_xtals):
        for ii in range(3):
            p = ParameterType(init=init.diffuse_gamma[ii], sigma=sigma.diffuse_gamma[ii],
                              minval=mins.diffuse_gamma[ii], maxval=maxs.diffuse_gamma[ii],
                              fix=fix.diffuse_gamma, name="diffuse_gamma%d" % ii,
                              center=centers.diffuse_gamma[ii], beta=betas.diffuse_gamma[ii])
            P.add(p)
            p = ParameterType(init=init.diffuse_sigma[ii], sigma=sigma.diffuse_sigma[ii],
                              minval=mins.diffuse_sigma[ii], maxval=maxs.diffuse_sigma[ii],
                              fix=fix.diffuse_sigma, name="diffuse_sigma%d" % ii,
                              center=centers.diffuse_sigma[ii], beta=betas.diffuse_sigma[ii])
            P.add(p)
            p = ParameterType(init=init.Nabc[ii], sigma=sigma.Nabc[ii],
                              minval=mins.Nabc[ii], maxval=maxs.Nabc[ii],
                              fix=fix.Nabc,name="Nabc%d" % ii,
                              center=centers.Nabc[ii], beta=betas.Nabc[ii])
            P.add(p)
            # rotations are perturbations about the current Umat, hence init=0
            p = ParameterType(init=0, sigma=sigma.RotXYZ[ii],
                              minval=mins.RotXYZ[ii], maxval=maxs.RotXYZ[ii],
                              fix=fix.RotXYZ, name="RotXYZ%d" %ii,
                              center=centers.RotXYZ[ii], beta=betas.RotXYZ)
            P.add(p)
            # only refine eta_abc0 for isotropic spread model
            fix_eta = fix.eta_abc
            if not fix_eta and not self.SIM.D.has_anisotropic_mosaic_spread and ii >0:
                # BUGFIX: this previously assigned False, a no-op (fix_eta is
                # already False on this branch); the comment above requires the
                # higher eta components to be FIXED when the spread is isotropic
                fix_eta = True
            p = ParameterType(init=init.eta_abc[ii], sigma=sigma.eta_abc[ii],
                              minval=mins.eta_abc[ii], maxval=maxs.eta_abc[ii],
                              fix=fix_eta, name="eta_abc%d" % ii,
                              center=centers.eta_abc[ii], beta=betas.eta_abc[ii])
            P.add(p)
    p = ParameterType(init=init.G, sigma=sigma.G,
                      minval=mins.G, maxval=maxs.G,
                      fix=fix.G, name="G",
                      center=centers.G, beta=betas.G)
    P.add(p)
    # unit cell parameters: edges bounded by +/- ucell_edge_perc percent,
    # angles by +/- ucell_ang_abs degrees (stored internally in radians)
    ucell_man = utils.manager_from_crystal(self.E.crystal)
    ucell_vary_perc = self.params.ucell_edge_perc / 100.
    for i_uc, (name, val) in enumerate(zip(ucell_man.variable_names, ucell_man.variables)):
        if "Ang" in name:
            minval = val - ucell_vary_perc * val
            maxval = val + ucell_vary_perc * val
        else:
            val_in_deg = val * 180 / np.pi
            minval = (val_in_deg - self.params.ucell_ang_abs) * np.pi / 180.
            maxval = (val_in_deg + self.params.ucell_ang_abs) * np.pi / 180.
        p = ParameterType(init=val, sigma=sigma.ucell[i_uc],
                          minval=minval, maxval=maxval, fix=fix.ucell,
                          name="ucell%d" % i_uc, center=centers.ucell[i_uc],
                          beta=betas.ucell[i_uc])
        MAIN_LOGGER.info(
            "Unit cell variable %s (currently=%f) is bounded by %f and %f" % (name, val, minval, maxval))
        P.add(p)
    self.SIM.ucell_man = ucell_man
    # detector z shift: phil values are in mm, parameter stored in meters
    p = ParameterType(init=init.detz_shift*1e-3, sigma=sigma.detz_shift,
                      minval=mins.detz_shift*1e-3, maxval=maxs.detz_shift*1e-3,
                      fix=fix.detz_shift,name="detz_shift",
                      center=centers.detz_shift,
                      beta=betas.detz_shift)
    P.add(p)
    self.SIM.P = P
def get_data_model_pairs(self):
    """
    Reassemble per-ROI 2-D images from the flattened per-pixel arrays.

    :return: four lists, one entry per ROI:
        data images, model images (bragg + background when background is known),
        trusted masks (or None entries when no mask is stored), and bragg-only
        model images (or None entries when the stored model is assumed to
        already include background)
    :raises ValueError: if best_model has not been set yet
    """
    if self.best_model is None:
        # BUGFIX: the message previously read "with setting best_model attribute",
        # inverting its meaning
        raise ValueError("cannot get the best model without setting the best_model attribute")
    all_dat_img, all_mod_img = [], []
    all_trusted = []
    all_bragg = []
    for i_roi in range(len(self.rois)):
        x1, x2, y1, y2 = self.rois[i_roi]
        mod = self.best_model[self.roi_id == i_roi].reshape((y2 - y1, x2 - x1))
        if self.all_trusted is not None:
            trusted = self.all_trusted[self.roi_id == i_roi].reshape((y2 - y1, x2 - x1))
            all_trusted.append(trusted)
        else:
            all_trusted.append(None)
        # dat = img_data[pid, y1:y2, x1:x2]
        dat = self.all_data[self.roi_id == i_roi].reshape((y2 - y1, x2 - x1))
        all_dat_img.append(dat)
        if self.all_background is not None:
            bg = self.all_background[self.roi_id==i_roi].reshape((y2-y1, x2-x1))
            # assume mod does not contain background
            all_bragg.append(mod)
            all_mod_img.append(mod+bg)
        else:  # assume mod contains background
            all_mod_img.append(mod)
            all_bragg.append(None)
    return all_dat_img, all_mod_img, all_trusted, all_bragg
def Minimize(self, x0):
    """
    Run the global minimizer (basinhopping or dual_annealing) over the
    reparameterized model vector.

    :param x0: initial full parameter list (one entry per parameter in SIM.P)
    :return: the full parameter array with the refined values written back
        into the varying positions
    """
    target = TargetFunc(SIM=self.SIM, niter_per_J=self.params.niter_per_J, profile=self.params.profile)
    # set up the refinement flags
    vary = np.ones(len(x0), bool)
    assert len(x0) == len(self.SIM.P)
    for p in self.SIM.P.values():
        if not p.refine:
            vary[p.xpos] = False
    target.vary = vary  # fixed flags
    target.x0 = np.array(x0, np.float64)  # initial full parameter list
    x0_for_refinement = target.x0[vary]
    if self.params.method is None:
        method = "Nelder-Mead"
    else:
        method = self.params.method
    maxfev = None
    if self.params.nelder_mead_maxfev is not None:
        maxfev = self.params.nelder_mead_maxfev * self.npix_total
    at_min = target.at_minimum
    # gradient-based local searches: tell diffBragg which derivative managers
    # to enable, and request analytic gradients in the target args
    # (note: "trust-ncg" appears twice in this list; harmless)
    if method in ["L-BFGS-B", "BFGS", "CG", "dogleg", "SLSQP", "Newton-CG", "trust-ncg", "trust-krylov", "trust-exact", "trust-ncg"]:
        if self.SIM.P["RotXYZ0"].refine:
            self.SIM.D.refine(ROTX_ID)
            self.SIM.D.refine(ROTY_ID)
            self.SIM.D.refine(ROTZ_ID)
        if self.SIM.P["Nabc0"].refine:
            self.SIM.D.refine(NCELLS_ID)
        if self.SIM.P["ucell0"].refine:
            for i_ucell in range(len(self.SIM.ucell_man.variables)):
                self.SIM.D.refine(UCELL_ID_OFFSET + i_ucell)
        if self.SIM.P["eta_abc0"].refine:
            self.SIM.D.refine(ETA_ID)
        if self.SIM.P["detz_shift"].refine:
            self.SIM.D.refine(DETZ_ID)
        if self.SIM.D.use_diffuse:
            self.SIM.D.refine(DIFFUSE_ID)
        # final True = compute_grad flag consumed by target_func
        args = (self.SIM, self.pan_fast_slow, self.all_data,
                self.all_sigmas, self.all_trusted, self.all_background, True, self.params, True)
        min_kwargs = {'args': args, "method": method, "jac": target.jac,
                      'hess': self.params.hess}
        if method=="L-BFGS-B":
            min_kwargs["options"] = {"ftol": self.params.ftol, "gtol": 1e-10, "maxfun":1e5, "maxiter":self.params.lbfgs_maxiter}
    else:
        # gradient-free local search (e.g. Nelder-Mead): compute_grad=False
        args = (self.SIM, self.pan_fast_slow, self.all_data,
                self.all_sigmas, self.all_trusted, self.all_background, True, self.params, False)
        min_kwargs = {'args': args, "method": method,
                      'options': {'maxfev': maxfev,
                                  'fatol': self.params.nelder_mead_fatol}}
    if self.params.global_method=="basinhopping":
        HOPPER = basinhopping
        out = HOPPER(target, x0_for_refinement,
                     niter=self.params.niter,
                     minimizer_kwargs=min_kwargs,
                     T=self.params.temp,
                     callback=at_min,
                     disp=False,
                     stepsize=self.params.stepsize)
    else:
        bounds = [(-100,100)] * len(x0_for_refinement)  # TODO decide about bounds, usually x remains close to 1 during refinement
        print("Beginning the annealing process")
        # dual_annealing takes args directly, not via minimizer kwargs
        args = min_kwargs.pop("args")
        if self.params.dual.no_local_search:
            compute_grads = args[-1]
            if compute_grads:
                print("Warning, parameters setup to compute gradients, swicthing off because no_local_search=True")
                args = list(args)
                args[-1] = False  # switch off grad
                args = tuple(args)
        out = dual_annealing(target, bounds=bounds, args=args,
                             no_local_search=self.params.dual.no_local_search,
                             x0=x0_for_refinement,
                             accept=self.params.dual.accept,
                             visit=self.params.dual.visit,
                             maxiter=self.params.niter,
                             local_search_options=min_kwargs,
                             callback=at_min)
    target.x0[vary] = out.x
    return target.x0
def model(x, SIM, pfs, compute_grad=True):
    """
    Push the parameter vector x into the diffBragg simulator and compute the
    forward model (Bragg-only pixel values) and, optionally, the Jacobian.

    :param x: full reparameterized parameter vector (indexed via SIM.P[...].xpos)
    :param SIM: simulator built by SimulatorFromExperiment (has D, P, ucell_man, ...)
    :param pfs: flat flex array of (panel, fast, slow) triplets, 3 entries per pixel
    :param compute_grad: if True, also accumulate analytic derivatives into J
    :return: (model_pix, J) where model_pix is a 1-D array of npix Bragg
        intensities (background NOT included) and J has shape (nparam, npix);
        J is all zeros when compute_grad is False
    """
    #params_per_xtal = np.array_split(x[:num_per_xtal_params], SIM.num_xtals)
    # get the unit cell variables
    nucell = len(SIM.ucell_man.variables)
    ucell_params = [SIM.P["ucell%d" % i_uc] for i_uc in range(nucell)]
    ucell_xpos = [p.xpos for p in ucell_params]
    unitcell_var_reparam = [x[xpos] for xpos in ucell_xpos]
    unitcell_variables = [ucell_params[i].get_val(xval) for i, xval in enumerate(unitcell_var_reparam)]
    SIM.ucell_man.variables = unitcell_variables
    Bmatrix = SIM.ucell_man.B_recipspace
    SIM.D.Bmatrix = Bmatrix
    if compute_grad:
        for i_ucell in range(len(unitcell_variables)):
            SIM.D.set_ucell_derivative_matrix(
                i_ucell + UCELL_ID_OFFSET,
                SIM.ucell_man.derivative_matrices[i_ucell])
    # update the mosaicity here
    eta_params = [SIM.P["eta_abc%d" % i_eta] for i_eta in range(3)]
    if SIM.umat_maker is not None:
        # we are modeling mosaic spread
        eta_abc = [p.get_val(x[p.xpos]) for p in eta_params]
        if not SIM.D.has_anisotropic_mosaic_spread:
            eta_abc = eta_abc[0]
        SIM.update_umats_for_refinement(eta_abc)
    # detector parameters
    DetZ = SIM.P["detz_shift"]
    x_shiftZ = x[DetZ.xpos]
    shiftZ = DetZ.get_val(x_shiftZ)
    SIM.D.shift_origin_z(SIM.detector, shiftZ)
    npix = int(len(pfs) / 3)
    nparam = len(x)
    J = np.zeros((nparam, npix))  # note: order is: scale, rotX, rotY, rotZ, Na, Nb, Nc, ... (for each xtal), then ucell0, ucell1 , ucell2, .. detshift,
    model_pix = None
    for i_xtal in range(SIM.num_xtals):
        #SIM.D.raw_pixels_roi *= 0 #todo do i matter?
        RotXYZ_params = [SIM.P["RotXYZ%d" %i_rot] for i_rot in range(3)]
        rotX,rotY,rotZ = [rot_param.get_val(x[rot_param.xpos]) for rot_param in RotXYZ_params]
        ## update parameters:
        # TODO: if not refining Umat, assert these are 0 , and dont set them here
        SIM.D.set_value(ROTX_ID, rotX)
        SIM.D.set_value(ROTY_ID, rotY)
        SIM.D.set_value(ROTZ_ID, rotZ)
        G = SIM.P["G"]
        scale = G.get_val(x[G.xpos])
        Nabc_params = [SIM.P["Nabc%d" % i_n] for i_n in range(3)]
        Na,Nb,Nc = [n_param.get_val(x[n_param.xpos]) for n_param in Nabc_params]
        SIM.D.set_ncells_values(tuple([Na, Nb, Nc]))
        # diffuse signals
        if SIM.D.use_diffuse:
            diffuse_params_lookup = {}
            iso_flags = {'gamma':SIM.isotropic_diffuse_gamma, 'sigma':SIM.isotropic_diffuse_sigma}
            for diff_type in ['gamma', 'sigma']:
                diff_params = [SIM.P["diffuse_%s%d" % (diff_type,i_gam)] for i_gam in range(3)]
                diffuse_params_lookup[diff_type] = diff_params
                diff_vals = []
                for i_diff, param in enumerate(diff_params):
                    val = param.get_val(x[param.xpos])
                    if iso_flags[diff_type]:
                        # isotropic: replicate the first component three times
                        diff_vals = [val]*3
                        break
                    else:
                        diff_vals.append(val)
                if diff_type == "gamma":
                    SIM.D.diffuse_gamma = tuple(diff_vals)
                else:
                    SIM.D.diffuse_sigma = tuple(diff_vals)
        # run the forward simulation for this crystal and accumulate pixels
        SIM.D.add_diffBragg_spots(pfs)
        pix = SIM.D.raw_pixels_roi[:npix]
        pix = pix.as_numpy_array()
        if model_pix is None:
            model_pix = scale*pix
        else:
            model_pix += scale*pix
        if compute_grad:
            # each derivative is pulled from diffBragg, scaled, then chained
            # through the parameter's reparameterization via get_deriv
            if G.refine:
                scale_grad = pix  # TODO double check multi crystal case
                scale_grad = G.get_deriv(x[G.xpos], scale_grad)
                J[G.xpos] += scale_grad
            if RotXYZ_params[0].refine:
                for i_rot in range(3):
                    rot_grad = scale * SIM.D.get_derivative_pixels(ROTXYZ_IDS[i_rot]).as_numpy_array()[:npix]
                    rot_p = RotXYZ_params[i_rot]
                    rot_grad = rot_p.get_deriv(x[rot_p.xpos], rot_grad)
                    J[rot_p.xpos] += rot_grad
            if Nabc_params[0].refine:
                Nabc_grads = SIM.D.get_ncells_derivative_pixels()
                for i_n in range(3):
                    N_grad = scale*(Nabc_grads[i_n][:npix].as_numpy_array())
                    p = Nabc_params[i_n]
                    N_grad = p.get_deriv(x[p.xpos], N_grad)
                    J[p.xpos] += N_grad
            if SIM.D.use_diffuse:
                for t in ['gamma','sigma']:
                    if diffuse_params_lookup[t][0].refine:
                        diffuse_grads = getattr(SIM.D,"get_diffuse_%s_derivative_pixels"%t)()
                        for i_diff in range(3):
                            diff_grad = scale*(diffuse_grads[i_diff][:npix].as_numpy_array())
                            p = diffuse_params_lookup[t][i_diff]
                            diff_grad = p.get_deriv(x[p.xpos], diff_grad)
                            J[p.xpos] += diff_grad
            if eta_params[0].refine:
                if SIM.D.has_anisotropic_mosaic_spread:
                    eta_derivs = SIM.D.get_aniso_eta_deriv_pixels()
                else:
                    eta_derivs = [SIM.D.get_derivative_pixels(ETA_ID)]
                num_eta = 3 if SIM.D.has_anisotropic_mosaic_spread else 1
                for i_eta in range(num_eta):
                    p = eta_params[i_eta]
                    eta_grad = scale * (eta_derivs[i_eta][:npix].as_numpy_array())
                    eta_grad = p.get_deriv(x[p.xpos], eta_grad)
                    J[p.xpos] += eta_grad
            if ucell_params[0].refine:
                for i_ucell in range(nucell):
                    p = ucell_params[i_ucell]
                    deriv = scale*SIM.D.get_derivative_pixels(UCELL_ID_OFFSET+i_ucell).as_numpy_array()[:npix]
                    deriv = p.get_deriv(x[p.xpos], deriv)
                    J[p.xpos] += deriv
            if DetZ.refine:
                d = SIM.D.get_derivative_pixels(DETZ_ID).as_numpy_array()[:npix]
                d = DetZ.get_deriv(x[DetZ.xpos], d)
                J[DetZ.xpos] += d
    return model_pix, J
def look_at_x(x, SIM):
    """Print every parameter in SIM.P as 'name: value', where value is the
    physical value obtained by un-reparameterizing the entry of x at the
    parameter's position."""
    for param_name in SIM.P:
        param = SIM.P[param_name]
        print("%s: %f" % (param_name, param.get_val(x[param.xpos])))
def get_param_from_x(x, SIM):
    """
    Unpack the full parameter vector into physical model values.

    :param x: refinement parameter array (internal/reparameterized values)
    :param SIM: simulator whose SIM.P maps parameter names to parameter objects
        and whose ucell_man converts unit cell variables to cell parameters
    :return: 20-tuple (scale, rotX, rotY, rotZ, Na, Nb, Nc,
        diff_gam_a, diff_gam_b, diff_gam_c, diff_sig_a, diff_sig_b, diff_sig_c,
        a, b, c, al, be, ga, detz) -- a..ga are the unit cell parameters;
        detz is the detector z shift in the detz_shift parameter's native units
        (presumably meters, since update_detector_from_x multiplies by 1e3 to
        get mm -- confirm)
    """
    G = SIM.P['G']
    scale = G.get_val(x[G.xpos])
    RotXYZ = [SIM.P["RotXYZ%d" % i] for i in range(3)]
    rotX, rotY, rotZ = [r.get_val(x[r.xpos]) for r in RotXYZ]
    Nabc = [SIM.P["Nabc%d" % i] for i in range(3)]
    Na, Nb, Nc = [p.get_val(x[p.xpos]) for p in Nabc]
    diff_gam_abc = [SIM.P["diffuse_gamma%d" % i] for i in range(3)]
    diff_gam_a, diff_gam_b, diff_gam_c = [p.get_val(x[p.xpos]) for p in diff_gam_abc]
    diff_sig_abc = [SIM.P["diffuse_sigma%d" % i] for i in range(3)]
    diff_sig_a, diff_sig_b, diff_sig_c = [p.get_val(x[p.xpos]) for p in diff_sig_abc]
    # convert the refined unit cell variables to the six cell parameters
    nucell = len(SIM.ucell_man.variables)
    ucell_p = [SIM.P["ucell%d" % i] for i in range(nucell)]
    ucell_var = [p.get_val(x[p.xpos]) for p in ucell_p]
    SIM.ucell_man.variables = ucell_var
    a,b,c,al,be,ga = SIM.ucell_man.unit_cell_parameters
    DetZ = SIM.P["detz_shift"]
    detz = DetZ.get_val(x[DetZ.xpos])
    return scale, rotX, rotY, rotZ, Na, Nb, Nc, diff_gam_a, diff_gam_b, diff_gam_c, diff_sig_a, diff_sig_b, diff_sig_c, a,b,c,al,be,ga, detz
class TargetFunc:
    """Stateful callable handed to scipy.optimize (basinhopping/dual_annealing).

    Maps the reduced vector of varying parameters back into the full parameter
    array, evaluates target_func, and caches the model/Jacobian so that
    intermediate iterations can reuse a Broyden-style Jacobian update instead
    of recomputing analytic derivatives every call.
    """
    def __init__(self, SIM, niter_per_J=1, profile=False):
        # recompute the exact Jacobian every niter_per_J iterations; in between,
        # __call__ passes the cached (delta_x, J, model) for an approximate update
        self.niter_per_J = niter_per_J
        self.global_x = []
        self.all_x = []
        self.vary = None  # boolean numpy array specifying which params to refine
        self.x0 = None  # 1d array of parameters (should be numpy array, same length as vary)
        self.old_J = None
        self.old_model = None
        self.delta_x = None
        self.iteration = 0
        self.minima = []
        # BUGFIX: gradient cache was previously only created in __call__, so
        # calling jac() before the first __call__ raised AttributeError
        self.g = None
        # NOTE: the `profile` argument is currently unused; kept for interface
        # compatibility with callers
        self.SIM = SIM

    def at_minimum(self, x, f, accept):
        """basinhopping/dual_annealing callback: record each accepted minimum
        and reset the per-basin iteration counter."""
        self.iteration = 0
        self.all_x = []
        self.x0[self.vary] = x
        look_at_x(self.x0,self.SIM)
        self.minima.append((f,self.x0,accept))

    def jac(self, x, *args):
        """Return the gradient (restricted to varying params) cached by the
        most recent __call__, or None if no call has happened yet."""
        if self.g is not None:
            return self.g[self.vary]

    def __call__(self, x, *args, **kwargs):
        self.x0[self.vary] = x
        if self.all_x:
            self.delta_x = self.x0 - self.all_x[-1]
        update_terms = None
        if not self.iteration % (self.niter_per_J) == 0:
            # off-cycle iteration: reuse cached J/model for a Broyden update
            update_terms = (self.delta_x, self.old_J, self.old_model)
        self.all_x.append(self.x0)
        f, g, modelpix, J = target_func(self.x0, update_terms, *args, **kwargs)
        self.old_model = modelpix
        self.old_J = J
        self.iteration += 1
        self.g = g
        return f
def target_func(x, udpate_terms, SIM, pfs, data, sigmas, trusted, background, verbose=True, params=None, compute_grad=True):
    """
    Evaluate the negative log-likelihood target (plus optional restraints) and
    its gradient for the parameter vector x.

    :param x: full reparameterized parameter vector
    :param udpate_terms: (sic -- name kept for positional-call compatibility)
        None, or (delta_x, prev_J, prev_model) for a Broyden Jacobian update
        instead of recomputing analytic derivatives
    :param SIM, pfs: simulator and flat (panel, fast, slow) pixel list for model()
    :param data: 1-D per-pixel measured values (photon units)
    :param sigmas: 1-D per-pixel sigma estimates (currently unused here)
    :param trusted: 1-D bool mask selecting pixels that enter the target
    :param background: 1-D per-pixel background added to the Bragg model
    :param verbose: if True, log the target breakdown at debug level
    :param params: phil params (refiner.sigma_r, use_restraints, centers, betas...)
    :param compute_grad: if True, return the analytic/approximate gradient
    :return: (f, g, model_bragg, Jac) -- scalar target, gradient (or None),
        Bragg-only model pixels, and the (nparam x npix) Jacobian
    """
    if udpate_terms is not None:
        # if approximating the gradients, then fix the parameter refinment managers in diffBragg
        # so we dont waste time computing them
        _compute_grad = False
        SIM.D.fix(NCELLS_ID)
        SIM.D.fix(ROTX_ID)
        SIM.D.fix(ROTY_ID)
        SIM.D.fix(ROTZ_ID)
        for i_ucell in range(len(SIM.ucell_man.variables)):
            SIM.D.fix(UCELL_ID_OFFSET + i_ucell)
        SIM.D.fix(DETZ_ID)
        SIM.D.fix(ETA_ID)
        SIM.D.fix(DIFFUSE_ID)
    elif compute_grad:
        # actually compute the gradients
        _compute_grad = True
        if SIM.P["Nabc0"].refine:
            SIM.D.let_loose(NCELLS_ID)
        if SIM.P["RotXYZ0"].refine:
            SIM.D.let_loose(ROTX_ID)
            SIM.D.let_loose(ROTY_ID)
            SIM.D.let_loose(ROTZ_ID)
        if SIM.P["ucell0"].refine:
            for i_ucell in range(len(SIM.ucell_man.variables)):
                SIM.D.let_loose(UCELL_ID_OFFSET + i_ucell)
        if SIM.P["detz_shift"].refine:
            SIM.D.let_loose(DETZ_ID)
        if SIM.P["eta_abc0"].refine:
            SIM.D.let_loose(ETA_ID)
    else:
        _compute_grad = False
    model_bragg, Jac = model(x, SIM, pfs,compute_grad=_compute_grad)
    if udpate_terms is not None:
        # try a Broyden update ?
        # https://people.duke.edu/~hpgavin/ce281/lm.pdf equation 19
        delta_x, prev_J, prev_model_bragg = udpate_terms
        if prev_J is not None:
            delta_y = model_bragg - prev_model_bragg
            delta_J = (delta_y - np.dot(prev_J.T, delta_x))
            delta_J /= np.dot(delta_x,delta_x)
            Jac = prev_J + delta_J
    # Jac has shape of num_param x num_pix
    model_pix = model_bragg + background
    resid = data - model_pix
    # data contributions to target function
    sigma_rdout = params.refiner.sigma_r / params.refiner.adu_per_photon
    # Gaussian noise model with variance = model + readout variance
    V = model_pix + sigma_rdout**2
    resid_square = resid**2
    fLogLike = (.5*(np.log(2*np.pi*V) + resid_square / V))[trusted].sum()  # negative log Likelihood target
    # width of z-score should decrease as refinement proceeds
    zscore_sigma = np.std(resid / np.sqrt(V))
    restraint_terms = {}
    if params.use_restraints:
        # scale factor restraint
        for name in SIM.P:
            p = SIM.P[name]
            val = p.get_restraint_val(x[p.xpos])
            restraint_terms[name] = val
        if params.centers.Nvol is not None:
            # harmonic restraint on the mosaic block volume Na*Nb*Nc
            # NOTE(review): uses SIM.D.Nabc_aniso here but SIM.D.Ncells_aniso in
            # the gradient block below -- confirm which attribute is correct
            Nvol = np.product(SIM.D.Nabc_aniso)
            del_Nvol = params.centers.Nvol - Nvol
            fN_vol = .5*del_Nvol**2/params.betas.Nvol
            restraint_terms["Nvol"] = fN_vol
    # accumulate target function
    f_restraints = 0
    if restraint_terms:
        f_restraints = np.sum(list(restraint_terms.values()))
    f = f_restraints + fLogLike
    restraint_debug_s = "LogLike: %.1f%%; " % (fLogLike / f *100.)
    for name, val in restraint_terms.items():
        if val > 0:
            frac_total = val / f *100.
            restraint_debug_s += "%s: %.1f%%; " % (name, frac_total)
    # fractions of the target function
    g = None  # gradient vector
    gnorm = -1  # norm of gradient vector
    if compute_grad:
        common_grad_term = (0.5 /V * (1-2*resid - resid_square / V))[trusted]
        # trusted pixels portion of Jacobian
        Jac_t = Jac[:,trusted]
        # gradient vector
        g = np.array([np.sum(common_grad_term*Jac_t[param_idx]) for param_idx in range(Jac_t.shape[0])])
        if params.use_restraints:
            # update gradients according to restraints
            for name, p in SIM.P.items():
                g[p.xpos] += p.get_restraint_deriv(x[p.xpos])
            if params.centers.Nvol is not None:
                Na,Nb,Nc = SIM.D.Ncells_aniso
                dNvol_dN = Nb*Nc, Na*Nc, Na*Nb
                for i_N in range(3):
                    p = SIM.P["Nabc%d" % i_N]
                    gterm = -del_Nvol / params.betas.Nvol * dNvol_dN[i_N]
                    g[p.xpos] += p.get_deriv(x[p.xpos], gterm)
        gnorm = np.linalg.norm(g)
    if verbose:
        MAIN_LOGGER.debug("F=%10.7g sigZ=%10.7g (Fracs of F: %s), |g|=%10.7g" \
                          % (f, zscore_sigma, restraint_debug_s, gnorm))
    return f, g, model_bragg, Jac
def refine(exp, ref, params, spec=None, gpu_device=None, return_modeler=False, best=None):
    """
    End-to-end single-shot refinement: gather data, build the simulator,
    minimize, and produce an updated experiment/reflection pair.

    :param exp: experiment (or experiment file, depending on the gather path)
    :param ref: reflections (or reflection file when load_data_from_refls)
    :param params: phil parameter scope
    :param spec: optional spectrum filename, written into params
    :param gpu_device: device id for diffBragg (defaults to 0)
    :param return_modeler: if True, also return the DataModeler and the
        refined parameter vector
    :param best: optional single-row dataframe of starting models, forwarded
        to SimulatorFromExperiment
    :return: (new_exp, new_refl) or (new_exp, new_refl, Modeler, x)
    """
    if gpu_device is None:
        gpu_device = 0
    params.simulator.spectrum.filename = spec
    Modeler = DataModeler(params)
    if params.load_data_from_refls:
        Modeler.GatherFromReflectionTable(exp, ref)
    else:
        assert Modeler.GatherFromExperiment(exp, ref)
    Modeler.SimulatorFromExperiment(best)
    Modeler.SIM.D.device_Id = gpu_device
    nparam = len(Modeler.SIM.P)
    # all reparameterized values start at 1 (the parameters' reference point)
    x0 = [1] * nparam
    x = Modeler.Minimize(x0)
    # evaluate the final model once (no gradients) for centroid updates below
    Modeler.best_model, _ = model(x, Modeler.SIM, Modeler.pan_fast_slow, compute_grad=False)
    new_crystal = update_crystal_from_x(Modeler.SIM, x)
    new_exp = deepcopy(Modeler.E)
    new_exp.crystal = new_crystal
    try:
        # best effort: not every spectrum object supports a weighted wavelength
        new_exp.beam.set_wavelength(Modeler.SIM.dxtbx_spec.get_weighted_wavelength())
    except Exception: pass
    # if we strip the thickness from the detector, then update it here:
    #new_exp.detector. shift Z mm
    new_det = update_detector_from_x(Modeler.SIM, x)
    new_exp.detector = new_det
    new_refl = get_new_xycalcs(Modeler, new_exp)
    Modeler.clean_up()
    if return_modeler:
        return new_exp, new_refl, Modeler, x
    else:
        return new_exp, new_refl
def update_detector_from_x(SIM, x):
    """Return a copy of SIM.detector with every panel shifted along z by the
    refined detz value (parameter units times 1e3, yielding millimeters)."""
    # the detector z shift is the last element of the unpacked parameter tuple
    detz_shift = get_param_from_x(x, SIM)[-1]
    detz_shift_mm = detz_shift * 1e3
    return utils.shift_panelZ(SIM.detector, detz_shift_mm)
def update_crystal_from_x(SIM, x):
    """
    Build a new dxtbx crystal model from the refined parameters: apply the
    refined missetting rotations to the starting U matrix and set the B matrix
    from the refined unit cell.

    :param SIM: simulator holding the starting crystal (SIM.crystal.dxtbx_crystal)
    :param x: refined parameter vector
    :return: a deep-copied crystal with updated U and B matrices
    """
    scale, rotX, rotY, rotZ, Na, Nb, Nc, _,_,_,_,_,_,a, b, c, al, be, ga, detz_shift = get_param_from_x(x, SIM)
    # rotation axes match the convention used in SimulatorFromExperiment
    xax = col((-1, 0, 0))
    yax = col((0, -1, 0))
    zax = col((0, 0, -1))
    ## update parameters:
    RX = xax.axis_and_angle_as_r3_rotation_matrix(rotX, deg=False)
    RY = yax.axis_and_angle_as_r3_rotation_matrix(rotY, deg=False)
    RZ = zax.axis_and_angle_as_r3_rotation_matrix(rotZ, deg=False)
    M = RX * RY * RZ
    U = M * sqr(SIM.crystal.dxtbx_crystal.get_U())
    new_C = deepcopy(SIM.crystal.dxtbx_crystal)
    new_C.set_U(U)
    ucparam = a, b, c, al, be, ga
    ucman = utils.manager_from_params(ucparam)
    new_C.set_B(ucman.B_recipspace)
    return new_C
def get_new_xycalcs(Modeler, new_exp):
    """
    Replace xyzcal columns in the reflections with intensity-weighted centroids
    of the refined Bragg model, preserving the original dials values under
    'dials.'-prefixed column names.

    :param Modeler: DataModeler whose best_model has been set
    :param new_exp: updated experiment (its detector is used for mm conversion)
    :return: a new reflection table with updated xyzcal.px / xyzcal.mm /
        xyzobs.mm.value; optionally filtered to reflections whose model
        actually shows signal
    """
    _,_,_, bragg_subimg = Modeler.get_data_model_pairs()
    new_refls = deepcopy(Modeler.refls)
    # keep the original dials centroids under prefixed column names
    reflkeys = list(new_refls.keys())
    if "xyzcal.px" in reflkeys:
        new_refls['dials.xyzcal.px'] = deepcopy(new_refls['xyzcal.px'])
    if "xyzcal.mm" in reflkeys:
        new_refls['dials.xyzcal.mm'] = deepcopy(new_refls['xyzcal.mm'])
    if "xyzobs.mm.value" in list(new_refls.keys()):
        new_refls['dials.xyzobs.mm.value'] = deepcopy(new_refls['xyzobs.mm.value'])
    # NaN marks reflections whose model showed no signal (filtered below)
    new_xycalcs = flex.vec3_double(len(Modeler.refls), (np.nan, np.nan, np.nan))
    new_xycalcs_mm = flex.vec3_double(len(Modeler.refls), (np.nan, np.nan, np.nan))
    new_xyobs_mm = flex.vec3_double(len(Modeler.refls), (np.nan, np.nan, np.nan))
    for i_roi in range(len(bragg_subimg)):
        ref_idx = Modeler.refls_idx[i_roi]
        if np.any(bragg_subimg[i_roi] > 0):
            # intensity-weighted center of mass of the model within the ROI
            I = bragg_subimg[i_roi]
            Y, X = np.indices(bragg_subimg[i_roi].shape)
            x1, _, y1, _ = Modeler.rois[i_roi]
            X += x1
            Y += y1
            Isum = I.sum()
            # +.5 shifts from pixel index to pixel-center convention
            xcom = (X * I).sum() / Isum + .5
            ycom = (Y * I).sum() / Isum + .5
            com = xcom, ycom, 0
            pid = Modeler.pids[i_roi]
            assert pid == new_refls[ref_idx]['panel']
            panel = new_exp.detector[pid]
            xmm, ymm = panel.pixel_to_millimeter((xcom, ycom))
            com_mm = xmm, ymm, 0
            xobs, yobs, _ = new_refls[ref_idx]["xyzobs.px.value"]
            xobs_mm, yobs_mm = panel.pixel_to_millimeter((xobs, yobs))
            obs_com_mm = xobs_mm, yobs_mm, 0
            new_xycalcs[ref_idx] = com
            new_xycalcs_mm[ref_idx] = com_mm
            new_xyobs_mm[ref_idx] = obs_com_mm
    new_refls["xyzcal.px"] = new_xycalcs
    new_refls["xyzcal.mm"] = new_xycalcs_mm
    new_refls["xyzobs.mm.value"] = new_xyobs_mm
    if Modeler.params.filter_unpredicted_refls_in_output:
        sel = [not np.isnan(x) for x,_,_ in new_refls['xyzcal.px']]
        nbefore = len(new_refls)
        new_refls = new_refls.select(flex.bool(sel))
        nafter = len(new_refls)
        MAIN_LOGGER.info("Filtered %d / %d reflections which did not show peaks in model" % (nbefore-nafter, nbefore))
    return new_refls
def get_mosaicity_from_x(x, SIM):
    """
    Extract the three mosaic spread (eta) values from the parameter vector.

    :param x: refinement parameters
    :param SIM: simulator used during refinement
    :return: list of 3 eta values; for an isotropic spread model the first
        component is replicated three times
    """
    eta_abc = []
    for i_eta in range(3):
        p = SIM.P["eta_abc%d" % i_eta]
        eta_abc.append(p.get_val(x[p.xpos]))
    if not SIM.D.has_anisotropic_mosaic_spread:
        eta_abc = [eta_abc[0]] * 3
    return eta_abc
def downsamp_spec_from_params(params, expt):
    """
    Read the experiment's X-ray spectrum, optionally downsample it, set the
    beam wavelength to the weighted average, and return the spectrum.

    NOTE: this duplicates the core of downsamp_spec below, but returns the
    spectrum instead of writing it into a simulator.

    :param params: phil scope (downsamp_spec.*, simulator.spectrum.stride,
        simulator.total_flux)
    :param expt: experiment; its imageset supplies the spectrum and its beam
        wavelength is updated in place (side effect)
    :return: list of (wavelength_Angstrom, weight) tuples
    :raises ValueError: if the pinkstride is 0 or exceeds the spectrum length
    """
    dxtbx_spec = expt.imageset.get_spectrum(0)
    spec_en = dxtbx_spec.get_energies_eV()
    spec_wt = dxtbx_spec.get_weights()
    if params.downsamp_spec.skip:
        # use the raw spectrum, only converting energy (eV) to wavelength
        spec_wave = utils.ENERGY_CONV / spec_en.as_numpy_array()
        spectrum = list(zip(spec_wave, spec_wt))
    else:
        spec_en = dxtbx_spec.get_energies_eV()
        spec_wt = dxtbx_spec.get_weights()
        # ---- downsample the spectrum
        method2_param = {"filt_freq": params.downsamp_spec.filt_freq,
                         "filt_order": params.downsamp_spec.filt_order,
                         "tail": params.downsamp_spec.tail,
                         "delta_en": params.downsamp_spec.delta_en}
        downsamp_en, downsamp_wt = downsample_spectrum(spec_en.as_numpy_array(),
                                                       spec_wt.as_numpy_array(),
                                                       method=2, method2_param=method2_param)
        stride = params.simulator.spectrum.stride
        if stride > len(downsamp_en) or stride == 0:
            raise ValueError("Incorrect value for pinkstride")
        downsamp_en = downsamp_en[::stride]
        downsamp_wt = downsamp_wt[::stride]
        tot_fl = params.simulator.total_flux
        if tot_fl is not None:
            # renormalize the weights to the requested total flux
            downsamp_wt = downsamp_wt / sum(downsamp_wt) * tot_fl
        downsamp_wave = utils.ENERGY_CONV / downsamp_en
        spectrum = list(zip(downsamp_wave, downsamp_wt))
    # the nanoBragg beam has an xray_beams property that is used internally in diffBragg
    starting_wave = expt.beam.get_wavelength()
    waves, specs = map(np.array, zip(*spectrum))
    ave_wave = sum(waves*specs) / sum(specs)
    expt.beam.set_wavelength(ave_wave)
    MAIN_LOGGER.debug("Shifting wavelength from %f to %f" % (starting_wave, ave_wave))
    MAIN_LOGGER.debug("USING %d ENERGY CHANNELS" % len(spectrum))
    return spectrum
# set the X-ray spectra for this shot
def downsamp_spec(SIM, params, expt, return_and_dont_set=False):
    """
    Read, optionally downsample, and install the shot's X-ray spectrum on the
    simulator's beam, and update the experiment's beam wavelength to the
    weighted average.

    NOTE: duplicates downsamp_spec_from_params above, except the spectrum is
    written into SIM.beam (and propagated to SIM.D.xray_beams unless
    return_and_dont_set is True).

    :param SIM: simulator; SIM.dxtbx_spec and SIM.beam.spectrum are set here
    :param params: phil scope (downsamp_spec.*, simulator.spectrum.stride,
        simulator.total_flux)
    :param expt: experiment; beam wavelength is updated in place (side effect)
    :param return_and_dont_set: if True, return the spectrum without pushing
        it into the diffBragg instance
    :raises ValueError: if the pinkstride is 0 or exceeds the spectrum length
    """
    SIM.dxtbx_spec = expt.imageset.get_spectrum(0)
    spec_en = SIM.dxtbx_spec.get_energies_eV()
    spec_wt = SIM.dxtbx_spec.get_weights()
    if params.downsamp_spec.skip:
        # use the raw spectrum, only converting energy (eV) to wavelength
        spec_wave = utils.ENERGY_CONV / spec_en.as_numpy_array()
        SIM.beam.spectrum = list(zip(spec_wave, spec_wt))
    else:
        spec_en = SIM.dxtbx_spec.get_energies_eV()
        spec_wt = SIM.dxtbx_spec.get_weights()
        # ---- downsample the spectrum
        method2_param = {"filt_freq": params.downsamp_spec.filt_freq,
                         "filt_order": params.downsamp_spec.filt_order,
                         "tail": params.downsamp_spec.tail,
                         "delta_en": params.downsamp_spec.delta_en}
        downsamp_en, downsamp_wt = downsample_spectrum(spec_en.as_numpy_array(),
                                                       spec_wt.as_numpy_array(),
                                                       method=2, method2_param=method2_param)
        stride = params.simulator.spectrum.stride
        if stride > len(downsamp_en) or stride == 0:
            raise ValueError("Incorrect value for pinkstride")
        downsamp_en = downsamp_en[::stride]
        downsamp_wt = downsamp_wt[::stride]
        tot_fl = params.simulator.total_flux
        if tot_fl is not None:
            # renormalize the weights to the requested total flux
            downsamp_wt = downsamp_wt / sum(downsamp_wt) * tot_fl
        downsamp_wave = utils.ENERGY_CONV / downsamp_en
        SIM.beam.spectrum = list(zip(downsamp_wave, downsamp_wt))
    # the nanoBragg beam has an xray_beams property that is used internally in diffBragg
    starting_wave = expt.beam.get_wavelength()
    waves, specs = map(np.array, zip(*SIM.beam.spectrum))
    ave_wave = sum(waves*specs) / sum(specs)
    expt.beam.set_wavelength(ave_wave)
    MAIN_LOGGER.debug("Shifting wavelength from %f to %f" % (starting_wave, ave_wave))
    if return_and_dont_set:
        return SIM.beam.spectrum
    else:
        SIM.D.xray_beams = SIM.beam.xray_beams
def sanity_test_input_lines(input_lines):
    """Validate input lines: each must contain 2 or 3 whitespace-separated
    fields, and every field must be a path to an existing file.

    :param input_lines: iterable of strings to validate
    :raises IOError: if a line does not have 2 or 3 fields
    :raises FileNotFoundError: if a referenced file does not exist
    """
    for input_line in input_lines:
        fields = input_line.strip().split()
        if len(fields) not in (2, 3):
            raise IOError("Input line %s is not formatted properly" % input_line)
        for path in fields:
            if not os.path.exists(path):
                raise FileNotFoundError("File %s does not exist" % path)
def print_profile(stats, timed_methods):
    """
    Log line_profiler results for the requested function names.

    :param stats: line_profiler stats object (has .timings mapping
        (filename, first_lineno, name) -> [(lineno, ncalls, time)], and .unit,
        the timer unit in seconds)
    :param timed_methods: collection of function names to report; all others
        in stats are skipped
    """
    for method in stats.timings.keys():
        filename, header_ln, name = method
        if name not in timed_methods:
            continue
        info = stats.timings[method]
        PROFILE_LOGGER.warning("\n")
        PROFILE_LOGGER.warning("FILE: %s" % filename)
        if not info:
            PROFILE_LOGGER.warning("<><><><><><><><><><><><><><><><><><><><><><><>")
            PROFILE_LOGGER.warning("METHOD %s : Not profiled because never called" % (name))
            PROFILE_LOGGER.warning("<><><><><><><><><><><><><><><><><><><><><><><>")
            continue
        unit = stats.unit
        line_nums, ncalls, timespent = zip(*info)
        # BUGFIX: read the profiled source with a context manager; the original
        # open(...).readlines() left the file handle dangling
        with open(filename, 'r') as src:
            fp = src.readlines()
        total_time = sum(timespent)
        header_line = fp[header_ln-1][:-1]
        PROFILE_LOGGER.warning(header_line)
        PROFILE_LOGGER.warning("TOTAL FUNCTION TIME: %f ms" % (total_time*unit*1e3))
        PROFILE_LOGGER.warning("<><><><><><><><><><><><><><><><><><><><><><><>")
        PROFILE_LOGGER.warning("%5s%14s%9s%10s" % ("Line#", "Time", "%Time", "Line" ))
        PROFILE_LOGGER.warning("%5s%14s%9s%10s" % ("", "(ms)", "", ""))
        PROFILE_LOGGER.warning("<><><><><><><><><><><><><><><><><><><><><><><>")
        for i_l, l in enumerate(line_nums):
            frac_t = timespent[i_l] / total_time * 100.
            line = fp[l-1][:-1]
            PROFILE_LOGGER.warning("%5d%14.2f%9.2f%s" % (l, timespent[i_l]*unit*1e3, frac_t, line))
| 43.430258 | 152 | 0.601397 |
d1973af0ea65b420847e46be97eb6882e0181ecf | 466 | py | Python | data/scripts/templates/object/tangible/item/quest/force_sensitive/shared_fs_sculpture_4.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/tangible/item/quest/force_sensitive/shared_fs_sculpture_4.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/tangible/item/quest/force_sensitive/shared_fs_sculpture_4.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the Tangible template for the force-sensitive sculpture item.

    Auto-generated SWG template factory; edits belong between the
    BEGIN/END MODIFICATIONS markers.
    """
    sculpture = Tangible()
    sculpture.template = "object/tangible/item/quest/force_sensitive/shared_fs_sculpture_4.iff"
    sculpture.attribute_template_id = -1
    sculpture.stfName("item_n", "fs_sculpture_4")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return sculpture
b3e65c42bd4b62e9f9140df2ca630dfa08772cce | 3,251 | py | Python | test_project_postgres/sample/tests.py | Piero-Palevsky-OH/aldjemy | dc79f3b7aabe47c4a13e44f61038bc19a921980d | [
"BSD-3-Clause"
] | null | null | null | test_project_postgres/sample/tests.py | Piero-Palevsky-OH/aldjemy | dc79f3b7aabe47c4a13e44f61038bc19a921980d | [
"BSD-3-Clause"
] | null | null | null | test_project_postgres/sample/tests.py | Piero-Palevsky-OH/aldjemy | dc79f3b7aabe47c4a13e44f61038bc19a921980d | [
"BSD-3-Clause"
] | null | null | null | from django.test import TestCase
from sqlalchemy import select
from sqlalchemy.dialects.postgresql import array
from sample.models import TicTacToeBoard, JsonModel
from aldjemy.core import get_engine
class TestArrayField(TestCase):
    """
    Tests that queries involving array fields can be performed.
    """

    # Shared fixture boards: full board, x-only, o-only, and empty.
    # Previously this list was copy-pasted into every test method.
    BOARDS = [
        ["x", "o", "x", "o", "o", "x", "x", "x", "o"],  # both (full board)
        [" ", " ", " ", " ", "x", " ", " ", " ", " "],  # only x
        [" ", " ", " ", "o", "o", " ", " ", " ", "o"],  # only o
        [" ", " ", " ", " ", " ", " ", " ", " ", " "],  # none
    ]

    def _save_boards(self):
        """Persist one TicTacToeBoard per fixture board; return them in order."""
        created = []
        for board in self.BOARDS:
            ttt = TicTacToeBoard(board=board)
            ttt.save()
            created.append(ttt)
        return created

    def test_tic_tac_toe(self):
        """
        Test querying the TicTacToeBoard model.
        """
        self._save_boards()

        def contains(cell):
            # SQLAlchemy filter: board array contains the given cell value.
            return TicTacToeBoard.sa.board.contains(array([cell]))

        query = TicTacToeBoard.sa.query(TicTacToeBoard.sa.id)
        assert query.filter(contains("x")).count() == 2
        assert query.filter(contains("o")).count() == 2
        assert query.filter(contains(" ")).count() == 3

    def test_sa_objects_fetching(self):
        """
        Test full object fetching using SQLAlchemy-aldjemy ORM.
        """
        created_objects = self._save_boards()
        test_object = TicTacToeBoard.sa.query().get(created_objects[0].id)
        assert test_object.id == created_objects[0].id
        assert test_object.board == self.BOARDS[0]

    def test_sa_sql_expression_language_fetching(self):
        """
        Test full record fetching using SQLAlchemy-aldjemy SQL Expression Language.
        """
        created_objects = self._save_boards()
        query = (
            select([TicTacToeBoard.sa.id, TicTacToeBoard.sa.board])
            .order_by(TicTacToeBoard.sa.id)
            .limit(10)
        )
        with get_engine().begin() as connection:
            test_data = connection.execute(query)
            for t_data, c_object in zip(test_data, created_objects):
                t_data_id, t_data_board = t_data
                assert t_data_id == c_object.id
                assert t_data_board == c_object.board
class TestJsonField(TestCase):
    def test_model_creates(self):
        """
        It's important that the field not cause the project to fail startup.
        """
        # JsonModel.sa is populated by aldjemy at mapping time; a non-None
        # value shows the JSON field mapped without raising on import.
        assert JsonModel.sa is not None
| 34.221053 | 83 | 0.485082 |
934c2a4a93b17b67a00991e639a73f782b01b62b | 7,384 | py | Python | bot/exts/utils/eval.py | ShakyaMajumdar/gurkbot | 7532e3ee152804f0211760cc4ace27509eaccc8c | [
"MIT"
] | 24 | 2020-12-18T07:26:14.000Z | 2022-03-30T22:56:49.000Z | bot/exts/utils/eval.py | ShakyaMajumdar/gurkbot | 7532e3ee152804f0211760cc4ace27509eaccc8c | [
"MIT"
] | 143 | 2020-12-18T09:13:51.000Z | 2022-03-02T19:27:44.000Z | bot/exts/utils/eval.py | ShakyaMajumdar/gurkbot | 7532e3ee152804f0211760cc4ace27509eaccc8c | [
"MIT"
] | 44 | 2020-12-18T09:05:29.000Z | 2022-03-02T20:06:23.000Z | import datetime
import json
from pathlib import Path
from typing import Optional
import aiohttp
from bot.bot import Bot
from discord import Embed, Message
from discord.ext import commands, tasks
from discord.ext.commands import Cog, Context, command
from discord.utils import escape_mentions
from loguru import logger
from yaml import safe_load
from ._eval_helper import EvalHelper, FormatOutput, Tio
# Embed accent colours: SOFT_RED is used for error embeds below.
# GREEN appears unused in this chunk — confirm external use before removing.
SOFT_RED = 0xCD6D6D
GREEN = 0x1F8B4C
class Eval(Cog):
    """Safe evaluation of Code using Tio Run Api."""

    def __init__(self, bot: Bot) -> None:
        """Load language maps from YAML resources and start the refresh loop."""
        self.bot = bot
        # default_langs.yml: presumably maps bare names to concrete tio
        # language ids (used as a fallback below) — confirm against resource.
        with Path("bot/resources/eval/default_langs.yml").open(encoding="utf8") as file:
            self.default_languages = safe_load(file)
        self.languages_url = "https://tio.run/languages.json"
        # NOTE(review): self.languages is only assigned inside the async
        # update_languages task; eval_command may race it right after startup.
        self.update_languages.start()
        # wrapping.yml: per-language boilerplate with a "code" placeholder
        # substituted when --wrapped is requested.
        with Path("bot/resources/eval/wrapping.yml").open(encoding="utf8") as file:
            self.wrapping = safe_load(file)
        # quick_map.yml: alias -> canonical language name shortcuts.
        with Path("bot/resources/eval/quick_map.yml").open(encoding="utf8") as file:
            self.quick_map = safe_load(file)

    @tasks.loop(hours=5)
    async def update_languages(self) -> None:
        """Update list of languages supported by api every 5 hour."""
        logger.info("Updating List Of Languages")
        async with aiohttp.ClientSession() as client_session:
            async with client_session.get(self.languages_url) as response:
                if response.status != 200:
                    logger.warning(
                        f"Couldn't reach languages.json (status code: {response.status})."
                    )
                # NOTE(review): on a non-200 status the body is still parsed
                # below, which may raise on an error page — confirm intent.
                languages = tuple(sorted(json.loads(await response.text())))
                self.languages = languages
                logger.info(
                    f"Successfully Updated List Of Languages To Date: {datetime.datetime.now()}"
                )

    @command(
        help="""eval <language> [--wrapped] [--stats] <code>
        for command-line-options, compiler-flags and arguments you may
        add a line starting with this argument, and after a space add
        your options, flags or args.
        stats - option displays more information on execution consumption
        wrapped - allows you to not put main function in some languages
        <code> may be normal code, but also an attached file, or a link from \
[hastebin](https://hastebin.com) or [Github gist](https://gist.github.com)
        If you use a link, your command must end with this syntax: \
`link=<link>` (no space around `=`)
        for instance : `!eval python link=https://hastebin.com/gurkbot.py`
        If the output exceeds 40 lines or Discord max message length, it will be put
        in a new hastebin and the link will be returned.
        """,
        brief="Execute code in a given programming language",
        name="eval",
        aliases=("e",),
    )
    @commands.cooldown(3, 10, commands.BucketType.user)
    async def eval_command(
        self, ctx: Context, language: str, *, code: str = ""
    ) -> Optional[Message]:
        """
        Evaluate code, format it, and send the output to the corresponding channel.

        Return the bot response.
        """
        async with ctx.typing():
            eval_helper = EvalHelper(language)
            parsed_data = await eval_helper.parse(code)
            # Unpack the options/flags/args extracted by EvalHelper.parse.
            (
                inputs,
                code,
                lang,
                options,
                compiler_flags,
                command_line_options,
                args,
            ) = parsed_data
            text = None
            # Source of the code, in priority order: attachment, paste link,
            # then the message body itself.
            if ctx.message.attachments:
                text = await eval_helper.code_from_attachments(ctx)
                if not text:
                    return
            elif code.split(" ")[-1].startswith("link="):
                # Code in a paste service (gist or a hastebin link)
                text = await eval_helper.code_from_url(ctx, code)
                if not text:
                    return
            elif code.strip("`"):
                # Code in message
                text = code.strip("`")
                first_line = text.splitlines()[0]
                # Drop the opening code-fence line unless the language
                # argument itself carried the fence.
                if not language.startswith("```"):
                    text = text[len(first_line) + 1 :]
            if text is None:
                # Ensures code isn't empty after removing options
                raise commands.MissingRequiredArgument(ctx.command.clean_params["code"])
            # Resolve aliases, then defaults, down to a tio language id.
            if lang in self.quick_map:
                lang = self.quick_map[lang]
            if lang in self.default_languages:
                lang = self.default_languages[lang]
            if lang not in self.languages:
                # Distinguish "no language given" from "unknown language".
                if not escape_mentions(lang):
                    embed = Embed(
                        title="MissingRequiredArgument",
                        description=f"Missing Argument Language.\n\nUsage:\n"
                        f"```{ctx.prefix}{ctx.command} {ctx.command.signature}```",
                        color=SOFT_RED,
                    )
                else:
                    embed = Embed(
                        title="Language Not Supported",
                        description=f"Your language was invalid: {lang}\n"
                        f"All Supported languages: [here](https://tio.run)\n\nUsage:\n"
                        f"```{ctx.prefix}{ctx.command} {ctx.command.signature}```",
                        color=SOFT_RED,
                    )
                await ctx.send(embed=embed)
                logger.info("Exiting | Language not found.")
                return
            if options["--wrapped"]:
                # Wrapping is keyed on the language family (prefix before '-');
                # the two C# shells are explicitly excluded.
                if not (
                    any(map(lambda x: lang.split("-")[0] == x, self.wrapping))
                ) or lang in ("cs-mono-shell", "cs-csi"):
                    await ctx.send(f"`{lang}` cannot be wrapped")
                    return
                for beginning in self.wrapping:
                    if lang.split("-")[0] == beginning:
                        text = self.wrapping[beginning].replace("code", text)
                        break
            tio = Tio(lang, text, inputs, compiler_flags, command_line_options, args)
            result = await tio.get_result()
            result = result.rstrip("\n")
            if not options["--stats"]:
                # Strip the tio statistics trailer unless --stats was asked for.
                try:
                    start, end = result.rindex("Real time: "), result.rindex(
                        "%\nExit code: "
                    )
                    result = result[:start] + result[end + 2 :]
                except ValueError:
                    pass
            format_output = FormatOutput(language=lang)
            # Oversized output goes to a paste service instead of the channel.
            if (
                len(result) > format_output.max_output_length
                or result.count("\n") > format_output.max_lines
            ):
                output = await eval_helper.paste(result)
                embed = format_output.format_hastebin_output(output, result)
                await ctx.send(content=f"{ctx.author.mention}", embed=embed)
                logger.info("Result Sent.")
                return
            embed = format_output.format_code_output(result)
            await ctx.send(content=f"{ctx.author.mention}", embed=embed)
            logger.info("Result Sent.")
def setup(bot: Bot) -> None:
    """Entry point used by discord.py to register the Eval cog."""
    cog = Eval(bot)
    bot.add_cog(cog)
| 38.659686 | 91 | 0.543743 |
2b2d228cdbe510ab5762c904f305b18a49d1659d | 2,423 | py | Python | linux/lib/python2.7/dist-packages/blueman/plugins/applet/AuthAgent.py | nmercier/linux-cross-gcc | a5b0028fd2b72ec036a4725e93ba29d73cb753a6 | [
"BSD-3-Clause"
] | 3 | 2015-10-31T10:39:25.000Z | 2019-04-27T20:19:33.000Z | linux/lib/python2.7/dist-packages/blueman/plugins/applet/AuthAgent.py | nmercier/linux-cross-gcc | a5b0028fd2b72ec036a4725e93ba29d73cb753a6 | [
"BSD-3-Clause"
] | null | null | null | linux/lib/python2.7/dist-packages/blueman/plugins/applet/AuthAgent.py | nmercier/linux-cross-gcc | a5b0028fd2b72ec036a4725e93ba29d73cb753a6 | [
"BSD-3-Clause"
] | null | null | null | from blueman.Functions import *
from blueman.plugins.AppletPlugin import AppletPlugin
import blueman.main.applet.BluezAgent as BluezAgent
import blueman.bluez as Bluez
from gi.repository import GObject
from gi.repository import Gtk
class AuthAgent(AppletPlugin):
    """Applet plugin that registers pairing/authorization agents with BlueZ."""
    __description__ = _("Provides passkey, authentication services for BlueZ daemon")
    __icon__ = "gtk-dialog-authentication"
    __author__ = "Walmis"
    __depends__ = ["StatusIcon"]

    def on_load(self, applet):
        """Initialise plugin state and expose SetTimeHint over D-Bus."""
        self.Applet = applet
        # "u" signature: callers pass an unsigned event timestamp.
        self.add_dbus_method(self.SetTimeHint, in_signature="u")
        self.agents = []
        self.last_event_time = 0
        self.agent_manager = Bluez.AgentManager()

    def SetTimeHint(self, time):
        # Remember the last user-interaction timestamp for agent dialogs.
        self.last_event_time = time

    def on_unload(self):
        # Unregister every agent; pre-BlueZ5 agents were registered per
        # adapter, newer ones through the global AgentManager.
        for agent in self.agents:
            if self.legacy():
                agent.adapter.unregister_agent(agent)
            else:
                self.agent_manager.unregister_agent(agent)

    def on_manager_state_changed(self, state):
        # Manager up: (re)register on all adapters; down: release agents.
        if state:
            adapters = self.Applet.Manager.list_adapters()
            for adapter in adapters:
                self.register_agent(adapter)
        else:
            for agent in self.agents:
                agent.Release()

    def on_adapter_added(self, path):
        adapter = Bluez.Adapter(path)
        self.register_agent(adapter)

    def on_released(self, agent):
        # BlueZ released the agent: detach our signal handler and drop it.
        agent.disconnect(agent.signal)
        self.agents.remove(agent)

    def get_event_time(self):
        return self.last_event_time

    def register_agent(self, adapter):
        """Register a pairing agent (per adapter on legacy BlueZ, one global
        default agent otherwise); failures are logged and swallowed."""
        dprint("Registering agent")
        try:
            if self.legacy():
                agent = BluezAgent.AdapterAgent(self.Applet.Plugins.StatusIcon, adapter, self.get_event_time)
                agent.signal = agent.connect("released", self.on_released)
                adapter.register_agent(agent, "DisplayYesNo")
                self.agents.append(agent)
            elif not self.agents:
                # Only one global agent is ever kept on BlueZ >= 5.
                agent = BluezAgent.GlobalAgent(self.Applet.Plugins.StatusIcon, self.get_event_time)
                self.agent_manager.register_agent(agent, "DisplayYesNo", default=True)
                self.agents.append(agent)
        except Exception as e:
            dprint("Failed to register agent")
            dprint(e)

    def legacy(self):
        # True for BlueZ API versions before 5 (per-adapter agent model).
        return self.Applet.Manager.get_interface_version()[0] < 5
| 32.306667 | 109 | 0.641766 |
0a30795db1ff0f82f16f32ac4ecabed4b6410033 | 1,738 | py | Python | admin_cli/utils.py | fireicewater/antdAdmin | a418a314df74dd5a0f91ffb26b16760a4aad9ebf | [
"Apache-2.0"
] | 1 | 2022-01-21T06:38:43.000Z | 2022-01-21T06:38:43.000Z | admin_cli/utils.py | fireicewater/antdAdmin | a418a314df74dd5a0f91ffb26b16760a4aad9ebf | [
"Apache-2.0"
] | null | null | null | admin_cli/utils.py | fireicewater/antdAdmin | a418a314df74dd5a0f91ffb26b16760a4aad9ebf | [
"Apache-2.0"
] | null | null | null | import os
import re
import sys
import django
from django.utils import translation
def init_django_env(project_name_settings):
    """Bootstrap Django outside manage.py.

    ``project_name_settings`` is the dotted path of the settings module,
    e.g. ``myproject.settings``.
    """
    pwd = os.path.dirname(os.path.realpath(__file__))
    # Add this module's directory to sys.path so project imports resolve.
    sys.path.append(pwd)
    # Same environment bootstrap manage.py performs before django.setup().
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", f"{project_name_settings}")
    django.setup()
def get_lower_case_name(text):
    """Convert CamelCase to snake_case: prefix every uppercase character
    (except a leading one) with an underscore, then lowercase the result.
    """
    pieces = [
        "_" + ch if idx and ch.isupper() else ch
        for idx, ch in enumerate(text)
    ]
    return "".join(pieces).lower()
def trans(txt):
    """Translate *txt* with Django's gettext under the active language.

    NOTE(review): both the ``try`` body and the ``finally`` activate the
    language that is *already* current, so the two activate calls are
    effectively no-ops — presumably a target language was intended here;
    confirm before relying on this helper for language switching.
    """
    cur_language = translation.get_language()
    try:
        translation.activate(cur_language)
        text = translation.gettext(txt)
    finally:
        translation.activate(cur_language)
    return text
# Matches one or more characters in the CJK Unified Ideographs range
# (the common Chinese character block).
zh_pattern = re.compile(u'[\u4e00-\u9fa5]+')


def contain_zh(word):
    """Return the regex match for the first run of Chinese characters in
    *word*, or ``None`` when it contains none.

    The match object is truthy, so callers may use the result directly in
    boolean context.
    """
    # Reading a module-level name needs no ``global`` declaration; the
    # previous ``global zh_pattern`` statement was a no-op and is removed.
    return zh_pattern.search(word)
def get_model_import_path(model):
    '''
    Get model import path from class.__str__
    # <class 'django.contrib.admin.models.LogEntry'> => django.contrib.admin
    '''
    # Debug-time guard only: ``assert`` is stripped under ``python -O``.
    assert(isinstance(model, django.db.models.base.ModelBase))
    # NOTE(review): relies on the default ModelBase repr containing
    # "<app_path>.models"; re.search returns None (-> AttributeError) for a
    # model whose module path lacks ".models" — confirm callers only pass
    # conventional app models.
    return re.search(r'\'(\S+)\.models', str(model)).group(1)
def format_json_string(raw_s, indent=4):
    """Re-indent a JSON-like string.

    Each line is left-stripped and indented by the current bracket depth;
    a line starting with a closer dedents itself, a line starting with an
    opener indents everything after it.  Blank lines are dropped.  Every
    emitted line ends with a newline.
    """
    out_lines = []
    depth = 0
    for raw_line in raw_s.splitlines():
        stripped = raw_line.lstrip()
        if not stripped:
            continue
        # A closing bracket applies to the line it sits on.
        if stripped[0] in "}]":
            depth -= 1
        out_lines.append(" " * (indent * depth) + stripped)
        # An opening bracket applies to the lines that follow.
        if stripped[0] in "{[":
            depth += 1
    return "".join(line + "\n" for line in out_lines)
| 21.725 | 79 | 0.620829 |
0f985ee16e6a8b660f128d79fe93bb60ea612032 | 4,666 | py | Python | nova/tests/unit/image/test_fake.py | zaina/nova | 181358c172d606b23c9cc14b58d677d911013c02 | [
"Apache-2.0"
] | 7 | 2015-09-22T11:27:16.000Z | 2015-11-02T12:33:46.000Z | nova/tests/unit/image/test_fake.py | zaina/nova | 181358c172d606b23c9cc14b58d677d911013c02 | [
"Apache-2.0"
] | 2 | 2015-09-07T22:14:46.000Z | 2020-08-12T08:51:56.000Z | nova/tests/unit/image/test_fake.py | zaina/nova | 181358c172d606b23c9cc14b58d677d911013c02 | [
"Apache-2.0"
] | 4 | 2015-09-09T16:48:56.000Z | 2022-03-15T20:52:57.000Z | # Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import StringIO
from nova import context
from nova import exception
from nova import test
import nova.tests.unit.image.fake
class FakeImageServiceTestCase(test.NoDBTestCase):
    """Exercise the fake image service used by the unit tests."""

    def setUp(self):
        super(FakeImageServiceTestCase, self).setUp()
        self.image_service = nova.tests.unit.image.fake.FakeImageService()
        self.context = context.get_admin_context()

    def tearDown(self):
        super(FakeImageServiceTestCase, self).tearDown()
        # Restore the module-level fake image registry for other tests.
        nova.tests.unit.image.fake.FakeImageService_reset()

    def test_detail(self):
        res = self.image_service.detail(self.context)
        for image in res:
            keys = set(image.keys())
            self.assertEqual(keys, set(['id', 'name', 'created_at',
                                        'updated_at', 'deleted_at', 'deleted',
                                        'status', 'is_public', 'properties',
                                        'disk_format', 'container_format',
                                        'size']))
            self.assertIsInstance(image['created_at'], datetime.datetime)
            self.assertIsInstance(image['updated_at'], datetime.datetime)

            if not (isinstance(image['deleted_at'], datetime.datetime) or
                    image['deleted_at'] is None):
                self.fail('image\'s "deleted_at" attribute was neither a '
                          'datetime object nor None')

            def check_is_bool(image, key):
                # BUG FIX: this previously read image.get('deleted')
                # regardless of ``key``, so the 'is_public' check below
                # never actually tested 'is_public'.
                val = image.get(key)
                if not isinstance(val, bool):
                    self.fail('image\'s "%s" attribute wasn\'t '
                              'a bool: %r' % (key, val))
            check_is_bool(image, 'deleted')
            check_is_bool(image, 'is_public')

    def test_show_raises_imagenotfound_for_invalid_id(self):
        self.assertRaises(exception.ImageNotFound,
                          self.image_service.show,
                          self.context,
                          'this image does not exist')

    def test_create_adds_id(self):
        index = self.image_service.detail(self.context)
        image_count = len(index)

        self.image_service.create(self.context, {})

        index = self.image_service.detail(self.context)
        self.assertEqual(len(index), image_count + 1)

        self.assertTrue(index[0]['id'])

    def test_create_keeps_id(self):
        self.image_service.create(self.context, {'id': '34'})
        self.image_service.show(self.context, '34')

    def test_create_rejects_duplicate_ids(self):
        self.image_service.create(self.context, {'id': '34'})
        self.assertRaises(exception.CouldNotUploadImage,
                          self.image_service.create,
                          self.context,
                          {'id': '34'})

        # Make sure there's still one left
        self.image_service.show(self.context, '34')

    def test_update(self):
        self.image_service.create(self.context,
                                  {'id': '34', 'foo': 'bar'})
        self.image_service.update(self.context, '34',
                                  {'id': '34', 'foo': 'baz'})
        img = self.image_service.show(self.context, '34')
        self.assertEqual(img['foo'], 'baz')

    def test_delete(self):
        self.image_service.create(self.context, {'id': '34', 'foo': 'bar'})
        self.image_service.delete(self.context, '34')
        self.assertRaises(exception.NotFound,
                          self.image_service.show,
                          self.context,
                          '34')

    def test_create_then_get(self):
        blob = 'some data'
        s1 = StringIO.StringIO(blob)
        self.image_service.create(self.context,
                                  {'id': '32', 'foo': 'bar'},
                                  data=s1)
        s2 = StringIO.StringIO()
        self.image_service.download(self.context, '32', data=s2)
        self.assertEqual(s2.getvalue(), blob, 'Did not get blob back intact')
41de66130d854d632038ee90892a27e4fb1494cb | 5,280 | py | Python | Scripts/BunnyAnimation/rotation_animation1.py | WeakKnight/Falcor | 9c0306a3d0525902b042efd2581369141bb95339 | [
"BSD-3-Clause"
] | null | null | null | Scripts/BunnyAnimation/rotation_animation1.py | WeakKnight/Falcor | 9c0306a3d0525902b042efd2581369141bb95339 | [
"BSD-3-Clause"
] | null | null | null | Scripts/BunnyAnimation/rotation_animation1.py | WeakKnight/Falcor | 9c0306a3d0525902b042efd2581369141bb95339 | [
"BSD-3-Clause"
] | null | null | null | baseName = "frame1"
scenePath = "VPLMedia/bunny/Tutorial1.pyscene"
from falcor import *
# Photon budget: total paths to trace, and how many are traced each frame.
photonPathCount = 25000000
perFramePathCount = 10000
def render_graph_vbnl(pPhotonPathCount, pPerFramePathCount):
    """Build the virtual-light render graph.

    pPhotonPathCount: total photon paths for the estimate pass.
    pPerFramePathCount: photon paths traced each frame.
    Returns the configured RenderGraph with ToneMapper.dst and
    VirtualLightVisPass.output marked as outputs.
    """
    g = RenderGraph('g')
    # Load every render-pass DLL the graph may reference.  The original
    # hand-written call list is preserved in order; the loop removes the
    # 34-fold repetition.
    render_pass_libraries = (
        'SceneDebugger.dll', 'BSDFViewer.dll', 'AccumulatePass.dll',
        'DepthPass.dll', 'Antialiasing.dll', 'GBufferSlim.dll',
        'BlitPass.dll', 'DebugPasses.dll', 'CSM.dll',
        'MinimalPathTracer.dll', 'DirectLighting.dll',
        'ErrorMeasurePass.dll', 'ForwardLightingPass.dll',
        'VirtualLightGeneratePass.dll', 'GBuffer.dll', 'ImageLoader.dll',
        'MegakernelPathTracer.dll', 'PassLibraryTemplate.dll',
        'PixelInspectorPass.dll', 'PrepareLights.dll', 'RichVPL.dll',
        'ReSTIRPass.dll', 'RISPass.dll', 'SampleEliminatePass.dll',
        'SimplePathTracer.dll', 'SSAO.dll', 'SkyBox.dll', 'SVGFPass.dll',
        'TemporalDelayPass.dll', 'ToneMapper.dll', 'Utils.dll',
        'VirtualLightEstimatePass.dll', 'VirtualLightVisPass.dll',
        'WhittedRayTracer.dll',
    )
    for library in render_pass_libraries:
        loadRenderPassLibrary(library)
    # Create and register the passes (settings unchanged).
    GBufferSlim = createPass('GBufferSlim')
    g.addPass(GBufferSlim, 'GBufferSlim')
    ToneMapper = createPass('ToneMapper', {'exposureCompensation': 0.0, 'autoExposure': False, 'filmSpeed': 100.0, 'whiteBalance': False, 'whitePoint': 6500.0, 'operator': ToneMapOp.Aces, 'clamp': True, 'whiteMaxLuminance': 1.0, 'whiteScale': 11.199999809265137, 'fNumber': 1.0, 'shutter': 1.0, 'exposureMode': ExposureMode.AperturePriority})
    g.addPass(ToneMapper, 'ToneMapper')
    AccumulatePass = createPass('AccumulatePass', {'enableAccumulation': True, 'autoReset': True, 'precisionMode': AccumulatePrecision.Single, 'subFrameCount': 0})
    g.addPass(AccumulatePass, 'AccumulatePass')
    VirtualLightVisPass = createPass('VirtualLightVisPass', {'radius': 0.004000000189989805, 'visMode': 0, 'visType': 7, 'enable': True})
    g.addPass(VirtualLightVisPass, 'VirtualLightVisPass')
    VirtualLightGeneratePass_ = createPass('VirtualLightGeneratePass', {'TileSize': 1, 'TileSampleNum': 3, 'boundBoxRadius': 0.006000000052154064, 'EnableVSDxPath': True, 'EnableVDSxPath': True, 'EnableVDDxPath': True, 'EnableVSSxPath': True, 'StartFromLights': False, 'PhotonPathCount': 20000})
    g.addPass(VirtualLightGeneratePass_, 'VirtualLightGeneratePass_')
    VirtualLightEstimatePass = createPass('VirtualLightEstimatePass', {'Photon Path Count': pPhotonPathCount, 'Per Frame Path Count': pPerFramePathCount, 'Texture Item Size': 16, 'MegaTexture Capacity': 100000, 'Convert Incoming To Outgoing': True, 'Former Version': False, 'Seperate BRDF': True})
    g.addPass(VirtualLightEstimatePass, 'VirtualLightEstimatePass')
    SampleEliminatePass = createPass('SampleEliminatePass', {'ratio': 0.10000000149011612, 'radiusSearchRange': 0.20000000298023224, 'radiusSearchCount': 100, 'radius': 0.150000001192092896, 'uniformSE': False, 'radiusScalerForASBuilding': 1.100000023841858, 'useDMaxForASBuilding': True, 'useParallelSE': False})
    g.addPass(SampleEliminatePass, 'SampleEliminatePass')
    ReSTIRPass = createPass('ReSTIRPass', {'enableVSLEvaluation': True, 'enableTemporalResampling': True, 'enableSpatialResampling': True, 'GroundtruthMode': 2, 'enableDirectLighting': True, 'needUsageCount': False, 'enablePresampling': True, 'VSLRadiusFactor': 5.0 })
    g.addPass(ReSTIRPass, 'ReSTIRPass')
    PrepareLights = createPass('PrepareLights')
    g.addPass(PrepareLights, 'PrepareLights')
    # Wire the graph (unchanged edges).
    g.addEdge('PrepareLights.output', 'ReSTIRPass.input')
    g.addEdge('SampleEliminatePass.output', 'VirtualLightEstimatePass.input')
    g.addEdge('AccumulatePass.output', 'ToneMapper.src')
    g.addEdge('VirtualLightGeneratePass_.output', 'SampleEliminatePass.input')
    g.addEdge('GBufferSlim.output', 'VirtualLightGeneratePass_.input')
    g.addEdge('ReSTIRPass.output', 'AccumulatePass.input')
    g.addEdge('VirtualLightEstimatePass.output', 'VirtualLightVisPass.dummy')
    g.addEdge('VirtualLightEstimatePass.output', 'PrepareLights.input')
    g.markOutput('ToneMapper.dst')
    g.markOutput('VirtualLightVisPass.output')
    return g
g = render_graph_vbnl(photonPathCount, perFramePathCount)
# ``m`` only exists when run inside Mogwai; ignore otherwise.
try: m.addGraph(g)
except NameError: None
# Frames needed for the estimate pass to consume the full photon budget.
prepareFrame = int(photonPathCount / perFramePathCount) + 2
m.loadScene(scenePath)
captureTime = 5000
m.clock.exitFrame = prepareFrame + captureTime + 1
m.frameCapture.outputDir = "../../../Scripts/BunnyAnimation/"
m.frameCapture.baseFilename = baseName
m.frameCapture.addFrames(m.activeGraph, [prepareFrame + captureTime]) | 61.395349 | 342 | 0.771023 |
1735e22751d3831002d8cf2d03043d12fbaaf95b | 283 | py | Python | FusionIIIT/applications/central_mess/migrations/0013_merge_20200606_1159.py | sabhishekpratap5/sonarcubeTest2 | 9bd8105e457f6feb8c38fa94b335e54783fca99e | [
"bzip2-1.0.6"
] | 2 | 2020-06-17T11:59:08.000Z | 2020-07-10T12:17:35.000Z | FusionIIIT/applications/central_mess/migrations/0013_merge_20200606_1159.py | sabhishekpratap5/sonarcubeTest2 | 9bd8105e457f6feb8c38fa94b335e54783fca99e | [
"bzip2-1.0.6"
] | 19 | 2019-09-08T06:01:14.000Z | 2020-05-21T09:08:20.000Z | FusionIIIT/applications/central_mess/migrations/0013_merge_20200606_1159.py | sabhishekpratap5/sonarcubeTest2 | 9bd8105e457f6feb8c38fa94b335e54783fca99e | [
"bzip2-1.0.6"
] | 14 | 2019-08-31T12:25:42.000Z | 2022-01-12T08:05:33.000Z | # Generated by Django 3.0.6 on 2020-06-06 11:59
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated merge migration: joins two divergent migration
    branches of ``central_mess`` without any schema operations."""

    dependencies = [
        ('central_mess', '0012_auto_20200605_1951'),
        ('central_mess', '0004_auto_20200603_2137'),
    ]

    operations = [
    ]
| 18.866667 | 52 | 0.660777 |
6672fde8e7495a4ec4e7096b26ff436efa777ab6 | 2,865 | py | Python | nova/tests/virt/vmwareapi/test_vim_util.py | bopopescu/nova-master | 58809056f3a219c6ea3667003f906eeaf581fa95 | [
"Apache-2.0"
] | 3 | 2015-06-01T18:32:50.000Z | 2015-11-05T01:07:01.000Z | nova/tests/virt/vmwareapi/test_vim_util.py | bopopescu/nova-master | 58809056f3a219c6ea3667003f906eeaf581fa95 | [
"Apache-2.0"
] | null | null | null | nova/tests/virt/vmwareapi/test_vim_util.py | bopopescu/nova-master | 58809056f3a219c6ea3667003f906eeaf581fa95 | [
"Apache-2.0"
] | 1 | 2020-07-24T06:01:37.000Z | 2020-07-24T06:01:37.000Z | # Copyright (c) 2013 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from nova import test
from nova.virt.vmwareapi import fake
from nova.virt.vmwareapi import vim_util
def _fake_get_object_properties(vim, collector, mobj,
                                type, properties):
    """Stub for vim_util.get_object_properties: one object, no properties."""
    result = fake.FakeRetrieveResult()
    result.add_object(fake.ObjectContent(None))
    return result
def _fake_get_object_properties_missing(vim, collector, mobj,
                                        type, properties):
    """Stub returning one object whose property is reported as missing."""
    result = fake.FakeRetrieveResult()
    missing = [fake.MissingProperty()]
    result.add_object(fake.ObjectContent(None, missing_list=missing))
    return result
class VMwareVIMUtilTestCase(test.NoDBTestCase):
    """Unit tests for vim_util helpers against the fake vSphere backend."""

    def setUp(self):
        super(VMwareVIMUtilTestCase, self).setUp()
        # Reset the fake vCenter state so tests do not leak into each other.
        fake.reset(vc=True)
        self.vim = fake.FakeVim()
        self.vim._login()

    def test_get_dynamic_properties_missing(self):
        # Patched fetch returns an ObjectContent with no properties at all;
        # the helper should yield None rather than raise.
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.vmwareapi.vim_util.get_object_properties',
            _fake_get_object_properties))
        res = vim_util.get_dynamic_property('fake-vim', 'fake-obj',
                                            'fake-type', 'fake-property')
        self.assertIsNone(res)

    def test_get_dynamic_properties_missing_path_exists(self):
        # Same, but the property is explicitly listed in the missing set.
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.vmwareapi.vim_util.get_object_properties',
            _fake_get_object_properties_missing))
        res = vim_util.get_dynamic_property('fake-vim', 'fake-obj',
                                            'fake-type', 'fake-property')
        self.assertIsNone(res)

    def test_get_inner_objects(self):
        # Note: ``property`` shadows the builtin; kept for byte-identity.
        property = ['summary.name']
        # Get the fake datastores directly from the cluster
        cluster_refs = fake._get_object_refs('ClusterComputeResource')
        cluster = fake._get_object(cluster_refs[0])
        expected_ds = cluster.datastore.ManagedObjectReference
        # Get the fake datastores using inner objects utility method
        result = vim_util.get_inner_objects(
            self.vim, cluster_refs[0], 'datastore', 'Datastore', property)
        datastores = [oc.obj for oc in result.objects]
        self.assertEqual(expected_ds, datastores)
| 39.791667 | 78 | 0.675393 |
5309b20972a809496471958acc2193a09b0ca035 | 2,456 | py | Python | doc/source/conf.py | SpamapS/gearhorn | c69dae8a751d1e461fda88fd53ddda453cfab91f | [
"Apache-2.0"
] | null | null | null | doc/source/conf.py | SpamapS/gearhorn | c69dae8a751d1e461fda88fd53ddda453cfab91f | [
"Apache-2.0"
] | null | null | null | doc/source/conf.py | SpamapS/gearhorn | c69dae8a751d1e461fda88fd53ddda453cfab91f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Sphinx build configuration for the gearhorn docs (executed by sphinx-build).
import os
import sys

sys.path.insert(0, os.path.abspath('../..'))

# -- General configuration ----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    #'sphinx.ext.intersphinx',
    'oslosphinx'
]

# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'gearhorn'
copyright = u'2013, OpenStack Foundation'

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output --------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']

# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index',
     '%s.tex' % project,
     u'%s Documentation' % project,
     u'OpenStack Foundation', 'manual'),
]

# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
| 32.315789 | 79 | 0.696661 |
54d3b6ef98f5970133178dfc78438e944be7f4b9 | 7,208 | py | Python | multiagent/scenarios/multi_circular.py | AdityaKapoor74/multiagent-particle-envs | 01484668465609c14c2cc95432cb17305d9b2ef6 | [
"MIT"
] | null | null | null | multiagent/scenarios/multi_circular.py | AdityaKapoor74/multiagent-particle-envs | 01484668465609c14c2cc95432cb17305d9b2ef6 | [
"MIT"
] | null | null | null | multiagent/scenarios/multi_circular.py | AdityaKapoor74/multiagent-particle-envs | 01484668465609c14c2cc95432cb17305d9b2ef6 | [
"MIT"
] | null | null | null | import numpy as np
from multiagent.core import World, Agent, Landmark
from multiagent.scenario import BaseScenario
import webcolors
import math
class Scenario(BaseScenario):
    """Cooperative navigation scenario: agents spawn on the rim of one or more
    circles and each must travel to the antipodal point of its own circle
    (marked by a landmark of the same colour).  Collisions among the *other*
    agents are penalised in every agent's reward (see reward()).
    """

    def make_world(self):
        # Build the world with num_agents agents and one goal landmark each.
        world = World()
        # set any world properties first
        # world.dim_c = 2
        self.num_agents = 8
        self.num_landmarks = 8
        self.num_circles = 2
        self.num_agents_per_circle = self.num_agents//self.num_circles # keeping it uniform (try to make it a perfectly divisible)
        self.col_pen = 0.1
        world.col_pen = self.col_pen
        print('COL PEN: ', self.col_pen)
        self.existence_pen = 0.0 #0.01
        world.existence_pen = self.existence_pen
        print('existence PEN: ', self.existence_pen)
        # Circle radius and centre coordinates, keyed by the number of circles.
        self.radius_circle = {1: 1, 2: 0.4, 3: 0.4, 4: 0.15} #(2/(self.num_circles*2))
        self.centers = {1: [(0.0,0.0)], 2:[(-0.5,0.0), (0.5,0.0)], 3:[(-0.5,-0.5), (0.5,-0.5), (0.0,0.5)], 4:[(-0.5,-0.5), (-0.5,0.5), (0.5, -0.5), (0.5, 0.5)]}#[(-0.5,0.0), (0.5,0.0)]
        print("NUMBER OF AGENTS:",self.num_agents)
        print("NUMBER OF LANDMARKS:",self.num_landmarks)
        print("NUMBER OF CIRCLES:", self.num_circles)
        print("NUMBER OF AGENTS PER CIRCLE:", self.num_agents_per_circle)
        world.collaborative = True
        # add agents
        agent_size = .1
        world.agent_size = agent_size
        world.agents = [Agent() for i in range(self.num_agents)]
        for i, agent in enumerate(world.agents):
            # NOTE(review): several methods below recover the agent index via
            # agent.name[-1]; this only works while num_agents <= 10.
            agent.name = 'agent %d' % i
            agent.collide = False
            agent.silent = True
            agent.size = agent_size
            agent.prevDistance = None
        # add landmarks
        world.landmarks = [Landmark() for i in range(self.num_landmarks)]
        for i, landmark in enumerate(world.landmarks):
            landmark.name = 'landmark %d' % i
            landmark.collide = False
            landmark.movable = False
        # make initial conditions
        self.reset_world(world)
        return world

    def check_collision_before_spawning(self,agent,agent_list):
        # True iff `agent` overlaps any previously placed agent in agent_list
        # (distance between centres less than the sum of the radii).
        if agent is not None and agent_list is not None:
            for other_agent in agent_list:
                if agent.name == other_agent.name:
                    continue
                delta_pos = agent.state.p_pos - other_agent.state.p_pos
                dist = np.sqrt(np.sum(np.square(delta_pos)))
                dist_min = (agent.size + other_agent.size)
                if dist < dist_min:
                    # print("COLLISION WHILE SPAWNING")
                    return True
        return False

    def reset_world(self, world):
        # Assign fixed per-agent colours, then place agents on the circle rims
        # with their goal landmarks at the diametrically opposite points.
        color_choice = [np.array([255,0,0]), np.array([0,255,0]), np.array([0,0,255]), np.array([0,0,0]), np.array([128,0,0]), np.array([0,128,0]), np.array([0,0,128]), np.array([128,0,128]), np.array([128,128,0]), np.array([128,128,128])]
        # AGENT 0 : red
        # AGENT 1 : lime
        # AGENT 2 : blue
        # AGENT 3 : black
        # base_color = np.array([0.1, 0.1, 0.1])
        agent_list = []
        landmark_list = []
        for i in range(self.num_agents):
            # `rgb` is sampled but unused — the fixed palette below is applied.
            rgb = np.random.uniform(-1,1,3)
            # world.agents[i].color = rgb
            # world.landmarks[i].color = rgb
            world.agents[i].color = color_choice[i]
            world.landmarks[i].color = color_choice[i]
            print("AGENT", world.agents[i].name[-1], ":", webcolors.rgb_to_name((color_choice[i][0],color_choice[i][1],color_choice[i][2])))
        # set random initial states
        agent_list = []
        radius = self.radius_circle[self.num_circles]
        start_agent_index = 0
        end_agent_index = self.num_agents_per_circle
        for center_index, center in enumerate(self.centers[self.num_circles]):
            for agent in world.agents[start_agent_index:end_agent_index]:
                # Sample a spawn angle; the agent sits on the rim, its goal
                # landmark at the antipode (theta + pi).
                theta = np.random.uniform(-math.pi, math.pi)
                x = center[0] + radius*math.cos(theta)
                y = center[1] + radius*math.sin(theta)
                agent.state.p_pos = np.array([x,y])
                x_g = center[0] + radius*math.cos(theta+math.pi)
                y_g = center[1] + radius*math.sin(theta+math.pi)
                world.landmarks[int(agent.name[-1])].state.p_pos = np.array([x_g,y_g])
                # Re-sample until the spawn does not overlap any agent that
                # has already been placed this reset.
                while self.check_collision_before_spawning(agent, agent_list):
                    theta = np.random.uniform(-math.pi, math.pi)
                    x = center[0] + radius*math.cos(theta)
                    y = center[1] + radius*math.sin(theta)
                    agent.state.p_pos = np.array([x,y])
                    x_g = center[0] + radius*math.cos(theta+math.pi)
                    y_g = center[1] + radius*math.sin(theta+math.pi)
                    world.landmarks[int(agent.name[-1])].state.p_pos = np.array([x_g,y_g])
                agent.state.p_vel = np.zeros(world.dim_p)
                agent.state.c = np.zeros(world.dim_c)
                agent.prevDistance = None
                agent_list.append(agent)
            start_agent_index += self.num_agents_per_circle
            # Advance the slice bounds; the final circle absorbs any remainder
            # when num_agents is not exactly divisible by num_circles.
            if (center_index == len(self.centers[self.num_circles]) - 2) and (end_agent_index+self.num_agents_per_circle < len(world.agents)):
                end_agent_index += len(world.agents) - start_agent_index
            elif end_agent_index+self.num_agents_per_circle < len(world.agents):
                end_agent_index += self.num_agents_per_circle
            else:
                end_agent_index += len(world.agents) - start_agent_index

    def benchmark_data(self, agent, world):
        # Diagnostic metrics: (reward, collision count, summed min distances,
        # number of landmarks with an agent within 0.1).
        rew = 0
        collisions = 0
        occupied_landmarks = 0
        min_dists = 0
        for l in world.landmarks:
            dists = [np.sqrt(np.sum(np.square(a.state.p_pos - l.state.p_pos))) for a in world.agents]
            min_dists += min(dists)
            rew -= min(dists)
            if min(dists) < 0.1:
                occupied_landmarks += 1
        if agent.collide:
            for a in world.agents:
                if self.is_collision(a, agent):
                    rew -= 1
                    collisions += 1
        return (rew, collisions, min_dists, occupied_landmarks)

    def is_collision(self, agent1, agent2):
        # Two distinct agents collide when their discs overlap.
        if agent1.name == agent2.name:
            return False
        delta_pos = agent1.state.p_pos - agent2.state.p_pos
        dist = np.sqrt(np.sum(np.square(delta_pos)))
        dist_min = agent1.size + agent2.size
        return True if dist < dist_min else False

    def reward(self, agent, world):
        # Dense shaping reward: progress toward the agent's own goal since the
        # previous step, minus a penalty for collisions among *other* agents.
        my_index = int(agent.name[-1])
        agent_dist_from_goal = np.sqrt(np.sum(np.square(world.agents[my_index].state.p_pos - world.landmarks[my_index].state.p_pos)))
        if agent.prevDistance is None:
            # First step after a reset: no previous distance to compare with.
            rew = 0
        else:
            rew = agent.prevDistance - agent_dist_from_goal
        agent.prevDistance = agent_dist_from_goal
        # penalise on collision
        # for a in world.agents:
        #     if self.is_collision(a, agent):
        #         rew -= self.col_pen
        #         assert False
        # # SHARED COLLISION REWARD
        # for a in world.agents:
        #     for o in world.agents:
        #         if self.is_collision(a,o):
        #             rew -=0.01
        # COLLISION REWARD FOR OTHER AGENTS
        for a in world.agents:
            if a.name != agent.name:
                for o in world.agents:
                    if o.name != agent.name:
                        if self.is_collision(a,o):
                            # print(str(a.name) +' in collision with '+str(o.name)+' would add pen to '+str(world.agents[my_index].name))
                            rew -= self.col_pen/2 # divide by 2 so as not to overcount collisions
        # Penalty of existence
        # if agent_dist_from_goal > .1:
        #     rew -= self.existence_pen
        return rew

    def observation(self, agent, world):
        # Returns (critic_obs, actor_obs); both concatenate [own position,
        # own velocity, own goal-landmark position] and are currently equal.
        curr_agent_index = world.agents.index(agent)
        current_agent_critic = [agent.state.p_pos,agent.state.p_vel,world.landmarks[curr_agent_index].state.p_pos]
        current_agent_actor = [agent.state.p_pos,agent.state.p_vel,world.landmarks[curr_agent_index].state.p_pos]
        return np.concatenate(current_agent_critic),np.concatenate(current_agent_actor)

    def isFinished(self,agent,world):
        # Per-agent termination test: within 0.1 of its own goal landmark.
        index = int(agent.name[-1])
        dist = np.sqrt(np.sum(np.square(world.agents[index].state.p_pos - world.landmarks[index].state.p_pos)))
        if dist<0.1:
            return True
        return False
00f29ba181a8599a217eadb356e0b3361f02e4df | 3,550 | py | Python | plot_wheel_diagram.py | riquri/wheel-diagram | 30b4cd73edffb4f57cd5d9065fc8541badde5f12 | [
"MIT"
] | null | null | null | plot_wheel_diagram.py | riquri/wheel-diagram | 30b4cd73edffb4f57cd5d9065fc8541badde5f12 | [
"MIT"
] | null | null | null | plot_wheel_diagram.py | riquri/wheel-diagram | 30b4cd73edffb4f57cd5d9065fc8541badde5f12 | [
"MIT"
] | null | null | null | import sys
import math
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib
import matplotlib.patheffects as PathEffects
import subprocess
matplotlib.font_manager._rebuild()
plt.rcParams["font.family"] = "Helvetica"
plt.rcParams["font.weight"] = "bold"
def wheel_diagram(seq):
    """Compute helical-wheel data and the mean hydrophobic moment of *seq*.

    Each residue is placed 100 degrees after the previous one (ideal alpha
    helix), and a new "cycle" (ring on the wheel) starts every 18 residues.

    Parameters
    ----------
    seq : str
        Amino-acid sequence in one-letter code ('*' allowed, hydropathy 0).
        Unknown residues raise KeyError.

    Returns
    -------
    tuple
        ``(result, moment, moment_per_residue)`` where ``result`` is a list of
        ``[index, cycle, residue, kyte_doolittle_value]`` rows, ``moment`` is
        the summed 2D hydrophobic-moment vector and ``moment_per_residue`` its
        magnitude divided by the sequence length.
    """
    RESIDUES = ["A","L","I","V","F","Y","W","M","C","G","P","N","Q","S","T","H","R","K","D","E", "*"]
    kyte_doolittle = [1.8,3.8,4.5,4.2,2.8,-1.3,-0.9,1.9,2.5,-0.4,-1.6,-3.5,-3.5,-0.8,-0.7,-3.2,-4.5,-3.9,-3.5,-3.5, 0]
    # O(1) hydropathy lookup instead of three O(n) list.index() calls per residue.
    hydropathy = dict(zip(RESIDUES, kyte_doolittle))
    result = []
    cycle = 0
    moment = [0, 0]
    if not seq:
        # Empty sequence: avoid ZeroDivisionError in the final normalisation.
        return result, moment, 0.0
    for index, res in enumerate(seq):
        kd = hydropathy[res]
        result.append([index, cycle, res, kd])
        angle = index * 100 / 180 * math.pi  # 100 degrees per residue, in radians
        moment[0] = moment[0] + (kd * math.cos(angle))
        moment[1] = moment[1] + (kd * math.sin(angle))
        # Fix: was `index is not 0` (identity comparison with an int literal,
        # a SyntaxWarning on Python >= 3.8).  The check is also redundant,
        # since (0 + 1) % 18 != 0, so it is dropped without behaviour change.
        if (index + 1) % 18 == 0:
            cycle += 1
    return result, moment, math.sqrt(moment[0] ** 2 + moment[1] ** 2) / len(seq)
def plot_wheel_diagram(seq):
    """Render a helical-wheel diagram of *seq* and save it as a PNG.

    Residues are drawn as coloured circles (blue = basic R/K/H, red = acidic
    D/E, green = polar N/Q/S/T, grey = other) connected in sequence order,
    with the per-residue hydrophobic moment printed in the centre and the
    moment vector shown as an arrow.  Output file: "<first 8 residues>_wheel.png".
    """
    initial_radius = 3
    marker_size = 1  # radial growth per wheel cycle (ring)
    result, moment_vec, moment_val = wheel_diagram(seq)
    prev_point = [initial_radius,0]
    plt.figure(figsize=(4, 4))
    # Shrink the labels for long sequences (more than three full turns).
    if len(seq) > 18*3:
        font_size = 8
    else:
        font_size = 16
    for res in result:
        # Colour by residue class.
        if res[2] in ["R", "K", "H"]:
            color="#007AB7"
        elif res[2] in ["D", "E"]:
            color="#C7243A"
        elif res[2] in ["N", "Q", "S", "T"]:
            color="#23AC0E"
        else:
            color="#333333"
        # 100 degrees per residue; radius grows by marker_size per cycle.
        current_point = [math.cos(res[0]*100/180*math.pi)*(initial_radius+res[1]*marker_size),math.sin(res[0]*100/180*math.pi)*(initial_radius+res[1]*marker_size)]
        # (Removed unused local `boxdic`; the text is drawn with bbox=None and
        # the circle outline is added as an explicit patch below.)
        plt.text(current_point[0], current_point[1], r"$\bf{" + res[2] + "}$", color=color, fontsize=font_size, fontweight="bold", zorder=999-res[0], bbox=None, va="center", ha="center")
        c = patches.Circle(xy=(current_point[0]+0.05, current_point[1]-0.05), radius=0.5, linewidth=2, fc="white", ec=color, zorder=999-res[0])
        plt.gca().add_patch(c)
        # Light grey connector from the previous residue.
        plt.plot([prev_point[0],current_point[0]],[prev_point[1],current_point[1]], color="#DDDDDD", linewidth=1, zorder=100)
        prev_point = current_point
    final_radius = (initial_radius+result[-1][1]*marker_size)*1.2
    #plt.plot([0,moment_vec[0]/10],[0,moment_vec[1]/10])
    # Moment magnitude in the centre, with a white halo for legibility.
    center_text = plt.text(0, 0, "{:.3g}".format(moment_val),ha='center', va="center", zorder=120, fontsize=16, weight="bold", color="#333333")
    center_text.set_path_effects([PathEffects.withStroke(linewidth=3, foreground='w')])
    # Arrow showing the direction of the hydrophobic-moment vector.
    an = plt.annotate("", xy=[moment_vec[0]/30,moment_vec[1]/30], xytext=[0,0], arrowprops=dict(shrink=0, width=2, headwidth=8,headlength=8, connectionstyle='arc3',facecolor="#999999", edgecolor="#999999"))
    an.set_zorder(101)
    plt.xlim(-final_radius, final_radius)
    plt.ylim(-final_radius, final_radius)
    plt.xticks([])
    plt.yticks([])
    plt.gca().invert_yaxis()
    plt.gca().set_aspect("equal")
    # Strip all margins so the wheel fills the figure.
    plt.margins(0,0)
    plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, hspace = 0, wspace = 0)
    plt.gca().xaxis.set_major_locator(plt.NullLocator())
    plt.gca().yaxis.set_major_locator(plt.NullLocator())
    plt.savefig(f"{seq[0:8]}_wheel.png", dpi=300,pad_inches = 0)
| 39.88764 | 206 | 0.614648 |
a25397ad0891c541c2e7970d5d8e8e4dc7f17c0e | 154 | py | Python | TITADOweb/web/urls.py | KomeilParseh/TITA-DO | 714685fa18bfd2ef07f5c0d656927039b05d7997 | [
"MIT"
] | 9 | 2020-08-27T10:10:11.000Z | 2021-04-21T04:46:15.000Z | TITADOweb/web/urls.py | mdk1384/TITA-DO-1 | 714685fa18bfd2ef07f5c0d656927039b05d7997 | [
"MIT"
] | 2 | 2020-08-27T12:09:57.000Z | 2021-01-05T09:29:19.000Z | TITADOweb/web/urls.py | mdk1384/TITA-DO-1 | 714685fa18bfd2ef07f5c0d656927039b05d7997 | [
"MIT"
] | 2 | 2020-08-27T10:10:18.000Z | 2021-01-01T06:20:20.000Z | from django.conf.urls import url
from django.urls import path
from .views import Todo
urlpatterns = [
url(r'^submitTodo/$', Todo, name='Todo'),
]
| 15.4 | 45 | 0.694805 |
94c780253723fd5131310dc163b93d35efaefee8 | 3,447 | py | Python | blockapi/api/terra_money.py | andrejkljucko/blockapi | 6ff08ebc237f305faede1817be360e50b5b72705 | [
"MIT"
] | null | null | null | blockapi/api/terra_money.py | andrejkljucko/blockapi | 6ff08ebc237f305faede1817be360e50b5b72705 | [
"MIT"
] | null | null | null | blockapi/api/terra_money.py | andrejkljucko/blockapi | 6ff08ebc237f305faede1817be360e50b5b72705 | [
"MIT"
] | null | null | null | from decimal import Decimal
from blockapi.services import BlockchainAPI
class TerraMoneyApi(BlockchainAPI):
    """
    Terra Money
    API docs: UNKNOWN
    Explorer: https://fcd.terra.dev/
    """

    symbol = 'LUNA'
    base_url = 'https://fcd.terra.dev/v1'
    rate_limit = 0.5
    # On-chain amounts are integer micro-units; multiply by 1e-6 for whole tokens.
    coef = Decimal(1e-6)
    max_items_per_page = 100
    page_offset_step = 1

    supported_requests = {
        'get_balance': '/bank/{address}',
        'get_delegations': '/staking/{address}',
        'get_txs': '/txs?account={address}&limit={limit}&page={page}'
    }

    # Map of chain denominations to ticker symbols.
    symbols = {
        'uluna': 'LUNA',
        'ukrw': 'KRT',
        'usdr': 'SDT',
        'uusd': 'UST',
        'umnt': 'MNT'
    }

    # Map of message types to the transaction kinds reported by get_txs().
    tx_kinds = {
        'bank/MsgSend': 'deposit',
        'staking/MsgDelegate': 'delegation',
        'distribution/MsgWithdrawDelegationReward': 'delegation_withdrawal'
    }

    def get_balance(self):
        """Return a list of {'symbol', 'amount'} dicts for the account,
        with staked LUNA added to the LUNA entry, or None if the API
        returned no data."""
        balances = self.request('get_balance', address=self.address)
        if not balances:
            return None

        return_balances = []
        for bal in balances['balance']:
            return_balances.append({
                'symbol': self._get_symbol(bal['denom']),
                'amount': Decimal(bal['available']) * self.coef
            })

        # Add staked amount in LUNA
        for delegation in balances['delegations']:
            luna = next((b for b in return_balances if b['symbol'] == 'LUNA'),
                        None)
            if luna is None:
                # Bug fix: previously the fallback dict given to next() was
                # mutated but never appended to return_balances, so staked
                # LUNA was silently dropped whenever the account held no
                # liquid LUNA balance.
                luna = {'symbol': 'LUNA', 'amount': Decimal(0)}
                return_balances.append(luna)
            luna['amount'] += Decimal(delegation['amount']) * self.coef

        return return_balances

    def get_txs(self, offset=1, limit=100, unconfirmed=False):
        """Return a list of {'kind', 'result'} dicts for one page of
        transactions (page number = offset), or None on empty response.
        `unconfirmed` is accepted for interface compatibility but unused."""
        txs_req = self.request('get_txs', address=self.address, limit=limit,
                               page=offset)
        if not txs_req:
            return None

        txs = []
        for tx in txs_req['txs']:
            tx_item = {
                # kind is derived from the first message's type; None if unknown.
                'kind': self.tx_kinds.get(tx['tx']['value']['msg'][0]['type']),
                'result': self.parse_tx(tx)
            }
            txs.append(tx_item)
        return txs

    def get_delegations(self):
        """Return the account's staking delegations."""
        delegations = self.request('get_delegations', address=self.address)
        # Convert all numbers
        # NOTE(review): _load is not defined in this class; presumably
        # provided by the BlockchainAPI base — confirm.
        return self._load(delegations)

    def parse_tx(self, tx):
        """Parse one raw transaction into {'date', 'fee', 'amount'},
        converting micro-unit strings to Decimal token amounts."""
        fee = tx['tx']['value']['fee']
        msg = tx['tx']['value']['msg']

        return {
            'date': tx['timestamp'],
            'fee': [{
                'symbol': self._get_symbol(f['denom']),
                'amount': Decimal(f['amount']) * self.coef
            } for f in fee['amount']],
            'amount': [self.parse_tx_amount(m['value']) for m in msg]
        }

    def parse_tx_amount(self, tx_value):
        """Parse the amount of a single message value.

        Returns None when the message carries no amount, a list of
        {'symbol', 'amount'} dicts for multi-coin amounts, or a single
        dict otherwise.
        """
        if 'amount' not in tx_value:
            return None

        tx_amount = tx_value['amount']
        if isinstance(tx_amount, list):
            amount = [{
                'symbol': self._get_symbol(t['denom']),
                'amount': Decimal(t['amount']) * self.coef
            } for t in tx_amount]
        else:
            amount = {
                'symbol': self._get_symbol(tx_amount['denom']),
                'amount': Decimal(tx_amount['amount']) * self.coef
            }
        return amount

    @classmethod
    def _get_symbol(cls, denom):
        """It seems that API returns only denom instead of correct
        symbols.
        """
        return cls.symbols.get(denom, 'unknown')
| 29.211864 | 79 | 0.531477 |
bb6a1969abe9463280cb26168933cf2a859d648a | 3,457 | py | Python | ideaseed/queyd.py | ewen-lbh/ideasprout | d4ea7df7d0a1d6ecd20d3bd0f8d0af2f9ab78c7a | [
"MIT"
] | null | null | null | ideaseed/queyd.py | ewen-lbh/ideasprout | d4ea7df7d0a1d6ecd20d3bd0f8d0af2f9ab78c7a | [
"MIT"
] | null | null | null | ideaseed/queyd.py | ewen-lbh/ideasprout | d4ea7df7d0a1d6ecd20d3bd0f8d0af2f9ab78c7a | [
"MIT"
] | null | null | null | import json
from pathlib import Path
from subprocess import call
from typing import Any, Callable, NamedTuple, Tuple
import requests
from requests.models import Response
from rich.prompt import InvalidResponse
from ideaseed import ui
from ideaseed.authentication import Cache, T
from ideaseed.ondisk import Idea
from ideaseed.utils import ask
"""
curl 'http://localhost:8000/' -H 'Accept-Encoding: gzip, deflate, br' -H 'Content-Type: application/json' -H 'Accept: application/json' -H 'Connection: keep-alive' -H 'DNT: 1' -H 'Origin: http://localhost:8000' --data-binary '{"query":"query {\n notes {\n id, title\n }\n}"}' --compressed
"""
def _to_gql(query: dict[str, Any] | list[str]) -> str:
if isinstance(query, dict):
return "\n".join(
f"{key} {{ {_to_gql(value)} }}" for key, value in query.items()
)
elif isinstance(query, list):
return ", ".join(map(str, query))
return ""
def _gql_call(name: str, **arguments) -> str:
return f"{name}({', '.join(f'{key}: {json.dumps(value)}' for key, value in arguments.items())})"
class QueydClient(NamedTuple):
    """Thin GraphQL client for a Queyd backend.

    Both fields are callables taking a selection spec (as accepted by
    _to_gql) and returning the raw HTTP response.
    """

    # Executes a GraphQL query; returns the requests Response.
    query: Callable[[dict[str, Any]], Response]
    # Executes a GraphQL mutation; returns the requests Response.
    mutation: Callable[[dict[str, Any]], Response]

    @classmethod
    def authenticated(cls, auth_token: str, endpoint: str) -> "QueydClient":
        """Build a client that POSTs to *endpoint* with a bearer token.

        NOTE(review): the token is sent in an "Authentication" header rather
        than the conventional "Authorization" — presumably what the Queyd
        server expects; confirm against the server implementation.
        """
        return cls(
            query=lambda query: requests.post(
                endpoint,
                json={"query": _to_gql({"query": query})},
                headers={"Authentication": f"Bearer {auth_token}"},
            ),
            mutation=lambda mutation: requests.post(
                endpoint,
                json={"query": _to_gql({"mutation": mutation})},
                headers={"Authentication": f"Bearer {auth_token}"},
            ),
        )

    def add(self, idea: Idea) -> Response:
        """Persist *idea* via the ``add`` mutation, selecting the new id."""
        return self.mutation(
            {
                _gql_call(
                    "add",
                    title=idea.title,
                    project=idea.project,
                    body=idea.body,
                    tags=idea.labels,
                ): ["id"]
            }
        )
def is_correct_password(password: str, endpoint: str) -> bool:
    """Probe *endpoint* with *password*; True iff the server accepts it.

    A 403 means the password was rejected; any success status means it was
    accepted; anything else is unexpected and raised as an error.
    """
    probe = QueydClient.authenticated(password, endpoint).query({"notes": {"id"}})
    status = probe.status_code
    if status == 403:
        return False
    if status // 100 <= 2:
        return True
    raise Exception(f"Unexpected status code: {status}. Response is: {probe.text}")
class AuthCache(Cache):
    """On-disk credential cache for Queyd, stored under the "queyd" namespace."""

    # GraphQL endpoint the cached token authenticates against.
    graphql_endpoint: str

    def __init__(self, path: Path, graphql_endpoint: str):
        self.graphql_endpoint = graphql_endpoint
        super().__init__(path, "queyd")

    def login_from_cache(self) -> QueydClient:
        # Re-use the token stored by a previous login_manually().
        return QueydClient.authenticated(
            self.cache["token"], endpoint=self.graphql_endpoint
        )

    def login_manually(self, **params) -> Tuple[Any, dict[str, Any]]:
        """Prompt for the password and return (client, cache dict to persist)."""

        def _validate(ans: str) -> bool:
            # rich prompt protocol: raising InvalidResponse re-prompts the user.
            if not is_correct_password(ans, self.graphql_endpoint):
                raise InvalidResponse("Incorrect password")
            return True

        cache = {
            "token": ask(
                "Password",
                is_valid=_validate,
                password=True,
            )
        }
        return QueydClient.authenticated(cache["token"], self.graphql_endpoint), cache
def using() -> bool:
    """Report whether the Queyd backend is enabled (currently always on)."""
    enabled = True
    return enabled
| 31.427273 | 293 | 0.587504 |
4eeab16c7058aeb424bf8064be51da01786a0f51 | 2,116 | py | Python | scripts/site-sitemap.py | mpercy/mod_mbox | ce9f5707aca5b74cfc4216418df969498fcea27d | [
"Apache-2.0"
] | 1 | 2017-11-09T23:27:56.000Z | 2017-11-09T23:27:56.000Z | scripts/site-sitemap.py | mpercy/mod_mbox | ce9f5707aca5b74cfc4216418df969498fcea27d | [
"Apache-2.0"
] | null | null | null | scripts/site-sitemap.py | mpercy/mod_mbox | ce9f5707aca5b74cfc4216418df969498fcea27d | [
"Apache-2.0"
] | null | null | null | #!/usr/local/bin/python
import os
from os.path import join as pjoin
import sys
import subprocess
def get_output(cmd):
    """Run *cmd* (an argv list) and return its stdout stripped of
    surrounding whitespace (bytes under Python 3)."""
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    stdout = proc.communicate()[0]
    proc.wait()
    return stdout.strip()
# you could use os.path.walk to calculate this... or you could use du(1).
def duhack(path):
    # Size of *path* in bytes as reported by `du -k` (kilobyte count * 1024).
    cmd = ['du', '-k', path]
    out = get_output(cmd).split()
    return int(out[0]) * 1024
# Output filename template; must contain a %d placeholder for the part number.
BASEPATH=sys.argv[1]
ROOT="/x1/mail-archives/mod_mbox"
HOSTNAME="http://mail-archives.apache.org/mod_mbox/"
# Archives larger than this are split across multiple sitemap parts.
# NOTE(review): "PARITION" is a typo for "PARTITION" (kept for compatibility).
PARITION_SIZE=100 * 1024 * 1024

# Build tlps: top-level project -> {list name: [directory name, size in bytes]}.
# NOTE(review): this script is Python 2 only (dict.has_key below, list-returning
# dict.keys() sorted in place, integer division later); the loop variable
# `list` also shadows the builtin.
tlps={}
for files in os.listdir(ROOT):
    path = files
    # Directory names look like "<tlp>-<list>", e.g. "httpd-dev".
    tlp = path[0:path.find('-')]
    list = path[path.find('-')+1:]
    # print "%s - %s %s" % (tlp, list, path)
    # Skip directories that are not real mod_mbox archives.
    if not os.access("%s/%s/listinfo.db" % (ROOT, path), os.F_OK):
        continue
    # The site-wide lists live under "www-*" but are published as "asf".
    if tlp == "www":
        tlp = "asf"
    if not tlps.has_key(tlp):
        tlps[tlp] = {}
    tlps[tlp][list] = [path, duhack(pjoin(ROOT, path))]

keys = tlps.keys()
keys.sort()

count = 0   # sitemap entries written into the current output file
fcount = 0  # index of the current output file (fills BASEPATH's %d)
def write_sitemap_header(fp):
    """Write the XML declaration and opening <sitemapindex> tag to *fp*."""
    fp.write(
        '<?xml version="1.0" encoding="UTF-8"?>\n'
        '<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n'
    )
def write_sitemap_footer(fp):
    """Close the <sitemapindex> element opened by write_sitemap_header."""
    closing = "</sitemapindex>\n"
    fp.write(closing)
# Emit the sitemap index files, rolling over to a new output file (fcount)
# whenever the current one exceeds 500 entries.
fp = open(BASEPATH % (fcount), 'w')
write_sitemap_header(fp)

for tlp in keys:
    klist = tlps[tlp].keys()
    klist.sort()
    for list in klist:
        name = tlps[tlp][list][0]
        size = tlps[tlp][list][1]
        if size < PARITION_SIZE:
            count += 1
            fp.write("<sitemap><loc>%s%s/?format=sitemap</loc></sitemap>\n" % (HOSTNAME, name))
        else:
            # Large archives are split into `part` pieces, one <sitemap> each.
            # NOTE(review): Python 2 integer division; under Python 3 this
            # yields a float and range() below would raise.
            part = (size / PARITION_SIZE) + 1
            for i in range(0, part):
                count += 1
                # NOTE(review): the raw '&' characters make the emitted XML
                # strictly invalid; they should be escaped as &amp; — confirm
                # what the sitemap consumers accept.
                fp.write("<sitemap><loc>%s%s/?format=sitemap&pmax=%d&part=%d</loc></sitemap>\n" % (HOSTNAME, name, part, i))
        if count > 500:
            # Roll over to the next output file.
            write_sitemap_footer(fp)
            fp.close()
            count = 0
            fcount += 1
            fp = open(BASEPATH % (fcount), 'w')
            write_sitemap_header(fp)

write_sitemap_footer(fp)
| 27.480519 | 132 | 0.581285 |
84b728fe61b4397d5b83b13685fdca6b7e0fd67b | 4,432 | py | Python | deliravision/torch/models/gans/info/models.py | delira-dev/vision_torch | d944aa67d319bd63a2add5cb89e8308413943de6 | [
"BSD-2-Clause"
] | 4 | 2019-08-03T09:56:50.000Z | 2019-09-05T09:32:06.000Z | deliravision/torch/models/gans/info/models.py | delira-dev/vision_torch | d944aa67d319bd63a2add5cb89e8308413943de6 | [
"BSD-2-Clause"
] | 23 | 2019-08-03T14:16:47.000Z | 2019-10-22T10:15:10.000Z | deliravision/torch/models/gans/info/models.py | delira-dev/vision_torch | d944aa67d319bd63a2add5cb89e8308413943de6 | [
"BSD-2-Clause"
] | null | null | null | import torch
class Generator(torch.nn.Module):
    """InfoGAN-style generator: maps (noise, class label, latent code) to an
    image via a linear projection followed by two 2x-upsampling conv stages.
    """

    def __init__(self, latent_dim, n_classes, code_dim, img_size, num_channels):
        """
        Parameters
        ----------
        latent_dim : int
            Size of the noise vector.
        n_classes : int
            Number of (one-hot) class labels.
        code_dim : int
            Size of the continuous latent code.
        img_size : int
            Output image side length in pixels (must be divisible by 4).
        num_channels : int
            Number of channels in the generated image.
        """
        super().__init__()
        in_features = latent_dim + n_classes + code_dim

        # Spatial size of the feature map before the two 2x upsampling stages.
        self.init_size = img_size // 4
        self.l1 = torch.nn.Linear(in_features, 128 * self.init_size ** 2)

        layers = [
            torch.nn.BatchNorm2d(128),
            torch.nn.Upsample(scale_factor=2),
            torch.nn.Conv2d(128, 128, 3, stride=1, padding=1),
            torch.nn.BatchNorm2d(128, 0.8),
            torch.nn.LeakyReLU(0.2, inplace=True),
            torch.nn.Upsample(scale_factor=2),
            torch.nn.Conv2d(128, 64, 3, stride=1, padding=1),
            torch.nn.BatchNorm2d(64, 0.8),
            torch.nn.LeakyReLU(0.2, inplace=True),
            torch.nn.Conv2d(64, num_channels, 3, stride=1, padding=1),
            torch.nn.Tanh(),
        ]
        self.conv_blocks = torch.nn.Sequential(*layers)

    def forward(self, noise, labels, code):
        """Generate a batch of images.

        Parameters
        ----------
        noise : :class:`torch.Tensor`
            Noise vectors, shape (batch, latent_dim).
        labels : :class:`torch.Tensor`
            Label vectors, shape (batch, n_classes); cast to the noise dtype.
        code : :class:`torch.Tensor`
            Latent codes, shape (batch, code_dim).

        Returns
        -------
        :class:`torch.Tensor`
            Generated images, shape (batch, num_channels, img_size, img_size).
        """
        gen_input = torch.cat((noise, labels.to(noise.dtype), code), -1)
        hidden = self.l1(gen_input)
        feature_map = hidden.view(hidden.shape[0], 128, self.init_size, self.init_size)
        return self.conv_blocks(feature_map)
class Discriminator(torch.nn.Module):
"""
A simple discriminator network
"""
def __init__(self, code_dim, n_classes, num_channels, img_size):
"""
Parameters
----------
code_dim : int
size of the code dimension
n_classes : int
number of image classes
num_channels : int
number of image channels
img_size : int
number of pixels per side
"""
super().__init__()
def discriminator_block(in_filters, out_filters, bn=True):
"""Returns layers of each discriminator block"""
block = [torch.nn.Conv2d(in_filters, out_filters, 3, 2, 1),
torch.nn.LeakyReLU(0.2, inplace=True),
torch.nn.Dropout2d(0.25)]
if bn:
block.append(torch.nn.BatchNorm2d(out_filters, 0.8))
return block
self.conv_blocks = torch.nn.Sequential(
*discriminator_block(num_channels, 16, bn=False),
*discriminator_block(16, 32),
*discriminator_block(32, 64),
*discriminator_block(64, 128),
)
# The height and width of downsampled image
ds_size = self.conv_blocks(torch.rand(1, num_channels, img_size,
img_size)).size(2)
# Output layers
self.adv_layer = torch.nn.Linear(128 * ds_size ** 2, 1)
self.aux_layer = torch.nn.Sequential(
torch.nn.Linear(128 * ds_size ** 2, n_classes), torch.nn.Softmax())
self.latent_layer = torch.nn.Linear(128 * ds_size ** 2, code_dim)
def forward(self, img):
"""
Feeds a single image batch through the network
Parameters
----------
img : :class:`torch.Tensor`
the image batch
Returns
-------
:class:`torch.Tensor`
the validity for each image
:class:`torch.Tensor`
the predicted label for each image
:class:`torch.Tensor`
the predicted latent code for each image
"""
out = self.conv_blocks(img)
out = out.view(out.shape[0], -1)
validity = self.adv_layer(out)
label = self.aux_layer(out)
latent_code = self.latent_layer(out)
return validity, label, latent_code
| 31.211268 | 80 | 0.549639 |
2841b1741c7a11516f5ac73d4bc1eeeec407da4d | 9,465 | py | Python | algorithms/symmetry/absences/screw_axes.py | Anthchirp/dials | 211cf7646d6711769b86643b010cb2fe5aaf71b9 | [
"BSD-3-Clause"
] | null | null | null | algorithms/symmetry/absences/screw_axes.py | Anthchirp/dials | 211cf7646d6711769b86643b010cb2fe5aaf71b9 | [
"BSD-3-Clause"
] | 2 | 2020-07-31T22:37:30.000Z | 2020-07-31T23:08:55.000Z | algorithms/symmetry/absences/screw_axes.py | Anthchirp/dials | 211cf7646d6711769b86643b010cb2fe5aaf71b9 | [
"BSD-3-Clause"
] | null | null | null | """Definitions of screw axes with methods for scoring against data."""
from __future__ import absolute_import, division, print_function
import math
import logging
from scitbx.array_family import flex
from dials.algorithms.symmetry.absences.plots import plot_screw_axes
from dials.util.observer import Observer, Subject, singleton
from jinja2 import Environment, ChoiceLoader, PackageLoader
logger = logging.getLogger("dials.space_group")
@singleton
class ScrewAxisObserver(Observer):
    """Observer to record data used in screw axis analysis."""

    def update(self, screw_axis):
        # Snapshot the axial-reflection arrays gathered during scoring so
        # they can be plotted later; keyed by axis name (e.g. "21c").
        self.data[screw_axis.name] = {
            "miller_axis_vals": screw_axis.miller_axis_vals,
            "i_over_sigma": screw_axis.i_over_sigma,
            "intensities": screw_axis.intensities,
            "sigmas": screw_axis.sigmas,
        }

    def generate_html_report(self, filename):
        """Generate a html report using the data."""
        screw_axes_graphs = plot_screw_axes(self.data)
        self.data["screw_axes"] = screw_axes_graphs
        # Templates are resolved first from dials/templates, then dials/static.
        loader = ChoiceLoader(
            [
                PackageLoader("dials", "templates"),
                PackageLoader("dials", "static", encoding="utf-8"),
            ]
        )
        env = Environment(loader=loader)
        template = env.get_template("simple_report.html")
        html = template.render(
            page_title="DIALS systematic absences report",
            panel_title="Screw axes analysis",
            panel_id="screw_axes",
            graphs=self.data["screw_axes"],
        )
        # xmlcharrefreplace keeps any non-ASCII characters as HTML entities.
        with open(filename, "wb") as f:
            f.write(html.encode("utf-8", "xmlcharrefreplace"))
class ScrewAxis(Subject):
    """Definition of a generic screw axis.

    Subclasses fix axis_idx, axis_repeat and name; score_axis() then tests
    the axial reflections of a dataset against the axis's reflection
    condition and returns a probability-like score.
    """

    axis_idx = None  # x=0, y=1, z=2
    axis_repeat = None  # repeat of present reflections e.g =4 for 41, =2 for 42
    name = None

    def __init__(self):
        # The "selected data for scoring" event lets ScrewAxisObserver
        # snapshot the axial data gathered in get_all_suitable_reflections().
        super(ScrewAxis, self).__init__(events=["selected data for scoring"])
        self.equivalent_axes = []
        self.n_refl_used = (0.0, 0.0)  # (n expected-present, n expected-absent)
        self.miller_axis_vals = []
        self.i_over_sigma = []
        self.intensities = []
        self.sigmas = []
        self.mean_I_sigma_abs = 0.0
        self.mean_I_sigma = 0.0
        self.mean_I_abs = 0.0
        self.mean_I = 0.0

    def add_equivalent_axis(self, equivalent):
        """Add a symmetry equivalent axis."""
        self.equivalent_axes.append(equivalent)

    def select_axial_reflections(self, miller_indices):
        """Select reflections along the screw axis."""
        # Axial reflections have both of the other two indices equal to zero.
        h, k, l = miller_indices.as_vec3_double().parts()
        if self.axis_idx == 0:
            selection = (k == 0) & (l == 0)
        elif self.axis_idx == 1:
            selection = (h == 0) & (l == 0)
        else:
            selection = (h == 0) & (k == 0)
        return selection

    @Subject.notify_event(event="selected data for scoring")
    def get_all_suitable_reflections(self, reflection_table):
        """Select suitable reflections for testing the screw axis."""
        refl = reflection_table
        sel = self.select_axial_reflections(refl["miller_index"])
        miller_idx = refl["miller_index"].select(sel)
        self.miller_axis_vals = miller_idx.as_vec3_double().parts()[self.axis_idx]
        self.intensities = refl["intensity"].select(sel)
        self.sigmas = flex.sqrt(refl["variance"].select(sel))
        self.i_over_sigma = self.intensities / self.sigmas
        if self.equivalent_axes:
            # Pool axial data from all symmetry-equivalent axes.
            for a in self.equivalent_axes:
                sel = a.select_axial_reflections(refl["miller_index"])
                miller_idx = refl["miller_index"].select(sel)
                intensities = refl["intensity"].select(sel)
                sigmas = flex.sqrt(refl["variance"].select(sel))
                self.i_over_sigma.extend(intensities / sigmas)
                self.miller_axis_vals.extend(
                    miller_idx.as_vec3_double().parts()[a.axis_idx]
                )
                self.intensities.extend(intensities)
                self.sigmas.extend(sigmas)

    def score_axis(self, reflection_table, significance_level=0.95):
        """Score the axis give a reflection table of data.

        Returns a probability-like score in [0, 1]: high when reflections
        obey the axis's condition (index % axis_repeat == 0 present, others
        absent), near zero when there is no evidence for the axis.
        """
        assert significance_level in [0.95, 0.975, 0.99]
        self.get_all_suitable_reflections(reflection_table)

        # Split axial reflections into expected-present / expected-absent.
        expected_sel = self.miller_axis_vals.iround() % self.axis_repeat == 0
        expected = self.i_over_sigma.select(expected_sel)
        expected_abs = self.i_over_sigma.select(~expected_sel)
        self.n_refl_used = (expected.size(), expected_abs.size())
        # Limit to best #n reflections to avoid weak at high res - use wilson B?
        if not expected or not expected_abs:
            return 0.0

        # z = (sample mean - population mean) / standard error
        S_E_abs = 1.0  # errors probably correlated so say standard error = 1
        S_E_pres = 1.0  # / expected.size() ** 0.5

        self.mean_I_sigma_abs = flex.mean(expected_abs)
        self.mean_I_sigma = flex.mean(expected)

        self.mean_I = flex.mean(self.intensities.select(expected_sel))
        self.mean_I_abs = flex.mean(self.intensities.select(~expected_sel))

        z_score_absent = self.mean_I_sigma_abs / S_E_abs
        z_score_present = self.mean_I_sigma / S_E_pres
        # get a p-value for z > z_score
        P_absent = 0.5 * (1.0 + math.erf(z_score_absent / (2 ** 0.5)))
        P_present = 0.5 * (1.0 + math.erf(z_score_present / (2 ** 0.5)))

        # sanity check - is most of intensity in 'expected' channel?
        intensity_test = self.mean_I_sigma > (20.0 * self.mean_I_sigma_abs)

        # One-sided z cutoffs for the chosen significance level.
        cutoffs = {0.95: 1.645, 0.975: 1.960, 0.99: 2.326}
        cutoff = cutoffs[significance_level]

        if z_score_absent > cutoff and not intensity_test:
            # z > 1.65 in only 5% of cases for normal dist
            # significant nonzero intensity where expected absent.
            return (1.0 - P_absent) * P_present
        elif z_score_absent > cutoff:
            # results appear inconsistent - significant i_over_sigma_abs, but this
            # is still low compared to i_over_sigma_expected
            # try removing the highest absent reflection in case its an outlier
            outlier_msg = (
                """Screw axis %s could only be assigned after removing a suspected outlier
from the expected 'absent' reflections."""
                % self.name
            )
            if expected_abs.size() <= 1:
                logger.info(outlier_msg)
                return P_present
            sel = flex.sort_permutation(expected_abs)
            sorted_exp_abs = expected_abs.select(sel)
            # Drop the strongest "absent" reflection and re-test.
            mean_i_sigma_abs = flex.mean(sorted_exp_abs[:-1])
            if (mean_i_sigma_abs / S_E_abs) > cutoff:
                # Still looks like reflections in expected absent
                logger.info(
                    """Test results for %s appear inconsistent (significant nonzero intensity for
'absent' reflections, but majority of intensity in reflection condition).
There may be a set of weak reflections due to pseudosymmetry.""",
                    self.name,
                )
                # Still high intensity of absent, so return as before
                return (1.0 - P_absent) * P_present
            # Looks like there was an outlier, now 'absent' reflections ~ 0.
            self.mean_I_sigma_abs = mean_i_sigma_abs
            self.mean_I_abs = flex.mean(
                self.intensities.select(~expected_sel).select(sel)[:-1]
            )
            if z_score_present > cutoff:
                logger.info(outlier_msg)
                return P_present
            # else in the uncertain case
        elif z_score_present > cutoff:  # evidence with confidence
            return P_present
        logger.info(
            """No evidence to suggest a screw axis for %s, but insufficient
evidence to rule out completely, possibly due to limited data.""",
            self.name,
        )
        return 0.0  # should this be zero or a small number?
class ScrewAxis21c(ScrewAxis):
"""Definition of a 21c screw axis"""
axis_idx = 2
axis_repeat = 2
name = "21c"
class ScrewAxis21b(ScrewAxis):
"""Definition of a 21b screw axis"""
axis_idx = 1
axis_repeat = 2
name = "21b"
class ScrewAxis21a(ScrewAxis):
"""Definition of a 21a screw axis"""
axis_idx = 0
axis_repeat = 2
name = "21a"
class ScrewAxis41c(ScrewAxis):
"""Definition of a 41c screw axis"""
axis_idx = 2
axis_repeat = 4
name = "41c"
class ScrewAxis42c(ScrewAxis):
"""Definition of a 42c screw axis"""
axis_idx = 2
axis_repeat = 2
name = "42c"
class ScrewAxis41b(ScrewAxis):
"""Definition of a 41b screw axis"""
axis_idx = 1
axis_repeat = 4
name = "41b"
class ScrewAxis41a(ScrewAxis):
"""Definition of a 41a screw axis"""
axis_idx = 0
axis_repeat = 4
name = "41a"
class ScrewAxis31c(ScrewAxis):
"""Definition of a 31c screw axis"""
axis_idx = 2
axis_repeat = 3
name = "31c"
class ScrewAxis61c(ScrewAxis):
"""Definition of a 61c screw axis"""
axis_idx = 2
axis_repeat = 6
name = "61c"
class ScrewAxis62c(ScrewAxis):
"""Definition of a 62c screw axis"""
axis_idx = 2
axis_repeat = 3
name = "62c"
class ScrewAxis63c(ScrewAxis):
    """Definition of a 63c screw axis (6_3 screw along c)."""

    axis_idx = 2  # axis the screw lies along: 0=a, 1=b, 2=c
    axis_repeat = 2  # 6_3 gives the same axial condition as 2_1: l = 2n -- presumably; confirm in ScrewAxis
    name = "63c"
| 34.046763 | 97 | 0.619123 |
4aeddd8486f249705fa1b32dac576a9ab3746140 | 407 | py | Python | backend/flat_wind_29352/wsgi.py | crowdbotics-apps/flat-wind-29352 | 313d3feaee0702f05e6ccb519acef99f1710ce3b | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/flat_wind_29352/wsgi.py | crowdbotics-apps/flat-wind-29352 | 313d3feaee0702f05e6ccb519acef99f1710ce3b | [
"FTL",
"AML",
"RSA-MD"
] | 46 | 2021-08-02T22:56:11.000Z | 2022-01-23T13:44:27.000Z | backend/flat_wind_29352/wsgi.py | crowdbotics-apps/flat-wind-29352 | 313d3feaee0702f05e6ccb519acef99f1710ce3b | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | """
WSGI config for flat_wind_29352 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'flat_wind_29352.settings')
application = get_wsgi_application()
| 23.941176 | 78 | 0.793612 |
117db7f2112f525361be073f0fe7681634cb73d2 | 19,121 | py | Python | lib/GUI/Shortcuts.py | Connor22/hydrus | d0ae4a8898742a0d13601e1167d5ba697b31c395 | [
"WTFPL"
] | null | null | null | lib/GUI/Shortcuts.py | Connor22/hydrus | d0ae4a8898742a0d13601e1167d5ba697b31c395 | [
"WTFPL"
] | null | null | null | lib/GUI/Shortcuts.py | Connor22/hydrus | d0ae4a8898742a0d13601e1167d5ba697b31c395 | [
"WTFPL"
] | null | null | null | from . import ClientConstants as CC
from . import ClientData
from . import ClientGUICommon
from . import HydrusConstants as HC
from . import HydrusData
from . import HydrusGlobals as HG
from . import HydrusSerialisable
import wx
# Feature-detect wx.lib.flashwin: it is only available/usable on Windows.
# IShouldCatchCharHook uses this to avoid stealing keystrokes from an
# embedded Flash window.
FLASHWIN_OK = False

if HC.PLATFORM_WINDOWS:
    
    try:
        
        import wx.lib.flashwin
        
        FLASHWIN_OK = True
        
    except Exception as e:
        
        # best-effort detection: any import failure just leaves the flag False
        pass
def ConvertKeyEventToShortcut( event ):
    """Translate a wx key event into a Shortcut, or None if the key is not one we map."""
    
    key = event.KeyCode
    
    # ignore keys that are neither printable ascii nor in our special-key table
    if not ( ClientData.OrdIsSensibleASCII( key ) or key in CC.wxk_code_string_lookup ):
        
        return None
        
    
    modifiers = [ modifier for ( is_down, modifier ) in (
        ( event.AltDown(), CC.SHORTCUT_MODIFIER_ALT ),
        ( event.CmdDown(), CC.SHORTCUT_MODIFIER_CTRL ),
        ( event.ShiftDown(), CC.SHORTCUT_MODIFIER_SHIFT )
    ) if is_down ]
    
    shortcut = Shortcut( CC.SHORTCUT_TYPE_KEYBOARD, key, modifiers )
    
    if HG.gui_report_mode:
        
        HydrusData.ShowText( 'key event caught: ' + repr( shortcut ) )
        
    
    return shortcut
def ConvertKeyEventToSimpleTuple( event ):
    """Return ( wx accelerator modifier, keycode ) for a key event.
    
    Only a single modifier is reported: alt takes precedence over ctrl,
    and ctrl over shift, matching the original behaviour.
    """
    
    if event.AltDown():
        
        modifier = wx.ACCEL_ALT
        
    elif event.CmdDown():
        
        modifier = wx.ACCEL_CTRL
        
    elif event.ShiftDown():
        
        modifier = wx.ACCEL_SHIFT
        
    else:
        
        modifier = wx.ACCEL_NORMAL
        
    
    return ( modifier, event.KeyCode )
def ConvertMouseEventToShortcut( event ):
    """Translate a wx mouse event into a Shortcut, or None for events we do not map."""
    
    # map button presses (and double-clicks) and wheel motion to our key constants
    if event.LeftDown() or event.LeftDClick():
        
        key = CC.SHORTCUT_MOUSE_LEFT
        
    elif event.MiddleDown() or event.MiddleDClick():
        
        key = CC.SHORTCUT_MOUSE_MIDDLE
        
    elif event.RightDown() or event.RightDClick():
        
        key = CC.SHORTCUT_MOUSE_RIGHT
        
    elif event.GetWheelRotation() > 0:
        
        key = CC.SHORTCUT_MOUSE_SCROLL_UP
        
    elif event.GetWheelRotation() < 0:
        
        key = CC.SHORTCUT_MOUSE_SCROLL_DOWN
        
    else:
        
        return None
        
    
    modifiers = [ modifier for ( is_down, modifier ) in (
        ( event.AltDown(), CC.SHORTCUT_MODIFIER_ALT ),
        ( event.CmdDown(), CC.SHORTCUT_MODIFIER_CTRL ),
        ( event.ShiftDown(), CC.SHORTCUT_MODIFIER_SHIFT )
    ) if is_down ]
    
    shortcut = Shortcut( CC.SHORTCUT_TYPE_MOUSE, key, modifiers )
    
    if HG.gui_report_mode:
        
        HydrusData.ShowText( 'mouse event caught: ' + repr( shortcut ) )
        
    
    return shortcut
def IShouldCatchCharHook( evt_handler ):
    """Decide whether evt_handler should consume a char hook event right now.
    
    Returns False when the pointer is over an embedded Flash window (Windows
    only), when a menu is open, or when the handler's window tree lacks focus.
    """
    
    if HC.PLATFORM_WINDOWS and FLASHWIN_OK:
        
        window_under_pointer = wx.FindWindowAtPointer()
        
        # isinstance( None, ... ) is False, so no separate None check is needed
        if isinstance( window_under_pointer, wx.lib.flashwin.FlashWindow ):
            
            return False
            
        
    
    return not HG.client_controller.MenuIsOpen() and ClientGUICommon.WindowOrSameTLPChildHasFocus( evt_handler )
class Shortcut( HydrusSerialisable.SerialisableBase ):
    """A single keyboard or mouse shortcut: a key/button code plus a set of modifiers."""
    
    SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_SHORTCUT
    SERIALISABLE_NAME = 'Shortcut'
    SERIALISABLE_VERSION = 1
    
    def __init__( self, shortcut_type = None, shortcut_key = None, modifiers = None ):
        
        if shortcut_type is None:
            
            shortcut_type = CC.SHORTCUT_TYPE_KEYBOARD
            
        
        if shortcut_key is None:
            
            shortcut_key = wx.WXK_F7
            
        
        if modifiers is None:
            
            modifiers = []
            
        
        HydrusSerialisable.SerialisableBase.__init__( self )
        
        self._shortcut_type = shortcut_type
        self._shortcut_key = shortcut_key
        
        # Keep a sorted copy so equality/hashing are modifier-order-independent.
        # The previous code sorted the caller's list in place, which was a
        # surprising side effect.
        self._modifiers = sorted( modifiers )
        
    
    def __eq__( self, other ):
        
        # Compare actual state rather than hashes: hash equality does not imply
        # equality, and hashing arbitrary 'other' objects could raise.
        if not isinstance( other, Shortcut ):
            
            return NotImplemented
            
        
        return ( self._shortcut_type, self._shortcut_key, self._modifiers ) == ( other._shortcut_type, other._shortcut_key, other._modifiers )
        
    
    def __hash__( self ):
        
        return ( self._shortcut_type, self._shortcut_key, tuple( self._modifiers ) ).__hash__()
        
    
    def __repr__( self ):
        
        return 'Shortcut: ' + self.ToString()
        
    
    def _GetSerialisableInfo( self ):
        
        return ( self._shortcut_type, self._shortcut_key, self._modifiers )
        
    
    def _InitialiseFromSerialisableInfo( self, serialisable_info ):
        
        ( self._shortcut_type, self._shortcut_key, self._modifiers ) = serialisable_info
        
    
    def GetShortcutType( self ):
        
        return self._shortcut_type
        
    
    def ToString( self ):
        """Render the shortcut for display, e.g. 'ctrl+shift+a' or 'alt+left-click'."""
        
        components = []
        
        if CC.SHORTCUT_MODIFIER_CTRL in self._modifiers:
            
            components.append( 'ctrl' )
            
        
        if CC.SHORTCUT_MODIFIER_ALT in self._modifiers:
            
            components.append( 'alt' )
            
        
        if CC.SHORTCUT_MODIFIER_SHIFT in self._modifiers:
            
            components.append( 'shift' )
            
        
        if self._shortcut_type == CC.SHORTCUT_TYPE_KEYBOARD:
            
            if self._shortcut_key in CC.wxk_code_string_lookup:
                
                components.append( CC.wxk_code_string_lookup[ self._shortcut_key ] )
                
            elif ClientData.OrdIsAlphaUpper( self._shortcut_key ):
                
                components.append( chr( self._shortcut_key + 32 ) ) # + 32 for converting ascii A -> a
                
            elif ClientData.OrdIsSensibleASCII( self._shortcut_key ):
                
                components.append( chr( self._shortcut_key ) )
                
            else:
                
                components.append( 'unknown key' )
                
            
        elif self._shortcut_type == CC.SHORTCUT_TYPE_MOUSE:
            
            components.append( CC.shortcut_mouse_string_lookup[ self._shortcut_key ] )
            
        
        return '+'.join( components )
# Register Shortcut with the serialisation system so persisted instances can be reinflated.
HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_SHORTCUT ] = Shortcut
class ShortcutPanel( wx.Panel ):
    """Panel offering a mouse/keyboard radio choice plus a capture widget for each.
    
    GetValue returns whichever shortcut the selected radio corresponds to.
    """
    
    def __init__( self, parent ):
        
        wx.Panel.__init__( self, parent )
        
        # each capture widget gets its paired radio so capturing an event can
        # auto-select the matching radio button
        self._mouse_radio = wx.RadioButton( self, style = wx.RB_GROUP, label = 'mouse' )
        self._mouse_shortcut = ShortcutMouse( self, self._mouse_radio )
        
        self._keyboard_radio = wx.RadioButton( self, label = 'keyboard' )
        self._keyboard_shortcut = ShortcutKeyboard( self, self._keyboard_radio )
        
        #
        
        vbox = wx.BoxSizer( wx.VERTICAL )
        
        vbox.Add( ClientGUICommon.BetterStaticText( self, 'Mouse events only work for the duplicate and archive/delete filters atm!' ), CC.FLAGS_EXPAND_PERPENDICULAR )
        
        # two-column grid: radio on the left, capture widget on the right
        gridbox = wx.FlexGridSizer( 2 )
        
        gridbox.AddGrowableCol( 1, 1 )
        
        gridbox.Add( self._mouse_radio, CC.FLAGS_VCENTER )
        gridbox.Add( self._mouse_shortcut, CC.FLAGS_EXPAND_BOTH_WAYS )
        gridbox.Add( self._keyboard_radio, CC.FLAGS_VCENTER )
        gridbox.Add( self._keyboard_shortcut, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        vbox.Add( gridbox, CC.FLAGS_EXPAND_BOTH_WAYS )
        
        self.SetSizer( vbox )
        
    
    def GetValue( self ):
        """Return the Shortcut from whichever side is currently selected."""
        
        if self._mouse_radio.GetValue() == True:
            
            return self._mouse_shortcut.GetValue()
            
        else:
            
            return self._keyboard_shortcut.GetValue()
            
        
    
    def SetValue( self, shortcut ):
        """Load the given Shortcut, selecting the appropriate radio for its type."""
        
        if shortcut.GetShortcutType() == CC.SHORTCUT_TYPE_MOUSE:
            
            self._mouse_radio.SetValue( True )
            self._mouse_shortcut.SetValue( shortcut )
            
        else:
            
            self._keyboard_radio.SetValue( True )
            self._keyboard_shortcut.SetValue( shortcut )
class ShortcutKeyboard( wx.TextCtrl ):
    """Text control that captures the next keypress as a keyboard Shortcut."""
    
    def __init__( self, parent, related_radio = None ):
        
        # default shortcut shown before the user presses anything
        self._shortcut = Shortcut( CC.SHORTCUT_TYPE_KEYBOARD, wx.WXK_F7, [] )
        
        # optional radio button to tick when a key is captured (see ShortcutPanel)
        self._related_radio = related_radio
        
        wx.TextCtrl.__init__( self, parent, style = wx.TE_PROCESS_ENTER )
        
        self.Bind( wx.EVT_KEY_DOWN, self.EventKeyDown )
        
        self._SetShortcutString()
        
    
    def _SetShortcutString( self ):
        
        # show the human-readable form of the current shortcut in the control
        display_string = self._shortcut.ToString()
        
        wx.TextCtrl.SetValue( self, display_string )
        
    
    def EventKeyDown( self, event ):
        
        shortcut = ConvertKeyEventToShortcut( event )
        
        if shortcut is not None:
            
            self._shortcut = shortcut
            
            if self._related_radio is not None:
                
                self._related_radio.SetValue( True )
                
            
            self._SetShortcutString()
            
        
    
    def GetValue( self ):
        
        return self._shortcut
        
    
    def SetValue( self, shortcut ):
        
        self._shortcut = shortcut
        
        self._SetShortcutString()
class ShortcutMouse( wx.Button ):
    """Button that captures the next mouse click/scroll as a mouse Shortcut."""
    
    def __init__( self, parent, related_radio = None ):
        
        # default shortcut shown before the user clicks anything
        self._shortcut = Shortcut( CC.SHORTCUT_TYPE_MOUSE, CC.SHORTCUT_MOUSE_LEFT, [] )
        
        # optional radio button to tick when a click is captured (see ShortcutPanel)
        self._related_radio = related_radio
        
        wx.Button.__init__( self, parent )
        
        self.Bind( wx.EVT_MOUSE_EVENTS, self.EventMouse )
        
        self._SetShortcutString()
        
    
    def _SetShortcutString( self ):
        
        # show the human-readable form of the current shortcut on the button
        display_string = self._shortcut.ToString()
        
        self.SetLabel( display_string )
        
    
    def EventMouse( self, event ):
        
        self.SetFocus()
        
        shortcut = ConvertMouseEventToShortcut( event )
        
        if shortcut is not None:
            
            self._shortcut = shortcut
            
            if self._related_radio is not None:
                
                self._related_radio.SetValue( True )
                
            
            self._SetShortcutString()
            
        
    
    def GetValue( self ):
        
        return self._shortcut
        
    
    def SetValue( self, shortcut ):
        
        self._shortcut = shortcut
        
        self._SetShortcutString()
class Shortcuts( HydrusSerialisable.SerialisableBaseNamed ):
    """A named mapping of Shortcut -> ApplicationCommand."""
    
    SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_SHORTCUTS
    SERIALISABLE_NAME = 'Shortcuts'
    SERIALISABLE_VERSION = 2
    
    def __init__( self, name ):
        
        HydrusSerialisable.SerialisableBaseNamed.__init__( self, name )
        
        self._shortcuts_to_commands = {}
        
    
    def __iter__( self ):
        
        # iterate a copy so callers can mutate the set while iterating
        for ( shortcut, command ) in list(self._shortcuts_to_commands.items()):
            
            yield ( shortcut, command )
            
        
    
    def __len__( self ):
        
        return len( self._shortcuts_to_commands )
        
    
    def _GetSerialisableInfo( self ):
        
        return [ ( shortcut.GetSerialisableTuple(), command.GetSerialisableTuple() ) for ( shortcut, command ) in list(self._shortcuts_to_commands.items()) ]
        
    
    def _InitialiseFromSerialisableInfo( self, serialisable_info ):
        
        for ( serialisable_shortcut, serialisable_command ) in serialisable_info:
            
            shortcut = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_shortcut )
            command = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_command )
            
            self._shortcuts_to_commands[ shortcut ] = command
            
        
    
    def _UpdateSerialisableInfo( self, version, old_serialisable_info ):
        """Migrate v1 (raw wx modifier/key tuples) to v2 (Shortcut/command pairs)."""
        
        if version == 1:
            
            ( serialisable_mouse_actions, serialisable_keyboard_actions ) = old_serialisable_info
            
            shortcuts_to_commands = {}
            
            # this never stored mouse actions, so skip
            
            services_manager = HG.client_controller.services_manager
            
            for ( modifier, key, ( serialisable_service_key, data ) ) in serialisable_keyboard_actions:
                
                # unknown wx modifiers are dropped rather than guessed at
                if modifier not in CC.shortcut_wx_to_hydrus_lookup:
                    
                    modifiers = []
                    
                else:
                    
                    modifiers = [ CC.shortcut_wx_to_hydrus_lookup[ modifier ] ]
                    
                
                shortcut = Shortcut( CC.SHORTCUT_TYPE_KEYBOARD, key, modifiers )
                
                if serialisable_service_key is None:
                    
                    # no service attached -> this was a simple application command
                    command = ClientData.ApplicationCommand( CC.APPLICATION_COMMAND_TYPE_SIMPLE, data )
                    
                else:
                    
                    service_key = bytes.fromhex( serialisable_service_key )
                    
                    # the referenced service may have been deleted since; drop the binding
                    if not services_manager.ServiceExists( service_key ):
                        
                        continue
                        
                    
                    action = HC.CONTENT_UPDATE_FLIP
                    
                    value = data
                    
                    service = services_manager.GetService( service_key )
                    
                    service_type = service.GetServiceType()
                    
                    if service_type in HC.TAG_SERVICES:
                        
                        content_type = HC.CONTENT_TYPE_MAPPINGS
                        
                    elif service_type in HC.RATINGS_SERVICES:
                        
                        content_type = HC.CONTENT_TYPE_RATINGS
                        
                    else:
                        
                        # other service types had no keyboard bindings worth keeping
                        continue
                        
                    
                    command = ClientData.ApplicationCommand( CC.APPLICATION_COMMAND_TYPE_CONTENT, ( service_key, content_type, action, value ) )
                    
                
                shortcuts_to_commands[ shortcut ] = command
                
            
            # NOTE(review): this is a generator expression, so it can only be
            # consumed once by the caller -- presumably intentional; confirm
            # against HydrusSerialisable's update machinery.
            new_serialisable_info = ( ( shortcut.GetSerialisableTuple(), command.GetSerialisableTuple() ) for ( shortcut, command ) in list(shortcuts_to_commands.items()) )
            
            return ( 2, new_serialisable_info )
            
        
    
    def GetCommand( self, shortcut ):
        """Return the ApplicationCommand bound to shortcut, or None if unbound."""
        
        if shortcut in self._shortcuts_to_commands:
            
            return self._shortcuts_to_commands[ shortcut ]
            
        else:
            
            return None
            
        
    
    def SetCommand( self, shortcut, command ):
        
        self._shortcuts_to_commands[ shortcut ] = command
# Register Shortcuts with the serialisation system so persisted sets can be reinflated.
HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_SHORTCUTS ] = Shortcuts
class ShortcutsHandler( object ):
    """Binds to a window's char hook and routes captured shortcuts to commands.
    
    The handler consults the named shortcut sets (in order) via the client
    controller and asks the parent window to process any matched command.
    """
    
    def __init__( self, parent, initial_shortcuts_names = None ):
        
        if initial_shortcuts_names is None:
            
            initial_shortcuts_names = []
            
        
        self._parent = parent
        # copy so later Add/RemoveShortcuts do not mutate the caller's list
        self._shortcuts_names = list( initial_shortcuts_names )
        
        self._parent.Bind( wx.EVT_CHAR_HOOK, self.EventCharHook )
        
        #self._parent.Bind( wx.EVT_MOUSE_EVENTS, self.EventMouse ) # let's not mess with this until we are doing something clever with it
        
    
    def _ProcessShortcut( self, shortcut ):
        """Look up and execute the command for shortcut; return True if handled."""
        
        shortcut_processed = False
        
        command = HG.client_controller.GetCommandFromShortcut( self._shortcuts_names, shortcut )
        
        if command is not None:
            
            command_processed = self._parent.ProcessApplicationCommand( command )
            
            if command_processed:
                
                shortcut_processed = True
                
            
            if HG.shortcut_report_mode:
                
                message = 'Shortcut "' + shortcut.ToString() + '" matched to command "' + command.ToString() + '" on ' + repr( self._parent ) + '.'
                
                if command_processed:
                    
                    message += ' It was processed.'
                    
                else:
                    
                    message += ' It was not processed.'
                    
                
                HydrusData.ShowText( message )
                
            
        
        return shortcut_processed
        
    
    def EventCharHook( self, event ):
        
        shortcut = ConvertKeyEventToShortcut( event )
        
        if shortcut is not None:
            
            if HG.shortcut_report_mode:
                
                message = 'Key shortcut "' + shortcut.ToString() + '" passing through ' + repr( self._parent ) + '.'
                
                if IShouldCatchCharHook( self._parent ):
                    
                    message += ' I am in a state to catch it.'
                    
                else:
                    
                    message += ' I am not in a state to catch it.'
                    
                
                HydrusData.ShowText( message )
                
            
            if IShouldCatchCharHook( self._parent ):
                
                shortcut_processed = self._ProcessShortcut( shortcut )
                
                if shortcut_processed:
                    
                    # swallow the event: do not Skip, so wx stops propagation
                    return
                    
                
            
        
        event.Skip()
        
    
    def EventMouse( self, event ):
        
        shortcut = ConvertMouseEventToShortcut( event )
        
        if shortcut is not None:
            
            shortcut_processed = self._ProcessShortcut( shortcut )
            
            if shortcut_processed:
                
                return
                
            
        
        event.Skip()
        
    
    def AddShortcuts( self, shortcuts_name ):
        
        if shortcuts_name not in self._shortcuts_names:
            
            self._shortcuts_names.append( shortcuts_name )
            
        
    
    def RemoveShortcuts( self, shortcuts_name ):
        
        if shortcuts_name in self._shortcuts_names:
            
            self._shortcuts_names.remove( shortcuts_name )
52a60f2702208284fcac12ebcdc194a5b8fc7639 | 985 | py | Python | services/flaskr/geocoding/google_geocoder/google_geocoder.py | Tigenzero/clutch-code-project | 3921ee810f286884f9b3bf02e98c00b0e1159c4a | [
"MIT"
] | null | null | null | services/flaskr/geocoding/google_geocoder/google_geocoder.py | Tigenzero/clutch-code-project | 3921ee810f286884f9b3bf02e98c00b0e1159c4a | [
"MIT"
] | 1 | 2021-06-02T00:54:33.000Z | 2021-06-02T00:54:33.000Z | services/flaskr/geocoding/google_geocoder/google_geocoder.py | Tigenzero/clutch-code-project | 3921ee810f286884f9b3bf02e98c00b0e1159c4a | [
"MIT"
] | null | null | null | import googlemaps
from collections import deque
class GoogleGeocoder(object):
def __init__(self, api_key):
self.gmap = googlemaps.Client(key=api_key)
self.queue = deque()
def _get_geocoded_lat_long(self, address):
geocode_object = self.gmap.geocode(address)
return self._parse_geocode_object(geocode_object)
def _parse_geocode_object(self, geocode_object):
if len(geocode_object) > 0 and "geometry" in geocode_object[0] and "location" in geocode_object[0]["geometry"]:
return geocode_object[0]["geometry"]["location"]
elif len(geocode_object) == 0:
raise LookupError(f"Address provided not found: {geocode_object}")
elif len(geocode_object) > 0 and "geometry" in geocode_object[0]:
raise KeyError(f"Address was found but location could not be determined. {geocode_object}")
def get_lat_long_from_address(self, address):
return self._get_geocoded_lat_long(address)
| 41.041667 | 119 | 0.705584 |
3cd45ef737cb1b1f2de3e43169be42bfea8e10d5 | 1,052 | py | Python | scripts/generate_lava.py | konsulko/meta-meson | 443aa3ce80bef09eb2f45dc070ee6b8e3cf38e89 | [
"MIT"
] | null | null | null | scripts/generate_lava.py | konsulko/meta-meson | 443aa3ce80bef09eb2f45dc070ee6b8e3cf38e89 | [
"MIT"
] | null | null | null | scripts/generate_lava.py | konsulko/meta-meson | 443aa3ce80bef09eb2f45dc070ee6b8e3cf38e89 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from jinja2 import Environment, FileSystemLoader
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--template")
parser.add_argument("--base-artifacts-url")
parser.add_argument("--device-type")
parser.add_argument("--kernel-image-name")
parser.add_argument("--dtb-name")
parser.add_argument("--ramdisk-name")
parser.add_argument("--ramdisk-compression")
parser.add_argument("--boot-method")
parser.add_argument("--tags")
args = parser.parse_args()
env = Environment(loader = FileSystemLoader('.'), trim_blocks=True, lstrip_blocks=True)
template = env.get_template(args.template)
values = {}
values['base_artifacts_url'] = args.base_artifacts_url
values['device_type'] = args.device_type
values['kernel_image_name'] = args.kernel_image_name
values['dtb_name'] = args.dtb_name
values['ramdisk_name'] = args.ramdisk_name
values['ramdisk_compression'] = args.ramdisk_compression
values['boot_method'] = args.boot_method
if args.tags:
values['tags'] = args.tags.split(",")
print(template.render(values))
| 31.878788 | 87 | 0.770913 |
c86980de94b7931fa6af5747f97a095c8ac3412f | 1,080 | py | Python | xlsxwriter/test/comparison/test_rich_string08.py | Rippling/XlsxWriter-1 | be8d1cb8f8b156cf87bbe5d591f1f5475804be44 | [
"BSD-2-Clause"
] | null | null | null | xlsxwriter/test/comparison/test_rich_string08.py | Rippling/XlsxWriter-1 | be8d1cb8f8b156cf87bbe5d591f1f5475804be44 | [
"BSD-2-Clause"
] | null | null | null | xlsxwriter/test/comparison/test_rich_string08.py | Rippling/XlsxWriter-1 | be8d1cb8f8b156cf87bbe5d591f1f5475804be44 | [
"BSD-2-Clause"
] | null | null | null | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('rich_string08.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
bold = workbook.add_format({'bold': 1})
italic = workbook.add_format({'italic': 1})
format = workbook.add_format({'align': 'center'})
worksheet.write('A1', 'Foo', bold)
worksheet.write('A2', 'Bar', italic)
worksheet.write_rich_string('A3', 'ab', bold, 'cd', 'efg', format)
workbook.close()
self.assertExcelEqual()
| 26.341463 | 79 | 0.606481 |
1a2589abea4d93dd888f98c498469d333d1be6f5 | 30,273 | py | Python | qiskit/visualization/circuit_visualization.py | meamy/qiskit-terra | 353918ba2c92b9d1fdda71d9a1d0262be6389c1f | [
"Apache-2.0"
] | 1 | 2021-07-11T18:17:38.000Z | 2021-07-11T18:17:38.000Z | qiskit/visualization/circuit_visualization.py | meamy/qiskit-terra | 353918ba2c92b9d1fdda71d9a1d0262be6389c1f | [
"Apache-2.0"
] | 35 | 2019-03-07T02:09:22.000Z | 2022-03-22T19:55:15.000Z | qiskit/visualization/circuit_visualization.py | meamy/qiskit-terra | 353918ba2c92b9d1fdda71d9a1d0262be6389c1f | [
"Apache-2.0"
] | 1 | 2020-10-31T09:26:39.000Z | 2020-10-31T09:26:39.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Module for the primary interface to the circuit drawers.
This module contains the end user facing API for drawing quantum circuits.
There are 3 available drawer backends available:
0. Ascii art
1. LaTeX
2. Matplotlib
This provides a single function entrypoint to drawing a circuit object with
any of the backends.
"""
import errno
import logging
import os
import subprocess
import tempfile
from warnings import warn
# Optional dependency: PIL is presumably only needed by the latex image
# output path below -- confirm against _latex_circuit_drawer.
try:
    from PIL import Image

    HAS_PIL = True
except ImportError:
    HAS_PIL = False

from qiskit import user_config
from qiskit.visualization import exceptions
from qiskit.visualization import latex as _latex
from qiskit.visualization import text as _text
from qiskit.visualization import utils
from qiskit.visualization import matplotlib as _matplotlib

# Module-level logger, named after this module per the usual convention.
logger = logging.getLogger(__name__)
def circuit_drawer(circuit,
                   scale=None,
                   filename=None,
                   style=None,
                   output=None,
                   interactive=False,
                   plot_barriers=True,
                   reverse_bits=False,
                   justify=None,
                   vertical_compression='medium',
                   idle_wires=True,
                   with_layout=True,
                   fold=None,
                   ax=None,
                   initial_state=False,
                   cregbundle=True):
    """Draw a quantum circuit to different formats (set by output parameter):

    **text**: ASCII art TextDrawing that can be printed in the console.

    **latex**: high-quality images compiled via latex.

    **latex_source**: raw uncompiled latex output.

    **matplotlib**: images with color rendered purely in Python.

    Args:
        circuit (QuantumCircuit): the quantum circuit to draw
        scale (float): scale of image to draw (shrink if < 1). Only used by the ``mpl``,
            ``latex``, and ``latex_source`` outputs.
        filename (str): file path to save image to
        style (dict or str): dictionary of style or file name of style file.
            This option is only used by the ``mpl`` output type. If a str is
            passed in that is the path to a json file which contains that will
            be open, parsed, and then used just as the input dict. See:
            :ref:`Style Dict Doc <style-dict-doc>` for more information on the
            contents.
        output (str): Select the output method to use for drawing the circuit.
            Valid choices are ``text``, ``latex``, ``latex_source``, ``mpl``.
            By default the `'text`' drawer is used unless a user config file
            has an alternative backend set as the default. If the output kwarg
            is set, that backend will always be used over the default in a user
            config file.
        interactive (bool): when set true show the circuit in a new window
            (for `mpl` this depends on the matplotlib backend being used
            supporting this). Note when used with either the `text` or the
            `latex_source` output type this has no effect and will be silently
            ignored.
        reverse_bits (bool): When set to True reverse the bit order inside
            registers for the output visualization.
        plot_barriers (bool): Enable/disable drawing barriers in the output
            circuit. Defaults to True.
        justify (string): Options are ``left``, ``right`` or ``none``, if
            anything else is supplied it defaults to left justified. It refers
            to where gates should be placed in the output circuit if there is
            an option. ``none`` results in each gate being placed in its own
            column.
        vertical_compression (string): ``high``, ``medium`` or ``low``. It
            merges the lines generated by the ``text`` output so the drawing
            will take less vertical room. Default is ``medium``. Only used by
            the ``text`` output, will be silently ignored otherwise.
        idle_wires (bool): Include idle wires (wires with no circuit elements)
            in output visualization. Default is True.
        with_layout (bool): Include layout information, with labels on the
            physical layout. Default is True.
        fold (int): Sets pagination. It can be disabled using -1.
            In `text`, sets the length of the lines. This useful when the
            drawing does not fit in the console. If None (default), it will try
            to guess the console width using ``shutil.get_terminal_size()``.
            However, if running in jupyter, the default line length is set to
            80 characters. In ``mpl`` it is the number of (visual) layers before
            folding. Default is 25.
        ax (matplotlib.axes.Axes): An optional Axes object to be used for
            the visualization output. If none is specified a new matplotlib
            Figure will be created and used. Additionally, if specified there
            will be no returned Figure since it is redundant. This is only used
            when the ``output`` kwarg is set to use the ``mpl`` backend. It
            will be silently ignored with all other outputs.
        initial_state (bool): Optional. Adds ``|0>`` in the beginning of the wire.
            Default: ``False``.
        cregbundle (bool): Optional. If set True bundle classical registers.
            Default: ``True``.

    Returns:
        :class:`PIL.Image` or :class:`matplotlib.figure` or :class:`str` or
        :class:`TextDrawing`:

        * `PIL.Image` (output='latex')
            an in-memory representation of the image of the circuit diagram.
        * `matplotlib.figure.Figure` (output='mpl')
            a matplotlib figure object for the circuit diagram.
        * `str` (output='latex_source')
            The LaTeX source code for visualizing the circuit diagram.
        * `TextDrawing` (output='text')
            A drawing that can be printed as ascii art

    Raises:
        VisualizationError: when an invalid output method is selected
        ImportError: when the output methods requires non-installed libraries.

    .. _style-dict-doc:

    **Style Dict Details**

    The style dict kwarg contains numerous options that define the style of the
    output circuit visualization. The style dict is only used by the ``mpl``
    output. The options available in the style dict are defined below:

    Args:
        textcolor (str): The color code to use for text. Defaults to
            `'#000000'`
        subtextcolor (str): The color code to use for subtext. Defaults to
            `'#000000'`
        linecolor (str): The color code to use for lines. Defaults to
            `'#000000'`
        creglinecolor (str): The color code to use for classical register
            lines. Defaults to `'#778899'`
        gatetextcolor (str): The color code to use for gate text. Defaults to
            `'#000000'`
        gatefacecolor (str): The color code to use for gates. Defaults to
            `'#ffffff'`
        barrierfacecolor (str): The color code to use for barriers. Defaults to
            `'#bdbdbd'`
        backgroundcolor (str): The color code to use for the background.
            Defaults to `'#ffffff'`
        fontsize (int): The font size to use for text. Defaults to 13
        subfontsize (int): The font size to use for subtext. Defaults to 8
        displaytext (dict): A dictionary of the text to use for each element
            type in the output visualization. The default values are::

                {
                    'id': 'id',
                    'u0': 'U_0',
                    'u1': 'U_1',
                    'u2': 'U_2',
                    'u3': 'U_3',
                    'x': 'X',
                    'y': 'Y',
                    'z': 'Z',
                    'h': 'H',
                    's': 'S',
                    'sdg': 'S^\\dagger',
                    't': 'T',
                    'tdg': 'T^\\dagger',
                    'rx': 'R_x',
                    'ry': 'R_y',
                    'rz': 'R_z',
                    'reset': '\\left|0\\right\\rangle'
                }

            You must specify all the necessary values if using this. There is
            no provision for passing an incomplete dict in.
        displaycolor (dict):
            The color codes to use for each circuit element. The default values are::

                {
                    'id': '#F0E442',
                    'u0': '#E7AB3B',
                    'u1': '#E7AB3B',
                    'u2': '#E7AB3B',
                    'u3': '#E7AB3B',
                    'x': '#58C698',
                    'y': '#58C698',
                    'z': '#58C698',
                    'h': '#70B7EB',
                    's': '#E0722D',
                    'sdg': '#E0722D',
                    't': '#E0722D',
                    'tdg': '#E0722D',
                    'rx': '#ffffff',
                    'ry': '#ffffff',
                    'rz': '#ffffff',
                    'reset': '#D188B4',
                    'target': '#70B7EB',
                    'meas': '#D188B4'
                }

            Also, just like `displaytext` there is no provision for an
            incomplete dict passed in.
        latexdrawerstyle (bool): When set to True enable latex mode which will
            draw gates like the `latex` output modes.
        usepiformat (bool): When set to True use radians for output
        fold (int): The number of circuit elements to fold the circuit at.
            Defaults to 20
        cregbundle (bool): If set True bundle classical registers
        showindex (bool): If set True draw an index.
        compress (bool): If set True draw a compressed circuit
        figwidth (int): The maximum width (in inches) for the output figure.
        dpi (int): The DPI to use for the output image. Defaults to 150
        margin (list): A list of margin values to adjust spacing around output
            image. Takes a list of 4 ints: [x left, x right, y bottom, y top].
        creglinestyle (str): The style of line to use for classical registers.
            Choices are `'solid'`, `'doublet'`, or any valid matplotlib
            `linestyle` kwarg value. Defaults to `doublet`

    Example:
        .. jupyter-execute::

            from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
            from qiskit.tools.visualization import circuit_drawer
            q = QuantumRegister(1)
            c = ClassicalRegister(1)
            qc = QuantumCircuit(q, c)
            qc.h(q)
            qc.measure(q, c)
            circuit_drawer(qc)
    """
    image = None
    config = user_config.get_config()
    # Get default from config file else use text
    default_output = 'text'
    if config:
        default_output = config.get('circuit_drawer', 'text')
        # 'auto' in the config means: prefer mpl when matplotlib is importable
        if default_output == 'auto':
            if _matplotlib.HAS_MATPLOTLIB:
                default_output = 'mpl'
            else:
                default_output = 'text'
    # an explicit output kwarg always wins over the config default
    if output is None:
        output = default_output

    # dispatch to the selected backend; text/latex_source return directly,
    # latex/mpl fall through to the optional interactive display below
    if output == 'text':
        return _text_circuit_drawer(circuit, filename=filename,
                                    reverse_bits=reverse_bits,
                                    plot_barriers=plot_barriers,
                                    justify=justify,
                                    vertical_compression=vertical_compression,
                                    idle_wires=idle_wires,
                                    with_layout=with_layout,
                                    fold=fold,
                                    initial_state=initial_state,
                                    cregbundle=cregbundle)
    elif output == 'latex':
        image = _latex_circuit_drawer(circuit, scale=scale,
                                      filename=filename, style=style,
                                      plot_barriers=plot_barriers,
                                      reverse_bits=reverse_bits,
                                      justify=justify,
                                      idle_wires=idle_wires,
                                      with_layout=with_layout,
                                      initial_state=initial_state,
                                      cregbundle=cregbundle)
    elif output == 'latex_source':
        return _generate_latex_source(circuit,
                                      filename=filename, scale=scale,
                                      style=style,
                                      plot_barriers=plot_barriers,
                                      reverse_bits=reverse_bits,
                                      justify=justify,
                                      idle_wires=idle_wires,
                                      with_layout=with_layout,
                                      initial_state=initial_state,
                                      cregbundle=cregbundle)
    elif output == 'mpl':
        image = _matplotlib_circuit_drawer(circuit, scale=scale,
                                           filename=filename, style=style,
                                           plot_barriers=plot_barriers,
                                           reverse_bits=reverse_bits,
                                           justify=justify,
                                           idle_wires=idle_wires,
                                           with_layout=with_layout,
                                           fold=fold,
                                           ax=ax,
                                           initial_state=initial_state,
                                           cregbundle=cregbundle)
    else:
        raise exceptions.VisualizationError(
            'Invalid output type %s selected. The only valid choices '
            'are latex, latex_source, text, and mpl' % output)
    if image and interactive:
        image.show()
    return image
# -----------------------------------------------------------------------------
# Plot style sheet option
# -----------------------------------------------------------------------------
def qx_color_scheme():
    """Return default style for matplotlib_circuit_drawer (IBM QX style).

    Deprecated since 0.11; emits a DeprecationWarning and will be removed.
    The returned dict matches the style-dict schema documented on
    ``circuit_drawer``.
    """
    warn('The qx_color_scheme function is deprecated as of 0.11, and '
         'will be removed no earlier than 3 months after that release '
         'date.', DeprecationWarning, stacklevel=2)
    return {
        "comment": "Style file for matplotlib_circuit_drawer (IBM QX Composer style)",
        "textcolor": "#000000",
        "gatetextcolor": "#000000",
        "subtextcolor": "#000000",
        "linecolor": "#000000",
        "creglinecolor": "#b9b9b9",
        "gatefacecolor": "#ffffff",
        "barrierfacecolor": "#bdbdbd",
        "backgroundcolor": "#ffffff",
        "fold": 20,
        "fontsize": 13,
        "subfontsize": 8,
        "figwidth": -1,
        "dpi": 150,
        "displaytext": {
            "id": "id",
            "u0": "U_0",
            "u1": "U_1",
            "u2": "U_2",
            "u3": "U_3",
            "x": "X",
            "y": "Y",
            "z": "Z",
            "h": "H",
            "s": "S",
            "sdg": "S^\\dagger",
            "t": "T",
            "tdg": "T^\\dagger",
            "rx": "R_x",
            "ry": "R_y",
            "rz": "R_z",
            "reset": "\\left|0\\right\\rangle"
        },
        "displaycolor": {
            "id": "#ffca64",
            "u0": "#f69458",
            "u1": "#f69458",
            "u2": "#f69458",
            "u3": "#f69458",
            "x": "#a6ce38",
            "y": "#a6ce38",
            "z": "#a6ce38",
            "h": "#00bff2",
            "s": "#00bff2",
            "sdg": "#00bff2",
            "t": "#ff6666",
            "tdg": "#ff6666",
            "rx": "#ffca64",
            "ry": "#ffca64",
            "rz": "#ffca64",
            "reset": "#d7ddda",
            "target": "#00bff2",
            "meas": "#f070aa"
        },
        "latexdrawerstyle": True,
        "usepiformat": False,
        "cregbundle": False,
        "showindex": False,
        "compress": True,
        "margin": [2.0, 0.0, 0.0, 0.3],
        "creglinestyle": "solid",
        "reversebits": False
    }
# -----------------------------------------------------------------------------
# _text_circuit_drawer
# -----------------------------------------------------------------------------
def _text_circuit_drawer(circuit, filename=None, reverse_bits=False,
                         plot_barriers=True, justify=None, vertical_compression='high',
                         idle_wires=True, with_layout=True, fold=None, initial_state=True,
                         cregbundle=False, encoding=None):
    """Draw a circuit as ascii art.

    Args:
        circuit (QuantumCircuit): the circuit to draw.
        filename (str): optional path; when given, the drawing is also dumped there.
        reverse_bits (bool): rearrange the bits in reverse order.
        plot_barriers (bool): draw barriers when present.
        justify (str): `left`, `right` or `none` (default `left`).
        vertical_compression (string): `high`, `medium`, or `low`; merges lines
            so the drawing takes less vertical room. Default is `high`.
        idle_wires (bool): include idle wires. Default is True.
        with_layout (bool): include layout information with labels on the
            physical layout. Default: True.
        fold (int): break the drawing at this width; `None` guesses the console
            width via `shutil.get_terminal_size()`, `-1` disables pagination.
        initial_state (bool): prepend |0> to each line. Default: `True`.
        cregbundle (bool): bundle classical registers. Default: ``False``.
        encoding (str): output encoding preference. Default: ``sys.stdout.encoding``.

    Returns:
        TextDrawing: an instance that, when printed, draws the circuit in ascii art.
    """
    qubits, clbits, layered_ops = utils._get_layered_instructions(
        circuit, reverse_bits=reverse_bits, justify=justify, idle_wires=idle_wires)
    layout = circuit._layout if with_layout else None
    phase = getattr(circuit, 'global_phase', None)
    drawing = _text.TextDrawing(qubits, clbits, layered_ops, layout=layout,
                                initial_state=initial_state, cregbundle=cregbundle,
                                global_phase=phase, encoding=encoding)
    drawing.plotbarriers = plot_barriers
    drawing.line_length = fold
    drawing.vertical_compression = vertical_compression
    if filename:
        drawing.dump(filename, encoding=encoding)
    return drawing
# -----------------------------------------------------------------------------
# latex_circuit_drawer
# -----------------------------------------------------------------------------
def _latex_circuit_drawer(circuit,
                          scale=0.7,
                          filename=None,
                          style=None,
                          plot_barriers=True,
                          reverse_bits=False,
                          justify=None,
                          idle_wires=True,
                          with_layout=True,
                          initial_state=False,
                          cregbundle=False):
    """Draw a quantum circuit based on latex (Qcircuit package)
    Requires version >=2.6.0 of the qcircuit LaTeX package.
    Args:
        circuit (QuantumCircuit): a quantum circuit
        scale (float): scaling factor
        filename (str): file path to save image to
        style (dict or str): dictionary of style or file name of style file
        reverse_bits (bool): When set to True reverse the bit order inside
            registers for the output visualization.
        plot_barriers (bool): Enable/disable drawing barriers in the output
            circuit. Defaults to True.
        justify (str) : `left`, `right` or `none`. Defaults to `left`. Says how
            the circuit should be justified.
        idle_wires (bool): Include idle wires. Default is True.
        with_layout (bool): Include layout information, with labels on the physical
            layout. Default: True
        initial_state (bool): Optional. Adds |0> in the beginning of the line. Default: `False`.
        cregbundle (bool): Optional. If set True bundle classical registers.
            Default: ``False``.
    Returns:
        PIL.Image: an in-memory representation of the circuit diagram
    Raises:
        OSError: usually indicates that ```pdflatex``` or ```pdftocairo``` is
            missing.
        CalledProcessError: usually points errors during diagram creation.
        ImportError: if pillow is not installed
    """
    tmpfilename = 'circuit'
    with tempfile.TemporaryDirectory() as tmpdirname:
        # Step 1: emit the LaTeX source into a throwaway directory.
        tmppath = os.path.join(tmpdirname, tmpfilename + '.tex')
        _generate_latex_source(circuit, filename=tmppath,
                               scale=scale, style=style,
                               plot_barriers=plot_barriers,
                               reverse_bits=reverse_bits, justify=justify,
                               idle_wires=idle_wires, with_layout=with_layout,
                               initial_state=initial_state,
                               cregbundle=cregbundle)
        try:
            # Step 2: compile the .tex into a PDF. pdflatex chatter is
            # suppressed; only its captured stdout is kept for error dumps.
            subprocess.run(["pdflatex", "-halt-on-error",
                            "-output-directory={}".format(tmpdirname),
                            "{}".format(tmpfilename + '.tex')],
                           stdout=subprocess.PIPE, stderr=subprocess.DEVNULL,
                           check=True)
        except OSError as ex:
            # ENOENT means the pdflatex binary itself was not found.
            if ex.errno == errno.ENOENT:
                logger.warning('WARNING: Unable to compile latex. '
                               'Is `pdflatex` installed? '
                               'Skipping latex circuit drawing...')
            raise
        except subprocess.CalledProcessError as ex:
            # pdflatex ran but failed; dump its output for post-mortem.
            with open('latex_error.log', 'wb') as error_file:
                error_file.write(ex.stdout)
            logger.warning('WARNING Unable to compile latex. '
                           'The output from the pdflatex command can '
                           'be found in latex_error.log')
            raise
        else:
            if not HAS_PIL:
                raise ImportError('The latex drawer needs pillow installed. '
                                  'Run "pip install pillow" before using the '
                                  'latex drawer.')
            try:
                # Step 3: rasterize the PDF to PNG with poppler's pdftocairo,
                # then load and trim the image before the temp dir vanishes.
                base = os.path.join(tmpdirname, tmpfilename)
                subprocess.run(["pdftocairo", "-singlefile", "-png", "-q",
                                base + '.pdf', base], check=True)
                image = Image.open(base + '.png')
                image = utils._trim(image)
                # NOTE(review): the PNG is removed while `image` may still be
                # lazily backed by it -- presumably _trim forces a full load;
                # confirm before reordering these lines.
                os.remove(base + '.png')
                if filename:
                    image.save(filename, 'PNG')
            except (OSError, subprocess.CalledProcessError) as ex:
                logger.warning('WARNING: Unable to convert pdf to image. '
                               'Is `poppler` installed? '
                               'Skipping circuit drawing...')
                raise
    return image
def _generate_latex_source(circuit, filename=None,
                           scale=0.7, style=None, reverse_bits=False,
                           plot_barriers=True, justify=None, idle_wires=True,
                           with_layout=True, initial_state=False, cregbundle=False):
    """Convert a QuantumCircuit into LaTeX source (Qcircuit package).

    Args:
        circuit (QuantumCircuit): the circuit to convert.
        filename (str): optional path; when given, the LaTeX is also written there.
        scale (float): image scaling factor.
        style (dict or str): style dictionary or path to a style file.
        reverse_bits (bool): reverse the bit order inside registers.
        plot_barriers (bool): draw barriers when present. Defaults to True.
        justify (str): `left`, `right` or `none` (default `left`).
        idle_wires (bool): include idle wires. Default is True.
        with_layout (bool): include layout information with labels on the
            physical layout. Default: True.
        initial_state (bool): prepend |0> to each line. Default: `False`.
        cregbundle (bool): bundle classical registers. Default: ``False``.

    Returns:
        str: LaTeX string appropriate for writing to file.
    """
    qubits, clbits, layered_ops = utils._get_layered_instructions(
        circuit, reverse_bits=reverse_bits, justify=justify, idle_wires=idle_wires)
    layout = circuit._layout if with_layout else None
    phase = getattr(circuit, 'global_phase', None)
    drawer = _latex.QCircuitImage(qubits, clbits, layered_ops, scale, style=style,
                                  plot_barriers=plot_barriers,
                                  reverse_bits=reverse_bits, layout=layout,
                                  initial_state=initial_state,
                                  cregbundle=cregbundle,
                                  global_phase=phase)
    latex_source = drawer.latex()
    if filename:
        with open(filename, 'w') as latex_file:
            latex_file.write(latex_source)
    return latex_source
# -----------------------------------------------------------------------------
# matplotlib_circuit_drawer
# -----------------------------------------------------------------------------
def _matplotlib_circuit_drawer(circuit,
                               scale=None,
                               filename=None,
                               style=None,
                               plot_barriers=True,
                               reverse_bits=False,
                               justify=None,
                               idle_wires=True,
                               with_layout=True,
                               fold=None,
                               ax=None,
                               initial_state=False,
                               cregbundle=True):
    """Draw a quantum circuit with matplotlib.

    If `%matplotlib inline` is invoked in a Jupyter notebook, it visualizes a
    circuit inline; `%config InlineBackend.figure_format = 'svg'` is recommended.

    Args:
        circuit (QuantumCircuit): the circuit to draw.
        scale (float): scaling factor.
        filename (str): file path to save the image to.
        style (dict or str): style dictionary or path to a style file.
        reverse_bits (bool): reverse the bit order inside registers.
        plot_barriers (bool): draw barriers when present. Defaults to True.
        justify (str): `left`, `right` or `none` (default `left`).
        idle_wires (bool): include idle wires. Default is True.
        with_layout (bool): include layout information with labels on the
            physical layout. Default: True.
        fold (int): number of ops allowed before folding. Default is 25.
        ax (matplotlib.axes.Axes): optional Axes to draw into; when given,
            no Figure is returned since it would be redundant.
        initial_state (bool): prepend |0> to each line. Default: `False`.
        cregbundle (bool): bundle classical registers. Default: ``True``.

    Returns:
        matplotlib.figure: a matplotlib figure object for the circuit diagram
        if the ``ax`` kwarg is not set.
    """
    qubits, clbits, layered_ops = utils._get_layered_instructions(
        circuit, reverse_bits=reverse_bits, justify=justify, idle_wires=idle_wires)
    layout = circuit._layout if with_layout else None
    # fall back to the default folding width when none was requested
    fold = 25 if fold is None else fold
    phase = getattr(circuit, 'global_phase', None)
    drawer = _matplotlib.MatplotlibDrawer(qubits, clbits, layered_ops,
                                          scale=scale, style=style,
                                          plot_barriers=plot_barriers,
                                          reverse_bits=reverse_bits, layout=layout,
                                          fold=fold, ax=ax, initial_state=initial_state,
                                          cregbundle=cregbundle, global_phase=phase)
    return drawer.draw(filename)
| 44.782544 | 99 | 0.532256 |
234831a467040bf1125b69c84e216d95c118117e | 481 | py | Python | estrutura-repeticao-while/ex066.py | TacilioRodriguez/Python | 0b98dc8336e014046c579b387013b2871024e3d0 | [
"Unlicense"
] | null | null | null | estrutura-repeticao-while/ex066.py | TacilioRodriguez/Python | 0b98dc8336e014046c579b387013b2871024e3d0 | [
"Unlicense"
] | null | null | null | estrutura-repeticao-while/ex066.py | TacilioRodriguez/Python | 0b98dc8336e014046c579b387013b2871024e3d0 | [
"Unlicense"
] | null | null | null | """
Crie um programa que leia varios numeros inteiros pelo teclado. O programa só vai para quando o usuário digitar
o valor 999, que é a condição de parada. No final, mostre quantos numeros foram digitados e qual foi a soma entre eles,
desconsiderando a flag.
"""
# Read integers until the user enters the stop flag 999, then report how
# many numbers were entered and their sum (the flag itself is excluded).
count = 0
total = 0
while True:
    value = int(input('Digite um numero: '))
    if value == 999:
        break
    count += 1
    total += value
print(f'Foram digitados {count} numeros e a soma é {total}')
| 26.722222 | 119 | 0.677755 |
d5987e61fad8106876673d1b906e3c92aaf75147 | 449 | py | Python | solutions/713_subarray_product_less_than_k.py | YiqunPeng/leetcode_pro | 7e6376984f9baec49a5e827d98330fe3d1b656f0 | [
"MIT"
] | null | null | null | solutions/713_subarray_product_less_than_k.py | YiqunPeng/leetcode_pro | 7e6376984f9baec49a5e827d98330fe3d1b656f0 | [
"MIT"
] | null | null | null | solutions/713_subarray_product_less_than_k.py | YiqunPeng/leetcode_pro | 7e6376984f9baec49a5e827d98330fe3d1b656f0 | [
"MIT"
] | null | null | null | class Solution:
def numSubarrayProductLessThanK(self, nums: List[int], k: int) -> int:
"""Two pointers.
Running time: O(n) where n == len(nums).
"""
if k <= 1:
return 0
p = 1
i = 0
res = 0
for j in range(len(nums)):
p *= nums[j]
while p >= k:
p //= nums[i]
i += 1
res += j - i + 1
return res
| 23.631579 | 74 | 0.389755 |
e6e71eefeffd866db33c1fa179576bd5fd6a306c | 3,479 | py | Python | scripts/bigwig_helper.py | kundajelab/retina-models | bd458c65f5917f5dccb87f123e9be672472bd4b6 | [
"MIT"
] | null | null | null | scripts/bigwig_helper.py | kundajelab/retina-models | bd458c65f5917f5dccb87f123e9be672472bd4b6 | [
"MIT"
] | null | null | null | scripts/bigwig_helper.py | kundajelab/retina-models | bd458c65f5917f5dccb87f123e9be672472bd4b6 | [
"MIT"
] | null | null | null | import pyBigWig
import numpy as np
def read_chrom_sizes(fname):
    """Parse a tab-separated chrom.sizes file into (chromosome, length) tuples.

    Lines that do not have exactly two tab-separated fields are skipped.
    """
    sizes = []
    with open(fname) as handle:
        for line in handle:
            fields = line.strip().split('\t')
            if len(fields) == 2:
                sizes.append((fields[0], int(fields[1])))
    return sizes
def get_regions(regions_file, seqlen):
    """Load regions and recenter each on its summit.

    The summit is column 2 (start) plus column 10 (summit offset), as in the
    original file format. Each returned entry is
    [chrom, summit - seqlen//2, summit + seqlen//2, summit].
    """
    assert seqlen % 2 == 0
    half = seqlen // 2
    recentered = []
    with open(regions_file) as handle:
        for line in handle:
            fields = line.strip().split('\t')
            summit = int(fields[1]) + int(fields[9])
            recentered.append([fields[0], summit - half, summit + half, summit])
    return recentered
def write_bigwig(data, regions, gs, bw_out, outstats_file, debug_chr=None, use_tqdm=False):
    """Write per-region signal arrays into a bigwig track plus a stats file.

    Regions may overlap; because they are processed in genome-sorted order,
    an overlapping position takes its value from the region whose summit is
    closer (the boundary is the midpoint between the two summits).

    Args:
        data: per-region value arrays, indexed the same way as ``regions``.
        regions: list of [chrom, start, end, summit] entries.
        gs: chromosome sizes as (name, length) tuples, used as the header.
        bw_out: output bigwig path.
        outstats_file: output path for min/quantile/max statistics over all
            written values.
        debug_chr: if set, only entries on this chromosome are written.
        use_tqdm: display a progress bar over regions.
    """
    # chromosome name -> index, so regions can be sorted genome-wide
    chr_to_idx = {}
    for i, x in enumerate(gs):
        chr_to_idx[x[0]] = i

    bw = pyBigWig.open(bw_out, 'w')
    bw.addHeader(gs)

    # regions may not be sorted, so get their sorted order
    order_of_regs = sorted(range(len(regions)), key=lambda x: (chr_to_idx[regions[x][0]], regions[x][1]))

    all_entries = []
    cur_chr = ""
    cur_end = 0

    iterator = range(len(order_of_regs))
    if use_tqdm:
        from tqdm import tqdm
        iterator = tqdm(iterator)

    for itr in iterator:
        # BUG FIX: `i` must be assigned before the debug_chr filter below;
        # previously the filter read a stale `i` left over from the header
        # loop above and therefore tested the wrong region.
        i = order_of_regs[itr]
        # subset to chromosome (debugging)
        if debug_chr and regions[i][0] != debug_chr:
            continue
        i_chr, i_start, i_end, i_mid = regions[i]

        # reset the write cursor whenever we move to a new chromosome
        if i_chr != cur_chr:
            cur_chr = i_chr
            cur_end = 0

        # bring current end to at least start of current region
        if cur_end < i_start:
            cur_end = i_start

        assert regions[i][2] >= cur_end

        # figure out where to stop for this region, get next region
        # which may partially overlap with this one
        next_end = i_end
        if itr + 1 != len(order_of_regs):
            n = order_of_regs[itr + 1]
            next_chr, next_start, _, next_mid = regions[n]
            if next_chr == i_chr and next_start < i_end:
                # if next region overlaps with this, end between their midpoints
                next_end = (i_mid + next_mid) // 2

        vals = data[i][cur_end - i_start:next_end - i_start]

        bw.addEntries([i_chr] * (next_end - cur_end),
                      list(range(cur_end, next_end)),
                      ends=list(range(cur_end + 1, next_end + 1)),
                      values=[float(x) for x in vals])

        all_entries.append(vals)
        cur_end = next_end

    bw.close()

    # summary statistics over every value that was written
    all_entries = np.hstack(all_entries)
    with open(outstats_file, 'w') as f:
        f.write("Min\t{:.6f}\n".format(np.min(all_entries)))
        f.write(".1%\t{:.6f}\n".format(np.quantile(all_entries, 0.001)))
        f.write("1%\t{:.6f}\n".format(np.quantile(all_entries, 0.01)))
        f.write("50%\t{:.6f}\n".format(np.quantile(all_entries, 0.5)))
        f.write("99%\t{:.6f}\n".format(np.quantile(all_entries, 0.99)))
        f.write("99.9%\t{:.6f}\n".format(np.quantile(all_entries, 0.999)))
        f.write("99.95%\t{:.6f}\n".format(np.quantile(all_entries, 0.9995)))
        f.write("99.99%\t{:.6f}\n".format(np.quantile(all_entries, 0.9999)))
        f.write("Max\t{:.6f}\n".format(np.max(all_entries)))
| 33.776699 | 122 | 0.585513 |
9308fc317f6144814880379b3898d58bf8a2f9a4 | 3,747 | py | Python | brainscore/benchmarks/__init__.py | pmcgrath249/brain-score | 6e7d7288c2df44c1563499a7e4d81a2d509255e4 | [
"MIT"
] | 1 | 2020-12-17T14:51:01.000Z | 2020-12-17T14:51:01.000Z | brainscore/benchmarks/__init__.py | pmcgrath249/brain-score | 6e7d7288c2df44c1563499a7e4d81a2d509255e4 | [
"MIT"
] | 2 | 2021-07-30T11:30:17.000Z | 2021-11-15T09:29:10.000Z | brainscore/benchmarks/__init__.py | pmcgrath249/brain-score | 6e7d7288c2df44c1563499a7e4d81a2d509255e4 | [
"MIT"
] | 2 | 2020-02-11T12:50:48.000Z | 2021-06-22T09:51:08.000Z | from abc import ABC
from result_caching import cache, store
from brainscore.metrics import Score
from brainscore.utils import LazyLoad
class Benchmark(ABC):
    """Interface for a benchmark: a callable scorer plus identifying metadata.

    Concrete benchmarks override ``__call__`` and the three properties; every
    base implementation raises ``NotImplementedError``.
    """

    def __call__(self, candidate):
        """Score the given candidate on this benchmark."""
        raise NotImplementedError

    @property
    def identifier(self):
        """Unique string identifying this benchmark."""
        raise NotImplementedError

    @property
    def version(self):
        """Version of this benchmark definition."""
        raise NotImplementedError

    @property
    def ceiling(self):
        """Upper bound on the scores this benchmark can yield."""
        raise NotImplementedError
class BenchmarkBase(Benchmark):
    """Convenience base class holding common benchmark metadata.

    The ceiling is computed lazily by ``ceiling_func`` and memoized through
    the ``@store`` decorator, keyed on this benchmark's identifier
    (presumably persisted by result_caching -- verify against that package).
    """

    def __init__(self, identifier, ceiling_func, version, parent=None, paper_link=None):
        self._identifier = identifier
        self._ceiling_func = ceiling_func
        self._version = version
        self.parent = parent
        self.paper_link = paper_link

    @property
    def identifier(self):
        return self._identifier

    @property
    def version(self):
        return self._version

    @property
    def ceiling(self):
        # delegate to the cached helper; `identifier` keys the cache entry
        return self._ceiling(identifier=self.identifier)

    @store()
    def _ceiling(self, identifier):
        return self._ceiling_func()
def ceil_score(score, ceiling):
    """Normalize a score by its ceiling.

    The center value is divided by the ceiling's center; the error value is
    carried over unchanged. The raw score and the ceiling are attached to the
    result's ``attrs`` for later inspection.
    """
    raw_center = score.sel(aggregation='center').values
    ceiling_center = ceiling.sel(aggregation='center').values
    raw_error = score.sel(aggregation='error').values
    ceiled_score = type(score)([raw_center / ceiling_center, raw_error],
                               coords=score.coords, dims=score.dims)
    ceiled_score.attrs[Score.RAW_VALUES_KEY] = score
    ceiled_score.attrs['ceiling'] = ceiling
    return ceiled_score
class BenchmarkPool(dict):
    """Registry mapping benchmark identifiers to lazily-instantiated benchmarks."""

    def __init__(self):
        super(BenchmarkPool, self).__init__()
        # imports are local to avoid circular imports at module load time
        # neural benchmarks
        from .majaj2015 import DicarloMajaj2015V4PLS, DicarloMajaj2015ITPLS, \
            DicarloMajaj2015V4Mask, DicarloMajaj2015ITMask, \
            DicarloMajaj2015V4RDM, DicarloMajaj2015ITRDM
        from .freemanziemba2013 import MovshonFreemanZiemba2013V1PLS, MovshonFreemanZiemba2013V2PLS, \
            MovshonFreemanZiemba2013V1RDM, MovshonFreemanZiemba2013V2RDM
        from .cadena2017 import ToliasCadena2017PLS, ToliasCadena2017Mask
        from .kar2019 import DicarloKar2019OST
        # behavioral benchmarks
        from .rajalingham2018 import DicarloRajalingham2018I2n
        # engineering (ML) benchmarks
        from .imagenet import Imagenet2012

        # (identifier, constructor) pairs; registration order is preserved
        registry = [
            ('dicarlo.Majaj2015.V4-pls', DicarloMajaj2015V4PLS),
            ('dicarlo.Majaj2015.IT-pls', DicarloMajaj2015ITPLS),
            ('dicarlo.Majaj2015.V4-mask', DicarloMajaj2015V4Mask),
            ('dicarlo.Majaj2015.IT-mask', DicarloMajaj2015ITMask),
            ('dicarlo.Majaj2015.V4-rdm', DicarloMajaj2015V4RDM),
            ('dicarlo.Majaj2015.IT-rdm', DicarloMajaj2015ITRDM),
            ('movshon.FreemanZiemba2013.V1-pls', MovshonFreemanZiemba2013V1PLS),
            ('movshon.FreemanZiemba2013.V2-pls', MovshonFreemanZiemba2013V2PLS),
            ('movshon.FreemanZiemba2013.V1-rdm', MovshonFreemanZiemba2013V1RDM),
            ('movshon.FreemanZiemba2013.V2-rdm', MovshonFreemanZiemba2013V2RDM),
            ('tolias.Cadena2017-pls', ToliasCadena2017PLS),
            ('tolias.Cadena2017-mask', ToliasCadena2017Mask),
            ('dicarlo.Kar2019-ost', DicarloKar2019OST),
            ('dicarlo.Rajalingham2018-i2n', DicarloRajalingham2018I2n),
            ('fei-fei.Deng2009-top1', Imagenet2012),
        ]
        for identifier, benchmark_ctor in registry:
            self[identifier] = LazyLoad(benchmark_ctor)
# Module-level singleton registry of every available benchmark.
benchmark_pool = BenchmarkPool()
@cache()
def load(name):
    """Return the benchmark registered under ``name``.

    Raises:
        ValueError: if no benchmark with that identifier exists.
    """
    if name in benchmark_pool:
        return benchmark_pool[name]
    raise ValueError(f"Unknown benchmark '{name}' - must choose from {list(benchmark_pool.keys())}")
| 36.378641 | 104 | 0.711236 |
e8c59b856e183a55f71a54038e6f1b6909c1ce32 | 989 | py | Python | app/app/urls.py | hamidrezaorouji/recipe-app-api | 0500f4f59389fad63a13bc65800646cc980f58e6 | [
"MIT"
] | null | null | null | app/app/urls.py | hamidrezaorouji/recipe-app-api | 0500f4f59389fad63a13bc65800646cc980f58e6 | [
"MIT"
] | 10 | 2020-02-12T02:27:38.000Z | 2022-03-12T00:08:16.000Z | app/app/urls.py | hamidrezaorouji/recipe-app-api | 0500f4f59389fad63a13bc65800646cc980f58e6 | [
"MIT"
] | null | null | null | """app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
# URL routes, matched top to bottom; keep order in mind when adding patterns.
urlpatterns = [
    path('admin/', admin.site.urls),  # Django admin interface
    path('api/user/', include('user.urls')),  # user management API
    path('api/recipe/', include('recipe.urls')),  # recipe API
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)  # NOTE(review): static() presumably serves media only with DEBUG=True -- confirm for production
| 36.62963 | 77 | 0.715875 |
51a2b38f89f1299e33d1eda09607f82c8ca231a9 | 7,612 | py | Python | autoPyTorch/pipeline/traditional_tabular_regression.py | franchuterivera/Auto-PyTorch | ff5c5c99e424a47b5cdb0e40ec68fbf33943e764 | [
"Apache-2.0"
] | 1 | 2021-05-12T10:11:58.000Z | 2021-05-12T10:11:58.000Z | autoPyTorch/pipeline/traditional_tabular_regression.py | franchuterivera/Auto-PyTorch | ff5c5c99e424a47b5cdb0e40ec68fbf33943e764 | [
"Apache-2.0"
] | 1 | 2021-06-23T21:48:03.000Z | 2021-06-23T21:48:03.000Z | autoPyTorch/pipeline/traditional_tabular_regression.py | franchuterivera/Auto-PyTorch | ff5c5c99e424a47b5cdb0e40ec68fbf33943e764 | [
"Apache-2.0"
] | null | null | null | import warnings
from typing import Any, Dict, List, Optional, Tuple, cast
from ConfigSpace.configuration_space import Configuration, ConfigurationSpace
import numpy as np
from sklearn.base import RegressorMixin
from autoPyTorch.pipeline.base_pipeline import BaseDatasetPropertiesType, BasePipeline, PipelineStepType
from autoPyTorch.pipeline.components.base_choice import autoPyTorchChoice
from autoPyTorch.pipeline.components.setup.traditional_ml import ModelChoice
class TraditionalTabularRegressionPipeline(RegressorMixin, BasePipeline):
    """
    A pipeline that contains steps to fit traditional ML methods for tabular regression.

    Args:
        config (Optional[Configuration]): The configuration to evaluate.
        steps (Optional[List[Tuple[str, autoPyTorchChoice]]]): Pipeline steps;
            built via ``_get_pipeline_steps`` when not given.
        dataset_properties (Optional[Dict[str, Any]]): Characteristics of the
            dataset used to guide component choices.
        include (Optional[Dict[str, Any]]): Components to restrict the search space to.
        exclude (Optional[Dict[str, Any]]): Components to drop from the search space.
        random_state (Optional[np.random.RandomState]): random number generator.
        init_params (Optional[Dict[str, Any]]): initialization parameters
            forwarded to the base pipeline.
    """
    def __init__(
        self,
        config: Optional[Configuration] = None,
        steps: Optional[List[Tuple[str, autoPyTorchChoice]]] = None,
        dataset_properties: Optional[Dict[str, Any]] = None,
        include: Optional[Dict[str, Any]] = None,
        exclude: Optional[Dict[str, Any]] = None,
        random_state: Optional[np.random.RandomState] = None,
        init_params: Optional[Dict[str, Any]] = None
    ):
        super().__init__(
            config, steps, dataset_properties, include, exclude,
            random_state, init_params)

    def predict(self, X: np.ndarray, batch_size: Optional[int] = None
                ) -> np.ndarray:
        """Predict the output using the selected model.

        Args:
            X (np.ndarray): input data to the array
            batch_size (Optional[int]): batch_size controls whether the pipeline will be
                called on small chunks of the data. Useful when calling the
                predict method on the whole array X results in a MemoryError.

        Returns:
            np.ndarray: the predicted values given input X

        Raises:
            ValueError: if ``batch_size`` is given but is not a positive int.
        """
        if batch_size is None:
            return self.named_steps['model_trainer'].predict(X)
        else:
            if not isinstance(batch_size, int):
                raise ValueError("Argument 'batch_size' must be of type int, "
                                 "but is '%s'" % type(batch_size))
            if batch_size <= 0:
                raise ValueError("Argument 'batch_size' must be positive, "
                                 "but is %d" % batch_size)
            else:
                # Probe for the target array dimensions with a tiny prediction.
                target = self.predict(X[0:2].copy())
                # BUG FIX: the original compared the shape *tuple* to 1
                # (`(target.shape) == 1`), which is always False, so 1-d
                # outputs were never reshaped and `target.shape[1]` below
                # raised IndexError. Compare the rank instead.
                if len(target.shape) == 1:
                    target = target.reshape((-1, 1))
                y = np.zeros((X.shape[0], target.shape[1]),
                             dtype=np.float32)
                # predict chunk by chunk to bound peak memory usage
                for k in range(max(1, int(np.ceil(float(X.shape[0]) / batch_size)))):
                    batch_from = k * batch_size
                    batch_to = min([(k + 1) * batch_size, X.shape[0]])
                    pred_prob = self.predict(X[batch_from:batch_to], batch_size=None)
                    y[batch_from:batch_to] = pred_prob.astype(np.float32)
                return y

    def _get_hyperparameter_search_space(self,
                                         dataset_properties: Dict[str, Any],
                                         include: Optional[Dict[str, Any]] = None,
                                         exclude: Optional[Dict[str, Any]] = None,
                                         ) -> ConfigurationSpace:
        """Create the hyperparameter configuration space.

        For the given steps, and the Choices within those steps, this
        procedure returns a configuration space object to explore.

        Args:
            include (Optional[Dict[str, Any]]): what hyper-parameter configurations
                to honor when creating the configuration space
            exclude (Optional[Dict[str, Any]]): what hyper-parameter configurations
                to remove from the configuration space
            dataset_properties (Dict[str, Any]): Characteristics
                of the dataset to guide the pipeline choices of components

        Returns:
            ConfigurationSpace: The configuration space describing the
            TraditionalTabularRegressionPipeline.
        """
        cs = ConfigurationSpace()

        # fall back to an empty dict on malformed input rather than failing
        # (also fixed: the warning messages previously ran two literals
        # together without a separating space)
        if not isinstance(dataset_properties, dict):
            warnings.warn('The given dataset_properties argument contains an illegal value. '
                          'Proceeding with the default value')
            dataset_properties = dict()

        # this pipeline only ever handles tabular regression targets
        if 'target_type' not in dataset_properties:
            dataset_properties['target_type'] = 'tabular_regression'
        if dataset_properties['target_type'] != 'tabular_regression':
            warnings.warn('Tabular regression is being used, however the target_type '
                          'is not given as "tabular_regression". Overriding it.')
            dataset_properties['target_type'] = 'tabular_regression'

        # get the base search space given this
        # dataset properties. Then overwrite with custom
        # regression requirements
        cs = self._get_base_search_space(
            cs=cs, dataset_properties=dataset_properties,
            exclude=exclude, include=include, pipeline=self.steps)

        self.configuration_space = cs
        self.dataset_properties = dataset_properties
        return cs

    def _get_pipeline_steps(
        self,
        dataset_properties: Optional[Dict[str, BaseDatasetPropertiesType]]
    ) -> List[Tuple[str, PipelineStepType]]:
        """
        Defines what steps a pipeline should follow.
        The step itself has choices given via autoPyTorchChoice.

        Returns:
            List[Tuple[str, PipelineStepType]]: list of steps sequentially exercised
                by the pipeline.
        """
        steps = []  # type: List[Tuple[str, autoPyTorchChoice]]

        default_dataset_properties: Dict[str, BaseDatasetPropertiesType] = {'target_type': 'tabular_regression'}
        if dataset_properties is not None:
            default_dataset_properties.update(dataset_properties)

        steps.extend([
            ("model_trainer", ModelChoice(default_dataset_properties,
                                          random_state=self.random_state)),
        ])
        return steps

    def get_pipeline_representation(self) -> Dict[str, str]:
        """
        Returns a representation of the pipeline, so that it can be
        consumed and formatted by the API.

        It should be a representation that follows:
        [{'PreProcessing': <>, 'Estimator': <>}]

        Returns:
            Dict: contains the pipeline representation in a short format
        """
        estimator_name = 'TraditionalTabularRegression'
        if self.steps[0][1].choice is not None:
            if self.steps[0][1].choice.model is None:
                # the choice is selected but not fitted yet
                estimator_name = self.steps[0][1].choice.__class__.__name__
            else:
                estimator_name = cast(
                    str,
                    self.steps[0][1].choice.model.get_properties()['shortname']
                )
        return {
            'Preprocessing': 'None',
            'Estimator': estimator_name,
        }

    def _get_estimator_hyperparameter_name(self) -> str:
        """
        Returns the name of the current estimator.

        Returns:
            str: name of the pipeline type
        """
        return "traditional_tabular_regressor"
| 40.489362 | 112 | 0.605623 |
7ccfa56cdf90168eda6c6a607e3dfe7fb9cc8fe2 | 5,899 | py | Python | tests/common/test_op/smooth_l1_loss_grad.py | KnowingNothing/akg-test | 114d8626b824b9a31af50a482afc07ab7121862b | [
"Apache-2.0"
] | 1 | 2020-08-31T02:43:43.000Z | 2020-08-31T02:43:43.000Z | tests/common/test_op/smooth_l1_loss_grad.py | KnowingNothing/akg-test | 114d8626b824b9a31af50a482afc07ab7121862b | [
"Apache-2.0"
] | null | null | null | tests/common/test_op/smooth_l1_loss_grad.py | KnowingNothing/akg-test | 114d8626b824b9a31af50a482afc07ab7121862b | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""operator dsl function: smooth_l1_loss_grad"""
import akg.tvm
import akg.topi
from akg import dim
from akg.dim import DIM
from akg.utils import kernel_exec as utils, custom_tiling as ct_util, \
validation_check as vc_util
from akg.utils.format_transform import get_shape
# Hand-tuned tiling configurations keyed by
# str((prediction_shape, prediction_dtype, anchor_samples_dtype));
# looked up by smooth_l1_loss_grad_set_dim_func.
smooth_l1_loss_grad_set_dim_map = {
    str(((32, 8732, 4), "float16", "int32")): ((1, 1), (236, 236), (4, 4)),
}
def smooth_l1_loss_grad_set_dim_func(_dloss, prediction, _target, anchor_samples,
                                     _sigma, _anchor_sample_correct):
    """Look up hand-tuned tiling dims for smooth_l1_loss_grad.

    The lookup key combines the prediction shape and dtype with the
    anchor_samples dtype; the underscore-prefixed arguments are unused but
    keep the signature aligned with the op's argument list.
    """
    shape = get_shape(prediction)
    lookup_key = str((tuple(shape), prediction.dtype, anchor_samples.dtype))
    dims = ct_util.set_dims_by_key(lookup_key, smooth_l1_loss_grad_set_dim_map)
    return dims, lookup_key
@vc_util.check_input_type(akg.tvm.tensor.Tensor, akg.tvm.tensor.Tensor,
                          akg.tvm.tensor.Tensor, akg.tvm.tensor.Tensor,
                          float, int)
def smooth_l1_loss_grad(dloss, prediction, target, anchor_samples,
                        sigma, anchor_sample_correct):
    """
    do backprop for smooth L1 loss (Huber loss)
    Args:
        dloss (tvm.tensor.Tensor): Tensor [x,y], derivative of loss.
        prediction (tvm.tensor.Tensor): Tensor [x,y,z], output of the forward pass.
        target (tvm.tensor.Tensor): Tensor [x,y,z], ground truth.
        anchor_samples (tvm.tensor.Tensor): Tensor [x,y], == anchor_sample_correct indicates correct classification, otherwise no meaning.
        sigma (float): Constant parameter.
        anchor_sample_correct (int): Constant parameter.
    Returns:
        dprediction (tvm.tensor.Tensor): output tensor [x,y,z]
    """
    # Shape sanity checks: dloss/anchor_samples share one rank, prediction/
    # target share one rank, and prediction has exactly one extra trailing dim.
    if len(dloss.shape) != len(anchor_samples.shape):
        raise RuntimeError("anchor_samples shape should equal to dloss shape!")
    if len(prediction.shape) != len(target.shape):
        raise RuntimeError("prediction shape should equal to target shape!")
    if (len(dloss.shape) + 1) != len(prediction.shape):
        raise RuntimeError("prediction shape should be dloss shape + 1!")
    out_shape = get_shape(prediction)
    original_dtype = dloss.dtype
    vc_util.ops_dtype_check(original_dtype, vc_util.DtypeForDavinci.ALL_FLOAT)
    # look up hand-tuned tiling dims for this shape/dtype combination
    dim_info, _ = smooth_l1_loss_grad_set_dim_func(
        dloss, prediction, target, anchor_samples, sigma, anchor_sample_correct)
    attrs = {DIM: dim_info}
    # mini targets compute in float16; otherwise keep the incoming precision
    if utils.product_is_mini():
        dtype = "float16"
    else:
        dtype = original_dtype
    # unify the data type of tensors
    if dloss.dtype != dtype:
        dloss = akg.topi.cast(dloss, dtype)
    if prediction.dtype != dtype:
        prediction = akg.topi.cast(prediction, dtype)
    if target.dtype != dtype:
        target = akg.topi.cast(target, dtype)
    if anchor_samples.dtype != dtype:
        anchor_samples = akg.topi.cast(anchor_samples, dtype)
    def eltwise_compute_func(_prediction, _target, _dloss, _anchor_sample, dtype):
        # Huber-loss gradient w.r.t. prediction, built as TVM expressions:
        #   d = prediction - target
        #   |d| <= 1/sigma^2  ->  sigma^2 * d   (quadratic branch)
        #   otherwise         ->  sign(d)       (linear branch)
        _diff = akg.tvm.expr.Sub(_prediction, _target)
        _first_branch = akg.tvm.expr.Mul(_diff, akg.tvm.const(sigma * sigma, dtype))
        # _second_branch doubles as sign(d): +1 when d > 0, else -1
        _second_branch = akg.tvm.expr.Select(
            akg.tvm.const(0, dtype) < _diff, akg.tvm.const(1, dtype), akg.tvm.const(-1, dtype))
        # |d| computed as sign(d) * d
        _abs_diff = akg.tvm.expr.Mul(_second_branch, _diff)
        _derivative = akg.tvm.expr.Select(_abs_diff <= akg.tvm.const(
            1.0 / (sigma * sigma), dtype), _first_branch, _second_branch)
        # chain rule: multiply by the incoming loss gradient
        _mult_dloss = akg.tvm.expr.Mul(_derivative, _dloss)
        # correctly classified anchors contribute zero gradient
        _output = akg.tvm.expr.Select(
            _anchor_sample == anchor_sample_correct, akg.tvm.const(0, dtype), _mult_dloss)
        return _output
    # dloss/anchor_samples lack the trailing dim, hence the *i[:-1] indexing
    dprediction = akg.tvm.compute(out_shape, lambda *i: eltwise_compute_func(
        prediction(*i), target(*i), dloss(*i[:-1]), anchor_samples(*i[:-1]), dtype))
    # cast back so callers see the dtype they passed in
    if dprediction.dtype.lower() != original_dtype:
        dprediction = akg.topi.cast(dprediction, original_dtype)
    return dprediction, attrs
def smooth_l1_loss_grad_get_dim(shape):
    """Build the dim attr string for smooth L1 loss grad.

    Args:
        shape: shape of the prediction tensor (e.g. [8, 4718, 4]).

    Returns:
        str: dim string for akg.op.build(attrs=...); an empty string when the
        tensor fits in the UB budget and no tiling is required.
    """
    # size of everything except the last dim, which is never cut
    outer_size = 1
    for extent in shape[:-1]:
        outer_size *= extent
    # UB capacity and an estimated number of simultaneous data copies in UB
    ub_size = 256 * 1024
    num_data_copies = 32
    data_size = 4
    # budget per last-dim slice; exceeding it triggers tiling
    threshold = int(ub_size / data_size / num_data_copies / shape[-1])
    if outer_size <= threshold:
        return ''
    # pick the largest divisor of outer_size not exceeding the threshold;
    # the dim size must currently be divisible by the tile size
    tile_size = 1
    for candidate in range(threshold, 1, -1):
        if outer_size % candidate == 0:
            tile_size = candidate
            break
    info = dim.Dim()
    # leave every axis before the second-to-last uncut
    for axis in range(0, len(shape) - 2):
        info.setdim(index=0, axis=axis, tilel1=1, tilel0=1)
    # cut only the second-to-last axis
    info.setdim(index=0, axis=len(shape) - 2,
                tilel1=tile_size, tilel0=tile_size)
    return str(info)
| 38.809211 | 138 | 0.659095 |
5945ec63798395d8fbbdf999eda58163d852d003 | 2,360 | py | Python | src/api/migrations/0002_application_sponsorship.py | kwanj-k/geoprotest_server | 4bc07c5c175f2de761895e299276422e61011146 | [
"MIT"
] | null | null | null | src/api/migrations/0002_application_sponsorship.py | kwanj-k/geoprotest_server | 4bc07c5c175f2de761895e299276422e61011146 | [
"MIT"
] | 5 | 2021-04-08T21:12:12.000Z | 2022-02-10T10:07:34.000Z | src/api/migrations/0002_application_sponsorship.py | kwanj-k/geoprotest_server | 4bc07c5c175f2de761895e299276422e61011146 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.9 on 2020-01-20 17:50
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
class Migration(migrations.Migration):
    """Auto-generated Django migration: creates Sponsorship and Application.

    Generated by Django 2.2.9; edit with care — changing a historical
    migration can desynchronize it from the recorded migration state.
    """
    # Must be applied after the app's initial migration.
    dependencies = [
        ('api', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Sponsorship',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True, null=True)),
                # Soft-delete flag: rows are marked deleted instead of removed.
                ('deleted', models.BooleanField(default=False, help_text='This is to make sure deletes are not actual deletes')),
                ('active', models.BooleanField(default=True)),
                ('name', models.CharField(max_length=30)),
                ('description', models.CharField(max_length=30)),
            ],
            options={
                'ordering': ['-updated_at', '-created_at'],
                'abstract': False,
            },
            managers=[
                ('everything', django.db.models.manager.Manager()),
            ],
        ),
        migrations.CreateModel(
            name='Application',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True, null=True)),
                # Soft-delete flag, same convention as Sponsorship.
                ('deleted', models.BooleanField(default=False, help_text='This is to make sure deletes are not actual deletes')),
                ('active', models.BooleanField(default=True)),
                # Each application links one student (AUTH_USER_MODEL) to one sponsorship.
                ('sponsorship', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='applications', to='api.Sponsorship')),
                ('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='applications', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['-updated_at', '-created_at'],
                'abstract': False,
            },
            managers=[
                ('everything', django.db.models.manager.Manager()),
            ],
        ),
    ]
| 42.909091 | 150 | 0.577542 |
049dccf07038a619ab0b9ee395ea801e31d7f49b | 3,602 | py | Python | examples/main.py | mineshmathew/WordDetector | 37469d96e5003c3579ecbee1e90f5002b6b8c7f6 | [
"MIT"
] | null | null | null | examples/main.py | mineshmathew/WordDetector | 37469d96e5003c3579ecbee1e90f5002b6b8c7f6 | [
"MIT"
] | null | null | null | examples/main.py | mineshmathew/WordDetector | 37469d96e5003c3579ecbee1e90f5002b6b8c7f6 | [
"MIT"
] | null | null | null | import argparse
from typing import List
import cv2
import matplotlib.pyplot as plt
from path import Path
from word_detector import detect, prepare_img, sort_multiline
def get_img_files(data_dir: Path) -> List[Path]:
    """Collect every image file (png/jpg/bmp/tif) contained in *data_dir*."""
    patterns = ('*.png', '*.jpg', '*.bmp', '*.tif')
    found: List[Path] = []
    for pattern in patterns:
        found.extend(Path(data_dir).files(pattern))
    return found
def main():
    """Detect words on every image in --data, group them into lines, and plot
    one bounding box per detected text line.

    Side effects: reads images from disk with cv2 and opens a matplotlib
    window per image (plt.show blocks until closed).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', type=Path, default=Path('../data/line'))
    parser.add_argument('--kernel_size', type=int, default=25)
    parser.add_argument('--sigma', type=float, default=11)
    # NOTE(review): --theta and --img_height are parsed but never used below;
    # both are overridden by the hard-coded language statistics. Confirm
    # whether the CLI options should be removed or honored.
    parser.add_argument('--theta', type=float, default=7)
    parser.add_argument('--min_area', type=int, default=100)
    parser.add_argument('--img_height', type=int, default=50)
    parsed = parser.parse_args()
    for fn_img in get_img_files(parsed.data):
        print(f'Processing file {fn_img}')
        # load image and process it
        img = cv2.imread(fn_img)
        #resize page image to a new height so that text lines are of around 50 pixels in height
        height, width = img.shape[:2]
        # Empirical per-language constants; presumably measured on a training
        # corpus (average line height in px, average word aspect ratio) —
        # TODO confirm their provenance.
        average_text_height_for_language = 67.63
        img_height = int (height * average_text_height_for_language / 50 )
        average_aspect_ratio = 5.61
        # theta overrides the CLI value: the detector's anisotropy follows
        # the measured word aspect ratio.
        theta = average_aspect_ratio
        img = prepare_img(img, img_height)
        detections = detect(img,
                            kernel_size=parsed.kernel_size,
                            sigma=parsed.sigma,
                            theta=theta,
                            min_area=parsed.min_area)
        # sort detections: cluster into lines, then sort each line
        lines = sort_multiline(detections)
        # plot results word wise
        '''
        plt.imshow(img, cmap='gray')
        num_colors = 7
        colors = plt.cm.get_cmap('rainbow', num_colors)
        for line_idx, line in enumerate(lines):
            for word_idx, det in enumerate(line):
                #import pdb; pdb.set_trace()
                xs = [det.bbox.x, det.bbox.x, det.bbox.x + det.bbox.w, det.bbox.x + det.bbox.w, det.bbox.x]
                ys = [det.bbox.y, det.bbox.y + det.bbox.h, det.bbox.y + det.bbox.h, det.bbox.y, det.bbox.y]
                plt.plot(xs, ys, c=colors(line_idx % num_colors))
                plt.text(det.bbox.x, det.bbox.y, f'{line_idx}/{word_idx}')
        plt.show()
        '''
        # Line-wise plotting: merge all word boxes of a line into one
        # enclosing rectangle.
        plt.imshow(img, cmap='gray')
        num_colors = 2
        colors = plt.cm.get_cmap('rainbow', num_colors)
        for line_idx, line in enumerate(lines):
            # Running min/max of word-box corners for this line.
            lowest_x = 100000
            largest_x = -1
            lowest_y = 100000
            largest_y = -1
            words_count = 0
            for word_idx, det in enumerate (line):
                words_count += 1
                x1 = det.bbox.x
                y1 = det.bbox.y
                x2 = x1 + det.bbox.w
                y2 = y1 + det.bbox.h
                if x1 < lowest_x :
                    lowest_x = x1
                if x2 > largest_x :
                    largest_x = x2
                if y1 < lowest_y:
                    lowest_y = y1
                if y2 > largest_y:
                    largest_y = y2
            # Only draw a rectangle for non-empty lines.
            if words_count > 0:
                xs = [lowest_x, lowest_x, largest_x, largest_x, lowest_x]
                ys = [lowest_y, largest_y, largest_y, lowest_y, lowest_y]
                plt.plot(xs, ys, c=colors(line_idx % num_colors))
        plt.show()
# Run the word-detection demo when executed as a script.
if __name__ == '__main__':
    main()
| 33.351852 | 107 | 0.549417 |
875cfc71b7b73fe02289dcb5ec76d1930a17f3be | 1,640 | py | Python | flunt/notifications/notifiable.py | alexiusstrauss/PyFlunt | 8ef79181c0ad053bfee7690da2ed91c1b2d6c390 | [
"MIT"
] | null | null | null | flunt/notifications/notifiable.py | alexiusstrauss/PyFlunt | 8ef79181c0ad053bfee7690da2ed91c1b2d6c390 | [
"MIT"
] | 1 | 2021-12-08T13:59:32.000Z | 2021-12-08T13:59:32.000Z | flunt/notifications/notifiable.py | alexiusstrauss/PyFlunt | 8ef79181c0ad053bfee7690da2ed91c1b2d6c390 | [
"MIT"
] | null | null | null | """Module Notifiable."""
from flunt.notifications.notification import Notification
class Notifiable(Notification):
    """Base class for objects that accumulate validation Notifications."""

    def __init__(self) -> None:
        """Start with an empty list of notifications."""
        self._notifications: list = []

    def add_notification(self, notification: Notification):
        """Record a single notification.

        :param notification: Notification
        """
        self._notifications.append(notification)

    def add_notifications_of_contract(self, *notifications):
        """Absorb the notifications carried by each Notifiable argument."""
        self._notifications.extend(self._filter_and_map_notifiables(notifications))

    def _filter_and_map_notifiables(self, notifications):
        """Flatten the notification lists of every Notifiable in *notifications*."""
        collected = []
        for candidate in notifications:
            if isinstance(candidate, Notifiable):
                collected.extend(candidate._notifications)
        return collected

    def _filter_notifications(self, notifications):
        """Keep only the items that are Notification instances."""
        return [item for item in notifications if isinstance(item, Notification)]

    def get_notifications(self) -> list:
        """Return every accumulated notification.

        :return: list
        """
        return self._notifications

    def clear(self):
        """Discard all accumulated notifications."""
        self._notifications.clear()

    def is_valid(self) -> bool:
        """Whether no notification has been recorded so far.

        :return: bool
        """
        return not self._notifications

    def __str__(self):
        """Delegate the string form to the underlying notification list."""
        return str(self._notifications)
| 27.79661 | 78 | 0.629878 |
48f0eda75c0481e5d6735b64c984c6ce26e6c7a8 | 18,223 | py | Python | zappa/utilities.py | dharmeshspatel4u/Zappa | f3e0e823f6aa5bd21e590e39b96ac592c1f064d1 | [
"MIT"
] | null | null | null | zappa/utilities.py | dharmeshspatel4u/Zappa | f3e0e823f6aa5bd21e590e39b96ac592c1f064d1 | [
"MIT"
] | 2 | 2019-03-27T05:51:03.000Z | 2019-06-05T08:27:04.000Z | zappa/utilities.py | dharmeshspatel4u/Zappa | f3e0e823f6aa5bd21e590e39b96ac592c1f064d1 | [
"MIT"
] | 1 | 2019-11-20T05:52:47.000Z | 2019-11-20T05:52:47.000Z | import botocore
import calendar
import datetime
import durationpy
import fnmatch
import io
import json
import logging
import os
import re
import shutil
import stat
import sys
from past.builtins import basestring
if sys.version_info[0] < 3:
from urlparse import urlparse
else:
from urllib.parse import urlparse
LOG = logging.getLogger(__name__)
##
# Settings / Packaging
##
def copytree(src, dst, metadata=True, symlinks=False, ignore=None):
    """
    This is a contributed re-implementation of 'copytree' that
    should work with the exact same behavior on multiple platforms.

    When `metadata` is False, file metadata such as permissions and modification
    times are not copied.

    :param src: existing source directory.
    :param dst: destination directory; created if it does not exist.
    :param metadata: when True, also copy permissions/timestamps (copystat/copy2).
    :param symlinks: when True, re-create symlinks instead of following them.
    :param ignore: optional callable(dir, names) -> names to exclude,
        same contract as shutil.copytree's `ignore`.
    """
    if not os.path.exists(dst):
        os.makedirs(dst)
    if metadata:
        shutil.copystat(src, dst)
    lst = os.listdir(src)
    if ignore:
        excl = ignore(src, lst)
        lst = [x for x in lst if x not in excl]

    for item in lst:
        s = os.path.join(src, item)
        d = os.path.join(dst, item)
        if symlinks and os.path.islink(s):  # pragma: no cover
            # Re-create the link itself rather than copying its target.
            if os.path.lexists(d):
                os.remove(d)
            os.symlink(os.readlink(s), d)
            if metadata:
                try:
                    st = os.lstat(s)
                    mode = stat.S_IMODE(st.st_mode)
                    os.lchmod(d, mode)
                # Was a bare `except:`, which also swallowed SystemExit and
                # KeyboardInterrupt; narrowed to Exception. os.lchmod is
                # missing on some platforms (e.g. Linux), so this stays
                # best-effort.
                except Exception:
                    pass  # lchmod not available
        elif os.path.isdir(s):
            # Recurse into subdirectories with the same options.
            copytree(s, d, metadata, symlinks, ignore)
        else:
            # Plain file: copy2 preserves metadata, copy does not.
            if metadata:
                shutil.copy2(s, d)
            else:
                shutil.copy(s, d)
def parse_s3_url(url):
    """
    Parses S3 URL.

    Returns bucket (domain) and file (full path).
    """
    if not url:
        # Empty/None input yields empty bucket and path.
        return '', ''
    parsed = urlparse(url)
    return parsed.netloc, parsed.path.strip('/')
def human_size(num, suffix='B'):
    """
    Convert bytes length to a human-readable version
    """
    magnitude = num
    # Walk up the binary prefixes until the value fits under 1024.
    for prefix in ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'):
        if abs(magnitude) < 1024.0:
            return "{0:3.1f}{1!s}{2!s}".format(magnitude, prefix, suffix)
        magnitude /= 1024.0
    # Anything beyond Zi falls through to Yi.
    return "{0:.1f}{1!s}{2!s}".format(magnitude, 'Yi', suffix)
def string_to_timestamp(timestring):
    """
    Accepts a str, returns an int timestamp.

    *timestring* uses an extended version of Go's duration syntax
    (e.g. "3h2m"); the result is the UTC epoch timestamp that far in
    the past. Returns 0 when the string cannot be parsed — callers
    treat 0 as "no timestamp".
    """
    # Cleaned up: removed a stray semicolon and the unreachable
    # `if ts: return ts` that followed an unconditional return.
    try:
        delta = durationpy.from_str(timestring)
        past = datetime.datetime.utcnow() - delta
        return calendar.timegm(past.timetuple())
    except Exception:
        # Unparseable duration string.
        return 0
##
# `init` related
##
def detect_django_settings():
    """
    Automatically try to discover Django settings files,
    return them as relative module paths.

    Walks the current working directory for files matching
    ``*settings.py`` (skipping anything under site-packages) and
    converts each path to a dotted module path relative to the CWD.
    """
    matches = []
    for root, dirnames, filenames in os.walk(os.getcwd()):
        for filename in fnmatch.filter(filenames, '*settings.py'):
            # Fixed: `full` was previously computed twice in a row.
            full = os.path.join(root, filename)
            if 'site-packages' in full:
                # Ignore vendored copies inside installed packages.
                continue
            package_path = full.replace(os.getcwd(), '')
            # '/proj/settings.py' -> '.proj.settings.py' -> 'proj.settings'
            package_module = package_path.replace(os.sep, '.').split('.', 1)[1].replace('.py', '')
            matches.append(package_module)
    return matches
def detect_flask_apps():
    """
    Automatically try to discover Flask apps files,
    return them as relative module paths.

    Scans every .py file under the CWD (skipping site-packages) for an
    ``app = Flask(...)`` assignment and returns ``module.app_name``
    dotted paths.
    """
    matches = []
    for root, dirnames, filenames in os.walk(os.getcwd()):
        for filename in fnmatch.filter(filenames, '*.py'):
            # Fixed: `full` was previously computed twice in a row.
            full = os.path.join(root, filename)
            if 'site-packages' in full:
                # Ignore vendored copies inside installed packages.
                continue
            with io.open(full, 'r', encoding='utf-8') as f:
                lines = f.readlines()
            for line in lines:
                app = None
                # Kind of janky: only the two most common spellings of
                # `app = Flask(...)` are recognized.
                if '= Flask(' in line:
                    app = line.split('= Flask(')[0].strip()
                if '=Flask(' in line:
                    app = line.split('=Flask(')[0].strip()
                if not app:
                    continue
                package_path = full.replace(os.getcwd(), '')
                package_module = package_path.replace(os.sep, '.').split('.', 1)[1].replace('.py', '')
                app_module = package_module + '.' + app
                matches.append(app_module)
    return matches
def get_venv_from_python_version():
    """Return the venv interpreter name for this Python, e.g. 'python3.6'."""
    major, minor = sys.version_info[0], sys.version_info[1]
    return 'python{0}.{1}'.format(major, minor)
def get_runtime_from_python_version():
    """Map the running interpreter major version to a Lambda runtime id."""
    # Any Python 3 interpreter maps to the 'python3.6' runtime label.
    return 'python2.7' if sys.version_info[0] < 3 else 'python3.6'
##
# Async Tasks
##
def get_topic_name(lambda_name):
    """ Topic name generation """
    return '{0}-zappa-async'.format(lambda_name)
##
# Event sources / Kappa
##
def get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False):
    """
    Given an event_source dictionary item, a session and a lambda_arn,
    hack into Kappa's Gibson, create out an object we can call
    to schedule this event, and return the event source.

    Returns a tuple ``(event_source_obj, ctx, funk)`` where the latter two
    are the pseudo context/function objects kappa expects.
    Note: the ``dry`` parameter is accepted but not used in this function.
    """
    # kappa is imported lazily so the module can load without it.
    import kappa.function
    import kappa.restapi
    import kappa.event_source.base
    import kappa.event_source.dynamodb_stream
    import kappa.event_source.kinesis
    import kappa.event_source.s3
    import kappa.event_source.sns
    import kappa.event_source.cloudwatch
    import kappa.policy
    import kappa.role
    import kappa.awsclient
    # Minimal stand-ins for kappa's Context/Function objects; attributes
    # are attached dynamically below.
    class PseudoContext(object):
        def __init__(self):
            return
    class PseudoFunction(object):
        def __init__(self):
            return
    # Mostly adapted from kappa - will probably be replaced by kappa support
    class SqsEventSource(kappa.event_source.base.EventSource):
        # SQS -> Lambda event-source mapping managed via the Lambda API.
        def __init__(self, context, config):
            super(SqsEventSource, self).__init__(context, config)
            self._lambda = kappa.awsclient.create_client(
                'lambda', context.session)
        def _get_uuid(self, function):
            # Look up the UUID of the existing mapping, if any.
            uuid = None
            response = self._lambda.call(
                'list_event_source_mappings',
                FunctionName=function.name,
                EventSourceArn=self.arn)
            LOG.debug(response)
            if len(response['EventSourceMappings']) > 0:
                uuid = response['EventSourceMappings'][0]['UUID']
            return uuid
        def add(self, function):
            # Create the mapping; failures are logged, not raised.
            try:
                response = self._lambda.call(
                    'create_event_source_mapping',
                    FunctionName=function.name,
                    EventSourceArn=self.arn,
                    BatchSize=self.batch_size,
                    Enabled=self.enabled
                )
                LOG.debug(response)
            except Exception:
                LOG.exception('Unable to add event source')
        def enable(self, function):
            self._config['enabled'] = True
            try:
                response = self._lambda.call(
                    'update_event_source_mapping',
                    UUID=self._get_uuid(function),
                    Enabled=self.enabled
                )
                LOG.debug(response)
            except Exception:
                LOG.exception('Unable to enable event source')
        def disable(self, function):
            self._config['enabled'] = False
            try:
                # NOTE(review): unlike enable(), this passes FunctionName
                # instead of UUID — verify against the Lambda
                # update_event_source_mapping API.
                response = self._lambda.call(
                    'update_event_source_mapping',
                    FunctionName=function.name,
                    Enabled=self.enabled
                )
                LOG.debug(response)
            except Exception:
                LOG.exception('Unable to disable event source')
        def update(self, function):
            # Only update when a mapping already exists.
            response = None
            uuid = self._get_uuid(function)
            if uuid:
                try:
                    response = self._lambda.call(
                        'update_event_source_mapping',
                        BatchSize=self.batch_size,
                        Enabled=self.enabled,
                        FunctionName=function.arn)
                    LOG.debug(response)
                except Exception:
                    LOG.exception('Unable to update event source')
        def remove(self, function):
            # Delete the mapping if present; returns the API response or None.
            response = None
            uuid = self._get_uuid(function)
            if uuid:
                response = self._lambda.call(
                    'delete_event_source_mapping',
                    UUID=uuid)
                LOG.debug(response)
            return response
        def status(self, function):
            # Returns the mapping description, or None when it does not exist.
            response = None
            LOG.debug('getting status for event source %s', self.arn)
            uuid = self._get_uuid(function)
            if uuid:
                try:
                    response = self._lambda.call(
                        'get_event_source_mapping',
                        UUID=self._get_uuid(function))
                    LOG.debug(response)
                except botocore.exceptions.ClientError:
                    LOG.debug('event source %s does not exist', self.arn)
                    response = None
            else:
                LOG.debug('No UUID for event source %s', self.arn)
            return response
    class ExtendedSnsEventSource(kappa.event_source.sns.SNSEventSource):
        # SNS event source extended with subscription filter-policy support.
        @property
        def filters(self):
            # Optional SNS FilterPolicy from the event-source config.
            return self._config.get('filters')
        def add_filters(self, function):
            # Apply the filter policy to the existing subscription.
            try:
                subscription = self.exists(function)
                if subscription:
                    response = self._sns.call(
                        'set_subscription_attributes',
                        SubscriptionArn=subscription['SubscriptionArn'],
                        AttributeName='FilterPolicy',
                        AttributeValue=json.dumps(self.filters)
                    )
                    kappa.event_source.sns.LOG.debug(response)
            except Exception:
                kappa.event_source.sns.LOG.exception('Unable to add filters for SNS topic %s', self.arn)
        def add(self, function):
            super(ExtendedSnsEventSource, self).add(function)
            if self.filters:
                self.add_filters(function)
    # Dispatch table: AWS service token (from the ARN) -> event source class.
    event_source_map = {
        'dynamodb': kappa.event_source.dynamodb_stream.DynamoDBStreamEventSource,
        'kinesis': kappa.event_source.kinesis.KinesisEventSource,
        's3': kappa.event_source.s3.S3EventSource,
        'sns': ExtendedSnsEventSource,
        'sqs': SqsEventSource,
        'events': kappa.event_source.cloudwatch.CloudWatchEventSource
    }
    arn = event_source['arn']
    # The service name is the third colon-separated field of the ARN.
    _, _, svc, _ = arn.split(':', 3)
    event_source_func = event_source_map.get(svc, None)
    if not event_source_func:
        raise ValueError('Unknown event source: {0}'.format(arn))
    def autoreturn(self, function_name):
        return function_name
    # Monkey-patch kappa so notification ids are just the function name.
    event_source_func._make_notification_id = autoreturn
    ctx = PseudoContext()
    ctx.session = boto_session
    funk = PseudoFunction()
    funk.name = lambda_arn
    # Kappa 0.6.0 requires this nasty hacking,
    # hopefully we can remove at least some of this soon.
    # Kappa 0.7.0 introduces a whole host over other changes we don't
    # really want, so we're stuck here for a little while.
    # Related: https://github.com/Miserlou/Zappa/issues/684
    # https://github.com/Miserlou/Zappa/issues/688
    # https://github.com/Miserlou/Zappa/commit/3216f7e5149e76921ecdf9451167846b95616313
    if svc == 's3':
        # For S3, kappa wants the ARN split: everything before the last colon
        # becomes the function ARN, the last segment the environment.
        split_arn = lambda_arn.split(':')
        arn_front = ':'.join(split_arn[:-1])
        arn_back = split_arn[-1]
        ctx.environment = arn_back
        funk.arn = arn_front
        funk.name = ':'.join([arn_back, target_function])
    else:
        funk.arn = lambda_arn
    funk._context = ctx
    event_source_obj = event_source_func(ctx, event_source)
    return event_source_obj, ctx, funk
def add_event_source(event_source, lambda_arn, target_function, boto_session, dry=False):
    """
    Given an event_source dictionary, create the object and add the event source.

    Returns one of 'dryrun', 'exists', 'successful' or 'failed'.
    """
    event_source_obj, ctx, funk = get_event_source(
        event_source, lambda_arn, target_function, boto_session, dry=False)
    # TODO: Detect changes in config and refine exists algorithm
    if dry:
        return 'dryrun'
    if event_source_obj.status(funk):
        return 'exists'
    event_source_obj.add(funk)
    # Re-query to confirm the source was actually created.
    return 'successful' if event_source_obj.status(funk) else 'failed'
def remove_event_source(event_source, lambda_arn, target_function, boto_session, dry=False):
    """
    Given an event_source dictionary, create the object and remove the event source.

    In dry mode the constructed event-source object is returned untouched;
    otherwise the removal API response is returned.
    """
    event_source_obj, ctx, funk = get_event_source(
        event_source, lambda_arn, target_function, boto_session, dry=False)
    # This is slightly dirty, but necessary for using Kappa this way.
    funk.arn = lambda_arn
    if dry:
        return event_source_obj
    return event_source_obj.remove(funk)
def get_event_source_status(event_source, lambda_arn, target_function, boto_session, dry=False):
    """
    Given an event_source dictionary, create the object and get the event source status.
    """
    event_source_obj, _, funk = get_event_source(
        event_source, lambda_arn, target_function, boto_session, dry=False)
    return event_source_obj.status(funk)
##
# Analytics / Surveillance / Nagging
##
def check_new_version_available(this_version):
    """
    Checks if a newer version of Zappa is available.

    Returns True if PyPI reports a different version than *this_version*,
    else False.
    """
    import requests
    pypi_url = 'https://pypi.python.org/pypi/Zappa/json'
    resp = requests.get(pypi_url, timeout=1.5)
    top_version = resp.json()['info']['version']
    return this_version != top_version
class InvalidAwsLambdaName(Exception):
    """Exception: proposed AWS Lambda name is invalid"""
    # Raised by validate_name() when a function name violates AWS rules.
    pass
def validate_name(name, maxlen=80):
    """Validate name for AWS Lambda function.

    name: actual name (without `arn:aws:lambda:...:` prefix and without
        `:$LATEST`, alias or version suffix.
    maxlen: max allowed length for name without prefix and suffix.
        The value 80 was calculated from prefix with longest known region name
        and assuming that no alias or version would be longer than `$LATEST`.

    Based on AWS Lambda spec
    http://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html

    Return: the name
    Raise: InvalidAwsLambdaName, if the name is invalid.
    """
    # Guard clauses, checked in the same order as the AWS constraints.
    if not isinstance(name, basestring):
        raise InvalidAwsLambdaName("Name must be of type string")
    if len(name) > maxlen:
        raise InvalidAwsLambdaName(
            "Name is longer than {maxlen} characters.".format(maxlen=maxlen))
    if not name:
        raise InvalidAwsLambdaName("Name must not be empty string.")
    if not re.match("^[a-zA-Z0-9-_]+$", name):
        raise InvalidAwsLambdaName(
            "Name can only contain characters from a-z, A-Z, 0-9, _ and -")
    return name
def contains_python_files_or_subdirs(folder):
    """
    Checks (recursively) if the directory contains .py or .pyc files

    :param folder: directory to scan.
    :return: True if any file under *folder* ends in .py or .pyc.
    """
    # os.walk already descends into every subdirectory, so checking the
    # files of each visited root covers the whole tree. The previous
    # nested `os.walk(d)` walked the bare directory *name* (resolved
    # relative to the CWD, not to `folder`) and was both incorrect and
    # redundant, so it has been removed.
    for root, dirs, files in os.walk(folder):
        if any(filename.endswith(('.py', '.pyc')) for filename in files):
            return True
    return False
def conflicts_with_a_neighbouring_module(directory_path):
    """
    Checks if a directory lies in the same directory as a .py file with the same name.
    """
    parent_dir, dir_name = os.path.split(os.path.normpath(directory_path))
    # A sibling `<dir_name>.py` would shadow / conflict with the package dir.
    return (dir_name + '.py') in os.listdir(parent_dir)
# https://github.com/Miserlou/Zappa/issues/1188
def titlecase_keys(d):
    """
    Takes a dict with keys of type str and returns a new dict with all keys titlecased.
    """
    titlecased = {}
    for key, value in d.items():
        titlecased[key.title()] = value
    return titlecased
# https://github.com/Miserlou/Zappa/issues/1688
def is_valid_bucket_name(name):
    """
    Checks if an S3 bucket name is valid according to
    https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html#bucketnamingrules
    """
    # Length must be between 3 and 63 characters.
    if not 3 <= len(name) <= 63:
        return False
    # No uppercase characters and no underscores allowed.
    if any(ch.isupper() for ch in name):
        return False
    if '_' in name:
        return False
    # Must start with a lowercase letter or a digit.
    first = name[0]
    if not (first.islower() or first.isdigit()):
        return False
    # Each period-separated label must be non-empty and must start and
    # end with a lowercase letter or a digit.
    labels = name.split('.')
    for label in labels:
        if not label:
            return False
        if not (label[0].islower() or label[0].isdigit()):
            return False
        if not (label[-1].islower() or label[-1].isdigit()):
            return False
    # Must not be formatted as an IP address (e.g. 192.168.5.4).
    if all(label.isdigit() for label in labels):
        return False
    return True
| 32.253097 | 143 | 0.598913 |
569d686ac9714004429c2077788f864bcdb8d7ed | 2,982 | py | Python | challs/ctf_sync.py | iver-ics/iver-ctf-2021 | 5db451ff5d587b1df1ec213a297b273e36237fc9 | [
"MIT"
] | null | null | null | challs/ctf_sync.py | iver-ics/iver-ctf-2021 | 5db451ff5d587b1df1ec213a297b273e36237fc9 | [
"MIT"
] | null | null | null | challs/ctf_sync.py | iver-ics/iver-ctf-2021 | 5db451ff5d587b1df1ec213a297b273e36237fc9 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
"""CTF challenges synchronizer
This script publishes or updates the challenges to CTFd, based on the
configurations from `challenge.yml` in each challenge's directory, together
with any pre-built files that should be download'able.
This script is written and maintained by Iver.
"""
import argparse
import json
import os
import re
import threading
import yaml
# Initialize ctfcli with the CTFD_TOKEN and CTFD_URL.
# export CTFD_URL=http://localhost:8000
# export CTFD_TOKEN=xxxx
dry_run = False
def init():
    """Read CTFd credentials from the environment and initialize ctfcli.

    Exits the process with status 1 when CTFD_URL or CTFD_TOKEN is missing.
    Only runs `ctf init` when no local .ctf directory exists yet.
    """
    CTFD_TOKEN = os.getenv("CTFD_TOKEN", default=None)
    CTFD_URL = os.getenv("CTFD_URL", default=None)
    if not CTFD_TOKEN or not CTFD_URL:
        print("Missing environment variables:")
        print("  export CTFD_URL=http://localhost:8000")
        print("  export CTFD_TOKEN=xxxx")
        exit(1)
    if not os.path.exists(".ctf"):
        # NOTE(review): the token is interpolated into a shell command here,
        # so it is visible in the process list and subject to shell-quoting
        # issues — consider feeding it via stdin instead.
        os_run(f"echo '{CTFD_URL}\n{CTFD_TOKEN}\ny' | ctf init")
def get_challenges():
    """List challenge directory names in the CWD, skipping hidden entries."""
    return [entry for entry in os.listdir(".") if not entry.startswith(".")]
def sync(challenge):
    """Publish one challenge via ctfcli, skipping it when its config is missing."""
    print()
    print(f"## Syncing {challenge}")
    if not os.path.exists(f"{challenge}/challenge.yml"):
        print(f"Skipping {challenge}: missing file {challenge}/challenge.yml")
        return
    os_run(
        f"ctf challenge sync '{challenge}'; ctf challenge install '{challenge}'")
def os_run(cmd):
    """Echo and execute *cmd* through the shell; honor the global dry_run flag.

    Raises RuntimeError when the command exits non-zero.
    """
    if dry_run:
        print(f"dry-run: $ {cmd}")
        return
    print(f"$ {cmd}")
    exitcode = os.system(cmd)
    if exitcode != 0:
        raise RuntimeError(f"Non-zero exit code: {exitcode}: {cmd}")
def sync_challanges(challanges, parallel):
    """Sync every challenge in *challanges*, optionally one thread per challenge.

    :param challanges: iterable of challenge directory names to sync.
    :param parallel: when True, run all syncs concurrently in threads.

    Bug fix: the loops previously iterated the module-level ``challenges``
    global instead of this parameter, silently ignoring the argument.
    (The misspelled parameter name is kept for interface compatibility.)
    """
    if parallel:
        jobs = [
            threading.Thread(target=sync, args=(challenge,))
            for challenge in challanges
        ]
        for job in jobs:
            job.start()
        for job in jobs:
            job.join()
    else:
        for challenge in challanges:
            sync(challenge)
if __name__ == "__main__":
challenges = get_challenges()
parser = argparse.ArgumentParser()
parser.add_argument("--dry-run", action="store_true", help="only validate")
parser.add_argument("--parallel", action="store_true",
help="run all challenges at the same time")
parser.add_argument("chall", nargs="+", choices=[
"all", *challenges], help="challenge names to run. specify 'all' for all")
args = parser.parse_args()
dry_run = args.dry_run
all_challenges = "all" in args.chall
if all_challenges and len(args.chall) > 1:
print("Cannot specify 'all' and specific challanges")
exit(1)
if not all_challenges:
challenges = [x for x in challenges if x in args.chall]
print(f"{challenges=}")
if args.dry_run:
print("Running a dry-run")
if not os.path.exists(f".ctf/config"):
init()
sync_challanges(challenges, args.parallel)
print()
print("Synchronized successfully!")
| 26.864865 | 98 | 0.636486 |
1462f4215d74a529b68a4acef0c6c5ddadbc5286 | 19,927 | py | Python | python/services/bigquery/routine.py | trodge/declarative-resource-client-library | 2cb7718a5074776b3113cc18a7483b54022238f3 | [
"Apache-2.0"
] | null | null | null | python/services/bigquery/routine.py | trodge/declarative-resource-client-library | 2cb7718a5074776b3113cc18a7483b54022238f3 | [
"Apache-2.0"
] | null | null | null | python/services/bigquery/routine.py | trodge/declarative-resource-client-library | 2cb7718a5074776b3113cc18a7483b54022238f3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.bigquery import routine_pb2
from google3.cloud.graphite.mmv2.services.google.bigquery import routine_pb2_grpc
from typing import List
class Routine(object):
def __init__(
self,
etag: str = None,
name: str = None,
project: str = None,
dataset: str = None,
routine_type: str = None,
creation_time: int = None,
last_modified_time: int = None,
language: str = None,
arguments: list = None,
return_type: dict = None,
imported_libraries: list = None,
definition_body: str = None,
description: str = None,
determinism_level: str = None,
strict_mode: bool = None,
service_account_file: str = "",
):
channel.initialize()
self.name = name
self.project = project
self.dataset = dataset
self.routine_type = routine_type
self.language = language
self.arguments = arguments
self.return_type = return_type
self.imported_libraries = imported_libraries
self.definition_body = definition_body
self.description = description
self.determinism_level = determinism_level
self.strict_mode = strict_mode
self.service_account_file = service_account_file
def apply(self):
stub = routine_pb2_grpc.BigqueryRoutineServiceStub(channel.Channel())
request = routine_pb2.ApplyBigqueryRoutineRequest()
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.dataset):
request.resource.dataset = Primitive.to_proto(self.dataset)
if RoutineRoutineTypeEnum.to_proto(self.routine_type):
request.resource.routine_type = RoutineRoutineTypeEnum.to_proto(
self.routine_type
)
if RoutineLanguageEnum.to_proto(self.language):
request.resource.language = RoutineLanguageEnum.to_proto(self.language)
if RoutineArgumentsArray.to_proto(self.arguments):
request.resource.arguments.extend(
RoutineArgumentsArray.to_proto(self.arguments)
)
if RoutineArgumentsDataType.to_proto(self.return_type):
request.resource.return_type.CopyFrom(
RoutineArgumentsDataType.to_proto(self.return_type)
)
else:
request.resource.ClearField("return_type")
if Primitive.to_proto(self.imported_libraries):
request.resource.imported_libraries.extend(
Primitive.to_proto(self.imported_libraries)
)
if Primitive.to_proto(self.definition_body):
request.resource.definition_body = Primitive.to_proto(self.definition_body)
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if RoutineDeterminismLevelEnum.to_proto(self.determinism_level):
request.resource.determinism_level = RoutineDeterminismLevelEnum.to_proto(
self.determinism_level
)
if Primitive.to_proto(self.strict_mode):
request.resource.strict_mode = Primitive.to_proto(self.strict_mode)
request.service_account_file = self.service_account_file
response = stub.ApplyBigqueryRoutine(request)
self.etag = Primitive.from_proto(response.etag)
self.name = Primitive.from_proto(response.name)
self.project = Primitive.from_proto(response.project)
self.dataset = Primitive.from_proto(response.dataset)
self.routine_type = RoutineRoutineTypeEnum.from_proto(response.routine_type)
self.creation_time = Primitive.from_proto(response.creation_time)
self.last_modified_time = Primitive.from_proto(response.last_modified_time)
self.language = RoutineLanguageEnum.from_proto(response.language)
self.arguments = RoutineArgumentsArray.from_proto(response.arguments)
self.return_type = RoutineArgumentsDataType.from_proto(response.return_type)
self.imported_libraries = Primitive.from_proto(response.imported_libraries)
self.definition_body = Primitive.from_proto(response.definition_body)
self.description = Primitive.from_proto(response.description)
self.determinism_level = RoutineDeterminismLevelEnum.from_proto(
response.determinism_level
)
self.strict_mode = Primitive.from_proto(response.strict_mode)
def delete(self):
stub = routine_pb2_grpc.BigqueryRoutineServiceStub(channel.Channel())
request = routine_pb2.DeleteBigqueryRoutineRequest()
request.service_account_file = self.service_account_file
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.dataset):
request.resource.dataset = Primitive.to_proto(self.dataset)
if RoutineRoutineTypeEnum.to_proto(self.routine_type):
request.resource.routine_type = RoutineRoutineTypeEnum.to_proto(
self.routine_type
)
if RoutineLanguageEnum.to_proto(self.language):
request.resource.language = RoutineLanguageEnum.to_proto(self.language)
if RoutineArgumentsArray.to_proto(self.arguments):
request.resource.arguments.extend(
RoutineArgumentsArray.to_proto(self.arguments)
)
if RoutineArgumentsDataType.to_proto(self.return_type):
request.resource.return_type.CopyFrom(
RoutineArgumentsDataType.to_proto(self.return_type)
)
else:
request.resource.ClearField("return_type")
if Primitive.to_proto(self.imported_libraries):
request.resource.imported_libraries.extend(
Primitive.to_proto(self.imported_libraries)
)
if Primitive.to_proto(self.definition_body):
request.resource.definition_body = Primitive.to_proto(self.definition_body)
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if RoutineDeterminismLevelEnum.to_proto(self.determinism_level):
request.resource.determinism_level = RoutineDeterminismLevelEnum.to_proto(
self.determinism_level
)
if Primitive.to_proto(self.strict_mode):
request.resource.strict_mode = Primitive.to_proto(self.strict_mode)
response = stub.DeleteBigqueryRoutine(request)
@classmethod
def list(self, project, dataset, service_account_file=""):
stub = routine_pb2_grpc.BigqueryRoutineServiceStub(channel.Channel())
request = routine_pb2.ListBigqueryRoutineRequest()
request.service_account_file = service_account_file
request.Project = project
request.Dataset = dataset
return stub.ListBigqueryRoutine(request).items
    def to_proto(self):
        """Serialize this routine into a ``routine_pb2.BigqueryRoutine``.

        Only truthy fields are written. ``return_type`` is a message
        (submessage) field, so it is either CopyFrom-ed when set or
        explicitly cleared, keeping proto field-presence consistent.
        """
        resource = routine_pb2.BigqueryRoutine()
        if Primitive.to_proto(self.name):
            resource.name = Primitive.to_proto(self.name)
        if Primitive.to_proto(self.project):
            resource.project = Primitive.to_proto(self.project)
        if Primitive.to_proto(self.dataset):
            resource.dataset = Primitive.to_proto(self.dataset)
        if RoutineRoutineTypeEnum.to_proto(self.routine_type):
            resource.routine_type = RoutineRoutineTypeEnum.to_proto(self.routine_type)
        if RoutineLanguageEnum.to_proto(self.language):
            resource.language = RoutineLanguageEnum.to_proto(self.language)
        if RoutineArgumentsArray.to_proto(self.arguments):
            resource.arguments.extend(RoutineArgumentsArray.to_proto(self.arguments))
        if RoutineArgumentsDataType.to_proto(self.return_type):
            resource.return_type.CopyFrom(
                RoutineArgumentsDataType.to_proto(self.return_type)
            )
        else:
            # Clear the message field so an unset return_type is not
            # serialized as a present-but-empty submessage.
            resource.ClearField("return_type")
        if Primitive.to_proto(self.imported_libraries):
            resource.imported_libraries.extend(
                Primitive.to_proto(self.imported_libraries)
            )
        if Primitive.to_proto(self.definition_body):
            resource.definition_body = Primitive.to_proto(self.definition_body)
        if Primitive.to_proto(self.description):
            resource.description = Primitive.to_proto(self.description)
        if RoutineDeterminismLevelEnum.to_proto(self.determinism_level):
            resource.determinism_level = RoutineDeterminismLevelEnum.to_proto(
                self.determinism_level
            )
        if Primitive.to_proto(self.strict_mode):
            resource.strict_mode = Primitive.to_proto(self.strict_mode)
        return resource
class RoutineArguments(object):
    """One argument of a BigQuery routine: name, kind, mode and data type."""
    def __init__(
        self,
        name: str = None,
        argument_kind: str = None,
        mode: str = None,
        data_type: dict = None,
    ):
        self.name = name
        self.argument_kind = argument_kind
        self.mode = mode
        self.data_type = data_type
    @classmethod
    def to_proto(self, resource):
        """Serialize *resource* to a BigqueryRoutineArguments message.

        Returns None for falsy input (unset field).
        """
        if not resource:
            return None
        res = routine_pb2.BigqueryRoutineArguments()
        if Primitive.to_proto(resource.name):
            res.name = Primitive.to_proto(resource.name)
        if RoutineArgumentsArgumentKindEnum.to_proto(resource.argument_kind):
            res.argument_kind = RoutineArgumentsArgumentKindEnum.to_proto(
                resource.argument_kind
            )
        if RoutineArgumentsModeEnum.to_proto(resource.mode):
            res.mode = RoutineArgumentsModeEnum.to_proto(resource.mode)
        if RoutineArgumentsDataType.to_proto(resource.data_type):
            res.data_type.CopyFrom(
                RoutineArgumentsDataType.to_proto(resource.data_type)
            )
        else:
            # data_type is a message field: clear explicitly when unset.
            res.ClearField("data_type")
        return res
    @classmethod
    def from_proto(self, resource):
        """Rebuild a RoutineArguments from its proto message (None-safe)."""
        if not resource:
            return None
        return RoutineArguments(
            name=Primitive.from_proto(resource.name),
            argument_kind=RoutineArgumentsArgumentKindEnum.from_proto(
                resource.argument_kind
            ),
            mode=RoutineArgumentsModeEnum.from_proto(resource.mode),
            data_type=RoutineArgumentsDataType.from_proto(resource.data_type),
        )
class RoutineArgumentsArray(object):
    """List-level (de)serialization helpers for RoutineArguments."""

    @classmethod
    def to_proto(cls, resources):
        """Serialize every element; falsy inputs (None/empty) pass through."""
        if not resources:
            return resources
        return [RoutineArguments.to_proto(item) for item in resources]

    @classmethod
    def from_proto(cls, resources):
        """Deserialize every element of *resources* into RoutineArguments."""
        return [RoutineArguments.from_proto(item) for item in resources]
class RoutineArgumentsDataType(object):
    """Data type of a routine argument.

    The type is recursive: ``array_element_type`` is itself a
    RoutineArgumentsDataType, and ``struct_type`` carries nested fields.
    """
    def __init__(
        self,
        type_kind: str = None,
        array_element_type: dict = None,
        struct_type: dict = None,
    ):
        self.type_kind = type_kind
        self.array_element_type = array_element_type
        self.struct_type = struct_type
    @classmethod
    def to_proto(self, resource):
        """Serialize *resource* to a BigqueryRoutineArgumentsDataType (None-safe)."""
        if not resource:
            return None
        res = routine_pb2.BigqueryRoutineArgumentsDataType()
        if RoutineArgumentsDataTypeTypeKindEnum.to_proto(resource.type_kind):
            res.type_kind = RoutineArgumentsDataTypeTypeKindEnum.to_proto(
                resource.type_kind
            )
        if RoutineArgumentsDataType.to_proto(resource.array_element_type):
            # Recursive: element type of an ARRAY is itself a data type.
            res.array_element_type.CopyFrom(
                RoutineArgumentsDataType.to_proto(resource.array_element_type)
            )
        else:
            # Message field: clear explicitly when unset.
            res.ClearField("array_element_type")
        if RoutineArgumentsDataTypeStructType.to_proto(resource.struct_type):
            res.struct_type.CopyFrom(
                RoutineArgumentsDataTypeStructType.to_proto(resource.struct_type)
            )
        else:
            res.ClearField("struct_type")
        return res
    @classmethod
    def from_proto(self, resource):
        """Rebuild a RoutineArgumentsDataType from its proto message (None-safe)."""
        if not resource:
            return None
        return RoutineArgumentsDataType(
            type_kind=RoutineArgumentsDataTypeTypeKindEnum.from_proto(
                resource.type_kind
            ),
            array_element_type=RoutineArgumentsDataType.from_proto(
                resource.array_element_type
            ),
            struct_type=RoutineArgumentsDataTypeStructType.from_proto(
                resource.struct_type
            ),
        )
class RoutineArgumentsDataTypeArray(object):
    """List-level (de)serialization helpers for RoutineArgumentsDataType."""

    @classmethod
    def to_proto(cls, resources):
        """Serialize every element; falsy inputs (None/empty) pass through."""
        if not resources:
            return resources
        return [RoutineArgumentsDataType.to_proto(item) for item in resources]

    @classmethod
    def from_proto(cls, resources):
        """Deserialize every element of *resources*."""
        return [RoutineArgumentsDataType.from_proto(item) for item in resources]
class RoutineArgumentsDataTypeStructType(object):
    """STRUCT variant of a routine argument data type: an ordered field list."""
    def __init__(self, fields: list = None):
        self.fields = fields
    @classmethod
    def to_proto(self, resource):
        """Serialize *resource* to a BigqueryRoutineArgumentsDataTypeStructType (None-safe)."""
        if not resource:
            return None
        res = routine_pb2.BigqueryRoutineArgumentsDataTypeStructType()
        if RoutineArgumentsDataTypeStructTypeFieldsArray.to_proto(resource.fields):
            res.fields.extend(
                RoutineArgumentsDataTypeStructTypeFieldsArray.to_proto(resource.fields)
            )
        return res
    @classmethod
    def from_proto(self, resource):
        """Rebuild a struct type from its proto message (None-safe)."""
        if not resource:
            return None
        return RoutineArgumentsDataTypeStructType(
            fields=RoutineArgumentsDataTypeStructTypeFieldsArray.from_proto(
                resource.fields
            ),
        )
class RoutineArgumentsDataTypeStructTypeArray(object):
    """List-level (de)serialization helpers for struct types."""

    @classmethod
    def to_proto(cls, resources):
        """Serialize every element; falsy inputs (None/empty) pass through."""
        if not resources:
            return resources
        return [RoutineArgumentsDataTypeStructType.to_proto(item) for item in resources]

    @classmethod
    def from_proto(cls, resources):
        """Deserialize every element of *resources*."""
        return [RoutineArgumentsDataTypeStructType.from_proto(item) for item in resources]
class RoutineArgumentsDataTypeStructTypeFields(object):
    """One named field inside a STRUCT data type."""
    def __init__(self, name: str = None, type: dict = None):
        self.name = name
        self.type = type
    @classmethod
    def to_proto(self, resource):
        """Serialize *resource* to its proto message (None-safe)."""
        if not resource:
            return None
        res = routine_pb2.BigqueryRoutineArgumentsDataTypeStructTypeFields()
        if Primitive.to_proto(resource.name):
            res.name = Primitive.to_proto(resource.name)
        if RoutineArgumentsDataType.to_proto(resource.type):
            res.type.CopyFrom(RoutineArgumentsDataType.to_proto(resource.type))
        else:
            # type is a message field: clear explicitly when unset.
            res.ClearField("type")
        return res
    @classmethod
    def from_proto(self, resource):
        """Rebuild a struct field from its proto message (None-safe)."""
        if not resource:
            return None
        return RoutineArgumentsDataTypeStructTypeFields(
            name=Primitive.from_proto(resource.name),
            type=RoutineArgumentsDataType.from_proto(resource.type),
        )
class RoutineArgumentsDataTypeStructTypeFieldsArray(object):
    """List-level (de)serialization helpers for struct-type fields."""

    @classmethod
    def to_proto(cls, resources):
        """Serialize every element; falsy inputs (None/empty) pass through."""
        if not resources:
            return resources
        return [
            RoutineArgumentsDataTypeStructTypeFields.to_proto(item)
            for item in resources
        ]

    @classmethod
    def from_proto(cls, resources):
        """Deserialize every element of *resources*."""
        return [
            RoutineArgumentsDataTypeStructTypeFields.from_proto(item)
            for item in resources
        ]
class RoutineRoutineTypeEnum(object):
    """Converts routine-type enum values between short strings and proto ints."""

    @classmethod
    def to_proto(cls, resource):
        """Map a short enum string to its proto integer; falsy passes through."""
        if not resource:
            return resource
        return routine_pb2.BigqueryRoutineRoutineTypeEnum.Value(
            "BigqueryRoutineRoutineTypeEnum{}".format(resource)
        )

    @classmethod
    def from_proto(cls, resource):
        """Map a proto integer back to its short string; falsy passes through."""
        if not resource:
            return resource
        full_name = routine_pb2.BigqueryRoutineRoutineTypeEnum.Name(resource)
        return full_name[len("BigqueryRoutineRoutineTypeEnum"):]
class RoutineLanguageEnum(object):
    """Converts routine-language enum values between short strings and proto ints."""

    @classmethod
    def to_proto(cls, resource):
        """Map a short enum string to its proto integer; falsy passes through."""
        if not resource:
            return resource
        return routine_pb2.BigqueryRoutineLanguageEnum.Value(
            "BigqueryRoutineLanguageEnum{}".format(resource)
        )

    @classmethod
    def from_proto(cls, resource):
        """Map a proto integer back to its short string; falsy passes through."""
        if not resource:
            return resource
        full_name = routine_pb2.BigqueryRoutineLanguageEnum.Name(resource)
        return full_name[len("BigqueryRoutineLanguageEnum"):]
class RoutineArgumentsArgumentKindEnum(object):
    """Converts argument-kind enum values between short strings and proto ints."""

    @classmethod
    def to_proto(cls, resource):
        """Map a short enum string to its proto integer; falsy passes through."""
        if not resource:
            return resource
        return routine_pb2.BigqueryRoutineArgumentsArgumentKindEnum.Value(
            "BigqueryRoutineArgumentsArgumentKindEnum{}".format(resource)
        )

    @classmethod
    def from_proto(cls, resource):
        """Map a proto integer back to its short string; falsy passes through."""
        if not resource:
            return resource
        full_name = routine_pb2.BigqueryRoutineArgumentsArgumentKindEnum.Name(resource)
        return full_name[len("BigqueryRoutineArgumentsArgumentKindEnum"):]
class RoutineArgumentsModeEnum(object):
    """Converts argument-mode enum values between short strings and proto ints."""

    @classmethod
    def to_proto(cls, resource):
        """Map a short enum string to its proto integer; falsy passes through."""
        if not resource:
            return resource
        return routine_pb2.BigqueryRoutineArgumentsModeEnum.Value(
            "BigqueryRoutineArgumentsModeEnum{}".format(resource)
        )

    @classmethod
    def from_proto(cls, resource):
        """Map a proto integer back to its short string; falsy passes through."""
        if not resource:
            return resource
        full_name = routine_pb2.BigqueryRoutineArgumentsModeEnum.Name(resource)
        return full_name[len("BigqueryRoutineArgumentsModeEnum"):]
class RoutineArgumentsDataTypeTypeKindEnum(object):
    """Converts type-kind enum values between short strings and proto ints."""

    @classmethod
    def to_proto(cls, resource):
        """Map a short enum string to its proto integer; falsy passes through."""
        if not resource:
            return resource
        return routine_pb2.BigqueryRoutineArgumentsDataTypeTypeKindEnum.Value(
            "BigqueryRoutineArgumentsDataTypeTypeKindEnum{}".format(resource)
        )

    @classmethod
    def from_proto(cls, resource):
        """Map a proto integer back to its short string; falsy passes through."""
        if not resource:
            return resource
        full_name = routine_pb2.BigqueryRoutineArgumentsDataTypeTypeKindEnum.Name(
            resource
        )
        return full_name[len("BigqueryRoutineArgumentsDataTypeTypeKindEnum"):]
class RoutineDeterminismLevelEnum(object):
    """Converts determinism-level enum values between short strings and proto ints."""

    @classmethod
    def to_proto(cls, resource):
        """Map a short enum string to its proto integer; falsy passes through."""
        if not resource:
            return resource
        return routine_pb2.BigqueryRoutineDeterminismLevelEnum.Value(
            "BigqueryRoutineDeterminismLevelEnum{}".format(resource)
        )

    @classmethod
    def from_proto(cls, resource):
        """Map a proto integer back to its short string; falsy passes through."""
        if not resource:
            return resource
        full_name = routine_pb2.BigqueryRoutineDeterminismLevelEnum.Name(resource)
        return full_name[len("BigqueryRoutineDeterminismLevelEnum"):]
class Primitive(object):
    """Pass-through converter for primitive (scalar/list) values."""

    @classmethod
    def to_proto(cls, s):
        """Return *s* unchanged, substituting the proto default "" for falsy values."""
        return s if s else ""

    @classmethod
    def from_proto(cls, s):
        """Primitives need no conversion when read back from proto."""
        return s
| 35.969314 | 88 | 0.673107 |
e2f86c09794f9a75036150e2e33cbb6bc6bc911d | 23,538 | py | Python | src/azure-cli/azure/cli/command_modules/appconfig/_help.py | dlijah/azure-cli | 06c71648030d11fe2a46121519bced5712be99be | [
"MIT"
] | 1 | 2020-10-06T18:34:24.000Z | 2020-10-06T18:34:24.000Z | src/azure-cli/azure/cli/command_modules/appconfig/_help.py | dlijah/azure-cli | 06c71648030d11fe2a46121519bced5712be99be | [
"MIT"
] | 1 | 2020-08-04T14:17:13.000Z | 2020-08-04T14:17:13.000Z | src/azure-cli/azure/cli/command_modules/appconfig/_help.py | hackathon-cli-recommendation/azure-cli | b9df3c9cfd400627912e5751bb6dcd429670b2c7 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps # pylint: disable=unused-import
# pylint: disable=line-too-long, too-many-lines
# Top-level help entry for the `az appconfig` command group.
helps['appconfig'] = """
type: group
short-summary: Manage App Configurations.
"""
# Help text for `az appconfig create` (fixes "SUBSCRIPTON" typo in the
# user-assigned identity example).
helps['appconfig create'] = """
type: command
short-summary: Create an App Configuration.
examples:
  - name: Create an App Configuration with name, location, sku and resource group.
    text: az appconfig create -g MyResourceGroup -n MyAppConfiguration -l westus --sku Standard
  - name: Create an App Configuration with name, location, sku and resource group with system assigned identity.
    text: az appconfig create -g MyResourceGroup -n MyAppConfiguration -l westus --sku Standard --assign-identity
  - name: Create an App Configuration with name, location, sku and resource group with user assigned identity.
    text: az appconfig create -g MyResourceGroup -n MyAppConfiguration -l westus --sku Standard --assign-identity /subscriptions/<SUBSCRIPTION ID>/resourcegroups/<RESOURCEGROUP>/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myUserAssignedIdentity
  - name: Create an App Configuration with name, location and resource group and enable public network access.
    text: az appconfig create -g MyResourceGroup -n MyAppConfiguration -l westus --enable-public-network
"""
# Help entry for the `az appconfig identity` subgroup.
helps['appconfig identity'] = """
type: group
short-summary: Managed identities for App Configurations.
"""
# Help text for `az appconfig identity assign` (fixes "SUBSCRIPTON" typos).
helps['appconfig identity assign'] = """
type: command
short-summary: Update managed identities for an App Configuration.
examples:
  - name: Enable the system-assigned identity for an existing App Configuration
    text: az appconfig identity assign -g MyResourceGroup -n MyAppConfiguration
  - name: Assign a user-assigned managed identity for an existing App Configuration
    text: az appconfig identity assign -g MyResourceGroup -n MyAppConfiguration --identities "/subscriptions/<SUBSCRIPTION ID>/resourcegroups/<RESOURCEGROUP>/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myUserAssignedIdentity"
  - name: Assign both system-assigned and user assigned identities for an existing App Configuration
    text: az appconfig identity assign -g MyResourceGroup -n MyAppConfiguration --identities [system] "/subscriptions/<SUBSCRIPTION ID>/resourcegroups/<RESOURCEGROUP>/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myUserAssignedIdentity"
"""
# Help text for `az appconfig identity remove` (fixes "SUBSCRIPTON" typo and
# "a App" -> "an App" grammar in example names).
helps['appconfig identity remove'] = """
type: command
short-summary: Remove managed identities for an App Configuration.
examples:
  - name: Remove the system-assigned identity from an App Configuration.
    text: az appconfig identity remove -g MyResourceGroup -n MyAppConfiguration
  - name: Remove a user assigned identity from an App Configuration.
    text: az appconfig identity remove -g MyResourceGroup -n MyAppConfiguration --identities "/subscriptions/<SUBSCRIPTION ID>/resourcegroups/<RESOURCEGROUP>/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myUserAssignedIdentity"
  - name: Remove all identities from an App Configuration.
    text: az appconfig identity remove -g MyResourceGroup -n MyAppConfiguration --identities [all]
"""
helps['appconfig identity show'] = """
type: command
short-summary: Display managed identities for an App Configuration.
examples:
- name: Display managed identities for a task.
text: az appconfig identity show -g MyResourceGroup -n MyAppConfiguration
"""
helps['appconfig credential'] = """
type: group
short-summary: Manage credentials for App Configurations.
"""
helps['appconfig credential list'] = """
type: command
short-summary: List access keys of an App Configuration.
examples:
- name: List access keys of an App Configuration
text: az appconfig credential list -g MyResourceGroup -n MyAppConfiguration
"""
helps['appconfig credential regenerate'] = """
type: command
short-summary: Regenerate an access key for an App Configuration.
examples:
- name: Regenerate a read only access key for an App Configuration
text: az appconfig credential regenerate -g MyResourceGroup -n MyAppConfiguration --id 0-l0-s0:8ldbreMVH+d7EjaSUg3H
"""
helps['appconfig delete'] = """
type: command
short-summary: Delete an App Configuration.
examples:
- name: Delete an App Configuration under resource group
text: az appconfig delete -g MyResourceGroup -n MyAppConfiguration
"""
helps['appconfig kv'] = """
type: group
short-summary: Manage key-values stored in an App Configuration.
"""
helps['appconfig kv delete'] = """
type: command
short-summary: Delete key-values.
examples:
- name: Delete a key using App Configuration name without confirmation.
text: az appconfig kv delete -n MyAppConfiguration --key color --label MyLabel --yes
- name: Delete a key using connection string.
text: az appconfig kv delete --connection-string Endpoint=https://contoso.azconfig.io;Id=xxx;Secret=xxx --key color --label MyLabel
"""
helps['appconfig kv export'] = """
type: command
short-summary: Export configurations to another place from your App Configuration.
examples:
- name: Export all keys and feature flags with label test to a json file.
text: az appconfig kv export -n MyAppConfiguration --label test -d file --path D:/abc.json --format json
- name: Export all keys with null label to an App Service application.
text: az appconfig kv export -n MyAppConfiguration -d appservice --appservice-account MyAppService
- name: Export all keys with label test excluding feature flags to a json file.
text: az appconfig kv export -n MyAppConfiguration --label test -d file --path D:/abc.json --format json --skip-features
- name: Export all keys and feature flags with all labels to another App Configuration.
text: az appconfig kv export -n MyAppConfiguration -d appconfig --dest-name AnotherAppConfiguration --key * --label * --preserve-labels
- name: Export all keys and feature flags with all labels to another App Configuration and overwrite destination labels.
text: az appconfig kv export -n MyAppConfiguration -d appconfig --dest-name AnotherAppConfiguration --key * --label * --dest-label ExportedKeys
"""
helps['appconfig kv import'] = """
type: command
short-summary: Import configurations into your App Configuration from another place.
examples:
- name: Import all keys and feature flags from a file and apply test label.
text: az appconfig kv import -n MyAppConfiguration --label test -s file --path D:/abc.json --format json
- name: Import all keys and feature flags with null label and apply new label from an App Configuration.
text: az appconfig kv import -n MyAppConfiguration -s appconfig --src-name AnotherAppConfiguration --label ImportedKeys
- name: Import all keys and apply null label from an App Service application.
text: az appconfig kv import -n MyAppConfiguration -s appservice --appservice-account MyAppService
- name: Import all keys with label test and apply test2 label excluding feature flags from an App Configuration.
text: az appconfig kv import -n MyAppConfiguration -s appconfig --src-label test --label test2 --src-name AnotherAppConfiguration --skip-features
- name: Import all keys and feature flags with all labels to another App Configuration.
text: az appconfig kv import -n MyAppConfiguration -s appconfig --src-name AnotherAppConfiguration --src-key * --src-label * --preserve-labels
- name: Import all keys and feature flags from a JSON file and apply JSON content type.
text: az appconfig kv import -n MyAppConfiguration -s file --path D:/abc.json --format json --separator . --content-type application/json
"""
# Help text for `az appconfig kv list` (fixes "label start with" grammar).
helps['appconfig kv list'] = """
type: command
short-summary: List key-values.
examples:
  - name: List all key-values with null label.
    text: az appconfig kv list -n MyAppConfiguration --label \\0
  - name: List a specific key for any label starting with v1. using connection string.
    text: az appconfig kv list --key color --connection-string Endpoint=https://contoso.azconfig.io;Id=xxx;Secret=xxx --label v1.*
  - name: List all keys with any labels and query only key, value and tags.
    text: az appconfig kv list --connection-string Endpoint=https://contoso.azconfig.io;Id=xxx;Secret=xxx --fields key value tags --datetime "2019-05-01T11:24:12Z"
  - name: List content of key vault reference with key prefix 'KVRef_' and only select key and value.
    text: az appconfig kv list -n MyAppConfiguration --key "KVRef_*" --resolve-keyvault --query "[*].{key:key, value:value}"
  - name: List key-values with multiple labels.
    text: az appconfig kv list --label test,prod,\\0 -n MyAppConfiguration
"""
helps['appconfig kv lock'] = """
type: command
short-summary: Lock a key-value to prohibit write operations.
examples:
- name: Lock a key-value using App Configuration name.
text: az appconfig kv lock -n MyAppConfiguration --key color --label test
- name: Force locking a key-value using connection string.
text: az appconfig kv lock --connection-string Endpoint=https://contoso.azconfig.io;Id=xxx;Secret=xxx --key color --label test --yes
"""
# Help text for `az appconfig kv restore` (fixes "label start with" grammar).
helps['appconfig kv restore'] = """
type: command
short-summary: Restore key-values.
examples:
  - name: Restore all key-values to a specific point in time.
    text: az appconfig kv restore -n MyAppConfiguration --datetime "2019-05-01T11:24:12Z"
  - name: Restore a specific key for any label starting with v1. using connection string to a specific point in time.
    text: az appconfig kv restore --key color --connection-string Endpoint=https://contoso.azconfig.io;Id=xxx;Secret=xxx --label v1.* --datetime "2019-05-01T11:24:12Z"
"""
helps['appconfig kv set'] = """
type: command
short-summary: Set a key-value.
examples:
- name: Set a key-value with label MyLabel.
text: az appconfig kv set -n MyAppConfiguration --key color --label MyLabel --value red
- name: Set a key with null label using connection string.
text: az appconfig kv set --connection-string Endpoint=https://contoso.azconfig.io;Id=xxx;Secret=xxx --key color --value red --tags key1=value1 key2=value2
- name: Set a key with string value and JSON content type.
text: az appconfig kv set -n MyAppConfiguration --key color --value \\"red\\" --content-type application/json
- name: Set a key with list value and JSON content type.
text: az appconfig kv set -n MyAppConfiguration --key options --value [1,2,3] --content-type application/activity+json;charset=utf-8
- name: Set a key with null value and JSON content type.
text: az appconfig kv set -n MyAppConfiguration --key foo --value null --content-type application/json
"""
helps['appconfig kv set-keyvault'] = """
type: command
short-summary: Set a keyvault reference.
examples:
- name: Set a keyvault reference with label MyLabel.
text: az appconfig kv set-keyvault -n MyAppConfiguration --key HostSecret --label MyLabel --secret-identifier https://contoso.vault.azure.net/Secrets/DummySecret/Dummyversion
- name: Set a keyvault reference with null label and multiple tags using connection string.
text: az appconfig kv set-keyvault --connection-string Endpoint=https://contoso.azconfig.io;Id=xxx;Secret=xxx --key HostSecret --secret-identifier https://contoso.vault.azure.net/Secrets/DummySecret --tags tag1=value1 tag2=value2
"""
helps['appconfig kv show'] = """
type: command
short-summary: Show all attributes of a key-value.
examples:
- name: Show a key-value using App Configuration name with a specific label and datetime
text: az appconfig kv show -n MyAppConfiguration --key color --label MyLabel --datetime "2019-05-01T11:24:12Z"
- name: Show a key-value using connection string with label
text: az appconfig kv show --connection-string Endpoint=https://contoso.azconfig.io;Id=xxx;Secret=xxx --key color --label MyLabel
"""
helps['appconfig kv unlock'] = """
type: command
short-summary: Unlock a key-value to gain write operations.
examples:
- name: Unlock a key-value using App Configuration name.
text: az appconfig kv unlock -n MyAppConfiguration --key color --label test
- name: Force unlocking a key-value using connection string.
text: az appconfig kv unlock --connection-string Endpoint=https://contoso.azconfig.io;Id=xxx;Secret=xxx --key color --label test --yes
"""
helps['appconfig list'] = """
type: command
short-summary: Lists all App Configurations under the current subscription.
examples:
- name: List all App Configurations under a resource group
text: az appconfig list -g MyResourceGroup
"""
helps['appconfig revision'] = """
type: group
short-summary: Manage revisions for key-values stored in an App Configuration.
"""
helps['appconfig revision list'] = """
type: command
short-summary: Lists revision history of key-values.
examples:
- name: List revision history of a key-value using App Configuration name.
text: az appconfig revision list -n MyAppConfiguration --key color --label test
- name: List revision history of a key-value with multiple labels.
text: az appconfig revision list -n MyAppConfiguration --key color --label test,prod,\\0
- name: List revision history for key "color" with any labels using connection string
text: az appconfig revision list --connection-string Endpoint=https://contoso.azconfig.io;Id=xxx;Secret=xxx --key color --datetime "2019-05-01T11:24:12Z"
- name: List revision history for all items and query only key, value and last_modified.
text: az appconfig revision list --connection-string Endpoint=https://contoso.azconfig.io;Id=xxx;Secret=xxx --fields key value last_modified
"""
helps['appconfig show'] = """
type: command
short-summary: Show properties of an App Configuration.
examples:
- name: Show properties of an App Configuration
text: az appconfig show -g MyResourceGroup -n MyAppConfiguration
"""
helps['appconfig update'] = """
type: command
short-summary: Update an App Configuration.
examples:
- name: Update tags of an App Configuration
text: az appconfig update -g MyResourceGroup -n MyAppConfiguration --tags key1=value1 key2=value2
- name: Upgrade sku of an App Configuration to standard
text: az appconfig update -g MyResourceGroup -n MyAppConfiguration --sku Standard
- name: Enable customer encryption key with system assigned identity
text: az appconfig update -g MyResourceGroup -n MyAppConfiguration --encryption-key-name myKey --encryption-key-version keyVersion --encryption-key-vault https://keyVaultName.vault.azure.net
- name: Remove customer encryption key
text: az appconfig update -g MyResourceGroup -n MyAppConfiguration --encryption-key-name ""
- name: Update an App Configuration to enable public network access.
text: az appconfig update -g MyResourceGroup -n MyAppConfiguration --enable-public-network true
"""
helps['appconfig feature'] = """
type: group
short-summary: Manage feature flags stored in an App Configuration.
"""
helps['appconfig feature set'] = """
type: command
short-summary: Set a feature flag.
examples:
- name: Set a feature flag with label MyLabel.
text:
az appconfig feature set -n MyAppConfiguration --feature color --label MyLabel
- name: Set a feature flag with null label using connection string and set a description.
text:
az appconfig feature set --connection-string Endpoint=https://contoso.azconfig.io;Id=xxx;Secret=xxx --feature color --description "This is a colorful feature"
"""
helps['appconfig feature delete'] = """
type: command
short-summary: Delete feature flag.
examples:
- name: Delete a feature using App Configuration name without confirmation.
text:
az appconfig feature delete -n MyAppConfiguration --feature color --label MyLabel --yes
- name: Delete a feature using connection string.
text:
az appconfig feature delete --connection-string Endpoint=https://contoso.azconfig.io;Id=xxx;Secret=xxx --feature color --label MyLabel
"""
helps['appconfig feature show'] = """
type: command
short-summary: Show all attributes of a feature flag.
examples:
- name: Show a feature flag using App Configuration name with a specific label
text:
az appconfig feature show -n MyAppConfiguration --feature color --label MyLabel
- name: Show a feature flag using connection string and field filters
text:
az appconfig feature show --connection-string Endpoint=https://contoso.azconfig.io;Id=xxx;Secret=xxx --feature color --fields key locked conditions state
"""
# Help text for `az appconfig feature list` (fixes "specfic" typo and
# "label start with" grammar).
helps['appconfig feature list'] = """
type: command
short-summary: List feature flags.
examples:
  - name: List all feature flags.
    text:
        az appconfig feature list -n MyAppConfiguration
  - name: List all feature flags with null labels.
    text:
        az appconfig feature list -n MyAppConfiguration --label \\0
  - name: List a specific feature for any label starting with v1. using connection string.
    text:
        az appconfig feature list --feature color --connection-string Endpoint=https://contoso.azconfig.io;Id=xxx;Secret=xxx --label v1.*
  - name: List all features with any labels and query only key, state and conditions.
    text:
        az appconfig feature list --connection-string Endpoint=https://contoso.azconfig.io;Id=xxx;Secret=xxx --fields key state conditions
  - name: List 150 feature flags with any labels.
    text:
        az appconfig feature list --connection-string Endpoint=https://contoso.azconfig.io;Id=xxx;Secret=xxx --top 150
  - name: List feature flags with multiple labels.
    text:
        az appconfig feature list --label test,prod,\\0 -n MyAppConfiguration
"""
helps['appconfig feature lock'] = """
type: command
short-summary: Lock a feature flag to prohibit write operations.
examples:
- name: Lock a feature using App Configuration name.
text:
az appconfig feature lock -n MyAppConfiguration --feature color --label test
- name: Force locking a feature using connection string.
text:
az appconfig feature lock --connection-string Endpoint=https://contoso.azconfig.io;Id=xxx;Secret=xxx --feature color --label test --yes
"""
helps['appconfig feature unlock'] = """
type: command
short-summary: Unlock a feature to gain write operations.
examples:
- name: Unlock a feature using App Configuration name.
text:
az appconfig feature unlock -n MyAppConfiguration --feature color --label test
- name: Force unlocking a feature using connection string.
text:
az appconfig feature unlock --connection-string Endpoint=https://contoso.azconfig.io;Id=xxx;Secret=xxx --feature color --label test --yes
"""
helps['appconfig feature enable'] = """
type: command
short-summary: Enable a feature flag to turn it ON for use.
examples:
- name: enable a feature using App Configuration name.
text:
az appconfig feature enable -n MyAppConfiguration --feature color --label test
- name: Force enabling a feature using connection string.
text:
az appconfig feature enable --connection-string Endpoint=https://contoso.azconfig.io;Id=xxx;Secret=xxx --feature color --label test --yes
"""
helps['appconfig feature disable'] = """
type: command
short-summary: Disable a feature flag to turn it OFF for use.
examples:
- name: disable a feature using App Configuration name.
text:
az appconfig feature disable -n MyAppConfiguration --feature color --label test
- name: Force disabling a feature using connection string.
text:
az appconfig feature disable --connection-string Endpoint=https://contoso.azconfig.io;Id=xxx;Secret=xxx --feature color --label test --yes
"""
helps['appconfig feature filter'] = """
type: group
short-summary: Manage filters associated with feature flags stored in an App Configuration.
"""
helps['appconfig feature filter add'] = """
type: command
short-summary: Add a filter to a feature flag.
examples:
- name: Add a filter for feature 'color' with label MyLabel with name 'MyFilter' and 2 parameters.
text:
az appconfig feature filter add -n MyAppConfiguration --feature color --label MyLabel --filter-name MyFilter --filter-parameters Name=Value Name2=Value2
- name: Insert a filter at index 2 (zero-based index) for feature 'color' with label MyLabel and filter name 'MyFilter' with no parameters
text:
az appconfig feature filter add -n MyAppConfiguration --feature color --label MyLabel --filter-name MyFilter --index 2
- name: Add a filter with name 'MyFilter' using connection string.
text:
az appconfig feature filter add --connection-string Endpoint=https://contoso.azconfig.io;Id=xxx;Secret=xxx --feature color --filter-name MyFilter
"""
helps['appconfig feature filter delete'] = """
type: command
short-summary: Delete a filter from a feature flag.
examples:
- name: Delete a filter from a feature using App Configuration name without confirmation.
text:
az appconfig feature filter delete -n MyAppConfiguration --feature color --filter-name MyFilter --yes
- name: Delete a filter from a feature when you have multiple filters with that same name.
text:
az appconfig feature filter delete --feature color --filter-name MyFilter --index 2 --connection-string Endpoint=https://contoso.azconfig.io;Id=xxx;Secret=xxx
- name: Delete all filters of a feature using App Configuration name without confirmation.
text:
az appconfig feature filter delete -n MyAppConfiguration --feature color --label MyLabel --all --yes
"""
helps['appconfig feature filter show'] = """
type: command
short-summary: Show filters of a feature flag.
examples:
- name: Show one unique feature filter when you have multiple filters with that same name.
text:
az appconfig feature filter show -n MyAppConfiguration --feature color --filter-name MyFilter --index 2
- name: Show all instances of a feature filter when you have multiple filters with that same name.
text:
az appconfig feature filter show --connection-string Endpoint=https://contoso.azconfig.io;Id=xxx;Secret=xxx --feature color --filter-name MyFilter
"""
helps['appconfig feature filter list'] = """
type: command
short-summary: List all filters for a feature flag.
examples:
- name: List all filters for feature flag 'color'.
text:
az appconfig feature filter list -n MyAppConfiguration --feature color --all
- name: List 150 filters for feature flag 'color'
text:
az appconfig feature filter list --connection-string Endpoint=https://contoso.azconfig.io;Id=xxx;Secret=xxx --feature color --top 150
"""
| 52.540179 | 258 | 0.720919 |
da02df618e1047600a1e6d3cc10b54e19ca250ab | 3,717 | py | Python | pytrol/control/agent/HPLEstimator.py | mothguib/pytrol | 1b9634fb97e77407b4c609399c27663396a8d7e3 | [
"MIT"
] | null | null | null | pytrol/control/agent/HPLEstimator.py | mothguib/pytrol | 1b9634fb97e77407b4c609399c27663396a8d7e3 | [
"MIT"
] | null | null | null | pytrol/control/agent/HPLEstimator.py | mothguib/pytrol | 1b9634fb97e77407b4c609399c27663396a8d7e3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
import torch
import pytrol.util.argsparser as parser
from pytrol.control.agent.HPAgent import HPAgent
from pytrol.control.agent.MAPTrainerModelAgent import MAPTrainerModelAgent
from pytrol.model.knowledge.EnvironmentKnowledge import EnvironmentKnowledge
from pytrol.util.net.Connection import Connection
# Heuristic Pathfinder Linear Predictor
class HPLEstimator(HPAgent, MAPTrainerModelAgent):
    """Heuristic Pathfinder Linear Estimator.

    Combines the heuristic pathfinding behaviour of :class:`HPAgent` with a
    MAPTrainer linear model used to refine node-idleness estimates.
    """

    def __init__(self,
                 id_: int,
                 original_id: str,
                 env_knl: EnvironmentKnowledge,
                 connection: Connection,
                 agts_addrs: list,
                 datasrc: str = None,
                 variant: str = '',
                 gpu: bool = False,
                 depth: float = 3.0,
                 model_type: str = "Linear",
                 model_variant: str = "IdentityWeights",
                 interaction: bool = True):
        """Initialise both parent behaviours.

        Args:
            id_: numeric agent id.
            original_id: textual agent id.
            env_knl: the agent's knowledge of the environment.
            connection: communication channel to the other agents.
            agts_addrs: addresses of the other agents.
            datasrc: model data source; falls back to the CLI argument.
            variant: strategy variant name.
            gpu: run the model on GPU when True.
            depth: pathfinding depth.
            model_type: MAPTrainer model family.
            model_variant: MAPTrainer model variant.
            interaction: enable inter-agent interaction.
        """
        HPAgent.__init__(self, id_=id_, original_id=original_id,
                         env_knl=env_knl, connection=connection,
                         agts_addrs=agts_addrs, variant=variant,
                         depth=depth, interaction=interaction)

        # Fall back to the command-line argument when no data source given.
        if datasrc is None:
            datasrc = parser.parse_args().datasrc

        MAPTrainerModelAgent.__init__(self, id_=id_,
                                      original_id=original_id,
                                      env_knl=env_knl,
                                      connection=connection,
                                      agts_addrs=agts_addrs,
                                      variant=variant,
                                      depth=depth,
                                      gpu=gpu,
                                      model_type=model_type,
                                      model_variant=model_variant,
                                      interaction=interaction,
                                      datasrc=datasrc)

    def run_model(self, input_) -> torch.Tensor:
        """Feed `input_` through the model and cache its idleness estimate.

        Args:
            input_: raw idleness values, converted by `prepare_input`.
        """
        self.model_estm_idls = self.model(self.prepare_input(input_))
        return self.model_estm_idls

    def estimate_idls(self) -> np.ndarray:
        """Predictor function: return the model's estimation of idlenesses."""
        raw = self.run_model(self.env_knl.idls).detach().cpu().numpy()

        # TODO: changing the model, meanwhile any negative idleness is
        # frozen (set to 0)
        clipped = np.maximum(raw, np.zeros(np.array(raw).shape))

        # For each node keep the smallest of: the clipped model estimate,
        # the previous estimate aged by one time step, and the idleness
        # shared by the other agents.
        aged_prev = np.array(self.prev_estimated_idls, dtype=np.float32) + 1.0
        return np.minimum(np.minimum(clipped, aged_prev),
                          self.env_knl.shared_idls)
| 35.4 | 78 | 0.503094 |
e3e88d30a3c832b9f01d22de0fbaf5d2fc4acc6e | 1,331 | py | Python | cybox/test/objects/obj_metadata_test.py | Mattlk13/python-cybox | ee82c7da40ca4638e3ca8d70766150c0dace1b55 | [
"BSD-3-Clause"
] | 40 | 2015-03-05T18:22:51.000Z | 2022-03-06T07:29:25.000Z | cybox/test/objects/obj_metadata_test.py | Mattlk13/python-cybox | ee82c7da40ca4638e3ca8d70766150c0dace1b55 | [
"BSD-3-Clause"
] | 106 | 2015-01-12T18:52:20.000Z | 2021-04-25T22:57:52.000Z | cybox/test/objects/obj_metadata_test.py | Mattlk13/python-cybox | ee82c7da40ca4638e3ca8d70766150c0dace1b55 | [
"BSD-3-Clause"
] | 30 | 2015-03-25T07:24:40.000Z | 2021-07-23T17:10:11.000Z | # Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from cybox.objects import _ObjectMetadata, UnknownObjectType
class ObjectTypeTest(unittest.TestCase):
    """Verify that _ObjectMetadata raises sensible errors for bad entries."""

    def setUp(self):
        # Entries whose module/class references are deliberately broken.
        entries = [
            ("!!MissingAPIClass", None, None, None, None),
            ("!!MissingModule", 'some.nonexistent.module', None, None, None),
            ("!!BadClassName", 'cybox.NonexistentClass', None, None, None),
        ]
        self.meta = _ObjectMetadata(entries)

    def test_unknown_object(self):
        with self.assertRaises(UnknownObjectType):
            self.meta.get_class_for_object_type("!!BadType")

    def test_missing_api_class(self):
        with self.assertRaises(UnknownObjectType):
            self.meta.get_class_for_object_type("!!MissingAPIClass")

    def test_missing_module(self):
        with self.assertRaises(ImportError):
            self.meta.get_class_for_object_type("!!MissingModule")

    def test_missing_class(self):
        with self.assertRaises(AttributeError):
            self.meta.get_class_for_object_type("!!BadClassName")
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| 31.690476 | 77 | 0.595793 |
f270009828b45836f4bb33e0583ec3a144774933 | 1,168 | py | Python | __init__.py | rborstgithub/l10n_gt_extra | 12cec30cddd4e7e5fdc8caaffc001ba21e73f2ec | [
"MIT"
] | null | null | null | __init__.py | rborstgithub/l10n_gt_extra | 12cec30cddd4e7e5fdc8caaffc001ba21e73f2ec | [
"MIT"
] | 2 | 2020-11-04T14:48:18.000Z | 2021-04-13T15:59:54.000Z | __init__.py | rborstgithub/l10n_gt_extra | 12cec30cddd4e7e5fdc8caaffc001ba21e73f2ec | [
"MIT"
] | 17 | 2017-12-11T17:36:01.000Z | 2022-03-30T03:56:06.000Z | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2009-2012 Soluciones Tecnologócias Prisma S.A. All Rights Reserved.
# José Rodrigo Fernández Menegazzo, Soluciones Tecnologócias Prisma S.A.
# (http://www.solucionesprisma.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_invoice
import res_partner
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| 41.714286 | 84 | 0.645548 |
a34a23ec56a28899614d9528243f3e0db1c9b0c8 | 7,675 | py | Python | bitbots_live_tool_rqt/scripts/test_node.py | bit-bots/bitbots_misc | b6dd979bd4f0dc0019d9ff1b52510d9cb32bc284 | [
"MIT"
] | null | null | null | bitbots_live_tool_rqt/scripts/test_node.py | bit-bots/bitbots_misc | b6dd979bd4f0dc0019d9ff1b52510d9cb32bc284 | [
"MIT"
] | 57 | 2019-03-02T10:59:05.000Z | 2021-12-09T18:57:34.000Z | bitbots_live_tool_rqt/scripts/test_node.py | bit-bots/bitbots_misc | b6dd979bd4f0dc0019d9ff1b52510d9cb32bc284 | [
"MIT"
] | 1 | 2019-07-28T11:26:47.000Z | 2019-07-28T11:26:47.000Z | #!/usr/bin/env python3
import rospy
from std_msgs.msg import Header
from humanoid_league_msgs.msg import BallRelative, ObstacleRelativeArray, ObstacleRelative, Strategy, GameState, RobotControlState
from geometry_msgs.msg import Point, PoseWithCovarianceStamped, Pose2D
import math
import yaml
import rospkg
import os
import tf
from bitbots_ros_patches.rate import Rate
# Dictionary mapping role names, as recorded by the pathmaker tool, to the
# numeric role constants published in the Strategy message.
actionDecoder = {'ROLE_IDLING': 0, 'ROLE_OTHER': 1, 'ROLE_STRIKER': 2, 'ROLE_SUPPORTER': 3, 'ROLE_DEFENDER': 4, 'ROLE_GOALIE': 5 }
# Loads the dictonary of coordinates from pathmaker
def getCoordinates(filename):
    """Load a list of pathmaker coordinates from a YAML resource file.

    :param filename: file name under ``resource/paths`` of the
        ``bitbots_live_tool_rqt`` package
    :return: the list stored under the ``"positions"`` key of the file
    """
    rp = rospkg.RosPack()
    fname = os.path.join(rp.get_path('bitbots_live_tool_rqt'), 'resource',
                         'paths', filename)
    # safe_load() avoids executing arbitrary YAML tags and the deprecated
    # default-Loader behaviour of plain yaml.load(); the file only contains
    # plain position data.  The context manager already closes the file, so
    # the explicit close() of the original was redundant.
    with open(fname, "r") as file:
        positions = yaml.safe_load(file)
    return positions.get("positions")
def vec_rotate(x, y, angle_rad):
    """Rotate the 2D vector (x, y) counter-clockwise by angle_rad radians."""
    c = math.cos(angle_rad)
    s = math.sin(angle_rad)
    return [x * c - y * s, x * s + y * c]
def publisher_main():
    """Publish a scripted live-tool test scenario on all relevant topics.

    Replays pathmaker-recorded coordinates for the ball, the own robot,
    two teammates and two opponents, and fakes strategy, game-state and
    robot-state messages at 10 Hz until ROS shuts down.
    """
    rospy.init_node('publisher')
    print('started publisher node')

    pub = rospy.Publisher('ball_relative', BallRelative, queue_size=10)
    pubRobo = rospy.Publisher('amcl_pose', PoseWithCovarianceStamped, queue_size=10)
    pubTeam = rospy.Publisher('obstacles_relative', ObstacleRelativeArray, queue_size=10)
    pubStrategy = rospy.Publisher('strategy', Strategy, queue_size=10)
    pubGame = rospy.Publisher('gamestate', GameState, queue_size=10)
    pubState = rospy.Publisher('robot_state', RobotControlState, queue_size=10)
    pubTarget = rospy.Publisher('move_base_simple/goal', Pose2D, queue_size=10)

    rate = Rate(10)

    timeCounter = 30          # seconds till the robot is unpenalized
    roboActionCounter = 30    # loop ticks until the robot action switches
    firsthalf = True
    durationHalfGame = 60     # remaining seconds of the current half

    # Coordinates from pathmaker =============================================

    # own robot
    robo1 = getCoordinates("robo4.yaml")
    robo1Length = len(robo1)
    robo1Counter = 1

    # teammates
    teammate1 = getCoordinates('TeamClubMate1.yaml')
    team1Length = len(teammate1)
    team1Counter = 1
    teammate2 = getCoordinates('TeamClubMate2.yaml')
    team2Length = len(teammate2)
    team2Counter = 1

    # opponent
    opponent1 = getCoordinates('SuperScaryOpponent.yaml')
    op1Length = len(opponent1)
    op1Counter = 1

    # undefined obstacle
    undef = getCoordinates('undef.yaml')
    # BUG FIX: the length must come from the 'undef' path itself; the
    # original took len(opponent1) here.
    undefLength = len(undef)
    undefCounter = 1

    # ball
    ball = getCoordinates('HeartBall.yaml')
    ballLength = len(ball)
    ballCounter = 1

    while not rospy.is_shutdown():
        # Ball.  NOTE(review): the pathmaker x value feeds ball_relative.y
        # and vice versa -- kept exactly as the original recording expects.
        msgBall = BallRelative()
        msgBall.header.stamp = rospy.Time.now()
        msgBall.header.frame_id = "base_link"
        msgBall.ball_relative.y = ball[ballCounter % ballLength].get('x')
        msgBall.ball_relative.x = ball[ballCounter % ballLength].get('y')
        msgBall.confidence = 1.0
        pub.publish(msgBall)
        ballCounter += 1

        # Own robot pose
        msgRobo = PoseWithCovarianceStamped()
        msgRobo.header.stamp = rospy.Time.now()
        msgRobo.pose.pose.position.x = robo1[int(robo1Counter) % robo1Length].get('x')
        msgRobo.pose.pose.position.y = robo1[int(robo1Counter) % robo1Length].get('y')

        # Orientation: recorded yaw angle converted to a quaternion
        angle = robo1[int(robo1Counter) % robo1Length].get('ang')
        quaternion = tf.transformations.quaternion_from_euler(0, 0, float(angle))
        msgRobo.pose.pose.orientation.x = quaternion[0]
        msgRobo.pose.pose.orientation.y = quaternion[1]
        msgRobo.pose.pose.orientation.z = quaternion[2]
        msgRobo.pose.pose.orientation.w = quaternion[3]
        pubRobo.publish(msgRobo)

        # Strategy: role comes from the recorded path, action toggles once
        # roboActionCounter reaches zero.
        msgStrategy = Strategy()
        msgRoleString = robo1[int(robo1Counter) % robo1Length].get('action')
        msgStrategy.role = actionDecoder.get(msgRoleString)
        msgStrategy.action = 3 if roboActionCounter == 0 else 2  # TRYING_TO_SCORE / GOING_TO_BALL
        pubStrategy.publish(msgStrategy)
        roboActionCounter = max(roboActionCounter - 1, 0)
        robo1Counter += 1

        # Obstacles: teammates (magenta), one opponent (cyan) and one
        # undefined obstacle, all published in a single array.
        # (The original also built a second, never-published
        # ObstacleRelativeArray; it has been dropped.)
        msgTeam = ObstacleRelativeArray()

        msgTeam1 = ObstacleRelative()
        msgTeam1.type = 2  # magenta
        msgTeam1.pose.pose.pose.position.x = teammate1[int(team1Counter) % team1Length].get('x')
        msgTeam1.pose.pose.pose.position.y = teammate1[int(team1Counter) % team1Length].get('y')

        msgTeam2 = ObstacleRelative()
        msgTeam2.type = 2  # magenta
        msgTeam2.pose.pose.pose.position.x = teammate2[int(team2Counter) % team2Length].get('x')
        msgTeam2.pose.pose.pose.position.y = teammate2[int(team2Counter) % team2Length].get('y')

        msgUndef = ObstacleRelative()
        msgUndef.type = 1  # undefined
        msgUndef.pose.pose.pose.position.x = undef[int(undefCounter) % undefLength].get('x')
        msgUndef.pose.pose.pose.position.y = undef[int(undefCounter) % undefLength].get('y')

        msgOp1 = ObstacleRelative()
        msgOp1.type = 3  # cyan
        msgOp1.pose.pose.pose.position.x = opponent1[int(op1Counter) % op1Length].get('x')
        msgOp1.pose.pose.pose.position.y = opponent1[int(op1Counter) % op1Length].get('y')

        msgTeam.obstacles = [msgTeam1, msgTeam2, msgOp1, msgUndef]
        pubTeam.publish(msgTeam)
        team1Counter += 1
        team2Counter += 1
        op1Counter += 1
        undefCounter += 1

        # GameState ==========================================================
        msgGame = GameState()
        # BUG FIX: the original re-stamped msgBall here; the timestamp
        # belongs on the GameState message being filled in.
        msgGame.header.stamp = rospy.Time.now()
        msgGame.secondsTillUnpenalized = timeCounter
        msgGame.penalized = timeCounter > 0
        msgGame.firstHalf = firsthalf
        msgGame.secondsRemaining = durationHalfGame
        msgGame.ownScore = 7
        msgGame.rivalScore = 1
        msgGame.teamColor = 1  # magenta
        pubGame.publish(msgGame)

        timeCounter = max(timeCounter - 1, 0)
        durationHalfGame -= 1
        if durationHalfGame == 0:
            # Restart the clock and switch to the second half.
            durationHalfGame = 60
            firsthalf = False

        # Hardware / robot control state
        msgState = RobotControlState()
        msgState.state = 10
        pubState.publish(msgState)

        # Navigation target, different per half
        msgTarget = Pose2D()
        if firsthalf:
            msgTarget.x = 3.5
            msgTarget.y = 2.0
        else:
            msgTarget.x = 2.0
            msgTarget.y = 1.0
        pubTarget.publish(msgTarget)

        rate.sleep()
# Run the scripted publisher; a ROSInterruptException raised on shutdown is
# expected and silently ignored.
if __name__ == '__main__':
    try:
        publisher_main()
    except rospy.ROSInterruptException:
        pass
a1e6756783f193ffd7adc8bf89112786f02239e0 | 27,177 | py | Python | src/engine/SCons/UtilTests.py | azverkan/scons | 704ddb9270e14c7771d0c58c04c7afa7bc009603 | [
"MIT"
] | 1 | 2015-11-04T22:22:10.000Z | 2015-11-04T22:22:10.000Z | src/engine/SCons/UtilTests.py | azverkan/scons | 704ddb9270e14c7771d0c58c04c7afa7bc009603 | [
"MIT"
] | null | null | null | src/engine/SCons/UtilTests.py | azverkan/scons | 704ddb9270e14c7771d0c58c04c7afa7bc009603 | [
"MIT"
] | null | null | null | #
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons.compat
import io
import os
import sys
import unittest
from collections import UserDict, UserList, UserString
import TestCmd
import SCons.Errors
from SCons.Util import *
# Detect whether this interpreter still provides the Python 2 'unicode'
# builtin; the tests below use it to guard Python-2-only assertions.
try: eval('unicode')
except NameError: HasUnicode = False
else: HasUnicode = True
class OutBuffer(object):
    """Minimal file-like object that accumulates everything written to it.

    Used by the tests to replace sys.stdout and inspect printed output via
    the ``buffer`` attribute.
    """
    def __init__(self):
        self.buffer = ""
    def write(self, text):
        # Parameter renamed from 'str' (which shadowed the builtin); the
        # file-like write() API is always called positionally.
        self.buffer = self.buffer + text
class dictifyTestCase(unittest.TestCase):
    def test_dictify(self):
        """Test the dictify() function"""
        expected = {'a': 1, 'b': 2, 'c': 3}

        # Building a fresh dict from parallel key/value lists.
        result = SCons.Util.dictify(['a', 'b', 'c'], [1, 2, 3])
        assert result == expected, result

        # Accumulating into a caller-supplied dict, one pair at a time.
        result = {}
        for key, value in [('a', 1), ('b', 2), ('c', 3)]:
            SCons.Util.dictify([key], [value], result)
        assert result == expected, result
class UtilTestCase(unittest.TestCase):
def test_splitext(self):
assert splitext('foo') == ('foo','')
assert splitext('foo.bar') == ('foo','.bar')
assert splitext(os.path.join('foo.bar', 'blat')) == (os.path.join('foo.bar', 'blat'),'')
class Node(object):
def __init__(self, name, children=[]):
self.children = children
self.name = name
self.nocache = None
def __str__(self):
return self.name
def exists(self):
return 1
def rexists(self):
return 1
def has_builder(self):
return 1
def has_explicit_builder(self):
return 1
def side_effect(self):
return 1
def precious(self):
return 1
def always_build(self):
return 1
def is_up_to_date(self):
return 1
def noclean(self):
return 1
def tree_case_1(self):
"""Fixture for the render_tree() and print_tree() tests."""
windows_h = self.Node("windows.h")
stdlib_h = self.Node("stdlib.h")
stdio_h = self.Node("stdio.h")
bar_c = self.Node("bar.c", [stdlib_h, windows_h])
bar_o = self.Node("bar.o", [bar_c])
foo_c = self.Node("foo.c", [stdio_h])
foo_o = self.Node("foo.o", [foo_c])
foo = self.Node("foo", [foo_o, bar_o])
expect = """\
+-foo
+-foo.o
| +-foo.c
| +-stdio.h
+-bar.o
+-bar.c
+-stdlib.h
+-windows.h
"""
lines = expect.split('\n')[:-1]
lines = ['[E BSPACN ]'+l for l in lines]
withtags = '\n'.join(lines) + '\n'
return foo, expect, withtags
def tree_case_2(self, prune=1):
"""Fixture for the render_tree() and print_tree() tests."""
stdlib_h = self.Node("stdlib.h")
bar_h = self.Node('bar.h', [stdlib_h])
blat_h = self.Node('blat.h', [stdlib_h])
blat_c = self.Node('blat.c', [blat_h, bar_h])
blat_o = self.Node('blat.o', [blat_c])
expect = """\
+-blat.o
+-blat.c
+-blat.h
| +-stdlib.h
+-bar.h
+-[stdlib.h]
"""
if not prune:
expect = expect.replace('[', '')
expect = expect.replace(']', '')
lines = expect.split('\n')[:-1]
lines = ['[E BSPACN ]'+l for l in lines]
withtags = '\n'.join(lines) + '\n'
return blat_o, expect, withtags
def test_render_tree(self):
"""Test the render_tree() function"""
def get_children(node):
return node.children
node, expect, withtags = self.tree_case_1()
actual = render_tree(node, get_children)
assert expect == actual, (expect, actual)
node, expect, withtags = self.tree_case_2()
actual = render_tree(node, get_children, 1)
assert expect == actual, (expect, actual)
def test_print_tree(self):
"""Test the print_tree() function"""
def get_children(node):
return node.children
save_stdout = sys.stdout
try:
node, expect, withtags = self.tree_case_1()
sys.stdout = io.StringIO()
print_tree(node, get_children)
actual = sys.stdout.getvalue()
assert expect == actual, (expect, actual)
sys.stdout = io.StringIO()
print_tree(node, get_children, showtags=1)
actual = sys.stdout.getvalue()
assert withtags == actual, (withtags, actual)
node, expect, withtags = self.tree_case_2(prune=0)
sys.stdout = io.StringIO()
print_tree(node, get_children, 1)
actual = sys.stdout.getvalue()
assert expect == actual, (expect, actual)
sys.stdout = io.StringIO()
# The following call should work here:
# print_tree(node, get_children, 1, showtags=1)
# For some reason I don't understand, though, *this*
# time that we call print_tree, the visited dictionary
# is still populated with the values from the last call!
# I can't see why this would be, short of a bug in Python,
# and rather than continue banging my head against the
# brick wall for a *test*, we're going to going with
# the cheap, easy workaround:
print_tree(node, get_children, 1, showtags=1, visited={})
actual = sys.stdout.getvalue()
assert withtags == actual, (withtags, actual)
finally:
sys.stdout = save_stdout
def test_is_Dict(self):
assert is_Dict({})
assert is_Dict(UserDict())
assert is_Dict(os.environ)
try:
class mydict(dict):
pass
except TypeError:
pass
else:
assert is_Dict(mydict({}))
assert not is_Dict([])
assert not is_Dict(())
assert not is_Dict("")
if HasUnicode:
exec "assert not is_Dict(u'')"
def test_is_List(self):
assert is_List([])
assert is_List(UserList())
try:
class mylist(list):
pass
except TypeError:
pass
else:
assert is_List(mylist([]))
assert not is_List(())
assert not is_List({})
assert not is_List("")
if HasUnicode:
exec "assert not is_List(u'')"
def test_is_String(self):
assert is_String("")
if HasUnicode:
exec "assert is_String(u'')"
assert is_String(UserString(''))
try:
class mystr(str):
pass
except TypeError:
pass
else:
assert is_String(mystr(''))
assert not is_String({})
assert not is_String([])
assert not is_String(())
def test_is_Tuple(self):
assert is_Tuple(())
try:
class mytuple(tuple):
pass
except TypeError:
pass
else:
assert is_Tuple(mytuple(()))
assert not is_Tuple([])
assert not is_Tuple({})
assert not is_Tuple("")
if HasUnicode:
exec "assert not is_Tuple(u'')"
def test_to_String(self):
"""Test the to_String() method."""
assert to_String(1) == "1", to_String(1)
assert to_String([ 1, 2, 3]) == str([1, 2, 3]), to_String([1,2,3])
assert to_String("foo") == "foo", to_String("foo")
s1=UserString('blah')
assert to_String(s1) == s1, s1
assert to_String(s1) == 'blah', s1
class Derived(UserString):
pass
s2 = Derived('foo')
assert to_String(s2) == s2, s2
assert to_String(s2) == 'foo', s2
if HasUnicode:
s3=UserString(unicode('bar'))
assert to_String(s3) == s3, s3
assert to_String(s3) == unicode('bar'), s3
assert isinstance(to_String(s3), unicode), \
type(to_String(s3))
if HasUnicode:
s4 = unicode('baz')
assert to_String(s4) == unicode('baz'), to_String(s4)
assert isinstance(to_String(s4), unicode), \
type(to_String(s4))
def test_WhereIs(self):
test = TestCmd.TestCmd(workdir = '')
sub1_xxx_exe = test.workpath('sub1', 'xxx.exe')
sub2_xxx_exe = test.workpath('sub2', 'xxx.exe')
sub3_xxx_exe = test.workpath('sub3', 'xxx.exe')
sub4_xxx_exe = test.workpath('sub4', 'xxx.exe')
test.subdir('subdir', 'sub1', 'sub2', 'sub3', 'sub4')
if sys.platform != 'win32':
test.write(sub1_xxx_exe, "\n")
os.mkdir(sub2_xxx_exe)
test.write(sub3_xxx_exe, "\n")
os.chmod(sub3_xxx_exe, 0777)
test.write(sub4_xxx_exe, "\n")
os.chmod(sub4_xxx_exe, 0777)
env_path = os.environ['PATH']
try:
pathdirs_1234 = [ test.workpath('sub1'),
test.workpath('sub2'),
test.workpath('sub3'),
test.workpath('sub4'),
] + env_path.split(os.pathsep)
pathdirs_1243 = [ test.workpath('sub1'),
test.workpath('sub2'),
test.workpath('sub4'),
test.workpath('sub3'),
] + env_path.split(os.pathsep)
os.environ['PATH'] = os.pathsep.join(pathdirs_1234)
wi = WhereIs('xxx.exe')
assert wi == test.workpath(sub3_xxx_exe), wi
wi = WhereIs('xxx.exe', pathdirs_1243)
assert wi == test.workpath(sub4_xxx_exe), wi
wi = WhereIs('xxx.exe', os.pathsep.join(pathdirs_1243))
assert wi == test.workpath(sub4_xxx_exe), wi
wi = WhereIs('xxx.exe',reject = sub3_xxx_exe)
assert wi == test.workpath(sub4_xxx_exe), wi
wi = WhereIs('xxx.exe', pathdirs_1243, reject = sub3_xxx_exe)
assert wi == test.workpath(sub4_xxx_exe), wi
os.environ['PATH'] = os.pathsep.join(pathdirs_1243)
wi = WhereIs('xxx.exe')
assert wi == test.workpath(sub4_xxx_exe), wi
wi = WhereIs('xxx.exe', pathdirs_1234)
assert wi == test.workpath(sub3_xxx_exe), wi
wi = WhereIs('xxx.exe', os.pathsep.join(pathdirs_1234))
assert wi == test.workpath(sub3_xxx_exe), wi
if sys.platform == 'win32':
wi = WhereIs('xxx', pathext = '')
assert wi is None, wi
wi = WhereIs('xxx', pathext = '.exe')
assert wi == test.workpath(sub4_xxx_exe), wi
wi = WhereIs('xxx', path = pathdirs_1234, pathext = '.BAT;.EXE')
assert wi.lower() == test.workpath(sub3_xxx_exe).lower(), wi
# Test that we return a normalized path even when
# the path contains forward slashes.
forward_slash = test.workpath('') + '/sub3'
wi = WhereIs('xxx', path = forward_slash, pathext = '.EXE')
assert wi.lower() == test.workpath(sub3_xxx_exe).lower(), wi
del os.environ['PATH']
wi = WhereIs('xxx.exe')
assert wi is None, wi
finally:
os.environ['PATH'] = env_path
def test_get_env_var(self):
"""Testing get_environment_var()."""
assert get_environment_var("$FOO") == "FOO", get_environment_var("$FOO")
assert get_environment_var("${BAR}") == "BAR", get_environment_var("${BAR}")
assert get_environment_var("$FOO_BAR1234") == "FOO_BAR1234", get_environment_var("$FOO_BAR1234")
assert get_environment_var("${BAR_FOO1234}") == "BAR_FOO1234", get_environment_var("${BAR_FOO1234}")
assert get_environment_var("${BAR}FOO") == None, get_environment_var("${BAR}FOO")
assert get_environment_var("$BAR ") == None, get_environment_var("$BAR ")
assert get_environment_var("FOO$BAR") == None, get_environment_var("FOO$BAR")
assert get_environment_var("$FOO[0]") == None, get_environment_var("$FOO[0]")
assert get_environment_var("${some('complex expression')}") == None, get_environment_var("${some('complex expression')}")
def test_Proxy(self):
"""Test generic Proxy class."""
class Subject(object):
def foo(self):
return 1
def bar(self):
return 2
s=Subject()
s.baz = 3
class ProxyTest(Proxy):
def bar(self):
return 4
p=ProxyTest(s)
assert p.foo() == 1, p.foo()
assert p.bar() == 4, p.bar()
assert p.baz == 3, p.baz
p.baz = 5
s.baz = 6
assert p.baz == 5, p.baz
assert p.get() == s, p.get()
def test_display(self):
old_stdout = sys.stdout
sys.stdout = OutBuffer()
display("line1")
display.set_mode(0)
display("line2")
display.set_mode(1)
display("line3")
display("line4\n", append_newline=0)
display.set_mode(0)
display("dont print1")
display("dont print2\n", append_newline=0)
display.set_mode(1)
assert sys.stdout.buffer == "line1\nline3\nline4\n"
sys.stdout = old_stdout
def test_get_native_path(self):
"""Test the get_native_path() function."""
import tempfile
filename = tempfile.mktemp()
str = '1234567890 ' + filename
try:
open(filename, 'w').write(str)
assert open(get_native_path(filename)).read() == str
finally:
try:
os.unlink(filename)
except OSError:
pass
def test_PrependPath(self):
"""Test prepending to a path"""
p1 = r'C:\dir\num\one;C:\dir\num\two'
p2 = r'C:\mydir\num\one;C:\mydir\num\two'
# have to include the pathsep here so that the test will work on UNIX too.
p1 = PrependPath(p1,r'C:\dir\num\two',sep = ';')
p1 = PrependPath(p1,r'C:\dir\num\three',sep = ';')
p2 = PrependPath(p2,r'C:\mydir\num\three',sep = ';')
p2 = PrependPath(p2,r'C:\mydir\num\one',sep = ';')
assert(p1 == r'C:\dir\num\three;C:\dir\num\two;C:\dir\num\one')
assert(p2 == r'C:\mydir\num\one;C:\mydir\num\three;C:\mydir\num\two')
def test_AppendPath(self):
"""Test appending to a path."""
p1 = r'C:\dir\num\one;C:\dir\num\two'
p2 = r'C:\mydir\num\one;C:\mydir\num\two'
# have to include the pathsep here so that the test will work on UNIX too.
p1 = AppendPath(p1,r'C:\dir\num\two',sep = ';')
p1 = AppendPath(p1,r'C:\dir\num\three',sep = ';')
p2 = AppendPath(p2,r'C:\mydir\num\three',sep = ';')
p2 = AppendPath(p2,r'C:\mydir\num\one',sep = ';')
assert(p1 == r'C:\dir\num\one;C:\dir\num\two;C:\dir\num\three')
assert(p2 == r'C:\mydir\num\two;C:\mydir\num\three;C:\mydir\num\one')
def test_PrependPathPreserveOld(self):
"""Test prepending to a path while preserving old paths"""
p1 = r'C:\dir\num\one;C:\dir\num\two'
# have to include the pathsep here so that the test will work on UNIX too.
p1 = PrependPath(p1,r'C:\dir\num\two',sep = ';', delete_existing=0)
p1 = PrependPath(p1,r'C:\dir\num\three',sep = ';')
assert(p1 == r'C:\dir\num\three;C:\dir\num\one;C:\dir\num\two')
def test_AppendPathPreserveOld(self):
"""Test appending to a path while preserving old paths"""
p1 = r'C:\dir\num\one;C:\dir\num\two'
# have to include the pathsep here so that the test will work on UNIX too.
p1 = AppendPath(p1,r'C:\dir\num\one',sep = ';', delete_existing=0)
p1 = AppendPath(p1,r'C:\dir\num\three',sep = ';')
assert(p1 == r'C:\dir\num\one;C:\dir\num\two;C:\dir\num\three')
def test_CLVar(self):
"""Test the command-line construction variable class"""
f = SCons.Util.CLVar('a b')
r = f + 'c d'
assert isinstance(r, SCons.Util.CLVar), type(r)
assert r.data == ['a', 'b', 'c', 'd'], r.data
assert str(r) == 'a b c d', str(r)
r = f + ' c d'
assert isinstance(r, SCons.Util.CLVar), type(r)
assert r.data == ['a', 'b', 'c', 'd'], r.data
assert str(r) == 'a b c d', str(r)
r = f + ['c d']
assert isinstance(r, SCons.Util.CLVar), type(r)
assert r.data == ['a', 'b', 'c d'], r.data
assert str(r) == 'a b c d', str(r)
r = f + [' c d']
assert isinstance(r, SCons.Util.CLVar), type(r)
assert r.data == ['a', 'b', ' c d'], r.data
assert str(r) == 'a b c d', str(r)
r = f + ['c', 'd']
assert isinstance(r, SCons.Util.CLVar), type(r)
assert r.data == ['a', 'b', 'c', 'd'], r.data
assert str(r) == 'a b c d', str(r)
r = f + [' c', 'd']
assert isinstance(r, SCons.Util.CLVar), type(r)
assert r.data == ['a', 'b', ' c', 'd'], r.data
assert str(r) == 'a b c d', str(r)
f = SCons.Util.CLVar(['a b'])
r = f + 'c d'
assert isinstance(r, SCons.Util.CLVar), type(r)
assert r.data == ['a b', 'c', 'd'], r.data
assert str(r) == 'a b c d', str(r)
r = f + ' c d'
assert isinstance(r, SCons.Util.CLVar), type(r)
assert r.data == ['a b', 'c', 'd'], r.data
assert str(r) == 'a b c d', str(r)
r = f + ['c d']
assert isinstance(r, SCons.Util.CLVar), type(r)
assert r.data == ['a b', 'c d'], r.data
assert str(r) == 'a b c d', str(r)
r = f + [' c d']
assert isinstance(r, SCons.Util.CLVar), type(r)
assert r.data == ['a b', ' c d'], r.data
assert str(r) == 'a b c d', str(r)
r = f + ['c', 'd']
assert isinstance(r, SCons.Util.CLVar), type(r)
assert r.data == ['a b', 'c', 'd'], r.data
assert str(r) == 'a b c d', str(r)
r = f + [' c', 'd']
assert isinstance(r, SCons.Util.CLVar), type(r)
assert r.data == ['a b', ' c', 'd'], r.data
assert str(r) == 'a b c d', str(r)
f = SCons.Util.CLVar(['a', 'b'])
r = f + 'c d'
assert isinstance(r, SCons.Util.CLVar), type(r)
assert r.data == ['a', 'b', 'c', 'd'], r.data
assert str(r) == 'a b c d', str(r)
r = f + ' c d'
assert isinstance(r, SCons.Util.CLVar), type(r)
assert r.data == ['a', 'b', 'c', 'd'], r.data
assert str(r) == 'a b c d', str(r)
r = f + ['c d']
assert isinstance(r, SCons.Util.CLVar), type(r)
assert r.data == ['a', 'b', 'c d'], r.data
assert str(r) == 'a b c d', str(r)
r = f + [' c d']
assert isinstance(r, SCons.Util.CLVar), type(r)
assert r.data == ['a', 'b', ' c d'], r.data
assert str(r) == 'a b c d', str(r)
r = f + ['c', 'd']
assert isinstance(r, SCons.Util.CLVar), type(r)
assert r.data == ['a', 'b', 'c', 'd'], r.data
assert str(r) == 'a b c d', str(r)
r = f + [' c', 'd']
assert isinstance(r, SCons.Util.CLVar), type(r)
assert r.data == ['a', 'b', ' c', 'd'], r.data
assert str(r) == 'a b c d', str(r)
def test_Selector(self):
"""Test the Selector class"""
class MyNode(object):
def __init__(self, name):
self.name = name
self.suffix = os.path.splitext(name)[1]
def __str__(self):
return self.name
s = Selector({'a' : 'AAA', 'b' : 'BBB'})
assert s['a'] == 'AAA', s['a']
assert s['b'] == 'BBB', s['b']
exc_caught = None
try:
x = s['c']
except KeyError:
exc_caught = 1
assert exc_caught, "should have caught a KeyError"
s['c'] = 'CCC'
assert s['c'] == 'CCC', s['c']
class DummyEnv(UserDict):
def subst(self, key):
if key[0] == '$':
return self[key[1:]]
return key
env = DummyEnv()
s = Selector({'.d' : 'DDD', '.e' : 'EEE'})
ret = s(env, [])
assert ret is None, ret
ret = s(env, [MyNode('foo.d')])
assert ret == 'DDD', ret
ret = s(env, [MyNode('bar.e')])
assert ret == 'EEE', ret
ret = s(env, [MyNode('bar.x')])
assert ret is None, ret
s[None] = 'XXX'
ret = s(env, [MyNode('bar.x')])
assert ret == 'XXX', ret
env = DummyEnv({'FSUFF' : '.f', 'GSUFF' : '.g'})
s = Selector({'$FSUFF' : 'FFF', '$GSUFF' : 'GGG'})
ret = s(env, [MyNode('foo.f')])
assert ret == 'FFF', ret
ret = s(env, [MyNode('bar.g')])
assert ret == 'GGG', ret
def test_adjustixes(self):
"""Test the adjustixes() function"""
r = adjustixes('file', 'pre-', '-suf')
assert r == 'pre-file-suf', r
r = adjustixes('pre-file', 'pre-', '-suf')
assert r == 'pre-file-suf', r
r = adjustixes('file-suf', 'pre-', '-suf')
assert r == 'pre-file-suf', r
r = adjustixes('pre-file-suf', 'pre-', '-suf')
assert r == 'pre-file-suf', r
r = adjustixes('pre-file.xxx', 'pre-', '-suf')
assert r == 'pre-file.xxx', r
r = adjustixes('dir/file', 'pre-', '-suf')
assert r == os.path.join('dir', 'pre-file-suf'), r
def test_containsAny(self):
"""Test the containsAny() function"""
assert containsAny('*.py', '*?[]')
assert not containsAny('file.txt', '*?[]')
def test_containsAll(self):
"""Test the containsAll() function"""
assert containsAll('43221', '123')
assert not containsAll('134', '123')
def test_containsOnly(self):
"""Test the containsOnly() function"""
assert containsOnly('.83', '0123456789.')
assert not containsOnly('43221', '123')
def test_LogicalLines(self):
"""Test the LogicalLines class"""
content = r"""
foo \
bar \
baz
foo
bling \
bling \ bling
bling
"""
try:
fobj = io.StringIO(content)
except TypeError:
# Python 2.7 and beyond require unicode strings.
fobj = io.StringIO(unicode(content))
lines = LogicalLines(fobj).readlines()
assert lines == [
'\n',
'foo bar baz\n',
'foo\n',
'bling bling \\ bling\n',
'bling\n',
], lines
def test_intern(self):
s1 = silent_intern("spam")
# Python 3.x does not have a unicode() global function
if sys.version[0] == '2':
s2 = silent_intern(unicode("unicode spam"))
s3 = silent_intern(42)
s4 = silent_intern("spam")
assert id(s1) == id(s4)
class MD5TestCase(unittest.TestCase):
    def test_collect(self):
        """Test collecting a list of signatures into a new signature value
        """
        signatures = [MD5signature(v) for v in ('111', '222', '333')]
        # Collecting a single signature, a pair, and all three each yields
        # a distinct, stable digest.
        assert MD5collect(signatures[0:1]) == '698d51a19d8a121ce581499d7b701668'
        assert MD5collect(signatures[0:2]) == '8980c988edc2c78cc43ccb718c06efd5'
        assert MD5collect(signatures) == '53fd88c84ff8a285eb6e0a687e55b8c7'

    def test_MD5signature(self):
        """Test generating a signature"""
        for value, expected in (('111', '698d51a19d8a121ce581499d7b701668'),
                                ('222', 'bcbe3365e6ac95ea2c0343a2395834dd')):
            s = MD5signature(value)
            assert s == expected, s
class NodeListTestCase(unittest.TestCase):
    def test_simple_attributes(self):
        """Test simple attributes of a NodeList class"""
        class TestClass(object):
            def __init__(self, name, child=None):
                self.child = child
                self.bar = name

        nodes = [TestClass('t1', TestClass('t1child')),
                 TestClass('t2', TestClass('t2child')),
                 TestClass('t3')]
        nl = NodeList(nodes)

        # Attribute access is forwarded to every element of the list.
        assert nl.bar == ['t1', 't2', 't3'], nl.bar
        # Slicing returns a NodeList, so attribute access still chains.
        assert nl[0:2].child.bar == ['t1child', 't2child'], \
               nl[0:2].child.bar

    def test_callable_attributes(self):
        """Test callable attributes of a NodeList class"""
        class TestClass(object):
            def __init__(self, name, child=None):
                self.child = child
                self.bar = name

            def foo(self):
                return self.bar + "foo"

            def getself(self):
                return self

        nodes = [TestClass('t1', TestClass('t1child')),
                 TestClass('t2', TestClass('t2child')),
                 TestClass('t3')]
        nl = NodeList(nodes)

        # Method calls are likewise applied element-wise.
        assert nl.foo() == ['t1foo', 't2foo', 't3foo'], nl.foo()
        assert nl.bar == ['t1', 't2', 't3'], nl.bar
        assert nl.getself().bar == ['t1', 't2', 't3'], nl.getself().bar
        assert nl[0:2].child.foo() == ['t1childfoo', 't2childfoo'], \
               nl[0:2].child.foo()
        assert nl[0:2].child.bar == ['t1child', 't2child'], \
               nl[0:2].child.bar

    def test_null(self):
        """Test a null NodeList"""
        nl = NodeList([])
        rendered = str(nl)
        assert rendered == '', rendered
        for node in nl:
            raise Exception("should not enter this loop")
class flattenTestCase(unittest.TestCase):
    def test_scalar(self):
        """Test flattening a scalar"""
        # A bare scalar comes back wrapped in a one-element list.
        flattened = flatten('xyz')
        assert flattened == ['xyz'], flattened
if __name__ == "__main__":
    # Build one suite out of every test case class in this module and exit
    # non-zero if any test fails.
    suite = unittest.TestSuite()
    for tclass in (dictifyTestCase,
                   flattenTestCase,
                   MD5TestCase,
                   NodeListTestCase,
                   UtilTestCase):
        for name in unittest.getTestCaseNames(tclass, 'test_'):
            suite.addTest(tclass(name))
    if not unittest.TextTestRunner().run(suite).wasSuccessful():
        sys.exit(1)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 33.97125 | 129 | 0.534202 |
d2b22c7a4c2e3909dbc2895f29ce01add87606ce | 134,197 | py | Python | pandas/tests/plotting/test_frame.py | Ethanator/pandas | 229722e6a24b9658a27fc06b617701ad02aaa435 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2020-10-08T00:29:41.000Z | 2020-10-08T00:29:41.000Z | pandas/tests/plotting/test_frame.py | Ethanator/pandas | 229722e6a24b9658a27fc06b617701ad02aaa435 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/tests/plotting/test_frame.py | Ethanator/pandas | 229722e6a24b9658a27fc06b617701ad02aaa435 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | """ Test cases for DataFrame.plot """
from datetime import date, datetime
import itertools
import string
import warnings
import numpy as np
from numpy.random import rand, randn
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.api import is_list_like
import pandas as pd
from pandas import DataFrame, MultiIndex, PeriodIndex, Series, bdate_range, date_range
import pandas._testing as tm
from pandas.core.arrays import integer_array
from pandas.tests.plotting.common import TestPlotBase, _check_plot_works
from pandas.io.formats.printing import pprint_thing
import pandas.plotting as plotting
@td.skip_if_no_mpl
class TestDataFramePlots(TestPlotBase):
def setup_method(self, method):
TestPlotBase.setup_method(self, method)
import matplotlib as mpl
mpl.rcdefaults()
self.tdf = tm.makeTimeDataFrame()
self.hexbin_df = DataFrame(
{
"A": np.random.uniform(size=20),
"B": np.random.uniform(size=20),
"C": np.arange(20) + np.random.uniform(size=20),
}
)
def _assert_ytickslabels_visibility(self, axes, expected):
for ax, exp in zip(axes, expected):
self._check_visible(ax.get_yticklabels(), visible=exp)
def _assert_xtickslabels_visibility(self, axes, expected):
for ax, exp in zip(axes, expected):
self._check_visible(ax.get_xticklabels(), visible=exp)
    @pytest.mark.xfail(reason="Waiting for PR 34334", strict=True)
    @pytest.mark.slow
    def test_plot(self):
        # Broad smoke test of the DataFrame.plot entry point across many
        # keyword combinations (subplots, layout, ticks, limits, titles).
        from pandas.plotting._matplotlib.compat import mpl_ge_3_1_0

        df = self.tdf
        _check_plot_works(df.plot, grid=False)

        # _check_plot_works adds an ax so catch warning. see GH #13188
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.plot, subplots=True)
        self._check_axes_shape(axes, axes_num=4, layout=(4, 1))

        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.plot, subplots=True, layout=(-1, 2))
        self._check_axes_shape(axes, axes_num=4, layout=(2, 2))

        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.plot, subplots=True, use_index=False)
        self._check_axes_shape(axes, axes_num=4, layout=(4, 1))

        df = DataFrame({"x": [1, 2], "y": [3, 4]})
        # matplotlib >= 3.1 changed the wording of the unknown-property error.
        if mpl_ge_3_1_0():
            msg = "'Line2D' object has no property 'blarg'"
        else:
            msg = "Unknown property blarg"
        with pytest.raises(AttributeError, match=msg):
            df.plot.line(blarg=True)

        df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
        _check_plot_works(df.plot, use_index=True)
        _check_plot_works(df.plot, sort_columns=False)
        _check_plot_works(df.plot, yticks=[1, 5, 10])
        _check_plot_works(df.plot, xticks=[1, 5, 10])
        _check_plot_works(df.plot, ylim=(-100, 100), xlim=(-100, 100))

        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.plot, subplots=True, title="blah")

        # We have to redo it here because _check_plot_works does two plots,
        # once without an ax kwarg and once with an ax kwarg and the new sharex
        # behaviour does not remove the visibility of the latter axis (as ax is
        # present). see: https://github.com/pandas-dev/pandas/issues/9737
        axes = df.plot(subplots=True, title="blah")
        self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
        # axes[0].figure.savefig("test.png")
        for ax in axes[:2]:
            self._check_visible(ax.xaxis)  # xaxis must be visible for grid
            self._check_visible(ax.get_xticklabels(), visible=False)
            self._check_visible(ax.get_xticklabels(minor=True), visible=False)
            self._check_visible([ax.xaxis.get_label()], visible=False)
        for ax in [axes[2]]:
            self._check_visible(ax.xaxis)
            self._check_visible(ax.get_xticklabels())
            self._check_visible([ax.xaxis.get_label()])
            self._check_ticks_props(ax, xrot=0)

        _check_plot_works(df.plot, title="blah")

        # MultiIndex rows plot via use_index.
        tuples = zip(string.ascii_letters[:10], range(10))
        df = DataFrame(np.random.rand(10, 3), index=MultiIndex.from_tuples(tuples))
        _check_plot_works(df.plot, use_index=True)

        # unicode
        index = MultiIndex.from_tuples(
            [
                ("\u03b1", 0),
                ("\u03b1", 1),
                ("\u03b2", 2),
                ("\u03b2", 3),
                ("\u03b3", 4),
                ("\u03b3", 5),
                ("\u03b4", 6),
                ("\u03b4", 7),
            ],
            names=["i0", "i1"],
        )
        columns = MultiIndex.from_tuples(
            [("bar", "\u0394"), ("bar", "\u0395")], names=["c0", "c1"]
        )
        df = DataFrame(np.random.randint(0, 10, (8, 2)), columns=columns, index=index)
        _check_plot_works(df.plot, title="\u03A3")

        # GH 6951
        # Test with single column
        df = DataFrame({"x": np.random.rand(10)})
        axes = _check_plot_works(df.plot.bar, subplots=True)
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))

        axes = _check_plot_works(df.plot.bar, subplots=True, layout=(-1, 1))
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))

        # When ax is supplied and required number of axes is 1,
        # passed ax should be used:
        fig, ax = self.plt.subplots()
        axes = df.plot.bar(subplots=True, ax=ax)
        assert len(axes) == 1
        result = ax.axes
        assert result is axes[0]
def test_integer_array_plot(self):
# GH 25587
arr = integer_array([1, 2, 3, 4], dtype="UInt32")
s = Series(arr)
_check_plot_works(s.plot.line)
_check_plot_works(s.plot.bar)
_check_plot_works(s.plot.hist)
_check_plot_works(s.plot.pie)
df = DataFrame({"x": arr, "y": arr})
_check_plot_works(df.plot.line)
_check_plot_works(df.plot.bar)
_check_plot_works(df.plot.hist)
_check_plot_works(df.plot.pie, y="y")
_check_plot_works(df.plot.scatter, x="x", y="y")
_check_plot_works(df.plot.hexbin, x="x", y="y")
def test_mpl2_color_cycle_str(self):
# GH 15516
colors = ["C" + str(x) for x in range(10)]
df = DataFrame(randn(10, 3), columns=["a", "b", "c"])
for c in colors:
_check_plot_works(df.plot, color=c)
def test_color_single_series_list(self):
# GH 3486
df = DataFrame({"A": [1, 2, 3]})
_check_plot_works(df.plot, color=["red"])
def test_rgb_tuple_color(self):
# GH 16695
df = DataFrame({"x": [1, 2], "y": [3, 4]})
_check_plot_works(df.plot, x="x", y="y", color=(1, 0, 0))
_check_plot_works(df.plot, x="x", y="y", color=(1, 0, 0, 0.5))
def test_color_empty_string(self):
df = DataFrame(randn(10, 2))
with pytest.raises(ValueError):
df.plot(color="")
def test_color_and_style_arguments(self):
df = DataFrame({"x": [1, 2], "y": [3, 4]})
# passing both 'color' and 'style' arguments should be allowed
# if there is no color symbol in the style strings:
ax = df.plot(color=["red", "black"], style=["-", "--"])
# check that the linestyles are correctly set:
linestyle = [line.get_linestyle() for line in ax.lines]
assert linestyle == ["-", "--"]
# check that the colors are correctly set:
color = [line.get_color() for line in ax.lines]
assert color == ["red", "black"]
# passing both 'color' and 'style' arguments should not be allowed
# if there is a color symbol in the style strings:
with pytest.raises(ValueError):
df.plot(color=["red", "black"], style=["k-", "r--"])
@pytest.mark.parametrize(
"color, expected",
[
("green", ["green"] * 4),
(["yellow", "red", "green", "blue"], ["yellow", "red", "green", "blue"]),
],
)
def test_color_and_marker(self, color, expected):
# GH 21003
df = DataFrame(np.random.random((7, 4)))
ax = df.plot(color=color, style="d--")
# check colors
result = [i.get_color() for i in ax.lines]
assert result == expected
# check markers and linestyles
assert all(i.get_linestyle() == "--" for i in ax.lines)
assert all(i.get_marker() == "d" for i in ax.lines)
def test_nonnumeric_exclude(self):
df = DataFrame({"A": ["x", "y", "z"], "B": [1, 2, 3]})
ax = df.plot()
assert len(ax.get_lines()) == 1 # B was plotted
@pytest.mark.slow
def test_implicit_label(self):
df = DataFrame(randn(10, 3), columns=["a", "b", "c"])
ax = df.plot(x="a", y="b")
self._check_text_labels(ax.xaxis.get_label(), "a")
@pytest.mark.slow
def test_donot_overwrite_index_name(self):
# GH 8494
df = DataFrame(randn(2, 2), columns=["a", "b"])
df.index.name = "NAME"
df.plot(y="b", label="LABEL")
assert df.index.name == "NAME"
    @pytest.mark.slow
    def test_plot_xy(self):
        # Plotting with x=/y= (by position or by label) must match plotting
        # the equivalent set_index()-derived object directly.
        # columns.inferred_type == 'string'
        df = self.tdf
        self._check_data(df.plot(x=0, y=1), df.set_index("A")["B"].plot())
        self._check_data(df.plot(x=0), df.set_index("A").plot())
        self._check_data(df.plot(y=0), df.B.plot())
        self._check_data(df.plot(x="A", y="B"), df.set_index("A").B.plot())
        self._check_data(df.plot(x="A"), df.set_index("A").plot())
        self._check_data(df.plot(y="B"), df.B.plot())

        # columns.inferred_type == 'integer'
        df.columns = np.arange(1, len(df.columns) + 1)
        self._check_data(df.plot(x=1, y=2), df.set_index(1)[2].plot())
        self._check_data(df.plot(x=1), df.set_index(1).plot())
        self._check_data(df.plot(y=1), df[1].plot())

        # figsize and title
        ax = df.plot(x=1, y=2, title="Test", figsize=(16, 8))
        self._check_text_labels(ax.title, "Test")
        self._check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16.0, 8.0))

        # columns.inferred_type == 'mixed'
        # TODO add MultiIndex test
    @pytest.mark.slow
    @pytest.mark.parametrize(
        "input_log, expected_log", [(True, "log"), ("sym", "symlog")]
    )
    def test_logscales(self, input_log, expected_log):
        # logy/logx/loglog accept True (log scale) or "sym" (symlog scale)
        # and must set the scale of the corresponding axis (or both).
        df = DataFrame({"a": np.arange(100)}, index=np.arange(100))

        ax = df.plot(logy=input_log)
        self._check_ax_scales(ax, yaxis=expected_log)
        assert ax.get_yscale() == expected_log

        ax = df.plot(logx=input_log)
        self._check_ax_scales(ax, xaxis=expected_log)
        assert ax.get_xscale() == expected_log

        ax = df.plot(loglog=input_log)
        self._check_ax_scales(ax, xaxis=expected_log, yaxis=expected_log)
        assert ax.get_xscale() == expected_log
        assert ax.get_yscale() == expected_log
@pytest.mark.parametrize("input_param", ["logx", "logy", "loglog"])
def test_invalid_logscale(self, input_param):
# GH: 24867
df = DataFrame({"a": np.arange(100)}, index=np.arange(100))
msg = "Boolean, None and 'sym' are valid options, 'sm' is given."
with pytest.raises(ValueError, match=msg):
df.plot(**{input_param: "sm"})
    @pytest.mark.slow
    def test_xcompat(self):
        # x_compat (as a kwarg or via the global pandas plot_params option)
        # disables the PeriodIndex-based x axis for datetime data.
        import pandas as pd

        df = self.tdf
        ax = df.plot(x_compat=True)
        lines = ax.get_lines()
        assert not isinstance(lines[0].get_xdata(), PeriodIndex)

        tm.close()
        # the "xaxis.compat" spelling of the option is accepted too
        pd.plotting.plot_params["xaxis.compat"] = True
        ax = df.plot()
        lines = ax.get_lines()
        assert not isinstance(lines[0].get_xdata(), PeriodIndex)

        tm.close()
        pd.plotting.plot_params["x_compat"] = False

        ax = df.plot()
        lines = ax.get_lines()
        assert not isinstance(lines[0].get_xdata(), PeriodIndex)
        assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)

        tm.close()
        # useful if you're plotting a bunch together
        with pd.plotting.plot_params.use("x_compat", True):
            ax = df.plot()
            lines = ax.get_lines()
            assert not isinstance(lines[0].get_xdata(), PeriodIndex)

        tm.close()
        # outside the context manager the default behavior is restored
        ax = df.plot()
        lines = ax.get_lines()
        assert not isinstance(lines[0].get_xdata(), PeriodIndex)
        assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)
def test_period_compat(self):
# GH 9012
# period-array conversions
df = DataFrame(
np.random.rand(21, 2),
index=bdate_range(datetime(2000, 1, 1), datetime(2000, 1, 31)),
columns=["a", "b"],
)
df.plot()
self.plt.axhline(y=0)
tm.close()
    def test_unsorted_index(self):
        # An unsorted (descending) index must round-trip through the plotted
        # line's xy data, for both int64 and float64 indexes.
        df = DataFrame(
            {"y": np.arange(100)}, index=np.arange(99, -1, -1), dtype=np.int64
        )
        ax = df.plot()
        lines = ax.get_lines()[0]
        rs = lines.get_xydata()
        # rebuild a Series from the line's (x, y) points and compare to df.y
        rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name="y")
        tm.assert_series_equal(rs, df.y, check_index_type=False)
        tm.close()

        df.index = pd.Index(np.arange(99, -1, -1), dtype=np.float64)
        ax = df.plot()
        lines = ax.get_lines()[0]
        rs = lines.get_xydata()
        rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name="y")
        tm.assert_series_equal(rs, df.y)
    def test_unsorted_index_lims(self):
        # The computed x limits must cover the full data range even when the
        # index is unsorted or contains NaN, or when x comes from a column.
        df = DataFrame({"y": [0.0, 1.0, 2.0, 3.0]}, index=[1.0, 0.0, 3.0, 2.0])
        ax = df.plot()
        xmin, xmax = ax.get_xlim()
        lines = ax.get_lines()
        assert xmin <= np.nanmin(lines[0].get_data()[0])
        assert xmax >= np.nanmax(lines[0].get_data()[0])

        # unsorted index with a NaN entry
        df = DataFrame(
            {"y": [0.0, 1.0, np.nan, 3.0, 4.0, 5.0, 6.0]},
            index=[1.0, 0.0, 3.0, 2.0, np.nan, 3.0, 2.0],
        )
        ax = df.plot()
        xmin, xmax = ax.get_xlim()
        lines = ax.get_lines()
        assert xmin <= np.nanmin(lines[0].get_data()[0])
        assert xmax >= np.nanmax(lines[0].get_data()[0])

        # x taken from a (descending-valued) column rather than the index
        df = DataFrame({"y": [0.0, 1.0, 2.0, 3.0], "z": [91.0, 90.0, 93.0, 92.0]})
        ax = df.plot(x="z", y="y")
        xmin, xmax = ax.get_xlim()
        lines = ax.get_lines()
        assert xmin <= np.nanmin(lines[0].get_data()[0])
        assert xmax >= np.nanmax(lines[0].get_data()[0])
    @pytest.mark.slow
    def test_subplots(self):
        # For each plot kind, subplots=True must produce one axes per column
        # with per-column legends; sharex hides all but the bottom x labels.
        df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))

        for kind in ["bar", "barh", "line", "area"]:
            axes = df.plot(kind=kind, subplots=True, sharex=True, legend=True)
            self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
            assert axes.shape == (3,)

            for ax, column in zip(axes, df.columns):
                self._check_legend_labels(ax, labels=[pprint_thing(column)])

            for ax in axes[:-2]:
                self._check_visible(ax.xaxis)  # xaxis must be visible for grid
                self._check_visible(ax.get_xticklabels(), visible=False)
                if not (kind == "bar" and self.mpl_ge_3_1_0):
                    # change https://github.com/pandas-dev/pandas/issues/26714
                    self._check_visible(ax.get_xticklabels(minor=True), visible=False)
                self._check_visible(ax.xaxis.get_label(), visible=False)
                self._check_visible(ax.get_yticklabels())

            # the bottom subplot keeps all of its x-axis decorations
            self._check_visible(axes[-1].xaxis)
            self._check_visible(axes[-1].get_xticklabels())
            self._check_visible(axes[-1].get_xticklabels(minor=True))
            self._check_visible(axes[-1].xaxis.get_label())
            self._check_visible(axes[-1].get_yticklabels())

            # with sharex=False every subplot keeps its x-axis decorations
            axes = df.plot(kind=kind, subplots=True, sharex=False)
            for ax in axes:
                self._check_visible(ax.xaxis)
                self._check_visible(ax.get_xticklabels())
                self._check_visible(ax.get_xticklabels(minor=True))
                self._check_visible(ax.xaxis.get_label())
                self._check_visible(ax.get_yticklabels())

            axes = df.plot(kind=kind, subplots=True, legend=False)
            for ax in axes:
                assert ax.get_legend() is None
def test_groupby_boxplot_sharey(self):
# https://github.com/pandas-dev/pandas/issues/20968
# sharey can now be switched check whether the right
# pair of axes is turned on or off
df = DataFrame(
{
"a": [-1.43, -0.15, -3.70, -1.43, -0.14],
"b": [0.56, 0.84, 0.29, 0.56, 0.85],
"c": [0, 1, 2, 3, 1],
},
index=[0, 1, 2, 3, 4],
)
# behavior without keyword
axes = df.groupby("c").boxplot()
expected = [True, False, True, False]
self._assert_ytickslabels_visibility(axes, expected)
# set sharey=True should be identical
axes = df.groupby("c").boxplot(sharey=True)
expected = [True, False, True, False]
self._assert_ytickslabels_visibility(axes, expected)
# sharey=False, all yticklabels should be visible
axes = df.groupby("c").boxplot(sharey=False)
expected = [True, True, True, True]
self._assert_ytickslabels_visibility(axes, expected)
def test_groupby_boxplot_sharex(self):
# https://github.com/pandas-dev/pandas/issues/20968
# sharex can now be switched check whether the right
# pair of axes is turned on or off
df = DataFrame(
{
"a": [-1.43, -0.15, -3.70, -1.43, -0.14],
"b": [0.56, 0.84, 0.29, 0.56, 0.85],
"c": [0, 1, 2, 3, 1],
},
index=[0, 1, 2, 3, 4],
)
# behavior without keyword
axes = df.groupby("c").boxplot()
expected = [True, True, True, True]
self._assert_xtickslabels_visibility(axes, expected)
# set sharex=False should be identical
axes = df.groupby("c").boxplot(sharex=False)
expected = [True, True, True, True]
self._assert_xtickslabels_visibility(axes, expected)
# sharex=True, yticklabels should be visible
# only for bottom plots
axes = df.groupby("c").boxplot(sharex=True)
expected = [False, False, True, True]
self._assert_xtickslabels_visibility(axes, expected)
    @pytest.mark.xfail(reason="Waiting for PR 34334", strict=True)
    @pytest.mark.slow
    def test_subplots_timeseries(self):
        # With a DatetimeIndex, sharex hides all but the bottom x tick labels
        # (GH 7801) while sharex=False keeps every subplot fully decorated.
        idx = date_range(start="2014-07-01", freq="M", periods=10)
        df = DataFrame(np.random.rand(10, 3), index=idx)

        for kind in ["line", "area"]:
            axes = df.plot(kind=kind, subplots=True, sharex=True)
            self._check_axes_shape(axes, axes_num=3, layout=(3, 1))

            for ax in axes[:-2]:
                # GH 7801
                self._check_visible(ax.xaxis)  # xaxis must be visible for grid
                self._check_visible(ax.get_xticklabels(), visible=False)
                self._check_visible(ax.get_xticklabels(minor=True), visible=False)
                self._check_visible(ax.xaxis.get_label(), visible=False)
                self._check_visible(ax.get_yticklabels())

            self._check_visible(axes[-1].xaxis)
            self._check_visible(axes[-1].get_xticklabels())
            self._check_visible(axes[-1].get_xticklabels(minor=True))
            self._check_visible(axes[-1].xaxis.get_label())
            self._check_visible(axes[-1].get_yticklabels())
            self._check_ticks_props(axes, xrot=0)

            # rot/fontsize must propagate to every subplot when sharex=False
            axes = df.plot(kind=kind, subplots=True, sharex=False, rot=45, fontsize=7)
            for ax in axes:
                self._check_visible(ax.xaxis)
                self._check_visible(ax.get_xticklabels())
                self._check_visible(ax.get_xticklabels(minor=True))
                self._check_visible(ax.xaxis.get_label())
                self._check_visible(ax.get_yticklabels())
                self._check_ticks_props(ax, xlabelsize=7, xrot=45, ylabelsize=7)
    def test_subplots_timeseries_y_axis(self):
        # GH16953
        # Numeric, timedelta and (tz-naive or all-tz-aware) datetime columns
        # can be plotted as y values; non-numeric text columns must raise.
        data = {
            "numeric": np.array([1, 2, 5]),
            "timedelta": [
                pd.Timedelta(-10, unit="s"),
                pd.Timedelta(10, unit="m"),
                pd.Timedelta(10, unit="h"),
            ],
            "datetime_no_tz": [
                pd.to_datetime("2017-08-01 00:00:00"),
                pd.to_datetime("2017-08-01 02:00:00"),
                pd.to_datetime("2017-08-02 00:00:00"),
            ],
            "datetime_all_tz": [
                pd.to_datetime("2017-08-01 00:00:00", utc=True),
                pd.to_datetime("2017-08-01 02:00:00", utc=True),
                pd.to_datetime("2017-08-02 00:00:00", utc=True),
            ],
            "text": ["This", "should", "fail"],
        }
        testdata = DataFrame(data)

        # each supported dtype round-trips through the plotted line's y data
        ax_numeric = testdata.plot(y="numeric")
        assert (
            ax_numeric.get_lines()[0].get_data()[1] == testdata["numeric"].values
        ).all()
        ax_timedelta = testdata.plot(y="timedelta")
        assert (
            ax_timedelta.get_lines()[0].get_data()[1] == testdata["timedelta"].values
        ).all()
        ax_datetime_no_tz = testdata.plot(y="datetime_no_tz")
        assert (
            ax_datetime_no_tz.get_lines()[0].get_data()[1]
            == testdata["datetime_no_tz"].values
        ).all()
        ax_datetime_all_tz = testdata.plot(y="datetime_all_tz")
        assert (
            ax_datetime_all_tz.get_lines()[0].get_data()[1]
            == testdata["datetime_all_tz"].values
        ).all()

        msg = "no numeric data to plot"
        with pytest.raises(TypeError, match=msg):
            testdata.plot(y="text")
    @pytest.mark.xfail(reason="not support for period, categorical, datetime_mixed_tz")
    def test_subplots_timeseries_y_axis_not_supported(self):
        """
        This test will fail for:
            period:
                since period isn't yet implemented in ``select_dtypes``
                and because it will need a custom value converter +
                tick formatter (as was done for x-axis plots)

            categorical:
                 because it will need a custom value converter +
                 tick formatter (also doesn't work for x-axis, as of now)

            datetime_mixed_tz:
                because of the way how pandas handles ``Series`` of
                ``datetime`` objects with different timezone,
                generally converting ``datetime`` objects in a tz-aware
                form could help with this problem
        """
        # Columns of each currently-unsupported dtype; the assertions below
        # describe the desired (not yet implemented) behavior.
        data = {
            "numeric": np.array([1, 2, 5]),
            "period": [
                pd.Period("2017-08-01 00:00:00", freq="H"),
                pd.Period("2017-08-01 02:00", freq="H"),
                pd.Period("2017-08-02 00:00:00", freq="H"),
            ],
            "categorical": pd.Categorical(
                ["c", "b", "a"], categories=["a", "b", "c"], ordered=False
            ),
            "datetime_mixed_tz": [
                pd.to_datetime("2017-08-01 00:00:00", utc=True),
                pd.to_datetime("2017-08-01 02:00:00"),
                pd.to_datetime("2017-08-02 00:00:00"),
            ],
        }
        testdata = pd.DataFrame(data)
        ax_period = testdata.plot(x="numeric", y="period")
        assert (
            ax_period.get_lines()[0].get_data()[1] == testdata["period"].values
        ).all()
        ax_categorical = testdata.plot(x="numeric", y="categorical")
        assert (
            ax_categorical.get_lines()[0].get_data()[1]
            == testdata["categorical"].values
        ).all()
        ax_datetime_mixed_tz = testdata.plot(x="numeric", y="datetime_mixed_tz")
        assert (
            ax_datetime_mixed_tz.get_lines()[0].get_data()[1]
            == testdata["datetime_mixed_tz"].values
        ).all()
@pytest.mark.slow
def test_subplots_layout(self):
# GH 6667
df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(-1, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(2, -1))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(1, 4))
self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
assert axes.shape == (1, 4)
axes = df.plot(subplots=True, layout=(-1, 4))
self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
assert axes.shape == (1, 4)
axes = df.plot(subplots=True, layout=(4, -1))
self._check_axes_shape(axes, axes_num=3, layout=(4, 1))
assert axes.shape == (4, 1)
with pytest.raises(ValueError):
df.plot(subplots=True, layout=(1, 1))
with pytest.raises(ValueError):
df.plot(subplots=True, layout=(-1, -1))
# single column
df = DataFrame(np.random.rand(10, 1), index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
assert axes.shape == (1,)
axes = df.plot(subplots=True, layout=(3, 3))
self._check_axes_shape(axes, axes_num=1, layout=(3, 3))
assert axes.shape == (3, 3)
@pytest.mark.slow
def test_subplots_warnings(self):
# GH 9464
with tm.assert_produces_warning(None):
df = DataFrame(np.random.randn(100, 4))
df.plot(subplots=True, layout=(3, 2))
df = DataFrame(
np.random.randn(100, 4), index=date_range("1/1/2000", periods=100)
)
df.plot(subplots=True, layout=(3, 2))
    @pytest.mark.slow
    def test_subplots_multiple_axes(self):
        # GH 5353, 6970, GH 7069
        # Passing pre-created axes to subplots=True must draw onto them and
        # return the same number of axes, all on the original figure.
        fig, axes = self.plt.subplots(2, 3)
        df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))

        returned = df.plot(subplots=True, ax=axes[0], sharex=False, sharey=False)
        self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
        assert returned.shape == (3,)
        assert returned[0].figure is fig
        # draw on second row
        returned = df.plot(subplots=True, ax=axes[1], sharex=False, sharey=False)
        self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
        assert returned.shape == (3,)
        assert returned[0].figure is fig
        self._check_axes_shape(axes, axes_num=6, layout=(2, 3))
        tm.close()

        with pytest.raises(ValueError):
            fig, axes = self.plt.subplots(2, 3)
            # pass different number of axes from required
            df.plot(subplots=True, ax=axes)

        # pass 2-dim axes and invalid layout
        # invalid layout should not affect to input and return value
        # (show warning is tested in
        # TestDataFrameGroupByPlots.test_grouped_box_multiple_axes)
        fig, axes = self.plt.subplots(2, 2)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", UserWarning)
            df = DataFrame(np.random.rand(10, 4), index=list(string.ascii_letters[:10]))

            returned = df.plot(
                subplots=True, ax=axes, layout=(2, 1), sharex=False, sharey=False
            )
            self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
            assert returned.shape == (4,)

            returned = df.plot(
                subplots=True, ax=axes, layout=(2, -1), sharex=False, sharey=False
            )
            self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
            assert returned.shape == (4,)

            returned = df.plot(
                subplots=True, ax=axes, layout=(-1, 2), sharex=False, sharey=False
            )
            self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
            assert returned.shape == (4,)

        # single column
        fig, axes = self.plt.subplots(1, 1)
        df = DataFrame(np.random.rand(10, 1), index=list(string.ascii_letters[:10]))

        axes = df.plot(subplots=True, ax=[axes], sharex=False, sharey=False)
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
        assert axes.shape == (1,)
    def test_subplots_ts_share_axes(self):
        # GH 3964
        # With sharex/sharey on a 3x3 grid, only the bottom row keeps x tick
        # labels and only the first column keeps y tick labels.
        fig, axes = self.plt.subplots(3, 3, sharex=True, sharey=True)
        self.plt.subplots_adjust(left=0.05, right=0.95, hspace=0.3, wspace=0.3)
        df = DataFrame(
            np.random.randn(10, 9),
            index=date_range(start="2014-07-01", freq="M", periods=10),
        )
        for i, ax in enumerate(axes.ravel()):
            df[i].plot(ax=ax, fontsize=5)

        # Rows other than bottom should not be visible
        for ax in axes[0:-1].ravel():
            self._check_visible(ax.get_xticklabels(), visible=False)

        # Bottom row should be visible
        for ax in axes[-1].ravel():
            self._check_visible(ax.get_xticklabels(), visible=True)

        # First column should be visible
        for ax in axes[[0, 1, 2], [0]].ravel():
            self._check_visible(ax.get_yticklabels(), visible=True)

        # Other columns should not be visible
        for ax in axes[[0, 1, 2], [1]].ravel():
            self._check_visible(ax.get_yticklabels(), visible=False)
        for ax in axes[[0, 1, 2], [2]].ravel():
            self._check_visible(ax.get_yticklabels(), visible=False)
def test_subplots_sharex_axes_existing_axes(self):
# GH 9158
d = {"A": [1.0, 2.0, 3.0, 4.0], "B": [4.0, 3.0, 2.0, 1.0], "C": [5, 1, 3, 4]}
df = DataFrame(d, index=date_range("2014 10 11", "2014 10 14"))
axes = df[["A", "B"]].plot(subplots=True)
df["C"].plot(ax=axes[0], secondary_y=True)
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
for ax in axes.ravel():
self._check_visible(ax.get_yticklabels(), visible=True)
    @pytest.mark.slow
    def test_subplots_dup_columns(self):
        # GH 10962
        # Duplicate column names: one line per subplot with the plain label;
        # without subplots, secondary_y appends "(right)" to every label.
        df = DataFrame(np.random.rand(5, 5), columns=list("aaaaa"))
        axes = df.plot(subplots=True)
        for ax in axes:
            self._check_legend_labels(ax, labels=["a"])
            assert len(ax.lines) == 1
        tm.close()

        axes = df.plot(subplots=True, secondary_y="a")
        for ax in axes:
            # (right) is only attached when subplots=False
            self._check_legend_labels(ax, labels=["a"])
            assert len(ax.lines) == 1
        tm.close()

        ax = df.plot(secondary_y="a")
        self._check_legend_labels(ax, labels=["a (right)"] * 5)
        # all five lines live on the secondary (right) axis
        assert len(ax.lines) == 0
        assert len(ax.right_ax.lines) == 5
def test_negative_log(self):
df = -DataFrame(
rand(6, 4),
index=list(string.ascii_letters[:6]),
columns=["x", "y", "z", "four"],
)
with pytest.raises(ValueError):
df.plot.area(logy=True)
with pytest.raises(ValueError):
df.plot.area(loglog=True)
def _compare_stacked_y_cood(self, normal_lines, stacked_lines):
base = np.zeros(len(normal_lines[0].get_data()[1]))
for nl, sl in zip(normal_lines, stacked_lines):
base += nl.get_data()[1] # get y coordinates
sy = sl.get_data()[1]
tm.assert_numpy_array_equal(base, sy)
    def test_line_area_stacked(self):
        # Stacked line/area y values must equal the cumulative sum of the
        # unstacked ones; mixed-sign columns cannot be stacked.
        # NOTE: the RNGContext seeds the draws below, so the order of the
        # rand()/randn() calls must not change.
        with tm.RNGContext(42):
            df = DataFrame(rand(6, 4), columns=["w", "x", "y", "z"])
            neg_df = -df
            # each column has either positive or negative value
            sep_df = DataFrame(
                {"w": rand(6), "x": rand(6), "y": -rand(6), "z": -rand(6)}
            )
            # each column has positive-negative mixed value
            mixed_df = DataFrame(
                randn(6, 4),
                index=list(string.ascii_letters[:6]),
                columns=["w", "x", "y", "z"],
            )

            for kind in ["line", "area"]:
                ax1 = _check_plot_works(df.plot, kind=kind, stacked=False)
                ax2 = _check_plot_works(df.plot, kind=kind, stacked=True)
                self._compare_stacked_y_cood(ax1.lines, ax2.lines)

                ax1 = _check_plot_works(neg_df.plot, kind=kind, stacked=False)
                ax2 = _check_plot_works(neg_df.plot, kind=kind, stacked=True)
                self._compare_stacked_y_cood(ax1.lines, ax2.lines)

                # positive and negative columns stack independently
                ax1 = _check_plot_works(sep_df.plot, kind=kind, stacked=False)
                ax2 = _check_plot_works(sep_df.plot, kind=kind, stacked=True)
                self._compare_stacked_y_cood(ax1.lines[:2], ax2.lines[:2])
                self._compare_stacked_y_cood(ax1.lines[2:], ax2.lines[2:])

                _check_plot_works(mixed_df.plot, stacked=False)
                with pytest.raises(ValueError):
                    mixed_df.plot(stacked=True)

                # Use an index with strictly positive values, preventing
                # matplotlib from warning about ignoring xlim
                df2 = df.set_index(df.index + 1)
                _check_plot_works(df2.plot, kind=kind, logx=True, stacked=True)
    def test_line_area_nan_df(self):
        # NaN handling: unstacked lines mask NaN points, while stacked
        # line/area plots treat NaN as 0 when accumulating.
        values1 = [1, 2, np.nan, 3]
        values2 = [3, np.nan, 2, 1]
        df = DataFrame({"a": values1, "b": values2})
        tdf = DataFrame({"a": values1, "b": values2}, index=tm.makeDateIndex(k=4))

        for d in [df, tdf]:
            ax = _check_plot_works(d.plot)
            masked1 = ax.lines[0].get_ydata()
            masked2 = ax.lines[1].get_ydata()
            # remove nan for comparison purpose
            exp = np.array([1, 2, 3], dtype=np.float64)
            tm.assert_numpy_array_equal(np.delete(masked1.data, 2), exp)

            exp = np.array([3, 2, 1], dtype=np.float64)
            tm.assert_numpy_array_equal(np.delete(masked2.data, 1), exp)

            # NaN positions are masked out of each line
            tm.assert_numpy_array_equal(
                masked1.mask, np.array([False, False, True, False])
            )
            tm.assert_numpy_array_equal(
                masked2.mask, np.array([False, True, False, False])
            )

            # when stacking, NaN contributes 0 to the running total
            expected1 = np.array([1, 2, 0, 3], dtype=np.float64)
            expected2 = np.array([3, 0, 2, 1], dtype=np.float64)

            ax = _check_plot_works(d.plot, stacked=True)
            tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
            tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2)

            ax = _check_plot_works(d.plot.area)
            tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
            tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2)

            ax = _check_plot_works(d.plot.area, stacked=False)
            tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
            tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected2)
def test_line_lim(self):
    """x-limits of a line plot must span all plotted data, including with
    secondary_y and with subplots."""
    df = DataFrame(rand(6, 3), columns=["x", "y", "z"])
    ax = df.plot()
    xmin, xmax = ax.get_xlim()
    lines = ax.get_lines()
    assert xmin <= lines[0].get_data()[0][0]
    assert xmax >= lines[0].get_data()[0][-1]
    ax = df.plot(secondary_y=True)
    xmin, xmax = ax.get_xlim()
    lines = ax.get_lines()
    assert xmin <= lines[0].get_data()[0][0]
    assert xmax >= lines[0].get_data()[0][-1]
    axes = df.plot(secondary_y=True, subplots=True)
    self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
    for ax in axes:
        # with secondary_y + subplots each returned axis IS the secondary
        # axis, so it carries left_ax (the primary) and no right_ax
        assert hasattr(ax, "left_ax")
        assert not hasattr(ax, "right_ax")
        xmin, xmax = ax.get_xlim()
        lines = ax.get_lines()
        assert xmin <= lines[0].get_data()[0][0]
        assert xmax >= lines[0].get_data()[0][-1]
def test_area_lim(self):
    """Area plots clamp the y-limits at zero: ymin == 0 for all-positive
    data and ymax == 0 for all-negative data, stacked or not."""
    df = DataFrame(rand(6, 4), columns=["x", "y", "z", "four"])
    neg_df = -df
    for stacked in [True, False]:
        ax = _check_plot_works(df.plot.area, stacked=stacked)
        xmin, xmax = ax.get_xlim()
        ymin, ymax = ax.get_ylim()
        lines = ax.get_lines()
        assert xmin <= lines[0].get_data()[0][0]
        assert xmax >= lines[0].get_data()[0][-1]
        assert ymin == 0
        ax = _check_plot_works(neg_df.plot.area, stacked=stacked)
        ymin, ymax = ax.get_ylim()
        assert ymax == 0
@pytest.mark.slow
def test_bar_colors(self):
    """Bar colors: matplotlib cycler defaults, one color char per column,
    colormap by name and by object, and a single color for all bars."""
    import matplotlib.pyplot as plt

    default_colors = self._unpack_cycler(plt.rcParams)
    df = DataFrame(randn(5, 5))
    ax = df.plot.bar()
    # 5 rows -> 5 patches per column; [::5] picks the first patch of each column
    self._check_colors(ax.patches[::5], facecolors=default_colors[:5])
    tm.close()
    custom_colors = "rgcby"
    ax = df.plot.bar(color=custom_colors)
    self._check_colors(ax.patches[::5], facecolors=custom_colors)
    tm.close()
    from matplotlib import cm

    # Test str -> colormap functionality
    ax = df.plot.bar(colormap="jet")
    rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
    self._check_colors(ax.patches[::5], facecolors=rgba_colors)
    tm.close()
    # Test colormap functionality
    ax = df.plot.bar(colormap=cm.jet)
    rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
    self._check_colors(ax.patches[::5], facecolors=rgba_colors)
    tm.close()
    # single column, single named color
    ax = df.loc[:, [0]].plot.bar(color="DodgerBlue")
    self._check_colors([ax.patches[0]], facecolors=["DodgerBlue"])
    tm.close()
    # one color applied to every column
    ax = df.plot(kind="bar", color="green")
    self._check_colors(ax.patches[::5], facecolors=["green"] * 5)
    tm.close()
def test_bar_user_colors(self):
    """color= may be a Series giving one color per bar when a single y
    column is selected."""
    df = pd.DataFrame(
        {"A": range(4), "B": range(1, 5), "color": ["red", "blue", "blue", "red"]}
    )
    # This should *only* work when `y` is specified, else
    # we use one color per column
    ax = df.plot.bar(y="A", color=df["color"])
    result = [p.get_facecolor() for p in ax.patches]
    # RGBA tuples for red/blue/blue/red
    expected = [
        (1.0, 0.0, 0.0, 1.0),
        (0.0, 0.0, 1.0, 1.0),
        (0.0, 0.0, 1.0, 1.0),
        (1.0, 0.0, 0.0, 1.0),
    ]
    assert result == expected
@pytest.mark.slow
def test_bar_linewidth(self):
    """linewidth= is forwarded to every bar patch (regular, stacked,
    subplots)."""
    df = DataFrame(randn(5, 5))
    # regular
    ax = df.plot.bar(linewidth=2)
    for r in ax.patches:
        assert r.get_linewidth() == 2
    # stacked
    ax = df.plot.bar(stacked=True, linewidth=2)
    for r in ax.patches:
        assert r.get_linewidth() == 2
    # subplots
    axes = df.plot.bar(linewidth=2, subplots=True)
    self._check_axes_shape(axes, axes_num=5, layout=(5, 1))
    for ax in axes:
        for r in ax.patches:
            assert r.get_linewidth() == 2
@pytest.mark.slow
def test_bar_barwidth(self):
    """width= semantics: for grouped (unstacked) bars the total group width
    is split across columns; for stacked bars each bar gets the full width.
    barh uses patch height instead of width."""
    df = DataFrame(randn(5, 5))
    width = 0.9
    # regular
    ax = df.plot.bar(width=width)
    for r in ax.patches:
        assert r.get_width() == width / len(df.columns)
    # stacked
    ax = df.plot.bar(stacked=True, width=width)
    for r in ax.patches:
        assert r.get_width() == width
    # horizontal regular
    ax = df.plot.barh(width=width)
    for r in ax.patches:
        assert r.get_height() == width / len(df.columns)
    # horizontal stacked
    ax = df.plot.barh(stacked=True, width=width)
    for r in ax.patches:
        assert r.get_height() == width
    # subplots
    axes = df.plot.bar(width=width, subplots=True)
    for ax in axes:
        for r in ax.patches:
            assert r.get_width() == width
    # horizontal subplots
    axes = df.plot.barh(width=width, subplots=True)
    for ax in axes:
        for r in ax.patches:
            assert r.get_height() == width
@pytest.mark.slow
def test_bar_barwidth_position(self):
    """width= and position= combine correctly for every bar variant
    (alignment is verified by the _check_bar_alignment helper)."""
    df = DataFrame(randn(5, 5))
    self._check_bar_alignment(df, kind="bar", stacked=False, width=0.9, position=0.2)
    self._check_bar_alignment(df, kind="bar", stacked=True, width=0.9, position=0.2)
    self._check_bar_alignment(df, kind="barh", stacked=False, width=0.9, position=0.2)
    self._check_bar_alignment(df, kind="barh", stacked=True, width=0.9, position=0.2)
    self._check_bar_alignment(df, kind="bar", subplots=True, width=0.9, position=0.2)
    self._check_bar_alignment(df, kind="barh", subplots=True, width=0.9, position=0.2)
@pytest.mark.slow
def test_bar_barwidth_position_int(self):
    """GH 12979: width=1 (int or float) keeps integer ticks and flush bar
    edges for stacked bars, and aligns correctly in all variants."""
    df = DataFrame(randn(5, 5))
    for w in [1, 1.0]:
        ax = df.plot.bar(stacked=True, width=w)
        ticks = ax.xaxis.get_ticklocs()
        tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4]))
        assert ax.get_xlim() == (-0.75, 4.75)
        # check left-edge of bars
        assert ax.patches[0].get_x() == -0.5
        assert ax.patches[-1].get_x() == 3.5
    self._check_bar_alignment(df, kind="bar", stacked=True, width=1)
    self._check_bar_alignment(df, kind="barh", stacked=False, width=1)
    self._check_bar_alignment(df, kind="barh", stacked=True, width=1)
    self._check_bar_alignment(df, kind="bar", subplots=True, width=1)
    self._check_bar_alignment(df, kind="barh", subplots=True, width=1)
@pytest.mark.slow
def test_bar_bottom_left(self):
    """bottom= (bar) and left= (barh) offset the base of each bar; scalars,
    lists and arrays are accepted, with and without stacking/subplots."""
    df = DataFrame(rand(5, 5))
    ax = df.plot.bar(stacked=False, bottom=1)
    result = [p.get_y() for p in ax.patches]
    # 5 columns x 5 rows = 25 patches, all shifted up by 1
    assert result == [1] * 25
    ax = df.plot.bar(stacked=True, bottom=[-1, -2, -3, -4, -5])
    # only the first series starts at the given bottoms; the rest stack on top
    result = [p.get_y() for p in ax.patches[:5]]
    assert result == [-1, -2, -3, -4, -5]
    ax = df.plot.barh(stacked=False, left=np.array([1, 1, 1, 1, 1]))
    result = [p.get_x() for p in ax.patches]
    assert result == [1] * 25
    ax = df.plot.barh(stacked=True, left=[1, 2, 3, 4, 5])
    result = [p.get_x() for p in ax.patches[:5]]
    assert result == [1, 2, 3, 4, 5]
    axes = df.plot.bar(subplots=True, bottom=-1)
    for ax in axes:
        result = [p.get_y() for p in ax.patches]
        assert result == [-1] * 5
    axes = df.plot.barh(subplots=True, left=np.array([1, 1, 1, 1, 1]))
    for ax in axes:
        result = [p.get_x() for p in ax.patches]
        assert result == [1] * 5
@pytest.mark.slow
def test_bar_nan(self):
    """NaN values become zero-height bars; when stacked, the NaN contributes
    0 to the cumulative base of the bars above it."""
    df = DataFrame({"A": [10, np.nan, 20], "B": [5, 10, 20], "C": [1, 2, 3]})
    ax = df.plot.bar()
    expected = [10, 0, 20, 5, 10, 20, 1, 2, 3]
    result = [p.get_height() for p in ax.patches]
    assert result == expected
    ax = df.plot.bar(stacked=True)
    result = [p.get_height() for p in ax.patches]
    assert result == expected
    # stacked bases: column B sits on A's values, C on A+B
    result = [p.get_y() for p in ax.patches]
    expected = [0.0, 0.0, 0.0, 10.0, 0.0, 20.0, 15.0, 10.0, 40.0]
    assert result == expected
@pytest.mark.slow
def test_bar_categorical(self):
    """GH 13019: a CategoricalIndex must produce the same bar geometry as a
    plain Index (ticks, xlim, bar edges)."""
    df1 = pd.DataFrame(
        np.random.randn(6, 5),
        index=pd.Index(list("ABCDEF")),
        columns=pd.Index(list("abcde")),
    )
    # categorical index must behave the same
    df2 = pd.DataFrame(
        np.random.randn(6, 5),
        index=pd.CategoricalIndex(list("ABCDEF")),
        columns=pd.CategoricalIndex(list("abcde")),
    )
    for df in [df1, df2]:
        ax = df.plot.bar()
        ticks = ax.xaxis.get_ticklocs()
        tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4, 5]))
        assert ax.get_xlim() == (-0.5, 5.5)
        # check left-edge of bars
        assert ax.patches[0].get_x() == -0.25
        assert ax.patches[-1].get_x() == 5.15
        ax = df.plot.bar(stacked=True)
        tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4, 5]))
        assert ax.get_xlim() == (-0.5, 5.5)
        # stacked bars are wider, so the last left-edge differs
        assert ax.patches[0].get_x() == -0.25
        assert ax.patches[-1].get_x() == 4.75
@pytest.mark.slow
def test_plot_scatter(self):
    """scatter accepts column labels or positional indices for x/y, raises
    TypeError if either is missing, and supports subplots (GH 6951)."""
    df = DataFrame(
        randn(6, 4),
        index=list(string.ascii_letters[:6]),
        columns=["x", "y", "z", "four"],
    )
    _check_plot_works(df.plot.scatter, x="x", y="y")
    _check_plot_works(df.plot.scatter, x=1, y=2)
    # both x and y are required for a scatter plot
    with pytest.raises(TypeError):
        df.plot.scatter(x="x")
    with pytest.raises(TypeError):
        df.plot.scatter(y="y")
    # GH 6951
    axes = df.plot(x="x", y="y", kind="scatter", subplots=True)
    self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
def test_raise_error_on_datetime_time_data(self):
    # GH 8113, datetime.time type is not supported by matplotlib in scatter
    df = pd.DataFrame(np.random.randn(10), columns=["a"])
    df["dtime"] = pd.date_range(start="2014-01-01", freq="h", periods=10).time
    # matplotlib rejects datetime.time values; pandas should surface that error
    msg = "must be a string or a number, not 'datetime.time'"
    with pytest.raises(TypeError, match=msg):
        df.plot(kind="scatter", x="dtime", y="a")
def test_scatterplot_datetime_data(self):
    # GH 30391: datetime64 columns (unlike datetime.time) are plottable
    dates = pd.date_range(start=date(2019, 1, 1), periods=12, freq="W")
    vals = np.random.normal(0, 1, len(dates))
    df = pd.DataFrame({"dates": dates, "vals": vals})
    _check_plot_works(df.plot.scatter, x="dates", y="vals")
    _check_plot_works(df.plot.scatter, x=0, y=1)
def test_scatterplot_object_data(self):
    """Scatter plots accept object-dtype columns on either axis (GH 18755)."""
    frames = [
        pd.DataFrame({"a": ["A", "B", "C"], "b": [2, 3, 4]}),
        pd.DataFrame({"a": ["A", "B", "C"], "b": ["a", "b", "c"]}),
    ]
    for frame in frames:
        # both label-based and positional column selection must work
        _check_plot_works(frame.plot.scatter, x="a", y="b")
        _check_plot_works(frame.plot.scatter, x=0, y=1)
@pytest.mark.slow
def test_if_scatterplot_colorbar_affects_xaxis_visibility(self):
    # addressing issue #10611, to ensure colobar does not
    # interfere with x-axis label and ticklabels with
    # ipython inline backend.
    random_array = np.random.random((1000, 3))
    df = pd.DataFrame(random_array, columns=["A label", "B label", "C label"])
    # ax1: plain scatter; ax2: scatter with a colorbar (c= column)
    ax1 = df.plot.scatter(x="A label", y="B label")
    ax2 = df.plot.scatter(x="A label", y="B label", c="C label")
    # tick-label visibility must be identical with and without the colorbar
    vis1 = [vis.get_visible() for vis in ax1.xaxis.get_minorticklabels()]
    vis2 = [vis.get_visible() for vis in ax2.xaxis.get_minorticklabels()]
    assert vis1 == vis2
    vis1 = [vis.get_visible() for vis in ax1.xaxis.get_majorticklabels()]
    vis2 = [vis.get_visible() for vis in ax2.xaxis.get_majorticklabels()]
    assert vis1 == vis2
    assert (
        ax1.xaxis.get_label().get_visible() == ax2.xaxis.get_label().get_visible()
    )
@pytest.mark.slow
def test_if_hexbin_xaxis_label_is_visible(self):
    # addressing issue #10678, to ensure colobar does not
    # interfere with x-axis label and ticklabels with
    # ipython inline backend.
    random_array = np.random.random((1000, 3))
    df = pd.DataFrame(random_array, columns=["A label", "B label", "C label"])
    ax = df.plot.hexbin("A label", "B label", gridsize=12)
    # hexbin adds a colorbar by default; x-axis labels must stay visible
    assert all(vis.get_visible() for vis in ax.xaxis.get_minorticklabels())
    assert all(vis.get_visible() for vis in ax.xaxis.get_majorticklabels())
    assert ax.xaxis.get_label().get_visible()
@pytest.mark.slow
def test_if_scatterplot_colorbars_are_next_to_parent_axes(self):
    """Each scatter colorbar must sit directly beside its own parent axes
    (equal horizontal gap for both subplot/colorbar pairs)."""
    import matplotlib.pyplot as plt

    random_array = np.random.random((1000, 3))
    df = pd.DataFrame(random_array, columns=["A label", "B label", "C label"])
    fig, axes = plt.subplots(1, 2)
    df.plot.scatter("A label", "B label", c="C label", ax=axes[0])
    df.plot.scatter("A label", "B label", c="C label", ax=axes[1])
    plt.tight_layout()
    # fig.axes is [parent0, parent1, cbar0, cbar1]; compare x-extents
    points = np.array([ax.get_position().get_points() for ax in fig.axes])
    axes_x_coords = points[:, :, 0]
    parent_distance = axes_x_coords[1, :] - axes_x_coords[0, :]
    colorbar_distance = axes_x_coords[3, :] - axes_x_coords[2, :]
    assert np.isclose(parent_distance, colorbar_distance, atol=1e-7).all()
@pytest.mark.parametrize("x, y", [("x", "y"), ("y", "x"), ("y", "y")])
@pytest.mark.slow
def test_plot_scatter_with_categorical_data(self, x, y):
    # after fixing GH 18755, should be able to plot categorical data
    df = pd.DataFrame(
        {"x": [1, 2, 3, 4], "y": pd.Categorical(["a", "b", "a", "c"])}
    )
    _check_plot_works(df.plot.scatter, x=x, y=y)
@pytest.mark.slow
def test_plot_scatter_with_c(self):
    """c= behavior for scatter: column-driven color with default/custom
    colormaps and colorbar, colorbar=False, solid colors, and raw RGBA /
    float arrays passed straight through to matplotlib."""
    df = DataFrame(
        randn(6, 4),
        index=list(string.ascii_letters[:6]),
        columns=["x", "y", "z", "four"],
    )
    axes = [df.plot.scatter(x="x", y="y", c="z"), df.plot.scatter(x=0, y=1, c=2)]
    for ax in axes:
        # default to Greys
        assert ax.collections[0].cmap.name == "Greys"
        # n.b. there appears to be no public method
        # to get the colorbar label
        assert ax.collections[0].colorbar._label == "z"
    cm = "cubehelix"
    ax = df.plot.scatter(x="x", y="y", c="z", colormap=cm)
    assert ax.collections[0].cmap.name == cm
    # verify turning off colorbar works
    ax = df.plot.scatter(x="x", y="y", c="z", colorbar=False)
    assert ax.collections[0].colorbar is None
    # verify that we can still plot a solid color
    ax = df.plot.scatter(x=0, y=1, c="red")
    assert ax.collections[0].colorbar is None
    self._check_colors(ax.collections, facecolors=["r"])
    # Ensure that we can pass an np.array straight through to matplotlib,
    # this functionality was accidentally removed previously.
    # See https://github.com/pandas-dev/pandas/issues/8852 for bug report
    #
    # Exercise colormap path and non-colormap path as they are independent
    #
    df = DataFrame({"A": [1, 2], "B": [3, 4]})
    red_rgba = [1.0, 0.0, 0.0, 1.0]
    green_rgba = [0.0, 1.0, 0.0, 1.0]
    rgba_array = np.array([red_rgba, green_rgba])
    ax = df.plot.scatter(x="A", y="B", c=rgba_array)
    # expect the face colors of the points in the non-colormap path to be
    # identical to the values we supplied, normally we'd be on shaky ground
    # comparing floats for equality but here we expect them to be
    # identical.
    tm.assert_numpy_array_equal(ax.collections[0].get_facecolor(), rgba_array)
    # we don't test the colors of the faces in this next plot because they
    # are dependent on the spring colormap, which may change its colors
    # later.
    float_array = np.array([0.0, 1.0])
    df.plot.scatter(x="A", y="B", c=float_array, cmap="spring")
@pytest.mark.parametrize("cmap", [None, "Greys"])
def test_scatter_with_c_column_name_with_colors(self, cmap):
    # https://github.com/pandas-dev/pandas/issues/34316
    # a c= column holding color strings is used directly; no colorbar
    df = pd.DataFrame(
        [[5.1, 3.5], [4.9, 3.0], [7.0, 3.2], [6.4, 3.2], [5.9, 3.0]],
        columns=["length", "width"],
    )
    df["species"] = ["r", "r", "g", "g", "b"]
    ax = df.plot.scatter(x=0, y=1, c="species", cmap=cmap)
    assert ax.collections[0].colorbar is None
def test_plot_scatter_with_s(self):
    # this refers to GH 32904
    # s= may name a column; its values become the marker sizes
    df = DataFrame(np.random.random((10, 3)) * 100, columns=["a", "b", "c"])
    ax = df.plot.scatter(x="a", y="b", s="c")
    tm.assert_numpy_array_equal(df["c"].values, right=ax.collections[0].get_sizes())
def test_scatter_colors(self):
    """c= and color= are mutually exclusive; c= alone falls back to the
    first cycler color, color= alone is honored."""
    df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3]})
    with pytest.raises(TypeError):
        df.plot.scatter(x="a", y="b", c="c", color="green")
    default_colors = self._unpack_cycler(self.plt.rcParams)
    ax = df.plot.scatter(x="a", y="b", c="c")
    tm.assert_numpy_array_equal(
        ax.collections[0].get_facecolor()[0],
        np.array(self.colorconverter.to_rgba(default_colors[0])),
    )
    ax = df.plot.scatter(x="a", y="b", color="white")
    tm.assert_numpy_array_equal(
        ax.collections[0].get_facecolor()[0],
        np.array([1, 1, 1, 1], dtype=np.float64),
    )
def test_scatter_colorbar_different_cmap(self):
    # GH 33389: two scatters on one axes keep their own colormaps
    import matplotlib.pyplot as plt

    df = pd.DataFrame({"x": [1, 2, 3], "y": [1, 3, 2], "c": [1, 2, 3]})
    df["x2"] = df["x"] + 1
    fig, ax = plt.subplots()
    df.plot("x", "y", c="c", kind="scatter", cmap="cividis", ax=ax)
    df.plot("x2", "y", c="c", kind="scatter", cmap="magma", ax=ax)
    assert ax.collections[0].cmap.name == "cividis"
    assert ax.collections[1].cmap.name == "magma"
@pytest.mark.slow
def test_plot_bar(self):
    """Smoke tests for bar/barh: legend, subplots, stacked, wide frames,
    and rot/fontsize tick properties."""
    df = DataFrame(
        randn(6, 4),
        index=list(string.ascii_letters[:6]),
        columns=["one", "two", "three", "four"],
    )
    _check_plot_works(df.plot.bar)
    _check_plot_works(df.plot.bar, legend=False)
    # _check_plot_works adds an ax so catch warning. see GH #13188
    with tm.assert_produces_warning(UserWarning):
        _check_plot_works(df.plot.bar, subplots=True)
    _check_plot_works(df.plot.bar, stacked=True)
    df = DataFrame(
        randn(10, 15), index=list(string.ascii_letters[:10]), columns=range(15)
    )
    _check_plot_works(df.plot.bar)
    df = DataFrame({"a": [0, 1], "b": [1, 0]})
    ax = _check_plot_works(df.plot.bar)
    # bar labels default to a 90-degree rotation
    self._check_ticks_props(ax, xrot=90)
    ax = df.plot.bar(rot=35, fontsize=10)
    self._check_ticks_props(ax, xrot=35, xlabelsize=10, ylabelsize=10)
    ax = _check_plot_works(df.plot.barh)
    self._check_ticks_props(ax, yrot=0)
    ax = df.plot.barh(rot=55, fontsize=11)
    self._check_ticks_props(ax, yrot=55, ylabelsize=11, xlabelsize=11)
def _check_bar_alignment(
    self,
    df,
    kind="bar",
    stacked=False,
    subplots=False,
    align="center",
    width=0.5,
    position=0.5,
):
    """Plot *df* as bars and verify geometry on every visible axis:

    - axis limits leave a 0.25 margin beyond the outermost bar edges (GH 7498)
    - tick locations fall exactly on the integer positions 0..len(df)-1
    - with align='center' the first tick coincides with the bar (group)
      center; with align='edge' it coincides with the bar's leading edge

    Returns the flattened list of axes for further inspection.
    """
    axes = df.plot(
        kind=kind,
        stacked=stacked,
        subplots=subplots,
        align=align,
        width=width,
        position=position,
        grid=True,
    )
    axes = self._flatten_visible(axes)
    for ax in axes:
        # bar uses the x axis / widths, barh the y axis / heights
        if kind == "bar":
            axis = ax.xaxis
            ax_min, ax_max = ax.get_xlim()
            min_edge = min(p.get_x() for p in ax.patches)
            max_edge = max(p.get_x() + p.get_width() for p in ax.patches)
        elif kind == "barh":
            axis = ax.yaxis
            ax_min, ax_max = ax.get_ylim()
            min_edge = min(p.get_y() for p in ax.patches)
            max_edge = max(p.get_y() + p.get_height() for p in ax.patches)
        else:
            raise ValueError
        # GH 7498
        # compare margins between lim and bar edges
        tm.assert_almost_equal(ax_min, min_edge - 0.25)
        tm.assert_almost_equal(ax_max, max_edge + 0.25)
        p = ax.patches[0]
        # compute where the first bar's center/edge should be; for
        # unstacked (grouped) bars the group spans len(df.columns) widths
        if kind == "bar" and (stacked is True or subplots is True):
            edge = p.get_x()
            center = edge + p.get_width() * position
        elif kind == "bar" and stacked is False:
            center = p.get_x() + p.get_width() * len(df.columns) * position
            edge = p.get_x()
        elif kind == "barh" and (stacked is True or subplots is True):
            center = p.get_y() + p.get_height() * position
            edge = p.get_y()
        elif kind == "barh" and stacked is False:
            center = p.get_y() + p.get_height() * len(df.columns) * position
            edge = p.get_y()
        else:
            raise ValueError
        # Check the ticks locates on integer
        assert (axis.get_ticklocs() == np.arange(len(df))).all()
        if align == "center":
            # Check whether the bar locates on center
            tm.assert_almost_equal(axis.get_ticklocs()[0], center)
        elif align == "edge":
            # Check whether the bar's edge starts from the tick
            tm.assert_almost_equal(axis.get_ticklocs()[0], edge)
        else:
            raise ValueError
    return axes
@pytest.mark.slow
def test_bar_stacked_center(self):
    """GH2157: stacked bars must be centered on integer ticks."""
    df = DataFrame({"A": [3] * 5, "B": list(range(5))}, index=range(5))
    for kind in ["bar", "barh"]:
        # default width and an explicit wide width
        self._check_bar_alignment(df, kind=kind, stacked=True)
        self._check_bar_alignment(df, kind=kind, stacked=True, width=0.9)
@pytest.mark.slow
def test_bar_center(self):
    """Unstacked bar groups must be centered on integer ticks."""
    df = DataFrame({"A": [3] * 5, "B": list(range(5))}, index=range(5))
    for kind in ["bar", "barh"]:
        # default width and an explicit wide width
        self._check_bar_alignment(df, kind=kind, stacked=False)
        self._check_bar_alignment(df, kind=kind, stacked=False, width=0.9)
@pytest.mark.slow
def test_bar_subplots_center(self):
    """Bars in subplot mode must be centered on integer ticks."""
    df = DataFrame({"A": [3] * 5, "B": list(range(5))}, index=range(5))
    for kind in ["bar", "barh"]:
        # default width and an explicit wide width
        self._check_bar_alignment(df, kind=kind, subplots=True)
        self._check_bar_alignment(df, kind=kind, subplots=True, width=0.9)
@pytest.mark.slow
def test_bar_align_single_column(self):
    """Alignment checks also hold for a single-column frame."""
    df = DataFrame(randn(5))
    for stacked in (False, True):
        self._check_bar_alignment(df, kind="bar", stacked=stacked)
    for stacked in (False, True):
        self._check_bar_alignment(df, kind="barh", stacked=stacked)
    self._check_bar_alignment(df, kind="bar", subplots=True)
    self._check_bar_alignment(df, kind="barh", subplots=True)
@pytest.mark.slow
def test_bar_edge(self):
    """align='edge' puts each bar's leading edge on the tick, for stacked,
    unstacked and subplot bar/barh plots at default and explicit widths."""
    df = DataFrame({"A": [3] * 5, "B": list(range(5))}, index=range(5))
    self._check_bar_alignment(df, kind="bar", stacked=True, align="edge")
    self._check_bar_alignment(df, kind="bar", stacked=True, width=0.9, align="edge")
    self._check_bar_alignment(df, kind="barh", stacked=True, align="edge")
    self._check_bar_alignment(df, kind="barh", stacked=True, width=0.9, align="edge")
    self._check_bar_alignment(df, kind="bar", stacked=False, align="edge")
    self._check_bar_alignment(df, kind="bar", stacked=False, width=0.9, align="edge")
    self._check_bar_alignment(df, kind="barh", stacked=False, align="edge")
    self._check_bar_alignment(df, kind="barh", stacked=False, width=0.9, align="edge")
    self._check_bar_alignment(df, kind="bar", subplots=True, align="edge")
    self._check_bar_alignment(df, kind="bar", subplots=True, width=0.9, align="edge")
    self._check_bar_alignment(df, kind="barh", subplots=True, align="edge")
    self._check_bar_alignment(df, kind="barh", subplots=True, width=0.9, align="edge")
@pytest.mark.slow
def test_bar_log_no_subplots(self):
    # GH3254, GH3298 matplotlib/matplotlib#1882, #1892
    # regressions in 1.2.1
    # log-scale y ticks must cover the data range down to 0.1
    expected = np.array([0.1, 1.0, 10.0, 100])
    # no subplots
    df = DataFrame({"A": [3] * 5, "B": list(range(1, 6))}, index=range(5))
    ax = df.plot.bar(grid=True, log=True)
    tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
@pytest.mark.slow
def test_bar_log_subplots(self):
    """log=True with subplots: both axes share the expected log tick
    locations."""
    expected = np.array([0.1, 1.0, 10.0, 100.0, 1000.0, 1e4])
    ax = DataFrame([Series([200, 300]), Series([300, 500])]).plot.bar(
        log=True, subplots=True
    )
    tm.assert_numpy_array_equal(ax[0].yaxis.get_ticklocs(), expected)
    tm.assert_numpy_array_equal(ax[1].yaxis.get_ticklocs(), expected)
@pytest.mark.slow
def test_boxplot(self):
    """DataFrame/Series box plots: tick labels follow the numeric columns,
    ticks are 1-based, positions= relocates boxes, rot= rotates labels."""
    df = self.hist_df
    series = df["height"]
    numeric_cols = df._get_numeric_data().columns
    labels = [pprint_thing(c) for c in numeric_cols]
    ax = _check_plot_works(df.plot.box)
    self._check_text_labels(ax.get_xticklabels(), labels)
    tm.assert_numpy_array_equal(
        ax.xaxis.get_ticklocs(), np.arange(1, len(numeric_cols) + 1)
    )
    # each box is drawn with a fixed number of Line2D artists
    assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
    tm.close()
    axes = series.plot.box(rot=40)
    self._check_ticks_props(axes, xrot=40, yrot=0)
    tm.close()
    ax = _check_plot_works(series.plot.box)
    positions = np.array([1, 6, 7])
    ax = df.plot.box(positions=positions)
    numeric_cols = df._get_numeric_data().columns
    labels = [pprint_thing(c) for c in numeric_cols]
    self._check_text_labels(ax.get_xticklabels(), labels)
    tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), positions)
    assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
@pytest.mark.slow
def test_boxplot_vertical(self):
    """Horizontal box plots (vert=False): labels move to the y axis,
    rot/fontsize apply to y tick labels, subplots and positions= work."""
    df = self.hist_df
    numeric_cols = df._get_numeric_data().columns
    labels = [pprint_thing(c) for c in numeric_cols]
    # if horizontal, yticklabels are rotated
    ax = df.plot.box(rot=50, fontsize=8, vert=False)
    self._check_ticks_props(ax, xrot=0, yrot=50, ylabelsize=8)
    self._check_text_labels(ax.get_yticklabels(), labels)
    assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
    # _check_plot_works adds an ax so catch warning. see GH #13188
    with tm.assert_produces_warning(UserWarning):
        axes = _check_plot_works(df.plot.box, subplots=True, vert=False, logx=True)
    self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
    self._check_ax_scales(axes, xaxis="log")
    for ax, label in zip(axes, labels):
        self._check_text_labels(ax.get_yticklabels(), [label])
        assert len(ax.lines) == self.bp_n_objects
    positions = np.array([3, 2, 8])
    ax = df.plot.box(positions=positions, vert=False)
    self._check_text_labels(ax.get_yticklabels(), labels)
    tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), positions)
    assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
@pytest.mark.slow
def test_boxplot_return_type(self):
    """return_type= accepts 'dict', 'axes' (the default) and 'both';
    anything else raises ValueError."""
    df = DataFrame(
        randn(6, 4),
        index=list(string.ascii_letters[:6]),
        columns=["one", "two", "three", "four"],
    )
    with pytest.raises(ValueError):
        df.plot.box(return_type="NOTATYPE")
    result = df.plot.box(return_type="dict")
    self._check_box_return_type(result, "dict")
    result = df.plot.box(return_type="axes")
    self._check_box_return_type(result, "axes")
    result = df.plot.box()  # default axes
    self._check_box_return_type(result, "axes")
    result = df.plot.box(return_type="both")
    self._check_box_return_type(result, "both")
@pytest.mark.slow
def test_boxplot_subplots_return_type(self):
    """subplots=True returns a Series of results keyed by column, for the
    default return_type and each explicit one."""
    df = self.hist_df
    # normal style: return_type=None
    result = df.plot.box(subplots=True)
    assert isinstance(result, Series)
    self._check_box_return_type(
        result, None, expected_keys=["height", "weight", "category"]
    )
    for t in ["dict", "axes", "both"]:
        returned = df.plot.box(return_type=t, subplots=True)
        self._check_box_return_type(
            returned,
            t,
            expected_keys=["height", "weight", "category"],
            check_ax_title=False,
        )
@pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_df(self):
    """KDE plots: legend labels per column, rot/fontsize tick props,
    subplots layout and logy scaling."""
    df = DataFrame(randn(100, 4))
    ax = _check_plot_works(df.plot, kind="kde")
    expected = [pprint_thing(c) for c in df.columns]
    self._check_legend_labels(ax, labels=expected)
    self._check_ticks_props(ax, xrot=0)
    ax = df.plot(kind="kde", rot=20, fontsize=5)
    self._check_ticks_props(ax, xrot=20, xlabelsize=5, ylabelsize=5)
    # _check_plot_works adds an ax, so a UserWarning is expected with subplots
    with tm.assert_produces_warning(UserWarning):
        axes = _check_plot_works(df.plot, kind="kde", subplots=True)
    self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
    axes = df.plot(kind="kde", logy=True, subplots=True)
    self._check_ax_scales(axes, yaxis="log")
@pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_missing_vals(self):
    """A KDE plot must tolerate NaN values in the data."""
    frame = DataFrame(np.random.uniform(size=(100, 4)))
    frame.loc[0, 0] = np.nan
    _check_plot_works(frame.plot, kind="kde")
@pytest.mark.slow
def test_hist_df(self):
    """Histogram plots: legend labels, subplots with logy, rot ticks,
    cumulative/density normalization, and horizontal orientation."""
    from matplotlib.patches import Rectangle

    df = DataFrame(randn(100, 4))
    series = df[0]
    ax = _check_plot_works(df.plot.hist)
    expected = [pprint_thing(c) for c in df.columns]
    self._check_legend_labels(ax, labels=expected)
    # _check_plot_works adds an ax, so a UserWarning is expected with subplots
    with tm.assert_produces_warning(UserWarning):
        axes = _check_plot_works(df.plot.hist, subplots=True, logy=True)
    self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
    self._check_ax_scales(axes, yaxis="log")
    axes = series.plot.hist(rot=40)
    self._check_ticks_props(axes, xrot=40, yrot=0)
    tm.close()
    ax = series.plot.hist(cumulative=True, bins=4, density=True)
    # cumulative density: height of the last bin must be 1.0
    rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
    tm.assert_almost_equal(rects[-1].get_height(), 1.0)
    tm.close()
    ax = series.plot.hist(cumulative=True, bins=4)
    # without density, the last bin holds all 100 observations
    # (rects[-1] is the axes' background patch, hence rects[-2])
    rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
    tm.assert_almost_equal(rects[-2].get_height(), 100.0)
    tm.close()
    # if horizontal, yticklabels are rotated
    axes = df.plot.hist(rot=50, fontsize=8, orientation="horizontal")
    self._check_ticks_props(axes, xrot=0, yrot=50, ylabelsize=8)
@pytest.mark.parametrize(
    "weights", [0.1 * np.ones(shape=(100,)), 0.1 * np.ones(shape=(100, 2))]
)
def test_hist_weights(self, weights):
    # GH 33173: weights= scales the bin heights (1-D or per-column 2-D)
    np.random.seed(0)
    df = pd.DataFrame(dict(zip(["A", "B"], np.random.randn(2, 100))))
    ax1 = _check_plot_works(df.plot, kind="hist", weights=weights)
    ax2 = _check_plot_works(df.plot, kind="hist")
    patch_height_with_weights = [patch.get_height() for patch in ax1.patches]
    # original heights with no weights, and we manually multiply with example
    # weights, so after multiplication, they should be almost same
    expected_patch_height = [0.1 * patch.get_height() for patch in ax2.patches]
    tm.assert_almost_equal(patch_height_with_weights, expected_patch_height)
def _check_box_coord(
self,
patches,
expected_y=None,
expected_h=None,
expected_x=None,
expected_w=None,
):
result_y = np.array([p.get_y() for p in patches])
result_height = np.array([p.get_height() for p in patches])
result_x = np.array([p.get_x() for p in patches])
result_width = np.array([p.get_width() for p in patches])
# dtype is depending on above values, no need to check
if expected_y is not None:
tm.assert_numpy_array_equal(result_y, expected_y, check_dtype=False)
if expected_h is not None:
tm.assert_numpy_array_equal(result_height, expected_h, check_dtype=False)
if expected_x is not None:
tm.assert_numpy_array_equal(result_x, expected_x, check_dtype=False)
if expected_w is not None:
tm.assert_numpy_array_equal(result_width, expected_w, check_dtype=False)
@pytest.mark.slow
def test_hist_df_coord(self):
    """Histogram patch geometry: bases and heights for plain, stacked,
    subplot and horizontal variants, with data crafted so each value lands
    in its own bin; a NaN-containing frame must give identical geometry."""
    normal_df = DataFrame(
        {
            "A": np.repeat(np.array([1, 2, 3, 4, 5]), np.array([10, 9, 8, 7, 6])),
            "B": np.repeat(np.array([1, 2, 3, 4, 5]), np.array([8, 8, 8, 8, 8])),
            "C": np.repeat(np.array([1, 2, 3, 4, 5]), np.array([6, 7, 8, 9, 10])),
        },
        columns=["A", "B", "C"],
    )
    # same frequencies plus NaNs, which must be ignored entirely
    nan_df = DataFrame(
        {
            "A": np.repeat(
                np.array([np.nan, 1, 2, 3, 4, 5]), np.array([3, 10, 9, 8, 7, 6])
            ),
            "B": np.repeat(
                np.array([1, np.nan, 2, 3, 4, 5]), np.array([8, 3, 8, 8, 8, 8])
            ),
            "C": np.repeat(
                np.array([1, 2, 3, np.nan, 4, 5]), np.array([6, 7, 8, 3, 9, 10])
            ),
        },
        columns=["A", "B", "C"],
    )
    for df in [normal_df, nan_df]:
        ax = df.plot.hist(bins=5)
        # patches come in column order: A = [:5], B = [5:10], C = [10:]
        self._check_box_coord(
            ax.patches[:5],
            expected_y=np.array([0, 0, 0, 0, 0]),
            expected_h=np.array([10, 9, 8, 7, 6]),
        )
        self._check_box_coord(
            ax.patches[5:10],
            expected_y=np.array([0, 0, 0, 0, 0]),
            expected_h=np.array([8, 8, 8, 8, 8]),
        )
        self._check_box_coord(
            ax.patches[10:],
            expected_y=np.array([0, 0, 0, 0, 0]),
            expected_h=np.array([6, 7, 8, 9, 10]),
        )
        ax = df.plot.hist(bins=5, stacked=True)
        # stacked: each column's base is the cumulative height below it
        self._check_box_coord(
            ax.patches[:5],
            expected_y=np.array([0, 0, 0, 0, 0]),
            expected_h=np.array([10, 9, 8, 7, 6]),
        )
        self._check_box_coord(
            ax.patches[5:10],
            expected_y=np.array([10, 9, 8, 7, 6]),
            expected_h=np.array([8, 8, 8, 8, 8]),
        )
        self._check_box_coord(
            ax.patches[10:],
            expected_y=np.array([18, 17, 16, 15, 14]),
            expected_h=np.array([6, 7, 8, 9, 10]),
        )
        axes = df.plot.hist(bins=5, stacked=True, subplots=True)
        # subplots: each axis holds one column, all based at 0
        self._check_box_coord(
            axes[0].patches,
            expected_y=np.array([0, 0, 0, 0, 0]),
            expected_h=np.array([10, 9, 8, 7, 6]),
        )
        self._check_box_coord(
            axes[1].patches,
            expected_y=np.array([0, 0, 0, 0, 0]),
            expected_h=np.array([8, 8, 8, 8, 8]),
        )
        self._check_box_coord(
            axes[2].patches,
            expected_y=np.array([0, 0, 0, 0, 0]),
            expected_h=np.array([6, 7, 8, 9, 10]),
        )
        # horizontal: x/width take the role of y/height
        ax = df.plot.hist(bins=5, orientation="horizontal")
        self._check_box_coord(
            ax.patches[:5],
            expected_x=np.array([0, 0, 0, 0, 0]),
            expected_w=np.array([10, 9, 8, 7, 6]),
        )
        self._check_box_coord(
            ax.patches[5:10],
            expected_x=np.array([0, 0, 0, 0, 0]),
            expected_w=np.array([8, 8, 8, 8, 8]),
        )
        self._check_box_coord(
            ax.patches[10:],
            expected_x=np.array([0, 0, 0, 0, 0]),
            expected_w=np.array([6, 7, 8, 9, 10]),
        )
        ax = df.plot.hist(bins=5, stacked=True, orientation="horizontal")
        self._check_box_coord(
            ax.patches[:5],
            expected_x=np.array([0, 0, 0, 0, 0]),
            expected_w=np.array([10, 9, 8, 7, 6]),
        )
        self._check_box_coord(
            ax.patches[5:10],
            expected_x=np.array([10, 9, 8, 7, 6]),
            expected_w=np.array([8, 8, 8, 8, 8]),
        )
        self._check_box_coord(
            ax.patches[10:],
            expected_x=np.array([18, 17, 16, 15, 14]),
            expected_w=np.array([6, 7, 8, 9, 10]),
        )
        axes = df.plot.hist(
            bins=5, stacked=True, subplots=True, orientation="horizontal"
        )
        self._check_box_coord(
            axes[0].patches,
            expected_x=np.array([0, 0, 0, 0, 0]),
            expected_w=np.array([10, 9, 8, 7, 6]),
        )
        self._check_box_coord(
            axes[1].patches,
            expected_x=np.array([0, 0, 0, 0, 0]),
            expected_w=np.array([8, 8, 8, 8, 8]),
        )
        self._check_box_coord(
            axes[2].patches,
            expected_x=np.array([0, 0, 0, 0, 0]),
            expected_w=np.array([6, 7, 8, 9, 10]),
        )
@pytest.mark.slow
def test_plot_int_columns(self):
    """Integer column labels must still produce a working legend."""
    frame = DataFrame(randn(100, 4)).cumsum()
    _check_plot_works(frame.plot, legend=True)
@pytest.mark.slow
def test_df_legend_labels(self):
    """Legend label accumulation across overlaid plots: legend=False keeps
    prior labels, legend='reverse' reverses only the new ones, secondary_y
    appends '(right)', and scatter label= passes through without mutating
    column names."""
    kinds = ["line", "bar", "barh", "kde", "area", "hist"]
    df = DataFrame(rand(3, 3), columns=["a", "b", "c"])
    df2 = DataFrame(rand(3, 3), columns=["d", "e", "f"])
    df3 = DataFrame(rand(3, 3), columns=["g", "h", "i"])
    df4 = DataFrame(rand(3, 3), columns=["j", "k", "l"])
    for kind in kinds:
        ax = df.plot(kind=kind, legend=True)
        self._check_legend_labels(ax, labels=df.columns)
        # legend=False must not add df2's columns to the legend
        ax = df2.plot(kind=kind, legend=False, ax=ax)
        self._check_legend_labels(ax, labels=df.columns)
        ax = df3.plot(kind=kind, legend=True, ax=ax)
        self._check_legend_labels(ax, labels=df.columns.union(df3.columns))
        # 'reverse' reverses only the newly added labels
        ax = df4.plot(kind=kind, legend="reverse", ax=ax)
        expected = list(df.columns.union(df3.columns)) + list(reversed(df4.columns))
        self._check_legend_labels(ax, labels=expected)
    # Secondary Y
    ax = df.plot(legend=True, secondary_y="b")
    self._check_legend_labels(ax, labels=["a", "b (right)", "c"])
    ax = df2.plot(legend=False, ax=ax)
    self._check_legend_labels(ax, labels=["a", "b (right)", "c"])
    ax = df3.plot(kind="bar", legend=True, secondary_y="h", ax=ax)
    self._check_legend_labels(
        ax, labels=["a", "b (right)", "c", "g", "h (right)", "i"]
    )
    # Time Series
    ind = date_range("1/1/2014", periods=3)
    df = DataFrame(randn(3, 3), columns=["a", "b", "c"], index=ind)
    df2 = DataFrame(randn(3, 3), columns=["d", "e", "f"], index=ind)
    df3 = DataFrame(randn(3, 3), columns=["g", "h", "i"], index=ind)
    ax = df.plot(legend=True, secondary_y="b")
    self._check_legend_labels(ax, labels=["a", "b (right)", "c"])
    ax = df2.plot(legend=False, ax=ax)
    self._check_legend_labels(ax, labels=["a", "b (right)", "c"])
    ax = df3.plot(legend=True, ax=ax)
    self._check_legend_labels(ax, labels=["a", "b (right)", "c", "g", "h", "i"])
    # scatter
    ax = df.plot.scatter(x="a", y="b", label="data1")
    self._check_legend_labels(ax, labels=["data1"])
    ax = df2.plot.scatter(x="d", y="e", legend=False, label="data2", ax=ax)
    self._check_legend_labels(ax, labels=["data1"])
    ax = df3.plot.scatter(x="g", y="h", label="data3", ax=ax)
    self._check_legend_labels(ax, labels=["data1", "data3"])
    # ensure label args pass through and
    # index name does not mutate
    # column names don't mutate
    df5 = df.set_index("a")
    ax = df5.plot(y="b")
    self._check_legend_labels(ax, labels=["b"])
    ax = df5.plot(y="b", label="LABEL_b")
    self._check_legend_labels(ax, labels=["LABEL_b"])
    self._check_text_labels(ax.xaxis.get_label(), "a")
    ax = df5.plot(y="c", label="LABEL_c", ax=ax)
    self._check_legend_labels(ax, labels=["LABEL_b", "LABEL_c"])
    assert df5.columns.tolist() == ["b", "c"]
    def test_missing_marker_multi_plots_on_same_ax(self):
        """Each line keeps its own marker when several columns are plotted
        onto the same Axes, regardless of plotting order (GH 18222).
        """
        # GH 18222
        df = pd.DataFrame(
            data=[[1, 1, 1, 1], [2, 2, 4, 8]], columns=["x", "r", "g", "b"]
        )
        fig, ax = self.plt.subplots(nrows=1, ncols=3)
        # Left plot: plot order r, g, b
        df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[0])
        df.plot(x="x", y="g", linewidth=1, marker="x", color="g", ax=ax[0])
        df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[0])
        self._check_legend_labels(ax[0], labels=["r", "g", "b"])
        self._check_legend_marker(ax[0], expected_markers=["o", "x", "o"])
        # Center plot: same columns, different order -> markers follow order
        df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[1])
        df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[1])
        df.plot(x="x", y="g", linewidth=1, marker="x", color="g", ax=ax[1])
        self._check_legend_labels(ax[1], labels=["b", "r", "g"])
        self._check_legend_marker(ax[1], expected_markers=["o", "o", "x"])
        # Right plot: a third ordering
        df.plot(x="x", y="g", linewidth=1, marker="x", color="g", ax=ax[2])
        df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[2])
        df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[2])
        self._check_legend_labels(ax[2], labels=["g", "b", "r"])
        self._check_legend_marker(ax[2], expected_markers=["x", "o", "o"])
    def test_legend_name(self):
        """Legend title tracks the columns' (Multi)Index name(s), and is only
        refreshed when a plot actually draws a legend.
        """
        multi = DataFrame(
            randn(4, 4),
            columns=[np.array(["a", "a", "b", "b"]), np.array(["x", "y", "x", "y"])],
        )
        multi.columns.names = ["group", "individual"]
        ax = multi.plot()
        leg_title = ax.legend_.get_title()
        # MultiIndex names are joined with a comma
        self._check_text_labels(leg_title, "group,individual")
        df = DataFrame(randn(5, 5))
        ax = df.plot(legend=True, ax=ax)
        leg_title = ax.legend_.get_title()
        self._check_text_labels(leg_title, "group,individual")
        df.columns.name = "new"
        # legend=False leaves the existing legend title untouched
        ax = df.plot(legend=False, ax=ax)
        leg_title = ax.legend_.get_title()
        self._check_text_labels(leg_title, "group,individual")
        # legend=True picks up the new columns name
        ax = df.plot(legend=True, ax=ax)
        leg_title = ax.legend_.get_title()
        self._check_text_labels(leg_title, "new")
@pytest.mark.slow
def test_no_legend(self):
kinds = ["line", "bar", "barh", "kde", "area", "hist"]
df = DataFrame(rand(3, 3), columns=["a", "b", "c"])
for kind in kinds:
ax = df.plot(kind=kind, legend=False)
self._check_legend_labels(ax, visible=False)
    @pytest.mark.slow
    def test_style_by_column(self):
        """``style=`` accepts a dict (column -> marker) or a list of markers;
        only the first len(markers) lines are expected to carry a marker.
        """
        import matplotlib.pyplot as plt
        fig = plt.gcf()
        df = DataFrame(randn(100, 3))
        for markers in [
            {0: "^", 1: "+", 2: "o"},
            {0: "^", 1: "+"},
            ["^", "+", "o"],
            ["^", "+"],
        ]:
            # reuse one figure: clear it and add a fresh subplot each round
            fig.clf()
            fig.add_subplot(111)
            ax = df.plot(style=markers)
            for i, l in enumerate(ax.get_lines()[: len(markers)]):
                assert l.get_marker() == markers[i]
@pytest.mark.slow
def test_line_label_none(self):
s = Series([1, 2])
ax = s.plot()
assert ax.get_legend() is None
ax = s.plot(legend=True)
assert ax.get_legend().get_texts()[0].get_text() == "None"
    @pytest.mark.slow
    def test_line_colors(self):
        """Line colors follow ``color=`` (char string, single name, hex list)
        and ``colormap=`` (by name or object).
        """
        from matplotlib import cm
        custom_colors = "rgcby"
        df = DataFrame(randn(5, 5))
        ax = df.plot(color=custom_colors)
        self._check_colors(ax.get_lines(), linecolors=custom_colors)
        tm.close()
        # re-plotting with the same colors must yield identical line colors
        ax2 = df.plot(color=custom_colors)
        lines2 = ax2.get_lines()
        for l1, l2 in zip(ax.get_lines(), lines2):
            assert l1.get_color() == l2.get_color()
        tm.close()
        # colormap given by name
        ax = df.plot(colormap="jet")
        rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
        self._check_colors(ax.get_lines(), linecolors=rgba_colors)
        tm.close()
        # colormap given as an object
        ax = df.plot(colormap=cm.jet)
        rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
        self._check_colors(ax.get_lines(), linecolors=rgba_colors)
        tm.close()
        # make color a list if plotting one column frame
        # handles cases like df.plot(color='DodgerBlue')
        ax = df.loc[:, [0]].plot(color="DodgerBlue")
        self._check_colors(ax.lines, linecolors=["DodgerBlue"])
        # a single color name is repeated for every column
        ax = df.plot(color="red")
        self._check_colors(ax.get_lines(), linecolors=["red"] * 5)
        tm.close()
        # GH 10299: list of hex color strings
        custom_colors = ["#FF0000", "#0000FF", "#FFFF00", "#000000", "#FFFFFF"]
        ax = df.plot(color=custom_colors)
        self._check_colors(ax.get_lines(), linecolors=custom_colors)
        tm.close()
@pytest.mark.slow
def test_dont_modify_colors(self):
colors = ["r", "g", "b"]
pd.DataFrame(np.random.rand(10, 2)).plot(color=colors)
assert len(colors) == 3
    @pytest.mark.slow
    def test_line_colors_and_styles_subplots(self):
        """With ``subplots=True`` every subplot's single line gets the color
        or style assigned to its column (GH 9894).
        """
        # GH 9894
        from matplotlib import cm
        default_colors = self._unpack_cycler(self.plt.rcParams)
        df = DataFrame(randn(5, 5))
        # default: colors come from the rcParams prop cycle, one per subplot
        axes = df.plot(subplots=True)
        for ax, c in zip(axes, list(default_colors)):
            c = [c]
            self._check_colors(ax.get_lines(), linecolors=c)
        tm.close()
        # single color char
        axes = df.plot(subplots=True, color="k")
        for ax in axes:
            self._check_colors(ax.get_lines(), linecolors=["k"])
        tm.close()
        # single color str
        axes = df.plot(subplots=True, color="green")
        for ax in axes:
            self._check_colors(ax.get_lines(), linecolors=["green"])
        tm.close()
        # color char string: one char per column
        custom_colors = "rgcby"
        axes = df.plot(color=custom_colors, subplots=True)
        for ax, c in zip(axes, list(custom_colors)):
            self._check_colors(ax.get_lines(), linecolors=[c])
        tm.close()
        # the same colors as an explicit list
        axes = df.plot(color=list(custom_colors), subplots=True)
        for ax, c in zip(axes, list(custom_colors)):
            self._check_colors(ax.get_lines(), linecolors=[c])
        tm.close()
        # GH 10299: list of hex colors
        custom_colors = ["#FF0000", "#0000FF", "#FFFF00", "#000000", "#FFFFFF"]
        axes = df.plot(color=custom_colors, subplots=True)
        for ax, c in zip(axes, list(custom_colors)):
            self._check_colors(ax.get_lines(), linecolors=[c])
        tm.close()
        # colormap: by name and by object
        rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
        for cmap in ["jet", cm.jet]:
            axes = df.plot(colormap=cmap, subplots=True)
            for ax, c in zip(axes, rgba_colors):
                self._check_colors(ax.get_lines(), linecolors=[c])
            tm.close()
        # make color a list if plotting one column frame
        # handles cases like df.plot(color='DodgerBlue')
        axes = df.loc[:, [0]].plot(color="DodgerBlue", subplots=True)
        self._check_colors(axes[0].lines, linecolors=["DodgerBlue"])
        # single character style
        axes = df.plot(style="r", subplots=True)
        for ax in axes:
            self._check_colors(ax.get_lines(), linecolors=["r"])
        tm.close()
        # list of styles
        styles = list("rgcby")
        axes = df.plot(style=styles, subplots=True)
        for ax, c in zip(axes, styles):
            self._check_colors(ax.get_lines(), linecolors=[c])
        tm.close()
    @pytest.mark.slow
    def test_area_colors(self):
        """Area plots apply the requested colors to lines, filled polygons
        and legend handles; unstacked areas use alpha 0.5.
        """
        from matplotlib import cm
        from matplotlib.collections import PolyCollection
        custom_colors = "rgcby"
        df = DataFrame(rand(5, 5))
        ax = df.plot.area(color=custom_colors)
        self._check_colors(ax.get_lines(), linecolors=custom_colors)
        poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
        self._check_colors(poly, facecolors=custom_colors)
        handles, labels = ax.get_legend_handles_labels()
        self._check_colors(handles, facecolors=custom_colors)
        # stacked (default): no alpha on the legend handles
        for h in handles:
            assert h.get_alpha() is None
        tm.close()
        # colormap by name
        ax = df.plot.area(colormap="jet")
        jet_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
        self._check_colors(ax.get_lines(), linecolors=jet_colors)
        poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
        self._check_colors(poly, facecolors=jet_colors)
        handles, labels = ax.get_legend_handles_labels()
        self._check_colors(handles, facecolors=jet_colors)
        for h in handles:
            assert h.get_alpha() is None
        tm.close()
        # When stacked=False, alpha is set to 0.5
        ax = df.plot.area(colormap=cm.jet, stacked=False)
        self._check_colors(ax.get_lines(), linecolors=jet_colors)
        poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
        jet_with_alpha = [(c[0], c[1], c[2], 0.5) for c in jet_colors]
        self._check_colors(poly, facecolors=jet_with_alpha)
        handles, labels = ax.get_legend_handles_labels()
        linecolors = jet_with_alpha
        self._check_colors(handles[: len(jet_colors)], linecolors=linecolors)
        for h in handles:
            assert h.get_alpha() == 0.5
    @pytest.mark.slow
    def test_hist_colors(self):
        """Histogram bars follow color= / colormap=.

        ``ax.patches[::10]`` samples one patch per column — presumably
         10 bars per histogram; TODO confirm against the default bin count.
        """
        default_colors = self._unpack_cycler(self.plt.rcParams)
        df = DataFrame(randn(5, 5))
        ax = df.plot.hist()
        self._check_colors(ax.patches[::10], facecolors=default_colors[:5])
        tm.close()
        custom_colors = "rgcby"
        ax = df.plot.hist(color=custom_colors)
        self._check_colors(ax.patches[::10], facecolors=custom_colors)
        tm.close()
        from matplotlib import cm
        # Test str -> colormap functionality
        ax = df.plot.hist(colormap="jet")
        rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
        self._check_colors(ax.patches[::10], facecolors=rgba_colors)
        tm.close()
        # Test colormap functionality
        ax = df.plot.hist(colormap=cm.jet)
        rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
        self._check_colors(ax.patches[::10], facecolors=rgba_colors)
        tm.close()
        # single-column frame with a single color name
        ax = df.loc[:, [0]].plot.hist(color="DodgerBlue")
        self._check_colors([ax.patches[0]], facecolors=["DodgerBlue"])
        # single color applied to all columns
        ax = df.plot(kind="hist", color="green")
        self._check_colors(ax.patches[::10], facecolors=["green"] * 5)
        tm.close()
    @pytest.mark.slow
    @td.skip_if_no_scipy
    def test_kde_colors(self):
        """KDE lines follow color= and colormap= (name or object)."""
        from matplotlib import cm
        custom_colors = "rgcby"
        df = DataFrame(rand(5, 5))
        ax = df.plot.kde(color=custom_colors)
        self._check_colors(ax.get_lines(), linecolors=custom_colors)
        tm.close()
        # colormap by name
        ax = df.plot.kde(colormap="jet")
        rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
        self._check_colors(ax.get_lines(), linecolors=rgba_colors)
        tm.close()
        # colormap as an object
        ax = df.plot.kde(colormap=cm.jet)
        rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
        self._check_colors(ax.get_lines(), linecolors=rgba_colors)
    @pytest.mark.slow
    @td.skip_if_no_scipy
    def test_kde_colors_and_styles_subplots(self):
        """KDE with ``subplots=True``: each subplot's line takes its column's
        color/style (mirrors test_line_colors_and_styles_subplots).
        """
        from matplotlib import cm
        default_colors = self._unpack_cycler(self.plt.rcParams)
        df = DataFrame(randn(5, 5))
        # default: prop-cycle colors, one per subplot
        axes = df.plot(kind="kde", subplots=True)
        for ax, c in zip(axes, list(default_colors)):
            self._check_colors(ax.get_lines(), linecolors=[c])
        tm.close()
        # single color char
        axes = df.plot(kind="kde", color="k", subplots=True)
        for ax in axes:
            self._check_colors(ax.get_lines(), linecolors=["k"])
        tm.close()
        # single color str
        axes = df.plot(kind="kde", color="red", subplots=True)
        for ax in axes:
            self._check_colors(ax.get_lines(), linecolors=["red"])
        tm.close()
        # one color char per column
        custom_colors = "rgcby"
        axes = df.plot(kind="kde", color=custom_colors, subplots=True)
        for ax, c in zip(axes, list(custom_colors)):
            self._check_colors(ax.get_lines(), linecolors=[c])
        tm.close()
        # colormap: by name and by object
        rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
        for cmap in ["jet", cm.jet]:
            axes = df.plot(kind="kde", colormap=cmap, subplots=True)
            for ax, c in zip(axes, rgba_colors):
                self._check_colors(ax.get_lines(), linecolors=[c])
            tm.close()
        # make color a list if plotting one column frame
        # handles cases like df.plot(color='DodgerBlue')
        axes = df.loc[:, [0]].plot(kind="kde", color="DodgerBlue", subplots=True)
        self._check_colors(axes[0].lines, linecolors=["DodgerBlue"])
        # single character style
        axes = df.plot(kind="kde", style="r", subplots=True)
        for ax in axes:
            self._check_colors(ax.get_lines(), linecolors=["r"])
        tm.close()
        # list of styles
        styles = list("rgcby")
        axes = df.plot(kind="kde", style=styles, subplots=True)
        for ax, c in zip(axes, styles):
            self._check_colors(ax.get_lines(), linecolors=[c])
        tm.close()
    @pytest.mark.slow
    def test_boxplot_colors(self):
        """Boxplot artist colors: dict of per-artist colors, colormap, a
        single color/tuple for all artists, and invalid-key rejection.
        """
        def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c="k", fliers_c=None):
            # TODO: outside this func?
            # Assert each artist group in the boxplot dict has its color.
            if fliers_c is None:
                fliers_c = "k"
            self._check_colors(bp["boxes"], linecolors=[box_c] * len(bp["boxes"]))
            self._check_colors(
                bp["whiskers"], linecolors=[whiskers_c] * len(bp["whiskers"])
            )
            self._check_colors(
                bp["medians"], linecolors=[medians_c] * len(bp["medians"])
            )
            self._check_colors(bp["fliers"], linecolors=[fliers_c] * len(bp["fliers"]))
            self._check_colors(bp["caps"], linecolors=[caps_c] * len(bp["caps"]))
        default_colors = self._unpack_cycler(self.plt.rcParams)
        df = DataFrame(randn(5, 5))
        # defaults come from the prop cycle
        bp = df.plot.box(return_type="dict")
        _check_colors(bp, default_colors[0], default_colors[0], default_colors[2])
        tm.close()
        # full per-artist color dict; sym= sets the flier color
        dict_colors = dict(
            boxes="#572923", whiskers="#982042", medians="#804823", caps="#123456"
        )
        bp = df.plot.box(color=dict_colors, sym="r+", return_type="dict")
        _check_colors(
            bp,
            dict_colors["boxes"],
            dict_colors["whiskers"],
            dict_colors["medians"],
            dict_colors["caps"],
            "r",
        )
        tm.close()
        # partial colors
        dict_colors = dict(whiskers="c", medians="m")
        bp = df.plot.box(color=dict_colors, return_type="dict")
        _check_colors(bp, default_colors[0], "c", "m")
        tm.close()
        from matplotlib import cm
        # Test str -> colormap functionality
        bp = df.plot.box(colormap="jet", return_type="dict")
        jet_colors = [cm.jet(n) for n in np.linspace(0, 1, 3)]
        _check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
        tm.close()
        # Test colormap functionality
        bp = df.plot.box(colormap=cm.jet, return_type="dict")
        _check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
        tm.close()
        # string color is applied to all artists except fliers
        bp = df.plot.box(color="DodgerBlue", return_type="dict")
        _check_colors(bp, "DodgerBlue", "DodgerBlue", "DodgerBlue", "DodgerBlue")
        # tuple is also applied to all artists except fliers
        bp = df.plot.box(color=(0, 1, 0), sym="#123456", return_type="dict")
        _check_colors(bp, (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), "#123456")
        with pytest.raises(ValueError):
            # Color contains invalid key results in ValueError
            df.plot.box(color=dict(boxes="red", xxxx="blue"))
@pytest.mark.parametrize(
"props, expected",
[
("boxprops", "boxes"),
("whiskerprops", "whiskers"),
("capprops", "caps"),
("medianprops", "medians"),
],
)
def test_specified_props_kwd_plot_box(self, props, expected):
# GH 30346
df = DataFrame({k: np.random.random(100) for k in "ABC"})
kwd = {props: dict(color="C1")}
result = df.plot.box(return_type="dict", **kwd)
assert result[expected][0].get_color() == "C1"
def test_default_color_cycle(self):
import cycler
import matplotlib.pyplot as plt
colors = list("rgbk")
plt.rcParams["axes.prop_cycle"] = cycler.cycler("color", colors)
df = DataFrame(randn(5, 3))
ax = df.plot()
expected = self._unpack_cycler(plt.rcParams)[:3]
self._check_colors(ax.get_lines(), linecolors=expected)
def test_unordered_ts(self):
df = DataFrame(
np.array([3.0, 2.0, 1.0]),
index=[date(2012, 10, 1), date(2012, 9, 1), date(2012, 8, 1)],
columns=["test"],
)
ax = df.plot()
xticks = ax.lines[0].get_xdata()
assert xticks[0] < xticks[1]
ydata = ax.lines[0].get_ydata()
tm.assert_numpy_array_equal(ydata, np.array([1.0, 2.0, 3.0]))
@td.skip_if_no_scipy
def test_kind_both_ways(self):
df = DataFrame({"x": [1, 2, 3]})
for kind in plotting.PlotAccessor._common_kinds:
df.plot(kind=kind)
getattr(df.plot, kind)()
for kind in ["scatter", "hexbin"]:
df.plot("x", "x", kind=kind)
getattr(df.plot, kind)("x", "x")
def test_all_invalid_plot_data(self):
df = DataFrame(list("abcd"))
for kind in plotting.PlotAccessor._common_kinds:
msg = "no numeric data to plot"
with pytest.raises(TypeError, match=msg):
df.plot(kind=kind)
    @pytest.mark.slow
    def test_partially_invalid_plot_data(self):
        """Mixed numeric/string object-dtype data still raises TypeError,
        both for common kinds and for area (which also rejects mixed signs).
        """
        with tm.RNGContext(42):
            df = DataFrame(randn(10, 2), dtype=object)
            # randomly replace roughly half the rows with strings
            df[np.random.rand(df.shape[0]) > 0.5] = "a"
            for kind in plotting.PlotAccessor._common_kinds:
                msg = "no numeric data to plot"
                with pytest.raises(TypeError, match=msg):
                    df.plot(kind=kind)
        with tm.RNGContext(42):
            # area plot doesn't support positive/negative mixed data
            kinds = ["area"]
            df = DataFrame(rand(10, 2), dtype=object)
            df[np.random.rand(df.shape[0]) > 0.5] = "a"
            for kind in kinds:
                with pytest.raises(TypeError):
                    df.plot(kind=kind)
def test_invalid_kind(self):
df = DataFrame(randn(10, 2))
with pytest.raises(ValueError):
df.plot(kind="aasdf")
@pytest.mark.parametrize(
"x,y,lbl",
[
(["B", "C"], "A", "a"),
(["A"], ["B", "C"], ["b", "c"]),
("A", ["B", "C"], "badlabel"),
],
)
def test_invalid_xy_args(self, x, y, lbl):
# GH 18671, 19699 allows y to be list-like but not x
df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
with pytest.raises(ValueError):
df.plot(x=x, y=y, label=lbl)
@pytest.mark.parametrize("x,y", [("A", "B"), (["A"], "B")])
def test_invalid_xy_args_dup_cols(self, x, y):
# GH 18671, 19699 allows y to be list-like but not x
df = DataFrame([[1, 3, 5], [2, 4, 6]], columns=list("AAB"))
with pytest.raises(ValueError):
df.plot(x=x, y=y)
    @pytest.mark.parametrize(
        "x,y,lbl,colors",
        [
            ("A", ["B"], ["b"], ["red"]),
            ("A", ["B", "C"], ["b", "c"], ["red", "blue"]),
            (0, [1, 2], ["bokeh", "cython"], ["green", "yellow"]),
        ],
    )
    def test_y_listlike(self, x, y, lbl, colors):
        """GH 19699: list-like ``y`` is accepted, and labels/colors are
        applied element-wise to the resulting lines.
        """
        # GH 19699: tests list-like y and verifies lbls & colors
        df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
        _check_plot_works(df.plot, x="A", y=y, label=lbl)
        ax = df.plot(x=x, y=y, label=lbl, color=colors)
        # one line per requested y column
        assert len(ax.lines) == len(y)
        self._check_colors(ax.get_lines(), linecolors=colors)
@pytest.mark.parametrize("x,y,colnames", [(0, 1, ["A", "B"]), (1, 0, [0, 1])])
def test_xy_args_integer(self, x, y, colnames):
# GH 20056: tests integer args for xy and checks col names
df = DataFrame({"A": [1, 2], "B": [3, 4]})
df.columns = colnames
_check_plot_works(df.plot, x=x, y=y)
    @pytest.mark.slow
    def test_hexbin_basic(self):
        """Basic hexbin draws one collection; subplots=True still returns a
        single axes plus an attached colorbar axes (GH 6951).
        """
        df = self.hexbin_df
        ax = df.plot.hexbin(x="A", y="B", gridsize=10)
        # TODO: need better way to test. This just does existence.
        assert len(ax.collections) == 1
        # GH 6951
        axes = df.plot.hexbin(x="A", y="B", subplots=True)
        # hexbin should have 2 axes in the figure, 1 for plotting and another
        # is colorbar
        assert len(axes[0].figure.axes) == 2
        # return value is single axes
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
@pytest.mark.slow
def test_hexbin_with_c(self):
df = self.hexbin_df
ax = df.plot.hexbin(x="A", y="B", C="C")
assert len(ax.collections) == 1
ax = df.plot.hexbin(x="A", y="B", C="C", reduce_C_function=np.std)
assert len(ax.collections) == 1
@pytest.mark.slow
def test_hexbin_cmap(self):
df = self.hexbin_df
# Default to BuGn
ax = df.plot.hexbin(x="A", y="B")
assert ax.collections[0].cmap.name == "BuGn"
cm = "cubehelix"
ax = df.plot.hexbin(x="A", y="B", colormap=cm)
assert ax.collections[0].cmap.name == cm
@pytest.mark.slow
def test_no_color_bar(self):
df = self.hexbin_df
ax = df.plot.hexbin(x="A", y="B", colorbar=None)
assert ax.collections[0].colorbar is None
@pytest.mark.slow
def test_allow_cmap(self):
df = self.hexbin_df
ax = df.plot.hexbin(x="A", y="B", cmap="YlGn")
assert ax.collections[0].cmap.name == "YlGn"
with pytest.raises(TypeError):
df.plot.hexbin(x="A", y="B", cmap="YlGn", colormap="BuGn")
    @pytest.mark.slow
    def test_pie_df(self):
        """Pie plots need an explicit y (or subplots=True); labels and colors
        propagate to every wedge.
        """
        df = DataFrame(
            np.random.rand(5, 3),
            columns=["X", "Y", "Z"],
            index=["a", "b", "c", "d", "e"],
        )
        # no y and no subplots -> ambiguous, must raise
        with pytest.raises(ValueError):
            df.plot.pie()
        ax = _check_plot_works(df.plot.pie, y="Y")
        self._check_text_labels(ax.texts, df.index)
        # y may also be a positional column index
        ax = _check_plot_works(df.plot.pie, y=2)
        self._check_text_labels(ax.texts, df.index)
        # _check_plot_works adds an ax so catch warning. see GH #13188
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.plot.pie, subplots=True)
        assert len(axes) == len(df.columns)
        for ax in axes:
            self._check_text_labels(ax.texts, df.index)
        for ax, ylabel in zip(axes, df.columns):
            assert ax.get_ylabel() == ylabel
        labels = ["A", "B", "C", "D", "E"]
        color_args = ["r", "g", "b", "c", "m"]
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(
                df.plot.pie, subplots=True, labels=labels, colors=color_args
            )
        assert len(axes) == len(df.columns)
        for ax in axes:
            self._check_text_labels(ax.texts, labels)
            self._check_colors(ax.patches, facecolors=color_args)
    def test_pie_df_nan(self):
        """NaN wedges get an empty label, and are excluded from each
        subplot's legend (GH 8390).
        """
        df = DataFrame(np.random.rand(4, 4))
        # place one NaN per column, on the diagonal
        for i in range(4):
            df.iloc[i, i] = np.nan
        fig, axes = self.plt.subplots(ncols=4)
        df.plot.pie(subplots=True, ax=axes, legend=True)
        base_expected = ["0", "1", "2", "3"]
        for i, ax in enumerate(axes):
            expected = list(base_expected)  # force copy
            expected[i] = ""
            result = [x.get_text() for x in ax.texts]
            assert result == expected
            # legend labels
            # NaN's not included in legend with subplots
            # see https://github.com/pandas-dev/pandas/issues/8390
            assert [x.get_text() for x in ax.get_legend().get_texts()] == base_expected[
                :i
            ] + base_expected[i + 1 :]
    @pytest.mark.slow
    def test_errorbar_plot(self):
        """Error bars: accepted as DataFrame/Series/dict/scalar/array, per
        kind and with subplots; invalid shapes/dtypes must raise.
        """
        with warnings.catch_warnings():
            d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
            df = DataFrame(d)
            d_err = {"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4}
            df_err = DataFrame(d_err)
            # check line plots
            ax = _check_plot_works(df.plot, yerr=df_err, logy=True)
            self._check_has_errorbars(ax, xerr=0, yerr=2)
            ax = _check_plot_works(df.plot, yerr=df_err, logx=True, logy=True)
            self._check_has_errorbars(ax, xerr=0, yerr=2)
            ax = _check_plot_works(df.plot, yerr=df_err, loglog=True)
            self._check_has_errorbars(ax, xerr=0, yerr=2)
            kinds = ["line", "bar", "barh"]
            for kind in kinds:
                # yerr as a single Series: broadcast to both columns
                ax = _check_plot_works(df.plot, yerr=df_err["x"], kind=kind)
                self._check_has_errorbars(ax, xerr=0, yerr=2)
                # yerr as a dict
                ax = _check_plot_works(df.plot, yerr=d_err, kind=kind)
                self._check_has_errorbars(ax, xerr=0, yerr=2)
                # both xerr and yerr
                ax = _check_plot_works(df.plot, yerr=df_err, xerr=df_err, kind=kind)
                self._check_has_errorbars(ax, xerr=2, yerr=2)
                ax = _check_plot_works(
                    df.plot, yerr=df_err["x"], xerr=df_err["x"], kind=kind
                )
                self._check_has_errorbars(ax, xerr=2, yerr=2)
                # scalar errors
                ax = _check_plot_works(df.plot, xerr=0.2, yerr=0.2, kind=kind)
                self._check_has_errorbars(ax, xerr=2, yerr=2)
                # _check_plot_works adds an ax so catch warning. see GH #13188
                axes = _check_plot_works(
                    df.plot, yerr=df_err, xerr=df_err, subplots=True, kind=kind
                )
                self._check_has_errorbars(axes, xerr=1, yerr=1)
            ax = _check_plot_works(
                (df + 1).plot, yerr=df_err, xerr=df_err, kind="bar", log=True
            )
            self._check_has_errorbars(ax, xerr=2, yerr=2)
            # yerr is raw error values
            ax = _check_plot_works(df["y"].plot, yerr=np.ones(12) * 0.4)
            self._check_has_errorbars(ax, xerr=0, yerr=1)
            ax = _check_plot_works(df.plot, yerr=np.ones((2, 12)) * 0.4)
            self._check_has_errorbars(ax, xerr=0, yerr=2)
            # yerr is column name (including non-ASCII labels)
            for yerr in ["yerr", "誤差"]:
                s_df = df.copy()
                s_df[yerr] = np.ones(12) * 0.2
                ax = _check_plot_works(s_df.plot, yerr=yerr)
                self._check_has_errorbars(ax, xerr=0, yerr=2)
                ax = _check_plot_works(s_df.plot, y="y", x="x", yerr=yerr)
                self._check_has_errorbars(ax, xerr=0, yerr=1)
            # wrong length
            with pytest.raises(ValueError):
                df.plot(yerr=np.random.randn(11))
            # non-numeric error values
            df_err = DataFrame({"x": ["zzz"] * 12, "y": ["zzz"] * 12})
            with pytest.raises((ValueError, TypeError)):
                df.plot(yerr=df_err)
    @pytest.mark.xfail(reason="Iterator is consumed", raises=ValueError)
    @pytest.mark.slow
    def test_errorbar_plot_iterator(self):
        """An iterator passed as yerr is currently consumed before it can be
        applied to each column — known failure, hence the xfail marker.
        """
        with warnings.catch_warnings():
            d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
            df = DataFrame(d)
            # yerr is iterator
            ax = _check_plot_works(df.plot, yerr=itertools.repeat(0.1, len(df)))
            self._check_has_errorbars(ax, xerr=0, yerr=2)
@pytest.mark.slow
def test_errorbar_with_integer_column_names(self):
# test with integer column names
df = DataFrame(np.random.randn(10, 2))
df_err = DataFrame(np.random.randn(10, 2))
ax = _check_plot_works(df.plot, yerr=df_err)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, y=0, yerr=1)
self._check_has_errorbars(ax, xerr=0, yerr=1)
    @pytest.mark.slow
    def test_errorbar_with_partial_columns(self):
        """Error frames/dicts covering only some columns attach bars only to
        the columns they name.
        """
        df = DataFrame(np.random.randn(10, 3))
        # errors only for columns 0 and 2
        df_err = DataFrame(np.random.randn(10, 2), columns=[0, 2])
        kinds = ["line", "bar"]
        for kind in kinds:
            ax = _check_plot_works(df.plot, yerr=df_err, kind=kind)
            self._check_has_errorbars(ax, xerr=0, yerr=2)
        # same with a DatetimeIndex
        ix = date_range("1/1/2000", periods=10, freq="M")
        df.set_index(ix, inplace=True)
        df_err.set_index(ix, inplace=True)
        ax = _check_plot_works(df.plot, yerr=df_err, kind="line")
        self._check_has_errorbars(ax, xerr=0, yerr=2)
        # error key "z" matches no data column -> only "x" gets bars
        d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
        df = DataFrame(d)
        d_err = {"x": np.ones(12) * 0.2, "z": np.ones(12) * 0.4}
        df_err = DataFrame(d_err)
        for err in [d_err, df_err]:
            ax = _check_plot_works(df.plot, yerr=err)
            self._check_has_errorbars(ax, xerr=0, yerr=1)
    @pytest.mark.slow
    def test_errorbar_timeseries(self):
        """Error bars work on DatetimeIndex data for line/bar/barh, with
        frame/dict/Series/column-name error sources and subplots.
        """
        with warnings.catch_warnings():
            d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
            d_err = {"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4}
            # check time-series plots
            ix = date_range("1/1/2000", "1/1/2001", freq="M")
            tdf = DataFrame(d, index=ix)
            tdf_err = DataFrame(d_err, index=ix)
            kinds = ["line", "bar", "barh"]
            for kind in kinds:
                ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
                self._check_has_errorbars(ax, xerr=0, yerr=2)
                ax = _check_plot_works(tdf.plot, yerr=d_err, kind=kind)
                self._check_has_errorbars(ax, xerr=0, yerr=2)
                # error values from a single Series for one y column
                ax = _check_plot_works(tdf.plot, y="y", yerr=tdf_err["x"], kind=kind)
                self._check_has_errorbars(ax, xerr=0, yerr=1)
                # error values named by column
                ax = _check_plot_works(tdf.plot, y="y", yerr="x", kind=kind)
                self._check_has_errorbars(ax, xerr=0, yerr=1)
                ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
                self._check_has_errorbars(ax, xerr=0, yerr=2)
                # _check_plot_works adds an ax so catch warning. see GH #13188
                axes = _check_plot_works(
                    tdf.plot, kind=kind, yerr=tdf_err, subplots=True
                )
                self._check_has_errorbars(axes, xerr=0, yerr=1)
    def test_errorbar_asymmetrical(self):
        """A (cols, 2, rows) array gives asymmetric lower/upper error bars;
        a wrongly transposed array must raise.
        """
        np.random.seed(0)
        err = np.random.rand(3, 2, 5)
        # each column is [0, 1, 2, 3, 4], [3, 4, 5, 6, 7]...
        df = DataFrame(np.arange(15).reshape(3, 5)).T
        ax = df.plot(yerr=err, xerr=err / 2)
        # first column, first point: bar endpoints at y -+ the given errors
        yerr_0_0 = ax.collections[1].get_paths()[0].vertices[:, 1]
        expected_0_0 = err[0, :, 0] * np.array([-1, 1])
        tm.assert_almost_equal(yerr_0_0, expected_0_0)
        with pytest.raises(ValueError):
            df.plot(yerr=err.T)
        tm.close()
def test_table(self):
df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
_check_plot_works(df.plot, table=True)
_check_plot_works(df.plot, table=df)
# GH 35945 UserWarning
with tm.assert_produces_warning(None):
ax = df.plot()
assert len(ax.tables) == 0
plotting.table(ax, df.T)
assert len(ax.tables) == 1
def test_errorbar_scatter(self):
df = DataFrame(np.random.randn(5, 2), index=range(5), columns=["x", "y"])
df_err = DataFrame(
np.random.randn(5, 2) / 5, index=range(5), columns=["x", "y"]
)
ax = _check_plot_works(df.plot.scatter, x="x", y="y")
self._check_has_errorbars(ax, xerr=0, yerr=0)
ax = _check_plot_works(df.plot.scatter, x="x", y="y", xerr=df_err)
self._check_has_errorbars(ax, xerr=1, yerr=0)
ax = _check_plot_works(df.plot.scatter, x="x", y="y", yerr=df_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(df.plot.scatter, x="x", y="y", xerr=df_err, yerr=df_err)
self._check_has_errorbars(ax, xerr=1, yerr=1)
def _check_errorbar_color(containers, expected, has_err="has_xerr"):
lines = []
errs = [c.lines for c in ax.containers if getattr(c, has_err, False)][0]
for el in errs:
if is_list_like(el):
lines.extend(el)
else:
lines.append(el)
err_lines = [x for x in lines if x in ax.collections]
self._check_colors(
err_lines, linecolors=np.array([expected] * len(err_lines))
)
# GH 8081
df = DataFrame(np.random.randn(10, 5), columns=["a", "b", "c", "d", "e"])
ax = df.plot.scatter(x="a", y="b", xerr="d", yerr="e", c="red")
self._check_has_errorbars(ax, xerr=1, yerr=1)
_check_errorbar_color(ax.containers, "red", has_err="has_xerr")
_check_errorbar_color(ax.containers, "red", has_err="has_yerr")
ax = df.plot.scatter(x="a", y="b", yerr="e", color="green")
self._check_has_errorbars(ax, xerr=0, yerr=1)
_check_errorbar_color(ax.containers, "green", has_err="has_yerr")
    @pytest.mark.slow
    def test_sharex_and_ax(self):
        """sharex=True on gridspec axes hides only the top row's x tick
        labels; without sharex nothing is hidden.
        """
        # https://github.com/pandas-dev/pandas/issues/9737 using gridspec,
        # the axis in fig.get_axis() are sorted differently than pandas
        # expected them, so make sure that only the right ones are removed
        import matplotlib.pyplot as plt
        plt.close("all")
        gs, axes = _generate_4_axes_via_gridspec()
        df = DataFrame(
            {
                "a": [1, 2, 3, 4, 5, 6],
                "b": [1, 2, 3, 4, 5, 6],
                "c": [1, 2, 3, 4, 5, 6],
                "d": [1, 2, 3, 4, 5, 6],
            }
        )
        def _check(axes):
            # y labels stay visible everywhere; x labels only on bottom row
            for ax in axes:
                assert len(ax.lines) == 1
                self._check_visible(ax.get_yticklabels(), visible=True)
            for ax in [axes[0], axes[2]]:
                self._check_visible(ax.get_xticklabels(), visible=False)
                self._check_visible(ax.get_xticklabels(minor=True), visible=False)
            for ax in [axes[1], axes[3]]:
                self._check_visible(ax.get_xticklabels(), visible=True)
                self._check_visible(ax.get_xticklabels(minor=True), visible=True)
        for ax in axes:
            df.plot(x="a", y="b", title="title", ax=ax, sharex=True)
        gs.tight_layout(plt.gcf())
        _check(axes)
        tm.close()
        # same expectation when pandas lays out the subplots itself
        gs, axes = _generate_4_axes_via_gridspec()
        with tm.assert_produces_warning(UserWarning):
            axes = df.plot(subplots=True, ax=axes, sharex=True)
        _check(axes)
        tm.close()
        gs, axes = _generate_4_axes_via_gridspec()
        # without sharex, no labels should be touched!
        for ax in axes:
            df.plot(x="a", y="b", title="title", ax=ax)
        gs.tight_layout(plt.gcf())
        for ax in axes:
            assert len(ax.lines) == 1
            self._check_visible(ax.get_yticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(minor=True), visible=True)
        tm.close()
    @pytest.mark.slow
    def test_sharey_and_ax(self):
        """sharey=True on gridspec axes hides only the right column's y tick
        labels; without sharing nothing is hidden.
        """
        # https://github.com/pandas-dev/pandas/issues/9737 using gridspec,
        # the axis in fig.get_axis() are sorted differently than pandas
        # expected them, so make sure that only the right ones are removed
        import matplotlib.pyplot as plt
        gs, axes = _generate_4_axes_via_gridspec()
        df = DataFrame(
            {
                "a": [1, 2, 3, 4, 5, 6],
                "b": [1, 2, 3, 4, 5, 6],
                "c": [1, 2, 3, 4, 5, 6],
                "d": [1, 2, 3, 4, 5, 6],
            }
        )
        def _check(axes):
            # x labels stay visible everywhere; y labels only on left column
            for ax in axes:
                assert len(ax.lines) == 1
                self._check_visible(ax.get_xticklabels(), visible=True)
                self._check_visible(ax.get_xticklabels(minor=True), visible=True)
            for ax in [axes[0], axes[1]]:
                self._check_visible(ax.get_yticklabels(), visible=True)
            for ax in [axes[2], axes[3]]:
                self._check_visible(ax.get_yticklabels(), visible=False)
        for ax in axes:
            df.plot(x="a", y="b", title="title", ax=ax, sharey=True)
        gs.tight_layout(plt.gcf())
        _check(axes)
        tm.close()
        # same expectation when pandas lays out the subplots itself
        gs, axes = _generate_4_axes_via_gridspec()
        with tm.assert_produces_warning(UserWarning):
            axes = df.plot(subplots=True, ax=axes, sharey=True)
        gs.tight_layout(plt.gcf())
        _check(axes)
        tm.close()
        gs, axes = _generate_4_axes_via_gridspec()
        # without sharey, no labels should be touched!
        for ax in axes:
            df.plot(x="a", y="b", title="title", ax=ax)
        gs.tight_layout(plt.gcf())
        for ax in axes:
            assert len(ax.lines) == 1
            self._check_visible(ax.get_yticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(minor=True), visible=True)
    @td.skip_if_no_scipy
    def test_memory_leak(self):
        """ Check that every plot type gets properly collected. """
        import gc
        import weakref
        results = {}
        for kind in plotting.PlotAccessor._all_kinds:
            args = {}
            if kind in ["hexbin", "scatter", "pie"]:
                # these kinds require x/y columns
                df = self.hexbin_df
                args = {"x": "A", "y": "B"}
            elif kind == "area":
                # area requires non-negative values
                df = self.tdf.abs()
            else:
                df = self.tdf
            # Use a weakref so we can see if the object gets collected without
            # also preventing it from being collected
            results[kind] = weakref.proxy(df.plot(kind=kind, **args))
        # have matplotlib delete all the figures
        tm.close()
        # force a garbage collection
        gc.collect()
        for key in results:
            # check that every plot was collected
            with pytest.raises(ReferenceError):
                # need to actually access something to get an error
                results[key].lines
    @pytest.mark.slow
    def test_df_subplots_patterns_minorticks(self):
        """Shared-x subplots hide the upper axes' major AND minor x tick
        labels; unshared subplots leave everything visible (GH 10657).
        """
        # GH 10657
        import matplotlib.pyplot as plt
        df = DataFrame(
            np.random.randn(10, 2),
            index=date_range("1/1/2000", periods=10),
            columns=list("AB"),
        )
        # shared subplots (sharing set up by matplotlib)
        fig, axes = plt.subplots(2, 1, sharex=True)
        axes = df.plot(subplots=True, ax=axes)
        for ax in axes:
            assert len(ax.lines) == 1
            self._check_visible(ax.get_yticklabels(), visible=True)
        # xaxis of 1st ax must be hidden
        self._check_visible(axes[0].get_xticklabels(), visible=False)
        self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
        self._check_visible(axes[1].get_xticklabels(), visible=True)
        self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
        tm.close()
        # sharing requested via the pandas sharex kwarg instead
        fig, axes = plt.subplots(2, 1)
        with tm.assert_produces_warning(UserWarning):
            axes = df.plot(subplots=True, ax=axes, sharex=True)
        for ax in axes:
            assert len(ax.lines) == 1
            self._check_visible(ax.get_yticklabels(), visible=True)
        # xaxis of 1st ax must be hidden
        self._check_visible(axes[0].get_xticklabels(), visible=False)
        self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
        self._check_visible(axes[1].get_xticklabels(), visible=True)
        self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
        tm.close()
        # not shared
        fig, axes = plt.subplots(2, 1)
        axes = df.plot(subplots=True, ax=axes)
        for ax in axes:
            assert len(ax.lines) == 1
            self._check_visible(ax.get_yticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(minor=True), visible=True)
        tm.close()
@pytest.mark.slow
def test_df_gridspec_patterns(self):
    # GH 10819: label visibility when axes come from a GridSpec layout.
    import matplotlib.gridspec as gridspec
    import matplotlib.pyplot as plt

    ts = Series(np.random.randn(10), index=date_range("1/1/2000", periods=10))
    df = DataFrame(np.random.randn(10, 2), index=ts.index, columns=list("AB"))

    def _vertical_grid():
        # two axes stacked vertically (2/3 + 1/3 of the figure)
        grid = gridspec.GridSpec(3, 1)
        fig = plt.figure()
        return fig.add_subplot(grid[:2, :]), fig.add_subplot(grid[2, :])

    def _horizontal_grid():
        # two axes side by side (2/3 + 1/3 of the figure)
        grid = gridspec.GridSpec(1, 3)
        fig = plt.figure()
        return fig.add_subplot(grid[:, :2]), fig.add_subplot(grid[:, 2])

    # plain plotting: every axis keeps all of its labels
    for ax1, ax2 in [_vertical_grid(), _horizontal_grid()]:
        ax1 = ts.plot(ax=ax1)
        assert len(ax1.lines) == 1
        ax2 = df.plot(ax=ax2)
        assert len(ax2.lines) == 2
        for axis in [ax1, ax2]:
            self._check_visible(axis.get_yticklabels(), visible=True)
            self._check_visible(axis.get_xticklabels(), visible=True)
            self._check_visible(axis.get_xticklabels(minor=True), visible=True)
        tm.close()

    # subplots=True
    for ax1, ax2 in [_vertical_grid(), _horizontal_grid()]:
        axes = df.plot(subplots=True, ax=[ax1, ax2])
        assert len(ax1.lines) == 1
        assert len(ax2.lines) == 1
        for axis in axes:
            self._check_visible(axis.get_yticklabels(), visible=True)
            self._check_visible(axis.get_xticklabels(), visible=True)
            self._check_visible(axis.get_xticklabels(minor=True), visible=True)
        tm.close()

    # vertical / subplots / sharex=True / sharey=True
    ax1, ax2 = _vertical_grid()
    with tm.assert_produces_warning(UserWarning):
        axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True, sharey=True)
    assert len(axes[0].lines) == 1
    assert len(axes[1].lines) == 1
    for axis in [ax1, ax2]:
        # y labels stay visible because there is only one column
        self._check_visible(axis.get_yticklabels(), visible=True)
    # x labels of the top axis are hidden
    self._check_visible(axes[0].get_xticklabels(), visible=False)
    self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
    self._check_visible(axes[1].get_xticklabels(), visible=True)
    self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
    tm.close()

    # horizontal / subplots / sharex=True / sharey=True
    ax1, ax2 = _horizontal_grid()
    with tm.assert_produces_warning(UserWarning):
        axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True, sharey=True)
    assert len(axes[0].lines) == 1
    assert len(axes[1].lines) == 1
    self._check_visible(axes[0].get_yticklabels(), visible=True)
    # y labels of the right-hand axis are hidden
    self._check_visible(axes[1].get_yticklabels(), visible=False)
    for axis in [ax1, ax2]:
        # x labels stay visible because there is only one row
        self._check_visible(axis.get_xticklabels(), visible=True)
        self._check_visible(axis.get_xticklabels(minor=True), visible=True)
    tm.close()

    def _boxed_grid():
        # 2x2 arrangement of unequal panels
        grid = gridspec.GridSpec(3, 3)
        fig = plt.figure()
        ax1 = fig.add_subplot(grid[:2, :2])
        ax2 = fig.add_subplot(grid[:2, 2])
        ax3 = fig.add_subplot(grid[2, :2])
        ax4 = fig.add_subplot(grid[2, 2])
        return ax1, ax2, ax3, ax4

    axes = _boxed_grid()
    df = DataFrame(np.random.randn(10, 4), index=ts.index, columns=list("ABCD"))
    axes = df.plot(subplots=True, ax=axes)
    for axis in axes:
        assert len(axis.lines) == 1
        # labels are visible because these axes are not shared
        self._check_visible(axis.get_yticklabels(), visible=True)
        self._check_visible(axis.get_xticklabels(), visible=True)
        self._check_visible(axis.get_xticklabels(minor=True), visible=True)
    tm.close()

    # boxed / subplots / sharex=True / sharey=True
    axes = _boxed_grid()
    with tm.assert_produces_warning(UserWarning):
        axes = df.plot(subplots=True, ax=axes, sharex=True, sharey=True)
    for axis in axes:
        assert len(axis.lines) == 1
    for axis in [axes[0], axes[2]]:  # left column
        self._check_visible(axis.get_yticklabels(), visible=True)
    for axis in [axes[1], axes[3]]:  # right column
        self._check_visible(axis.get_yticklabels(), visible=False)
    for axis in [axes[0], axes[1]]:  # top row
        self._check_visible(axis.get_xticklabels(), visible=False)
        self._check_visible(axis.get_xticklabels(minor=True), visible=False)
    for axis in [axes[2], axes[3]]:  # bottom row
        self._check_visible(axis.get_xticklabels(), visible=True)
        self._check_visible(axis.get_xticklabels(minor=True), visible=True)
    tm.close()
@pytest.mark.slow
def test_df_grid_settings(self):
    # GH 9792: plots must default to the rcParams['axes.grid'] setting.
    frame = DataFrame({"a": [1, 2, 3], "b": [2, 3, 4]})
    self._check_grid_settings(
        frame,
        plotting.PlotAccessor._dataframe_kinds,
        kws={"x": "a", "y": "b"},
    )
def test_invalid_colormap(self):
    # an unknown colormap name must raise, not plot garbage
    frame = DataFrame(randn(3, 2), columns=["A", "B"])
    with pytest.raises(ValueError):
        frame.plot(colormap="invalid_colormap")
def test_plain_axes(self):
    # GH11556: supplied ax is a SubplotAxes, but the figure also
    # contains a plain Axes object.
    fig, ax = self.plt.subplots()
    fig.add_axes([0.2, 0.2, 0.2, 0.2])
    Series(rand(10)).plot(ax=ax)

    # GH11520: supplied ax is a plain Axes; the cmap keyword creates an
    # additional axis for the colorbar, so the figure holds multiple axes.
    frame = DataFrame({"a": randn(8), "b": randn(8)})
    fig = self.plt.figure()
    ax = fig.add_axes((0, 0, 1, 1))
    frame.plot(kind="scatter", ax=ax, x="a", y="b", c="a", cmap="hsv")

    # axes appended through mpl_toolkits helpers
    fig, ax = self.plt.subplots()
    from mpl_toolkits.axes_grid1 import make_axes_locatable

    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    Series(rand(10)).plot(ax=ax)
    Series(rand(10)).plot(ax=cax)

    fig, ax = self.plt.subplots()
    from mpl_toolkits.axes_grid1.inset_locator import inset_axes

    iax = inset_axes(ax, width="30%", height=1.0, loc=3)
    Series(rand(10)).plot(ax=ax)
    Series(rand(10)).plot(ax=iax)
def test_passed_bar_colors(self):
    # a ListedColormap passed via cmap must color the bars in order
    import matplotlib as mpl

    expected = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]
    cmap = mpl.colors.ListedColormap(expected)
    barplot = pd.DataFrame([[1, 2, 3]]).plot(kind="bar", cmap=cmap)
    assert expected == [patch.get_facecolor() for patch in barplot.patches]
def test_rcParams_bar_colors(self):
    # the rcParams color cycle must drive bar colors when no cmap is given
    import matplotlib as mpl

    expected = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]
    with mpl.rc_context(rc={"axes.prop_cycle": mpl.cycler("color", expected)}):
        barplot = pd.DataFrame([[1, 2, 3]]).plot(kind="bar")
    assert expected == [patch.get_facecolor() for patch in barplot.patches]
@pytest.mark.parametrize("method", ["line", "barh", "bar"])
def test_secondary_axis_font_size(self, method):
    # GH 12565: fontsize must propagate to the secondary y-axis
    df = (
        pd.DataFrame(np.random.randn(15, 2), columns=list("AB"))
        .assign(C=lambda df: df.B.cumsum())
        .assign(D=lambda df: df.C * 1.1)
    )
    fontsize = 20
    plot_kwargs = {
        "secondary_y": ["C", "D"],
        "fontsize": fontsize,
        "mark_right": True,
    }
    ax = getattr(df.plot, method)(**plot_kwargs)
    self._check_ticks_props(axes=ax.right_ax, ylabelsize=fontsize)
@pytest.mark.slow
def test_x_string_values_ticks(self):
    # GH 7612 / GH 22334: a string index must produce fixed xtick positions
    frame = pd.DataFrame(
        {
            "sales": [3, 2, 3],
            "visits": [20, 42, 28],
            "day": ["Monday", "Tuesday", "Wednesday"],
        }
    )
    ax = frame.plot.area(x="day")
    ax.set_xlim(-1, 3)
    labels = [tick.get_text() for tick in ax.get_xticklabels()]
    positions = dict(zip(labels, ax.get_xticks()))
    # each label must stay at its original integer position
    assert positions["Monday"] == 0.0
    assert positions["Tuesday"] == 1.0
    assert positions["Wednesday"] == 2.0
@pytest.mark.slow
def test_x_multiindex_values_ticks(self):
    # GH 15912: a MultiIndex must produce fixed xtick positions
    index = pd.MultiIndex.from_product([[2012, 2013], [1, 2]])
    frame = pd.DataFrame(np.random.randn(4, 2), columns=["A", "B"], index=index)
    ax = frame.plot()
    ax.set_xlim(-1, 4)
    labels = [tick.get_text() for tick in ax.get_xticklabels()]
    positions = dict(zip(labels, ax.get_xticks()))
    # each tuple label must stay at its original integer position
    assert positions["(2012, 1)"] == 0.0
    assert positions["(2012, 2)"] == 1.0
    assert positions["(2013, 1)"] == 2.0
    assert positions["(2013, 2)"] == 3.0
@pytest.mark.parametrize("kind", ["line", "area"])
def test_xlim_plot_line(self, kind):
    # GH 27686: plot.line and plot.area must pad xlim beyond the data range
    frame = pd.DataFrame([2, 4], index=[1, 2])
    ax = frame.plot(kind=kind)
    left, right = ax.get_xlim()
    assert left < 1
    assert right > 2
def test_xlim_plot_line_correctly_in_mixed_plot_type(self):
    # GH 27686: xlim must still be correct when the same ax carries
    # several different plot kinds.
    fig, ax = self.plt.subplots()

    indexes = ["k1", "k2", "k3", "k4"]
    frame = pd.DataFrame(
        {
            "s1": [1000, 2000, 1500, 2000],
            "s2": [900, 1400, 2000, 3000],
            "s3": [1500, 1500, 1600, 1200],
            "secondary_y": [1, 3, 4, 3],
        },
        index=indexes,
    )
    frame[["s1", "s2", "s3"]].plot.bar(ax=ax, stacked=False)
    frame[["secondary_y"]].plot(ax=ax, secondary_y=True)

    left, right = ax.get_xlim()
    assert left < 0
    assert right > 3

    # axis labels must also survive the mixed plotting
    labels = [tick.get_text() for tick in ax.get_xticklabels()]
    assert labels == indexes
def test_subplots_sharex_false(self):
    # GH 25160: with sharex=False the two subplots must keep
    # independent x tick labels.
    frame = pd.DataFrame(np.random.rand(10, 2))
    frame.iloc[5:, 1] = np.nan
    frame.iloc[:5, 0] = np.nan

    figs, axs = self.plt.subplots(2, 1)
    frame.plot.line(ax=axs, subplots=True, sharex=False)

    tm.assert_numpy_array_equal(axs[0].get_xticks(), np.arange(4.5, 10, 0.5))
    tm.assert_numpy_array_equal(axs[1].get_xticks(), np.arange(-0.5, 5, 0.5))
def test_plot_no_rows(self):
    # GH 27758: plotting an empty numeric frame must yield one empty line
    frame = pd.DataFrame(columns=["foo"], dtype=int)
    assert frame.empty

    ax = frame.plot()
    lines = ax.get_lines()
    assert len(lines) == 1
    assert len(lines[0].get_xdata()) == 0
    assert len(lines[0].get_ydata()) == 0
def test_plot_no_numeric_data(self):
    # plotting a frame with no numeric columns must raise
    frame = pd.DataFrame(["a", "b", "c"])
    with pytest.raises(TypeError):
        frame.plot()
def test_missing_markers_legend(self):
    # GH 14958: markers must appear in the legend for every line
    frame = pd.DataFrame(np.random.randn(8, 3), columns=["A", "B", "C"])
    ax = frame.plot(y=["A"], marker="x", linestyle="solid")
    frame.plot(y=["B"], marker="o", linestyle="dotted", ax=ax)
    frame.plot(y=["C"], marker="<", linestyle="dotted", ax=ax)

    self._check_legend_labels(ax, labels=["A", "B", "C"])
    self._check_legend_marker(ax, expected_markers=["x", "o", "<"])
def test_missing_markers_legend_using_style(self):
    # GH 14563: markers given via ``style`` must also reach the legend
    frame = pd.DataFrame(
        {
            "A": [1, 2, 3, 4, 5, 6],
            "B": [2, 4, 1, 3, 2, 4],
            "C": [3, 3, 2, 6, 4, 2],
            "X": [1, 2, 3, 4, 5, 6],
        }
    )
    fig, ax = self.plt.subplots()
    for column in "ABC":
        frame.plot("X", column, label=column, ax=ax, style=".")

    self._check_legend_labels(ax, labels=["A", "B", "C"])
    self._check_legend_marker(ax, expected_markers=[".", ".", "."])
def test_colors_of_columns_with_same_name(self):
    # GH 11136: duplicate column labels must still get legend entries
    # whose colors match their lines.
    left = pd.DataFrame({"b": [0, 1, 0], "a": [1, 2, 3]})
    right = pd.DataFrame({"a": [2, 4, 6]})
    combined = pd.concat([left, right], axis=1)

    ax = combined.plot()
    for handle, line in zip(ax.get_legend().legendHandles, ax.lines):
        assert handle.get_color() == line.get_color()
@pytest.mark.parametrize(
    "index_name, old_label, new_label",
    [
        (None, "", "new"),
        ("old", "old", "new"),
        (None, "", ""),
        (None, "", 1),
        (None, "", [1, 2]),
    ],
)
@pytest.mark.parametrize("kind", ["line", "area", "bar"])
def test_xlabel_ylabel_dataframe_single_plot(
    self, kind, index_name, old_label, new_label
):
    # GH 9093: xlabel/ylabel keywords on a single-axes plot
    frame = pd.DataFrame([[1, 2], [2, 5]], columns=["Type A", "Type B"])
    frame.index.name = index_name

    # by default ylabel is blank and xlabel is the index name
    ax = frame.plot(kind=kind)
    assert ax.get_xlabel() == old_label
    assert ax.get_ylabel() == ""

    # explicit keywords override the defaults (non-string values are str()'d)
    ax = frame.plot(kind=kind, ylabel=new_label, xlabel=new_label)
    assert ax.get_ylabel() == str(new_label)
    assert ax.get_xlabel() == str(new_label)
@pytest.mark.parametrize(
    "index_name, old_label, new_label",
    [
        (None, "", "new"),
        ("old", "old", "new"),
        (None, "", ""),
        (None, "", 1),
        (None, "", [1, 2]),
    ],
)
@pytest.mark.parametrize("kind", ["line", "area", "bar"])
def test_xlabel_ylabel_dataframe_subplots(
    self, kind, index_name, old_label, new_label
):
    # GH 9093: xlabel/ylabel keywords must apply to every subplot
    frame = pd.DataFrame([[1, 2], [2, 5]], columns=["Type A", "Type B"])
    frame.index.name = index_name

    # by default ylabel is blank and xlabel is the index name
    axes = frame.plot(kind=kind, subplots=True)
    assert all(ax.get_ylabel() == "" for ax in axes)
    assert all(ax.get_xlabel() == old_label for ax in axes)

    # explicit keywords override the defaults on every subplot
    axes = frame.plot(kind=kind, ylabel=new_label, xlabel=new_label, subplots=True)
    assert all(ax.get_ylabel() == str(new_label) for ax in axes)
    assert all(ax.get_xlabel() == str(new_label) for ax in axes)
def _generate_4_axes_via_gridspec():
    """Create a 2x2 GridSpec and return it with its four axes.

    Axes are ordered top-left, lower-left, top-right, lower-right.
    """
    import matplotlib as mpl
    import matplotlib.gridspec  # noqa
    import matplotlib.pyplot as plt

    gs = mpl.gridspec.GridSpec(2, 2)
    # column-major order: (0,0), (1,0), (0,1), (1,1)
    axes = [plt.subplot(gs[row, col]) for col in (0, 1) for row in (0, 1)]
    return gs, axes
| 38.83015 | 88 | 0.566749 |
46428abf5eaf0aee9b7ec2c3cb2fef41a8d23b43 | 7,697 | py | Python | beartype/_util/hint/pep/proposal/pep484585/utilpepfunc.py | vcokltfre/beartype | 9a1f81bea37a0eb4f51443dded8cec2e751e42a8 | [
"MIT"
] | null | null | null | beartype/_util/hint/pep/proposal/pep484585/utilpepfunc.py | vcokltfre/beartype | 9a1f81bea37a0eb4f51443dded8cec2e751e42a8 | [
"MIT"
] | null | null | null | beartype/_util/hint/pep/proposal/pep484585/utilpepfunc.py | vcokltfre/beartype | 9a1f81bea37a0eb4f51443dded8cec2e751e42a8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide :pep:`484`- and :pep:`585`-compliant **decorated callable type
hint utilities** (i.e., callables generically applicable to both :pep:`484`-
and :pep:`585`-compliant type hints directly annotating the user-defined
callable currently being decorated by :func:`beartype.beartype`).
This private submodule is *not* intended for importation by downstream callers.
'''
# ....................{ IMPORTS }....................
from beartype.roar import BeartypeDecorHintPep484585Exception
from beartype._data.hint.pep.sign.datapepsigns import (
HintSignAsyncGenerator,
HintSignCoroutine,
HintSignGenerator,
)
from beartype._util.func.utilfunctest import (
is_func_async_coroutine,
is_func_async_generator,
is_func_sync_generator,
)
from beartype._util.hint.pep.proposal.pep484585.utilpeparg import (
get_hint_pep484585_args_3)
from beartype._util.hint.pep.utilpepget import get_hint_pep_sign_or_none
from beartype._util.text.utiltextlabel import (
prefix_callable_decorated_return)
from beartype._util.utiltyping import TypeException
from collections.abc import Callable
# See the "beartype.cave" submodule for further commentary.
__all__ = ['STAR_IMPORTS_CONSIDERED_HARMFUL']
# ....................{ GETTERS                           }....................
#FIXME: Unit test us up, please.
def reduce_hint_pep484585_func_return(func: Callable) -> object:
    '''
    Reduce the possibly PEP-noncompliant type hint annotating the return of
    the passed callable if any to a simpler form to generate optimally
    efficient type-checking by the :func:`beartype.beartype` decorator.

    Parameters
    ----------
    func : Callable
        Currently decorated callable to be inspected. The caller guarantees
        this callable's ``__annotations__`` to contain a ``'return'`` key.

    Returns
    ----------
    object
        Possibly reduced type hint annotating this callable's return (e.g.,
        the return child type hint of a ``Coroutine[...]`` hint annotating a
        coroutine).

    Raises
    ----------
    :exc:`BeartypeDecorHintPep484585Exception`
        If this callable is either:

        * A generator *not* annotated by a :attr:`typing.Generator` type hint.
        * An asynchronous generator *not* annotated by a
          :attr:`typing.AsyncGenerator` type hint.
    '''

    # Type hint annotating this callable's return, which the caller has
    # already explicitly guaranteed to exist.
    hint = func.__annotations__['return']

    # Sign uniquely identifying this hint if any *OR* "None" otherwise (e.g.,
    # if this hint is an isinstanceable class).
    hint_sign = get_hint_pep_sign_or_none(hint)

    # If the decorated callable is a coroutine...
    if is_func_async_coroutine(func):
        # If this hint is "Coroutine[...]"...
        if hint_sign is HintSignCoroutine:
            # 3-tuple of all child type hints subscripting this hint if
            # subscripted by three such hints *OR* raise an exception.
            hint_args = get_hint_pep484585_args_3(hint)

            # Reduce this hint to the last child type hint subscripting this
            # hint, whose value is the return type hint for this coroutine.
            #
            # All other child type hints are currently ignorable, as the
            # *ONLY* means of validating objects sent to and yielded from a
            # coroutine is to wrap that coroutine with a @beartype-specific
            # wrapper object, which we are currently unwilling to do. Why?
            # Because safety and efficiency. Coroutines receiving and
            # yielding multiple objects are effectively iterators;
            # type-checking all iterator values is an O(n) rather than O(1)
            # operation, violating the core @beartype guarantee.
            #
            # Likewise, the parent "Coroutine" type is *ALWAYS* ignorable.
            # Since Python itself implicitly guarantees *ALL* coroutines to
            # return coroutine objects, validating that constraint is silly.
            hint = hint_args[-1]
        # Else, this hint is *NOT* "Coroutine[...]". In this case, silently
        # accept this hint as if this hint had instead been expanded to the
        # semantically equivalent PEP 484- or 585-compliant coroutine hint
        # "Coroutine[None, None, {hint}]".
    # Else, the decorated callable is *NOT* a coroutine.
    #
    # If the decorated callable is an asynchronous generator...
    elif is_func_async_generator(func):
        # If this hint is *NOT* "AsyncGenerator[...]", this type hint is
        # invalid. In this case, raise an exception.
        if hint_sign is not HintSignAsyncGenerator:
            _die_of_hint_return_invalid(
                func=func,
                exception_suffix=(
                    ' (i.e., expected either '
                    'collections.abc.AsyncGenerator[...] or '
                    'typing.AsyncGenerator[...] type hint).'
                ),
            )
        # Else, this hint is "AsyncGenerator[...]".
    # Else, the decorated callable is *NOT* an asynchronous generator.
    #
    # If the decorated callable is a synchronous generator...
    elif is_func_sync_generator(func):
        # If this hint is *NOT* "Generator[...]", this type hint is invalid.
        # In this case, raise an exception.
        if hint_sign is not HintSignGenerator:
            _die_of_hint_return_invalid(
                func=func,
                exception_suffix=(
                    ' (i.e., expected either '
                    'collections.abc.Generator[...] or '
                    'typing.Generator[...] type hint).'
                ),
            )
        # Else, this hint is "Generator[...]".
    # Else, the decorated callable is none of the kinds detected above.

    # Return this possibly reduced hint.
    return hint
# ....................{ PRIVATE ~ validators              }....................
def _die_of_hint_return_invalid(
    # Mandatory parameters.
    func: Callable,
    exception_suffix: str,

    # Optional parameters.
    exception_cls: TypeException = BeartypeDecorHintPep484585Exception,
) -> str:
    '''
    Raise an exception of the passed type whose message is suffixed by the
    passed substring, explaining why the possibly PEP-noncompliant type hint
    annotating the return of the passed decorated callable is contextually
    invalid.

    Parameters
    ----------
    func : Callable
        Decorated callable whose return is annotated by an invalid type hint.
        The caller guarantees its ``__annotations__`` to contain a
        ``'return'`` key.
    exception_suffix : str
        Substring suffixing the exception message to be raised.
    exception_cls : TypeException
        Type of exception to be raised. Defaults to
        :exc:`BeartypeDecorHintPep484585Exception`.

    Raises
    ----------
    :exc:`exception_cls`
        Exception explaining the invalidity of this return type hint.
    '''
    # Validate all parameters before formatting the exception message.
    assert callable(func), f'{repr(func)} not callable.'
    assert isinstance(exception_cls, type), f'{repr(exception_cls)} not class.'
    assert isinstance(exception_suffix, str), (
        f'{repr(exception_suffix)} not string.')

    # Type hint annotating this callable's return, which the caller has
    # already explicitly guaranteed to exist.
    hint = func.__annotations__['return']

    # Unconditionally raise; this helper never returns normally.
    raise exception_cls(
        f'{prefix_callable_decorated_return(func)}type hint '
        f'{repr(hint)} contextually invalid{exception_suffix}.'
    )
| 41.38172 | 79 | 0.647655 |
5d7a58a62ebb1847820a05f978db56aae334076d | 432 | py | Python | zconnect/util/exceptions/api.py | zconnect-iot/zconnect-django | 5c569f54f100e23d72e2ac4de795739ea461a431 | [
"MIT"
] | 2 | 2018-08-19T16:16:39.000Z | 2019-06-11T02:23:50.000Z | zconnect/util/exceptions/api.py | zconnect-iot/zconnect-django | 5c569f54f100e23d72e2ac4de795739ea461a431 | [
"MIT"
] | 2 | 2018-07-05T09:51:54.000Z | 2018-07-06T13:12:04.000Z | zconnect/util/exceptions/api.py | zconnect-iot/zconnect-django | 5c569f54f100e23d72e2ac4de795739ea461a431 | [
"MIT"
] | null | null | null | from django.utils.translation import ugettext_lazy as _
from rest_framework import exceptions, status
class BadRequestError(exceptions.APIException):
    """HTTP 400 exception whose detail is a dict.

    Equivalent to ``rest_framework.ValidationError``, which for some reason
    returns a list rather than a dict -- a dict is what we normally want.
    """

    status_code = status.HTTP_400_BAD_REQUEST
    default_detail = _('Invalid input.')
    default_code = 'invalid'
| 36 | 75 | 0.766204 |
e713383c90e7630975cb05368f5e60a2e460e0e0 | 11,342 | py | Python | api/serializers.py | hotosm/hot-exports-two | d60530445e89b2a46bd55ea3b7c2e72409b0f493 | [
"BSD-3-Clause"
] | 95 | 2017-09-29T13:20:38.000Z | 2022-03-14T06:43:47.000Z | api/serializers.py | hotosm/hot-exports-two | d60530445e89b2a46bd55ea3b7c2e72409b0f493 | [
"BSD-3-Clause"
] | 229 | 2015-07-29T08:50:27.000Z | 2017-09-21T18:05:56.000Z | api/serializers.py | hotosm/hot-exports-two | d60530445e89b2a46bd55ea3b7c2e72409b0f493 | [
"BSD-3-Clause"
] | 30 | 2017-10-06T23:53:48.000Z | 2022-03-10T06:17:07.000Z | """
Provides serialization for API responses.
See `DRF serializer documentation <http://www.django-rest-framework.org/api-guide/serializers/>`_
Used by the View classes api/views.py to serialize API responses as JSON or HTML.
See DEFAULT_RENDERER_CLASSES setting in core.settings.contrib for the enabled renderers.
"""
# -*- coding: utf-8 -*-
import logging
import django.core.exceptions
from django.contrib.auth.models import User
from django.db import transaction
from jobs.models import HDXExportRegion, Job, SavedFeatureSelection, validate_aoi, validate_mbtiles, PartnerExportRegion
from rest_framework import serializers
from rest_framework_gis import serializers as geo_serializers
from tasks.models import ExportRun, ExportTask
# Get an instance of a logger
LOG = logging.getLogger(__name__)
class UserSerializer(serializers.ModelSerializer):
    """Minimal representation of a Django auth user (username only)."""

    class Meta:
        model = User
        fields = ('username', )
class ExportTaskSerializer(serializers.ModelSerializer):
    """Serialize a single export task, including timing and download info."""

    class Meta:
        model = ExportTask
        fields = ('uid', 'name', 'status', 'started_at', 'finished_at',
                  'duration', 'filesize_bytes', 'download_urls')
class ExportRunSerializer(serializers.ModelSerializer):
    """Serialize an export run with its nested tasks and owning user."""

    # tasks are read-only: they are created by the task runner, not the API
    tasks = ExportTaskSerializer(many=True, read_only=True)
    user = UserSerializer(
        read_only=True, default=serializers.CurrentUserDefault())

    class Meta:
        model = ExportRun
        lookup_field = 'uid'
        fields = ('uid', 'started_at', 'finished_at', 'duration',
                  'elapsed_time', 'user', 'size', 'status', 'tasks')
class ConfigurationSerializer(serializers.ModelSerializer):
    """Serialize a saved feature selection (YAML configuration)."""

    user = UserSerializer(
        read_only=True, default=serializers.CurrentUserDefault())

    class Meta:
        model = SavedFeatureSelection
        fields = ('uid', 'name', 'description', 'yaml', 'public', 'user','pinned')
class JobGeomSerializer(serializers.ModelSerializer):
    """Serialize only a Job's geometry.

    Job geometries can be large, so they are served separately instead of
    being nested within Job payloads.
    """

    class Meta:
        model = Job
        fields = ('the_geom', )
class JobSerializer(serializers.ModelSerializer):
    """Serialize an export Job.

    The full AOI geometry is write-only (it can be large); readers get the
    simplified geometry instead.
    """

    user = UserSerializer(
        read_only=True, default=serializers.CurrentUserDefault())

    class Meta:
        model = Job
        fields = ('id', 'uid', 'user', 'name', 'description', 'event',
                  'export_formats', 'published', 'feature_selection',
                  'buffer_aoi', 'osma_link', 'created_at', 'area', 'the_geom',
                  'simplified_geom', 'mbtiles_source', 'mbtiles_minzoom', 'mbtiles_maxzoom','pinned','unfiltered')
        extra_kwargs = {
            'the_geom': {
                'write_only': True
            },
            'simplified_geom': {
                'read_only': True
            }
        }

    def validate(self,data):
        """Check the AOI and MBTiles settings, mapping Django validation
        errors onto the offending field."""
        try:
            validate_aoi(data['the_geom'])
        except django.core.exceptions.ValidationError as err:
            raise serializers.ValidationError({'the_geom':err.messages[0]})
        try:
            validate_mbtiles(data)
        except django.core.exceptions.ValidationError as err:
            raise serializers.ValidationError({'mbtiles_source': err.messages[0]})
        return data
def validate_model(model):
    """Run Django's ``full_clean`` on *model*, re-raising any validation
    failure as a DRF ``ValidationError`` keyed by field."""
    try:
        model.full_clean()
    except django.core.exceptions.ValidationError as err:
        raise serializers.ValidationError(err.message_dict)
class PartnerExportRegionListSerializer(serializers.ModelSerializer):
    """List-view serializer for partner export regions.

    Exposes only the simplified geometry; the full AOI is omitted to keep
    list responses small.
    """

    export_formats = serializers.ListField()
    feature_selection = serializers.CharField()
    simplified_geom = geo_serializers.GeometryField(required=False)
    name = serializers.CharField()

    class Meta:  # noqa
        model = PartnerExportRegion
        fields = ('id', 'feature_selection',
                  'schedule_period', 'schedule_hour', 'export_formats',
                  'name', 'last_run', 'next_run',
                  'simplified_geom', 'job_uid', 'last_size','group_name')
class PartnerExportRegionSerializer(serializers.ModelSerializer):  # noqa
    """Detail serializer for partner export regions.

    Internally a region is a hidden Job plus a PartnerExportRegion row, but
    the API presents them as one entity; ``create``/``update`` keep the two
    models in sync inside a transaction.
    """

    export_formats = serializers.ListField()
    feature_selection = serializers.CharField()
    simplified_geom = geo_serializers.GeometryField(required=False)
    the_geom = geo_serializers.GeometryField()
    name = serializers.CharField()
    event = serializers.CharField()
    description = serializers.CharField()

    class Meta:  # noqa
        model = PartnerExportRegion
        fields = ('id', 'feature_selection',
                  'schedule_period', 'schedule_hour', 'export_formats',
                  'name', 'event', 'description', 'last_run', 'next_run',
                  'simplified_geom', 'job_uid',
                  'the_geom','group','planet_file', 'polygon_centroid')
        extra_kwargs = {
            'simplified_geom': {
                'read_only': True
            },
            'the_geom': {
                'write_only': True
            }
        }

    def create(self, validated_data):  # noqa
        """Create the hidden Job and its PartnerExportRegion atomically."""

        def _pick(source, wanted_keys):
            # subset of `source` restricted to the keys that are present
            return dict((k, source[k]) for k in wanted_keys if k in source)

        job_dict = _pick(validated_data, [
            'the_geom', 'export_formats', 'feature_selection',
        ])
        job_dict['user'] = self.context['request'].user
        job_dict['name'] = validated_data.get('name')
        job_dict['event'] = validated_data.get('event') or ""
        job_dict['description'] = validated_data.get('description') or ""

        region_dict = _pick(validated_data, [
            'schedule_period', 'schedule_hour','group','planet_file', 'polygon_centroid'
        ])

        job = Job(**job_dict)
        job.hidden = True
        job.unlimited_extent = True
        validate_model(job)

        # on creation, the requester must belong to the target group
        if not self.context['request'].user.groups.filter(name=region_dict['group'].name).exists():
            raise serializers.ValidationError({'group':'You are not a member of this group.'})

        with transaction.atomic():
            job.save()
            region_dict['job'] = job
            region = PartnerExportRegion(**region_dict)
            validate_model(region)
            region.save()

        return region

    def update(self, instance, validated_data):  # noqa
        """Update the region and its underlying Job atomically."""

        def _apply(model, source, keys):
            # copy each present key from `source` onto `model`
            for key in keys:
                if key in source:
                    setattr(model, key, source[key])

        # when re-assigning the region, check group membership again
        if not self.context['request'].user.groups.filter(name= validated_data['group'].name).exists():
            raise serializers.ValidationError({'group':'You are not a member of this group.'})

        job = instance.job
        _apply(job, validated_data, [
            'the_geom', 'export_formats', 'feature_selection'
        ])
        job.name = validated_data.get('name')
        job.event = validated_data.get('event') or ""
        job.description = validated_data.get('description') or ""
        validate_model(job)

        _apply(instance, validated_data, [
            'schedule_period', 'schedule_hour', 'group','planet_file', 'polygon_centroid'
        ])
        validate_model(instance)

        with transaction.atomic():
            instance.save()
            job.save()

        return instance
class HDXExportRegionListSerializer(serializers.ModelSerializer):  # noqa
    """List-view serializer for HDX export regions.

    The full AOI geometry is omitted (it can be large); only the simplified
    geometry is exposed.
    """

    export_formats = serializers.ListField()
    dataset_prefix = serializers.CharField()
    feature_selection = serializers.CharField()
    simplified_geom = geo_serializers.GeometryField(required=False)
    name = serializers.CharField()
    buffer_aoi = serializers.BooleanField()

    class Meta:  # noqa
        model = HDXExportRegion
        fields = ('id', 'dataset_prefix', 'datasets', 'feature_selection',
                  'schedule_period', 'schedule_hour', 'export_formats',
                  'locations', 'name', 'last_run', 'next_run',
                  'simplified_geom', 'dataset_prefix', 'job_uid', 'license',
                  'subnational', 'extra_notes', 'is_private', 'buffer_aoi', 'last_size')
class HDXExportRegionSerializer(serializers.ModelSerializer):  # noqa
    """Detail serializer for HDX export regions.

    Internally an export region is a hidden Job plus an HDXExportRegion row,
    but API users see a single entity; ``create``/``update`` keep the two
    models in sync inside a transaction.
    """

    export_formats = serializers.ListField()
    dataset_prefix = serializers.CharField()
    feature_selection = serializers.CharField()
    simplified_geom = geo_serializers.GeometryField(required=False)
    the_geom = geo_serializers.GeometryField()
    name = serializers.CharField()
    buffer_aoi = serializers.BooleanField()

    class Meta:  # noqa
        model = HDXExportRegion
        fields = ('id', 'dataset_prefix', 'datasets', 'feature_selection',
                  'schedule_period', 'schedule_hour', 'export_formats',
                  'locations', 'name', 'last_run', 'next_run',
                  'simplified_geom', 'dataset_prefix', 'job_uid', 'license',
                  'subnational', 'extra_notes', 'is_private', 'buffer_aoi',
                  'the_geom','planet_file')
        extra_kwargs = {
            'simplified_geom': {
                'read_only': True
            },
            'the_geom': {
                'write_only': True
            }
        }

    def create(self, validated_data):  # noqa
        """Create the hidden Job and its HDXExportRegion atomically."""

        def _pick(source, wanted_keys):
            # subset of `source` restricted to the keys that are present
            return dict((k, source[k]) for k in wanted_keys if k in source)

        job_dict = _pick(validated_data, [
            'the_geom', 'export_formats', 'feature_selection', 'buffer_aoi'
        ])
        job_dict['user'] = self.context['request'].user
        # the dataset prefix doubles as the Job name; the human-readable
        # region name becomes the Job description
        job_dict['name'] = validated_data.get('dataset_prefix')
        job_dict['description'] = validated_data.get('name')

        region_dict = _pick(validated_data, [
            'extra_notes', 'is_private', 'locations', 'license',
            'schedule_period', 'schedule_hour', 'subnational','planet_file'
        ])

        job = Job(**job_dict)
        job.hidden = True
        job.unlimited_extent = True
        validate_model(job)

        with transaction.atomic():
            job.save()
            region_dict['job'] = job
            region = HDXExportRegion(**region_dict)
            validate_model(region)
            region.save()

        return region

    def update(self, instance, validated_data):  # noqa
        """Update the region and its underlying Job atomically."""

        def _apply(model, source, keys):
            # copy each present key from `source` onto `model`
            for key in keys:
                if key in source:
                    setattr(model, key, source[key])

        job = instance.job
        _apply(job, validated_data, [
            'the_geom', 'export_formats', 'feature_selection', 'buffer_aoi'
        ])
        job.name = validated_data.get('dataset_prefix')
        job.description = validated_data.get('name')
        validate_model(job)

        _apply(instance, validated_data, [
            'extra_notes', 'is_private', 'locations', 'license',
            'schedule_period', 'schedule_hour', 'subnational', 'planet_file'
        ])
        validate_model(instance)

        with transaction.atomic():
            instance.save()
            job.save()

        return instance
| 37.93311 | 120 | 0.62608 |
73e79e34b76061568863171e1d94a7587a931977 | 2,996 | py | Python | src/getargs.py | bmoxon/azfinsim | 3e203855410abd6c9636377b93ed5d33ac896c41 | [
"MIT"
] | 5 | 2021-02-24T19:10:34.000Z | 2022-02-24T21:11:24.000Z | src/getargs.py | bmoxon/azfinsim | 3e203855410abd6c9636377b93ed5d33ac896c41 | [
"MIT"
] | null | null | null | src/getargs.py | bmoxon/azfinsim | 3e203855410abd6c9636377b93ed5d33ac896c41 | [
"MIT"
] | 2 | 2021-05-03T11:57:31.000Z | 2021-12-09T10:24:29.000Z | #! /usr/bin/env python3
# Common arg parser
import argparse
def getargs(progname, argv=None):
    """Build and parse the common AzFinSim command line.

    Args:
        progname: program name shown in usage/help output.
        argv: optional list of argument strings to parse. Defaults to
            ``None``, which makes argparse fall back to ``sys.argv[1:]``
            — backward compatible with the original zero-extra-arg call,
            while allowing callers (and tests) to inject arguments.

    Returns:
        argparse.Namespace holding all parsed options.
    """
    parser = argparse.ArgumentParser(progname)

    # -- Batch parameters
    parser.add_argument("-p", "--pool-id", help="<name of the azure batch pool to submit the job to>")
    parser.add_argument("-j", "--job-id", default="AzFinSimJob", help="<jobid prefix string>")
    parser.add_argument("-t", "--threads", type=int, help='number of client-side threads to use when submitting to batch')

    # -- Cache parameters
    parser.add_argument("--cache-type", default="none", required=True,
                        choices=['redis', 'filesystem', 'none'],
                        help="cache type: redis|filesystem|none"),
    parser.add_argument("--cache-name", required=True, default="None", help="<redis or filesystem hostname/ip (port must be open)>")
    parser.add_argument("--cache-port", default=6380, type=int, help="redis port number: default=6380 [SSL]")
    parser.add_argument("--cache-key", default="None", help="cache access key (pulled from keyvault)")
    parser.add_argument("--cache-ssl", default="yes", choices=['yes', 'no'], help="use SSL for redis cache access")
    parser.add_argument("--cache-path", default="None", help="Cache Filesystem Path (not needed for redis")

    # -- algorithm/work per thread
    parser.add_argument("--tasks", default=0, type=int, help="tasks to run on the compute pool (batch tasks)")
    # NOTE(review): string-to-bool via lambda accepts any text; only the literal
    # "true" (case-insensitive) yields True — kept as-is for compatibility.
    parser.add_argument('--harvester', default=False, type=lambda x: (str(x).lower() == 'true'), help="use harvester scheduler: true or false")
    parser.add_argument("-f", "--format", default="varxml", choices=['varxml', 'eyxml'], help="format of trade data: varxml|eyxml")
    parser.add_argument("-s", "--start-trade", default=0, type=int, help="trade range to process: starting trade number")
    parser.add_argument("-w", "--trade-window", required=True, type=int, help="number of trades to process")
    parser.add_argument("-a", "--algorithm", default="deltavega", choices=['deltavega', 'pvonly', 'synthetic'], help="pricing algorithm")

    # -- synthetic workload options
    parser.add_argument("-d", "--delay-start", type=int, default=0, help="delay startup time in seconds")
    parser.add_argument("-m", "--mem-usage", type=int, default=16, help="memory usage for task in MB")
    parser.add_argument("--task-duration", type=int, default=20, help="task duration in milliseconds")
    parser.add_argument("--failure", type=float, default=0.0, help="inject random task failure with this probability")

    # -- logs & metrics
    # parser.add_argument("-l", "--loglevel", type=int, default=20, choices=range(0,60,10),
    #                     help="loglevel: 0=NOTSET,10=DEBUG,20=INFO,30=WARN,40=ERROR,50=CRITICAL")
    parser.add_argument('--verbose', default=False, type=lambda x: (str(x).lower() == 'true'), help="verbose output: true or false")
    parser.add_argument("--appinsights-key", help="Azure Application Insights Key")

    # Passing argv=None preserves the original behavior (parse sys.argv).
    return parser.parse_args(argv)
| 61.142857 | 143 | 0.678238 |
63e0a025ca9432dd8c70bd28ba63750a56c5a98a | 400 | py | Python | fdk_client/platform/models/SubscriptionConfigResponse.py | kavish-d/fdk-client-python | a1023eb530473322cb52e095fc4ceb226c1e6037 | [
"MIT"
] | null | null | null | fdk_client/platform/models/SubscriptionConfigResponse.py | kavish-d/fdk-client-python | a1023eb530473322cb52e095fc4ceb226c1e6037 | [
"MIT"
] | null | null | null | fdk_client/platform/models/SubscriptionConfigResponse.py | kavish-d/fdk-client-python | a1023eb530473322cb52e095fc4ceb226c1e6037 | [
"MIT"
] | null | null | null | """Platform Models."""
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
class SubscriptionConfigResponse(BaseSchema):
    """Schema for a subscription-configuration API response.

    All fields are optional (``required=False``); absent keys are simply
    omitted from the (de)serialized payload.
    """
    # Payment swagger.json
    # NOTE(review): the comment above suggests this mirrors a payment-service
    # swagger definition — confirm against that spec.
    # Free-form configuration payload returned by the platform.
    config = fields.Dict(required=False)
    # Whether the subscription-config lookup succeeded.
    success = fields.Boolean(required=False)
    # Name/identifier of the payment aggregator, per the field name.
    aggregator = fields.Str(required=False)
| 15.384615 | 45 | 0.7125 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.