hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a02be85ee1128250c007a1ebdfb06ba8e895abb
| 67
|
py
|
Python
|
lost_ds/detection/api.py
|
l3p-cv/lost_ds
|
4a2f3ef027128b759d28e67cb1fdaa0a557e343c
|
[
"MIT"
] | 1
|
2022-03-30T11:29:57.000Z
|
2022-03-30T11:29:57.000Z
|
lost_ds/detection/api.py
|
l3p-cv/lost_ds
|
4a2f3ef027128b759d28e67cb1fdaa0a557e343c
|
[
"MIT"
] | null | null | null |
lost_ds/detection/api.py
|
l3p-cv/lost_ds
|
4a2f3ef027128b759d28e67cb1fdaa0a557e343c
|
[
"MIT"
] | null | null | null |
from lost_ds.detection.detection import detection_dataset, bbox_nms
| 67
| 67
| 0.895522
|
4a02bf94ca2c2762a277da6df12ccea5a26653a2
| 11,689
|
py
|
Python
|
tools/python/boutiques/creator.py
|
shots47s/boutiques
|
831f937a6b1491af63a800786967e4d9bca1e262
|
[
"MIT"
] | 2
|
2016-11-01T15:08:37.000Z
|
2018-09-07T20:56:43.000Z
|
tools/python/boutiques/creator.py
|
shots47s/boutiques
|
831f937a6b1491af63a800786967e4d9bca1e262
|
[
"MIT"
] | null | null | null |
tools/python/boutiques/creator.py
|
shots47s/boutiques
|
831f937a6b1491af63a800786967e4d9bca1e262
|
[
"MIT"
] | 1
|
2018-03-20T15:51:00.000Z
|
2018-03-20T15:51:00.000Z
|
#!/usr/bin/env python
import tempfile
import argparse
import sys
import os
import simplejson as json
import os.path as op
from jsonschema import validate, ValidationError
from argparse import ArgumentParser
from boutiques import __file__ as bfile
from boutiques.logger import raise_error, print_info, print_warning
import subprocess
# An exception class specific to creating descriptors.
class CreatorError(ValidationError):
    """Raised when a Boutiques descriptor cannot be created.

    Subclasses jsonschema's ValidationError so callers can handle
    creation failures the same way as schema-validation failures.
    """
    pass
class CreateDescriptor(object):
    """Builds a Boutiques descriptor from a template, optionally enriched
    from a Docker image and/or an ``argparse.ArgumentParser``."""

    def __init__(self, parser=None, **kwargs):
        """Load the basic descriptor template and populate it.

        Keyword arguments used here: ``docker_image`` (image name to
        inspect), ``use_singularity`` (emit a Singularity container
        section instead of Docker), ``tags``, plus anything consumed by
        parseParser/parseAction (``execname``, ``verbose``, ...).
        Raises CreatorError (via raise_error) if ``parser`` is not an
        argparse.ArgumentParser.
        """
        # Start from the bundled template shipped next to the boutiques
        # package (templates/basic.json).
        template = op.join(op.split(bfile)[0], "templates", "basic.json")
        with open(template) as f:
            self.descriptor = json.load(f)

        if(kwargs.get('docker_image')):
            self.parse_docker(self.descriptor,
                              kwargs.get('docker_image'),
                              kwargs.get('use_singularity'))

        # Counter used to generate unique IDs for subparsers that have no
        # explicit `dest` (see parseAction case 3).
        self.sp_count = 0
        if parser is not None:
            self.parser = parser
            # Inputs are rebuilt from the parser, so drop the template's
            # placeholder sections that won't be regenerated.
            self.descriptor["inputs"] = []
            self.descriptor["tags"] = kwargs.get("tags") or {}
            del self.descriptor["groups"]
            del self.descriptor["output-files"]
            del self.descriptor["container-image"]
            del self.descriptor["error-codes"]
            if type(parser) is not argparse.ArgumentParser:
                raise_error(CreatorError, "Invalid argument parser")
            self.parseParser(**kwargs)

    def save(self, filename):
        """Write the descriptor to ``filename`` as pretty-printed JSON."""
        with open(filename, "w") as f:
            f.write(json.dumps(self.descriptor, indent=4, sort_keys=True))

    def createInvocation(self, arguments):
        """Return an invocation dict from a parsed-arguments namespace,
        dropping entries whose value is None."""
        argdict = vars(arguments)
        argdict = {k: v for k, v in argdict.items() if v is not None}
        return argdict

    def parse_docker(self, descriptor, docker_image_name, use_singularity):
        """Fill ``descriptor['container-image']`` and, when the Docker CLI
        is available, enrich the descriptor with metadata pulled from the
        image (entrypoint, author, repo tags, comment)."""
        cont_image = {}
        # Basic image config
        if use_singularity:
            cont_image['type'] = 'singularity'
            cont_image['index'] = 'docker://'
            cont_image['image'] = docker_image_name
        else:
            cont_image['type'] = 'docker'
            cont_image['image'] = docker_image_name
        descriptor['container-image'] = cont_image

        # If Docker isn't installed, that's all we can do!
        # NOTE(review): `type docker` assumes a POSIX shell; non-zero exit
        # means the command is unavailable.
        if subprocess.Popen("type docker", shell=True).wait():
            return

        # If Docker is here, let's fetch metadata from the image
        # Properties found in the image metadata
        ((stdout, stderr),
         returncode) = self.executor("docker pull "+docker_image_name)
        if returncode:
            raise_error(CreatorError, "Cannot pull Docker image {0}: {1} "
                        "{2} {3}".format(docker_image_name, stdout,
                                         os.linesep, stderr))
        ((stdout, stderr),
         returncode) = self.executor("docker inspect "+docker_image_name)
        if returncode:
            raise_error(CreatorError, "Cannot inspect Docker image {0}: {1} "
                        "{2} {3}".format(docker_image_name, stdout,
                                         os.linesep, stderr))
        # `docker inspect` prints a JSON array; the image is element 0.
        image_attrs = json.loads(stdout.decode("utf-8"))[0]
        if (image_attrs.get('ContainerConfig')):
            container_config = image_attrs['ContainerConfig']
            entrypoint = container_config.get('Entrypoint')
            if entrypoint:
                cont_image['entrypoint'] = True
                tokens = descriptor['command-line'].split(" ")
                # Replace the first token in the command line template,
                # presumably the executable, by the entry point
                descriptor['command-line'] = (" ".join(entrypoint +
                                                       tokens[1:])
                                              )
                descriptor['name'] = entrypoint[0]
            workingDir = container_config.get('WorkingDir')
            if workingDir:
                raise_error(CreatorError, "The container image has a working "
                            "dir, this is currently not supported.")
        if image_attrs.get('Author'):
            descriptor['author'] = image_attrs.get('Author')
        if image_attrs.get('RepoTags'):
            descriptor['tool-version'] = " ".join(image_attrs.get('RepoTags'))
        if image_attrs.get('Comment'):
            descriptor['description'] = image_attrs.get('Comment')

    def parseParser(self, **kwargs):
        """Translate every action of ``self.parser`` into a descriptor
        input; ``execname`` seeds the command-line template."""
        self.descriptor["command-line"] = kwargs.get("execname")
        for act in self.parser._actions:
            tmp = self.parseAction(act, **kwargs)
            # parseAction returns {} for skipped/duplicate actions.
            if bool(tmp):
                self.descriptor["inputs"] += [tmp]

    def parseAction(self, action, **kwargs):
        """Convert one argparse action into a descriptor input dict.

        Returns {} for skipped actions; when called with ``subaction=True``
        returns a ``(input, required)`` tuple instead, so subparser
        handling can track which members are mandatory.
        """
        # Case 1: input is a help flag
        # Desired outcome: we skip it
        if type(action) is argparse._HelpAction:
            if kwargs.get("verbose"):
                print_info("_HelpAction: Skipping")
            # If this action belongs to a subparser, return a flag alongside
            # the empty object, indicating it is not required
            if kwargs.get("subaction"):
                return {}, False
            return {}

        # Case 2: input is a subparser
        # Desired outcome: we add the subparser and options, and an input for
        # each of the subparser options
        elif (type(action) is argparse._SubParsersAction and
              not kwargs.get("addParser")):
            if kwargs.get("verbose"):
                print_info("_SubParsersAction: Interpretting & Adding")

            # First, add the subparser itself as an input.
            # (addParser=True routes the recursive call to case 3.)
            subparser = self.parseAction(action, addParser=True)
            subparser["value-requires"] = {}
            inpts = {}

            # For every option specified by the subparser...
            for act in subparser["value-choices"]:
                inpts[act] = []
                subparser["value-requires"][act] = []

                # ... And for each choice specified by each subparser...
                for subact in action.choices[act]._actions:

                    # Process the action, and record its "required" status
                    tmpinput, reqd = self.parseAction(subact, subaction=True,
                                                      **kwargs)
                    # If it's not empty, add it to an inputs dictionaryi, and
                    # add the input to the descriptor.
                    if tmpinput != {}:
                        inpts[act] += [tmpinput["id"]]

                        # If the input was required by the subparser, record it
                        if reqd:
                            subparser["value-requires"][act] += [tmpinput["id"]]
                        self.descriptor["inputs"] += [tmpinput]

            # Once all subparsers are processed, identify which inputs need to
            # be disabled by which subparsers.
            inpt_ids = set([inp
                            for iact in inpts
                            for inp in inpts[iact]])
            subparser["value-disables"] = {}
            for act in subparser["value-choices"]:
                # Add all IDs created by the subparser that do not also belong
                # to the current selection to the disabled list.
                subparser["value-disables"][act] = [ckey
                                                    for ckey in inpt_ids
                                                    if ckey not in inpts[act]]
            return subparser

        # Case 3: input is a regular input
        # Desired outcome: we add it, unless it's already been added
        else:
            if kwargs.get("verbose"):
                actstring = str(type(action))
                actstring = actstring.split("'")[1].split(".")[-1]
                print_info("{0}: Adding".format(actstring))
            actdict = vars(action)
            # argparse uses "==SUPPRESS==" as dest for subparser actions
            # created without an explicit `dest`.
            if action.dest == "==SUPPRESS==":
                adest = "subparser_{0}".format(self.sp_count)
                if kwargs.get("verbose"):
                    print_warning("Subparser has no destination set, "
                                  "invocation parsing may not work as "
                                  "expected. This can be fixed by adding "
                                  "\"dest='mysubparser'\" to subparser "
                                  "creation.")
                self.sp_count += 1
            else:
                adest = action.dest

            # If an input already exists with this ID, don't re-add it
            if any(adest == it["id"] for it in self.descriptor["inputs"]):
                if kwargs.get("verbose"):
                    print_info("Duplicate: Argument won't be added multiple "
                               "times ({0})".format(adest))
                # If this action belongs to a subparser return a flag alongside
                # the empty object, indicating it is not required
                if kwargs.get("subaction"):
                    return {}, False
                return {}

            # If no argument exists yet by this name, process and add it.
            # First, by setting some reasonable defaults or obvious values,
            # and then by updating others.
            newinput = {
                "id": adest,
                "name": adest,
                "description": action.help,
                "optional": kwargs.get("subaction") or not action.required,
                "type": "String",
                "value-key": "[{0}]".format(adest.upper().strip("[]"))
            }

            if action.type is not None:
                if action.type in [int, float]:
                    newinput["type"] = "Number"
                if action.type == list:
                    newinput["list"] = True

            if action.nargs is not None:
                if ((isinstance(action.nargs, str) and action.nargs == "+")
                   or (isinstance(action.nargs, int) and action.nargs > 1)):
                    newinput["list"] = True

            # NOTE(review): falsy defaults (0, False, "") are intentionally
            # or accidentally skipped here — confirm against descriptor spec.
            if action.default:
                newinput["default-value"] = action.default

            if action.choices:
                try:
                    # Subparsers have choices in the form of OrderedDicts...
                    newinput["value-choices"] = list(action.choices.keys())
                except AttributeError as e:
                    # ... but "choice" inputs have them in the form a list.
                    newinput["value-choices"] = action.choices

            if len(action.option_strings):
                newinput["command-line-flag"] = action.option_strings[0]

            if type(action) is argparse._StoreTrueAction:
                newinput["type"] = "Flag"

            # Append the input's value-key to the command-line template.
            self.descriptor["command-line"] += " [{0}]".format(
                adest.upper().strip("[]"))
            # If this action belongs to a subparser, return a flag along
            # with the object, indicating its required/not required status.
            if kwargs.get("subaction"):
                return newinput, action.required
            return newinput

    def executor(self, command):
        """Run ``command`` through the shell.

        Returns ``((stdout, stderr), returncode)``; stdout/stderr are the
        bytes captured by ``communicate()``. Re-raises OSError/ValueError
        from Popen after writing a short note to stderr.
        """
        try:
            process = subprocess.Popen(command, shell=True,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
        except OSError as e:
            sys.stderr.write('OS Error during attempted execution!')
            raise e
        except ValueError as e:
            sys.stderr.write('Input Value Error during attempted execution!')
            raise e
        else:
            # communicate() waits for termination, so returncode is set.
            return process.communicate(), process.returncode
| 43.779026
| 80
| 0.531098
|
4a02c12c561e394af9d25a14b03e2e022a0575e9
| 4,016
|
py
|
Python
|
workflow/scripts/combine-motevo-results.py
|
zavolanlab/bindz-rbp
|
7dddbb65c00450f0bb8e91465dbc8a9c9ef4690c
|
[
"Apache-2.0"
] | null | null | null |
workflow/scripts/combine-motevo-results.py
|
zavolanlab/bindz-rbp
|
7dddbb65c00450f0bb8e91465dbc8a9c9ef4690c
|
[
"Apache-2.0"
] | 2
|
2022-02-13T22:22:44.000Z
|
2022-02-14T16:15:26.000Z
|
workflow/scripts/combine-motevo-results.py
|
zavolanlab/bindz-rbp
|
7dddbb65c00450f0bb8e91465dbc8a9c9ef4690c
|
[
"Apache-2.0"
] | null | null | null |
##############################################################################
# AUTHOR: Krish Agarwal
# AFFILIATION: University_of_Basel
# CONTACT: akrish136@gmail.com
# CREATED: 14-07-2020
# LICENSE: Apache_2.0
##############################################################################
"""Combine MotEvo posterior-site files from several directories into one TSV,
sorted by binding posterior (highest first)."""

##### Importing necessary libraries #####
import os
import sys
import pandas as pd
import csv
from argparse import ArgumentParser, RawTextHelpFormatter

# Columns (and their order) written to the output TSV.
OUTPUT_COLUMNS = [
    "pwm_id",
    "binding_position",
    "binding_sequence",
    "binding_posterior",
    "LogLik_ratio_fg_bg",
]


def build_arg_parser():
    """Build the command-line parser for this script."""
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawTextHelpFormatter)
    parser.add_argument(
        "--input_directories",
        nargs="+",
        dest="input_directories",
        help="input directory for the script to search",
        required=True,
        metavar="DIR",
    )
    parser.add_argument(
        "--filename",
        dest="filename",
        help="filename to be searched in input_directories",
        required=True,
        metavar="NAME",
    )
    parser.add_argument(
        "--outfile",
        dest="outfile",
        help="location and name of the tsv file",
        required=True,
        metavar="FILE",
    )
    return parser


def collect_hits(input_directories, filename):
    """Parse ``filename`` inside each input directory.

    Each record spans two space-separated lines:
    line 1: binding position, strand, posterior, PWM id, region;
    line 2: binding sequence, log-likelihood ratio fg/bg, fasta record.
    Directories are sorted so the result is independent of CLI order.

    Returns a dict mapping column name -> list of string values, kept
    exactly as read (the last field of a line retains its newline, as in
    the original implementation).
    """
    tabb = {
        "strand": [],
        "binding_position": [],
        "binding_posterior": [],
        "pwm_id": [],
        "binding_region": [],
        "binding_sequence": [],
        "LogLik_ratio_fg_bg": [],
        "fasta_record": [],
    }
    for directory in sorted(input_directories):
        path = os.path.relpath(os.path.join(directory, filename))
        # `with` guarantees the file is closed (the original leaked handles).
        with open(path, "r") as handle:
            for line_index, line in enumerate(handle):
                fields = line.split(" ")
                if line_index % 2 == 0:  # first line of a record
                    tabb["binding_position"].append(fields[0])
                    tabb["strand"].append(fields[1])
                    tabb["binding_posterior"].append(fields[2])
                    tabb["pwm_id"].append(fields[3])
                    tabb["binding_region"].append(fields[4])
                else:  # second line of a record
                    tabb["binding_sequence"].append(fields[0])
                    tabb["LogLik_ratio_fg_bg"].append(fields[1])
                    tabb["fasta_record"].append(fields[2])
    return tabb


def main():
    """Entry point: parse options, combine the results, write the TSV."""
    parser = build_arg_parser()
    if len(sys.argv) == 1:
        # No arguments at all: print the full help instead of a terse
        # argparse error. (The original checked this *after* parse_args,
        # where it was unreachable, and wrapped parse_args in
        # `except Exception`, which never catches argparse's SystemExit.)
        parser.print_help()
        sys.exit(1)
    options = parser.parse_args()

    tabb = collect_hits(options.input_directories, str(options.filename))

    df = pd.DataFrame({col: pd.Series(tabb[col]) for col in OUTPUT_COLUMNS})
    # Sort numerically: posteriors are read as strings, and a plain string
    # sort would misorder values with different digit counts.
    order = df["binding_posterior"].astype(float).sort_values(
        ascending=False).index
    df = df.loc[order]
    df.to_csv(os.path.relpath(str(options.outfile)), index=False, sep="\t",
              header=OUTPUT_COLUMNS)


if __name__ == "__main__":
    main()
| 31.622047
| 121
| 0.641434
|
4a02c166750025825945e772f39566ab9cd953ce
| 113,047
|
py
|
Python
|
blender/arm/props_ui.py
|
trisadmeslek/armory
|
0abd91c72886aa267552cad98c54425445bb5810
|
[
"Zlib"
] | null | null | null |
blender/arm/props_ui.py
|
trisadmeslek/armory
|
0abd91c72886aa267552cad98c54425445bb5810
|
[
"Zlib"
] | null | null | null |
blender/arm/props_ui.py
|
trisadmeslek/armory
|
0abd91c72886aa267552cad98c54425445bb5810
|
[
"Zlib"
] | null | null | null |
import json
import os
import shutil
import bpy
from bpy.props import *
from arm.lightmapper.panels import scene
import arm.api
import arm.assets as assets
from arm.exporter import ArmoryExporter
import arm.log as log
import arm.logicnode.replacement
import arm.make as make
import arm.make_state as state
import arm.props as props
import arm.props_properties
import arm.props_traits
import arm.nodes_logic
import arm.proxy
import arm.ui_icons as ui_icons
import arm.utils
# Hot-reload support: when Armory reloads its Python modules (e.g. after an
# SDK update), refresh every module-level reference so this module sees the
# new code; on first import, just register this module for future reloads.
if arm.is_reload(__name__):
    arm.api = arm.reload_module(arm.api)
    assets = arm.reload_module(assets)
    arm.exporter = arm.reload_module(arm.exporter)
    # Re-import the name that was imported `from arm.exporter` above, since
    # reloading the module does not update the local binding.
    from arm.exporter import ArmoryExporter
    log = arm.reload_module(log)
    arm.logicnode.replacement = arm.reload_module(arm.logicnode.replacement)
    make = arm.reload_module(make)
    state = arm.reload_module(state)
    props = arm.reload_module(props)
    arm.props_properties = arm.reload_module(arm.props_properties)
    arm.props_traits = arm.reload_module(arm.props_traits)
    arm.nodes_logic = arm.reload_module(arm.nodes_logic)
    arm.proxy = arm.reload_module(arm.proxy)
    ui_icons = arm.reload_module(ui_icons)
    arm.utils = arm.reload_module(arm.utils)
else:
    arm.enable_reload(__name__)
class ARM_PT_ObjectPropsPanel(bpy.types.Panel):
    """Menu in object region.

    Draws Armory's per-object export/spawn settings plus the lightmapper
    (TLM) options for mesh objects.
    """
    bl_label = "Armory Props"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "object"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        obj = bpy.context.object
        if obj == None:
            return

        col = layout.column()
        col.prop(obj, 'arm_export')
        # Nothing else applies when the object is excluded from export.
        if not obj.arm_export:
            return
        col.prop(obj, 'arm_spawn')
        col.prop(obj, 'arm_mobile')
        col.prop(obj, 'arm_animation_enabled')

        if obj.type == 'MESH':
            layout.prop(obj, 'arm_instanced')

        wrd = bpy.data.worlds['Arm']
        layout.prop_search(obj, "arm_tilesheet", wrd, "arm_tilesheetlist", text="Tilesheet")
        if obj.arm_tilesheet != '':
            # Resolve the selected tilesheet by name to offer its actions.
            selected_ts = None
            for ts in wrd.arm_tilesheetlist:
                if ts.name == obj.arm_tilesheet:
                    selected_ts = ts
                    break
            layout.prop_search(obj, "arm_tilesheet_action", selected_ts, "arm_tilesheetactionlist", text="Action")

        # Properties list
        arm.props_properties.draw_properties(layout, obj)

        # Lightmapping props
        if obj.type == "MESH":
            row = layout.row(align=True)
            row.prop(obj.TLM_ObjectProperties, "tlm_mesh_lightmap_use")

            if obj.TLM_ObjectProperties.tlm_mesh_lightmap_use:
                row = layout.row()
                row.prop(obj.TLM_ObjectProperties, "tlm_use_default_channel")

                if not obj.TLM_ObjectProperties.tlm_use_default_channel:
                    row = layout.row()
                    row.prop_search(obj.TLM_ObjectProperties, "tlm_uv_channel", obj.data, "uv_layers", text='UV Channel')

                row = layout.row()
                row.prop(obj.TLM_ObjectProperties, "tlm_mesh_lightmap_resolution")

                if obj.TLM_ObjectProperties.tlm_use_default_channel:
                    row = layout.row()
                    row.prop(obj.TLM_ObjectProperties, "tlm_mesh_lightmap_unwrap_mode")
                row = layout.row()

                if obj.TLM_ObjectProperties.tlm_mesh_lightmap_unwrap_mode == "AtlasGroupA":
                    if scene.TLM_AtlasListItem >= 0 and len(scene.TLM_AtlasList) > 0:
                        row = layout.row()
                        item = scene.TLM_AtlasList[scene.TLM_AtlasListItem]
                        row.prop_search(obj.TLM_ObjectProperties, "tlm_atlas_pointer", scene, "TLM_AtlasList", text='Atlas Group')
                        row = layout.row()
                    else:
                        row = layout.label(text="Add Atlas Groups from the scene lightmapping settings.")
                        row = layout.row()
                else:
                    row = layout.row()
                    row.prop(obj.TLM_ObjectProperties, "tlm_postpack_object")
                    row = layout.row()

                    # Post-packing is only offered outside atlas-group mode.
                    if obj.TLM_ObjectProperties.tlm_postpack_object and obj.TLM_ObjectProperties.tlm_mesh_lightmap_unwrap_mode != "AtlasGroupA":
                        if scene.TLM_PostAtlasListItem >= 0 and len(scene.TLM_PostAtlasList) > 0:
                            row = layout.row()
                            item = scene.TLM_PostAtlasList[scene.TLM_PostAtlasListItem]
                            row.prop_search(obj.TLM_ObjectProperties, "tlm_postatlas_pointer", scene, "TLM_PostAtlasList", text='Atlas Group')
                            row = layout.row()
                        else:
                            row = layout.label(text="Add Atlas Groups from the scene lightmapping settings.")
                            row = layout.row()

                row.prop(obj.TLM_ObjectProperties, "tlm_mesh_unwrap_margin")
                row = layout.row()
                row.prop(obj.TLM_ObjectProperties, "tlm_mesh_filter_override")
                row = layout.row()

                # Per-object denoise/filter settings (override scene defaults).
                if obj.TLM_ObjectProperties.tlm_mesh_filter_override:
                    row = layout.row(align=True)
                    row.prop(obj.TLM_ObjectProperties, "tlm_mesh_filtering_mode")
                    row = layout.row(align=True)
                    if obj.TLM_ObjectProperties.tlm_mesh_filtering_mode == "Gaussian":
                        row.prop(obj.TLM_ObjectProperties, "tlm_mesh_filtering_gaussian_strength")
                        row = layout.row(align=True)
                        row.prop(obj.TLM_ObjectProperties, "tlm_mesh_filtering_iterations")
                    elif obj.TLM_ObjectProperties.tlm_mesh_filtering_mode == "Box":
                        row.prop(obj.TLM_ObjectProperties, "tlm_mesh_filtering_box_strength")
                        row = layout.row(align=True)
                        row.prop(obj.TLM_ObjectProperties, "tlm_mesh_filtering_iterations")
                    elif obj.TLM_ObjectProperties.tlm_mesh_filtering_mode == "Bilateral":
                        row.prop(obj.TLM_ObjectProperties, "tlm_mesh_filtering_bilateral_diameter")
                        row = layout.row(align=True)
                        row.prop(obj.TLM_ObjectProperties, "tlm_mesh_filtering_bilateral_color_deviation")
                        row = layout.row(align=True)
                        row.prop(obj.TLM_ObjectProperties, "tlm_mesh_filtering_bilateral_coordinate_deviation")
                        row = layout.row(align=True)
                        row.prop(obj.TLM_ObjectProperties, "tlm_mesh_filtering_iterations")
                    else:
                        row.prop(obj.TLM_ObjectProperties, "tlm_mesh_filtering_median_kernel", expand=True)
                        row = layout.row(align=True)
                        row.prop(obj.TLM_ObjectProperties, "tlm_mesh_filtering_iterations")

                #If UV Packer installed
                if "UV-Packer" in bpy.context.preferences.addons.keys():
                    row.prop(obj.TLM_ObjectProperties, "tlm_use_uv_packer")
                    if obj.TLM_ObjectProperties.tlm_use_uv_packer:
                        row = layout.row(align=True)
                        row.prop(obj.TLM_ObjectProperties, "tlm_uv_packer_padding")
                        row = layout.row(align=True)
                        row.prop(obj.TLM_ObjectProperties, "tlm_uv_packer_packing_engine")
class ARM_PT_ModifiersPropsPanel(bpy.types.Panel):
    """Armory section in the modifier tab of the properties editor."""
    bl_label = "Armory Props"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "modifier"
    bl_options = {'DEFAULT_CLOSED'}

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        obj = bpy.context.object
        # Identity comparison with None (PEP 8); `== None` relies on
        # operator overloading and is not reliable for bpy types.
        if obj is None:
            return
        # Modified meshes must be re-exported after changes.
        layout.operator("arm.invalidate_cache")
class ARM_PT_ParticlesPropsPanel(bpy.types.Panel):
    """Armory section in the particle tab of the properties editor."""
    bl_label = "Armory Props"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "particle"
    bl_options = {'DEFAULT_CLOSED'}

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        psys = bpy.context.particle_system
        # `is None` instead of the original `== None` (PEP 8 identity check).
        if psys is None:
            return
        layout.prop(psys.settings, 'arm_loop')
        layout.prop(psys.settings, 'arm_count_mult')
class ARM_PT_PhysicsPropsPanel(bpy.types.Panel):
    """Armory section in the physics tab of the properties editor."""
    bl_label = "Armory Props"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "physics"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        obj = bpy.context.object
        # `is None` instead of the original `== None` (PEP 8 identity check).
        if obj is None:
            return

        rb = obj.rigid_body
        if rb is not None:
            col = layout.column()
            row = col.row()
            row.alignment = 'RIGHT'
            # Show how the exporter will classify this rigid body.
            rb_type = 'Dynamic'
            if ArmoryExporter.rigid_body_static(rb):
                rb_type = 'Static'
            if rb.kinematic:
                rb_type = 'Kinematic'
            row.label(text=(f'Rigid Body Export Type: {rb_type}'), icon='AUTO')

            layout.prop(obj, 'arm_rb_linear_factor')
            layout.prop(obj, 'arm_rb_angular_factor')
            layout.prop(obj, 'arm_rb_trigger')
            layout.prop(obj, 'arm_rb_ccd')

        if obj.soft_body is not None:
            layout.prop(obj, 'arm_soft_body_margin')

        if obj.rigid_body_constraint is not None:
            layout.prop(obj, 'arm_relative_physics_constraint')
# Menu in data region
class ARM_PT_DataPropsPanel(bpy.types.Panel):
    """Armory section in the object-data tab; contents depend on the
    active object's type."""
    bl_label = "Armory Props"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "data"
    bl_options = {'DEFAULT_CLOSED'}

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        obj = bpy.context.object
        # `is None` instead of the original `== None` (PEP 8 identity check).
        if obj is None:
            return

        wrd = bpy.data.worlds['Arm']
        if obj.type == 'CAMERA':
            layout.prop(obj.data, 'arm_frustum_culling')
        elif obj.type == 'MESH' or obj.type == 'FONT' or obj.type == 'META':
            layout.prop(obj.data, 'arm_dynamic_usage')
            layout.operator("arm.invalidate_cache")
        elif obj.type == 'LIGHT':
            layout.prop(obj.data, 'arm_clip_start')
            layout.prop(obj.data, 'arm_clip_end')
            layout.prop(obj.data, 'arm_fov')
            layout.prop(obj.data, 'arm_shadows_bias')
            # These two are world-level settings shown here for convenience.
            layout.prop(wrd, 'arm_light_ies_texture')
            layout.prop(wrd, 'arm_light_clouds_texture')
        elif obj.type == 'SPEAKER':
            layout.prop(obj.data, 'arm_play_on_start')
            layout.prop(obj.data, 'arm_loop')
            layout.prop(obj.data, 'arm_stream')
        elif obj.type == 'ARMATURE':
            # (Removed a redundant trailing `pass` after this prop.)
            layout.prop(obj.data, 'arm_autobake')
class ARM_PT_WorldPropsPanel(bpy.types.Panel):
    """Armory's world-level settings (procedural cloud rendering)."""
    bl_label = "Armory World Properties"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "world"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        world = context.world
        if world is None:
            return

        layout.prop(world, 'arm_use_clouds')
        # Cloud parameters are greyed out while clouds are disabled.
        cloud_col = layout.column(align=True)
        cloud_col.enabled = world.arm_use_clouds
        for prop_id in ('arm_darken_clouds',
                        'arm_clouds_lower',
                        'arm_clouds_upper',
                        'arm_clouds_precipitation',
                        'arm_clouds_secondary',
                        'arm_clouds_wind',
                        'arm_clouds_steps'):
            cloud_col.prop(world, prop_id)
class ARM_PT_ScenePropsPanel(bpy.types.Panel):
    """Armory section in the scene tab of the properties editor."""
    bl_label = "Armory Props"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "scene"
    bl_options = {'DEFAULT_CLOSED'}

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        scene = bpy.context.scene
        # `is None` instead of the original `== None` (PEP 8 identity check).
        if scene is None:
            return
        row = layout.row()
        # NOTE(review): this sub-column is never used afterwards; kept
        # because creating it may influence row layout — confirm and drop.
        column = row.column()
        row.prop(scene, 'arm_export')
class InvalidateCacheButton(bpy.types.Operator):
    """Delete cached mesh data"""
    bl_idname = "arm.invalidate_cache"
    bl_label = "Invalidate Cache"

    def execute(self, context):
        # Clearing the flag forces the exporter to rebuild this mesh data.
        context.object.data.arm_cached = False
        return {'FINISHED'}
class InvalidateMaterialCacheButton(bpy.types.Operator):
    """Delete cached material data"""
    bl_idname = "arm.invalidate_material_cache"
    bl_label = "Invalidate Cache"

    def execute(self, context):
        # Reset both the cache flag and the signature so the material is
        # regenerated on the next build.
        mat = context.material
        mat.arm_cached = False
        mat.signature = ''
        return {'FINISHED'}
class ARM_OT_NewCustomMaterial(bpy.types.Operator):
    """Create a new Armory custom material: writes a shader-data JSON into
    the project's Bundled folder and copies preset GLSL shaders into the
    project's Shaders folder."""
    bl_idname = "arm.new_custom_material"
    bl_label = "New Custom Material"
    bl_description = "Add a new custom material. This will create all the necessary files and folders"

    def poll_mat_name(self, context):
        """Update the mat_exists/shader_exists warning flags whenever the
        chosen material name changes (used as the StringProperty's update
        callback)."""
        project_dir = arm.utils.get_fp()
        shader_dir_dst = os.path.join(project_dir, 'Shaders')
        mat_name = arm.utils.safestr(self.mat_name)

        self.mat_exists = os.path.isdir(os.path.join(project_dir, 'Bundled', mat_name))

        vert_exists = os.path.isfile(os.path.join(shader_dir_dst, f'{mat_name}.vert.glsl'))
        frag_exists = os.path.isfile(os.path.join(shader_dir_dst, f'{mat_name}.frag.glsl'))
        self.shader_exists = vert_exists or frag_exists

    # Name of the material; re-validated on every edit via poll_mat_name.
    mat_name: StringProperty(
        name='Material Name', description='The name of the new material',
        default='MyCustomMaterial',
        update=poll_mat_name)
    # Which render-path flavor the generated shaders should target.
    mode: EnumProperty(
        name='Target RP', description='Choose for which render path mode the new material is created',
        default='deferred',
        items=[('deferred', 'Deferred', 'Create the material for a deferred render path'),
               ('forward', 'Forward', 'Create the material for a forward render path')])
    # Internal warning flags driving the dialog's overwrite notices.
    mat_exists: BoolProperty(
        name='Material Already Exists',
        default=False,
        options={'HIDDEN', 'SKIP_SAVE'})
    shader_exists: BoolProperty(
        name='Shaders Already Exist',
        default=False,
        options={'HIDDEN', 'SKIP_SAVE'})

    def invoke(self, context, event):
        """Open the creation dialog; requires a saved .blend so the project
        directory is known."""
        if not bpy.data.is_saved:
            self.report({'INFO'}, "Please save your file first")
            return {"CANCELLED"}

        # Try to set deferred/forward based on the selected render path
        try:
            self.mode = 'forward' if arm.utils.get_rp().rp_renderer == 'Forward' else 'deferred'
        except IndexError:
            # No render path, use default (deferred)
            pass

        self.poll_mat_name(context)

        wm = context.window_manager
        return wm.invoke_props_dialog(self, width=300)

    def draw(self, context):
        """Draw the dialog: name, target mode and overwrite warnings."""
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False

        layout.prop(self, 'mat_name')
        layout.prop(self, 'mode', expand=True)

        if self.mat_exists:
            box = layout.box()
            box.alert = True
            col = box.column(align=True)
            col.label(text='A custom material with that name already exists,', icon='ERROR')
            col.label(text='clicking on \'OK\' will override the material!', icon='BLANK1')

        if self.shader_exists:
            box = layout.box()
            box.alert = True
            col = box.column(align=True)
            col.label(text='Shader file(s) with that name already exists,', icon='ERROR')
            col.label(text='clicking on \'OK\' will override the shader(s)!', icon='BLANK1')

    def execute(self, context):
        """Write the shader-data JSON and copy the preset shaders."""
        if self.mat_name == '':
            return {'CANCELLED'}

        project_dir = arm.utils.get_fp()
        shader_dir_src = os.path.join(arm.utils.get_sdk_path(), 'armory', 'Shaders', 'custom_mat_presets')
        shader_dir_dst = os.path.join(project_dir, 'Shaders')
        mat_name = arm.utils.safestr(self.mat_name)
        mat_dir = os.path.join(project_dir, 'Bundled', mat_name)

        os.makedirs(mat_dir, exist_ok=True)
        os.makedirs(shader_dir_dst, exist_ok=True)

        # Shader data: the deferred variant additionally outputs normals,
        # so it needs a second color attachment, the normal matrix and a
        # `nor` vertex element.
        if self.mode == 'forward':
            col_attachments = ['RGBA64']
            constants = [{'link': '_worldViewProjectionMatrix', 'name': 'WVP', 'type': 'mat4'}]
            vertex_elems = [{'name': 'pos', 'data': 'short4norm'}]
        else:
            col_attachments = ['RGBA64', 'RGBA64']
            constants = [
                {'link': '_worldViewProjectionMatrix', 'name': 'WVP', 'type': 'mat4'},
                {'link': '_normalMatrix', 'name': 'N', 'type': 'mat3'}
            ]
            vertex_elems = [
                {'name': 'pos', 'data': 'short4norm'},
                {'name': 'nor', 'data': 'short2norm'}
            ]

        con = {
            'color_attachments': col_attachments,
            'compare_mode': 'less',
            'constants': constants,
            'cull_mode': 'clockwise',
            'depth_write': True,
            'fragment_shader': f'{mat_name}.frag',
            'name': 'mesh',
            'texture_units': [],
            'vertex_shader': f'{mat_name}.vert',
            'vertex_elements': vertex_elems
        }
        data = {
            'shader_datas': [{
                'contexts': [con],
                'name': f'{mat_name}'
            }]
        }

        # Save shader data file
        with open(os.path.join(mat_dir, f'{mat_name}.json'), 'w') as datafile:
            json.dump(data, datafile, indent=4, sort_keys=True)

        # Copy preset shaders to project
        if self.mode == 'forward':
            shutil.copy(os.path.join(shader_dir_src, 'custom_mat_forward.frag.glsl'), os.path.join(shader_dir_dst, f'{mat_name}.frag.glsl'))
            shutil.copy(os.path.join(shader_dir_src, 'custom_mat_forward.vert.glsl'), os.path.join(shader_dir_dst, f'{mat_name}.vert.glsl'))
        else:
            shutil.copy(os.path.join(shader_dir_src, 'custom_mat_deferred.frag.glsl'), os.path.join(shader_dir_dst, f'{mat_name}.frag.glsl'))
            shutil.copy(os.path.join(shader_dir_src, 'custom_mat_deferred.vert.glsl'), os.path.join(shader_dir_dst, f'{mat_name}.vert.glsl'))

        # True if called from the material properties tab, else it was called from the search menu
        if hasattr(context, 'material') and context.material is not None:
            context.material.arm_custom_material = mat_name

        return{'FINISHED'}
class ARM_PG_BindTexturesListItem(bpy.types.PropertyGroup):
    """One entry of a material's bind-textures list: maps a shader sampler
    uniform name to the image that should be bound to it."""
    # Sampler uniform name as spelled in the custom shader.
    uniform_name: StringProperty(
        name='Uniform Name',
        description='The name of the sampler uniform as used in the shader',
        default='ImageTexture',
    )
    # Image datablock bound to that sampler (may be None/unset).
    image: PointerProperty(
        name='Image',
        type=bpy.types.Image,
        description='The image to attach to the texture unit',
    )
class ARM_UL_BindTexturesList(bpy.types.UIList):
    """Draws one row per texture binding entry."""

    def draw_item(self, context, layout, data, item: ARM_PG_BindTexturesListItem, icon, active_data, active_propname, index):
        row = layout.row(align=True)
        # Entries without an assigned image are flagged as errors.
        if item.image is None:
            row.label(text='<empty>', icon='ERROR')
        else:
            row.label(text=item.uniform_name, icon_value=item.image.preview.icon_id)
class ARM_OT_BindTexturesListNewItem(bpy.types.Operator):
    bl_idname = "arm_bind_textures_list.new_item"
    bl_label = "Add Texture Binding"
    bl_description = "Add a new texture binding to the list"
    bl_options = {'INTERNAL'}

    @classmethod
    def poll(cls, context):
        # Only available while a material is active.
        return context.material is not None

    def execute(self, context):
        mat = context.material
        mat.arm_bind_textures_list.add()
        # Select the entry that was just appended.
        mat.arm_bind_textures_list_index = len(mat.arm_bind_textures_list) - 1
        return {'FINISHED'}
class ARM_OT_BindTexturesListDeleteItem(bpy.types.Operator):
    bl_idname = "arm_bind_textures_list.delete_item"
    bl_label = "Remove Texture Binding"
    bl_description = "Delete the selected texture binding from the list"
    bl_options = {'INTERNAL'}

    @classmethod
    def poll(cls, context):
        mat = context.material
        # Requires an active material with at least one binding.
        return mat is not None and len(mat.arm_bind_textures_list) > 0

    def execute(self, context):
        mat = context.material
        entries = mat.arm_bind_textures_list
        index = mat.arm_bind_textures_list_index
        if index < len(entries):
            entries.remove(index)
            # Keep a valid selection after removal.
            mat.arm_bind_textures_list_index = max(index - 1, 0)
        return {'FINISHED'}
class ARM_PT_MaterialPropsPanel(bpy.types.Panel):
    """Per-material Armory properties shown in the material tab."""
    bl_label = "Armory Props"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "material"
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        mat = bpy.context.material
        if mat is None:
            return
        layout.prop(mat, 'arm_cast_shadow')
        columnb = layout.column()
        wrd = bpy.data.worlds['Arm']
        # 'Receive shadow' is only configurable for the Forward renderer
        columnb.enabled = len(wrd.arm_rplist) > 0 and arm.utils.get_rp().rp_renderer == 'Forward'
        columnb.prop(mat, 'arm_receive_shadow')
        layout.prop(mat, 'arm_ignore_irradiance')
        layout.prop(mat, 'arm_two_sided')
        columnb = layout.column()
        # Two-sided rendering implies the cull mode, so grey it out then
        columnb.enabled = not mat.arm_two_sided
        columnb.prop(mat, 'arm_cull_mode')
        layout.prop(mat, 'arm_material_id')
        layout.prop(mat, 'arm_depth_read')
        layout.prop(mat, 'arm_overlay')
        layout.prop(mat, 'arm_decal')
        layout.prop(mat, 'arm_discard')
        columnb = layout.column()
        # Discard thresholds only matter while discard is enabled
        columnb.enabled = mat.arm_discard
        columnb.prop(mat, 'arm_discard_opacity')
        columnb.prop(mat, 'arm_discard_opacity_shadows')
        row = layout.row(align=True)
        row.prop(mat, 'arm_custom_material')
        row.operator('arm.new_custom_material', text='', icon='ADD')
        layout.prop(mat, 'arm_skip_context')
        layout.prop(mat, 'arm_particle_fade')
        layout.prop(mat, 'arm_billboard')
        layout.operator("arm.invalidate_material_cache")
class ARM_PT_BindTexturesPropsPanel(bpy.types.Panel):
    """Sub-panel for binding images to sampler uniforms of a custom
    material; only visible while a custom material is set."""
    bl_label = "Bind Textures"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "material"
    bl_options = {'DEFAULT_CLOSED'}
    bl_parent_id = "ARM_PT_MaterialPropsPanel"
    @classmethod
    def poll(cls, context):
        # Texture bindings only apply to custom materials
        mat = context.material
        if mat is None:
            return False
        return mat.arm_custom_material != ''
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        mat = bpy.context.material
        if mat is None:
            return
        row = layout.row(align=True)
        col = row.column(align=True)
        col.template_list('ARM_UL_BindTexturesList', '', mat, 'arm_bind_textures_list', mat, 'arm_bind_textures_list_index')
        if mat.arm_bind_textures_list_index >= 0 and len(mat.arm_bind_textures_list) > 0:
            item = mat.arm_bind_textures_list[mat.arm_bind_textures_list_index]
            box = col.box()
            if item.image is None:
                # Entries without an image are skipped on export — warn the user
                _row = box.row()
                _row.alert = True
                _row.alignment = 'RIGHT'
                _row.label(text="No image selected, skipping export")
            box.prop(item, 'uniform_name')
            box.prop(item, 'image')
        col = row.column(align=True)
        col.operator("arm_bind_textures_list.new_item", icon='ADD', text="")
        col.operator("arm_bind_textures_list.delete_item", icon='REMOVE', text="")
class ARM_PT_MaterialDriverPropsPanel(bpy.types.Panel):
    """Per-material properties for custom render path drivers"""
    bl_label = "Armory Driver Properties"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "material"
    @classmethod
    def poll(cls, context):
        # Visible only when a non-Armory driver is selected on the active
        # render path AND that driver registered a 'draw_mat_props' callback
        mat = context.material
        if mat is None:
            return False
        wrd = bpy.data.worlds['Arm']
        if wrd.arm_rplist_index < 0 or len(wrd.arm_rplist) == 0:
            return False
        if len(arm.api.drivers) == 0:
            return False
        rpdat = wrd.arm_rplist[wrd.arm_rplist_index]
        return rpdat.rp_driver != 'Armory' and arm.api.drivers[rpdat.rp_driver]['draw_mat_props'] is not None
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        wrd = bpy.data.worlds['Arm']
        rpdat = wrd.arm_rplist[wrd.arm_rplist_index]
        # Delegate drawing to the active driver's registered callback
        arm.api.drivers[rpdat.rp_driver]['draw_mat_props'](layout, context.material)
class ARM_PT_MaterialBlendingPropsPanel(bpy.types.Panel):
    """Material blend-mode settings; the checkbox in the panel header
    toggles blending on/off."""
    bl_label = "Blending"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "material"
    bl_options = {'DEFAULT_CLOSED'}
    bl_parent_id = "ARM_PT_MaterialPropsPanel"
    def draw_header(self, context):
        if context.material is None:
            return
        self.layout.prop(context.material, 'arm_blending', text="")
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        mat = bpy.context.material
        if mat is None:
            return
        flow = layout.grid_flow()
        # Grey out all settings while blending is disabled
        flow.enabled = mat.arm_blending
        col = flow.column(align=True)
        col.prop(mat, 'arm_blending_source')
        col.prop(mat, 'arm_blending_destination')
        col.prop(mat, 'arm_blending_operation')
        flow.separator()
        col = flow.column(align=True)
        col.prop(mat, 'arm_blending_source_alpha')
        col.prop(mat, 'arm_blending_destination_alpha')
        col.prop(mat, 'arm_blending_operation_alpha')
class ARM_PT_ArmoryPlayerPanel(bpy.types.Panel):
    """Play/stop controls plus compile warning/error summary."""
    bl_label = "Armory Player"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "render"
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        wrd = bpy.data.worlds['Arm']
        row = layout.row(align=True)
        row.alignment = 'EXPAND'
        row.scale_y = 1.3
        # Show Play while idle, Stop while a player/build process runs
        if state.proc_play is None and state.proc_build is None:
            row.operator("arm.play", icon="PLAY")
        else:
            row.operator("arm.stop", icon="MESH_PLANE")
        row.operator("arm.clean_menu", icon="BRUSH_DATA")
        col = layout.box().column()
        col.prop(wrd, 'arm_runtime')
        col.prop(wrd, 'arm_play_camera')
        col.prop(wrd, 'arm_play_scene')
        col.prop_search(wrd, 'arm_play_renderpath', wrd, 'arm_rplist', text='Render Path')
        if log.num_warnings > 0:
            box = layout.box()
            box.alert = True
            # Less spacing between lines
            col = box.column(align=True)
            warnings = 'warnings' if log.num_warnings > 1 else 'warning'
            col.label(text=f'{log.num_warnings} {warnings} occurred during compilation!', icon='ERROR')
            # Blank icon to achieve the same indentation as the line before
            # prevent showing "open console" twice:
            if log.num_errors == 0:
                col.label(text='Please open the console to get more information.', icon='BLANK1')
        if log.num_errors > 0:
            box = layout.box()
            box.alert = True
            # Less spacing between lines
            col = box.column(align=True)
            errors = 'errors' if log.num_errors > 1 else 'error'
            col.label(text=f'{log.num_errors} {errors} occurred during compilation!', icon='CANCEL')
            # Blank icon to achieve the same indentation as the line before
            col.label(text='Please open the console to get more information.', icon='BLANK1')
class ARM_PT_ArmoryExporterPanel(bpy.types.Panel):
    """Exporter target list plus project metadata and build output settings."""
    bl_label = "Armory Exporter"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "render"
    bl_options = {'DEFAULT_CLOSED'}
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        wrd = bpy.data.worlds['Arm']
        row = layout.row(align=True)
        row.alignment = 'EXPAND'
        row.scale_y = 1.3
        row.operator("arm.build_project", icon="MOD_BUILD")
        # row.operator("arm.patch_project")
        row.operator("arm.publish_project", icon="EXPORT")
        # Grow the list when move buttons are shown (more than one entry)
        rows = 2
        if len(wrd.arm_exporterlist) > 1:
            rows = 4
        row = layout.row()
        row.template_list("ARM_UL_ExporterList", "The_List", wrd, "arm_exporterlist", wrd, "arm_exporterlist_index", rows=rows)
        col = row.column(align=True)
        col.operator("arm_exporterlist.new_item", icon='ADD', text="")
        col.operator("arm_exporterlist.delete_item", icon='REMOVE', text="")
        col.menu("ARM_MT_ExporterListSpecials", icon='DOWNARROW_HLT', text="")
        if len(wrd.arm_exporterlist) > 1:
            col.separator()
            op = col.operator("arm_exporterlist.move_item", icon='TRIA_UP', text="")
            op.direction = 'UP'
            op = col.operator("arm_exporterlist.move_item", icon='TRIA_DOWN', text="")
            op.direction = 'DOWN'
        if wrd.arm_exporterlist_index >= 0 and len(wrd.arm_exporterlist) > 0:
            item = wrd.arm_exporterlist[wrd.arm_exporterlist_index]
            box = layout.box().column()
            box.prop(item, 'arm_project_target')
            if item.arm_project_target == 'custom':
                box.prop(item, 'arm_project_khamake')
            box.prop(item, arm.utils.target_to_gapi(item.arm_project_target))
            wrd.arm_rpcache_list.clear() # Make UIList work with prop_search()
            for i in wrd.arm_rplist:
                wrd.arm_rpcache_list.add().name = i.name
            box.prop_search(item, "arm_project_rp", wrd, "arm_rpcache_list", text="Render Path")
            box.prop_search(item, 'arm_project_scene', bpy.data, 'scenes', text='Scene')
            layout.separator()
        col = layout.column(align=True)
        col.prop(wrd, 'arm_project_name')
        col.prop(wrd, 'arm_project_package')
        col.prop(wrd, 'arm_project_bundle')
        col = layout.column(align=True)
        col.prop(wrd, 'arm_project_version')
        col.prop(wrd, 'arm_project_version_autoinc')
        col = layout.column()
        col.prop(wrd, 'arm_project_icon')
        col = layout.column(heading='Code Output', align=True)
        col.prop(wrd, 'arm_dce')
        col.prop(wrd, 'arm_compiler_inline')
        col.prop(wrd, 'arm_minify_js')
        col.prop(wrd, 'arm_no_traces')
        col = layout.column(heading='Data', align=True)
        col.prop(wrd, 'arm_minimize')
        col.prop(wrd, 'arm_optimize_data')
        col.prop(wrd, 'arm_asset_compression')
        col.prop(wrd, 'arm_single_data_file')
class ExporterTargetSettingsMixin:
    """Mixin for common exporter setting subpanel functionality.

    Panels that inherit from this mixin need to set the ``arm_target``
    class variable; ``poll()`` compares it against the currently selected
    exporter's target to decide whether the panel is shown.
    """
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = 'render'
    bl_parent_id = 'ARM_PT_ArmoryExporterPanel'

    # Override this in sub classes.
    # Fixed: the placeholder was mistakenly named 'arm_panel' although
    # poll() (and every subclass) uses 'arm_target'.
    arm_target = ''

    @classmethod
    def poll(cls, context):
        wrd = bpy.data.worlds['Arm']
        if (len(wrd.arm_exporterlist) > 0) and (wrd.arm_exporterlist_index >= 0):
            item = wrd.arm_exporterlist[wrd.arm_exporterlist_index]
            return item.arm_project_target == cls.arm_target
        return False

    def draw_header(self, context):
        self.layout.label(text='', icon='SETTINGS')
class ARM_PT_ArmoryExporterAndroidSettingsPanel(ExporterTargetSettingsMixin, bpy.types.Panel):
    """Android-specific exporter settings (orientation, SDK levels)."""
    bl_label = "Android Settings"
    arm_target = 'android-hl'  # See ExporterTargetSettingsMixin

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        wrd = bpy.data.worlds['Arm']
        settings_col = layout.column()
        settings_col.prop(wrd, 'arm_winorient')
        settings_col.prop(wrd, 'arm_project_android_sdk_min')
        settings_col.prop(wrd, 'arm_project_android_sdk_target')
        settings_col.prop(wrd, 'arm_project_android_sdk_compile')
class ARM_PT_ArmoryExporterAndroidPermissionsPanel(bpy.types.Panel):
    """List of Android permissions to add to the exported project."""
    bl_label = "Permissions"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "render"
    bl_options = {'DEFAULT_CLOSED'}
    bl_parent_id = "ARM_PT_ArmoryExporterAndroidSettingsPanel"
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        wrd = bpy.data.worlds['Arm']
        # Permission
        row = layout.row()
        # Grow the list when it has more than one entry
        rows = 2
        if len(wrd.arm_exporter_android_permission_list) > 1:
            rows = 4
        row.template_list("ARM_UL_Exporter_AndroidPermissionList", "The_List", wrd, "arm_exporter_android_permission_list", wrd, "arm_exporter_android_permission_list_index", rows=rows)
        col = row.column(align=True)
        col.operator("arm_exporter_android_permission_list.new_item", icon='ADD', text="")
        col.operator("arm_exporter_android_permission_list.delete_item", icon='REMOVE', text="")
        row = layout.row()
        if wrd.arm_exporter_android_permission_list_index >= 0 and len(wrd.arm_exporter_android_permission_list) > 0:
            item = wrd.arm_exporter_android_permission_list[wrd.arm_exporter_android_permission_list_index]
            row = layout.row()
            row.prop(item, 'arm_android_permissions')
class ARM_PT_ArmoryExporterAndroidAbiPanel(bpy.types.Panel):
    """List of Android ABI filters for the exported project."""
    bl_label = "Android ABI Filters"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "render"
    bl_options = { 'DEFAULT_CLOSED'}
    bl_parent_id = "ARM_PT_ArmoryExporterAndroidSettingsPanel"
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        wrd = bpy.data.worlds['Arm']
        # ABIs
        row = layout.row()
        # Grow the list when it has more than one entry
        rows = 2
        if len(wrd.arm_exporter_android_abi_list) > 1:
            rows = 4
        row.template_list("ARM_UL_Exporter_AndroidAbiList", "The_List", wrd, "arm_exporter_android_abi_list", wrd, "arm_exporter_android_abi_list_index", rows=rows)
        col = row.column(align=True)
        col.operator("arm_exporter_android_abi_list.new_item", icon='ADD', text="")
        col.operator("arm_exporter_android_abi_list.delete_item", icon='REMOVE', text="")
        row = layout.row()
        if wrd.arm_exporter_android_abi_list_index >= 0 and len(wrd.arm_exporter_android_abi_list) > 0:
            item = wrd.arm_exporter_android_abi_list[wrd.arm_exporter_android_abi_list_index]
            row = layout.row()
            row.prop(item, 'arm_android_abi')
class ARM_PT_ArmoryExporterAndroidBuildAPKPanel(bpy.types.Panel):
    """APK build/emulator options; most rows require an Android SDK
    root path configured in the add-on preferences."""
    bl_label = "Building APK"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "render"
    bl_options = {'DEFAULT_CLOSED'}
    bl_parent_id = "ARM_PT_ArmoryExporterAndroidSettingsPanel"
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        wrd = bpy.data.worlds['Arm']
        path = arm.utils.get_android_sdk_root_path()
        col = layout.column()
        row = col.row()
        # APK building requires a configured Android SDK root
        row.enabled = len(path) > 0
        row.prop(wrd, 'arm_project_android_build_apk')
        row = col.row()
        row.enabled = wrd.arm_project_android_build_apk
        row.prop(wrd, 'arm_project_android_rename_apk')
        row = col.row()
        # Copying also needs a copy destination configured in preferences
        row.enabled = wrd.arm_project_android_build_apk and len(arm.utils.get_android_apk_copy_path()) > 0
        row.prop(wrd, 'arm_project_android_copy_apk')
        row = col.row(align=True)
        row.prop(wrd, 'arm_project_android_list_avd')
        sub = row.column(align=True)
        sub.enabled = len(path) > 0
        sub.operator('arm.update_list_android_emulator', text='', icon='FILE_REFRESH')
        sub = row.column(align=True)
        sub.enabled = len(path) > 0 and len(arm.utils.get_android_emulator_name()) > 0
        sub.operator('arm.run_android_emulator', text='', icon='PLAY')
        row = col.row()
        row.enabled = arm.utils.get_project_android_build_apk() and len(arm.utils.get_android_emulator_name()) > 0
        row.prop(wrd, 'arm_project_android_run_avd')
class ARM_PT_ArmoryExporterHTML5SettingsPanel(ExporterTargetSettingsMixin, bpy.types.Panel):
    """HTML5-specific exporter settings."""
    bl_label = "HTML5 Settings"
    arm_target = 'html5' # See ExporterTargetSettingsMixin
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        wrd = bpy.data.worlds['Arm']
        col = layout.column()
        col.prop(wrd, 'arm_project_html5_popupmenu_in_browser')
        row = col.row()
        # Copy/serve options require the matching paths configured in preferences
        row.enabled = len(arm.utils.get_html5_copy_path()) > 0
        row.prop(wrd, 'arm_project_html5_copy')
        row = col.row()
        row.enabled = len(arm.utils.get_html5_copy_path()) > 0 and wrd.arm_project_html5_copy and len(arm.utils.get_link_web_server()) > 0
        row.prop(wrd, 'arm_project_html5_start_browser')
class ARM_PT_ArmoryExporterWindowsSettingsPanel(ExporterTargetSettingsMixin, bpy.types.Panel):
    """Windows-specific exporter settings (Visual Studio build options)."""
    bl_label = "Windows Settings"
    arm_target = 'windows-hl' # See ExporterTargetSettingsMixin
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        wrd = bpy.data.worlds['Arm']
        col = layout.column()
        row = col.row(align=True)
        row.prop(wrd, 'arm_project_win_list_vs')
        sub = row.column(align=True)
        # Visual Studio detection only works on Windows hosts
        sub.enabled = arm.utils.get_os_is_windows()
        sub.operator('arm.update_list_installed_vs', text='', icon='FILE_REFRESH')
        row = col.row()
        row.enabled = arm.utils.get_os_is_windows()
        row.prop(wrd, 'arm_project_win_build', text='After Publish')
        layout.separator()
        col = layout.column()
        # Build options only apply for the 'compile'/'run' after-publish modes
        col.enabled = arm.utils.get_os_is_windows() and wrd.arm_project_win_build != '0' and wrd.arm_project_win_build != '1'
        col.prop(wrd, 'arm_project_win_build_mode')
        col.prop(wrd, 'arm_project_win_build_arch')
        col.prop(wrd, 'arm_project_win_build_log')
        col.prop(wrd, 'arm_project_win_build_cpu')
        col.prop(wrd, 'arm_project_win_build_open')
class ARM_PT_ArmoryProjectPanel(bpy.types.Panel):
    """Top-level 'Armory Project' panel in the render properties tab."""
    bl_label = "Armory Project"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "render"
    bl_options = {'DEFAULT_CLOSED'}

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        buttons = layout.row(align=True)
        buttons.operator("arm.open_editor", icon="DESKTOP")
        buttons.operator("arm.open_project_folder", icon="FILE_FOLDER")
class ARM_PT_ProjectFlagsPanel(bpy.types.Panel):
    """Project-wide build flags grouped by debug/runtime/renderer/quality."""
    bl_label = "Flags"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "render"
    bl_parent_id = "ARM_PT_ArmoryProjectPanel"
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        wrd = bpy.data.worlds['Arm']
        col = layout.column(heading='Debug', align=True)
        col.prop(wrd, 'arm_verbose_output')
        col.prop(wrd, 'arm_cache_build')
        col.prop(wrd, 'arm_assert_level')
        col.prop(wrd, 'arm_assert_quit')
        col = layout.column(heading='Runtime', align=True)
        col.prop(wrd, 'arm_live_patch')
        col.prop(wrd, 'arm_stream_scene')
        col.prop(wrd, 'arm_loadscreen')
        col.prop(wrd, 'arm_write_config')
        col = layout.column(heading='Renderer', align=True)
        col.prop(wrd, 'arm_batch_meshes')
        col.prop(wrd, 'arm_batch_materials')
        col.prop(wrd, 'arm_deinterleaved_buffers')
        col.prop(wrd, 'arm_export_tangents')
        col = layout.column(heading='Quality')
        col.prop(wrd, 'arm_texture_quality')
        col.prop(wrd, 'arm_sound_quality')
class ARM_PT_ProjectFlagsDebugConsolePanel(bpy.types.Panel):
    """In-game debug console settings; the header checkbox enables it."""
    bl_label = "Debug Console"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "render"
    bl_options = {'DEFAULT_CLOSED'}
    bl_parent_id = "ARM_PT_ProjectFlagsPanel"
    def draw_header(self, context):
        wrd = bpy.data.worlds['Arm']
        self.layout.prop(wrd, 'arm_debug_console', text='')
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        wrd = bpy.data.worlds['Arm']
        col = layout.column()
        # Grey out the options while the console is disabled
        col.enabled = wrd.arm_debug_console
        col.prop(wrd, 'arm_debug_console_position')
        col.prop(wrd, 'arm_debug_console_scale')
        col.prop(wrd, 'arm_debug_console_visible')
        col.prop(wrd, 'arm_debug_console_trace_pos')
class ARM_PT_ProjectWindowPanel(bpy.types.Panel):
    """Application window settings (mode, resizing, vsync)."""
    bl_label = "Window"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "render"
    bl_options = {'DEFAULT_CLOSED'}
    bl_parent_id = "ARM_PT_ArmoryProjectPanel"
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        wrd = bpy.data.worlds['Arm']
        layout.prop(wrd, 'arm_winmode')
        col = layout.column(align=True)
        col.prop(wrd, 'arm_winresize')
        sub = col.column()
        # Maximize is only meaningful when the window is resizable
        sub.enabled = wrd.arm_winresize
        sub.prop(wrd, 'arm_winmaximize')
        # NOTE(review): this assignment is a no-op — nothing disables 'col'
        # beforehand; possibly leftover from an earlier condition. Confirm.
        col.enabled = True
        col.prop(wrd, 'arm_winminimize')
        layout.prop(wrd, 'arm_vsync')
class ARM_PT_ProjectModulesPanel(bpy.types.Panel):
    """Engine module selection (audio, physics, navigation, UI)."""
    bl_label = "Modules"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "render"
    bl_options = {'DEFAULT_CLOSED'}
    bl_parent_id = "ARM_PT_ArmoryProjectPanel"
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        wrd = bpy.data.worlds['Arm']
        layout.prop(wrd, 'arm_audio')
        layout.prop(wrd, 'arm_physics')
        # Engine choice only applies while the module is enabled
        if wrd.arm_physics != 'Disabled':
            layout.prop(wrd, 'arm_physics_engine')
        layout.prop(wrd, 'arm_navigation')
        if wrd.arm_navigation != 'Disabled':
            layout.prop(wrd, 'arm_navigation_engine')
        layout.prop(wrd, 'arm_ui')
        layout.prop_search(wrd, 'arm_khafile', bpy.data, 'texts', text='Khafile')
        layout.prop(wrd, 'arm_project_root')
class ArmVirtualInputPanel(bpy.types.Panel):
    """Virtual input panel; currently draws no properties (placeholder)."""
    bl_label = "Armory Virtual Input"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "render"
    bl_options = {'DEFAULT_CLOSED'}
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
class ArmoryPlayButton(bpy.types.Operator):
    '''Launch player in new window'''
    bl_idname = 'arm.play'
    bl_label = 'Play'

    def invoke(self, context, event):
        # Shift+click skips rebuilding and relaunches the player directly
        if event.shift:
            state.is_play = True
            make.build_success()
            return {'FINISHED'}
        return self.execute(context)

    def execute(self, context):
        # Don't start while a build is already running.
        # Fixed: None comparison with 'is not' instead of '!=' (PEP 8).
        if state.proc_build is not None:
            return {'CANCELLED'}
        # Compare version Blender and Armory (major, minor)
        if not arm.utils.compare_version_blender_arm():
            self.report({'INFO'}, 'For Armory to work correctly, you need Blender 2.93 LTS.')
        if not arm.utils.check_saved(self):
            return {'CANCELLED'}
        if not arm.utils.check_sdkpath(self):
            return {'CANCELLED'}
        arm.utils.check_projectpath(None)
        arm.utils.check_default_props()
        # Suspend asset invalidation while the build/play runs
        assets.invalidate_enabled = False
        make.play()
        assets.invalidate_enabled = True
        return {'FINISHED'}
class ArmoryStopButton(bpy.types.Operator):
    '''Stop currently running player'''
    bl_idname = 'arm.stop'
    bl_label = 'Stop'

    def execute(self, context):
        # Terminate the player if running, otherwise a running build.
        # Fixed: None comparisons with 'is not' instead of '!=' (PEP 8).
        if state.proc_play is not None:
            state.proc_play.terminate()
            state.proc_play = None
        elif state.proc_build is not None:
            state.proc_build.terminate()
            state.proc_build = None
        return {'FINISHED'}
class ArmoryBuildProjectButton(bpy.types.Operator):
    """Build and compile project"""
    bl_idname = 'arm.build_project'
    bl_label = 'Build'

    @classmethod
    def poll(cls, context):
        # Requires a selected exporter entry
        wrd = bpy.data.worlds['Arm']
        return wrd.arm_exporterlist_index >= 0 and len(wrd.arm_exporterlist) > 0

    def execute(self, context):
        # Compare version Blender and Armory (major, minor)
        if not arm.utils.compare_version_blender_arm():
            self.report({'INFO'}, 'For Armory to work correctly, you need Blender 2.93 LTS.')
        if not arm.utils.check_saved(self):
            return {'CANCELLED'}
        if not arm.utils.check_sdkpath(self):
            return {'CANCELLED'}
        arm.utils.check_projectpath(self)
        arm.utils.check_default_props()
        wrd = bpy.data.worlds['Arm']
        item = wrd.arm_exporterlist[wrd.arm_exporterlist_index]
        # Fall back to the active render path / current scene when unset.
        # Fixed: None comparison with 'is' instead of '==' (PEP 8).
        if item.arm_project_rp == '':
            item.arm_project_rp = wrd.arm_rplist[wrd.arm_rplist_index].name
        if item.arm_project_scene is None:
            item.arm_project_scene = context.scene
        # Temporarily activate the exporter's render path (assumes unique
        # rp names); restored after the build
        rplist_index = wrd.arm_rplist_index
        for i, rp in enumerate(wrd.arm_rplist):
            if rp.name == item.arm_project_rp:
                wrd.arm_rplist_index = i
                break
        assets.invalidate_shader_cache(None, None)
        assets.invalidate_enabled = False
        make.build(item.arm_project_target, is_export=True)
        make.compile()
        wrd.arm_rplist_index = rplist_index
        assets.invalidate_enabled = True
        return {'FINISHED'}
class ArmoryPublishProjectButton(bpy.types.Operator):
    """Build project ready for publishing."""
    bl_idname = 'arm.publish_project'
    bl_label = 'Publish'

    @classmethod
    def poll(cls, context):
        # Requires a selected exporter entry
        wrd = bpy.data.worlds['Arm']
        return wrd.arm_exporterlist_index >= 0 and len(wrd.arm_exporterlist) > 0

    def execute(self, context):
        # Compare version Blender and Armory (major, minor)
        if not arm.utils.compare_version_blender_arm():
            self.report({'INFO'}, 'For Armory to work correctly, you need Blender 2.93 LTS.')
        if not arm.utils.check_saved(self):
            return {'CANCELLED'}
        if not arm.utils.check_sdkpath(self):
            return {'CANCELLED'}
        self.report({'INFO'}, 'Publishing project, check console for details.')
        arm.utils.check_projectpath(self)
        arm.utils.check_default_props()
        wrd = bpy.data.worlds['Arm']
        item = wrd.arm_exporterlist[wrd.arm_exporterlist_index]
        # Fall back to the active render path / current scene when unset.
        # Fixed: None comparison with 'is' instead of '==' (PEP 8).
        if item.arm_project_rp == '':
            item.arm_project_rp = wrd.arm_rplist[wrd.arm_rplist_index].name
        if item.arm_project_scene is None:
            item.arm_project_scene = context.scene
        # Temporarily activate the exporter's render path (assumes unique
        # rp names); restored after the build
        rplist_index = wrd.arm_rplist_index
        for i, rp in enumerate(wrd.arm_rplist):
            if rp.name == item.arm_project_rp:
                wrd.arm_rplist_index = i
                break
        # Publishing always starts from a clean build
        make.clean()
        assets.invalidate_enabled = False
        make.build(item.arm_project_target, is_publish=True, is_export=True)
        make.compile()
        wrd.arm_rplist_index = rplist_index
        assets.invalidate_enabled = True
        return {'FINISHED'}
class ArmoryOpenProjectFolderButton(bpy.types.Operator):
    '''Open project folder'''
    bl_idname = 'arm.open_project_folder'
    bl_label = 'Project Folder'

    def execute(self, context):
        # The project path is only known once the .blend has been saved
        if not arm.utils.check_saved(self):
            return {'CANCELLED'}
        project_root = arm.utils.get_fp()
        arm.utils.open_folder(project_root)
        return {'FINISHED'}
class ArmoryOpenEditorButton(bpy.types.Operator):
    '''Launch this project in the IDE'''
    bl_idname = 'arm.open_editor'
    bl_label = 'Code Editor'
    bl_description = 'Open Project in IDE'

    def execute(self, context):
        if not arm.utils.check_saved(self):
            return {'CANCELLED'}
        arm.utils.check_default_props()
        # Fixed: build the path with os.path.join instead of string
        # concatenation so separators are handled portably.
        if not os.path.exists(os.path.join(arm.utils.get_fp(), 'khafile.js')):
            print('Generating Krom project for IDE build configuration')
            make.build('krom')
        arm.utils.open_editor()
        return {'FINISHED'}
class CleanMenu(bpy.types.Menu):
    """Confirmation menu shown before cleaning the project."""
    bl_label = "Ok?"
    bl_idname = "OBJECT_MT_clean_menu"

    def draw(self, context):
        self.layout.operator("arm.clean_project")
class CleanButtonMenu(bpy.types.Operator):
    '''Clean cached data'''
    bl_label = "Clean"
    bl_idname = "arm.clean_menu"

    def execute(self, context):
        # Ask for confirmation via a popup menu instead of cleaning directly
        bpy.ops.wm.call_menu(name=CleanMenu.bl_idname)
        return {'FINISHED'}
class ArmoryCleanProjectButton(bpy.types.Operator):
    '''Delete all cached project data'''
    bl_idname = 'arm.clean_project'
    bl_label = 'Clean Project'

    def execute(self, context):
        # A saved .blend is required so the project path is known
        if not arm.utils.check_saved(self):
            return {'CANCELLED'}
        make.clean()
        return {'FINISHED'}
def draw_view3d_header(self, context):
    """Append Armory build status / log info to the 3D view header."""
    layout = self.layout
    if state.proc_build is not None:
        layout.label(text='Compiling..')
    elif log.info_text != '':
        layout.label(text=log.info_text)
def draw_view3d_object_menu(self, context):
    """Append the Armory trait-copy operator to the 3D view object menu."""
    layout = self.layout
    layout.separator()
    layout.operator_context = 'INVOKE_DEFAULT'
    layout.operator('arm.copy_traits_to_active')
class ARM_PT_RenderPathPanel(bpy.types.Panel):
    """Render path list and render-path driver selection."""
    bl_label = "Armory Render Path"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "render"
    bl_options = {'DEFAULT_CLOSED'}

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        wrd = bpy.data.worlds['Arm']
        # Grow the list when move buttons are shown (more than one entry)
        rows = 2
        if len(wrd.arm_rplist) > 1:
            rows = 4
        row = layout.row()
        row.template_list("ARM_UL_RPList", "The_List", wrd, "arm_rplist", wrd, "arm_rplist_index", rows=rows)
        col = row.column(align=True)
        col.operator("arm_rplist.new_item", icon='ADD', text="")
        col.operator("arm_rplist.delete_item", icon='REMOVE', text="")
        if len(wrd.arm_rplist) > 1:
            col.separator()
            op = col.operator("arm_rplist.move_item", icon='TRIA_UP', text="")
            op.direction = 'UP'
            op = col.operator("arm_rplist.move_item", icon='TRIA_DOWN', text="")
            op.direction = 'DOWN'
        if wrd.arm_rplist_index < 0 or len(wrd.arm_rplist) == 0:
            return
        rpdat = wrd.arm_rplist[wrd.arm_rplist_index]
        if len(arm.api.drivers) > 0:
            # Rebuild the driver name cache so prop_search stays in sync
            rpdat.rp_driver_list.clear()
            rpdat.rp_driver_list.add().name = 'Armory'
            for d in arm.api.drivers:
                rpdat.rp_driver_list.add().name = arm.api.drivers[d]['driver_name']
            layout.prop_search(rpdat, "rp_driver", rpdat, "rp_driver_list", text="Driver")
            layout.separator()
            # Fixed: None comparison with 'is not' instead of '!=' (PEP 8)
            if rpdat.rp_driver != 'Armory' and arm.api.drivers[rpdat.rp_driver]['draw_props'] is not None:
                arm.api.drivers[rpdat.rp_driver]['draw_props'](layout)
                return
class ARM_PT_RenderPathRendererPanel(bpy.types.Panel):
    """Core renderer settings of the selected render path."""
    bl_label = "Renderer"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "render"
    bl_options = {'DEFAULT_CLOSED'}
    bl_parent_id = "ARM_PT_RenderPathPanel"
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        wrd = bpy.data.worlds['Arm']
        if len(wrd.arm_rplist) <= wrd.arm_rplist_index:
            return
        rpdat = wrd.arm_rplist[wrd.arm_rplist_index]
        layout.prop(rpdat, 'rp_renderer')
        # Depth prepass only exists for the Forward renderer
        if rpdat.rp_renderer == 'Forward':
            layout.prop(rpdat, 'rp_depthprepass')
        layout.prop(rpdat, 'arm_material_model')
        layout.prop(rpdat, 'rp_translucency_state')
        layout.prop(rpdat, 'rp_overlays_state')
        layout.prop(rpdat, 'rp_decals_state')
        layout.prop(rpdat, 'rp_blending_state')
        layout.prop(rpdat, 'rp_depth_texture_state')
        layout.prop(rpdat, 'rp_draw_order')
        layout.prop(rpdat, 'arm_samples_per_pixel')
        layout.prop(rpdat, 'arm_texture_filter')
        layout.prop(rpdat, 'rp_sss_state')
        col = layout.column()
        # SSS width only applies while subsurface scattering is on
        col.enabled = rpdat.rp_sss_state != 'Off'
        col.prop(rpdat, 'arm_sss_width')
        layout.prop(rpdat, 'arm_rp_displacement')
        if rpdat.arm_rp_displacement == 'Tessellation':
            layout.label(text='Mesh')
            layout.prop(rpdat, 'arm_tess_mesh_inner')
            layout.prop(rpdat, 'arm_tess_mesh_outer')
            layout.label(text='Shadow')
            layout.prop(rpdat, 'arm_tess_shadows_inner')
            layout.prop(rpdat, 'arm_tess_shadows_outer')
        layout.prop(rpdat, 'arm_particles')
        layout.separator(factor=0.1)
        col = layout.column()
        col.prop(rpdat, 'arm_skin')
        col = col.column()
        col.enabled = rpdat.arm_skin == 'On'
        col.prop(rpdat, 'arm_skin_max_bones_auto')
        row = col.row()
        # Manual bone cap only when auto-detection is off
        row.enabled = not rpdat.arm_skin_max_bones_auto
        row.prop(rpdat, 'arm_skin_max_bones')
        layout.separator(factor=0.1)
        col = layout.column()
        col.prop(rpdat, 'arm_morph_target')
        col = col.column()
        col.enabled = rpdat.arm_morph_target == 'On'
        layout.separator(factor=0.1)
        col = layout.column()
        col.prop(rpdat, "rp_hdr")
        col.prop(rpdat, "rp_stereo")
        col.prop(rpdat, 'arm_culling')
        col.prop(rpdat, 'rp_pp')
class ARM_PT_RenderPathShadowsPanel(bpy.types.Panel):
    """Shadow map settings of the selected render path, including the
    shadow map atlas configuration and capacity estimations."""
    bl_label = "Shadows"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "render"
    bl_options = {'DEFAULT_CLOSED'}
    bl_parent_id = "ARM_PT_RenderPathPanel"

    def draw_header(self, context):
        wrd = bpy.data.worlds['Arm']
        if len(wrd.arm_rplist) <= wrd.arm_rplist_index:
            return
        rpdat = wrd.arm_rplist[wrd.arm_rplist_index]
        self.layout.prop(rpdat, "rp_shadows", text="")

    def compute_subdivs(self, max_size, subdivs):
        """Return a list of `subdivs` shadow map sizes, starting at
        `max_size` and halving for each LOD level.

        Fixed: the parameter no longer shadows the `max` builtin, and
        integer halving replaces float division + int() truncation
        (identical results for positive sizes).
        """
        sizes = [max_size]
        for _ in range(subdivs - 1):
            max_size //= 2
            sizes.append(max_size)
        return sizes

    def tiles_per_light_type(self, rpdat: arm.props_renderpath.ArmRPListItem, light_type: str) -> int:
        """Number of atlas tiles a single light of the given type occupies."""
        if light_type == 'point':
            # Cube map: one tile per face
            return 6
        elif light_type == 'spot':
            return 1
        else:
            # Sun: one tile per shadow map cascade
            return int(rpdat.rp_shadowmap_cascades)

    def lights_number_atlas(self, rpdat: arm.props_renderpath.ArmRPListItem, atlas_size: int, shadowmap_size: int, light_type: str) -> int:
        '''Compute number lights that could fit in an atlas'''
        # Tiles per row, squared, divided by tiles needed per light
        lights = atlas_size / shadowmap_size
        lights *= lights / self.tiles_per_light_type(rpdat, light_type)
        return int(lights)

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        wrd = bpy.data.worlds['Arm']
        if len(wrd.arm_rplist) <= wrd.arm_rplist_index:
            return
        rpdat = wrd.arm_rplist[wrd.arm_rplist_index]
        layout.enabled = rpdat.rp_shadows
        col = layout.column()
        # Cube size is irrelevant when a single atlas map holds everything
        col.enabled = not rpdat.rp_shadowmap_atlas_single_map or not rpdat.rp_shadowmap_atlas
        col.prop(rpdat, 'rp_shadowmap_cube')
        layout.prop(rpdat, 'rp_shadowmap_cascade')
        layout.prop(rpdat, 'rp_shadowmap_cascades')
        col = layout.column()
        col2 = col.column()
        # Split factor only applies with more than one cascade
        col2.enabled = rpdat.rp_shadowmap_cascades != '1'
        col2.prop(rpdat, 'arm_shadowmap_split')
        col.prop(rpdat, 'arm_shadowmap_bounds')
        col.prop(rpdat, 'arm_pcfsize')
        layout.separator()
        layout.prop(rpdat, 'rp_shadowmap_atlas')
        colatlas = layout.column()
        colatlas.enabled = rpdat.rp_shadowmap_atlas
        colatlas.prop(rpdat, 'rp_max_lights')
        colatlas.prop(rpdat, 'rp_max_lights_cluster')
        colatlas.prop(rpdat, 'rp_shadowmap_atlas_lod')
        colatlas_lod = colatlas.column()
        colatlas_lod.enabled = rpdat.rp_shadowmap_atlas_lod
        colatlas_lod.prop(rpdat, 'rp_shadowmap_atlas_lod_subdivisions')
        colatlas_lod_info = colatlas_lod.row()
        colatlas_lod_info.alignment = 'RIGHT'
        subdivs_list = self.compute_subdivs(int(rpdat.rp_shadowmap_cascade), int(rpdat.rp_shadowmap_atlas_lod_subdivisions))
        subdiv_text = "Subdivisions for spot lights: " + ', '.join(map(str, subdivs_list))
        colatlas_lod_info.label(text=subdiv_text, icon="IMAGE_ZDEPTH")
        if not rpdat.rp_shadowmap_atlas_single_map:
            colatlas_lod_info = colatlas_lod.row()
            colatlas_lod_info.alignment = 'RIGHT'
            subdivs_list = self.compute_subdivs(int(rpdat.rp_shadowmap_cube), int(rpdat.rp_shadowmap_atlas_lod_subdivisions))
            subdiv_text = "Subdivisions for point lights: " + ', '.join(map(str, subdivs_list))
            colatlas_lod_info.label(text=subdiv_text, icon="IMAGE_ZDEPTH")
        size_warning = int(rpdat.rp_shadowmap_cascade) > 2048 or int(rpdat.rp_shadowmap_cube) > 2048
        colatlas.prop(rpdat, 'rp_shadowmap_atlas_single_map')
        # show size for single texture
        if rpdat.rp_shadowmap_atlas_single_map:
            colatlas_single = colatlas.column()
            colatlas_single.prop(rpdat, 'rp_shadowmap_atlas_max_size')
            if rpdat.rp_shadowmap_atlas_max_size != '':
                atlas_size = int(rpdat.rp_shadowmap_atlas_max_size)
                shadowmap_size = int(rpdat.rp_shadowmap_cascade)
                if shadowmap_size > 2048:
                    size_warning = True
                point_lights = self.lights_number_atlas(rpdat, atlas_size, shadowmap_size, 'point')
                spot_lights = self.lights_number_atlas(rpdat, atlas_size, shadowmap_size, 'spot')
                dir_lights = self.lights_number_atlas(rpdat, atlas_size, shadowmap_size, 'sun')
                col = colatlas_single.row()
                col.alignment = 'RIGHT'
                col.label(text=f'Enough space for { point_lights } point lights or { spot_lights } spot lights or { dir_lights } directional lights.')
        else:
            # show size for all types
            colatlas_mixed = colatlas.column()
            colatlas_mixed.prop(rpdat, 'rp_shadowmap_atlas_max_size_spot')
            if rpdat.rp_shadowmap_atlas_max_size_spot != '':
                atlas_size = int(rpdat.rp_shadowmap_atlas_max_size_spot)
                shadowmap_size = int(rpdat.rp_shadowmap_cascade)
                spot_lights = self.lights_number_atlas(rpdat, atlas_size, shadowmap_size, 'spot')
                if shadowmap_size > 2048:
                    size_warning = True
                col = colatlas_mixed.row()
                col.alignment = 'RIGHT'
                col.label(text=f'Enough space for {spot_lights} spot lights.')
            colatlas_mixed.prop(rpdat, 'rp_shadowmap_atlas_max_size_point')
            if rpdat.rp_shadowmap_atlas_max_size_point != '':
                atlas_size = int(rpdat.rp_shadowmap_atlas_max_size_point)
                shadowmap_size = int(rpdat.rp_shadowmap_cube)
                point_lights = self.lights_number_atlas(rpdat, atlas_size, shadowmap_size, 'point')
                if shadowmap_size > 2048:
                    size_warning = True
                col = colatlas_mixed.row()
                col.alignment = 'RIGHT'
                col.label(text=f'Enough space for {point_lights} point lights.')
            colatlas_mixed.prop(rpdat, 'rp_shadowmap_atlas_max_size_sun')
            if rpdat.rp_shadowmap_atlas_max_size_sun != '':
                atlas_size = int(rpdat.rp_shadowmap_atlas_max_size_sun)
                shadowmap_size = int(rpdat.rp_shadowmap_cascade)
                dir_lights = self.lights_number_atlas(rpdat, atlas_size, shadowmap_size, 'sun')
                if shadowmap_size > 2048:
                    size_warning = True
                col = colatlas_mixed.row()
                col.alignment = 'RIGHT'
                col.label(text=f'Enough space for {dir_lights} directional lights.')
        # show warning when user picks a size higher than 2048 (arbitrary number).
        if size_warning:
            col = layout.column()
            row = col.row()
            row.alignment = 'RIGHT'
            row.label(text='Warning: Game will crash if texture size is higher than max texture size allowed by target.', icon='ERROR')
class ARM_PT_RenderPathVoxelsPanel(bpy.types.Panel):
    """Render-path sub-panel with the Voxel AO (voxel-based ambient occlusion) settings."""
    bl_label = "Voxel AO"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "render"
    bl_options = {'DEFAULT_CLOSED'}
    bl_parent_id = "ARM_PT_RenderPathPanel"
    def draw_header(self, context):
        # Checkbox in the panel header toggles the whole Voxel AO feature.
        wrd = bpy.data.worlds['Arm']
        if len(wrd.arm_rplist) <= wrd.arm_rplist_index:
            # No valid active render-path entry; draw nothing.
            return
        rpdat = wrd.arm_rplist[wrd.arm_rplist_index]
        self.layout.prop(rpdat, "rp_voxelao", text="")
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        wrd = bpy.data.worlds['Arm']
        if len(wrd.arm_rplist) <= wrd.arm_rplist_index:
            # No valid active render-path entry; draw nothing.
            return
        rpdat = wrd.arm_rplist[wrd.arm_rplist_index]
        # Grey out all settings while Voxel AO is disabled via the header toggle.
        layout.enabled = rpdat.rp_voxelao
        layout.prop(rpdat, 'arm_voxelgi_shadows')
        layout.prop(rpdat, 'arm_voxelgi_cones')
        layout.prop(rpdat, 'rp_voxelgi_resolution')
        layout.prop(rpdat, 'rp_voxelgi_resolution_z')
        layout.prop(rpdat, 'arm_voxelgi_dimensions')
        layout.prop(rpdat, 'arm_voxelgi_revoxelize')
        # Camera-follow and temporal options only apply when re-voxelizing each frame.
        col2 = layout.column()
        col2.enabled = rpdat.arm_voxelgi_revoxelize
        col2.prop(rpdat, 'arm_voxelgi_camera')
        col2.prop(rpdat, 'arm_voxelgi_temporal')
        layout.prop(rpdat, 'arm_voxelgi_occ')
        layout.prop(rpdat, 'arm_voxelgi_step')
        layout.prop(rpdat, 'arm_voxelgi_range')
        layout.prop(rpdat, 'arm_voxelgi_offset')
        layout.prop(rpdat, 'arm_voxelgi_aperture')
class ARM_PT_RenderPathWorldPanel(bpy.types.Panel):
    """Render-path sub-panel with world/environment settings (background, irradiance, clouds, water)."""
    bl_label = "World"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "render"
    bl_options = {'DEFAULT_CLOSED'}
    bl_parent_id = "ARM_PT_RenderPathPanel"
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        wrd = bpy.data.worlds['Arm']
        if len(wrd.arm_rplist) <= wrd.arm_rplist_index:
            # No valid active render-path entry; draw nothing.
            return
        rpdat = wrd.arm_rplist[wrd.arm_rplist_index]
        layout.prop(rpdat, "rp_background")
        # Radiance depends on irradiance, and radiance size depends on radiance,
        # so each level of options is only enabled when its parent toggle is on.
        col = layout.column()
        col.prop(rpdat, 'arm_irradiance')
        colb = col.column()
        colb.enabled = rpdat.arm_irradiance
        colb.prop(rpdat, 'arm_radiance')
        sub = colb.row()
        sub.enabled = rpdat.arm_radiance
        sub.prop(rpdat, 'arm_radiance_size')
        layout.separator()
        layout.prop(rpdat, 'arm_clouds')
        # Water settings; all sub-options are greyed out until water is enabled.
        col = layout.column(align=True)
        col.prop(rpdat, "rp_water")
        col = col.column(align=True)
        col.enabled = rpdat.rp_water
        col.prop(rpdat, 'arm_water_level')
        col.prop(rpdat, 'arm_water_density')
        col.prop(rpdat, 'arm_water_displace')
        col.prop(rpdat, 'arm_water_speed')
        col.prop(rpdat, 'arm_water_freq')
        col.prop(rpdat, 'arm_water_refract')
        col.prop(rpdat, 'arm_water_reflect')
        col.prop(rpdat, 'arm_water_color')
class ARM_PT_RenderPathPostProcessPanel(bpy.types.Panel):
    """Render-path sub-panel with all post-process effect settings.

    The header checkbox toggles render-to-texture; post-processing only
    applies when the scene is rendered to an off-screen target first.
    """
    bl_label = "Post Process"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "render"
    bl_options = {'DEFAULT_CLOSED'}
    bl_parent_id = "ARM_PT_RenderPathPanel"
    def draw_header(self, context):
        wrd = bpy.data.worlds['Arm']
        if len(wrd.arm_rplist) <= wrd.arm_rplist_index:
            # No valid active render-path entry; draw nothing.
            return
        rpdat = wrd.arm_rplist[wrd.arm_rplist_index]
        self.layout.prop(rpdat, "rp_render_to_texture", text="")
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        wrd = bpy.data.worlds['Arm']
        if len(wrd.arm_rplist) <= wrd.arm_rplist_index:
            # No valid active render-path entry; draw nothing.
            return
        rpdat = wrd.arm_rplist[wrd.arm_rplist_index]
        # All post-process settings require render-to-texture (header toggle).
        layout.enabled = rpdat.rp_render_to_texture
        # Anti-aliasing / resolution.
        col = layout.column()
        col.prop(rpdat, "rp_antialiasing")
        col.prop(rpdat, "rp_supersampling")
        col = layout.column()
        col.prop(rpdat, 'arm_rp_resolution')
        if rpdat.arm_rp_resolution == 'Custom':
            col.prop(rpdat, 'arm_rp_resolution_size')
            col.prop(rpdat, 'arm_rp_resolution_filter')
        col.prop(rpdat, 'rp_dynres')
        layout.separator()
        # Screen-space global illumination; sub-options active only when SSGI is on.
        col = layout.column()
        col.prop(rpdat, "rp_ssgi")
        sub = col.column()
        sub.enabled = rpdat.rp_ssgi != 'Off'
        sub.prop(rpdat, 'arm_ssgi_half_res')
        sub.prop(rpdat, 'arm_ssgi_rays')
        sub.prop(rpdat, 'arm_ssgi_radius')
        sub.prop(rpdat, 'arm_ssgi_strength')
        sub.prop(rpdat, 'arm_ssgi_max_steps')
        layout.separator(factor=0.5)
        layout.prop(rpdat, 'arm_micro_shadowing')
        layout.separator()
        # Screen-space reflections.
        col = layout.column()
        col.prop(rpdat, "rp_ssr")
        col = col.column()
        col.enabled = rpdat.rp_ssr
        col.prop(rpdat, 'arm_ssr_half_res')
        col.prop(rpdat, 'arm_ssr_ray_step')
        col.prop(rpdat, 'arm_ssr_min_ray_step')
        col.prop(rpdat, 'arm_ssr_search_dist')
        col.prop(rpdat, 'arm_ssr_falloff_exp')
        col.prop(rpdat, 'arm_ssr_jitter')
        layout.separator()
        # Screen-space ray-traced shadows.
        col = layout.column()
        col.prop(rpdat, 'arm_ssrs')
        col = col.column()
        col.enabled = rpdat.arm_ssrs
        col.prop(rpdat, 'arm_ssrs_ray_step')
        layout.separator()
        # Bloom.
        col = layout.column()
        col.prop(rpdat, "rp_bloom")
        col = col.column()
        col.enabled = rpdat.rp_bloom
        col.prop(rpdat, 'arm_bloom_threshold')
        col.prop(rpdat, 'arm_bloom_strength')
        col.prop(rpdat, 'arm_bloom_radius')
        layout.separator()
        # Motion blur.
        col = layout.column()
        col.prop(rpdat, "rp_motionblur")
        col = col.column()
        col.enabled = rpdat.rp_motionblur != 'Off'
        col.prop(rpdat, 'arm_motion_blur_intensity')
        layout.separator()
        # Volumetric light.
        col = layout.column()
        col.prop(rpdat, "rp_volumetriclight")
        col = col.column()
        col.enabled = rpdat.rp_volumetriclight
        col.prop(rpdat, 'arm_volumetric_light_air_color')
        col.prop(rpdat, 'arm_volumetric_light_air_turbidity')
        col.prop(rpdat, 'arm_volumetric_light_steps')
        layout.separator()
        # Chromatic aberration; the sample count only matters for the spectral variant.
        col = layout.column()
        col.prop(rpdat, "rp_chromatic_aberration")
        col = col.column()
        col.enabled = rpdat.rp_chromatic_aberration
        col.prop(rpdat, 'arm_chromatic_aberration_type')
        col.prop(rpdat, 'arm_chromatic_aberration_strength')
        if rpdat.arm_chromatic_aberration_type == "Spectral":
            col.prop(rpdat, 'arm_chromatic_aberration_samples')
class ARM_PT_RenderPathCompositorPanel(bpy.types.Panel):
    """Render-path sub-panel with compositor effects (tonemapping, fog, exposure, lens effects)."""
    bl_label = "Compositor"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "render"
    bl_options = {'DEFAULT_CLOSED'}
    bl_parent_id = "ARM_PT_RenderPathPanel"
    def draw_header(self, context):
        # Header checkbox toggles the whole compositor pass.
        wrd = bpy.data.worlds['Arm']
        if len(wrd.arm_rplist) <= wrd.arm_rplist_index:
            # No valid active render-path entry; draw nothing.
            return
        rpdat = wrd.arm_rplist[wrd.arm_rplist_index]
        self.layout.prop(rpdat, "rp_compositornodes", text="")
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        wrd = bpy.data.worlds['Arm']
        if len(wrd.arm_rplist) <= wrd.arm_rplist_index:
            # No valid active render-path entry; draw nothing.
            return
        rpdat = wrd.arm_rplist[wrd.arm_rplist_index]
        layout.enabled = rpdat.rp_compositornodes
        layout.prop(rpdat, 'arm_tonemap')
        layout.separator()
        # Simple toggle + strength pairs share the checkbox/value row helper.
        col = layout.column()
        draw_conditional_prop(col, 'Letterbox', rpdat, 'arm_letterbox', 'arm_letterbox_size')
        draw_conditional_prop(col, 'Sharpen', rpdat, 'arm_sharpen', 'arm_sharpen_strength')
        draw_conditional_prop(col, 'Vignette', rpdat, 'arm_vignette', 'arm_vignette_strength')
        draw_conditional_prop(col, 'Film Grain', rpdat, 'arm_grain', 'arm_grain_strength')
        layout.separator()
        # Fog.
        col = layout.column()
        col.prop(rpdat, 'arm_fog')
        col = col.column(align=True)
        col.enabled = rpdat.arm_fog
        col.prop(rpdat, 'arm_fog_color')
        col.prop(rpdat, 'arm_fog_amounta')
        col.prop(rpdat, 'arm_fog_amountb')
        layout.separator()
        # Auto exposure.
        col = layout.column()
        col.prop(rpdat, "rp_autoexposure")
        sub = col.column(align=True)
        sub.enabled = rpdat.rp_autoexposure
        sub.prop(rpdat, 'arm_autoexposure_strength', text='Strength')
        sub.prop(rpdat, 'arm_autoexposure_speed', text='Speed')
        layout.separator()
        col = layout.column()
        col.prop(rpdat, 'arm_lensflare')
        col.prop(rpdat, 'arm_fisheye')
        layout.separator()
        # Lens texture overlay with optional luminance-based masking.
        col = layout.column()
        col.prop(rpdat, 'arm_lens_texture')
        if rpdat.arm_lens_texture != "":
            col.prop(rpdat, 'arm_lens_texture_masking')
            if rpdat.arm_lens_texture_masking:
                sub = col.column(align=True)
                sub.prop(rpdat, 'arm_lens_texture_masking_centerMinClip')
                sub.prop(rpdat, 'arm_lens_texture_masking_centerMaxClip')
                sub = col.column(align=True)
                sub.prop(rpdat, 'arm_lens_texture_masking_luminanceMin')
                sub.prop(rpdat, 'arm_lens_texture_masking_luminanceMax')
                col.prop(rpdat, 'arm_lens_texture_masking_brightnessExp')
            layout.separator()
        layout.prop(rpdat, 'arm_lut_texture')
class ARM_PT_BakePanel(bpy.types.Panel):
    """Render panel for baking lightmaps onto a list of scene objects."""
    bl_label = "Armory Bake"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "render"
    bl_options = {'DEFAULT_CLOSED'}
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        scn = bpy.data.scenes[context.scene.name]
        row = layout.row(align=True)
        row.prop(scn, "arm_bakemode", expand=True)
        # The rest of the panel only applies to static-map baking.
        if scn.arm_bakemode == "Static Map":
            row = layout.row(align=True)
            row.alignment = 'EXPAND'
            row.operator("arm.bake_textures", icon="RENDER_STILL")
            row.operator("arm.bake_apply")
            col = layout.column()
            col.prop(scn, 'arm_bakelist_scale')
            # Bake quality reuses the Cycles sample count.
            col.prop(scn.cycles, "samples")
            layout.prop(scn, 'arm_bakelist_unwrap')
            # Grow the list widget when there is more than one entry so the
            # move up/down buttons fit next to it.
            rows = 2
            if len(scn.arm_bakelist) > 1:
                rows = 4
            row = layout.row()
            row.template_list("ARM_UL_BakeList", "The_List", scn, "arm_bakelist", scn, "arm_bakelist_index", rows=rows)
            col = row.column(align=True)
            col.operator("arm_bakelist.new_item", icon='ADD', text="")
            col.operator("arm_bakelist.delete_item", icon='REMOVE', text="")
            col.menu("ARM_MT_BakeListSpecials", icon='DOWNARROW_HLT', text="")
            if len(scn.arm_bakelist) > 1:
                col.separator()
                op = col.operator("arm_bakelist.move_item", icon='TRIA_UP', text="")
                op.direction = 'UP'
                op = col.operator("arm_bakelist.move_item", icon='TRIA_DOWN', text="")
                op.direction = 'DOWN'
            # Per-entry settings for the selected bake item.
            if scn.arm_bakelist_index >= 0 and len(scn.arm_bakelist) > 0:
                item = scn.arm_bakelist[scn.arm_bakelist_index]
                layout.prop_search(item, "obj", bpy.data, "objects", text="Object")
                layout.prop(item, "res_x")
                layout.prop(item, "res_y")
class ArmGenLodButton(bpy.types.Operator):
    """Automatically generate LoD levels."""
    bl_idname = 'arm.generate_lod'
    bl_label = 'Auto Generate'

    def lod_name(self, name, level):
        # LoD suffixes are 1-based: <name>_LOD1, <name>_LOD2, ...
        return f'{name}_LOD{level + 1}'

    def execute(self, context):
        obj = context.object
        if obj is None:
            return {'CANCELLED'}
        # Discard any previously generated LoD list on the mesh data.
        mdata = context.object.data
        mdata.arm_lodlist_index = 0
        mdata.arm_lodlist.clear()
        wrd = bpy.data.worlds['Arm']
        num_levels = wrd.arm_lod_gen_levels
        # Create one decimated, hidden copy of the object per LoD level.
        ratio = wrd.arm_lod_gen_ratio
        for lvl in range(num_levels):
            lod_obj = obj.copy()
            # Zero the transform so the copy sits exactly on its parent.
            for axis in range(3):
                lod_obj.location[axis] = 0
                lod_obj.rotation_euler[axis] = 0
                lod_obj.scale[axis] = 1
            lod_obj.data = obj.data.copy()
            lod_obj.name = self.lod_name(obj.name, lvl)
            lod_obj.parent = obj
            lod_obj.hide_viewport = True
            lod_obj.hide_render = True
            decimate = lod_obj.modifiers.new('Decimate', 'DECIMATE')
            decimate.ratio = ratio
            # Each successive level is decimated further by the same factor.
            ratio *= wrd.arm_lod_gen_ratio
            context.scene.collection.objects.link(lod_obj)
        # Assign evenly spaced screen-size thresholds to the generated levels.
        step = 1 / (num_levels + 1)
        for lvl in range(num_levels):
            entry = mdata.arm_lodlist.add()
            entry.name = self.lod_name(obj.name, lvl)
            entry.screen_size_prop = (1 - step * lvl) - step
        return {'FINISHED'}
class ARM_PT_LodPanel(bpy.types.Panel):
    """Object panel for managing level-of-detail entries on a mesh."""
    bl_label = "Armory Lod"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "object"
    bl_options = {'DEFAULT_CLOSED'}
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        obj = bpy.context.object
        # Mesh only for now
        if obj.type != 'MESH':
            return
        mdata = obj.data
        # Grow the list widget when there is more than one entry so the
        # move up/down buttons fit next to it.
        rows = 2
        if len(mdata.arm_lodlist) > 1:
            rows = 4
        row = layout.row()
        row.template_list("ARM_UL_LodList", "The_List", mdata, "arm_lodlist", mdata, "arm_lodlist_index", rows=rows)
        col = row.column(align=True)
        col.operator("arm_lodlist.new_item", icon='ADD', text="")
        col.operator("arm_lodlist.delete_item", icon='REMOVE', text="")
        if len(mdata.arm_lodlist) > 1:
            col.separator()
            op = col.operator("arm_lodlist.move_item", icon='TRIA_UP', text="")
            op.direction = 'UP'
            op = col.operator("arm_lodlist.move_item", icon='TRIA_DOWN', text="")
            op.direction = 'DOWN'
        # Per-entry settings for the selected LoD level.
        if mdata.arm_lodlist_index >= 0 and len(mdata.arm_lodlist) > 0:
            item = mdata.arm_lodlist[mdata.arm_lodlist_index]
            layout.prop_search(item, "name", bpy.data, "objects", text="Object")
            layout.prop(item, "screen_size_prop")
        layout.prop(mdata, "arm_lod_material")
        # Auto lod for meshes
        if obj.type == 'MESH':
            layout.separator()
            layout.operator("arm.generate_lod")
            wrd = bpy.data.worlds['Arm']
            layout.prop(wrd, 'arm_lod_gen_levels')
            layout.prop(wrd, 'arm_lod_gen_ratio')
class ArmGenTerrainButton(bpy.types.Operator):
    '''Generate terrain sectors'''
    bl_idname = 'arm.generate_terrain'
    bl_label = 'Generate'
    def execute(self, context):
        # Builds a grid of sectors[0] x sectors[1] plane objects under an empty
        # "Terrain" root, each with a displacement height-map and a shared material.
        scn = context.scene
        if scn == None:
            return{'CANCELLED'}
        sectors = scn.arm_terrain_sectors
        size = scn.arm_terrain_sector_size
        height_scale = scn.arm_terrain_height_scale
        # Create material
        mat = bpy.data.materials.new(name="Terrain")
        mat.use_nodes = True
        nodes = mat.node_tree.nodes
        links = mat.node_tree.links
        # World-space displacement driven by the per-sector height texture.
        node = nodes.new('ShaderNodeDisplacement')
        node.location = (-200, 100)
        node.inputs[2].default_value = height_scale
        node.space = 'WORLD'
        links.new(nodes['Material Output'].inputs[2], node.outputs[0])
        node = nodes.new('ShaderNodeTexImage')
        node.location = (-600, 100)
        node.interpolation = 'Closest'
        node.extension = 'EXTEND'
        # Exposed as a runtime material parameter so each sector can bind its own map.
        node.arm_material_param = True
        node.name = '_TerrainHeight'
        node.label = '_TerrainHeight' # Height-map texture link for this sector
        links.new(nodes['Displacement'].inputs[0], nodes['_TerrainHeight'].outputs[0])
        # Bump node derives shading normals from the same height map.
        node = nodes.new('ShaderNodeBump')
        node.location = (-200, -200)
        node.inputs[0].default_value = 5.0
        links.new(nodes['Bump'].inputs[2], nodes['_TerrainHeight'].outputs[0])
        # NOTE(review): input index 20 is assumed to be the Principled BSDF
        # 'Normal' socket for the Blender version this targets — verify on upgrades.
        links.new(nodes['Principled BSDF'].inputs[20], nodes['Bump'].outputs[0])
        # Create sectors
        root_obj = bpy.data.objects.new("Terrain", None)
        root_obj.location[0] = 0
        root_obj.location[1] = 0
        root_obj.location[2] = 0
        root_obj.arm_export = False
        scn.collection.objects.link(root_obj)
        scn.arm_terrain_object = root_obj
        for i in range(sectors[0] * sectors[1]):
            # Sector index as a zero-padded, 1-based suffix: "01", "02", ...
            j = str(i + 1).zfill(2)
            x = i % sectors[0]
            y = int(i / sectors[0])
            bpy.ops.mesh.primitive_plane_add(location=(x * size, -y * size, 0))
            slice_obj = bpy.context.active_object
            slice_obj.scale[0] = size / 2
            slice_obj.scale[1] = -(size / 2)
            slice_obj.scale[2] = height_scale
            slice_obj.data.materials.append(mat)
            for p in slice_obj.data.polygons:
                p.use_smooth = True
            slice_obj.name = 'Terrain.' + j
            slice_obj.parent = root_obj
            # Viewport preview: simple subdivision + displace using the same height map
            # (the runtime uses the material's displacement instead).
            sub_mod = slice_obj.modifiers.new('Subdivision', 'SUBSURF')
            sub_mod.subdivision_type = 'SIMPLE'
            disp_mod = slice_obj.modifiers.new('Displace', 'DISPLACE')
            disp_mod.texture_coords = 'UV'
            disp_mod.texture = bpy.data.textures.new(name='Terrain.' + j, type='IMAGE')
            disp_mod.texture.extension = 'EXTEND'
            disp_mod.texture.use_interpolation = False
            disp_mod.texture.use_mipmap = False
            # Height maps are expected at <arm_terrain_textures>/heightmap_<j>.png.
            disp_mod.texture.image = bpy.data.images.load(filepath=scn.arm_terrain_textures+'/heightmap_' + j + '.png')
            # Subdivide until the preview resolution reaches the image width (log2).
            f = 1
            levels = 0
            while f < disp_mod.texture.image.size[0]:
                f *= 2
                levels += 1
            sub_mod.levels = sub_mod.render_levels = levels
        return{'FINISHED'}
class ARM_PT_TerrainPanel(bpy.types.Panel):
    """Scene panel for generating and configuring Armory terrain."""
    bl_label = "Armory Terrain"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "scene"
    bl_options = {'DEFAULT_CLOSED'}

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        scn = bpy.context.scene
        if scn is None:
            return
        # Generation inputs first, then the generate button, then the
        # read-back reference to the generated root object.
        for prop_name in ('arm_terrain_textures', 'arm_terrain_sectors',
                          'arm_terrain_sector_size', 'arm_terrain_height_scale'):
            layout.prop(scn, prop_name)
        layout.operator('arm.generate_terrain')
        layout.prop(scn, 'arm_terrain_object')
class ARM_PT_TilesheetPanel(bpy.types.Panel):
    """Material panel for editing sprite tilesheets and their named actions."""
    bl_label = "Armory Tilesheet"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "material"
    bl_options = {'DEFAULT_CLOSED'}
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        wrd = bpy.data.worlds['Arm']
        # Grow the list widget when there is more than one entry so the
        # move up/down buttons fit next to it.
        rows = 2
        if len(wrd.arm_tilesheetlist) > 1:
            rows = 4
        row = layout.row()
        row.template_list("ARM_UL_TilesheetList", "The_List", wrd, "arm_tilesheetlist", wrd, "arm_tilesheetlist_index", rows=rows)
        col = row.column(align=True)
        col.operator("arm_tilesheetlist.new_item", icon='ADD', text="")
        col.operator("arm_tilesheetlist.delete_item", icon='REMOVE', text="")
        if len(wrd.arm_tilesheetlist) > 1:
            col.separator()
            op = col.operator("arm_tilesheetlist.move_item", icon='TRIA_UP', text="")
            op.direction = 'UP'
            op = col.operator("arm_tilesheetlist.move_item", icon='TRIA_DOWN', text="")
            op.direction = 'DOWN'
        # Settings for the selected tilesheet, including its nested action list.
        if wrd.arm_tilesheetlist_index >= 0 and len(wrd.arm_tilesheetlist) > 0:
            dat = wrd.arm_tilesheetlist[wrd.arm_tilesheetlist_index]
            layout.prop(dat, "tilesx_prop")
            layout.prop(dat, "tilesy_prop")
            layout.prop(dat, "framerate_prop")
            layout.label(text='Actions')
            rows = 2
            if len(dat.arm_tilesheetactionlist) > 1:
                rows = 4
            row = layout.row()
            row.template_list("ARM_UL_TilesheetList", "The_List", dat, "arm_tilesheetactionlist", dat, "arm_tilesheetactionlist_index", rows=rows)
            col = row.column(align=True)
            col.operator("arm_tilesheetactionlist.new_item", icon='ADD', text="")
            col.operator("arm_tilesheetactionlist.delete_item", icon='REMOVE', text="")
            if len(dat.arm_tilesheetactionlist) > 1:
                col.separator()
                op = col.operator("arm_tilesheetactionlist.move_item", icon='TRIA_UP', text="")
                op.direction = 'UP'
                op = col.operator("arm_tilesheetactionlist.move_item", icon='TRIA_DOWN', text="")
                op.direction = 'DOWN'
            # Frame range and looping for the selected action.
            if dat.arm_tilesheetactionlist_index >= 0 and len(dat.arm_tilesheetactionlist) > 0:
                adat = dat.arm_tilesheetactionlist[dat.arm_tilesheetactionlist_index]
                layout.prop(adat, "start_prop")
                layout.prop(adat, "end_prop")
                layout.prop(adat, "loop_prop")
class ARM_PT_ProxyPanel(bpy.types.Panel):
    """Object panel for creating proxies and choosing which properties stay synced."""
    bl_label = "Armory Proxy"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "object"
    bl_options = {'DEFAULT_CLOSED'}
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        layout.operator("arm.make_proxy")
        obj = bpy.context.object
        # Sync flags are only meaningful once the object actually is a proxy.
        if obj is not None and obj.proxy is not None:
            col = layout.column(heading="Sync")
            col.prop(obj, "arm_proxy_sync_loc")
            col.prop(obj, "arm_proxy_sync_rot")
            col.prop(obj, "arm_proxy_sync_scale")
            col.separator()
            col.prop(obj, "arm_proxy_sync_materials")
            col.prop(obj, "arm_proxy_sync_modifiers")
            col.separator()
            col.prop(obj, "arm_proxy_sync_traits")
            # Trait property sync is a sub-option of trait sync.
            row = col.row()
            row.enabled = obj.arm_proxy_sync_traits
            row.prop(obj, "arm_proxy_sync_trait_props")
            row = layout.row(align=True)
            row.operator("arm.proxy_toggle_all")
            row.operator("arm.proxy_apply_all")
class ArmMakeProxyButton(bpy.types.Operator):
    '''Create proxy from linked object'''
    bl_idname = 'arm.make_proxy'
    bl_label = 'Make Proxy'

    def execute(self, context):
        obj = context.object
        if obj is None:
            return {'CANCELLED'}
        if obj.library is None:
            # Proxies can only be created from linked (library) objects.
            self.report({'ERROR'}, 'Select linked object')
            # Bug fix: previously execution fell through after the error and
            # attempted to make a proxy from the non-linked object anyway.
            return {'CANCELLED'}
        arm.proxy.make(obj)
        return {'FINISHED'}
class ArmProxyToggleAllButton(bpy.types.Operator):
    bl_idname = 'arm.proxy_toggle_all'
    bl_label = 'Toggle All'

    def execute(self, context):
        """Flip every proxy-sync flag to the opposite of the current location flag."""
        obj = context.object
        # The location flag's current value decides the new state for all flags.
        new_state = not obj.arm_proxy_sync_loc
        for flag in ('arm_proxy_sync_loc', 'arm_proxy_sync_rot',
                     'arm_proxy_sync_scale', 'arm_proxy_sync_materials',
                     'arm_proxy_sync_modifiers', 'arm_proxy_sync_traits',
                     'arm_proxy_sync_trait_props'):
            setattr(obj, flag, new_state)
        return {'FINISHED'}
class ArmProxyApplyAllButton(bpy.types.Operator):
    bl_idname = 'arm.proxy_apply_all'
    bl_label = 'Apply to All'

    def execute(self, context):
        """Copy the active object's proxy-sync flags to every object sharing its proxy."""
        source = context.object
        sync_flags = ('arm_proxy_sync_loc', 'arm_proxy_sync_rot',
                      'arm_proxy_sync_scale', 'arm_proxy_sync_materials',
                      'arm_proxy_sync_modifiers', 'arm_proxy_sync_traits',
                      'arm_proxy_sync_trait_props')
        for obj in bpy.data.objects:
            # Skip non-proxy objects and objects with a different proxy source.
            if obj.proxy is None or obj.proxy != source.proxy:
                continue
            for flag in sync_flags:
                setattr(obj, flag, getattr(source, flag))
        return {'FINISHED'}
class ArmSyncProxyButton(bpy.types.Operator):
    bl_idname = 'arm.sync_proxy'
    bl_label = 'Sync'

    def execute(self, context):
        """Synchronize all proxy objects from their linked sources, honoring per-object flags."""
        if len(bpy.data.libraries) > 0:
            # Pair each opt-in flag with the sync routine it enables.
            flag_to_sync = (
                ('arm_proxy_sync_loc', arm.proxy.sync_location),
                ('arm_proxy_sync_rot', arm.proxy.sync_rotation),
                ('arm_proxy_sync_scale', arm.proxy.sync_scale),
                ('arm_proxy_sync_materials', arm.proxy.sync_materials),
                ('arm_proxy_sync_modifiers', arm.proxy.sync_modifiers),
                ('arm_proxy_sync_traits', arm.proxy.sync_traits),
            )
            for obj in bpy.data.objects:
                if obj is None or obj.proxy is None:
                    continue
                for flag, sync_fn in flag_to_sync:
                    if getattr(obj, flag):
                        sync_fn(obj)
            print('Proxy objects synchronized')
        return {'FINISHED'}
class ArmPrintTraitsButton(bpy.types.Operator):
    bl_idname = 'arm.print_traits'
    bl_label = 'Print Traits'

    def execute(self, context):
        """Print every enabled trait of every object, grouped by scene."""
        for scene in bpy.data.scenes:
            print(f'{scene.name} traits:')
            for obj in scene.objects:
                for trait in obj.arm_traitlist:
                    if not trait.enabled_prop:
                        continue
                    # Logic-node traits are named after their tree; all other
                    # trait types carry an explicit class name.
                    if trait.type_prop == 'Logic Nodes':
                        trait_name = trait.node_tree_prop.name
                    else:
                        trait_name = trait.class_name_prop
                    print(f'Object {obj.name} - {trait_name}')
        return {'FINISHED'}
class ARM_PT_MaterialNodePanel(bpy.types.Panel):
    """N-panel tab exposing Armory options for the active shader node."""
    bl_label = 'Armory Material Node'
    bl_idname = 'ARM_PT_MaterialNodePanel'
    bl_space_type = 'NODE_EDITOR'
    bl_region_type = 'UI'
    bl_category = 'Armory'

    @classmethod
    def poll(cls, context):
        # Only visible for object shader trees that actually have an edit tree.
        space = context.space_data
        return (space.tree_type == 'ShaderNodeTree'
                and space.edit_tree
                and space.shader_type == 'OBJECT')

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False
        node = context.active_node
        # The material-parameter toggle only applies to RGB, Value and
        # Image Texture nodes.
        if node is not None and node.bl_idname in ('ShaderNodeRGB', 'ShaderNodeValue', 'ShaderNodeTexImage'):
            layout.prop(node, 'arm_material_param')
class ARM_OT_ShowFileVersionInfo(bpy.types.Operator):
    bl_label = 'Show old file version info'
    bl_idname = 'arm.show_old_file_version_info'
    bl_description = ('Displays an info panel that warns about opening a file'
                      'which was created in a previous version of Armory')
    bl_options = {'INTERNAL'}

    # Set by execute() so the static popover callback can read the world data.
    wrd = None

    def draw_message_box(self, context):
        """Popover body comparing the file's saved Armory version with the running one."""
        file_version = ARM_OT_ShowFileVersionInfo.wrd.arm_version
        current_version = props.arm_version

        layout = self.layout
        layout = layout.column(align=True)
        layout.alignment = 'EXPAND'

        if current_version == file_version:
            # Bug fixes: label() takes 'text' as a keyword-only argument
            # (positional text raised a TypeError), and the version f-string
            # was missing its closing parenthesis.
            layout.label(text='This file was saved in', icon='INFO')
            layout.label(text='the current Armory version', icon='BLANK1')
            layout.separator()
            layout.label(text=f'(version: {current_version})')
            row = layout.row(align=True)
            row.active_default = True
            row.operator('arm.discard_popup', text='Ok')
            # Bug fix: without this return, equal versions fell through to the
            # tuple comparison below and showed the "future version" warning.
            return

        # this will help order versions better, somewhat.
        # note: this is NOT complete
        current_version = tuple(current_version.split('.'))
        file_version = tuple(file_version.split('.'))

        if current_version > file_version:
            # File is older than the running SDK: offer an automatic upgrade.
            layout.label(text='Warning: This file was saved in a', icon='ERROR')
            layout.label(text='previous version of Armory!', icon='BLANK1')
            layout.separator()
            layout.label(text='Please inform yourself about breaking changes!', icon='BLANK1')
            layout.label(text=f'File saved in: {file_version}', icon='BLANK1')
            layout.label(text=f'Current version: {current_version}', icon='BLANK1')
            layout.separator()
            layout.separator()
            layout.label(text='Should Armory try to automatically update', icon='BLANK1')
            layout.label(text='the file to the current SDK version?', icon='BLANK1')
            layout.separator()
            row = layout.row(align=True)
            row.active_default = True
            row.operator('arm.update_file_sdk', text='Yes')
            row.active_default = False
            row.operator('arm.discard_popup', text='No')
        else:
            # File is newer than the running SDK: downgrading is unsupported.
            layout.label(text='Warning: This file was saved in a', icon='ERROR')
            layout.label(text='future version of Armory!', icon='BLANK1')
            layout.separator()
            layout.label(text='It is impossible to downgrade a file,', icon='BLANK1')
            layout.label(text='Something will probably be broken here.', icon='BLANK1')
            layout.label(text=f'File saved in: {file_version}', icon='BLANK1')
            layout.label(text=f'Current version: {current_version}', icon='BLANK1')
            layout.separator()
            layout.separator()
            layout.label(text='Please check how this file was created', icon='BLANK1')
            layout.separator()
            row = layout.row(align=True)
            row.active_default = True
            row.operator('arm.discard_popup', text='Ok')

    def execute(self, context):
        ARM_OT_ShowFileVersionInfo.wrd = bpy.data.worlds['Arm']
        context.window_manager.popover(ARM_OT_ShowFileVersionInfo.draw_message_box, ui_units_x=16)
        return {"FINISHED"}
class ARM_OT_ShowNodeUpdateErrors(bpy.types.Operator):
    bl_label = 'Show upgrade failure details'
    bl_idname = 'arm.show_node_update_errors'
    bl_description = ('Displays an info panel that shows the different errors that occurred when upgrading nodes')

    # Set by execute() so the static popover callback can read the world data.
    wrd = None

    def draw_message_box(self, context):
        """Popover body summarizing which logic nodes/trees failed to upgrade and why."""
        list_of_errors = arm.logicnode.replacement.replacement_errors.copy()
        # note: list_of_errors is a set of tuples: `(error_type, node_class, tree_name)`
        # where `error_type` can be "unregistered", "update failed", "future version", "bad version", or "misc."
        file_version = ARM_OT_ShowNodeUpdateErrors.wrd.arm_version
        current_version = props.arm_version
        # this will help order versions better, somewhat.
        # note: this is NOT complete
        current_version_2 = tuple(current_version.split('.'))
        file_version_2 = tuple(file_version.split('.'))
        is_armory_upgrade = (current_version_2 > file_version_2)
        # Aggregate the flat error set into distinct types, nodes and trees.
        error_types = set()
        errored_trees = set()
        errored_nodes = set()
        for error_entry in list_of_errors:
            error_types.add(error_entry[0])
            errored_nodes.add(error_entry[1])
            errored_trees.add(error_entry[2])

        layout = self.layout
        layout = layout.column(align=True)
        layout.alignment = 'EXPAND'
        layout.label(text="Some nodes failed to be updated to the current Armory version", icon="ERROR")
        if current_version == file_version:
            layout.label(text="(This might be because you are using a development snapshot, or a homemade version ;) )", icon='BLANK1')
        elif not is_armory_upgrade:
            # Typo fix: "not possible do downgrade" -> "not possible to downgrade".
            layout.label(text="(Please note that it is not possible to downgrade nodes to a previous version either.", icon='BLANK1')
            layout.label(text="This might be the cause of your problem.)", icon='BLANK1')
        layout.label(text=f'File saved in: {file_version}', icon='BLANK1')
        layout.label(text=f'Current version: {current_version}', icon='BLANK1')
        layout.separator()
        # One explanatory line per distinct error type encountered.
        if 'update failed' in error_types:
            layout.label(text="Some nodes do not have an update procedure to deal with the version saved in this file.", icon='BLANK1')
            if current_version == file_version:
                layout.label(text="(if you are a developer, this might be because you didn't implement it yet.)", icon='BLANK1')
        if 'bad version' in error_types:
            layout.label(text="Some nodes do not have version information attached to them.", icon='BLANK1')
        if 'unregistered' in error_types:
            if is_armory_upgrade:
                layout.label(text='Some nodes seem to be too old to be understood by armory anymore', icon='BLANK1')
            else:
                layout.label(text="Some nodes are unknown to armory, either because they are too new or too old.", icon='BLANK1')
        if 'future version' in error_types:
            if is_armory_upgrade:
                layout.label(text='Somehow, some nodes seem to have been created with a future version of armory.', icon='BLANK1')
            else:
                layout.label(text='Some nodes seem to have been created with a future version of armory.', icon='BLANK1')
        if 'misc.' in error_types:
            layout.label(text="Some nodes' update procedure failed to complete")
        layout.separator()
        layout.label(text='the nodes impacted are the following:', icon='BLANK1')
        for node in errored_nodes:
            layout.label(text=f'   {node}', icon='BLANK1')
        layout.separator()
        layout.label(text='the node trees impacted are the following:', icon='BLANK1')
        for tree in errored_trees:
            layout.label(text=f'   "{tree}"', icon='BLANK1')
        layout.separator()
        layout.label(text="A detailed error report has been saved next to the blender file.", icon='BLANK1')
        layout.label(text="the file name is \"node_update_failure\", followed by the current time.", icon='BLANK1')
        layout.separator()
        row = layout.row(align=True)
        row.active_default = False
        row.operator('arm.discard_popup', text='Ok')
        row.operator('arm.open_project_folder', text='Open Project Folder', icon="FILE_FOLDER")

    def execute(self, context):
        ARM_OT_ShowNodeUpdateErrors.wrd = bpy.data.worlds['Arm']
        context.window_manager.popover(ARM_OT_ShowNodeUpdateErrors.draw_message_box, ui_units_x=32)
        return {"FINISHED"}
class ARM_OT_UpdateFileSDK(bpy.types.Operator):
    bl_idname = 'arm.update_file_sdk'
    bl_label = 'Update file to current SDK version'
    bl_description = bl_label
    bl_options = {'INTERNAL'}
    def execute(self, context):
        """Migrate the saved world settings and logic nodes to the installed SDK version."""
        wrd = bpy.data.worlds['Arm']
        # This allows for seamless migration from earlier versions of Armory
        for rp in wrd.arm_rplist: # TODO: deprecated
            if rp.rp_gi != 'Off':
                rp.rp_gi = 'Off'
                rp.rp_voxelao = True
        # Replace deprecated nodes
        arm.logicnode.replacement.replace_all()
        # Stamp the file with the running version/commit and drop the build cache.
        wrd.arm_version = props.arm_version
        wrd.arm_commit = props.arm_commit
        arm.make.clean()
        print(f'Project updated to SDK {props.arm_version}. Please save the .blend file.')
        return {'FINISHED'}
class ARM_OT_DiscardPopup(bpy.types.Operator):
    """Empty operator for discarding dialogs."""
    bl_idname = 'arm.discard_popup'
    bl_label = 'OK'
    bl_description = 'Discard'
    bl_options = {'INTERNAL'}
    def execute(self, context):
        # Intentionally a no-op: clicking the button simply closes the popover.
        return {'FINISHED'}
class ArmoryUpdateListAndroidEmulatorButton(bpy.types.Operator):
    '''Updating the list of emulators for the Android platform'''
    bl_idname = 'arm.update_list_android_emulator'
    bl_label = 'Update List Emulators'

    def execute(self, context):
        # Preconditions: saved file, valid SDK path, known Android SDK root.
        if not arm.utils.check_saved(self):
            return {"CANCELLED"}
        if not arm.utils.check_sdkpath(self):
            return {"CANCELLED"}
        sdk_root = arm.utils.get_android_sdk_root_path()
        if len(sdk_root) == 0:
            return {"CANCELLED"}
        os.environ['ANDROID_SDK_ROOT'] = sdk_root
        items, err = arm.utils.get_android_emulators_list()
        if err:
            print('Update List Emulators Warning: File "'+ arm.utils.get_android_emulator_file() +'" not found. Check that the variable ANDROID_SDK_ROOT is correct in environment variables or in "Android SDK Path" setting: \n- If you specify an environment variable ANDROID_SDK_ROOT, then you need to restart Blender;\n- If you specify the setting "Android SDK Path", then repeat operation "Publish"')
            return {'FINISHED'}
        if items:
            # Re-register the AVD dropdown with the freshly discovered emulators.
            bpy.types.World.arm_project_android_list_avd = EnumProperty(
                items=[(name, name, name) for name in items],
                name="Emulator", update=assets.invalidate_compiler_cache)
        return {'FINISHED'}
class ArmoryUpdateListAndroidEmulatorRunButton(bpy.types.Operator):
    '''Launch Android emulator selected from the list'''
    bl_idname = 'arm.run_android_emulator'
    bl_label = 'Launch Emulator'

    def execute(self, context):
        # All preconditions must hold: saved file, valid SDK path, known
        # Android SDK root. The and-chain short-circuits in the same order
        # as the original early returns.
        preconditions_ok = (arm.utils.check_saved(self)
                            and arm.utils.check_sdkpath(self)
                            and len(arm.utils.get_android_sdk_root_path()) != 0)
        if not preconditions_ok:
            return {"CANCELLED"}
        make.run_android_emulators(arm.utils.get_android_emulator_name())
        return {'FINISHED'}
class ArmoryUpdateListInstalledVSButton(bpy.types.Operator):
    """Update the list of installed Visual Studio versions for the Windows platform"""
    bl_idname = 'arm.update_list_installed_vs'
    bl_label = 'Update List of Installed Visual Studio Versions'
    def execute(self, context):
        if not arm.utils.check_saved(self):
            return {"CANCELLED"}
        if not arm.utils.check_sdkpath(self):
            return {"CANCELLED"}
        # Visual Studio detection only makes sense on Windows hosts.
        if not arm.utils.get_os_is_windows():
            return {"CANCELLED"}
        wrd = bpy.data.worlds['Arm']
        items, err = arm.utils.get_list_installed_vs_version()
        if len(err) > 0:
            print('Warning for operation Update List Installed Visual Studio: '+ err +'. Check if ArmorySDK is installed correctly.')
            return{'FINISHED'}
        if len(items) > 0:
            # Full catalogue of supported VS releases; installed ones get marked below.
            items_enum = [('10', '2010', 'Visual Studio 2010 (version 10)'),
                      ('11', '2012', 'Visual Studio 2012 (version 11)'),
                      ('12', '2013', 'Visual Studio 2013 (version 12)'),
                      ('14', '2015', 'Visual Studio 2015 (version 14)'),
                      ('15', '2017', 'Visual Studio 2017 (version 15)'),
                      ('16', '2019', 'Visual Studio 2019 (version 16)'),
                      ('17', '2022', 'Visual Studio 2022 (version 17)')]
            # Preserve the user's current selection across re-registration.
            prev_select = wrd.arm_project_win_list_vs
            res_items_enum = []
            for vs in items_enum:
                l_vs = list(vs)
                # Tag the entry as installed when its major version was detected.
                for ver in items:
                    if l_vs[0] == ver[0]:
                        l_vs[1] = l_vs[1] + ' (installed)'
                        l_vs[2] = l_vs[2] + ' (installed)'
                        break
                res_items_enum.append((l_vs[0], l_vs[1], l_vs[2]))
            bpy.types.World.arm_project_win_list_vs = EnumProperty(items=res_items_enum, name="Visual Studio Version", default=prev_select, update=assets.invalidate_compiler_cache)
        return{'FINISHED'}
def draw_custom_node_menu(self, context):
    """Extension of the node context menu.

    https://blender.stackexchange.com/questions/150101/python-how-to-add-items-in-context-menu-in-2-8
    """
    selected = context.selected_nodes
    if selected is None or len(selected) != 1:
        return

    tree_type = context.space_data.tree_type
    if tree_type == 'ArmLogicTreeType':
        # Armory logic nodes are all prefixed with 'LN'.
        if not selected[0].bl_idname.startswith('LN'):
            return
        layout = self.layout
        layout.separator()
        layout.operator("arm.open_node_documentation", text="Show documentation for this node", icon='HELP')
        layout.operator("arm.open_node_haxe_source", text="Open .hx source in the browser", icon_value=ui_icons.get_id("haxe"))
        layout.operator("arm.open_node_python_source", text="Open .py source in the browser", icon='FILE_SCRIPT')

    elif tree_type == 'ShaderNodeTree':
        if context.active_node.bl_idname not in ('ShaderNodeRGB', 'ShaderNodeValue', 'ShaderNodeTexImage'):
            return
        layout = self.layout
        layout.separator()
        layout.prop(context.active_node, 'arm_material_param', text='Armory: Material Parameter')
def draw_conditional_prop(layout: bpy.types.UILayout, heading: str, data: bpy.types.AnyType, prop_condition: str, prop_value: str) -> None:
    """Draw a property row with a checkbox that enables a value field.

    The checkbox reflects ``prop_condition``; the value field for
    ``prop_value`` is only editable while the condition is enabled.
    The function fails when prop_condition is not a boolean property.
    """
    column = layout.column(heading=heading)
    toggle_row = column.row()
    toggle_row.prop(data, prop_condition, text='')
    value_row = toggle_row.row()
    value_row.enabled = getattr(data, prop_condition)
    value_row.prop(data, prop_value, expand=True)
def register():
    """Register all Armory UI classes, menu/header callbacks and
    Material-level properties."""
    # Registration order matters for panels: Blender lists them in the
    # order they were registered. Keep this tuple in sync with unregister().
    classes = (
        ARM_PT_ObjectPropsPanel,
        ARM_PT_ModifiersPropsPanel,
        ARM_PT_ParticlesPropsPanel,
        ARM_PT_PhysicsPropsPanel,
        ARM_PT_DataPropsPanel,
        ARM_PT_ScenePropsPanel,
        ARM_PT_WorldPropsPanel,
        InvalidateCacheButton,
        InvalidateMaterialCacheButton,
        ARM_OT_NewCustomMaterial,
        ARM_PG_BindTexturesListItem,
        ARM_UL_BindTexturesList,
        ARM_OT_BindTexturesListNewItem,
        ARM_OT_BindTexturesListDeleteItem,
        ARM_PT_MaterialPropsPanel,
        ARM_PT_BindTexturesPropsPanel,
        ARM_PT_MaterialBlendingPropsPanel,
        ARM_PT_MaterialDriverPropsPanel,
        ARM_PT_ArmoryPlayerPanel,
        ARM_PT_ArmoryExporterPanel,
        ARM_PT_ArmoryExporterAndroidSettingsPanel,
        ARM_PT_ArmoryExporterAndroidPermissionsPanel,
        ARM_PT_ArmoryExporterAndroidAbiPanel,
        ARM_PT_ArmoryExporterAndroidBuildAPKPanel,
        ARM_PT_ArmoryExporterHTML5SettingsPanel,
        ARM_PT_ArmoryExporterWindowsSettingsPanel,
        ARM_PT_ArmoryProjectPanel,
        ARM_PT_ProjectFlagsPanel,
        ARM_PT_ProjectFlagsDebugConsolePanel,
        ARM_PT_ProjectWindowPanel,
        ARM_PT_ProjectModulesPanel,
        ARM_PT_RenderPathPanel,
        ARM_PT_RenderPathRendererPanel,
        ARM_PT_RenderPathShadowsPanel,
        ARM_PT_RenderPathVoxelsPanel,
        ARM_PT_RenderPathWorldPanel,
        ARM_PT_RenderPathPostProcessPanel,
        ARM_PT_RenderPathCompositorPanel,
        ARM_PT_BakePanel,
        # ArmVirtualInputPanel,  # disabled in the original as well
        ArmoryPlayButton,
        ArmoryStopButton,
        ArmoryBuildProjectButton,
        ArmoryOpenProjectFolderButton,
        ArmoryOpenEditorButton,
        CleanMenu,
        CleanButtonMenu,
        ArmoryCleanProjectButton,
        ArmoryPublishProjectButton,
        ArmGenLodButton,
        ARM_PT_LodPanel,
        ArmGenTerrainButton,
        ARM_PT_TerrainPanel,
        ARM_PT_TilesheetPanel,
        ARM_PT_ProxyPanel,
        ArmMakeProxyButton,
        ArmProxyToggleAllButton,
        ArmProxyApplyAllButton,
        ArmSyncProxyButton,
        ArmPrintTraitsButton,
        ARM_PT_MaterialNodePanel,
        ARM_OT_UpdateFileSDK,
        ARM_OT_ShowFileVersionInfo,
        ARM_OT_ShowNodeUpdateErrors,
        ARM_OT_DiscardPopup,
        ArmoryUpdateListAndroidEmulatorButton,
        ArmoryUpdateListAndroidEmulatorRunButton,
        ArmoryUpdateListInstalledVSButton,
        scene.TLM_PT_Settings,
        scene.TLM_PT_Denoise,
        scene.TLM_PT_Filtering,
        scene.TLM_PT_Encoding,
        scene.TLM_PT_Utility,
        scene.TLM_PT_Additional,
    )
    for cls in classes:
        bpy.utils.register_class(cls)

    # Hook the draw callbacks into Blender's own menus/headers.
    bpy.types.VIEW3D_HT_header.append(draw_view3d_header)
    bpy.types.VIEW3D_MT_object.append(draw_view3d_object_menu)
    bpy.types.NODE_MT_context_menu.append(draw_custom_node_menu)

    # Per-material texture binding list plus its active index.
    bpy.types.Material.arm_bind_textures_list = CollectionProperty(type=ARM_PG_BindTexturesListItem)
    bpy.types.Material.arm_bind_textures_list_index = IntProperty(name='Index for arm_bind_textures_list', default=0)
def unregister():
    """Remove Armory menu/header callbacks and unregister all UI classes."""
    # Remove the menu/header callbacks first, mirroring register() in reverse.
    bpy.types.NODE_MT_context_menu.remove(draw_custom_node_menu)
    bpy.types.VIEW3D_MT_object.remove(draw_view3d_object_menu)
    bpy.types.VIEW3D_HT_header.remove(draw_view3d_header)

    # Unregistration order follows the original implementation exactly.
    classes = (
        ArmoryUpdateListInstalledVSButton,
        ArmoryUpdateListAndroidEmulatorRunButton,
        ArmoryUpdateListAndroidEmulatorButton,
        ARM_OT_DiscardPopup,
        ARM_OT_ShowNodeUpdateErrors,
        ARM_OT_ShowFileVersionInfo,
        ARM_OT_UpdateFileSDK,
        ARM_PT_ObjectPropsPanel,
        ARM_PT_ModifiersPropsPanel,
        ARM_PT_ParticlesPropsPanel,
        ARM_PT_PhysicsPropsPanel,
        ARM_PT_DataPropsPanel,
        ARM_PT_WorldPropsPanel,
        ARM_PT_ScenePropsPanel,
        InvalidateCacheButton,
        InvalidateMaterialCacheButton,
        ARM_OT_NewCustomMaterial,
        ARM_PT_MaterialDriverPropsPanel,
        ARM_PT_MaterialBlendingPropsPanel,
        ARM_PT_BindTexturesPropsPanel,
        ARM_PT_MaterialPropsPanel,
        ARM_OT_BindTexturesListDeleteItem,
        ARM_OT_BindTexturesListNewItem,
        ARM_UL_BindTexturesList,
        ARM_PG_BindTexturesListItem,
        ARM_PT_ArmoryPlayerPanel,
        ARM_PT_ArmoryExporterWindowsSettingsPanel,
        ARM_PT_ArmoryExporterHTML5SettingsPanel,
        ARM_PT_ArmoryExporterAndroidBuildAPKPanel,
        ARM_PT_ArmoryExporterAndroidAbiPanel,
        ARM_PT_ArmoryExporterAndroidPermissionsPanel,
        ARM_PT_ArmoryExporterAndroidSettingsPanel,
        ARM_PT_ArmoryExporterPanel,
        ARM_PT_ArmoryProjectPanel,
        ARM_PT_ProjectFlagsDebugConsolePanel,
        ARM_PT_ProjectFlagsPanel,
        ARM_PT_ProjectWindowPanel,
        ARM_PT_ProjectModulesPanel,
        ARM_PT_RenderPathPanel,
        ARM_PT_RenderPathRendererPanel,
        ARM_PT_RenderPathShadowsPanel,
        ARM_PT_RenderPathVoxelsPanel,
        ARM_PT_RenderPathWorldPanel,
        ARM_PT_RenderPathPostProcessPanel,
        ARM_PT_RenderPathCompositorPanel,
        ARM_PT_BakePanel,
        # ArmVirtualInputPanel,  # disabled in the original as well
        ArmoryPlayButton,
        ArmoryStopButton,
        ArmoryBuildProjectButton,
        ArmoryOpenProjectFolderButton,
        ArmoryOpenEditorButton,
        CleanMenu,
        CleanButtonMenu,
        ArmoryCleanProjectButton,
        ArmoryPublishProjectButton,
        ArmGenLodButton,
        ARM_PT_LodPanel,
        ArmGenTerrainButton,
        ARM_PT_TerrainPanel,
        ARM_PT_TilesheetPanel,
        ARM_PT_ProxyPanel,
        ArmMakeProxyButton,
        ArmProxyToggleAllButton,
        ArmProxyApplyAllButton,
        ArmSyncProxyButton,
        ArmPrintTraitsButton,
        ARM_PT_MaterialNodePanel,
        scene.TLM_PT_Settings,
        scene.TLM_PT_Denoise,
        scene.TLM_PT_Filtering,
        scene.TLM_PT_Encoding,
        scene.TLM_PT_Utility,
        scene.TLM_PT_Additional,
    )
    for cls in classes:
        bpy.utils.unregister_class(cls)
| 39.903636
| 401
| 0.645112
|
4a02c1e82ee00739541884ea01157e2c2d85bac7
| 7,493
|
py
|
Python
|
argo/workflows/client/models/io_argoproj_events_v1alpha1_resource_event_source.py
|
argentumcode/argo-client-python
|
31c1519056379d3f046d4b522f37af87243fdbb4
|
[
"Apache-2.0"
] | null | null | null |
argo/workflows/client/models/io_argoproj_events_v1alpha1_resource_event_source.py
|
argentumcode/argo-client-python
|
31c1519056379d3f046d4b522f37af87243fdbb4
|
[
"Apache-2.0"
] | null | null | null |
argo/workflows/client/models/io_argoproj_events_v1alpha1_resource_event_source.py
|
argentumcode/argo-client-python
|
31c1519056379d3f046d4b522f37af87243fdbb4
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Argo Server API
You can get examples of requests and responses by using the CLI with `--gloglevel=9`, e.g. `argo list --gloglevel=9` # noqa: E501
The version of the OpenAPI document: v3.0.4
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from argo.workflows.client.configuration import Configuration
class IoArgoprojEventsV1alpha1ResourceEventSource(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types maps attribute name -> declared OpenAPI type;
    # attribute_map maps attribute name -> JSON key in the API definition.
    openapi_types = {
        'event_types': 'list[str]',
        'filter': 'IoArgoprojEventsV1alpha1ResourceFilter',
        'group_version_resource': 'V1GroupVersionResource',
        'metadata': 'dict(str, str)',
        'namespace': 'str'
    }

    attribute_map = {
        'event_types': 'eventTypes',
        'filter': 'filter',
        'group_version_resource': 'groupVersionResource',
        'metadata': 'metadata',
        'namespace': 'namespace'
    }

    def __init__(self, event_types=None, filter=None, group_version_resource=None, metadata=None, namespace=None, local_vars_configuration=None):  # noqa: E501
        """IoArgoprojEventsV1alpha1ResourceEventSource - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._event_types = None
        self._filter = None
        self._group_version_resource = None
        self._metadata = None
        self._namespace = None
        self.discriminator = None

        # Route only explicitly provided values through the property
        # setters; unspecified fields stay None.
        if event_types is not None:
            self.event_types = event_types
        if filter is not None:
            self.filter = filter
        if group_version_resource is not None:
            self.group_version_resource = group_version_resource
        if metadata is not None:
            self.metadata = metadata
        if namespace is not None:
            self.namespace = namespace

    @property
    def event_types(self):
        """list[str]: EventTypes is the list of event type to watch.
        Possible values are - ADD, UPDATE and DELETE."""
        return self._event_types

    @event_types.setter
    def event_types(self, event_types):
        self._event_types = event_types

    @property
    def filter(self):
        """IoArgoprojEventsV1alpha1ResourceFilter: the filter of this
        resource event source."""
        return self._filter

    @filter.setter
    def filter(self, filter):
        self._filter = filter

    @property
    def group_version_resource(self):
        """V1GroupVersionResource: the group/version/resource of this
        resource event source."""
        return self._group_version_resource

    @group_version_resource.setter
    def group_version_resource(self, group_version_resource):
        self._group_version_resource = group_version_resource

    @property
    def metadata(self):
        """dict(str, str): the metadata of this resource event source."""
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        self._metadata = metadata

    @property
    def namespace(self):
        """str: the namespace of this resource event source."""
        return self._namespace

    @namespace.setter
    def namespace(self, namespace):
        self._namespace = namespace

    def to_dict(self):
        """Returns the model properties as a dict."""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                # One level deep only: convert model elements, keep the rest.
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal."""
        if not isinstance(other, IoArgoprojEventsV1alpha1ResourceEventSource):
            return False
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        if not isinstance(other, IoArgoprojEventsV1alpha1ResourceEventSource):
            return True
        return self.to_dict() != other.to_dict()
| 33.008811
| 159
| 0.648205
|
4a02c21fa0fceb55a21e7c5660f451deb3e29332
| 125
|
py
|
Python
|
week3/apps.py
|
CharityMutonii/award
|
4847a61542869851016fb4fb87016ddf558ec2fb
|
[
"MIT"
] | 2
|
2020-11-02T18:58:18.000Z
|
2021-01-06T10:59:23.000Z
|
week3/apps.py
|
CharityMutonii/award
|
4847a61542869851016fb4fb87016ddf558ec2fb
|
[
"MIT"
] | null | null | null |
week3/apps.py
|
CharityMutonii/award
|
4847a61542869851016fb4fb87016ddf558ec2fb
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from django.apps import AppConfig
class Week3Config(AppConfig):
    """Django application configuration for the ``week3`` app."""

    # Label under which Django registers this application.
    name = 'week3'
| 17.857143
| 39
| 0.792
|
4a02c23247891fd7bbb18fe2449d3ed603614f37
| 1,173
|
py
|
Python
|
examples/utils/event_callback.py
|
cclauss/pivy
|
55de2ba4dd32f62ce2d4e33ca28459cf3ea5167a
|
[
"ISC"
] | 29
|
2019-12-28T10:37:16.000Z
|
2022-02-09T10:48:04.000Z
|
examples/utils/event_callback.py
|
cclauss/pivy
|
55de2ba4dd32f62ce2d4e33ca28459cf3ea5167a
|
[
"ISC"
] | 29
|
2019-12-26T13:46:11.000Z
|
2022-03-29T18:14:33.000Z
|
examples/utils/event_callback.py
|
cclauss/pivy
|
55de2ba4dd32f62ce2d4e33ca28459cf3ea5167a
|
[
"ISC"
] | 17
|
2019-12-29T11:49:32.000Z
|
2022-02-23T00:28:18.000Z
|
import sys
from PySide2.QtGui import QColor
from PySide2.QtWidgets import QApplication
from pivy import quarter, coin
# testing if a eventcallback can remove itself.
class test(coin.SoSeparator):
    """Separator node whose event callbacks remove themselves the first
    time they fire — exercises self-removal from inside a callback."""

    def __init__(self):
        super(test, self).__init__()
        # An SoEventCallback child receives the scene events.
        self.events = coin.SoEventCallback()
        self += self.events
        # Keep the handles returned by addEventCallback so each callback
        # can deregister itself later.
        self.cb = self.events.addEventCallback(
            coin.SoLocation2Event.getClassTypeId(), self.my_cb)
        self.cb1 = self.events.addEventCallback(
            coin.SoEvent.getClassTypeId(), self.my_cb_1)

    def my_cb(self, *args):
        # Fired on pointer-location events; removes itself immediately.
        self.events.removeEventCallback(
            coin.SoLocation2Event.getClassTypeId(), self.cb)

    def my_cb_1(self, *args):
        # Fired on any event; removes itself immediately.
        self.events.removeEventCallback(
            coin.SoEvent.getClassTypeId(), self.cb1)
def main():
    """Open a minimal Quarter viewer showing a cone plus the
    self-removing event-callback node."""
    app = QApplication(sys.argv)

    viewer = quarter.QuarterWidget()
    scene_root = coin.SoSeparator()
    scene_root += coin.SoCone()
    scene_root += test()

    viewer.setSceneGraph(scene_root)
    viewer.setBackgroundColor(QColor(255, 255, 255))
    viewer.setWindowTitle("minimal")
    viewer.show()
    sys.exit(app.exec_())


if __name__ == '__main__':
    main()
| 26.066667
| 63
| 0.665814
|
4a02c3bd546cd85b4f15c49a62086146a6a6f013
| 4,929
|
py
|
Python
|
discovery-provider/src/utils/config.py
|
atticwip/audius-protocol
|
9758e849fae01508fa1d27675741228b11533e6e
|
[
"Apache-2.0"
] | 4
|
2021-12-24T14:16:05.000Z
|
2022-01-13T22:41:33.000Z
|
discovery-provider/src/utils/config.py
|
SNOmad1/audius-protocol
|
3d5fc2bf688265eb529060f1f3234ef2b95ed231
|
[
"Apache-2.0"
] | null | null | null |
discovery-provider/src/utils/config.py
|
SNOmad1/audius-protocol
|
3d5fc2bf688265eb529060f1f3234ef2b95ed231
|
[
"Apache-2.0"
] | null | null | null |
# TODO: Add DocString
import logging
import configparser
import os
import os.path
import datetime
from flask import current_app
logger = logging.getLogger(__name__)

# Collect configuration files in precedence order: packaged defaults first,
# then contract config, then an optional local override from $LOCAL_CONFIG.
config_files = []
if os.path.isfile("default_config.ini"):
    config_files.append("default_config.ini")
if os.path.isfile("contract_config.ini"):
    config_files.append("contract_config.ini")
if "LOCAL_CONFIG" in os.environ and os.path.isfile(os.environ["LOCAL_CONFIG"]):
    config_files.append(os.environ["LOCAL_CONFIG"])

# The defaults file is mandatory.
if "default_config.ini" not in config_files:
    raise RuntimeError("Missing required configuration: default_config.ini")

# NOTE(review): unreachable in practice — when default_config.ini is present
# config_files is non-empty, and when it is absent the check above already
# raised. Kept for defensive parity with the original behaviour.
if not config_files:
    raise RuntimeError(
        "No valid configuration file found. Please set the "
        "LOCAL_CONFIG envvar or run the app from the correct "
        "directory."
    )
def env_config_update(config, section_name, key):
    """Override ``config[section_name][key]`` from the environment if set.

    Environment variables are formatted as ``audius_<section_name>_<key>``.

    Args:
        config: mapping of section name -> mapping of key/value pairs
            (a plain dict of dicts or a ConfigParser-style object).
        section_name: configuration section to inspect.
        key: configuration key within that section.
    """
    env_var_name = f"audius_{section_name}_{key}"
    env_var_value = os.environ.get(env_var_name)
    # Fix: use an identity check against None rather than `!= None`
    # (PEP 8; `!=` can be hijacked by custom __eq__ implementations).
    env_var_exists = env_var_value is not None
    # NOTE(review): this logs one line per config key at ERROR level,
    # presumably for startup visibility — consider demoting to DEBUG.
    logger.error(f"{env_var_name} : Exists? {env_var_exists}")
    if env_var_exists:
        # Override any config values with environment variables if present
        # Variables are formatted as audius_<section_name>_<key>
        config[section_name][key] = env_var_value
class ConfigIni(configparser.ConfigParser):  # pylint: disable=too-many-ancestors
    """Subclass of ConfigParser.ConfigParser that must be run inside a
    flask app context. It looks for a special [flask] section of the config
    file and uses that to configure flask's own built-in variables."""

    def read(self, filenames, encoding=None):
        """Read the config files, then mirror their contents onto the app."""
        parsed = configparser.ConfigParser.read(self, filenames, encoding)
        self.parse_flask_section()
        return parsed

    def parse_flask_section(self):
        """Parse the [flask] section of your config and hand off the config
        to the app in context.

        Config vars should have the same name as their flask equivalent
        except in all lower-case."""
        # First pass creates an empty dict per section so cross-section
        # lookups during the second pass never KeyError.
        for section_name in self.sections():
            current_app.config[section_name] = {}
        for section_name in self.sections():
            for key, _unused in self.items(section_name):
                self._load_item(section_name, key)

        # Fall back to the primary db url when no read replica is configured.
        db_config = current_app.config["db"]
        if not db_config.get("url_read_replica"):
            db_config["url_read_replica"] = db_config["url"]

        # Always disable (not included in app.default_config)
        # See https://flask-restx.readthedocs.io/en/latest/mask.html#usage
        current_app.config["RESTX_MASK_SWAGGER"] = False

    def _load_item(self, section_name, key):
        """Load the specified item from the [flask] section. Type is
        determined by the type of the equivalent value in app.default_config
        or string if unknown."""
        default = current_app.default_config.get(key)
        section = current_app.config[section_name]
        if isinstance(default, datetime.timedelta):
            # One of the default config vars is a timedelta - interpret the
            # raw value as an int and construct the timedelta from it.
            section[key] = datetime.timedelta(self.getint(section_name, key))
        elif isinstance(default, bool):
            # bool must be checked before int (bool is a subclass of int).
            section[key] = self.getboolean(section_name, key)
        elif isinstance(default, float):
            section[key] = self.getfloat(section_name, key)
        elif isinstance(default, int):
            section[key] = self.getint(section_name, key)
        else:
            # Coerce to str() because Flask expects some of these values
            # not to be unicode.
            section[key] = str(self.get(section_name, key))
        env_config_update(current_app.config, section_name, key)
# Shared, module-level view of the merged configuration files.
shared_config = configparser.ConfigParser()
shared_config.read(config_files)

# Set up section-specific dictionaries for convenient access (i.e.
# config.section_name['VAR_NAME']), applying env-var overrides per key.
for section in shared_config.sections():
    for static_item in shared_config.items(section):
        static_key = static_item[0]
        env_config_update(shared_config, section, static_key)

# Validate that delegate credentials are configured. Read with fallbacks so
# a missing [delegate] section or key cannot raise NameError while building
# the error message (the original referenced possibly-unbound names inside
# its exception handler).
owner_wallet = shared_config.get("delegate", "owner_wallet", fallback=None)
private_key = shared_config.get("delegate", "private_key", fallback=None)
if not owner_wallet or not private_key:
    # Security: never interpolate the private key into the error text —
    # it would leak the secret into logs and crash reports.
    raise RuntimeError(
        f"Missing delegate owner wallet ({owner_wallet}) and/or delegate private key"
    )
| 38.811024
| 98
| 0.685332
|
4a02c3d4035bbfbfa3bf126623d2d36e4469c88e
| 934
|
py
|
Python
|
019.py
|
reiyw/nlp100
|
d664e30a7e75de1730a672f705b4d76548c45bb8
|
[
"MIT"
] | null | null | null |
019.py
|
reiyw/nlp100
|
d664e30a7e75de1730a672f705b4d76548c45bb8
|
[
"MIT"
] | null | null | null |
019.py
|
reiyw/nlp100
|
d664e30a7e75de1730a672f705b4d76548c45bb8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import collections
def main():
    """19. Count frequencies of the first-column strings and sort by count.

    Read lines from stdin, count how often each first-column string
    occurs and print "<count> <word>" lines in descending frequency.

    Verify the result with the cut, uniq and sort commands.
    """
    # collections.Counter stores elements as dict keys and their counts
    # as dict values (an unordered dict subclass); it can be initialized
    # from any iterable or mapping.
    counter = collections.Counter(
        line.split()[0].strip() for line in sys.stdin)
    # most_common([n]) returns the n most frequent elements in descending
    # order of count; with no argument it returns all elements.
    for word, count in counter.most_common():
        # Fix: use the function form of print (single parenthesized
        # argument) so the script runs under both Python 2 and Python 3;
        # the original used the Python-2-only print statement.
        print('{} {}'.format(count, word))


# $ cut -f 1 hightemp.txt | sort | uniq -c | sort -nr
#   3 山形県
#   3 埼玉県
#   3 群馬県
#   3 山梨県
#   2 愛知県
#   2 千葉県
#   2 岐阜県
#   2 静岡県
#   1 高知県
#   1 和歌山県
#   1 大阪府
#   1 愛媛県
if __name__ == '__main__':
    main()
| 20.755556
| 57
| 0.59636
|
4a02c45161fd0fdc22ced66bacde21d98c4def85
| 10,694
|
py
|
Python
|
clox/test/test.py
|
nomoid/CraftingInterpretersStudy
|
7b57174cb9627ffdb9e95228d0984061b2727ae8
|
[
"MIT"
] | null | null | null |
clox/test/test.py
|
nomoid/CraftingInterpretersStudy
|
7b57174cb9627ffdb9e95228d0984061b2727ae8
|
[
"MIT"
] | null | null | null |
clox/test/test.py
|
nomoid/CraftingInterpretersStudy
|
7b57174cb9627ffdb9e95228d0984061b2727ae8
|
[
"MIT"
] | null | null | null |
# Code adapted from https://github.com/munificent/craftinginterpreters/blob/master/util/test.py
# The above code is licensed under the MIT license
import re
from collections import defaultdict
from os import listdir
from os.path import abspath, basename, dirname, isdir, isfile, join, realpath, relpath, splitext
import re
from subprocess import Popen, PIPE
import sys
import time
import term
# Runs the tests.
# Repository root: three levels up from this file.
REPO_DIR = dirname(dirname(dirname(realpath(__file__))))
# Expectation markers embedded as comments in the .lox test sources.
OUTPUT_EXPECT = re.compile(r'// expect: ?(.*)')
ERROR_EXPECT = re.compile(r'// (Error.*)')
ERROR_LINE_EXPECT = re.compile(r'// \[((java|c) )?line (\d+)\] (Error.*)')
RUNTIME_ERROR_EXPECT = re.compile(r'// expect runtime error: (.+)')
# Patterns used to parse the interpreter's stderr output.
SYNTAX_ERROR_RE = re.compile(r'\[.*line (\d+)\] (Error.+)')
STACK_TRACE_RE = re.compile(r'\[line (\d+)\]')
NONTEST_RE = re.compile(r'// nontest')
# Which interpreter flavour these tests target and how to invoke it.
interpreter_language = 'c'
interpreter_args = [join(REPO_DIR, 'clox', 'clox.exe')]
# Mutable counters updated as the suite runs.
passed = 0
failed = 0
num_skipped = 0
expectations = 0
class Test:
    """A single Lox test file.

    Parses expectation comments out of the file, runs the interpreter
    over it and validates stdout/stderr/exit code against the
    expectations, accumulating human-readable failure messages."""

    def __init__(self, path):
        self.path = path
        self.output = []               # [(expected stdout line, source line number)]
        self.compile_errors = set()    # expected "[line] message" strings
        self.runtime_error_line = 0
        self.runtime_error_message = None
        self.exit_code = 0             # expected interpreter exit code
        self.failures = []

    def parse(self):
        """Scan the file for expectation comments.

        Returns False when the file is marked '// nontest' (not a test at
        all); True otherwise."""
        global expectations

        line_num = 1
        with open(self.path, 'r') as file:
            for line in file:
                match = OUTPUT_EXPECT.search(line)
                if match:
                    self.output.append((match.group(1), line_num))
                    expectations += 1

                match = ERROR_EXPECT.search(line)
                if match:
                    self.compile_errors.add(
                        "[{0}] {1}".format(line_num, match.group(1)))
                    # If we expect a compile error, it should exit with EX_DATAERR.
                    self.exit_code = 65
                    expectations += 1

                match = ERROR_LINE_EXPECT.search(line)
                if match:
                    # The two interpreters are slightly different in terms of which
                    # cascaded errors may appear after an initial compile error because
                    # their panic mode recovery is a little different. To handle that,
                    # the tests can indicate if an error line should only appear for a
                    # certain interpreter.
                    language = match.group(2)
                    if not language or language == interpreter_language:
                        self.compile_errors.add("[{0}] {1}".format(
                            match.group(3), match.group(4)))
                        # If we expect a compile error, it should exit with EX_DATAERR.
                        self.exit_code = 65
                        expectations += 1

                match = RUNTIME_ERROR_EXPECT.search(line)
                if match:
                    self.runtime_error_line = line_num
                    self.runtime_error_message = match.group(1)
                    # If we expect a runtime error, it should exit with EX_SOFTWARE.
                    self.exit_code = 70
                    expectations += 1

                match = NONTEST_RE.search(line)
                if match:
                    # Not a test file at all, so ignore it.
                    return False
                line_num += 1

        # If we got here, it's a valid test.
        return True

    def run(self):
        """Invoke the interpreter on this test file and validate the result."""
        args = interpreter_args[:]
        args.append(self.path)
        proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        out, err = proc.communicate()
        self.validate(proc.returncode, out, err)

    def validate(self, exit_code, out, err):
        """Check the interpreter's output and exit code against expectations."""
        if self.compile_errors and self.runtime_error_message:
            self.fail(
                "Test error: Cannot expect both compile and runtime errors.")
            return

        try:
            out = out.decode("utf-8").replace('\r\n', '\n')
            err = err.decode("utf-8").replace('\r\n', '\n')
        except UnicodeDecodeError:
            # Fix: the original used a bare `except:` and then fell through
            # with undecoded bytes, which crashed on str operations below.
            # Catch only decode errors and bail out after recording it.
            self.fail('Error decoding output.')
            return

        error_lines = err.split('\n')

        # Validate that an expected runtime error occurred.
        if self.runtime_error_message:
            self.validate_runtime_error(error_lines)
        else:
            self.validate_compile_errors(error_lines)

        self.validate_exit_code(exit_code, error_lines)
        self.validate_output(out)

    def validate_runtime_error(self, error_lines):
        """Check stderr reports the expected runtime error on the right line."""
        if len(error_lines) < 2:
            self.fail('Expected runtime error "{0}" and got none.',
                self.runtime_error_message)
            return

        # Skip any compile errors. This can happen if there is a compile error in
        # a module loaded by the module being tested.
        line = 0
        while SYNTAX_ERROR_RE.search(error_lines[line]):
            line += 1

        if error_lines[line] != self.runtime_error_message:
            self.fail('Expected runtime error "{0}" and got:',
                self.runtime_error_message)
            self.fail(error_lines[line])

        # Make sure the stack trace has the right line. Skip over any lines that
        # come from builtin libraries.
        match = False
        stack_lines = error_lines[line + 1:]
        for stack_line in stack_lines:
            match = STACK_TRACE_RE.search(stack_line)
            if match:
                break

        if not match:
            self.fail('Expected stack trace and got:')
            for stack_line in stack_lines:
                self.fail(stack_line)
        else:
            stack_line = int(match.group(1))
            if stack_line != self.runtime_error_line:
                self.fail('Expected runtime error on line {0} but was on line {1}.',
                    self.runtime_error_line, stack_line)

    def validate_compile_errors(self, error_lines):
        """Check every expected compile error occurred and nothing extra did."""
        # Validate that every compile error was expected.
        found_errors = set()
        num_unexpected = 0
        for line in error_lines:
            match = SYNTAX_ERROR_RE.search(line)
            if match:
                error = "[{0}] {1}".format(match.group(1), match.group(2))
                if error in self.compile_errors:
                    found_errors.add(error)
                else:
                    if num_unexpected < 10:
                        self.fail('Unexpected error:')
                        self.fail(line)
                    num_unexpected += 1
            elif line != '':
                if num_unexpected < 10:
                    self.fail('Unexpected output on stderr:')
                    self.fail(line)
                num_unexpected += 1

        if num_unexpected > 10:
            self.fail('(truncated ' + str(num_unexpected - 10) + ' more...)')

        # Validate that every expected error occurred.
        for error in self.compile_errors - found_errors:
            self.fail('Missing expected error: {0}', error)

    def validate_exit_code(self, exit_code, error_lines):
        """Check the interpreter exited with the expected status code."""
        if exit_code == self.exit_code:
            return

        if len(error_lines) > 10:
            error_lines = error_lines[0:10]
            error_lines.append('(truncated...)')
        self.fail('Expected return code {0} and got {1}. Stderr:',
            self.exit_code, exit_code)
        self.failures += error_lines

    def validate_output(self, out):
        """Compare the interpreter's stdout with the expected output lines."""
        # Remove the trailing last empty line.
        out_lines = out.split('\n')
        if out_lines[-1] == '':
            del out_lines[-1]

        index = 0
        for line in out_lines:
            if sys.version_info < (3, 0):
                line = line.encode('utf-8')

            if index >= len(self.output):
                self.fail('Got output "{0}" when none was expected.', line)
            elif self.output[index][0] != line:
                self.fail('Expected output "{0}" on line {1} and got "{2}".',
                    self.output[index][0], self.output[index][1], line)
            index += 1

        while index < len(self.output):
            self.fail('Missing expected output "{0}" on line {1}.',
                self.output[index][0], self.output[index][1])
            index += 1

    def fail(self, message, *args):
        """Record one failure message, optionally str.format-ed with args."""
        if args:
            message = message.format(*args)
        self.failures.append(message)
def walk(dir, callback):
    """
    Walks [dir], and executes [callback] on each file.
    """
    dir = abspath(dir)
    for entry in listdir(dir):
        path = join(dir, entry)
        if isdir(path):
            # Recurse into subdirectories.
            walk(path, callback)
        else:
            callback(path)
def run_script(path):
    """Run one .lox test file and update the global pass/fail counters.

    Benchmark files and non-.lox files are ignored.  Progress is drawn on a
    single rewritable terminal status line; failing tests additionally dump
    their failure messages.
    """
    if "benchmark" in path:
        return
    global passed
    global failed
    global num_skipped
    if (splitext(path)[1] != '.lox'):
        return
    # Make a nice short path relative to the working directory.
    # Normalize it to use "/" since, among other things, the interpreters expect
    # the argument to use that.
    path = relpath(path).replace("\\", "/")
    # Update the status line.
    term.print_line('Passed: {} Failed: {} Skipped: {} {}'.format(
        term.green(passed),
        term.red(failed),
        term.yellow(num_skipped),
        term.gray('({})'.format(path))))
    # Read the test and parse out the expectations.
    test = Test(path)
    if not test.parse():
        # It's a skipped or non-test file.
        return
    test.run()
    # Display the results.
    if len(test.failures) == 0:
        passed += 1
    else:
        failed += 1
        term.print_line(term.red('FAIL') + ': ' + path)
        print('')
        for failure in test.failures:
            print(' ' + term.pink(failure))
        print('')
def run_suite():
    """Run every test under clox/test/lox and print a summary.

    Resets the global counters, walks the test tree, and returns True
    when every test passed.
    """
    global passed
    global failed
    global num_skipped
    global expectations
    passed = 0
    failed = 0
    num_skipped = 0
    expectations = 0
    start_time = time.time()
    walk(join(REPO_DIR, 'clox', 'test', 'lox'), run_script)
    time_passed = time.time() - start_time
    # Clear the in-place status line before printing the summary.
    term.print_line()
    print()
    if failed == 0:
        print('All {} tests passed ({} expectations) in {:.2f} seconds.'.format(
            term.green(passed), str(expectations), time_passed))
    else:
        print('{} tests passed and {} tests failed in {:.2f} seconds.'.format(
            term.green(passed), term.red(failed), time_passed))
    return failed == 0
def main(argv):
    """Entry point for the test runner.

    Exits 1 on bad usage or when any test fails, so CI and shell callers
    can detect failures from the process exit code.
    """
    if len(argv) > 1:
        print('Usage: test.py')
        sys.exit(1)
    # Bug fix: run_suite() returns True only when every test passed, but
    # its result used to be discarded, making the script exit 0 even on
    # test failures.  Propagate it through the exit code.
    if not run_suite():
        sys.exit(1)
# Script entry point: delegate to main() with the raw argv.
if __name__ == '__main__':
    main(sys.argv)
| 32.210843
| 96
| 0.556293
|
4a02c493bfbe35777d5ea94511f74d0237b4a1e8
| 20,867
|
py
|
Python
|
misc/Rhythmbox-Plugin/upnp_coherence/MediaStore.py
|
wd8rde/Coherence
|
792ee3ddf28e4748c116056915a2bb7155137e7f
|
[
"MIT"
] | null | null | null |
misc/Rhythmbox-Plugin/upnp_coherence/MediaStore.py
|
wd8rde/Coherence
|
792ee3ddf28e4748c116056915a2bb7155137e7f
|
[
"MIT"
] | null | null | null |
misc/Rhythmbox-Plugin/upnp_coherence/MediaStore.py
|
wd8rde/Coherence
|
792ee3ddf28e4748c116056915a2bb7155137e7f
|
[
"MIT"
] | null | null | null |
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
#
# Copyright 2011, Caleb Callaway <enlightened-despot@gmail.com>
# Copyright 2007-2010, Frank Scholz <dev@coherence-project.org>
# Copyright 2007, James Livingston <doclivingston@gmail.com>
import os.path
import rhythmdb
import coherence.extern.louie as louie
import urllib
from coherence import __version_info__
from coherence.upnp.core import DIDLLite
from coherence.backend import BackendItem, BackendStore
ROOT_CONTAINER_ID = 0
AUDIO_CONTAINER = 100
AUDIO_ALL_CONTAINER_ID = 101
AUDIO_ARTIST_CONTAINER_ID = 102
AUDIO_ALBUM_CONTAINER_ID = 103
AUDIO_PLAYLIST_CONTAINER_ID = 104
CONTAINER_COUNT = 10000
TRACK_COUNT = 1000000
# most of this class is from Coherence, originally under the MIT licence
class Container(BackendItem):
    """A browsable UPnP container with a fixed id/parent/name.

    Children are either a static list (populated via add_child) or a
    callback taking the container id and returning the child list.
    Python 2 / Coherence backend code.
    """

    logCategory = 'rb_media_store'

    def __init__(self, id, parent_id, name, children_callback=None, store=None, play_container=False):
        self.id = id
        self.parent_id = parent_id
        self.name = name
        self.mimetype = 'directory'
        self.store = store
        # When True (and store is set), get_item() advertises a DLNA
        # "play whole container" resource.
        self.play_container = play_container
        self.update_id = 0
        if children_callback != None:
            # Callable invoked lazily in get_children().
            self.children = children_callback
        else:
            self.children = []

    def add_child(self, child):
        # Only meaningful when self.children is a static list.
        self.children.append(child)

    def get_children(self, start=0, request_count=0):
        if callable(self.children):
            children = self.children(self.id)
        else:
            children = self.children
        self.info("Container get_children %r (%r,%r)", children, start, request_count)
        if request_count == 0:
            return children[start:]
        else:
            # NOTE(review): slices up to *index* request_count rather than
            # start + request_count -- verify against Coherence's calling
            # convention before changing.
            return children[start:request_count]

    def get_child_count(self):
        return len(self.get_children())

    def get_item(self, parent_id=None):
        # Build the DIDL-Lite representation of this container.
        item = DIDLLite.Container(self.id, self.parent_id, self.name)
        item.childCount = self.get_child_count()
        if self.store and self.play_container == True:
            if item.childCount > 0:
                # fid points at the first child's track id in the flat
                # TRACK_COUNT-offset id space.
                res = DIDLLite.PlayContainerResource(self.store.server.uuid, cid=self.get_id(), fid=str(TRACK_COUNT + int(self.get_children()[0].get_id())))
                item.res.append(res)
        return item

    def get_name(self):
        return self.name

    def get_id(self):
        return self.id
class Playlist(BackendItem):
    """A Rhythmbox playlist exposed as a UPnP playlist container.

    ``child`` is an iter into the Rhythmbox playlist source-list model;
    the playlist's tracks are pulled lazily from the source widget tree.
    Python 2 / Coherence backend code.
    """

    logCategory = 'rb_media_store'

    def __init__(self, store, child, id):
        self.id = id
        self.store = store
        # 2: RB_SOURCELIST_MODEL_COLUMN_NAME
        # 3: RB_SOURCELIST_MODEL_COLUMN_SOURCE
        self.title, self.source = self.store.playlist_model.get(child, 2, 3)
        self.children = None
        # NOTE(review): this query filters on PROP_ALBUM == playlist title
        # and the query object is never stored or executed -- it looks
        # copied from Album.__init__; confirm whether it is dead code.
        query = self.store.db.query_new()
        self.store.db.query_append(query,
                [rhythmdb.QUERY_PROP_EQUALS,
                rhythmdb.PROP_TYPE,
                self.store.db.entry_type_get_by_name('song')],
                [rhythmdb.QUERY_PROP_EQUALS,
                rhythmdb.PROP_ALBUM,
                self.title])

    def get_children(self, start=0, request_count=0):
        # Lazily materialize the track list from the playlist source's
        # widget tree the first time children are requested.
        if self.children == None:
            self.children = map(self._create_track_from_playlist_item,
                # who knows what the other children/magic numbers mean
                self.source.get_children()[0].get_children()[1].get_children()[0].get_model())
        return self.children

    def _create_track_from_playlist_item(self, item):
        # Resolve the playlist row back to a RhythmDB entry id.
        uri = item[0].get_playback_uri()
        entry = self.store.db.entry_lookup_by_location(uri)
        id = self.store.db.entry_get(entry, rhythmdb.PROP_ENTRY_ID)
        return Track(self.store, id, self.id)

    def get_child_count(self):
        # Best-effort: the widget-tree traversal above can fail.
        try:
            return len(self.get_children())
        except:
            return 0

    def get_item(self):
        item = DIDLLite.PlaylistContainer(self.id, AUDIO_PLAYLIST_CONTAINER_ID, self.title)
        # PlayContainerResource only exists in Coherence >= 0.6.4.
        if __version_info__ >= (0, 6, 4):
            if self.get_child_count() > 0:
                res = DIDLLite.PlayContainerResource(self.store.server.uuid, cid=self.get_id(), fid=str(TRACK_COUNT + int(self.get_children()[0].get_id())))
                item.res.append(res)
        return item

    def get_id(self):
        return self.id

    def get_name(self):
        return self.title
class Album(BackendItem):
    """All tracks of one album, exposed as a UPnP MusicAlbum container.

    An async RhythmDB query (song entries with matching album title) is
    kicked off at construction time.  Python 2 / Coherence backend code.
    """

    logCategory = 'rb_media_store'

    def __init__(self, store, title, id, parent_id):
        self.id = id
        self.title = title
        self.store = store
        query = self.store.db.query_new()
        self.store.db.query_append(query, [rhythmdb.QUERY_PROP_EQUALS, rhythmdb.PROP_TYPE, self.store.db.entry_type_get_by_name('song')],
            [rhythmdb.QUERY_PROP_EQUALS, rhythmdb.PROP_ALBUM, self.title])
        self.tracks_per_album_query = self.store.db.query_model_new(query)
        #self.tracks_per_album_query.set_sort_order(rhythmdb.rhythmdb_query_model_track_sort_func)
        self.store.db.do_full_query_async_parsed(self.tracks_per_album_query, query)

    def get_children(self, start=0, request_count=0):
        children = []

        # Order tracks by their album track number (Python 2 cmp-style).
        def track_sort(x, y):
            entry = self.store.db.entry_lookup_by_id(x.id)
            x_track = self.store.db.entry_get(entry, rhythmdb.PROP_TRACK_NUMBER)
            entry = self.store.db.entry_lookup_by_id(y.id)
            y_track = self.store.db.entry_get(entry, rhythmdb.PROP_TRACK_NUMBER)
            return cmp(x_track, y_track)

        # Collect one Track per row of the (async-filled) query model.
        def collate (model, path, iter):
            self.info("Album get_children %r %r %r" % (model, path, iter))
            id = model.get(iter, 0)[0]
            children.append(Track(self.store, id, self.id))

        self.tracks_per_album_query.foreach(collate)
        children.sort(cmp=track_sort)
        if request_count == 0:
            return children[start:]
        else:
            # NOTE(review): slices to index request_count, not
            # start + request_count (same convention as Container).
            return children[start:request_count]

    def get_child_count(self):
        return len(self.get_children())

    def get_item(self, parent_id=AUDIO_ALBUM_CONTAINER_ID):
        item = DIDLLite.MusicAlbum(self.id, parent_id, self.title)
        # PlayContainerResource only exists in Coherence >= 0.6.4.
        if __version_info__ >= (0, 6, 4):
            if self.get_child_count() > 0:
                res = DIDLLite.PlayContainerResource(self.store.server.uuid, cid=self.get_id(), fid=str(TRACK_COUNT + int(self.get_children()[0].get_id())))
                item.res.append(res)
        return item

    def get_id(self):
        return self.id

    def get_name(self):
        return self.title

    def get_cover(self):
        # NOTE(review): self.cover is never assigned anywhere in this
        # class, so this raises AttributeError if called -- confirm
        # whether any caller still uses it.
        return self.cover
class Artist(BackendItem):
    """All albums of one artist, plus a synthetic "All tracks" container.

    Two RhythmDB models are set up at construction: the artist's tracks
    and, derived from it, the artist's distinct album titles.
    Python 2 / Coherence backend code (note the `print` statement below).
    """

    logCategory = 'rb_media_store'

    def __init__(self, store, name, id, parent_id):
        self.id = id
        self.name = name
        self.store = store
        query = self.store.db.query_new()
        self.store.db.query_append(query, [rhythmdb.QUERY_PROP_EQUALS, rhythmdb.PROP_TYPE, self.store.db.entry_type_get_by_name('song')],
            [rhythmdb.QUERY_PROP_EQUALS, rhythmdb.PROP_ARTIST, self.name])
        self.tracks_per_artist_query = self.store.db.query_model_new(query)
        self.store.db.do_full_query_async_parsed(self.tracks_per_artist_query, query)
        # Property model of distinct album names fed from the track query.
        self.albums_per_artist_query = self.store.db.property_model_new(rhythmdb.PROP_ALBUM)
        self.albums_per_artist_query.props.query_model = self.tracks_per_artist_query

    def get_artist_all_tracks(self, id):
        # Children callback for the synthetic "All tracks of <artist>"
        # container created in get_children().
        children = []
        def collate (model, path, iter):
            id = model.get(iter, 0)[0]
            print id
            children.append(Track(self.store, id, self.id))
        self.tracks_per_artist_query.foreach(collate)
        return children

    def get_children(self, start=0, request_count=0):
        children = []

        # Map each distinct album name to the Album object the store
        # already created; priority rows are the model's "All" header.
        def collate (model, path, iter):
            name = model.get(iter, 0)[0]
            priority = model.get(iter, 1)[0]
            self.info("get_children collate %r %r", name, priority)
            if priority is False:
                try:
                    album = self.store.albums[name]
                    children.append(album)
                except:
                    self.warning("hmm, a new album %r, that shouldn't happen", name)

        self.albums_per_artist_query.foreach(collate)
        if len(children):
            # Prepend a lazily-created "All tracks of <artist>" container,
            # cached on the store by a string id.
            all_id = 'artist_all_tracks_%d' % (self.id)
            if all_id not in self.store.containers:
                self.store.containers[all_id] = \
                    Container(all_id, self.id, 'All tracks of %s' % self.name,
                        children_callback=self.get_artist_all_tracks,
                        store=self.store, play_container=True)
            children.insert(0, self.store.containers[all_id])
        if request_count == 0:
            return children[start:]
        else:
            # NOTE(review): same end-index slice convention as Container.
            return children[start:request_count]

    def get_child_count(self):
        return len(self.get_children())

    def get_item(self, parent_id=AUDIO_ARTIST_CONTAINER_ID):
        item = DIDLLite.MusicArtist(self.id, parent_id, self.name)
        return item

    def get_id(self):
        return self.id

    def get_name(self):
        return self.name
class Track(BackendItem):
    """One RhythmDB song entry exposed as a UPnP MusicTrack.

    Track ids live in a flat id space offset by TRACK_COUNT so they never
    collide with container ids.  Python 2 / Coherence backend code.
    """

    logCategory = 'rb_media_store'

    def __init__(self, store, id, parent_id):
        self.store = store
        # Accept either a bare entry id or a RhythmDB entry object.
        if type(id) == int:
            self.id = id
        else:
            self.id = self.store.db.entry_get(id, rhythmdb.PROP_ENTRY_ID)
        self.parent_id = parent_id

    def get_children(self, start=0, request_count=0):
        # Tracks are leaves.
        return []

    def get_child_count(self):
        return 0

    def get_item(self, parent_id=None):
        """Build the DIDL-Lite MusicTrack with HTTP and internal resources."""
        self.info("Track get_item %r @ %r" % (self.id, self.parent_id))
        host = ""  # NOTE(review): unused local.
        # load common values
        entry = self.store.db.entry_lookup_by_id(self.id)
        # Bitrate is in bytes/second, not kilobits/second
        bitrate = self.store.db.entry_get(entry, rhythmdb.PROP_BITRATE) * 1024 / 8
        # Duration is in HH:MM:SS format
        # (relies on Python 2 integer division for the H/M/S split).
        seconds = self.store.db.entry_get(entry, rhythmdb.PROP_DURATION)
        hours = seconds / 3600
        seconds = seconds - hours * 3600
        minutes = seconds / 60
        seconds = seconds - minutes * 60
        duration = ("%02d:%02d:%02d") % (hours, minutes, seconds)
        location = self.get_path(entry)  # NOTE(review): unused local.
        mimetype = self.store.db.entry_get(entry, rhythmdb.PROP_MIMETYPE)
        # This isn't a real mime-type
        if mimetype == "application/x-id3":
            mimetype = "audio/mpeg"
        size = self.store.db.entry_get(entry, rhythmdb.PROP_FILE_SIZE)
        album = self.store.db.entry_get(entry, rhythmdb.PROP_ALBUM)
        # Late-bind the parent to the album container when unknown.
        if self.parent_id == None:
            try:
                self.parent_id = self.store.albums[album].id
            except:
                pass
        # create item
        item = DIDLLite.MusicTrack(self.id + TRACK_COUNT, self.parent_id)
        item.album = album
        item.artist = self.store.db.entry_get(entry, rhythmdb.PROP_ARTIST)
        #item.date =
        item.genre = self.store.db.entry_get(entry, rhythmdb.PROP_GENRE)
        item.originalTrackNumber = str(self.store.db.entry_get(entry, rhythmdb.PROP_TRACK_NUMBER))
        item.title = self.store.db.entry_get(entry, rhythmdb.PROP_TITLE) # much nicer if it was entry.title
        cover = self.store.db.entry_request_extra_metadata(entry, "rb:coverArt-uri")
        #self.warning("cover for %r is %r", item.title, cover)
        if cover != None:
            _, ext = os.path.splitext(cover)
            item.albumArtURI = ''.join((self.get_url(), '?cover', ext))
        # add http resource
        res = DIDLLite.Resource(self.get_url(), 'http-get:*:%s:*' % mimetype)
        if size > 0:
            res.size = size
        # NOTE(review): duration is a string here, so "duration > 0" is a
        # str/int comparison -- always True on Python 2; on Python 3 it
        # would raise.  Confirm intent before porting.
        if duration > 0:
            res.duration = str(duration)
        if bitrate > 0:
            res.bitrate = str(bitrate)
        item.res.append(res)
        # add internal resource
        res = DIDLLite.Resource('track-%d' % self.id, 'rhythmbox:%s:%s:*' % (self.store.server.coherence.hostname, mimetype))
        if size > 0:
            res.size = size
        if duration > 0:
            res.duration = str(duration)
        if bitrate > 0:
            res.bitrate = str(bitrate)
        item.res.append(res)
        return item

    def get_id(self):
        return self.id

    def get_name(self):
        entry = self.store.db.entry_lookup_by_id(self.id)
        return self.store.db.entry_get(entry, rhythmdb.PROP_TITLE)

    def get_url(self):
        # External HTTP URL in the TRACK_COUNT-offset id space.
        return self.store.urlbase + str(self.id + TRACK_COUNT)

    def get_path(self, entry=None):
        """Return the local filesystem path for file:// entries, else None."""
        if entry is None:
            entry = self.store.db.entry_lookup_by_id(self.id)
        uri = self.store.db.entry_get(entry, rhythmdb.PROP_LOCATION)
        self.info("Track get_path uri = %r", uri)
        location = None
        if uri.startswith("file://"):
            # Python 2: decode the percent-encoded path to unicode.
            location = unicode(urllib.unquote(uri[len("file://"):]))
        self.info("Track get_path location = %r", location)
        return location

    def get_cover(self):
        entry = self.store.db.entry_lookup_by_id(self.id)
        cover = self.store.db.entry_request_extra_metadata(entry, "rb:coverArt-uri")
        return cover
class MediaStore(BackendStore):
    """Coherence MediaServer backend over the Rhythmbox database.

    Builds the static container hierarchy (all tracks / albums / artists /
    playlists), sets up async RhythmDB queries, and resolves UPnP object
    ids back to containers and tracks.  Python 2 / Coherence backend code.
    """

    logCategory = 'rb_media_store'
    implements = ['MediaServer']

    def __init__(self, server, **kwargs):
        BackendStore.__init__(self, server, **kwargs)
        self.warning("__init__ MediaStore %r", kwargs)
        self.db = kwargs['db']
        self.plugin = kwargs['plugin']
        # Map Windows Media Connect's well-known ids onto our containers.
        self.wmc_mapping.update({'4': lambda: self.get_by_id(AUDIO_ALL_CONTAINER_ID), # all tracks
            '7': lambda: self.get_by_id(AUDIO_ALBUM_CONTAINER_ID), # all albums
            '6': lambda: self.get_by_id(AUDIO_ARTIST_CONTAINER_ID), # all artists
            '14': lambda: self.get_by_id(AUDIO_PLAYLIST_CONTAINER_ID), # all playlists
            })
        self.next_id = CONTAINER_COUNT
        # Lazily-built caches; populated on first children_* call.
        self.albums = None
        self.artists = None
        self.tracks = None
        self.playlists = None
        self.urlbase = kwargs.get('urlbase', '')
        if(len(self.urlbase) > 0 and self.urlbase[len(self.urlbase) - 1] != '/'):
            self.urlbase += '/'
        try:
            self.name = kwargs['name']
        except KeyError:
            self.name = "Rhythmbox on %s" % self.server.coherence.hostname
        # Base query: all 'song' entries; feeds the album/artist
        # property models below.
        query = self.db.query_new()
        self.info(query)
        self.db.query_append(query, [rhythmdb.QUERY_PROP_EQUALS, rhythmdb.PROP_TYPE, self.db.entry_type_get_by_name('song')])
        qm = self.db.query_model_new(query)
        self.db.do_full_query_async_parsed(qm, query)
        self.album_query = self.db.property_model_new(rhythmdb.PROP_ALBUM)
        self.album_query.props.query_model = qm
        self.artist_query = self.db.property_model_new(rhythmdb.PROP_ARTIST)
        self.artist_query.props.query_model = qm
        self.playlist_model = self.plugin.shell.get_playlist_manager().props.display_page_model
        # Static top-level hierarchy.
        self.containers = {}
        self.containers[ROOT_CONTAINER_ID] = \
            Container(ROOT_CONTAINER_ID, -1, "Rhythmbox on %s" % self.server.coherence.hostname)
        self.containers[AUDIO_ALL_CONTAINER_ID] = \
            Container(AUDIO_ALL_CONTAINER_ID, ROOT_CONTAINER_ID, 'All tracks',
                children_callback=self.children_tracks,
                store=self, play_container=True)
        self.containers[ROOT_CONTAINER_ID].add_child(self.containers[AUDIO_ALL_CONTAINER_ID])
        self.containers[AUDIO_ALBUM_CONTAINER_ID] = \
            Container(AUDIO_ALBUM_CONTAINER_ID, ROOT_CONTAINER_ID, 'Albums',
                children_callback=self.children_albums)
        self.containers[ROOT_CONTAINER_ID].add_child(self.containers[AUDIO_ALBUM_CONTAINER_ID])
        self.containers[AUDIO_ARTIST_CONTAINER_ID] = \
            Container(AUDIO_ARTIST_CONTAINER_ID, ROOT_CONTAINER_ID, 'Artists',
                children_callback=self.children_artists)
        self.containers[ROOT_CONTAINER_ID].add_child(self.containers[AUDIO_ARTIST_CONTAINER_ID])
        self.containers[AUDIO_PLAYLIST_CONTAINER_ID] = \
            Container(AUDIO_PLAYLIST_CONTAINER_ID, ROOT_CONTAINER_ID, 'Playlists',
                children_callback=self.children_playlists)
        self.containers[ROOT_CONTAINER_ID].add_child(self.containers[AUDIO_PLAYLIST_CONTAINER_ID])
        # Tell Coherence the backend is ready to be announced.
        louie.send('Coherence.UPnP.Backend.init_completed', None, backend=self)

    def get_by_id(self, id):
        """Resolve a UPnP object id to a Container/Playlist/Album/Artist/Track."""
        self.info("looking for id %r", id)
        # Synthetic per-artist "all tracks" containers use string ids.
        if isinstance(id, basestring) and id.startswith('artist_all_tracks_'):
            try:
                return self.containers[id]
            except:
                return None
        # Strip any "@..." suffix; ids below TRACK_COUNT are containers,
        # the rest are tracks in the offset id space.
        id = id.split('@', 1)
        item_id = id[0]
        item_id = int(item_id)
        if item_id < TRACK_COUNT:
            try:
                item = self.containers[item_id]
            except KeyError:
                item = None
        else:
            item = Track(self, (item_id - TRACK_COUNT), None)
        return item

    def get_next_container_id(self):
        # Monotonically-increasing ids for dynamically created containers.
        ret = self.next_id
        self.next_id += 1
        return ret

    def upnp_init(self):
        # Advertise the protocols this server can source.
        if self.server:
            self.server.connection_manager_server.set_variable(0, 'SourceProtocolInfo', [
                'rhythmbox:%s:*:*' % self.server.coherence.hostname,
                'http-get:*:audio/mpeg:*',
                ])
        self.warning("__init__ MediaStore initialized")

    def children_tracks(self, parent_id):
        """Children callback: every non-hidden song entry as a Track."""
        tracks = []
        def track_cb (entry):
            if self.db.entry_get(entry, rhythmdb.PROP_HIDDEN):
                return
            id = self.db.entry_get(entry, rhythmdb.PROP_ENTRY_ID)
            track = Track(self, id, parent_id)
            tracks.append(track)
        self.db.entry_foreach_by_type(self.db.entry_type_get_by_name('song'), track_cb)
        return tracks

    def children_albums(self, parent_id):
        """Children callback: one Album per distinct album name, title-sorted."""
        albums = {}
        self.info('children_albums')
        def album_sort(x, y):
            r = cmp(x.title, y.title)
            self.info("sort %r - %r = %r", x.title, y.title, r)
            return r
        def collate (model, path, iter):
            name = model.get(iter, 0)[0]
            priority = model.get(iter, 1)[0]
            self.info("children_albums collate %r %r", name, priority)
            # priority rows are the property model's "All" header.
            if priority is False:
                id = self.get_next_container_id()
                album = Album(self, name, id, parent_id)
                self.containers[id] = album
                albums[name] = album
        if self.albums is None:
            self.album_query.foreach(collate)
            self.albums = albums
        albums = self.albums.values() # .sort(cmp=album_sort)
        albums.sort(cmp=album_sort)
        return albums

    def children_artists(self, parent_id):
        """Children callback: one Artist per distinct artist name (cached)."""
        artists = []
        def collate (model, path, iter):
            name = model.get(iter, 0)[0]
            priority = model.get(iter, 1)[0]
            if priority is False:
                id = self.get_next_container_id()
                artist = Artist(self, name, id, parent_id)
                self.containers[id] = artist
                artists.append(artist)
        if self.artists is None:
            self.artist_query.foreach(collate)
            self.artists = artists
        return self.artists

    def children_playlists(self, killbug=False):
        """Children callback: one Playlist per row under the sidebar's
        'Playlists' branch, title-sorted (cached)."""
        playlists = []
        def playlist_sort(x, y):
            r = cmp(x.title, y.title)
            self.info("sort %r - %r = %r", x.title, y.title, r)
            return r
        def collate (model, path, iter, parent_path):
            # Only direct children of the Playlists branch qualify.
            parent = model.iter_parent(iter)
            if parent and model.get_path(parent) == parent_path:
                id = self.get_next_container_id()
                playlist = Playlist(self, iter, id)
                self.containers[id] = playlist
                playlists.append(playlist)
        if self.playlists is None:
            PLAYLISTS_PARENT = 2 # 0 -> Library, 1 -> Stores, 2 -> Playlists
            parent = self.playlist_model.iter_nth_child(None, PLAYLISTS_PARENT)
            parent_path = self.playlist_model.get_path(parent)
            self.playlist_model.foreach(collate, parent_path)
            self.playlists = playlists
            self.playlists.sort(cmp=playlist_sort)
        return self.playlists
| 35.915663
| 156
| 0.609719
|
4a02c5702f40bf972aa3bff87bf6e907cd4a1bde
| 2,946
|
py
|
Python
|
applications/tensorflow/cnns/training/inception_v1/train_images.py
|
Splendon/examples
|
ed4a8a01857b6ddca49559141acf5d0986eb01e1
|
[
"MIT"
] | null | null | null |
applications/tensorflow/cnns/training/inception_v1/train_images.py
|
Splendon/examples
|
ed4a8a01857b6ddca49559141acf5d0986eb01e1
|
[
"MIT"
] | null | null | null |
applications/tensorflow/cnns/training/inception_v1/train_images.py
|
Splendon/examples
|
ed4a8a01857b6ddca49559141acf5d0986eb01e1
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
import pdb
import os
from create_tf_record import *

# TFRecord files holding 224x224 training/validation images.
train_record_file = 'dataset/record/train224.tfrecords'
val_record_file = 'dataset/record/val224.tfrecords'

# Hyper-parameters and data dimensions (TensorFlow 1.x graph-mode script).
base_lr = 0.01
batch_size = 32
labels_nums = 5
max_steps = 100
resize_height = 224
resize_width = 224

# Graph placeholders for a training loop (unused by batch_test below).
input_images = tf.placeholder(dtype=tf.float32, shape=[batch_size, resize_height, resize_width, 3], name='input')
input_labels = tf.placeholder(dtype=tf.int32, shape=[batch_size, labels_nums], name='label')

# NOTE(review): this session initializes variables before the batching ops
# below are added to the graph -- confirm it is actually needed, since
# batch_test() opens its own session.
sess = tf.Session()
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())

train_nums = get_example_nums(train_record_file)
val_nums = get_example_nums(val_record_file)
print('train nums:%d,val nums:%d' % (train_nums, val_nums))
# get train data for record
# during training, it needs shuffle=True
train_images, train_labels = read_records(train_record_file, 224, 224, type='normalization')
train_images_batch, train_labels_batch = get_batch_images(train_images, train_labels,
    batch_size=batch_size, labels_nums=labels_nums,
    one_hot=True, shuffle=True)
# train_images: Tensor("mul:0", shape=(224, 224, 3), dtype=float32)
# train_labels: Tensor("Cast:0", shape=(), dtype=int32)
print(train_images_batch, train_labels_batch)
# during val, shuffle=True is not necessary
val_images, val_labels = read_records(val_record_file, 224, 224, type='normalization')
val_images_batch, val_labels_batch = get_batch_images(val_images, val_labels,
    batch_size=batch_size, labels_nums=labels_nums,
    one_hot=True, shuffle=False)
print(val_images_batch, val_labels_batch)
#val_x, val_y = sess.run([val_images_batch, val_labels_batch])
def batch_test(record_file, resize_height, resize_width):
    """Smoke-test the TFRecord input pipeline by pulling batches and
    printing their shapes/dtypes (TF1 queue-runner style)."""
    # Read and decode records (normalized images) from the TFRecord file.
    tf_image, tf_label = read_records(record_file, resize_height, resize_width, type='normalization')
    image_batch, label_batch = get_batch_images(tf_image, tf_label, batch_size=batch_size, labels_nums=labels_nums, one_hot=False, shuffle=False)
    init = tf.global_variables_initializer()
    with tf.Session() as sess:  # Open a fresh session for the test.
        sess.run(init)
        # Start the queue-runner threads that feed the input pipeline.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        for i in range(max_steps + 1):
            # Fetch one batch of images and labels from the queue.
            images, labels = sess.run([image_batch, label_batch])
            # Would display only the first image of each batch:
            #show_image("image", images[0, :, :, :])
            print('images.shape:{},images.tpye:{}'.format(images.shape, images.dtype))
            print('labels.shape:{},labels.tpye:{},labels:{}'.format(labels.shape, labels.dtype, labels))
        # Stop and join all queue-runner threads.
        coord.request_stop()
        coord.join(threads)


# Run the smoke test on the training records.
batch_test(train_record_file, 224, 224)
| 40.916667
| 139
| 0.692804
|
4a02c621228e07c13bb3d4f44a73010711c0e96f
| 3,951
|
py
|
Python
|
src/oci/bastion/models/work_request_error.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/bastion/models/work_request_error.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/bastion/models/work_request_error.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class WorkRequestError(object):
    """
    An error encountered while executing a work request.
    """

    def __init__(self, **kwargs):
        """
        Initializes a new WorkRequestError object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):

        :param code:
            The value to assign to the code property of this WorkRequestError.
        :type code: str

        :param message:
            The value to assign to the message property of this WorkRequestError.
        :type message: str

        :param timestamp:
            The value to assign to the timestamp property of this WorkRequestError.
        :type timestamp: datetime
        """
        # Metadata used by the OCI SDK serializers.
        self.swagger_types = {'code': 'str', 'message': 'str', 'timestamp': 'datetime'}
        self.attribute_map = {'code': 'code', 'message': 'message', 'timestamp': 'timestamp'}
        # Backing fields for the properties below.
        self._code = None
        self._message = None
        self._timestamp = None

    @property
    def code(self):
        """
        **[Required]** Gets the code of this WorkRequestError.
        A machine-usable code for the error that occurred. Error codes are listed on
        (https://docs.cloud.oracle.com/Content/API/References/apierrors.htm)

        :return: The code of this WorkRequestError.
        :rtype: str
        """
        return self._code

    @code.setter
    def code(self, code):
        """
        Sets the code of this WorkRequestError.

        :param code: The code of this WorkRequestError.
        :type: str
        """
        self._code = code

    @property
    def message(self):
        """
        **[Required]** Gets the message of this WorkRequestError.
        A human readable description of the issue encountered.

        :return: The message of this WorkRequestError.
        :rtype: str
        """
        return self._message

    @message.setter
    def message(self, message):
        """
        Sets the message of this WorkRequestError.

        :param message: The message of this WorkRequestError.
        :type: str
        """
        self._message = message

    @property
    def timestamp(self):
        """
        **[Required]** Gets the timestamp of this WorkRequestError.
        The time the error occurred. An RFC3339 formatted datetime string.

        :return: The timestamp of this WorkRequestError.
        :rtype: datetime
        """
        return self._timestamp

    @timestamp.setter
    def timestamp(self, timestamp):
        """
        Sets the timestamp of this WorkRequestError.

        :param timestamp: The timestamp of this WorkRequestError.
        :type: datetime
        """
        self._timestamp = timestamp

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        # Equal when the other object exists and all attributes match.
        return other is not None and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
| 29.266667
| 245
| 0.627436
|
4a02c7d33266f2188ba14f8988c67b98b9ba8fdd
| 383
|
py
|
Python
|
PythonExercicios/ex049.py
|
MatthewsTomts/Python_Class
|
f326d521d62c45a4fcb429d2a22cf2ab958492cb
|
[
"MIT"
] | null | null | null |
PythonExercicios/ex049.py
|
MatthewsTomts/Python_Class
|
f326d521d62c45a4fcb429d2a22cf2ab958492cb
|
[
"MIT"
] | null | null | null |
PythonExercicios/ex049.py
|
MatthewsTomts/Python_Class
|
f326d521d62c45a4fcb429d2a22cf2ab958492cb
|
[
"MIT"
] | null | null | null |
# Read an integer from the user (prompt text is in Portuguese:
# "Type an integer number:") and store it in n.
n = int(input('Digite um número inteiro: '))
# Print a divider; the ANSI escape sequence switches the terminal to a
# bold colour for all following output.
print('\033[34;1m-' * 12)
# Show the multiplication table (0..10) of the typed number, with the
# product highlighted in a second ANSI colour.
for i in range(0, 11):
    print(f' {i} x {n} = \033[1;36m{n * i}\033[34;1m')
# Closing divider.
print('-' * 12)
| 29.461538
| 76
| 0.678851
|
4a02c7faad346291172e99df500eba2b60283237
| 1,262
|
py
|
Python
|
setup.py
|
pvyParts/allianceauth-srp-mod
|
1b10910923141a6e2500f69bda67a81de172591b
|
[
"MIT"
] | null | null | null |
setup.py
|
pvyParts/allianceauth-srp-mod
|
1b10910923141a6e2500f69bda67a81de172591b
|
[
"MIT"
] | null | null | null |
setup.py
|
pvyParts/allianceauth-srp-mod
|
1b10910923141a6e2500f69bda67a81de172591b
|
[
"MIT"
] | 3
|
2020-07-25T07:34:50.000Z
|
2022-03-09T23:31:45.000Z
|
# setup.py for the allianceauth-srp-mod package.
import os

from setuptools import find_packages, setup

from srpmod import __version__

# read the contents of your README file (used as the PyPI long description)
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name='allianceauth-srp-mod',
    version=__version__,
    packages=find_packages(),
    include_package_data=True,
    license='MIT',
    description='SRP Mod expansion for ESI hookups and QOL',
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='AaronKable',
    author_email='aaronkable@gmail.com',
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Framework :: Django :: 2.2',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.7',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ]
)
| 33.210526
| 78
| 0.664025
|
4a02c87a5777aca2d087b00aeaf99bdbe82a5257
| 4,159
|
py
|
Python
|
test_fields_bool_null.py
|
kezabelle/django-strictmodels
|
87ff7d7850dcfec437d1a9751938ed932844cb45
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2015-11-11T13:42:32.000Z
|
2015-11-11T16:38:45.000Z
|
test_fields_bool_null.py
|
kezabelle/django-strictmodels
|
87ff7d7850dcfec437d1a9751938ed932844cb45
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
test_fields_bool_null.py
|
kezabelle/django-strictmodels
|
87ff7d7850dcfec437d1a9751938ed932844cb45
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.core.exceptions import ValidationError
from django.forms.models import model_to_dict, modelform_factory
from model_mommy.mommy import Mommy
import pytest
from fakeapp.models import NullBooleanFieldModel
from strictmodels import MODEL_MOMMY_MAPPING
def test_StrictNullBooleanField_default():
    """A freshly constructed model defaults the field to None (null)."""
    value = NullBooleanFieldModel()
    assert value.field is None
@pytest.mark.django_db
def test_StrictNullBooleanField_save():
    """A null value round-trips through save() and a reload unchanged."""
    x = NullBooleanFieldModel(field=None)
    x.save()
    assert model_to_dict(x) == model_to_dict(NullBooleanFieldModel.objects.get(pk=x.pk))
@pytest.mark.django_db
def test_StrictNullBooleanField_mommy():
    """model_mommy can prepare and make instances with the strict-field mapping."""
    mommy = Mommy(model=NullBooleanFieldModel)
    mommy.type_mapping.update(MODEL_MOMMY_MAPPING)
    mommy.prepare()
    mommy.make()
@pytest.mark.django_db
def test_StrictNullBooleanField_form_with_instance_valid():
    """Bound form data '2' validates and saves as True on an existing instance."""
    x = NullBooleanFieldModel(field=None)
    form_class = modelform_factory(model=NullBooleanFieldModel, fields=['field'])
    form = form_class(data={'field': '2'}, instance=x)
    assert form.is_valid() is True
    assert form.errors == {}
    assert form.save().field is True
@pytest.mark.django_db
def test_StrictNullBooleanField_form_without_instance_valid():
    """Form data 6 still validates, and the saved field ends up None."""
    form_class = modelform_factory(model=NullBooleanFieldModel, fields=['field'])
    form = form_class(data={'field': 6})
    assert form.is_valid() is True
    assert form.errors == {}
    assert form.save().field is None
def test_StrictNullBooleanField_descriptor_doesnt_disappear():
    """
    don't clobber the descriptor: repeated assignment keeps validating,
    and an invalid value raises every time round the loop.
    """
    value = NullBooleanFieldModel()
    assert value.field is None
    for x in range(1, 3):
        value.field = False
        assert value.field == False
        with pytest.raises(ValidationError):
            value.field = 'ghost'
        value.field = True
        assert value.field == True
def test_StrictNullBooleanField_trues():
    """Every truthy spelling is coerced to True at assignment time."""
    for truthy in ('t', '1', 1, 'True', True):
        assert NullBooleanFieldModel(field=truthy).field == True
def test_StrictNullBooleanField_false():
    """Every falsy spelling is coerced to False at assignment time."""
    for falsy in ('f', '0', 0, 'False', False):
        assert NullBooleanFieldModel(field=falsy).field == False
def test_StrictNullBooleanField_can_be_null():
    """Both None and the string 'None' are accepted without raising."""
    for nullish in (None, 'None'):
        NullBooleanFieldModel(field=nullish)
def test_StrictNullBooleanField_invalid():
    """An unparseable string raises ValidationError at assignment."""
    with pytest.raises(ValidationError):
        NullBooleanFieldModel(field='troo')
def test_StrictNullBooleanField_ok_until_changed():
    """
    Ensure this value cannot change to an invalid state after a valid one.
    """
    model = NullBooleanFieldModel(field=True)
    assert model.field == True
    with pytest.raises(ValidationError):
        model.field = 'faaaaalse'
@pytest.mark.django_db
def test_StrictNullBooleanField_create_via_queryset():
    """QuerySet.create() with an invalid value raises and persists no row."""
    assert NullBooleanFieldModel.objects.count() == 0
    with pytest.raises(ValidationError):
        NullBooleanFieldModel.objects.create(field=16)
    assert NullBooleanFieldModel.objects.count() == 0
@pytest.mark.django_db
def test_StrictNullBooleanField_update_via_queryset_invalid_then_get():
    """
    So for whatever reason, by the time this gets to the FieldCleaningDescriptor
    the 'blep' has been converted into True ... fun.
    (QuerySet.update() bypasses the descriptor; the value read back is True.)
    """
    model = NullBooleanFieldModel.objects.create(field=False)
    model.__class__.objects.filter(pk=model.pk).update(field='blep')
    assert model.__class__.objects.get(pk=model.pk).field == True
| 29.920863
| 88
| 0.735994
|
4a02ca7ee158b1cc7cf361466817c40f48ab304e
| 3,896
|
py
|
Python
|
main-2.py
|
MiaoJiawei/Python-voltage-correct
|
317f70105d9ae3f1e4dddde76c1c062c7140ed42
|
[
"Apache-2.0"
] | null | null | null |
main-2.py
|
MiaoJiawei/Python-voltage-correct
|
317f70105d9ae3f1e4dddde76c1c062c7140ed42
|
[
"Apache-2.0"
] | null | null | null |
main-2.py
|
MiaoJiawei/Python-voltage-correct
|
317f70105d9ae3f1e4dddde76c1c062c7140ed42
|
[
"Apache-2.0"
] | null | null | null |
import numpy
import scipy
import matplotlib.pyplot as plt
from openpyxl import load_workbook
# Calculate resistor value by least square.
def leastsq(x_array, y_array):
    """Ordinary least-squares fit of a straight line y = b*x + a.

    Computes the closed-form normal-equation solution and returns the
    tuple (b, a): slope first, intercept second.
    """
    n = len(x_array)
    mean_x = sum(x_array) / n
    mean_y = sum(y_array) / n
    # Accumulate sum(x*y) and sum(x*x) in one pass over the sample pairs.
    sum_xy = sum(x * y for x, y in zip(x_array, y_array))
    sum_xx = sum(x * x for x in x_array)
    slope = (sum_xy - n * mean_x * mean_y) / (sum_xx - n * mean_x * mean_x)
    intercept = mean_y - slope * mean_x
    return slope, intercept
def resistor(worksheet):
# Derive an effective "resistance" from a workbook of voltammetry sweeps.
# Columns alternate voltage / current (data starts at row 4); the peak
# (V, I) point of every sweep is collected and a straight line is fitted
# through the peaks with leastsq().
# NOTE(review): the value returned is the fitted SLOPE, i.e. dI/dV (a
# conductance, not ohms) -- callers divide current by it; confirm units.
# Pick maxium current value and voltage.
def maxvalue(voltage, current):
# Return [voltage_at_peak_current, peak_current] for one sweep.
res = []
cur_max = max(current)
vol_max = voltage[current.index(max(current))]
res.append(vol_max)
res.append(cur_max)
print(res)
return res
plt.figure(figsize=(8, 6))
plt.grid(alpha=0.25)
plt.xlabel('Voltage')
plt.ylabel('Current')
vol = []
cur = []
current_peaks = []
voltage_peaks = []
i = 0
# Even column index -> voltage samples; odd -> the matching currents.
# Rows 1-3 are header rows and are skipped via the j > 3 guard.
for col in worksheet.iter_cols(values_only=True):
if i % 2 == 0:
vol_temp = []
j = 1
for cell in col:
if (j > 3) and (cell != None):
vol_temp.append(cell)
j = j + 1
vol = vol_temp
else:
cur_temp = []
j = 1
for cell in col:
if (j > 3) and (cell != None):
cur_temp.append(cell)
j = j + 1
cur = cur_temp
# Row 1 of the current column holds the scan rate used as legend label.
scrt = worksheet.cell(1, int( i+1 ))
plt.plot(vol, cur, label='%s mV / Sec' %str(scrt.value))
maxres = maxvalue(vol, cur)
voltage_peaks.append(maxres[0])
current_peaks.append(maxres[1])
i = i + 1
# Set initer value.
print(voltage_peaks)
print(current_peaks)
# Fit function.
# leastsq returns (slope, intercept); only the slope is used below.
fit = leastsq(voltage_peaks, current_peaks)
resistor = fit
print(resistor)
plt.legend(loc='upper left')
plt.show()
return resistor[0]
def correct(voltage, current, resistor):
    """Return voltages compensated for the ohmic drop.

    Each output sample is voltage[i] - current[i] / resistor.  The inputs
    are left untouched; a new list with len(voltage) entries is returned.
    """
    return [voltage[i] - (current[i] / resistor)
            for i in range(len(voltage))]
# Script entry: load the raw sweeps, fit the series resistance/conductance,
# write iR-corrected voltages into a new "Corrected" sheet and plot them.
wb = load_workbook('raw.xlsx')
ws_raw = wb.active
ws_cor = wb.create_sheet("Corrected")
resistance = resistor(ws_raw)
# Set header of table.
# Copy the first three header rows of the raw sheet into the new sheet.
ws_cor.freeze_panes = 'A4'
i = 1
for raw in ws_raw.iter_rows(values_only=True):
if i < 4:
j = 1
for cont in raw:
ws_cor.cell(i, j).value = cont
j = j + 1
i = i + 1
else:
break
plt.figure(figsize=(8, 6))
plt.grid(alpha=0.25)
plt.xlabel('Voltage_correct')
plt.ylabel('Current')
vol = []
cur = []
voltage_corrected = []
i = 0
# Same column convention as resistor(): even columns = voltage, odd =
# current, data from row 4 onward.  For every (V, I) pair of columns the
# corrected voltages and the raw currents are written back side by side.
for col in ws_raw.iter_cols(values_only=True):
if i % 2 == 0:
vol_temp = []
j = 1
for cell in col:
if (j > 3) and (cell != None):
vol_temp.append(cell)
j = j + 1
vol = vol_temp
else:
cur_temp = []
j = 1
for cell in col:
if (j > 3) and (cell != None):
cur_temp.append(cell)
j = j + 1
cur = cur_temp
voltage_corrected = correct(vol, cur, resistance)
# i is odd here, so columns i and i+1 are valid 1-based indices.
j = 4
for cont in voltage_corrected:
ws_cor.cell(j, i).value = cont
j = j + 1
j = 4
for cont in cur:
ws_cor.cell(j, (i+1)).value = cont
j = j + 1
scrt = ws_raw.cell(1, int( i+1 ))
plt.plot(voltage_corrected, cur, label='%s mV / Sec' %str(scrt.value))
i = i + 1
# Fit the relathionship between current and scan rate.
# Plot the line.
# plt.plot(voltage, current, label='raw data')
# plt.plot(voltage_correct, current, label='corrected data')
plt.legend(loc='upper left')
plt.show()
# NOTE(review): the save below is commented out, so the "Corrected" sheet
# is only displayed, never persisted -- confirm whether that is intended.
# wb.save('raw.xlsx')
| 24.658228
| 78
| 0.540298
|
4a02cb97b7141547cf5c7617b8ec198dbdd4aeca
| 99
|
py
|
Python
|
leveltwo/algorithm/square/__init__.py
|
LilianBoulard/LevelTwo
|
23013a53100875d77dfae99494d2ef415d12b0df
|
[
"MIT"
] | 1
|
2021-05-03T08:21:36.000Z
|
2021-05-03T08:21:36.000Z
|
leveltwo/algorithm/square/__init__.py
|
LilianBoulard/LevelTwo
|
23013a53100875d77dfae99494d2ef415d12b0df
|
[
"MIT"
] | 2
|
2021-05-06T08:37:10.000Z
|
2021-05-06T14:08:46.000Z
|
leveltwo/algorithm/square/__init__.py
|
LilianBoulard/LevelTwo
|
23013a53100875d77dfae99494d2ef415d12b0df
|
[
"MIT"
] | null | null | null |
from .tremaux import TremauxSquare
from .manual import ManualSquare
from .astar import AstarSquare
| 24.75
| 34
| 0.848485
|
4a02cc3b07c00f64a42292e6b76549108c9b0c40
| 1,289
|
py
|
Python
|
tests/server/test_server.py
|
niklassiemer/pyiron_base
|
4f2fc35819279798a6deb6394354722378a7816b
|
[
"BSD-3-Clause"
] | null | null | null |
tests/server/test_server.py
|
niklassiemer/pyiron_base
|
4f2fc35819279798a6deb6394354722378a7816b
|
[
"BSD-3-Clause"
] | 61
|
2021-05-17T15:25:43.000Z
|
2022-03-31T04:14:19.000Z
|
tests/server/test_server.py
|
niklassiemer/pyiron_base
|
4f2fc35819279798a6deb6394354722378a7816b
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
import unittest
from pyiron_base.server.generic import Server
class TestRunmode(unittest.TestCase):
# Verifies that assigning None to Server.queue detaches the active queue
# and restores every queue-derived resource setting to its default.
@classmethod
def setUpClass(cls):
# One shared Server instance is enough; only its `queue` is mutated.
cls.server = Server()
def test_queue_set_None(self):
# Setting queue to None must be accepted without raising ...
# NOTE(review): the bare `except:` also swallows KeyboardInterrupt;
# `except Exception:` would be the safer spelling.
try:
self.server.queue = None
except:
self.fail("queue should accept None")
# ... and must reset the queue plus all dependent defaults.
self.assertEqual(self.server._active_queue, None,
"active queue not set to None")
self.assertTrue(self.server.run_mode.modal,
"run_mode default not restored after reseting queue")
self.assertEqual(self.server.cores, 1,
"cores default not restored after reseting queue")
self.assertEqual(self.server.threads, 1,
"threads default not restored after reseting queue")
self.assertEqual(self.server.run_time, None,
"run_time default not restored after reseting queue")
self.assertEqual(self.server.memory_limit, None,
"memory_limit default not restored after reseting queue")
"memory_limit default not restored after reseting queue")
if __name__ == "__main__":
unittest.main()
| 36.828571
| 108
| 0.666408
|
4a02cc7806db36327a47dea65bfa7afe06ee8872
| 2,191
|
py
|
Python
|
deepdrive_zero/experiments/discrete2/intersection_discrete_micro_turn_lower_lane_pen2_diag_lane9.py
|
deepdrive/deepdrive-2d
|
53796f84a49f29eadc1c43284e77bd4717a1e2d1
|
[
"MIT"
] | 30
|
2020-02-15T09:46:36.000Z
|
2022-03-18T08:16:52.000Z
|
deepdrive_zero/experiments/discrete2/intersection_discrete_micro_turn_lower_lane_pen2_diag_lane9.py
|
crizCraig/spud
|
53796f84a49f29eadc1c43284e77bd4717a1e2d1
|
[
"MIT"
] | 6
|
2020-02-05T05:53:53.000Z
|
2020-02-10T20:17:13.000Z
|
deepdrive_zero/experiments/discrete2/intersection_discrete_micro_turn_lower_lane_pen2_diag_lane9.py
|
deepdrive/deepdrive-2d
|
53796f84a49f29eadc1c43284e77bd4717a1e2d1
|
[
"MIT"
] | 4
|
2020-02-15T19:07:23.000Z
|
2020-04-04T17:48:32.000Z
|
import os
import sys
from deepdrive_zero.discrete.comfortable_actions2 import COMFORTABLE_ACTIONS2
from deepdrive_zero.experiments import utils
from spinup.utils.run_utils import ExperimentGrid
from spinup import ppo_pytorch
import torch
experiment_name = os.path.basename(__file__)[:-3]
notes = """Use old intersection lanes. Swerving up and to right was more result
of negative progress reward while yielding - which is now zero"""
env_config = dict(
env_name='deepdrive-2d-intersection-w-gs-allow-decel-v0',
is_intersection_map=True,
expect_normalized_action_deltas=False,
jerk_penalty_coeff=3.3e-4,
gforce_penalty_coeff=0.006 * 5,
collision_penalty_coeff=4,
lane_penalty_coeff=0.02,
speed_reward_coeff=0.50,
gforce_threshold=1.0,
end_on_lane_violation=False,
# https://iopscience.iop.org/article/10.1088/0143-0807/37/6/065008/pdf
# Importantly they depict the threshold
# for admissible acceleration onset or jerk as j = 15g/s or ~150m/s^3.
jerk_threshold=150.0, # 15g/s
incent_win=True,
constrain_controls=False,
incent_yield_to_oncoming_traffic=True,
physics_steps_per_observation=12,
discrete_actions=COMFORTABLE_ACTIONS2,
)
net_config = dict(
hidden_units=(256, 256),
activation=torch.nn.Tanh
)
eg = ExperimentGrid(name=experiment_name)
eg.add('env_name', env_config['env_name'], '', False)
# eg.add('seed', 0)
# eg.add('resume', '/home/c2/src/tmp/spinningup/data/intersection_2_agents_fine_tune_add_left_yield2/intersection_2_agents_fine_tune_add_left_yield2_s0_2020_03-23_22-40.11')
# eg.add('reinitialize_optimizer_on_resume', True)
# eg.add('num_inputs_to_add', 0)
# eg.add('pi_lr', 3e-6)
# eg.add('vf_lr', 1e-5)
# eg.add('boost_explore', 5)
eg.add('epochs', 20000)
eg.add('steps_per_epoch', 4000)
eg.add('ac_kwargs:hidden_sizes', net_config['hidden_units'], 'hid')
eg.add('ac_kwargs:activation', net_config['activation'], '')
eg.add('notes', notes, '')
eg.add('run_filename', os.path.realpath(__file__), '')
eg.add('env_config', env_config, '')
def train():
eg.run(ppo_pytorch)
if __name__ == '__main__':
utils.run(train_fn=train, env_config=env_config, net_config=net_config)
| 34.234375
| 173
| 0.752168
|
4a02ccaed78eaa0b098feedf43108ecb53c55958
| 4,700
|
py
|
Python
|
GameServer/Robot_UAV.py
|
kedlly/XDFramework
|
2e8e270acf200e8b9762b017e8f2e6683db44af1
|
[
"MIT"
] | 1
|
2018-09-10T10:37:36.000Z
|
2018-09-10T10:37:36.000Z
|
GameServer/Robot_UAV.py
|
kedlly/XDFramework
|
2e8e270acf200e8b9762b017e8f2e6683db44af1
|
[
"MIT"
] | null | null | null |
GameServer/Robot_UAV.py
|
kedlly/XDFramework
|
2e8e270acf200e8b9762b017e8f2e6683db44af1
|
[
"MIT"
] | null | null | null |
#coding:utf8
from Core.MessageMap import Serialize, Deserialize
from Messages.RequestMessages_pb2 import *
from Messages.RespondMessages_pb2 import *
from Core.Delay import Delay
from Servers.LogicServer.Bussiness.PlayerManager import PyVector3
from twisted.internet import reactor, task
from twisted.internet.protocol import ClientFactory, Protocol
import optparse
def parse_args():
usage = """usage: %prog [options] [hostname]:port ...
This is the Get Poetry Now! client, Twisted version 8.0
Run it like this:
python Robot.py xform-port port1 port2 ...
If you are in the base directory of the twisted-intro package,
you could run it like this:
python Robot.py 10001 10002 10003
to grab poetry from servers on ports 10002, and 10003 and transform
it using the server on port 10001.
Of course, there need to be appropriate servers listening on those
ports for that to work.
"""
parser = optparse.OptionParser(usage)
parser.add_option("-t", "--type", action = 'store', type = 'string', default = '', dest = 'playerType')
parser.add_option('-n', '--name', action = 'store', type = 'string', default = '' ,dest = 'name')
parser.add_option('-p', '--password', action = 'store', type = 'string', default = '',dest = 'pwd')
#parser.add_option('-p', '--password', action='store', type='string', dest='pwd')
options, addresses = parser.parse_args()
if len(addresses) < 2:
#print parser.format_help()
#parser.exit()
addresses = "127.0.0.1:8192"
def parse_address(addr):
if ':' not in addr:
host = '127.0.0.1'
port = addr
else:
host, port = addr.split(':', 1)
if not port.isdigit():
parser.error('Ports must be integers.')
return host, int(port)
return options, parse_address(addresses)
class RobotProtocol(Protocol):
def connectionMade(self):
print "request login ...."
self.transport.write(login())
global transport
transport = self.transport
def dataReceived(self, data):
respond = Deserialize(data)
global processors
if processors.has_key(type(respond)):
processors[type(respond)](respond)
def connectionLost(self, reason):
print "----lost connection :", reason
class RobotClientFactory(ClientFactory):
protocol = RobotProtocol
def startedConnecting(self, connector):
print('Started to connect.')
print connector
def clientConnectionLost(self, connector, reason):
print('Lost connection. Reason:', reason)
if reactor.running:
reactor.stop()
def clientConnectionFailed(self, connector, reason):
print('Connection failed. Reason:', reason)
if reactor.running:
reactor.stop()
transport = None
def login():
from Messages.RequestMessages_pb2 import Request_LoginAuth
import os
username = _NAME_PREFIX + str(os.getpid())
pwd = "password"
login_request = Request_LoginAuth()
login_request.username = username
login_request.password = pwd
return Serialize(login_request)
__velocity = PyVector3()
__position = PyVector3()
def onLoginRespond(data):
if data.player.pid == -1:
print "login failed, username or password error"
Delay().schedule(5, reactor.stop)
else:
print "login succeed, pid = " + str(data.player.pid) + " accessToken = " + data.token
global __velocity, __position
__velocity = PyVector3.fromProtoVector3(data.player.movement.velocity)
__position = PyVector3.fromProtoVector3(data.player.movement.position)
Delay().schedule(240, transport.write, "1\n")
Delay().schedule(1, setDir, PyVector3(0,0,-3.5))
Delay().schedule(0.75, reportMovement)
def reportMovement():
from Messages.RawData.InternalData_pb2 import MovementData
try:
md = MovementData()
md.position.CopyFrom(__position.toProtoVector3())
md.velocity.CopyFrom(__velocity.toProtoVector3())
rm = Request_Moving()
rm.movement.CopyFrom(md)
transport.write(Serialize(rm))
except Exception as e:
print e
Delay().schedule(0.75, reportMovement)
def setDir(dir):
global __velocity
__velocity = dir
Delay().schedule(8, setDir, dir * -1)
processors = {}
processors[Respond_LoginAuth] = onLoginRespond
#----------------------------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------------------------
UpdateFrequency = 0.25
def walk():
global __velocity, __position
__position += __velocity * UpdateFrequency
def update():
Delay().update(UpdateFrequency)
walk()
pass
#__opitions, addr_port =parse_args()
_NAME_PREFIX = 'UAV'
if __name__ == "__main__":
factory = RobotClientFactory()
reactor.connectTCP("127.0.0.1", 8192, factory)
l = task.LoopingCall(update)
l.start(UpdateFrequency)
reactor.run()
| 27.16763
| 125
| 0.689574
|
4a02cd11cc893366e0fc428ed314253e87b65735
| 3,262
|
py
|
Python
|
tests/test_generator__context.py
|
lurch/asciidoxy
|
9781ba696637fadbf62f1b7c5da843b0d292007d
|
[
"Apache-2.0"
] | null | null | null |
tests/test_generator__context.py
|
lurch/asciidoxy
|
9781ba696637fadbf62f1b7c5da843b0d292007d
|
[
"Apache-2.0"
] | null | null | null |
tests/test_generator__context.py
|
lurch/asciidoxy
|
9781ba696637fadbf62f1b7c5da843b0d292007d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2019-2020, TomTom (http://tomtom.com).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the generator's context."""
from pathlib import Path
from asciidoxy.model import ReferableElement
def test_context_create_sub_context(context):
context.namespace = "ns"
context.language = "lang"
context.preprocessing_run = False
context.warnings_are_errors = True
context.mult_page = True
sub = context.sub_context()
assert sub is not context
assert sub.base_dir == context.base_dir
assert sub.build_dir == context.build_dir
assert sub.fragment_dir == context.fragment_dir
assert sub.namespace == context.namespace
assert sub.language == context.language
assert sub.preprocessing_run == context.preprocessing_run
assert sub.warnings_are_errors == context.warnings_are_errors
assert sub.multipage == context.multipage
assert sub.reference is context.reference
assert sub.linked is context.linked
assert sub.inserted is context.inserted
sub.namespace = "other"
sub.language = "objc"
assert sub.namespace != context.namespace
assert sub.language != context.language
assert len(context.linked) == 0
assert "element" not in context.inserted
sub.linked.append(ReferableElement("element"))
sub.inserted["element"] = Path("path")
assert len(context.linked) == 1
assert "element" in context.inserted
def test_context_link_to_element_singlepage(context):
element_id = "element"
file_containing_element = "other_file.adoc"
link_text = "Link"
context.inserted[element_id] = context.current_document.in_file.parent / file_containing_element
assert context.link_to_element(element_id, link_text) == f"xref:{element_id}[{link_text}]"
def test_context_link_to_element_multipage(context, multipage):
element_id = "element"
file_containing_element = "other_file.adoc"
link_text = "Link"
context.inserted[element_id] = context.current_document.in_file.parent / file_containing_element
assert (context.link_to_element(
element_id, link_text) == f"xref:{file_containing_element}#{element_id}[{link_text}]")
def test_context_link_to_element_multipage_element_in_the_same_document(context, multipage):
element_id = "element"
link_text = "Link"
context.inserted[element_id] = context.current_document.in_file
assert (context.link_to_element(element_id, link_text) == f"xref:{element_id}[{link_text}]")
def test_context_link_to_element_element_not_inserted(context, single_and_multipage):
element_id = "element"
link_text = "Link"
assert element_id not in context.inserted
assert context.link_to_element(element_id, link_text) == f"xref:{element_id}[{link_text}]"
| 37.068182
| 100
| 0.751993
|
4a02cd7107acd91caaf24e824c99b9657b0e8111
| 4,691
|
py
|
Python
|
tests/integration_tests/dashboards/dao_tests.py
|
delorenzosoftware/superset
|
5403f1ec163a52623f34f459d89f20e4e190371d
|
[
"Apache-2.0"
] | 18,621
|
2017-06-19T09:57:44.000Z
|
2021-01-05T06:28:21.000Z
|
tests/integration_tests/dashboards/dao_tests.py
|
changeiot/superset
|
299b5dc64448d04abe6b35ee85fbd2b938c781bc
|
[
"Apache-2.0"
] | 9,043
|
2017-07-05T16:10:48.000Z
|
2021-01-05T17:58:01.000Z
|
tests/integration_tests/dashboards/dao_tests.py
|
changeiot/superset
|
299b5dc64448d04abe6b35ee85fbd2b938c781bc
|
[
"Apache-2.0"
] | 5,527
|
2017-07-06T01:39:43.000Z
|
2021-01-05T06:01:11.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
import copy
import json
import time
import pytest
import tests.integration_tests.test_app # pylint: disable=unused-import
from superset import db
from superset.dashboards.dao import DashboardDAO
from superset.models.dashboard import Dashboard
from tests.integration_tests.base_tests import SupersetTestCase
from tests.integration_tests.fixtures.world_bank_dashboard import (
load_world_bank_dashboard_with_slices,
load_world_bank_data,
)
class TestDashboardDAO(SupersetTestCase):
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
def test_set_dash_metadata(self):
dash = db.session.query(Dashboard).filter_by(slug="world_health").first()
data = dash.data
positions = data["position_json"]
data.update({"positions": positions})
original_data = copy.deepcopy(data)
# add filter scopes
filter_slice = dash.slices[0]
immune_slices = dash.slices[2:]
filter_scopes = {
str(filter_slice.id): {
"region": {
"scope": ["ROOT_ID"],
"immune": [slc.id for slc in immune_slices],
}
}
}
data.update({"filter_scopes": json.dumps(filter_scopes)})
DashboardDAO.set_dash_metadata(dash, data)
updated_metadata = json.loads(dash.json_metadata)
self.assertEqual(updated_metadata["filter_scopes"], filter_scopes)
# remove a slice and change slice ids (as copy slices)
removed_slice = immune_slices.pop()
removed_component = [
key
for (key, value) in positions.items()
if isinstance(value, dict)
and value.get("type") == "CHART"
and value["meta"]["chartId"] == removed_slice.id
]
positions.pop(removed_component[0], None)
data.update({"positions": positions})
DashboardDAO.set_dash_metadata(dash, data)
updated_metadata = json.loads(dash.json_metadata)
expected_filter_scopes = {
str(filter_slice.id): {
"region": {
"scope": ["ROOT_ID"],
"immune": [slc.id for slc in immune_slices],
}
}
}
self.assertEqual(updated_metadata["filter_scopes"], expected_filter_scopes)
# reset dash to original data
DashboardDAO.set_dash_metadata(dash, original_data)
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
def test_get_dashboard_changed_on(self):
self.login(username="admin")
session = db.session()
dashboard = session.query(Dashboard).filter_by(slug="world_health").first()
changed_on = dashboard.changed_on.replace(microsecond=0)
assert changed_on == DashboardDAO.get_dashboard_changed_on(dashboard)
assert changed_on == DashboardDAO.get_dashboard_changed_on("world_health")
old_changed_on = dashboard.changed_on
# freezegun doesn't work for some reason, so we need to sleep here :(
time.sleep(1)
data = dashboard.data
positions = data["position_json"]
data.update({"positions": positions})
original_data = copy.deepcopy(data)
data.update({"foo": "bar"})
DashboardDAO.set_dash_metadata(dashboard, data)
session.merge(dashboard)
session.commit()
new_changed_on = DashboardDAO.get_dashboard_changed_on(dashboard)
assert old_changed_on.replace(microsecond=0) < new_changed_on
assert new_changed_on == DashboardDAO.get_dashboard_and_datasets_changed_on(
dashboard
)
assert new_changed_on == DashboardDAO.get_dashboard_and_slices_changed_on(
dashboard
)
DashboardDAO.set_dash_metadata(dashboard, original_data)
session.merge(dashboard)
session.commit()
| 38.45082
| 84
| 0.674057
|
4a02ce06ee94620370894fcaa793fa86a7f5cd0b
| 585
|
py
|
Python
|
janitriapp/models.py
|
kumarisneha/daily_news
|
dc067f7474cac94f6df351133efdfffb41c52627
|
[
"MIT"
] | null | null | null |
janitriapp/models.py
|
kumarisneha/daily_news
|
dc067f7474cac94f6df351133efdfffb41c52627
|
[
"MIT"
] | null | null | null |
janitriapp/models.py
|
kumarisneha/daily_news
|
dc067f7474cac94f6df351133efdfffb41c52627
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
from janitriapp.choices import *
class UserInterest(models.Model):
    """One-to-one record of a user's reading interest (INTEREST_CHOICES)."""

    # on_delete made explicit: OneToOneField(User) without on_delete relies
    # on the implicit CASCADE of Django < 2.0 and raises TypeError on
    # Django >= 2.0.  CASCADE reproduces the old implicit behaviour exactly.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # IntegerField keyed into INTEREST_CHOICES; 1 is the first choice.
    interest = models.IntegerField(choices=INTEREST_CHOICES, default=1)
class NewsWebsite(models.Model):
# A news source: free-text title, the interest category it serves
# (INTEREST_CHOICES), a unique URL, and an optional description.
title = models.TextField()
interest = models.IntegerField(choices=INTEREST_CHOICES, default=1)
url = models.URLField(unique=True)
description = models.TextField(blank=True)
| 29.25
| 74
| 0.738462
|
4a02cf2676f4b6bf0df2748a2ea9e59a5047dacb
| 36,878
|
py
|
Python
|
homeassistant/components/sonos/media_player.py
|
akhan69/home-assistant
|
fb460a325e25fdea9043136bccaf546ec1c04eab
|
[
"Apache-2.0"
] | 1
|
2020-02-23T21:08:33.000Z
|
2020-02-23T21:08:33.000Z
|
homeassistant/components/sonos/media_player.py
|
bdurrer/home-assistant
|
8874d0e2321fccb4fe854927c259099f1fe355f5
|
[
"Apache-2.0"
] | 1
|
2019-02-27T01:22:47.000Z
|
2019-02-27T01:22:47.000Z
|
homeassistant/components/sonos/media_player.py
|
akhan69/home-assistant
|
fb460a325e25fdea9043136bccaf546ec1c04eab
|
[
"Apache-2.0"
] | null | null | null |
"""Support to interface with Sonos players."""
import datetime
import functools as ft
import logging
import socket
import threading
import urllib
import requests
import voluptuous as vol
from homeassistant.components.media_player import (
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.components.media_player.const import (
ATTR_MEDIA_ENQUEUE, DOMAIN, MEDIA_TYPE_MUSIC,
SUPPORT_CLEAR_PLAYLIST, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK, SUPPORT_SEEK,
SUPPORT_SELECT_SOURCE, SUPPORT_SHUFFLE_SET, SUPPORT_STOP,
SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET)
from homeassistant.components.sonos import DOMAIN as SONOS_DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_TIME, CONF_HOSTS, STATE_IDLE, STATE_OFF, STATE_PAUSED,
STATE_PLAYING)
import homeassistant.helpers.config_validation as cv
from homeassistant.util.dt import utcnow
DEPENDENCIES = ('sonos',)
_LOGGER = logging.getLogger(__name__)
PARALLEL_UPDATES = 0
# Quiet down pysonos logging to just actual problems.
logging.getLogger('pysonos').setLevel(logging.WARNING)
logging.getLogger('pysonos.data_structures_entry').setLevel(logging.ERROR)
SUPPORT_SONOS = SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE |\
SUPPORT_PLAY | SUPPORT_PAUSE | SUPPORT_STOP | SUPPORT_SELECT_SOURCE |\
SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_SEEK |\
SUPPORT_PLAY_MEDIA | SUPPORT_SHUFFLE_SET | SUPPORT_CLEAR_PLAYLIST
SERVICE_JOIN = 'sonos_join'
SERVICE_UNJOIN = 'sonos_unjoin'
SERVICE_SNAPSHOT = 'sonos_snapshot'
SERVICE_RESTORE = 'sonos_restore'
SERVICE_SET_TIMER = 'sonos_set_sleep_timer'
SERVICE_CLEAR_TIMER = 'sonos_clear_sleep_timer'
SERVICE_UPDATE_ALARM = 'sonos_update_alarm'
SERVICE_SET_OPTION = 'sonos_set_option'
DATA_SONOS = 'sonos_media_player'
SOURCE_LINEIN = 'Line-in'
SOURCE_TV = 'TV'
CONF_ADVERTISE_ADDR = 'advertise_addr'
CONF_INTERFACE_ADDR = 'interface_addr'
# Service call validation schemas
ATTR_SLEEP_TIME = 'sleep_time'
ATTR_ALARM_ID = 'alarm_id'
ATTR_VOLUME = 'volume'
ATTR_ENABLED = 'enabled'
ATTR_INCLUDE_LINKED_ZONES = 'include_linked_zones'
ATTR_MASTER = 'master'
ATTR_WITH_GROUP = 'with_group'
ATTR_NIGHT_SOUND = 'night_sound'
ATTR_SPEECH_ENHANCE = 'speech_enhance'
ATTR_SONOS_GROUP = 'sonos_group'
UPNP_ERRORS_TO_IGNORE = ['701', '711', '712']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_ADVERTISE_ADDR): cv.string,
vol.Optional(CONF_INTERFACE_ADDR): cv.string,
vol.Optional(CONF_HOSTS): vol.All(cv.ensure_list, [cv.string]),
})
SONOS_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})
SONOS_JOIN_SCHEMA = SONOS_SCHEMA.extend({
vol.Required(ATTR_MASTER): cv.entity_id,
})
SONOS_STATES_SCHEMA = SONOS_SCHEMA.extend({
vol.Optional(ATTR_WITH_GROUP, default=True): cv.boolean,
})
SONOS_SET_TIMER_SCHEMA = SONOS_SCHEMA.extend({
vol.Required(ATTR_SLEEP_TIME):
vol.All(vol.Coerce(int), vol.Range(min=0, max=86399))
})
SONOS_UPDATE_ALARM_SCHEMA = SONOS_SCHEMA.extend({
vol.Required(ATTR_ALARM_ID): cv.positive_int,
vol.Optional(ATTR_TIME): cv.time,
vol.Optional(ATTR_VOLUME): cv.small_float,
vol.Optional(ATTR_ENABLED): cv.boolean,
vol.Optional(ATTR_INCLUDE_LINKED_ZONES): cv.boolean,
})
SONOS_SET_OPTION_SCHEMA = SONOS_SCHEMA.extend({
vol.Optional(ATTR_NIGHT_SOUND): cv.boolean,
vol.Optional(ATTR_SPEECH_ENHANCE): cv.boolean,
})
class SonosData:
"""Storage class for platform global data."""
def __init__(self):
"""Initialize the data."""
self.uids = set()
self.entities = []
self.topology_lock = threading.Lock()
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Sonos platform.
Deprecated.
"""
_LOGGER.warning('Loading Sonos via platform config is deprecated.')
_setup_platform(hass, config, add_entities, discovery_info)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Sonos from a config entry."""
def add_entities(entities, update_before_add=False):
"""Sync version of async add entities."""
hass.add_job(async_add_entities, entities, update_before_add)
hass.async_add_executor_job(
_setup_platform, hass, hass.data[SONOS_DOMAIN].get('media_player', {}),
add_entities, None)
def _setup_platform(hass, config, add_entities, discovery_info):
"""Set up the Sonos platform."""
import pysonos
if DATA_SONOS not in hass.data:
hass.data[DATA_SONOS] = SonosData()
advertise_addr = config.get(CONF_ADVERTISE_ADDR)
if advertise_addr:
pysonos.config.EVENT_ADVERTISE_IP = advertise_addr
players = []
if discovery_info:
player = pysonos.SoCo(discovery_info.get('host'))
# If host already exists by config
if player.uid in hass.data[DATA_SONOS].uids:
return
# If invisible, such as a stereo slave
if not player.is_visible:
return
players.append(player)
else:
hosts = config.get(CONF_HOSTS)
if hosts:
# Support retro compatibility with comma separated list of hosts
# from config
hosts = hosts[0] if len(hosts) == 1 else hosts
hosts = hosts.split(',') if isinstance(hosts, str) else hosts
for host in hosts:
try:
players.append(pysonos.SoCo(socket.gethostbyname(host)))
except OSError:
_LOGGER.warning("Failed to initialize '%s'", host)
else:
players = pysonos.discover(
interface_addr=config.get(CONF_INTERFACE_ADDR),
all_households=True)
if not players:
_LOGGER.warning("No Sonos speakers found")
return
hass.data[DATA_SONOS].uids.update(p.uid for p in players)
add_entities(SonosEntity(p) for p in players)
_LOGGER.debug("Added %s Sonos speakers", len(players))
def service_handle(service):
"""Handle for services."""
entity_ids = service.data.get('entity_id')
entities = hass.data[DATA_SONOS].entities
if entity_ids:
entities = [e for e in entities if e.entity_id in entity_ids]
with hass.data[DATA_SONOS].topology_lock:
if service.service == SERVICE_SNAPSHOT:
snapshot(entities, service.data[ATTR_WITH_GROUP])
elif service.service == SERVICE_RESTORE:
restore(entities, service.data[ATTR_WITH_GROUP])
elif service.service == SERVICE_JOIN:
master = [e for e in hass.data[DATA_SONOS].entities
if e.entity_id == service.data[ATTR_MASTER]]
if master:
master[0].join(entities)
else:
for entity in entities:
if service.service == SERVICE_UNJOIN:
entity.unjoin()
elif service.service == SERVICE_SET_TIMER:
entity.set_sleep_timer(service.data[ATTR_SLEEP_TIME])
elif service.service == SERVICE_CLEAR_TIMER:
entity.clear_sleep_timer()
elif service.service == SERVICE_UPDATE_ALARM:
entity.set_alarm(**service.data)
elif service.service == SERVICE_SET_OPTION:
entity.set_option(**service.data)
entity.schedule_update_ha_state(True)
hass.services.register(
DOMAIN, SERVICE_JOIN, service_handle,
schema=SONOS_JOIN_SCHEMA)
hass.services.register(
DOMAIN, SERVICE_UNJOIN, service_handle,
schema=SONOS_SCHEMA)
hass.services.register(
DOMAIN, SERVICE_SNAPSHOT, service_handle,
schema=SONOS_STATES_SCHEMA)
hass.services.register(
DOMAIN, SERVICE_RESTORE, service_handle,
schema=SONOS_STATES_SCHEMA)
hass.services.register(
DOMAIN, SERVICE_SET_TIMER, service_handle,
schema=SONOS_SET_TIMER_SCHEMA)
hass.services.register(
DOMAIN, SERVICE_CLEAR_TIMER, service_handle,
schema=SONOS_SCHEMA)
hass.services.register(
DOMAIN, SERVICE_UPDATE_ALARM, service_handle,
schema=SONOS_UPDATE_ALARM_SCHEMA)
hass.services.register(
DOMAIN, SERVICE_SET_OPTION, service_handle,
schema=SONOS_SET_OPTION_SCHEMA)
class _ProcessSonosEventQueue:
"""Queue like object for dispatching sonos events."""
def __init__(self, handler):
"""Initialize Sonos event queue."""
self._handler = handler
def put(self, item, block=True, timeout=None):
"""Process event."""
self._handler(item)
def _get_entity_from_soco_uid(hass, uid):
"""Return SonosEntity from SoCo uid."""
# Linear scan over the registered Sonos entities; None if uid is unknown.
for entity in hass.data[DATA_SONOS].entities:
if uid == entity.unique_id:
return entity
return None
def soco_error(errorcodes=None):
"""Filter out specified UPnP errors from logs and avoid exceptions."""
# Decorator factory: the wrapped call never propagates SoCo exceptions.
# UPnP error codes listed in `errorcodes` are swallowed silently; every
# other SoCo failure is logged and the wrapper returns None.
def decorator(funct):
"""Decorate functions."""
@ft.wraps(funct)
def wrapper(*args, **kwargs):
"""Wrap for all soco UPnP exception."""
# Imported lazily so this module can be loaded without pysonos.
from pysonos.exceptions import SoCoUPnPException, SoCoException
try:
return funct(*args, **kwargs)
except SoCoUPnPException as err:
if errorcodes and err.error_code in errorcodes:
pass
else:
_LOGGER.error("Error on %s with %s", funct.__name__, err)
except SoCoException as err:
_LOGGER.error("Error on %s with %s", funct.__name__, err)
return wrapper
return decorator
def soco_coordinator(funct):
    """Decorate a method so it always runs on the group coordinator."""
    @ft.wraps(funct)
    def wrapper(entity, *args, **kwargs):
        """Redirect the call to the coordinator when this entity is a slave."""
        target = entity if entity.is_coordinator else entity.coordinator
        return funct(target, *args, **kwargs)
    return wrapper
def _timespan_secs(timespan):
"""Parse a time-span into number of seconds."""
if timespan in ('', 'NOT_IMPLEMENTED', None):
return None
return sum(60 ** x[0] * int(x[1]) for x in enumerate(
reversed(timespan.split(':'))))
def _is_radio_uri(uri):
"""Return whether the URI is a radio stream."""
radio_schemes = (
'x-rincon-mp3radio:', 'x-sonosapi-stream:', 'x-sonosapi-radio:',
'x-sonosapi-hls:', 'hls-radio:')
return uri.startswith(radio_schemes)
class SonosEntity(MediaPlayerDevice):
"""Representation of a Sonos entity."""
    def __init__(self, player):
        """Initialize the Sonos entity.

        ``player`` is a pysonos SoCo instance for a single zone player.
        """
        # Active pysonos service subscriptions; cleared in update() when
        # the player becomes unavailable.
        self._subscriptions = []
        # Set once the first zone topology event arrives (update_groups).
        self._receives_events = False
        # Step size (percent) used by volume_up/volume_down.
        self._volume_increment = 2
        self._unique_id = player.uid
        self._player = player
        self._model = None
        # Volume/mute/shuffle as last reported (see update_volume).
        self._player_volume = None
        self._player_muted = None
        self._shuffle = None
        self._name = None
        # None while this player coordinates its own group, otherwise the
        # SonosEntity that coordinates the group this player belongs to.
        self._coordinator = None
        # Group members, coordinator first (rebuilt in update_groups).
        self._sonos_group = [self]
        # Raw transport state string from the player (e.g. 'PLAYING').
        self._status = None
        # Currently playing media metadata (see the update_media* methods).
        self._media_duration = None
        self._media_position = None
        self._media_position_updated_at = None
        self._media_image_url = None
        self._media_artist = None
        self._media_album_name = None
        self._media_title = None
        # Audio options; stay None when the player does not report them.
        self._night_sound = None
        self._speech_enhance = None
        self._source_name = None
        self._available = True
        self._favorites = None
        # State captured by the snapshot/restore services.
        self._soco_snapshot = None
        self._snapshot_group = None
        self._set_basic_information()
async def async_added_to_hass(self):
"""Subscribe sonos events."""
self.hass.data[DATA_SONOS].entities.append(self)
self.hass.async_add_executor_job(self._subscribe_to_player_events)
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
def __hash__(self):
"""Return a hash of self."""
return hash(self.unique_id)
@property
def name(self):
"""Return the name of the entity."""
return self._name
@property
def device_info(self):
"""Return information about the device."""
return {
'identifiers': {
(SONOS_DOMAIN, self._unique_id)
},
'name': self._name,
'model': self._model.replace("Sonos ", ""),
'manufacturer': 'Sonos',
}
@property
@soco_coordinator
def state(self):
"""Return the state of the entity."""
if self._status in ('PAUSED_PLAYBACK', 'STOPPED'):
return STATE_PAUSED
if self._status in ('PLAYING', 'TRANSITIONING'):
return STATE_PLAYING
if self._status == 'OFF':
return STATE_OFF
return STATE_IDLE
@property
def is_coordinator(self):
"""Return true if player is a coordinator."""
return self._coordinator is None
@property
def soco(self):
"""Return soco object."""
return self._player
@property
def coordinator(self):
"""Return coordinator of this player."""
return self._coordinator
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._available
def _check_available(self):
"""Check that we can still connect to the player."""
try:
sock = socket.create_connection(
address=(self.soco.ip_address, 1443), timeout=3)
sock.close()
return True
except socket.error:
return False
def _set_basic_information(self):
"""Set initial entity information."""
speaker_info = self.soco.get_speaker_info(True)
self._name = speaker_info['zone_name']
self._model = speaker_info['model_name']
self._shuffle = self.soco.shuffle
self.update_volume()
self._set_favorites()
def _set_favorites(self):
"""Set available favorites."""
# SoCo 0.16 raises a generic Exception on invalid xml in favorites.
# Filter those out now so our list is safe to use.
try:
self._favorites = []
for fav in self.soco.music_library.get_sonos_favorites():
try:
if fav.reference.get_uri():
self._favorites.append(fav)
except Exception: # pylint: disable=broad-except
_LOGGER.debug("Ignoring invalid favorite '%s'", fav.title)
except Exception: # pylint: disable=broad-except
_LOGGER.debug("Ignoring invalid favorite list")
def _radio_artwork(self, url):
"""Return the private URL with artwork for a radio stream."""
if url not in ('', 'NOT_IMPLEMENTED', None):
if url.find('tts_proxy') > 0:
# If the content is a tts don't try to fetch an image from it.
return None
url = 'http://{host}:{port}/getaa?s=1&u={uri}'.format(
host=self.soco.ip_address,
port=1400,
uri=urllib.parse.quote(url, safe='')
)
return url
def _subscribe_to_player_events(self):
"""Add event subscriptions."""
self._receives_events = False
# New player available, build the current group topology
for entity in self.hass.data[DATA_SONOS].entities:
entity.update_groups()
player = self.soco
def subscribe(service, action):
"""Add a subscription to a pysonos service."""
queue = _ProcessSonosEventQueue(action)
sub = service.subscribe(auto_renew=True, event_queue=queue)
self._subscriptions.append(sub)
subscribe(player.avTransport, self.update_media)
subscribe(player.renderingControl, self.update_volume)
subscribe(player.zoneGroupTopology, self.update_groups)
subscribe(player.contentDirectory, self.update_content)
def update(self):
"""Retrieve latest state."""
available = self._check_available()
if self._available != available:
self._available = available
if available:
self._set_basic_information()
self._subscribe_to_player_events()
else:
for subscription in self._subscriptions:
self.hass.async_add_executor_job(subscription.unsubscribe)
self._subscriptions = []
self._player_volume = None
self._player_muted = None
self._status = 'OFF'
self._coordinator = None
self._media_duration = None
self._media_position = None
self._media_position_updated_at = None
self._media_image_url = None
self._media_artist = None
self._media_album_name = None
self._media_title = None
self._source_name = None
elif available and not self._receives_events:
self.update_groups()
self.update_volume()
if self.is_coordinator:
self.update_media()
def update_media(self, event=None):
"""Update information about currently playing media."""
transport_info = self.soco.get_current_transport_info()
new_status = transport_info.get('current_transport_state')
# Ignore transitions, we should get the target state soon
if new_status == 'TRANSITIONING':
return
self._shuffle = self.soco.shuffle
if self.soco.is_playing_tv:
self.update_media_linein(SOURCE_TV)
elif self.soco.is_playing_line_in:
self.update_media_linein(SOURCE_LINEIN)
else:
track_info = self.soco.get_current_track_info()
if _is_radio_uri(track_info['uri']):
variables = event and event.variables
self.update_media_radio(variables, track_info)
else:
update_position = (new_status != self._status)
self.update_media_music(update_position, track_info)
self._status = new_status
self.schedule_update_ha_state()
# Also update slaves
for entity in self.hass.data[DATA_SONOS].entities:
coordinator = entity.coordinator
if coordinator and coordinator.unique_id == self.unique_id:
entity.schedule_update_ha_state()
def update_media_linein(self, source):
"""Update state when playing from line-in/tv."""
self._media_duration = None
self._media_position = None
self._media_position_updated_at = None
self._media_image_url = None
self._media_artist = source
self._media_album_name = None
self._media_title = None
self._source_name = source
def update_media_radio(self, variables, track_info):
"""Update state when streaming radio."""
self._media_duration = None
self._media_position = None
self._media_position_updated_at = None
media_info = self.soco.avTransport.GetMediaInfo([('InstanceID', 0)])
self._media_image_url = self._radio_artwork(media_info['CurrentURI'])
self._media_artist = track_info.get('artist')
self._media_album_name = None
self._media_title = track_info.get('title')
if self._media_artist and self._media_title:
# artist and album name are in the data, concatenate
# that do display as artist.
# "Information" field in the sonos pc app
self._media_artist = '{artist} - {title}'.format(
artist=self._media_artist,
title=self._media_title
)
elif variables:
# "On Now" field in the sonos pc app
current_track_metadata = variables.get('current_track_meta_data')
if current_track_metadata:
self._media_artist = \
current_track_metadata.radio_show.split(',')[0]
# For radio streams we set the radio station name as the title.
current_uri_metadata = media_info["CurrentURIMetaData"]
if current_uri_metadata not in ('', 'NOT_IMPLEMENTED', None):
# currently soco does not have an API for this
import pysonos
current_uri_metadata = pysonos.xml.XML.fromstring(
pysonos.utils.really_utf8(current_uri_metadata))
md_title = current_uri_metadata.findtext(
'.//{http://purl.org/dc/elements/1.1/}title')
if md_title not in ('', 'NOT_IMPLEMENTED', None):
self._media_title = md_title
if self._media_artist and self._media_title:
# some radio stations put their name into the artist
# name, e.g.:
# media_title = "Station"
# media_artist = "Station - Artist - Title"
# detect this case and trim from the front of
# media_artist for cosmetics
trim = '{title} - '.format(title=self._media_title)
chars = min(len(self._media_artist), len(trim))
if self._media_artist[:chars].upper() == trim[:chars].upper():
self._media_artist = self._media_artist[chars:]
# Check if currently playing radio station is in favorites
self._source_name = None
for fav in self._favorites:
if fav.reference.get_uri() == media_info['CurrentURI']:
self._source_name = fav.title
def update_media_music(self, update_media_position, track_info):
"""Update state when playing music tracks."""
self._media_duration = _timespan_secs(track_info.get('duration'))
position_info = self.soco.avTransport.GetPositionInfo(
[('InstanceID', 0),
('Channel', 'Master')]
)
rel_time = _timespan_secs(position_info.get("RelTime"))
# player no longer reports position?
update_media_position |= rel_time is None and \
self._media_position is not None
# player started reporting position?
update_media_position |= rel_time is not None and \
self._media_position is None
# position jumped?
if rel_time is not None and self._media_position is not None:
time_diff = utcnow() - self._media_position_updated_at
time_diff = time_diff.total_seconds()
calculated_position = self._media_position + time_diff
update_media_position |= abs(calculated_position - rel_time) > 1.5
if update_media_position:
self._media_position = rel_time
self._media_position_updated_at = utcnow()
self._media_image_url = track_info.get('album_art')
self._media_artist = track_info.get('artist')
self._media_album_name = track_info.get('album')
self._media_title = track_info.get('title')
self._source_name = None
    def update_volume(self, event=None):
        """Update volume and audio-option state.

        With an event (pysonos RenderingControl push, see
        _subscribe_to_player_events) only the variables present in the
        event are applied; without one the values are polled from the
        player directly.
        """
        if event:
            variables = event.variables
            if 'volume' in variables:
                self._player_volume = int(variables['volume']['Master'])
            if 'mute' in variables:
                # UPnP booleans arrive as '0'/'1' strings.
                self._player_muted = (variables['mute']['Master'] == '1')
            if 'night_mode' in variables:
                self._night_sound = (variables['night_mode'] == '1')
            if 'dialog_level' in variables:
                self._speech_enhance = (variables['dialog_level'] == '1')
            self.schedule_update_ha_state()
        else:
            self._player_volume = self.soco.volume
            self._player_muted = self.soco.mute
            self._night_sound = self.soco.night_mode
            self._speech_enhance = self.soco.dialog_mode
def update_groups(self, event=None):
"""Process a zone group topology event coming from a player."""
if event:
self._receives_events = True
if not hasattr(event, 'zone_player_uui_ds_in_group'):
return
with self.hass.data[DATA_SONOS].topology_lock:
group = event and event.zone_player_uui_ds_in_group
if group:
# New group information is pushed
coordinator_uid, *slave_uids = group.split(',')
else:
coordinator_uid = self.unique_id
slave_uids = []
# Try SoCo cache for existing topology
try:
if self.soco.group and self.soco.group.coordinator:
coordinator_uid = self.soco.group.coordinator.uid
slave_uids = [p.uid for p in self.soco.group.members
if p.uid != coordinator_uid]
except requests.exceptions.RequestException:
pass
if self.unique_id == coordinator_uid:
sonos_group = []
for uid in (coordinator_uid, *slave_uids):
entity = _get_entity_from_soco_uid(self.hass, uid)
if entity:
sonos_group.append(entity)
self._coordinator = None
self._sonos_group = sonos_group
self.schedule_update_ha_state()
for slave_uid in slave_uids:
slave = _get_entity_from_soco_uid(self.hass, slave_uid)
if slave:
# pylint: disable=protected-access
slave._coordinator = self
slave._sonos_group = sonos_group
slave.schedule_update_ha_state()
def update_content(self, event=None):
"""Update information about available content."""
self._set_favorites()
self.schedule_update_ha_state()
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._player_volume / 100
@property
def is_volume_muted(self):
"""Return true if volume is muted."""
return self._player_muted
@property
@soco_coordinator
def shuffle(self):
"""Shuffling state."""
return self._shuffle
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
@soco_coordinator
def media_duration(self):
"""Duration of current playing media in seconds."""
return self._media_duration
@property
@soco_coordinator
def media_position(self):
"""Position of current playing media in seconds."""
return self._media_position
@property
@soco_coordinator
def media_position_updated_at(self):
"""When was the position of the current playing media valid."""
return self._media_position_updated_at
@property
@soco_coordinator
def media_image_url(self):
"""Image url of current playing media."""
return self._media_image_url or None
@property
@soco_coordinator
def media_artist(self):
"""Artist of current playing media, music track only."""
return self._media_artist
@property
@soco_coordinator
def media_album_name(self):
"""Album name of current playing media, music track only."""
return self._media_album_name
@property
@soco_coordinator
def media_title(self):
"""Title of current playing media."""
return self._media_title
@property
@soco_coordinator
def source(self):
"""Name of the current input source."""
return self._source_name
@property
@soco_coordinator
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_SONOS
@soco_error()
def volume_up(self):
"""Volume up media player."""
self._player.volume += self._volume_increment
@soco_error()
def volume_down(self):
"""Volume down media player."""
self._player.volume -= self._volume_increment
@soco_error()
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self.soco.volume = str(int(volume * 100))
@soco_error(UPNP_ERRORS_TO_IGNORE)
@soco_coordinator
def set_shuffle(self, shuffle):
"""Enable/Disable shuffle mode."""
self.soco.shuffle = shuffle
@soco_error()
def mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
self.soco.mute = mute
@soco_error()
@soco_coordinator
def select_source(self, source):
"""Select input source."""
if source == SOURCE_LINEIN:
self.soco.switch_to_line_in()
elif source == SOURCE_TV:
self.soco.switch_to_tv()
else:
fav = [fav for fav in self._favorites
if fav.title == source]
if len(fav) == 1:
src = fav.pop()
uri = src.reference.get_uri()
if _is_radio_uri(uri):
self.soco.play_uri(uri, title=source)
else:
self.soco.clear_queue()
self.soco.add_to_queue(src.reference)
self.soco.play_from_queue(0)
@property
@soco_coordinator
def source_list(self):
"""List of available input sources."""
sources = [fav.title for fav in self._favorites]
model = self._model.upper()
if 'PLAY:5' in model or 'CONNECT' in model:
sources += [SOURCE_LINEIN]
elif 'PLAYBAR' in model:
sources += [SOURCE_LINEIN, SOURCE_TV]
elif 'BEAM' in model:
sources += [SOURCE_TV]
return sources
@soco_error()
def turn_on(self):
"""Turn the media player on."""
self.media_play()
@soco_error()
def turn_off(self):
"""Turn off media player."""
self.media_stop()
@soco_error(UPNP_ERRORS_TO_IGNORE)
@soco_coordinator
def media_play(self):
"""Send play command."""
self.soco.play()
@soco_error(UPNP_ERRORS_TO_IGNORE)
@soco_coordinator
def media_stop(self):
"""Send stop command."""
self.soco.stop()
@soco_error(UPNP_ERRORS_TO_IGNORE)
@soco_coordinator
def media_pause(self):
"""Send pause command."""
self.soco.pause()
@soco_error(UPNP_ERRORS_TO_IGNORE)
@soco_coordinator
def media_next_track(self):
"""Send next track command."""
self.soco.next()
@soco_error(UPNP_ERRORS_TO_IGNORE)
@soco_coordinator
def media_previous_track(self):
"""Send next track command."""
self.soco.previous()
@soco_error(UPNP_ERRORS_TO_IGNORE)
@soco_coordinator
def media_seek(self, position):
"""Send seek command."""
self.soco.seek(str(datetime.timedelta(seconds=int(position))))
@soco_error()
@soco_coordinator
def clear_playlist(self):
"""Clear players playlist."""
self.soco.clear_queue()
@soco_error()
@soco_coordinator
def play_media(self, media_type, media_id, **kwargs):
"""
Send the play_media command to the media player.
If ATTR_MEDIA_ENQUEUE is True, add `media_id` to the queue.
"""
if kwargs.get(ATTR_MEDIA_ENQUEUE):
from pysonos.exceptions import SoCoUPnPException
try:
self.soco.add_uri_to_queue(media_id)
except SoCoUPnPException:
_LOGGER.error('Error parsing media uri "%s", '
"please check it's a valid media resource "
'supported by Sonos', media_id)
else:
self.soco.play_uri(media_id)
@soco_error()
def join(self, slaves):
"""Form a group with other players."""
if self._coordinator:
self.unjoin()
for slave in slaves:
if slave.unique_id != self.unique_id:
slave.soco.join(self.soco)
# pylint: disable=protected-access
slave._coordinator = self
    @soco_error()
    def unjoin(self):
        """Unjoin the player from a group."""
        self.soco.unjoin()
        # With no coordinator set, this entity reports itself as
        # coordinator again (see is_coordinator).
        self._coordinator = None
@soco_error()
@soco_coordinator
def set_sleep_timer(self, sleep_time):
"""Set the timer on the player."""
self.soco.set_sleep_timer(sleep_time)
@soco_error()
@soco_coordinator
def clear_sleep_timer(self):
"""Clear the timer on the player."""
self.soco.set_sleep_timer(None)
@soco_error()
@soco_coordinator
def set_alarm(self, **data):
"""Set the alarm clock on the player."""
from pysonos import alarms
alarm = None
for one_alarm in alarms.get_alarms(self.soco):
# pylint: disable=protected-access
if one_alarm._alarm_id == str(data[ATTR_ALARM_ID]):
alarm = one_alarm
if alarm is None:
_LOGGER.warning("did not find alarm with id %s",
data[ATTR_ALARM_ID])
return
if ATTR_TIME in data:
alarm.start_time = data[ATTR_TIME]
if ATTR_VOLUME in data:
alarm.volume = int(data[ATTR_VOLUME] * 100)
if ATTR_ENABLED in data:
alarm.enabled = data[ATTR_ENABLED]
if ATTR_INCLUDE_LINKED_ZONES in data:
alarm.include_linked_zones = data[ATTR_INCLUDE_LINKED_ZONES]
alarm.save()
@soco_error()
def set_option(self, **data):
"""Modify playback options."""
if ATTR_NIGHT_SOUND in data and self._night_sound is not None:
self.soco.night_mode = data[ATTR_NIGHT_SOUND]
if ATTR_SPEECH_ENHANCE in data and self._speech_enhance is not None:
self.soco.dialog_mode = data[ATTR_SPEECH_ENHANCE]
@property
def device_state_attributes(self):
"""Return entity specific state attributes."""
attributes = {
ATTR_SONOS_GROUP: [e.entity_id for e in self._sonos_group],
}
if self._night_sound is not None:
attributes[ATTR_NIGHT_SOUND] = self._night_sound
if self._speech_enhance is not None:
attributes[ATTR_SPEECH_ENHANCE] = self._speech_enhance
return attributes
@soco_error()
def snapshot(entities, with_group):
    """Snapshot all the entities and optionally their groups."""
    # pylint: disable=protected-access
    from pysonos.snapshot import Snapshot
    # Find all affected players
    entities = set(entities)
    if with_group:
        # Pull in every member of each entity's current group so the
        # whole group state can be restored later.
        for entity in list(entities):
            entities.update(entity._sonos_group)
    for entity in entities:
        entity._soco_snapshot = Snapshot(entity.soco)
        entity._soco_snapshot.snapshot()
        if with_group:
            # Remember the group layout at snapshot time for restore().
            entity._snapshot_group = entity._sonos_group.copy()
        else:
            entity._snapshot_group = None
@soco_error()
def restore(entities, with_group):
    """Restore snapshots for all the entities."""
    # pylint: disable=protected-access
    from pysonos.exceptions import SoCoException
    # Find all affected players (only those that were snapshotted)
    entities = set(e for e in entities if e._soco_snapshot)
    if with_group:
        for entity in [e for e in entities if e._snapshot_group]:
            entities.update(entity._snapshot_group)
    # Pause all current coordinators
    for entity in (e for e in entities if e.is_coordinator):
        if entity.state == STATE_PLAYING:
            entity.media_pause()
    # Bring back the original group topology and clear pysonos cache
    if with_group:
        for entity in (e for e in entities if e._snapshot_group):
            if entity._snapshot_group[0] == entity:
                # This entity coordinated its snapshot group; re-form it.
                entity.join(entity._snapshot_group)
                entity.soco._zgs_cache.clear()
    # Restore slaves, then coordinators
    slaves = [e for e in entities if not e.is_coordinator]
    coordinators = [e for e in entities if e.is_coordinator]
    for entity in slaves + coordinators:
        try:
            entity._soco_snapshot.restore()
        except (TypeError, AttributeError, SoCoException) as ex:
            # Can happen if restoring a coordinator onto a current slave
            _LOGGER.warning("Error on restore %s: %s", entity.entity_id, ex)
        entity._soco_snapshot = None
        entity._snapshot_group = None
| 33.678539
| 79
| 0.623542
|
4a02cf863f95c31bbd897122dced1f9320dfecec
| 27,048
|
py
|
Python
|
holoviews/core/data/xarray.py
|
Guillemdb/holoviews
|
826adb4b08bb26840f9e608fe6a1555644c4497e
|
[
"BSD-3-Clause"
] | null | null | null |
holoviews/core/data/xarray.py
|
Guillemdb/holoviews
|
826adb4b08bb26840f9e608fe6a1555644c4497e
|
[
"BSD-3-Clause"
] | null | null | null |
holoviews/core/data/xarray.py
|
Guillemdb/holoviews
|
826adb4b08bb26840f9e608fe6a1555644c4497e
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
import sys
import types
from collections import OrderedDict
import numpy as np
from .. import util
from ..dimension import Dimension, asdim, dimension_name
from ..ndmapping import NdMapping, item_check, sorted_context
from ..element import Element
from .grid import GridInterface
from .interface import Interface, DataError, dask_array_module
def is_cupy(array):
    """Return whether ``array`` is a cupy ndarray, without importing cupy.

    cupy is only consulted if it has already been imported elsewhere, so
    this is safe to call when cupy is not installed.
    """
    if 'cupy' in sys.modules:
        from cupy import ndarray
        return isinstance(array, ndarray)
    return False
class XArrayInterface(GridInterface):
types = ()
datatype = 'xarray'
    @classmethod
    def loaded(cls):
        """Return whether xarray has been imported into the process."""
        return 'xarray' in sys.modules
@classmethod
def applies(cls, obj):
if not cls.loaded():
return False
import xarray as xr
return isinstance(obj, (xr.Dataset, xr.DataArray))
@classmethod
def dimension_type(cls, dataset, dim):
name = dataset.get_dimension(dim, strict=True).name
if cls.packed(dataset) and name in dataset.vdims:
return dataset.data.dtype.type
return dataset.data[name].dtype.type
@classmethod
def dtype(cls, dataset, dim):
name = dataset.get_dimension(dim, strict=True).name
if cls.packed(dataset) and name in dataset.vdims:
return dataset.data.dtype
return dataset.data[name].dtype
    @classmethod
    def packed(cls, dataset):
        """Return whether the data is backed by a single DataArray
        (value dimensions stacked along the last axis) rather than a
        Dataset of separate arrays."""
        import xarray as xr
        return isinstance(dataset.data, xr.DataArray)
@classmethod
def shape(cls, dataset, gridded=False):
if cls.packed(dataset):
array = dataset.data[..., 0]
else:
array = dataset.data[dataset.vdims[0].name]
if not gridded:
return (np.product(array.shape, dtype=np.intp), len(dataset.dimensions()))
shape_map = dict(zip(array.dims, array.shape))
return tuple(shape_map.get(kd.name, np.nan) for kd in dataset.kdims[::-1])
@classmethod
def init(cls, eltype, data, kdims, vdims):
import xarray as xr
element_params = eltype.param.objects()
kdim_param = element_params['kdims']
vdim_param = element_params['vdims']
def retrieve_unit_and_label(dim):
if isinstance(dim, Dimension):
return dim
dim = asdim(dim)
coord = data[dim.name]
unit = coord.attrs.get('units') if dim.unit is None else dim.unit
if 'long_name' in coord.attrs:
spec = (dim.name, coord.attrs['long_name'])
else:
spec = (dim.name, dim.label)
return dim.clone(spec, unit=unit)
packed = False
if isinstance(data, xr.DataArray):
kdim_len = len(kdim_param.default) if kdims is None else len(kdims)
vdim_len = len(vdim_param.default) if vdims is None else len(vdims)
if vdim_len > 1 and kdim_len == len(data.dims)-1 and data.shape[-1] == vdim_len:
packed = True
elif vdims:
vdim = vdims[0]
elif data.name:
vdim = Dimension(data.name)
vdim.unit = data.attrs.get('units')
label = data.attrs.get('long_name')
if label is not None:
vdim.label = label
elif len(vdim_param.default) == 1:
vdim = vdim_param.default[0]
if vdim.name in data.dims:
raise DataError("xarray DataArray does not define a name, "
"and the default of '%s' clashes with a "
"coordinate dimension. Give the DataArray "
"a name or supply an explicit value dimension."
% vdim.name, cls)
else:
raise DataError("xarray DataArray does not define a name "
"and %s does not define a default value "
"dimension. Give the DataArray a name or "
"supply an explicit vdim." % eltype.__name__,
cls)
if not packed:
vdims = [vdim]
data = data.to_dataset(name=vdim.name)
if not isinstance(data, (xr.Dataset, xr.DataArray)):
if kdims is None:
kdims = kdim_param.default
if vdims is None:
vdims = vdim_param.default
kdims = [asdim(kd) for kd in kdims]
vdims = [asdim(vd) for vd in vdims]
if isinstance(data, np.ndarray) and data.ndim == 2 and data.shape[1] == len(kdims+vdims):
data = tuple(data)
ndims = len(kdims)
if isinstance(data, tuple):
dimensions = [d.name for d in kdims+vdims]
if (len(data) != len(dimensions) and len(data) == (ndims+1) and
len(data[-1].shape) == (ndims+1)):
value_array = data[-1]
data = {d: v for d, v in zip(dimensions, data[:-1])}
packed = True
else:
data = {d: v for d, v in zip(dimensions, data)}
elif isinstance(data, list) and data == []:
dimensions = [d.name for d in kdims + vdims]
data = {d: np.array([]) for d in dimensions[:ndims]}
data.update({d: np.empty((0,) * ndims) for d in dimensions[ndims:]})
if not isinstance(data, dict):
raise TypeError('XArrayInterface could not interpret data type')
data = {d: np.asarray(values) if d in kdims else values
for d, values in data.items()}
coord_dims = [data[kd.name].ndim for kd in kdims]
dims = tuple('dim_%d' % i for i in range(max(coord_dims)))[::-1]
coords = OrderedDict()
for kd in kdims:
coord_vals = data[kd.name]
if coord_vals.ndim > 1:
coord = (dims[:coord_vals.ndim], coord_vals)
else:
coord = coord_vals
coords[kd.name] = coord
xr_kwargs = {'dims': dims if max(coord_dims) > 1 else list(coords)[::-1]}
if packed:
xr_kwargs['dims'] = list(coords)[::-1] + ['band']
coords['band'] = list(range(len(vdims)))
data = xr.DataArray(value_array, coords=coords, **xr_kwargs)
else:
arrays = {}
for vdim in vdims:
arr = data[vdim.name]
if not isinstance(arr, xr.DataArray):
arr = xr.DataArray(arr, coords=coords, **xr_kwargs)
arrays[vdim.name] = arr
data = xr.Dataset(arrays)
else:
if not data.coords:
data = data.assign_coords(**{k: range(v) for k, v in data.dims.items()})
if vdims is None:
vdims = list(data.data_vars)
if kdims is None:
xrdims = list(data.dims)
xrcoords = list(data.coords)
kdims = [name for name in data.indexes.keys()
if isinstance(data[name].data, np.ndarray)]
kdims = sorted(kdims, key=lambda x: (xrcoords.index(x) if x in xrcoords else float('inf'), x))
if packed:
kdims = kdims[:-1]
elif set(xrdims) != set(kdims):
virtual_dims = [xd for xd in xrdims if xd not in kdims]
for c in data.coords:
if c not in kdims and set(data[c].dims) == set(virtual_dims):
kdims.append(c)
kdims = [retrieve_unit_and_label(kd) for kd in kdims]
vdims = [retrieve_unit_and_label(vd) for vd in vdims]
not_found = []
for d in kdims:
if not any(d.name == k or (isinstance(v, xr.DataArray) and d.name in v.dims)
for k, v in data.coords.items()):
not_found.append(d)
if not isinstance(data, (xr.Dataset, xr.DataArray)):
raise TypeError('Data must be be an xarray Dataset type.')
elif not_found:
raise DataError("xarray Dataset must define coordinates "
"for all defined kdims, %s coordinates not found."
% not_found, cls)
return data, {'kdims': kdims, 'vdims': vdims}, {}
    @classmethod
    def validate(cls, dataset, vdims=True):
        """Validate that the data defines coordinates for all kdims.

        Dataset-backed data defers to the generic Interface validation;
        for a packed DataArray the kdim coordinates are checked directly.
        Also verifies that all irregular (multi-dimensional) coordinate
        arrays share the same dimensions.

        Raises DataError on missing coordinates or mismatched irregular
        coordinate dimensions.
        """
        import xarray as xr
        if isinstance(dataset.data, xr.Dataset):
            Interface.validate(dataset, vdims)
        else:
            # Packed DataArray: ensure every kdim has a coordinate.
            not_found = [kd.name for kd in dataset.kdims if kd.name not in dataset.data.coords]
            if not_found:
                raise DataError("Supplied data does not contain specified "
                                "dimensions, the following dimensions were "
                                "not found: %s" % repr(not_found), cls)
        # Check whether irregular (i.e. multi-dimensional) coordinate
        # array dimensionality matches
        irregular = []
        for kd in dataset.kdims:
            if cls.irregular(dataset, kd):
                irregular.append((kd, dataset.data[kd.name].dims))
        if irregular:
            # Compare every irregular coord's dims against the first one.
            nonmatching = ['%s: %s' % (kd, dims) for kd, dims in irregular[1:]
                           if set(dims) != set(irregular[0][1])]
            if nonmatching:
                nonmatching = ['%s: %s' % irregular[0]] + nonmatching
                raise DataError("The dimensions of coordinate arrays "
                                "on irregular data must match. The "
                                "following kdims were found to have "
                                "non-matching array dimensions:\n\n%s"
                                % ('\n'.join(nonmatching)), cls)
@classmethod
def range(cls, dataset, dimension):
dim = dataset.get_dimension(dimension, strict=True).name
if dataset._binned and dimension in dataset.kdims:
data = cls.coords(dataset, dim, edges=True)
if data.dtype.kind == 'M':
dmin, dmax = data.min(), data.max()
else:
dmin, dmax = np.nanmin(data), np.nanmax(data)
else:
if cls.packed(dataset) and dim in dataset.vdims:
data = dataset.data.values[..., dataset.vdims.index(dim)]
else:
data = dataset.data[dim]
if len(data):
dmin, dmax = data.min().data, data.max().data
else:
dmin, dmax = np.NaN, np.NaN
da = dask_array_module()
if da and isinstance(dmin, da.Array):
dmin, dmax = da.compute(dmin, dmax)
if isinstance(dmin, np.ndarray) and dmin.shape == ():
dmin = dmin[()]
if isinstance(dmax, np.ndarray) and dmax.shape == ():
dmax = dmax[()]
dmin = dmin if np.isscalar(dmin) or isinstance(dmin, util.datetime_types) else dmin.item()
dmax = dmax if np.isscalar(dmax) or isinstance(dmax, util.datetime_types) else dmax.item()
return dmin, dmax
@classmethod
def groupby(cls, dataset, dimensions, container_type, group_type, **kwargs):
index_dims = [dataset.get_dimension(d, strict=True) for d in dimensions]
element_dims = [kdim for kdim in dataset.kdims
if kdim not in index_dims]
invalid = [d for d in index_dims if dataset.data[d.name].ndim > 1]
if invalid:
if len(invalid) == 1: invalid = "'%s'" % invalid[0]
raise ValueError("Cannot groupby irregularly sampled dimension(s) %s."
% invalid)
group_kwargs = {}
if group_type != 'raw' and issubclass(group_type, Element):
group_kwargs = dict(util.get_param_values(dataset),
kdims=element_dims)
group_kwargs.update(kwargs)
drop_dim = any(d not in group_kwargs['kdims'] for d in element_dims)
group_by = [d.name for d in index_dims]
data = []
if len(dimensions) == 1:
for k, v in dataset.data.groupby(index_dims[0].name):
if drop_dim:
v = v.to_dataframe().reset_index()
data.append((k, group_type(v, **group_kwargs)))
else:
unique_iters = [cls.values(dataset, d, False) for d in group_by]
indexes = zip(*util.cartesian_product(unique_iters))
for k in indexes:
sel = dataset.data.sel(**dict(zip(group_by, k)))
if drop_dim:
sel = sel.to_dataframe().reset_index()
data.append((k, group_type(sel, **group_kwargs)))
if issubclass(container_type, NdMapping):
with item_check(False), sorted_context(False):
return container_type(data, kdims=index_dims)
else:
return container_type(data)
    @classmethod
    def coords(cls, dataset, dimension, ordered=False, expanded=False, edges=False):
        """Return the coordinate values along ``dimension``.

        ordered: flip monotonically descending 1D coordinates ascending.
        expanded: expand 1D coordinates to the full grid shape.
        edges: return bin edges rather than midpoints (inferring breaks
        or collapsing stored edges to centers as needed).
        """
        import xarray as xr
        dim = dataset.get_dimension(dimension)
        dim = dimension if dim is None else dim.name
        irregular = cls.irregular(dataset, dim)
        if irregular or expanded:
            # 2D (irregular) coordinate arrays or explicitly expanded grids.
            if irregular:
                data = dataset.data[dim]
            else:
                data = util.expand_grid_coords(dataset, dim)
            if edges:
                data = cls._infer_interval_breaks(data, axis=1)
                data = cls._infer_interval_breaks(data, axis=0)
            return data.values if isinstance(data, xr.DataArray) else data
        data = np.atleast_1d(dataset.data[dim].data)
        if ordered and data.shape and np.all(data[1:] < data[:-1]):
            # Coordinates are descending; return them ascending.
            data = data[::-1]
        shape = cls.shape(dataset, True)
        if dim in dataset.kdims:
            idx = dataset.get_dimension_index(dim)
            # One more coordinate than samples means edges are stored.
            isedges = (len(shape) == dataset.ndims and len(data) == (shape[dataset.ndims-idx-1]+1))
        else:
            isedges = False
        if edges and not isedges:
            data = cls._infer_interval_breaks(data)
        elif not edges and isedges:
            # Convert stored edges into midpoints.
            data = np.convolve(data, [0.5, 0.5], 'valid')
        return data.values if isinstance(data, xr.DataArray) else data
    @classmethod
    def values(cls, dataset, dim, expanded=True, flat=True, compute=True, keep_index=False):
        """Return the values along the supplied dimension.

        Args:
            dataset: Dataset backed by an xarray object.
            dim: Dimension (or name) to extract values for.
            expanded: Expand key-dimension coordinates to the full grid.
            flat: Flatten gridded values into a 1D array.
            compute: Eagerly compute dask-backed arrays.
            keep_index: Return the underlying xarray object (with coords)
                instead of a bare array.
        """
        dim = dataset.get_dimension(dim, strict=True)
        packed = cls.packed(dataset) and dim in dataset.vdims
        if packed:
            # Packed DataArray: value dimensions are stacked on the last axis
            data = dataset.data.data[..., dataset.vdims.index(dim)]
        else:
            data = dataset.data[dim.name]
            if not keep_index:
                data = data.data
        irregular = cls.irregular(dataset, dim) if dim in dataset.kdims else False
        irregular_kdims = [d for d in dataset.kdims if cls.irregular(dataset, d)]
        if irregular_kdims:
            # Dims the irregular coordinate arrays are indexed by
            virtual_coords = list(dataset.data[irregular_kdims[0].name].coords.dims)
        else:
            virtual_coords = []
        if dim in dataset.vdims or irregular:
            if packed:
                data_coords = list(dataset.data.dims)[:-1]
            else:
                data_coords = list(dataset.data[dim.name].dims)
            da = dask_array_module()
            if compute and da and isinstance(data, da.Array):
                data = data.compute()
            if is_cupy(data):
                # Move GPU-backed arrays to host memory before returning
                import cupy
                data = cupy.asnumpy(data)
            if not keep_index:
                # Reorient the array into the canonical dimension order
                data = cls.canonicalize(dataset, data, data_coords=data_coords,
                                        virtual_coords=virtual_coords)
            return data.T.flatten() if flat and not keep_index else data
        elif expanded:
            data = cls.coords(dataset, dim.name, expanded=True)
            return data.T.flatten() if flat else data
        else:
            if keep_index:
                return dataset.data[dim.name]
            return cls.coords(dataset, dim.name, ordered=True)
@classmethod
def aggregate(cls, dataset, dimensions, function, **kwargs):
reduce_dims = [d.name for d in dataset.kdims if d not in dimensions]
return dataset.data.reduce(function, dim=reduce_dims, **kwargs), []
@classmethod
def unpack_scalar(cls, dataset, data):
"""
Given a dataset object and data in the appropriate format for
the interface, return a simple scalar.
"""
if (not cls.packed(dataset) and len(data.data_vars) == 1 and
len(data[dataset.vdims[0].name].shape) == 0):
return data[dataset.vdims[0].name].item()
return data
    @classmethod
    def ndloc(cls, dataset, indices):
        """Positional (array-style) indexing along each key dimension.

        ``indices`` are supplied in numpy row-major order, matching the
        reversed kdims. Returns a scalar, a dataframe of samples or a
        sliced xarray object depending on the index types.
        """
        kdims = [d for d in dataset.kdims[::-1]]
        adjusted_indices = []
        slice_dims = []
        for kd, ind in zip(kdims, indices):
            if cls.irregular(dataset, kd):
                # Irregular coords index into the underlying (virtual) dims
                coords = [c for c in dataset.data.coords if c not in dataset.data.dims]
                dim = dataset.data[kd.name].dims[coords.index(kd.name)]
                shape = dataset.data[kd.name].shape[coords.index(kd.name)]
                coords = np.arange(shape)
            else:
                coords = cls.coords(dataset, kd, False)
                dim = kd.name
            slice_dims.append(dim)
            ncoords = len(coords)
            if np.all(coords[1:] < coords[:-1]):
                # Descending coordinates: flip positional indices so they
                # address the same elements as in ascending order
                if np.isscalar(ind):
                    ind = ncoords-ind-1
                elif isinstance(ind, slice):
                    start = None if ind.stop is None else ncoords-ind.stop
                    stop = None if ind.start is None else ncoords-ind.start
                    ind = slice(start, stop, ind.step)
                elif isinstance(ind, np.ndarray) and ind.dtype.kind == 'b':
                    ind = ind[::-1]
                elif isinstance(ind, (np.ndarray, list)):
                    ind = [ncoords-i-1 for i in ind]
            if isinstance(ind, list):
                ind = np.array(ind)
            if isinstance(ind, np.ndarray) and ind.dtype.kind == 'b':
                # Convert boolean masks to integer index arrays
                ind = np.where(ind)[0]
            adjusted_indices.append(ind)

        isel = dict(zip(slice_dims, adjusted_indices))
        all_scalar = all(map(np.isscalar, indices))
        if all_scalar and len(indices) == len(kdims) and len(dataset.vdims) == 1:
            # Fully scalar index on a single value dimension: plain scalar
            return dataset.data[dataset.vdims[0].name].isel(**isel).values.item()

        # Detect if the indexing is selecting samples or slicing the array
        sampled = (all(isinstance(ind, np.ndarray) and ind.dtype.kind != 'b'
                       for ind in adjusted_indices) and len(indices) == len(kdims))
        if sampled or (all_scalar and len(indices) == len(kdims)):
            import xarray as xr
            if cls.packed(dataset):
                selected = dataset.data.isel({k: xr.DataArray(v) for k, v in isel.items()})
                df = selected.to_dataframe('vdims')[['vdims']].T
                vdims = [vd.name for vd in dataset.vdims]
                return df.rename(columns={i: d for i, d in enumerate(vdims)})[vdims]
            if all_scalar: isel = {k: [v] for k, v in isel.items()}
            selected = dataset.data.isel({k: xr.DataArray(v) for k, v in isel.items()})
            return selected.to_dataframe().reset_index()
        else:
            return dataset.data.isel(**isel)
@classmethod
def concat_dim(cls, datasets, dim, vdims):
import xarray as xr
return xr.concat([ds.assign_coords(**{dim.name: c}) for c, ds in datasets.items()],
dim=dim.name)
@classmethod
def redim(cls, dataset, dimensions):
renames = {k: v.name for k, v in dimensions.items()}
return dataset.data.rename(renames)
    @classmethod
    def reindex(cls, dataset, kdims=None, vdims=None):
        """Reindex to the supplied kdims/vdims, dropping constant dimensions.

        Key dimensions may only be dropped when they hold a single
        (constant) value; otherwise the data is returned as columns.
        """
        dropped_kdims = [kd for kd in dataset.kdims if kd not in kdims]
        constant = {}
        for kd in dropped_kdims:
            vals = cls.values(dataset, kd.name, expanded=False)
            if len(vals) == 1:
                constant[kd.name] = vals[0]
        if len(constant) == len(dropped_kdims):
            # All dropped kdims are constant: select them away
            dropped = dataset.data.sel(**{k: v for k, v in constant.items()
                                          if k in dataset.data.dims})
            if vdims and cls.packed(dataset):
                # Subset the packed value axis to the requested vdims
                return dropped.isel(**{dataset.data.dims[-1]: [dataset.vdims.index(vd) for vd in vdims]})
            return dropped
        elif dropped_kdims:
            # Non-constant kdims dropped: fall back to a columnar format
            return tuple(dataset.columns(kdims+vdims).values())
        return dataset.data
@classmethod
def sort(cls, dataset, by=[], reverse=False):
return dataset
    @classmethod
    def mask(cls, dataset, mask, mask_val=np.nan):
        """Return a copy of the data with masked entries set to ``mask_val``."""
        packed = cls.packed(dataset)
        masked = dataset.data.copy()
        if packed:
            data_coords = list(dataset.data.dims)[:-1]
            # Reorient the mask to match the data's dimension order
            mask = cls.canonicalize(dataset, mask, data_coords)
            try:
                masked.values[mask] = mask_val
            except ValueError:
                # Integer arrays cannot hold NaN; promote to float first
                masked = masked.astype('float')
                masked.values[mask] = mask_val
        else:
            orig_mask = mask
            for vd in dataset.vdims:
                data_coords = list(dataset.data[vd.name].dims)
                mask = cls.canonicalize(dataset, orig_mask, data_coords)
                masked[vd.name] = marr = masked[vd.name].astype('float')
                marr.values[mask] = mask_val
        return masked
    @classmethod
    def select(cls, dataset, selection_mask=None, **selection):
        """Apply dimension-value selections and return data or a scalar.

        Tuple values are treated as (start, stop) ranges, sets as discrete
        values and callables are applied to the dimension's values.
        """
        validated = {}
        for k, v in selection.items():
            dim = dataset.get_dimension(k, strict=True)
            if cls.irregular(dataset, dim):
                # Irregular grids cannot use xarray label-based selection
                return GridInterface.select(dataset, selection_mask, **selection)
            dim = dim.name
            if isinstance(v, slice):
                v = (v.start, v.stop)

            if isinstance(v, set):
                validated[dim] = list(v)
            elif isinstance(v, tuple):
                dim_vals = dataset.data[k].values
                # Nudge the upper bound down to make the range end-exclusive
                upper = None if v[1] is None else v[1]-sys.float_info.epsilon*10
                v = v[0], upper
                if dim_vals.dtype.kind not in 'OSU' and np.all(dim_vals[1:] < dim_vals[:-1]):
                    # If coordinates are inverted invert slice
                    v = v[::-1]
                validated[dim] = slice(*v)
            elif isinstance(v, types.FunctionType):
                validated[dim] = v(dataset[k])
            else:
                validated[dim] = v
        data = dataset.data.sel(**validated)

        # Restore constant dimensions
        indexed = cls.indexed(dataset, selection)
        dropped = OrderedDict((d.name, np.atleast_1d(data[d.name]))
                              for d in dataset.kdims
                              if not data[d.name].data.shape)
        if dropped and not indexed:
            data = data.expand_dims(dropped)
            # see https://github.com/pydata/xarray/issues/2891
            # since we only expanded on dimensions of size 1
            # we can monkeypatch the dataarray back to writeable.
            for d in data.values():
                if hasattr(d.data, 'flags'):
                    d.data.flags.writeable = True

        da = dask_array_module()
        if (indexed and len(data.data_vars) == 1 and
            len(data[dataset.vdims[0].name].shape) == 0):
            # Fully indexed single-vdim selection: return a scalar
            value = data[dataset.vdims[0].name]
            if da and isinstance(value.data, da.Array):
                value = value.compute()
            return value.item()
        elif indexed:
            # Fully indexed multi-vdim selection: one value per vdim
            values = []
            for vd in dataset.vdims:
                value = data[vd.name]
                if da and isinstance(value.data, da.Array):
                    value = value.compute()
                values.append(value.item())
            return np.array(values)
        return data
@classmethod
def length(cls, dataset):
return np.product([len(dataset.data[d.name]) for d in dataset.kdims], dtype=np.intp)
    @classmethod
    def dframe(cls, dataset, dimensions):
        """Convert the gridded data to a pandas DataFrame.

        Args:
            dimensions: Optional list of column names to restrict to.
        """
        import xarray as xr
        if cls.packed(dataset):
            # Unpack the stacked value axis into one variable per vdim.
            # NOTE(review): assumes the packed axis coordinate is named
            # 'band' -- confirm against how packed data is constructed.
            bands = {vd.name: dataset.data[..., i].drop('band')
                     for i, vd in enumerate(dataset.vdims)}
            data = xr.Dataset(bands)
        else:
            data = dataset.data
        data = data.to_dataframe().reset_index()
        if dimensions:
            return data[dimensions]
        return data
@classmethod
def sample(cls, dataset, samples=[]):
names = [kd.name for kd in dataset.kdims]
samples = [dataset.data.sel(**{k: [v] for k, v in zip(names, s)}).to_dataframe().reset_index()
for s in samples]
return util.pd.concat(samples)
@classmethod
def add_dimension(cls, dataset, dimension, dim_pos, values, vdim):
import xarray as xr
if not vdim:
raise Exception("Cannot add key dimension to a dense representation.")
dim = dimension_name(dimension)
coords = {d.name: cls.coords(dataset, d.name) for d in dataset.kdims}
arr = xr.DataArray(values, coords=coords, name=dim,
dims=tuple(d.name for d in dataset.kdims[::-1]))
return dataset.data.assign(**{dim: arr})
    @classmethod
    def assign(cls, dataset, new_data):
        """Assign new coordinates and/or data variables to the dataset.

        Returns the updated xarray object along with the list of
        coordinates that were dropped because no variable uses them
        any longer.
        """
        import xarray as xr
        data = dataset.data
        # Coordinates shared by every data variable before the update
        prev_coords = set.intersection(*[
            set(var.coords) for var in data.data_vars.values()
        ])
        coords = OrderedDict()
        for k, v in new_data.items():
            if k not in dataset.kdims:
                continue
            elif isinstance(v, xr.DataArray):
                coords[k] = v.rename(**{v.name: k})
                continue
            coord_vals = cls.coords(dataset, k)
            if not coord_vals.ndim > 1 and np.all(coord_vals[1:] < coord_vals[:-1]):
                # Descending coordinates: flip values to keep alignment
                v = v[::-1]
            coords[k] = (k, v)
        if coords:
            data = data.assign_coords(**coords)

        dims = tuple(kd.name for kd in dataset.kdims[::-1])
        vars = OrderedDict()
        for k, v in new_data.items():
            if k in dataset.kdims:
                continue
            if isinstance(v, xr.DataArray):
                vars[k] = v
            else:
                # Plain arrays must be reoriented to the canonical order
                vars[k] = (dims, cls.canonicalize(dataset, v, data_coords=dims))
        if vars:
            data = data.assign(vars)
        # Drop coordinates that are no longer shared by all variables
        used_coords = set.intersection(*[set(var.coords) for var in data.data_vars.values()])
        drop_coords = set.symmetric_difference(used_coords, prev_coords)
        return data.drop(list(drop_coords)), list(drop_coords)
# Register the interface so it participates in Dataset interface dispatch
Interface.register(XArrayInterface)
| 41.934884
| 110
| 0.546917
|
4a02d25043128a028cfeed51174d906559293f28
| 740
|
py
|
Python
|
auxiliary/metro_sources/setup.py
|
kierannp/3dsnet
|
429c3d431b36358e43c61692e0d02df3ea255635
|
[
"MIT"
] | 15
|
2021-05-26T11:47:59.000Z
|
2022-01-20T12:34:15.000Z
|
auxiliary/metro_sources/setup.py
|
kierannp/3dsnet
|
429c3d431b36358e43c61692e0d02df3ea255635
|
[
"MIT"
] | 7
|
2021-06-08T08:35:00.000Z
|
2021-11-07T10:27:30.000Z
|
auxiliary/metro_sources/setup.py
|
kierannp/3dsnet
|
429c3d431b36358e43c61692e0d02df3ea255635
|
[
"MIT"
] | 6
|
2021-05-20T20:29:38.000Z
|
2022-03-29T05:27:27.000Z
|
# Install dependencies (python modules)
# Compile Metro: Done
import argparse
from os import makedirs, system
from shutil import rmtree
from os.path import dirname, realpath, join, exists, isfile
def main():
    """Build or clean the CMake build directory next to this script.

    ``--build`` configures and compiles via CMake, ``--clean`` removes
    the build directory entirely.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--build", action="store_true")
    parser.add_argument("--clean", action="store_true")
    args = parser.parse_args()

    cur_dir = dirname(realpath(__file__))
    build_dir = join(cur_dir, "build")

    if args.build:
        if not exists(build_dir):
            makedirs(build_dir)
        # Run cmake/make inside the build directory. Replaces the previous
        # os.system(f"cd {build_dir}; ...") call, which ignored failures
        # and broke on paths containing spaces or shell metacharacters.
        import subprocess
        subprocess.run(["cmake", ".."], cwd=build_dir, check=True)
        subprocess.run(["make"], cwd=build_dir, check=True)
    elif args.clean:
        if exists(build_dir):
            rmtree(build_dir)


if __name__ == '__main__':
    main()
| 26.428571
| 59
| 0.667568
|
4a02d3a379cff6f3581c88ab868bd7343b53e8ed
| 2,092
|
py
|
Python
|
map_label_tool/py_proto/modules/drivers/tools/image_decompress/proto/config_pb2.py
|
freeclouds/OpenHDMap
|
b61c159fbdf4f50ae1d1650421596b28863f39be
|
[
"Apache-2.0"
] | null | null | null |
map_label_tool/py_proto/modules/drivers/tools/image_decompress/proto/config_pb2.py
|
freeclouds/OpenHDMap
|
b61c159fbdf4f50ae1d1650421596b28863f39be
|
[
"Apache-2.0"
] | null | null | null |
map_label_tool/py_proto/modules/drivers/tools/image_decompress/proto/config_pb2.py
|
freeclouds/OpenHDMap
|
b61c159fbdf4f50ae1d1650421596b28863f39be
|
[
"Apache-2.0"
] | 1
|
2021-05-26T08:42:11.000Z
|
2021-05-26T08:42:11.000Z
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: modules/drivers/tools/image_decompress/proto/config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='modules/drivers/tools/image_decompress/proto/config.proto',
package='apollo.image_decompress',
syntax='proto2',
serialized_pb=_b('\n9modules/drivers/tools/image_decompress/proto/config.proto\x12\x17\x61pollo.image_decompress\"\x1e\n\x06\x43onfig\x12\x14\n\x0c\x63hannel_name\x18\x01 \x01(\t')
)
_CONFIG = _descriptor.Descriptor(
name='Config',
full_name='apollo.image_decompress.Config',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='channel_name', full_name='apollo.image_decompress.Config.channel_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=86,
serialized_end=116,
)
DESCRIPTOR.message_types_by_name['Config'] = _CONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Config = _reflection.GeneratedProtocolMessageType('Config', (_message.Message,), dict(
DESCRIPTOR = _CONFIG,
__module__ = 'modules.drivers.tools.image_decompress.proto.config_pb2'
# @@protoc_insertion_point(class_scope:apollo.image_decompress.Config)
))
_sym_db.RegisterMessage(Config)
# @@protoc_insertion_point(module_scope)
| 29.885714
| 182
| 0.767208
|
4a02d3d6869c0a6f3ae9dd5a941f4dc697ad44f4
| 321
|
py
|
Python
|
Curso_Gustavo_Guanabara/Exercicio83.py
|
Nathan120/Arq_Python_CursoEmVideo
|
d6269eeaa8db57d1ffa8ed66e1936a061803f37e
|
[
"MIT"
] | null | null | null |
Curso_Gustavo_Guanabara/Exercicio83.py
|
Nathan120/Arq_Python_CursoEmVideo
|
d6269eeaa8db57d1ffa8ed66e1936a061803f37e
|
[
"MIT"
] | null | null | null |
Curso_Gustavo_Guanabara/Exercicio83.py
|
Nathan120/Arq_Python_CursoEmVideo
|
d6269eeaa8db57d1ffa8ed66e1936a061803f37e
|
[
"MIT"
] | null | null | null |
# Check whether the parentheses in an expression are balanced.
expr = str(input('Digite a expressão: '))
pilha = list()
for c in expr:
    if c == '(':
        pilha.append('(')
    elif c == ')':
        # Only a closing parenthesis consumes an opening one. The
        # previous version popped on ANY non-'(' character, so an
        # expression like "a(b)c" was wrongly reported as unbalanced.
        if len(pilha) > 0:
            pilha.pop()
        else:
            # Unmatched ')' -- flag the error and stop scanning
            pilha.append(')')
            break
if len(pilha) == 0:
    print('Tudo certo')
else:
    print('Vou ai que tá errado')
| 21.4
| 41
| 0.479751
|
4a02d49249ca90447b23e5eb45b1d4e1dddbb164
| 1,015
|
py
|
Python
|
test/test_restore_task_by_id.py
|
blue0513/gokart
|
14db6bcb76743da4627bfad34fd10cb28d078e4e
|
[
"MIT"
] | null | null | null |
test/test_restore_task_by_id.py
|
blue0513/gokart
|
14db6bcb76743da4627bfad34fd10cb28d078e4e
|
[
"MIT"
] | null | null | null |
test/test_restore_task_by_id.py
|
blue0513/gokart
|
14db6bcb76743da4627bfad34fd10cb28d078e4e
|
[
"MIT"
] | null | null | null |
import unittest
from unittest.mock import patch
import luigi
import luigi.mock
import gokart
class _SubDummyTask(gokart.TaskOnKart):
    # Minimal parameterized task used as a dependency in the tests below.
    task_namespace = __name__
    param = luigi.IntParameter()
class _DummyTask(gokart.TaskOnKart):
    # Task taking another task instance as a parameter; dumps a fixed payload.
    task_namespace = __name__
    sub_task = gokart.TaskInstanceParameter()

    def output(self):
        # Target path is resolved relative to the task's workspace
        return self.make_target('test.txt')

    def run(self):
        self.dump('test')
class RestoreTaskByIDTest(unittest.TestCase):
    """Verifies that a task restored from its unique id matches the original."""

    def setUp(self) -> None:
        # Start every test with an empty in-memory filesystem
        luigi.mock.MockFileSystem().clear()

    @patch('luigi.LocalTarget', new=lambda path, **kwargs: luigi.mock.MockTarget(path, **kwargs))
    def test(self):
        task = _DummyTask(sub_task=_SubDummyTask(param=10))
        luigi.build([task], local_scheduler=True, log_level="CRITICAL")
        unique_id = task.make_unique_id()
        restored = _DummyTask.restore(unique_id)
        # Previously ``assertTrue(a, b)`` was used, which treats the second
        # argument as a failure message and never compares the two ids.
        self.assertEqual(task.make_unique_id(), restored.make_unique_id())
# Allow running this test module directly as a script
if __name__ == '__main__':
    unittest.main()
| 24.166667
| 97
| 0.698522
|
4a02d51362ee48970a7339b38cf62903030f2800
| 2,136
|
py
|
Python
|
official/nlp/modeling/layers/mat_mul_with_margin_test.py
|
mcasanova1445/models
|
37be0fdb4abccca633bb3199a4e6f3f71cd174d9
|
[
"Apache-2.0"
] | 1
|
2020-05-20T11:40:56.000Z
|
2020-05-20T11:40:56.000Z
|
official/nlp/modeling/layers/mat_mul_with_margin_test.py
|
mdsaifhaider/models
|
7214e17eb425963ec3d0295be215d5d26deaeb32
|
[
"Apache-2.0"
] | 8
|
2020-05-19T00:52:30.000Z
|
2020-06-04T23:57:20.000Z
|
official/nlp/modeling/layers/mat_mul_with_margin_test.py
|
mdsaifhaider/models
|
7214e17eb425963ec3d0295be215d5d26deaeb32
|
[
"Apache-2.0"
] | 2
|
2021-10-07T04:47:04.000Z
|
2021-12-18T04:18:19.000Z
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mat_mul_with_margin layer."""
import tensorflow as tf
from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import
from official.nlp.modeling.layers import mat_mul_with_margin
class MatMulWithMarginTest(keras_parameterized.TestCase):
def test_layer_invocation(self):
"""Validate that the Keras object can be created and invoked."""
input_width = 512
test_layer = mat_mul_with_margin.MatMulWithMargin()
# Create a 2-dimensional input (the first dimension is implicit).
left_encoded = tf.keras.Input(shape=(input_width,), dtype=tf.float32)
right_encoded = tf.keras.Input(shape=(input_width,), dtype=tf.float32)
left_logits, right_logits = test_layer(left_encoded, right_encoded)
# Validate that the outputs are of the expected shape.
expected_output_shape = [None, None]
self.assertEqual(expected_output_shape, left_logits.shape.as_list())
self.assertEqual(expected_output_shape, right_logits.shape.as_list())
def test_serialize_deserialize(self):
# Create a layer object that sets all of its config options.
layer = mat_mul_with_margin.MatMulWithMargin()
# Create another layer object from the first object's config.
new_layer = mat_mul_with_margin.MatMulWithMargin.from_config(
layer.get_config())
# If the serialization was successful, the new config should match the old.
self.assertAllEqual(layer.get_config(), new_layer.get_config())
if __name__ == '__main__':
tf.test.main()
| 40.301887
| 101
| 0.764513
|
4a02d641afc1a67f6df73465de5129dafafad47b
| 5,641
|
py
|
Python
|
custom_widgets/color/colorwidget.py
|
SunChuquin/pyside2_designer_widgets
|
9ae4243dc9845ef4022524b1cf7788691bb4f257
|
[
"MIT"
] | 1
|
2021-06-27T18:44:18.000Z
|
2021-06-27T18:44:18.000Z
|
custom_widgets/color/colorwidget.py
|
SunChuquin/pyside2_designer_widgets
|
9ae4243dc9845ef4022524b1cf7788691bb4f257
|
[
"MIT"
] | null | null | null |
custom_widgets/color/colorwidget.py
|
SunChuquin/pyside2_designer_widgets
|
9ae4243dc9845ef4022524b1cf7788691bb4f257
|
[
"MIT"
] | 1
|
2021-06-24T03:50:23.000Z
|
2021-06-24T03:50:23.000Z
|
from PySide2.QtCore import QObject, QTimer, Qt, QSize
from PySide2.QtGui import QClipboard, QMouseEvent, QFont, QCursor, QScreen, QPixmap, QImage, QColor
from PySide2.QtWidgets import QWidget, QGridLayout, QVBoxLayout, QLabel, QLineEdit, QSizePolicy, QFrame, QApplication
class Singleton(type(QObject), type):
    """Metaclass ensuring a single shared instance per class.

    Derives from ``type(QObject)`` so it stays compatible with the Qt
    metaclass used by PySide2 widget classes.
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            # First instantiation: create and cache the instance
            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
        return cls._instances[cls]
class ColorWidget(QWidget, metaclass=Singleton):
    """
    Screen color picker.

    Original author: feiyangqingyun (QQ:517216493) 2019-10-07
    Translator: sunchuquin (QQ:1715216365) 2021-07-04
    """

    def __init__(self, parent: QWidget = None):
        super(ColorWidget, self).__init__(parent)
        # NOTE(review): both attributes are re-assigned near the end of
        # __init__; these initial values act only as placeholders.
        self.cp: QClipboard = QClipboard()
        self.pressed: bool = False
        # NOTE(review): this timer appears unused -- polling is driven by a
        # local QTimer created below; confirm before removing.
        self.timer: QTimer = QTimer()

        # Left column: color swatch (labColor) above a caption label
        self.gridLayout: QGridLayout = QGridLayout(self)
        self.gridLayout.setSpacing(6)
        self.gridLayout.setContentsMargins(11, 11, 11, 11)
        self.verticalLayout: QVBoxLayout = QVBoxLayout()
        self.verticalLayout.setSpacing(0)
        self.labColor: QLabel = QLabel()
        self.labColor.setText('+')
        self.labColor.setStyleSheet("background-color: rgb(255, 107, 107);color: rgb(250, 250, 250);")
        self.labColor.setAlignment(Qt.AlignCenter)

        font: QFont = QFont()
        font.setPixelSize(35)
        font.setBold(True)
        self.labColor.setFont(font)

        sizePolicy: QSizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.labColor.sizePolicy().hasHeightForWidth())
        self.labColor.setSizePolicy(sizePolicy)
        self.labColor.setMinimumSize(QSize(80, 70))
        self.labColor.setMaximumSize(QSize(80, 70))
        self.labColor.setCursor(QCursor(Qt.CrossCursor))
        self.labColor.setFrameShape(QFrame.StyledPanel)
        self.labColor.setFrameShadow(QFrame.Sunken)
        self.verticalLayout.addWidget(self.labColor)

        self.label: QLabel = QLabel(self)
        self.label.setMinimumSize(QSize(0, 18))
        self.label.setStyleSheet("background-color: rgb(0, 0, 0);color: rgb(200, 200, 200);")
        self.label.setAlignment(Qt.AlignCenter)
        self.verticalLayout.addWidget(self.label)
        self.gridLayout.addLayout(self.verticalLayout, 0, 0, 3, 1)

        # Right column: web/rgb/coordinate value fields
        self.labWeb: QLabel = QLabel(self)
        self.gridLayout.addWidget(self.labWeb, 0, 1, 1, 1)
        self.txtWeb: QLineEdit = QLineEdit(self)
        self.gridLayout.addWidget(self.txtWeb, 0, 2, 1, 1)
        self.labRgb: QLabel = QLabel(self)
        self.gridLayout.addWidget(self.labRgb, 1, 1, 1, 1)
        self.txtRgb: QLineEdit = QLineEdit(self)
        self.gridLayout.addWidget(self.txtRgb, 1, 2, 1, 1)
        self.labPoint: QLabel = QLabel(self)
        self.gridLayout.addWidget(self.labPoint, 2, 1, 1, 1)
        self.txtPoint: QLineEdit = QLineEdit(self)
        self.gridLayout.addWidget(self.txtPoint, 2, 2, 1, 1)

        self.label.setText('当前颜色显示')
        self.labWeb.setText('web值: ')
        self.labRgb.setText('rgb值: ')
        self.labPoint.setText('坐标值: ')

        self.setLayout(self.gridLayout)
        self.setWindowTitle('屏幕拾色器')
        self.setFixedSize(270, 108)

        self.cp = QApplication.clipboard()
        self.pressed = False
        # Poll the pixel under the cursor ten times per second
        timer = QTimer(self)
        timer.setInterval(100)
        timer.timeout.connect(self.showColorValue)
        timer.start()

    def mousePressEvent(self, event: QMouseEvent) -> None:
        # Only start sampling when the press lands on the color swatch
        if not self.labColor.rect().contains(event.pos()): return
        self.pressed = True

    def mouseReleaseEvent(self, event: QMouseEvent) -> None:
        self.pressed = False

    def showColorValue(self) -> None:
        """Sample the pixel under the cursor and update the value fields."""
        if not self.pressed: return
        x: int = QCursor.pos().x()
        y: int = QCursor.pos().y()
        self.txtPoint.setText("x:%d y:%d" % (x, y))
        # Grab a tiny region of the desktop around the cursor position
        screen: QScreen = QApplication.primaryScreen()
        pixmap: QPixmap = screen.grabWindow(QApplication.desktop().winId(), x, y, 2, 2)

        red: int = 0
        green: int = 0
        blue: int = 0
        strDecimalValue: str = ''
        strHex: str = ''
        if not pixmap.isNull():
            image: QImage = pixmap.toImage()
            if not image.isNull():
                if image.valid(0, 0):
                    color: QColor = QColor(image.pixel(0, 0))
                    red = color.red()
                    green = color.green()
                    blue = color.blue()
                    strDecimalValue = "%d, %d, %d" % (red, green, blue)
                    strHex = "#%02X%02X%02X" % (red, green, blue)

        # Automatically pick a contrasting foreground for the background color
        color: QColor = QColor(red, green, blue)
        gray: float = (0.299 * color.red() + 0.587 * color.green() + 0.114 * color.blue()) / 255
        textColor: QColor = QColor(Qt.black) if gray > 0.5 else QColor(Qt.white)
        value = "background:rgb(%s);color:%s" % (strDecimalValue, textColor.name())
        self.labColor.setStyleSheet(value)
        self.txtRgb.setText(strDecimalValue)
        self.txtWeb.setText(strHex)
if __name__ == '__main__':
    import sys
    from PySide2.QtCore import QTextCodec
    from PySide2.QtWidgets import QApplication

    # Demo entry point: use a CJK-capable font and a UTF-8 locale codec
    # so the Chinese labels render correctly.
    app = QApplication()
    app.setFont(QFont("Microsoft Yahei", 9))
    codec: QTextCodec = QTextCodec.codecForName(b"utf-8")
    QTextCodec.setCodecForLocale(codec)
    window = ColorWidget()
    window.show()
    sys.exit(app.exec_())
| 36.393548
| 117
| 0.629321
|
4a02d6e0c46ebb5329bdfb29b0ed2b14b883c562
| 164
|
py
|
Python
|
pacote-download/ex(1-100)/ex047.py
|
gssouza2051/python-exercicios
|
81e87fed7ead0adf58473a741aaa3c83064f6cb5
|
[
"MIT"
] | null | null | null |
pacote-download/ex(1-100)/ex047.py
|
gssouza2051/python-exercicios
|
81e87fed7ead0adf58473a741aaa3c83064f6cb5
|
[
"MIT"
] | null | null | null |
pacote-download/ex(1-100)/ex047.py
|
gssouza2051/python-exercicios
|
81e87fed7ead0adf58473a741aaa3c83064f6cb5
|
[
"MIT"
] | null | null | null |
'''Create a program that shows on screen every even number in the interval between 1 and 50.'''
# Step through the even numbers 2, 4, ..., 50
for numero in range(2, 51, 2):
    print(numero, end=' ')
print('ACABOU')
| 32.8
| 101
| 0.676829
|
4a02d7194b273860385019cebc6bf4653ffbbc73
| 236
|
py
|
Python
|
Chapter18/2.2.py
|
PacktPublishing/GettingStartedwithPythonfortheInternetofThings-
|
a5a86ae38b3a4c625dfc1213d32a3f49e1e298c6
|
[
"MIT"
] | 19
|
2018-06-28T15:48:47.000Z
|
2022-01-08T12:40:52.000Z
|
Chapter18/2.2.py
|
PacktPublishing/GettingStartedwithPythonfortheInternetofThings-
|
a5a86ae38b3a4c625dfc1213d32a3f49e1e298c6
|
[
"MIT"
] | null | null | null |
Chapter18/2.2.py
|
PacktPublishing/GettingStartedwithPythonfortheInternetofThings-
|
a5a86ae38b3a4c625dfc1213d32a3f49e1e298c6
|
[
"MIT"
] | 13
|
2018-06-30T10:33:52.000Z
|
2021-12-29T11:31:31.000Z
|
# NOTE(review): the original first line fused two imports on one line
# ("import time import RPi.GPIO as GPIO"), which is a SyntaxError.
import time
import RPi.GPIO as GPIO

GPIO.setmode(GPIO.BCM)
GPIO.setup(23, GPIO.IN)   # BCM pin 23 as digital input
GPIO.setup(24, GPIO.OUT)  # BCM pin 24 as digital output

try:
    # Mirror the input state on the output pin once per second
    while True:
        if GPIO.input(23) == 1:
            GPIO.output(24, GPIO.HIGH)
        else:
            GPIO.output(24, GPIO.LOW)
        time.sleep(1)
finally:
    # The original cleanup call sat after the infinite loop and was
    # unreachable; run it on exit/interrupt instead.
    GPIO.cleanup()
| 19.666667
| 35
| 0.716102
|
4a02d74ec5b24770da90a993e1786ef5642752a9
| 759
|
py
|
Python
|
autotabular/algorithms/ctr/fm.py
|
jianzhnie/AutoTabular
|
fb407300adf97532a26d33f7442d2a606fa30512
|
[
"Apache-2.0"
] | 48
|
2021-09-06T08:09:26.000Z
|
2022-03-28T13:02:54.000Z
|
autotabular/algorithms/ctr/fm.py
|
Fanxingye/Autotabular
|
d630c78290a52f8c73885afb16884e18135c34f6
|
[
"Apache-2.0"
] | null | null | null |
autotabular/algorithms/ctr/fm.py
|
Fanxingye/Autotabular
|
d630c78290a52f8c73885afb16884e18135c34f6
|
[
"Apache-2.0"
] | 7
|
2021-09-23T07:28:46.000Z
|
2021-10-02T21:15:18.000Z
|
import torch
from autotabular.algorithms.ctr.layer import FactorizationMachine, FeaturesEmbedding, FeaturesLinear
class FactorizationMachineModel(torch.nn.Module):
    """A pytorch implementation of Factorization Machine.

    Combines a first-order linear term with a factorized pairwise
    interaction term over embedded categorical features, squashed
    through a sigmoid.

    Reference:
        S Rendle, Factorization Machines, 2010.
    """

    def __init__(self, field_dims, embed_dim):
        super().__init__()
        self.embedding = FeaturesEmbedding(field_dims, embed_dim)
        self.linear = FeaturesLinear(field_dims)
        # reduce_sum collapses the interaction term to one logit per sample
        self.fm = FactorizationMachine(reduce_sum=True)

    def forward(self, x):
        """
        :param x: Long tensor of size ``(batch_size, num_fields)``
        """
        x = self.linear(x) + self.fm(self.embedding(x))
        # Squeeze the singleton logit axis and map to a (0, 1) probability
        return torch.sigmoid(x.squeeze(1))
| 31.625
| 100
| 0.678524
|
4a02d7a13f036f175c384e187fdc46dc90d79100
| 5,788
|
py
|
Python
|
smtpd/src/mailer.py
|
scrapbird/sarlacc
|
a529e115778d0dddd01f73077432ff5c0ba6e6c6
|
[
"MIT"
] | 53
|
2018-02-10T08:59:41.000Z
|
2021-04-04T16:05:20.000Z
|
smtpd/src/mailer.py
|
deadbits/sarlacc
|
a529e115778d0dddd01f73077432ff5c0ba6e6c6
|
[
"MIT"
] | 5
|
2018-02-26T19:32:59.000Z
|
2019-06-30T22:29:58.000Z
|
smtpd/src/mailer.py
|
deadbits/sarlacc
|
a529e115778d0dddd01f73077432ff5c0ba6e6c6
|
[
"MIT"
] | 13
|
2018-02-11T09:19:16.000Z
|
2020-07-24T07:33:26.000Z
|
import email
import re
import asyncio
import logging
import functools
from datetime import datetime
from aiosmtpd.controller import Controller
from aiosmtpd.smtp import SMTP as Server
from base64 import b64decode
import storage
logger = logging.getLogger()
async def create_mailer(handler, loop, ident_hostname, ident, **kwargs):
    """Creates and initializes a ``Mailer`` object

    Args:
        handler -- the smtpd MailHandler object.
        loop -- asyncio loop.
        ident_hostname -- the hostname to use in the ident message.
        ident -- the version string.

    Returns:
        The Mailer object.
    """
    # NOTE(review): ``Mailer`` is not defined in this module -- presumably
    # it is ``CustomIdentController`` or imported elsewhere; verify before
    # relying on this factory.
    return Mailer(handler, loop, ident_hostname, ident, **kwargs)
class CustomIdentController(Controller):
    """aiosmtpd Controller that advertises a custom hostname and ident."""

    def __init__(self, handler, loop, ident_hostname, ident, **kwargs):
        """Init method for ``CustomIdentController``.

        Args:
            handler -- the smtpd MailHandler object.
            loop -- the asyncio loop.
            ident_hostname -- the hostname to use in the ident message.
            ident -- the version string.
        """
        self.loop = loop
        self.ident_hostname = ident_hostname
        self.ident = ident
        super(CustomIdentController, self).__init__(handler, loop=loop, **kwargs)

    def factory(self):
        """``CustomIdentController`` factory.

        Overrides ``super.factory()``.
        Creates an aiosmtpd server object.

        Returns:
            Returns the server object.
        """
        server = Server(self.handler)
        # Advertise the configured hostname/ident instead of the defaults
        server.hostname = self.ident_hostname
        server.__ident__ = self.ident
        return server
class MailHandler:
    def __init__(self, loop, config, plugin_manager):
        """The init method for the ``MailHandler`` class.

        Args:
            loop -- the ``asyncio`` loop.
            config -- the sarlacc ``config`` object.
            plugin_manager -- the sarlacc ``plugin_manager`` object.
        """
        self.loop = loop
        self.config = config
        self.plugin_manager = plugin_manager
        loop.create_task(self.init_store())

    async def init_store(self):
        """Intialize the storage backend.

        This will create the storage backend and load and run any plugins.
        """
        # Init storage handlers
        self.store = await storage.create_storage(self.config, self.plugin_manager, self.loop)
        self.plugin_manager.load_plugins(self.store, "plugins")
        self.plugin_manager.run_plugins()

    @staticmethod
    def _parse_attachment(part):
        """Extract {content, filename} from a part flagged as an attachment.

        The filename is pulled from the Content-Disposition header; it is
        ``None`` when no ``filename="..."`` parameter is present.
        """
        filename = None
        matches = re.findall(r'filename=".*"', part["Content-Disposition"])
        if len(matches) > 0:
            a = matches[0].index('"')
            b = matches[0].index('"', a + 1)
            filename = matches[0][a + 1:b]
        return {"content": part.get_payload(decode=True), "filename": filename}

    async def handle_DATA(self, server, session, envelope):
        """DATA header handler.

        Overrides ``super.handle_DATA``
        This will be called when a DATA header is received by the mail server.

        Args:
            server -- the ``aiosmtpd`` server.
            session -- the ``aiosmtpd`` session.
            envelope -- the data envelope.

        Returns:
            The response string to send to the client.
        """
        subject = ""
        to_address_list = envelope.rcpt_tos
        from_address = envelope.mail_from
        body = None
        attachments = []
        date_sent = datetime.now()

        # Parse message; best-effort -- a malformed message is logged and
        # the client still receives a 250 acknowledgement.
        try:
            message = email.message_from_string(envelope.content.decode("utf8", errors="replace"))
            subject = message["subject"]

            # A non-multipart message is handled as a single "part", which
            # unifies the previously duplicated parsing branches.
            parts = message.get_payload() if message.is_multipart() else [message]
            for part in parts:
                if "Content-Disposition" in part and "attachment;" in part["Content-Disposition"]:
                    attachments.append(self._parse_attachment(part))
                elif "Content-Type" in part and ("text/plain" in part["Content-Type"]
                                                 or "text/html" in part["Content-Type"]):
                    body = part.get_payload()

            asyncio.ensure_future(self.store.store_email(
                subject = subject,
                to_address_list = to_address_list,
                from_address = from_address,
                body = body,
                attachments = attachments,
                date_sent = date_sent))
        except Exception:
            # Previously a bare ``except:`` that referenced the unimported
            # ``sys`` module, so the handler itself raised NameError.
            # logger.exception records the original traceback instead.
            logger.exception("Failed to parse mail")

        return "250 Message accepted for delivery"
| 34.248521
| 104
| 0.55114
|
4a02d878313e15df605338390b856eef321cf0bc
| 338
|
py
|
Python
|
migrations/0046_merge_20190330_2353.py
|
audaciouscode/PassiveDataKit-Django
|
ed1e00c436801b9f49a3e0e6657c2adb6b2ba3d4
|
[
"Apache-2.0"
] | 5
|
2016-01-26T19:19:44.000Z
|
2018-12-12T18:04:04.000Z
|
migrations/0046_merge_20190330_2353.py
|
audacious-software/PassiveDataKit-Django
|
da91a375c075ceec938f2c9bb6b011f9f019b024
|
[
"Apache-2.0"
] | 6
|
2020-02-17T20:16:28.000Z
|
2021-12-13T21:51:20.000Z
|
migrations/0046_merge_20190330_2353.py
|
audacious-software/PassiveDataKit-Django
|
da91a375c075ceec938f2c9bb6b011f9f019b024
|
[
"Apache-2.0"
] | 4
|
2020-01-29T15:36:58.000Z
|
2021-06-01T18:55:26.000Z
|
# pylint: skip-file
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-03-31 03:53
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated merge migration reconciling two divergent 0045 branches.

    Merge migrations intentionally contain no operations; they only join
    the dependency graph back into a single leaf.
    """
    dependencies = [
        ('passive_data_kit', '0045_auto_20190326_1308'),
        ('passive_data_kit', '0045_auto_20190225_1821'),
    ]
    # No schema changes: this exists purely to merge migration history.
    operations = [
    ]
| 18.777778
| 56
| 0.653846
|
4a02d8d4b673e7691a68cf230e86dd538d912668
| 5,120
|
py
|
Python
|
faculty-classifier/v2/faculty-classifier.py
|
wustep/woceng
|
3f3509629f7b91fdf2681f2011fc51af9292b01d
|
[
"MIT"
] | 1
|
2019-05-02T20:57:10.000Z
|
2019-05-02T20:57:10.000Z
|
faculty-classifier/v2/faculty-classifier.py
|
wustep/woceng
|
3f3509629f7b91fdf2681f2011fc51af9292b01d
|
[
"MIT"
] | null | null | null |
faculty-classifier/v2/faculty-classifier.py
|
wustep/woceng
|
3f3509629f7b91fdf2681f2011fc51af9292b01d
|
[
"MIT"
] | null | null | null |
# Adapted from http://stevenloria.com/how-to-build-a-text-classification-system-with-python-and-textblob/
import csv
from textblob.classifiers import NaiveBayesClassifier
from textblob import TextBlob
# Maps a faculty designation code to its human-readable meaning.
def interpretFaculty(res):
    """Return the human-readable faculty designation for code *res*.

    Accepts ints or strings; any value outside {1, 2, 3} yields "invalid".
    """
    labels = {
        "1": "tenure-track or tenured faculty",
        "2": "non-tenure-track faculty",
        "3": "not staff",
    }
    return labels.get(str(res), "invalid")
# Maps a staff designation code to its human-readable meaning.
def interpretStaff(res):
    """Return the human-readable staff designation for code *res*.

    Accepts ints or strings; any value outside {1, 2, 3} yields "invalid".
    """
    labels = {
        "1": "administrator",
        "2": "staff",
        "3": "not staff",
    }
    return labels.get(str(res), "invalid")
# Reads data from CSV and puts into dataset array
# Column = 1 for faculty, 2 for staff designation
# Training = 1 for test/training set, 0 otherwise
def getDataset(filename, column=1, training=0):
    """Load classification samples from the CSV file *filename*.

    Each usable row contributes ``[title, label]`` when *training* is
    truthy, or ``[title]`` otherwise.  Titles shorter than 3 characters
    are treated as invalid: skipped in training mode, recorded as ``[""]``
    otherwise (to keep row alignment with the source file).

    Fixes over the original: the header row was skipped twice (``next(c)``
    then ``next(c, None)``), silently dropping the first data row; the
    file handle was never closed; a completely empty CSV row crashed on
    ``row[0]``.
    """
    dataset = []
    with open(filename) as f:  # close the handle deterministically
        c = csv.reader(f)
        next(c, None)  # skip the header row exactly once
        for row in c:
            if not row or len(row[0]) < 3:  # invalid / blank input
                if not training:  # keep a placeholder so rows stay aligned
                    dataset.append([""])
            elif training:
                res = row[column]
                if res == "":
                    # Blank labels in training data mean "not staff/faculty".
                    res = 3
                dataset.append([row[0], res])
            else:
                dataset.append([row[0]])
    return dataset
# ---- Phase 1: train both Naive Bayes classifiers on the labelled CSV ----
print("\r\n#### Training Set ####")
trainFacultyData = getDataset("data/training-data.csv", 1, 1)
trainStaffData = getDataset("data/training-data.csv", 2, 1)
print("Training on " + str(len(trainStaffData)) + " samples from training-data")
facultyClassifier = NaiveBayesClassifier(trainFacultyData)
print("Trained faculty classifier on training-data")
staffClassifier = NaiveBayesClassifier(trainStaffData)
print("Trained staff classifier on training data")
# ---- Phase 2: measure accuracy on the held-out test CSV, then fold the
# test samples back into the classifiers for the full run below ----
print("\r\n#### Test Set ####")
testFacultyData = getDataset("data/test-data.csv", 1, 1)
testStaffData = getDataset("data/test-data.csv", 2, 1)
print("Testing on " + str(len(testStaffData)) + " samples from test-data")
print("Faculty Accuracy: {0}".format(facultyClassifier.accuracy(testFacultyData)))
print("Staff Accuracy: {0}".format(staffClassifier.accuracy(testStaffData)))
print("Updating faculty classifier...")
facultyClassifier.update(testFacultyData)
print("Updating staff classifier...")
staffClassifier.update(testStaffData)
print("Classifiers updated.")
# ---- Phase 3: show the most informative learned features ----
print("\r\n#### Features ####")
print("Faculty Classifier")
print(facultyClassifier.show_informative_features(15))
print("Staff Classifier")
print(staffClassifier.show_informative_features(15))
# Ask the user which CSV to classify and where to write the results.
inputFileName = input('Enter input file name (e.g. data/partial-randomized-faculty-data.csv):');
outputFileName = input('Enter output file name:');
print("\r\n#### Full Classification ####")
# Classify every row of the input CSV, writing "title,faculty,staff" codes
# to the output CSV while tallying per-class counts and average certainty.
with open(inputFileName, 'r') as inputFile:
    reader = csv.reader(inputFile, delimiter=",", quotechar='"')
    print("Classifying partial-randomized-faculty-data")
    with open(outputFileName, "w", newline='') as outputFile:
        facultyResults = [0, 0, 0, 0]  # counts of classes 1-4 (4 == invalid)
        facultyAverageProb = 0
        staffResults = [0, 0, 0, 0]
        staffAverageProb = 0
        totalChecked = 0  # rows with a usable (>= 3 chars) title
        output = csv.writer(outputFile, delimiter=",", quotechar='"')
        next(reader, None)  # skip the headers
        for row in reader:
            staffResult = 4  # class 4 marks invalid / too-short input
            facultyResult = 4
            line = ""
            if (len(row) > 0):
                line = row[0]
            if (len(line) >= 3):
                title = row[0].lower()
                facultyProbDist = facultyClassifier.prob_classify(title)
                facultyResult = facultyProbDist.max()
                facultyProb = round(facultyProbDist.prob(facultyResult), 3)
                staffProbDist = staffClassifier.prob_classify(title)
                staffResult = staffProbDist.max()
                staffProb = round(staffProbDist.prob(staffResult), 3)
                # Surface low-confidence classifications for manual review.
                if (facultyProb < 0.5 or staffProb < 0.5):
                    print("'" + title + "': " + str(staffResult) + ", " + str(facultyResult) + " @ " + str(facultyProb) + ", " + str(staffProb))
                facultyAverageProb += facultyProb
                staffAverageProb += staffProb
                totalChecked += 1
            output.writerow([line, facultyResult, staffResult])
            facultyResults[int(facultyResult)-1] += 1
            staffResults[int(staffResult)-1] += 1
    print("Completed partial-randomized-faculty-data classification")
    print("Faculty classes (1): " + str(facultyResults[0]) + ", (2): " + str(facultyResults[1]) + ", (3): " + str(facultyResults[2]) + ", (4): " + str(facultyResults[3]))
    print("Staff classes (1): " + str(staffResults[0]) + ", (2): " + str(staffResults[1]) + ", (3): " + str(staffResults[2]) + ", (4): " + str(staffResults[3]))
    # Bug fix: the original tested "totalChecked >= 0", which is always true
    # and raised ZeroDivisionError when no valid rows were classified.
    if totalChecked > 0:
        print("Faculty Average Certainty: " + str(facultyAverageProb / totalChecked))
        print("Staff Average Certainty: " + str(staffAverageProb / totalChecked))
print("\r\n#### Input Test ####")
# Interactive REPL: classify typed job titles until the user enters "stop".
# NOTE(review): the sentinel is compared against the raw prompt, so "Stop"
# or "STOP" will not exit, and the word "stop" itself is still classified
# once before the loop ends — confirm whether that is intended.
prompt = ""
while not(prompt == "stop"):
    prompt = input("Position: ")
    word = prompt.lower()
    # Classify with both models and print the decoded designations.
    res = TextBlob(word, classifier=facultyClassifier).classify()
    res2 = TextBlob(word, classifier=staffClassifier).classify()
    print("'" + word + "': " + str(res) + " ("+ interpretFaculty(res) + "), " + str(res2) + " (" + interpretStaff(res2) + ")\r\n")
| 38.496241
| 167
| 0.689258
|
4a02d918b409599e3009538887c2a35cdd059770
| 1,111
|
py
|
Python
|
attila/util/config.py
|
sirfoga/attila
|
24e58785b736b1d6c658e2605076667de64e43cd
|
[
"MIT"
] | null | null | null |
attila/util/config.py
|
sirfoga/attila
|
24e58785b736b1d6c658e2605076667de64e43cd
|
[
"MIT"
] | 2
|
2020-11-05T19:46:42.000Z
|
2020-11-09T17:55:03.000Z
|
attila/util/config.py
|
sirfoga/attila
|
24e58785b736b1d6c658e2605076667de64e43cd
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from configparser import ConfigParser
def get_config(file_path):
    """Parse the INI file at *file_path* and return the ConfigParser."""
    parser = ConfigParser()
    parser.read(file_path)
    return parser
def is_verbose(key, config):
    """Return the boolean 'verbose' option from section *key* of *config*."""
    verbose_flag = config.getboolean(key, 'verbose')
    return verbose_flag
def check_input(config):
    """Validate image-related settings in *config*.

    NOTE(review): ``_is_good_input`` is defined but never invoked, so this
    function currently performs no validation at all — confirm whether the
    call was deliberately left out while the todo below is unresolved.
    """
    def _is_square_img():
        # True when the configured image width equals its height.
        return config.getint('image', 'width') == config.getint('image', 'height')
    def _is_integer(x):
        # True when x has no fractional part (e.g. 2.0 passes, 2.5 fails).
        return x % 1 == 0
    def _is_good_input():
        # todo b = config.getint('image', 'width') # input size
        # d =
        # gamma =
        # pool_size =
        # a = (b - (2 ** d - 1) * gamma) / 2 ** d
        assert _is_integer(0.0)
        assert _is_square_img() # only square images are supported
def get_env(root_folder):
    """Load config.ini under *root_folder* and derive common project paths.

    Returns a 4-tuple ``(config, data_path, out_path, models_config)``.
    """
    config = get_config(root_folder / './config.ini')
    check_input(config)
    data_path = (root_folder / config.get('data', 'folder')).resolve()
    out_path = Path(config.get('experiments', 'output folder')).resolve()
    models_config = root_folder / 'experiments.json'
    return config, data_path, out_path, models_config
| 24.152174
| 82
| 0.638164
|
4a02d9664dea0bb7dbc7c41294e4682be059a6f3
| 1,414
|
py
|
Python
|
SRX-GD-Threatfeed/gd_threatfeed/constants.py
|
Juniper/vSRX-AWS
|
a9179d8b115229b1f26aec166a85261fd2352b14
|
[
"Apache-2.0"
] | 18
|
2017-07-03T22:45:19.000Z
|
2021-04-04T16:51:28.000Z
|
SRX-GD-Threatfeed/gd_threatfeed/constants.py
|
Juniper/vSRX-AWS
|
a9179d8b115229b1f26aec166a85261fd2352b14
|
[
"Apache-2.0"
] | 13
|
2017-10-25T18:22:22.000Z
|
2021-09-15T00:09:54.000Z
|
SRX-GD-Threatfeed/gd_threatfeed/constants.py
|
Juniper/vSRX-AWS
|
a9179d8b115229b1f26aec166a85261fd2352b14
|
[
"Apache-2.0"
] | 23
|
2017-07-20T03:29:25.000Z
|
2021-02-24T20:27:05.000Z
|
# Copyright © 2020 Juniper Networks, Inc., All Rights Reserved.
"""Constants for the GuardDuty threat-feed integration."""
# Feed entry time-to-live bounds, in seconds.
CC_TTL = 3456000 # 40 days
MIN_TTL = 86400 # 1 day
MAX_TTL = 31556952 # 1 year
# NOTE(review): 300 s is 5 minutes; the previous comment said "30 minutes",
# so the value and the intent disagreed — confirm the intended interval.
UPDATE_INTERVAL = 300 # 5 minutes
MAX_UPDATE_INTERVAL = 86400 # 1 Day
DEFAULT_MANIFEST_UPDATE_INTERVAL = 60 # 1 minute
# Bounds on the number of feed entries.
DEFAULT_MAX_ENTRIES = 10000
MIN_MAX_ENTRIES = 1000
MAX_MAX_ENTRIES = 100000
DEFAULT_SEVERITY_LEVEL = 8
# Byte tags delimiting the add/delete/end sections of the feed file format.
DATA_TAG_ADD_N = b'#add\n'
DATA_TAG_DEL_N = b'#del\n'
DATA_TAG_END_N = b'#end\n'
# Field names used in feed records.
IP_ADDR = 'ip_addr'
DOMAIN = 'dn'
THREAT_LEVEL = 'threat_level'
PROP = 'properties'
MANIFEST_FILE_NAME = 'manifest.xml'
IP_FEED = 'ip'
DNS_FEED = 'domain'
CC_CATEGORY = 'CC'
CC_DESC = 'Command and Control data schema'
CC_SCHEMA_VER = 'c66b370237'
# Numeric codes assigned to each object field in the feed schema.
OBJ_CODES = {IP_ADDR: 4, DOMAIN: 1, PROP: 8, THREAT_LEVEL: 9}
IP_OBJ_FIELDS = (IP_ADDR, THREAT_LEVEL)
URL_OBJ_FIELDS = (DOMAIN, THREAT_LEVEL)
# Maps each GuardDuty finding action type to (dotted JSON path of the
# indicator value inside the finding, indicator field kind).
ACTIONS_PATH = {'NETWORK_CONNECTION': ('networkConnectionAction.'
                                       'remoteIpDetails.ipAddressV4', IP_ADDR),
                'AWS_API_CALL': ('awsApiCallAction.remoteIpDetails.ipAddressV4',
                                 IP_ADDR),
                'DNS_REQUEST': ('dnsRequestAction.domain', DOMAIN),
                'PORT_PROBE': ('portProbeAction.portProbeDetails.*.'
                               'remoteIpDetails.ipAddressV4', IP_ADDR)}
OPEN_API_CC_URL = 'cc/file/{feed_type}/{feed_name}'
| 30.73913
| 80
| 0.67256
|
4a02d9a33b59cc66ec7d43715861ff8945f3e022
| 1,722
|
py
|
Python
|
core/user/migrations/0001_initial.py
|
Let-s-Play-416/skills-back
|
8a8197a6ac4c6d27dfc5c28070b5488afcc1ad63
|
[
"MIT"
] | null | null | null |
core/user/migrations/0001_initial.py
|
Let-s-Play-416/skills-back
|
8a8197a6ac4c6d27dfc5c28070b5488afcc1ad63
|
[
"MIT"
] | null | null | null |
core/user/migrations/0001_initial.py
|
Let-s-Play-416/skills-back
|
8a8197a6ac4c6d27dfc5c28070b5488afcc1ad63
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0 on 2021-05-27 17:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the custom User model.

    The model mirrors Django's AbstractBaseUser/PermissionsMixin fields
    (password, last_login, is_superuser, groups, user_permissions) plus
    project-specific username/email/joined_at/is_active columns.
    """
    initial = True
    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(max_length=255, unique=True)),
                ('email', models.EmailField(max_length=254, unique=True)),
                ('joined_at', models.DateTimeField(auto_now_add=True)),
                ('is_active', models.BooleanField(default=True)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| 50.647059
| 266
| 0.640534
|
4a02dab6029b30070c87bbb1d838c80a6af4d562
| 232
|
py
|
Python
|
0x01-python-if_else_loops_functions/0-positive_or_negative.py
|
ricardo1470/holbertonschool-higher_level_programming
|
aab73c8efee665b0215958ee7b338871f13634bc
|
[
"CNRI-Python"
] | null | null | null |
0x01-python-if_else_loops_functions/0-positive_or_negative.py
|
ricardo1470/holbertonschool-higher_level_programming
|
aab73c8efee665b0215958ee7b338871f13634bc
|
[
"CNRI-Python"
] | null | null | null |
0x01-python-if_else_loops_functions/0-positive_or_negative.py
|
ricardo1470/holbertonschool-higher_level_programming
|
aab73c8efee665b0215958ee7b338871f13634bc
|
[
"CNRI-Python"
] | null | null | null |
#!/usr/bin/python3
import random

# Draw a random integer in [-10, 10] and report its sign.
number = random.randint(-10, 10)
if number > 0:
    template = "{} is positive"
elif number == 0:
    template = "{} is zero"
else:
    template = "{} is negative"
print(template.format(number))
| 23.2
| 42
| 0.642241
|
4a02db1b676343157e68963507e02837be7b4719
| 1,109
|
py
|
Python
|
python_modules/libraries/dagster-datadog/setup.py
|
withshubh/dagster
|
ff4a0db53e126f44097a337eecef54988cc718ef
|
[
"Apache-2.0"
] | null | null | null |
python_modules/libraries/dagster-datadog/setup.py
|
withshubh/dagster
|
ff4a0db53e126f44097a337eecef54988cc718ef
|
[
"Apache-2.0"
] | 1
|
2021-06-21T18:30:02.000Z
|
2021-06-25T21:18:39.000Z
|
python_modules/libraries/dagster-datadog/setup.py
|
withshubh/dagster
|
ff4a0db53e126f44097a337eecef54988cc718ef
|
[
"Apache-2.0"
] | null | null | null |
from typing import Dict
from setuptools import find_packages, setup # type: ignore
def get_version() -> str:
    """Execute dagster_datadog/version.py and return its ``__version__``."""
    namespace: Dict[str, str] = {}
    with open("dagster_datadog/version.py") as source:
        exec(source.read(), namespace)  # pylint: disable=W0122
    return namespace["__version__"]
if __name__ == "__main__":
    # Standard setuptools packaging entry point; the metadata mirrors the
    # other dagster library packages.  Version is read from version.py so
    # the single source of truth lives inside the package.
    setup(
        name="dagster-datadog",
        version=get_version(),
        author="Elementl",
        author_email="hello@elementl.com",
        license="Apache-2.0",
        description="Package for datadog Dagster framework components.",
        url="https://github.com/dagster-io/dagster/tree/master/python_modules/libraries/dagster-datadog",
        classifiers=[
            "Programming Language :: Python :: 3.6",
            "Programming Language :: Python :: 3.7",
            "Programming Language :: Python :: 3.8",
            "License :: OSI Approved :: Apache Software License",
            "Operating System :: OS Independent",
        ],
        packages=find_packages(exclude=["test"]),
        install_requires=["dagster", "datadog"],
        zip_safe=False,
    )
| 32.617647
| 105
| 0.614067
|
4a02db281724ed2678e84d7601f0a9c83eb3a40c
| 35,921
|
py
|
Python
|
Lib/test/test_ntpath.py
|
gerph/cpython
|
98813cb03c2371789669c3d8debf8fca2a344de9
|
[
"CNRI-Python-GPL-Compatible"
] | 2,441
|
2020-07-31T06:45:53.000Z
|
2022-03-30T15:56:49.000Z
|
Lib/test/test_ntpath.py
|
gerph/cpython
|
98813cb03c2371789669c3d8debf8fca2a344de9
|
[
"CNRI-Python-GPL-Compatible"
] | 238
|
2020-10-21T04:54:00.000Z
|
2022-03-31T21:49:03.000Z
|
Lib/test/test_ntpath.py
|
gerph/cpython
|
98813cb03c2371789669c3d8debf8fca2a344de9
|
[
"CNRI-Python-GPL-Compatible"
] | 93
|
2020-08-09T12:00:17.000Z
|
2022-03-25T07:57:24.000Z
|
import ntpath
import os
import sys
import unittest
import warnings
from test.support import TestFailed, FakePath
from test import support, test_genericpath
from tempfile import TemporaryFile
try:
    import nt
except ImportError:
    # Most tests can complete without the nt module,
    # but for those that require it we import here.
    nt = None
# Feature probe: ntpath._getfinalpathname exists only on Windows builds.
try:
    ntpath._getfinalpathname
except AttributeError:
    HAVE_GETFINALPATHNAME = False
else:
    HAVE_GETFINALPATHNAME = True
# Feature probe: GetShortPathNameW access requires ctypes.
try:
    import ctypes
except ImportError:
    HAVE_GETSHORTPATHNAME = False
else:
    HAVE_GETSHORTPATHNAME = True
def _getshortpathname(path):
    """Return the Windows 8.3 short form of *path* via GetShortPathNameW."""
    shorten = ctypes.WinDLL("kernel32", use_last_error=True).GetShortPathNameW
    shorten.argtypes = [ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32]
    shorten.restype = ctypes.c_uint32
    # First call with a NULL buffer just reports the required length.
    needed = shorten(path, None, 0)
    if not needed:
        raise OSError("failed to get short path name 0x{:08X}"
                      .format(ctypes.get_last_error()))
    buffer = ctypes.create_unicode_buffer(needed)
    written = shorten(path, buffer, needed)
    return buffer[:written]
def _norm(path):
if isinstance(path, (bytes, str, os.PathLike)):
return ntpath.normcase(os.fsdecode(path))
elif hasattr(path, "__iter__"):
return tuple(ntpath.normcase(os.fsdecode(p)) for p in path)
return path
def tester(fn, wantResult):
    """Eval the ntpath call in *fn* and fail unless it yields *wantResult*.

    The expression is evaluated twice: first as written (str arguments),
    then again with every string literal mechanically rewritten into a
    bytes literal, so each case exercises both the str and bytes paths.
    """
    fn = fn.replace("\\", "\\\\")  # re-escape backslashes for eval
    gotResult = eval(fn)
    if wantResult != gotResult and _norm(wantResult) != _norm(gotResult):
        raise TestFailed("%s should return: %s but returned: %s" \
              %(str(fn), str(wantResult), str(gotResult)))
    # then with bytes
    # Each rewrite targets one syntactic position where a str literal can
    # open (call start, list start, argument separator) — order matters only
    # in that every variant must be covered before evaluating.
    fn = fn.replace("('", "(b'")
    fn = fn.replace('("', '(b"')
    fn = fn.replace("['", "[b'")
    fn = fn.replace('["', '[b"')
    fn = fn.replace(", '", ", b'")
    fn = fn.replace(', "', ', b"')
    fn = os.fsencode(fn).decode('latin1')
    fn = fn.encode('ascii', 'backslashreplace').decode('ascii')
    with warnings.catch_warnings():
        # bytes paths can emit DeprecationWarning; that is not under test here
        warnings.simplefilter("ignore", DeprecationWarning)
        gotResult = eval(fn)
    if _norm(wantResult) != _norm(gotResult):
        raise TestFailed("%s should return: %s but returned: %s" \
              %(str(fn), str(wantResult), repr(gotResult)))
class NtpathTestCase(unittest.TestCase):
    """TestCase base adding case/separator-insensitive path assertions."""

    def assertPathEqual(self, path1, path2):
        """Fail unless the two paths match after case normalization."""
        if path1 != path2 and _norm(path1) != _norm(path2):
            self.assertEqual(path1, path2)

    def assertPathIn(self, path, pathset):
        """Fail unless the normalized path occurs in the normalized set."""
        self.assertIn(_norm(path), _norm(pathset))
class TestNtpath(NtpathTestCase):
def test_splitext(self):
tester('ntpath.splitext("foo.ext")', ('foo', '.ext'))
tester('ntpath.splitext("/foo/foo.ext")', ('/foo/foo', '.ext'))
tester('ntpath.splitext(".ext")', ('.ext', ''))
tester('ntpath.splitext("\\foo.ext\\foo")', ('\\foo.ext\\foo', ''))
tester('ntpath.splitext("foo.ext\\")', ('foo.ext\\', ''))
tester('ntpath.splitext("")', ('', ''))
tester('ntpath.splitext("foo.bar.ext")', ('foo.bar', '.ext'))
tester('ntpath.splitext("xx/foo.bar.ext")', ('xx/foo.bar', '.ext'))
tester('ntpath.splitext("xx\\foo.bar.ext")', ('xx\\foo.bar', '.ext'))
tester('ntpath.splitext("c:a/b\\c.d")', ('c:a/b\\c', '.d'))
    def test_splitdrive(self):
        """splitdrive separates 'c:' drives and '\\\\host\\share' UNC roots;
        malformed UNC forms (extra leading or doubled separators) yield an
        empty drive."""
        tester('ntpath.splitdrive("c:\\foo\\bar")',
               ('c:', '\\foo\\bar'))
        tester('ntpath.splitdrive("c:/foo/bar")',
               ('c:', '/foo/bar'))
        tester('ntpath.splitdrive("\\\\conky\\mountpoint\\foo\\bar")',
               ('\\\\conky\\mountpoint', '\\foo\\bar'))
        tester('ntpath.splitdrive("//conky/mountpoint/foo/bar")',
               ('//conky/mountpoint', '/foo/bar'))
        tester('ntpath.splitdrive("\\\\\\conky\\mountpoint\\foo\\bar")',
            ('', '\\\\\\conky\\mountpoint\\foo\\bar'))
        tester('ntpath.splitdrive("///conky/mountpoint/foo/bar")',
            ('', '///conky/mountpoint/foo/bar'))
        tester('ntpath.splitdrive("\\\\conky\\\\mountpoint\\foo\\bar")',
               ('', '\\\\conky\\\\mountpoint\\foo\\bar'))
        tester('ntpath.splitdrive("//conky//mountpoint/foo/bar")',
               ('', '//conky//mountpoint/foo/bar'))
        # Issue #19911: UNC part containing U+0130
        self.assertEqual(ntpath.splitdrive('//conky/MOUNTPOİNT/foo/bar'),
                         ('//conky/MOUNTPOİNT', '/foo/bar'))
def test_split(self):
tester('ntpath.split("c:\\foo\\bar")', ('c:\\foo', 'bar'))
tester('ntpath.split("\\\\conky\\mountpoint\\foo\\bar")',
('\\\\conky\\mountpoint\\foo', 'bar'))
tester('ntpath.split("c:\\")', ('c:\\', ''))
tester('ntpath.split("\\\\conky\\mountpoint\\")',
('\\\\conky\\mountpoint\\', ''))
tester('ntpath.split("c:/")', ('c:/', ''))
tester('ntpath.split("//conky/mountpoint/")', ('//conky/mountpoint/', ''))
def test_isabs(self):
tester('ntpath.isabs("c:\\")', 1)
tester('ntpath.isabs("\\\\conky\\mountpoint\\")', 1)
tester('ntpath.isabs("\\foo")', 1)
tester('ntpath.isabs("\\foo\\bar")', 1)
def test_commonprefix(self):
tester('ntpath.commonprefix(["/home/swenson/spam", "/home/swen/spam"])',
"/home/swen")
tester('ntpath.commonprefix(["\\home\\swen\\spam", "\\home\\swen\\eggs"])',
"\\home\\swen\\")
tester('ntpath.commonprefix(["/home/swen/spam", "/home/swen/spam"])',
"/home/swen/spam")
    def test_join(self):
        """join semantics: absolute or drive-changing components reset the
        result; same-drive relative components append; trailing separators
        are preserved."""
        # Degenerate and single-argument cases.
        tester('ntpath.join("")', '')
        tester('ntpath.join("", "", "")', '')
        tester('ntpath.join("a")', 'a')
        tester('ntpath.join("/a")', '/a')
        tester('ntpath.join("\\a")', '\\a')
        tester('ntpath.join("a:")', 'a:')
        tester('ntpath.join("a:", "\\b")', 'a:\\b')
        tester('ntpath.join("a", "\\b")', '\\b')
        tester('ntpath.join("a", "b", "c")', 'a\\b\\c')
        tester('ntpath.join("a\\", "b", "c")', 'a\\b\\c')
        tester('ntpath.join("a", "b\\", "c")', 'a\\b\\c')
        tester('ntpath.join("a", "b", "\\c")', '\\c')
        tester('ntpath.join("d:\\", "\\pleep")', 'd:\\pleep')
        tester('ntpath.join("d:\\", "a", "b")', 'd:\\a\\b')
        # Empty components are dropped, but a trailing empty keeps the "\\".
        tester("ntpath.join('', 'a')", 'a')
        tester("ntpath.join('', '', '', '', 'a')", 'a')
        tester("ntpath.join('a', '')", 'a\\')
        tester("ntpath.join('a', '', '', '', '')", 'a\\')
        tester("ntpath.join('a\\', '')", 'a\\')
        tester("ntpath.join('a\\', '', '', '', '')", 'a\\')
        tester("ntpath.join('a/', '')", 'a/')
        # Relative second argument appends to drive-relative / rooted bases.
        tester("ntpath.join('a/b', 'x/y')", 'a/b\\x/y')
        tester("ntpath.join('/a/b', 'x/y')", '/a/b\\x/y')
        tester("ntpath.join('/a/b/', 'x/y')", '/a/b/x/y')
        tester("ntpath.join('c:', 'x/y')", 'c:x/y')
        tester("ntpath.join('c:a/b', 'x/y')", 'c:a/b\\x/y')
        tester("ntpath.join('c:a/b/', 'x/y')", 'c:a/b/x/y')
        tester("ntpath.join('c:/', 'x/y')", 'c:/x/y')
        tester("ntpath.join('c:/a/b', 'x/y')", 'c:/a/b\\x/y')
        tester("ntpath.join('c:/a/b/', 'x/y')", 'c:/a/b/x/y')
        tester("ntpath.join('//computer/share', 'x/y')", '//computer/share\\x/y')
        tester("ntpath.join('//computer/share/', 'x/y')", '//computer/share/x/y')
        tester("ntpath.join('//computer/share/a/b', 'x/y')", '//computer/share/a/b\\x/y')
        # A rooted second argument keeps the base's drive but replaces the path.
        tester("ntpath.join('a/b', '/x/y')", '/x/y')
        tester("ntpath.join('/a/b', '/x/y')", '/x/y')
        tester("ntpath.join('c:', '/x/y')", 'c:/x/y')
        tester("ntpath.join('c:a/b', '/x/y')", 'c:/x/y')
        tester("ntpath.join('c:/', '/x/y')", 'c:/x/y')
        tester("ntpath.join('c:/a/b', '/x/y')", 'c:/x/y')
        tester("ntpath.join('//computer/share', '/x/y')", '//computer/share/x/y')
        tester("ntpath.join('//computer/share/', '/x/y')", '//computer/share/x/y')
        tester("ntpath.join('//computer/share/a', '/x/y')", '//computer/share/x/y')
        # Same drive letter (case-insensitive) joins; the later case wins.
        tester("ntpath.join('c:', 'C:x/y')", 'C:x/y')
        tester("ntpath.join('c:a/b', 'C:x/y')", 'C:a/b\\x/y')
        tester("ntpath.join('c:/', 'C:x/y')", 'C:/x/y')
        tester("ntpath.join('c:/a/b', 'C:x/y')", 'C:/a/b\\x/y')
        # A different drive/share always resets the result to the second arg.
        for x in ('', 'a/b', '/a/b', 'c:', 'c:a/b', 'c:/', 'c:/a/b',
                  '//computer/share', '//computer/share/', '//computer/share/a/b'):
            for y in ('d:', 'd:x/y', 'd:/', 'd:/x/y',
                      '//machine/common', '//machine/common/', '//machine/common/x/y'):
                tester("ntpath.join(%r, %r)" % (x, y), y)
        tester("ntpath.join('\\\\computer\\share\\', 'a', 'b')", '\\\\computer\\share\\a\\b')
        tester("ntpath.join('\\\\computer\\share', 'a', 'b')", '\\\\computer\\share\\a\\b')
        tester("ntpath.join('\\\\computer\\share', 'a\\b')", '\\\\computer\\share\\a\\b')
        tester("ntpath.join('//computer/share/', 'a', 'b')", '//computer/share/a\\b')
        tester("ntpath.join('//computer/share', 'a', 'b')", '//computer/share\\a\\b')
        tester("ntpath.join('//computer/share', 'a/b')", '//computer/share\\a/b')
    def test_normpath(self):
        """normpath collapses '.', '..', and repeated separators; '..' never
        climbs above a drive or filesystem root; \\\\.\\ and \\\\?\\ device
        prefixes are preserved as-is."""
        tester("ntpath.normpath('A//////././//.//B')", r'A\B')
        tester("ntpath.normpath('A/./B')", r'A\B')
        tester("ntpath.normpath('A/foo/../B')", r'A\B')
        tester("ntpath.normpath('C:A//B')", r'C:A\B')
        tester("ntpath.normpath('D:A/./B')", r'D:A\B')
        tester("ntpath.normpath('e:A/foo/../B')", r'e:A\B')
        tester("ntpath.normpath('C:///A//B')", r'C:\A\B')
        tester("ntpath.normpath('D:///A/./B')", r'D:\A\B')
        tester("ntpath.normpath('e:///A/foo/../B')", r'e:\A\B')
        tester("ntpath.normpath('..')", r'..')
        tester("ntpath.normpath('.')", r'.')
        tester("ntpath.normpath('')", r'.')
        tester("ntpath.normpath('/')", '\\')
        tester("ntpath.normpath('c:/')", 'c:\\')
        # Excess '..' components are dropped at an absolute root...
        tester("ntpath.normpath('/../.././..')", '\\')
        tester("ntpath.normpath('c:/../../..')", 'c:\\')
        # ...but kept for relative paths, where they remain meaningful.
        tester("ntpath.normpath('../.././..')", r'..\..\..')
        tester("ntpath.normpath('K:../.././..')", r'K:..\..\..')
        tester("ntpath.normpath('C:////a/b')", r'C:\a\b')
        tester("ntpath.normpath('//machine/share//a/b')", r'\\machine\share\a\b')
        tester("ntpath.normpath('\\\\.\\NUL')", r'\\.\NUL')
        tester("ntpath.normpath('\\\\?\\D:/XY\\Z')", r'\\?\D:/XY\Z')
def test_realpath_curdir(self):
expected = ntpath.normpath(os.getcwd())
tester("ntpath.realpath('.')", expected)
tester("ntpath.realpath('./.')", expected)
tester("ntpath.realpath('/'.join(['.'] * 100))", expected)
tester("ntpath.realpath('.\\.')", expected)
tester("ntpath.realpath('\\'.join(['.'] * 100))", expected)
    def test_realpath_pardir(self):
        """realpath resolves '..' chains; excess '..' stops at the drive root."""
        expected = ntpath.normpath(os.getcwd())
        tester("ntpath.realpath('..')", ntpath.dirname(expected))
        tester("ntpath.realpath('../..')",
               ntpath.dirname(ntpath.dirname(expected)))
        # 50 levels of '..' cannot climb above the drive root.
        tester("ntpath.realpath('/'.join(['..'] * 50))",
               ntpath.splitdrive(expected)[0] + '\\')
        tester("ntpath.realpath('..\\..')",
               ntpath.dirname(ntpath.dirname(expected)))
        tester("ntpath.realpath('\\'.join(['..'] * 50))",
               ntpath.splitdrive(expected)[0] + '\\')
    @support.skip_unless_symlink
    @unittest.skipUnless(HAVE_GETFINALPATHNAME, 'need _getfinalpathname')
    def test_realpath_basic(self):
        """A symlink to a real file resolves to its target (str and bytes)."""
        ABSTFN = ntpath.abspath(support.TESTFN)
        open(ABSTFN, "wb").close()
        self.addCleanup(support.unlink, ABSTFN)
        self.addCleanup(support.unlink, ABSTFN + "1")
        os.symlink(ABSTFN, ABSTFN + "1")
        self.assertPathEqual(ntpath.realpath(ABSTFN + "1"), ABSTFN)
        self.assertPathEqual(ntpath.realpath(os.fsencode(ABSTFN + "1")),
                             os.fsencode(ABSTFN))
    @support.skip_unless_symlink
    @unittest.skipUnless(HAVE_GETFINALPATHNAME, 'need _getfinalpathname')
    def test_realpath_relative(self):
        """A relative symlink also resolves to the absolute target."""
        ABSTFN = ntpath.abspath(support.TESTFN)
        open(ABSTFN, "wb").close()
        self.addCleanup(support.unlink, ABSTFN)
        self.addCleanup(support.unlink, ABSTFN + "1")
        os.symlink(ABSTFN, ntpath.relpath(ABSTFN + "1"))
        self.assertPathEqual(ntpath.realpath(ABSTFN + "1"), ABSTFN)
    @support.skip_unless_symlink
    @unittest.skipUnless(HAVE_GETFINALPATHNAME, 'need _getfinalpathname')
    def test_realpath_broken_symlinks(self):
        """Dangling symlinks resolve as far as possible; the unresolvable
        tail is appended to the deepest real directory reached."""
        ABSTFN = ntpath.abspath(support.TESTFN)
        os.mkdir(ABSTFN)
        self.addCleanup(support.rmtree, ABSTFN)
        with support.change_cwd(ABSTFN):
            # Build a small tree of self/parent/broken links to exercise
            # every combination of resolvable prefix + dangling suffix.
            os.mkdir("subdir")
            os.chdir("subdir")
            os.symlink(".", "recursive")
            os.symlink("..", "parent")
            os.chdir("..")
            os.symlink(".", "self")
            os.symlink("missing", "broken")
            os.symlink(r"broken\bar", "broken1")
            os.symlink(r"self\self\broken", "broken2")
            os.symlink(r"subdir\parent\subdir\parent\broken", "broken3")
            os.symlink(ABSTFN + r"\broken", "broken4")
            os.symlink(r"recursive\..\broken", "broken5")
            self.assertPathEqual(ntpath.realpath("broken"),
                                 ABSTFN + r"\missing")
            self.assertPathEqual(ntpath.realpath(r"broken\foo"),
                                 ABSTFN + r"\missing\foo")
            # bpo-38453: We no longer recursively resolve segments of relative
            # symlinks that the OS cannot resolve.
            self.assertPathEqual(ntpath.realpath(r"broken1"),
                                 ABSTFN + r"\broken\bar")
            self.assertPathEqual(ntpath.realpath(r"broken1\baz"),
                                 ABSTFN + r"\broken\bar\baz")
            self.assertPathEqual(ntpath.realpath("broken2"),
                                 ABSTFN + r"\self\self\missing")
            self.assertPathEqual(ntpath.realpath("broken3"),
                                 ABSTFN + r"\subdir\parent\subdir\parent\missing")
            self.assertPathEqual(ntpath.realpath("broken4"),
                                 ABSTFN + r"\missing")
            self.assertPathEqual(ntpath.realpath("broken5"),
                                 ABSTFN + r"\missing")
            # Same expectations with bytes paths.
            self.assertPathEqual(ntpath.realpath(b"broken"),
                                 os.fsencode(ABSTFN + r"\missing"))
            self.assertPathEqual(ntpath.realpath(rb"broken\foo"),
                                 os.fsencode(ABSTFN + r"\missing\foo"))
            self.assertPathEqual(ntpath.realpath(rb"broken1"),
                                 os.fsencode(ABSTFN + r"\broken\bar"))
            self.assertPathEqual(ntpath.realpath(rb"broken1\baz"),
                                 os.fsencode(ABSTFN + r"\broken\bar\baz"))
            self.assertPathEqual(ntpath.realpath(b"broken2"),
                                 os.fsencode(ABSTFN + r"\self\self\missing"))
            self.assertPathEqual(ntpath.realpath(rb"broken3"),
                                 os.fsencode(ABSTFN + r"\subdir\parent\subdir\parent\missing"))
            self.assertPathEqual(ntpath.realpath(b"broken4"),
                                 os.fsencode(ABSTFN + r"\missing"))
            self.assertPathEqual(ntpath.realpath(b"broken5"),
                                 os.fsencode(ABSTFN + r"\missing"))
    @support.skip_unless_symlink
    @unittest.skipUnless(HAVE_GETFINALPATHNAME, 'need _getfinalpathname')
    def test_realpath_symlink_loops(self):
        # Symlink loops are non-deterministic as to which path is returned, but
        # it will always be the fully resolved path of one member of the cycle
        ABSTFN = ntpath.abspath(support.TESTFN)
        self.addCleanup(support.unlink, ABSTFN)
        self.addCleanup(support.unlink, ABSTFN + "1")
        self.addCleanup(support.unlink, ABSTFN + "2")
        self.addCleanup(support.unlink, ABSTFN + "y")
        self.addCleanup(support.unlink, ABSTFN + "c")
        self.addCleanup(support.unlink, ABSTFN + "a")
        # A link pointing at itself resolves to itself.
        os.symlink(ABSTFN, ABSTFN)
        self.assertPathEqual(ntpath.realpath(ABSTFN), ABSTFN)
        # A two-link cycle resolves to one of its members.
        os.symlink(ABSTFN + "1", ABSTFN + "2")
        os.symlink(ABSTFN + "2", ABSTFN + "1")
        expected = (ABSTFN + "1", ABSTFN + "2")
        self.assertPathIn(ntpath.realpath(ABSTFN + "1"), expected)
        self.assertPathIn(ntpath.realpath(ABSTFN + "2"), expected)
        self.assertPathIn(ntpath.realpath(ABSTFN + "1\\x"),
                          (ntpath.join(r, "x") for r in expected))
        # '..' past a loop member is resolved lexically against its parent.
        self.assertPathEqual(ntpath.realpath(ABSTFN + "1\\.."),
                             ntpath.dirname(ABSTFN))
        self.assertPathEqual(ntpath.realpath(ABSTFN + "1\\..\\x"),
                             ntpath.dirname(ABSTFN) + "\\x")
        os.symlink(ABSTFN + "x", ABSTFN + "y")
        self.assertPathEqual(ntpath.realpath(ABSTFN + "1\\..\\"
                                             + ntpath.basename(ABSTFN) + "y"),
                             ABSTFN + "x")
        self.assertPathIn(ntpath.realpath(ABSTFN + "1\\..\\"
                                          + ntpath.basename(ABSTFN) + "1"),
                          expected)
        # A relative link into itself resolves to the link path.
        os.symlink(ntpath.basename(ABSTFN) + "a\\b", ABSTFN + "a")
        self.assertPathEqual(ntpath.realpath(ABSTFN + "a"), ABSTFN + "a")
        os.symlink("..\\" + ntpath.basename(ntpath.dirname(ABSTFN))
                   + "\\" + ntpath.basename(ABSTFN) + "c", ABSTFN + "c")
        self.assertPathEqual(ntpath.realpath(ABSTFN + "c"), ABSTFN + "c")
        # Test using relative path as well.
        self.assertPathEqual(ntpath.realpath(ntpath.basename(ABSTFN)), ABSTFN)
    @support.skip_unless_symlink
    @unittest.skipUnless(HAVE_GETFINALPATHNAME, 'need _getfinalpathname')
    def test_realpath_symlink_prefix(self):
        """realpath keeps the \\\\?\\ extended prefix when the input has it,
        and omits it when the input does not."""
        ABSTFN = ntpath.abspath(support.TESTFN)
        self.addCleanup(support.unlink, ABSTFN + "3")
        self.addCleanup(support.unlink, "\\\\?\\" + ABSTFN + "3.")
        self.addCleanup(support.unlink, ABSTFN + "3link")
        self.addCleanup(support.unlink, ABSTFN + "3.link")
        with open(ABSTFN + "3", "wb") as f:
            f.write(b'0')
        os.symlink(ABSTFN + "3", ABSTFN + "3link")
        # A trailing-dot name is only creatable via the \\?\ prefix.
        with open("\\\\?\\" + ABSTFN + "3.", "wb") as f:
            f.write(b'1')
        os.symlink("\\\\?\\" + ABSTFN + "3.", ABSTFN + "3.link")
        self.assertPathEqual(ntpath.realpath(ABSTFN + "3link"),
                             ABSTFN + "3")
        self.assertPathEqual(ntpath.realpath(ABSTFN + "3.link"),
                             "\\\\?\\" + ABSTFN + "3.")
        # Resolved paths should be usable to open target files
        with open(ntpath.realpath(ABSTFN + "3link"), "rb") as f:
            self.assertEqual(f.read(), b'0')
        with open(ntpath.realpath(ABSTFN + "3.link"), "rb") as f:
            self.assertEqual(f.read(), b'1')
        # When the prefix is included, it is not stripped
        self.assertPathEqual(ntpath.realpath("\\\\?\\" + ABSTFN + "3link"),
                             "\\\\?\\" + ABSTFN + "3")
        self.assertPathEqual(ntpath.realpath("\\\\?\\" + ABSTFN + "3.link"),
                             "\\\\?\\" + ABSTFN + "3.")
@unittest.skipUnless(HAVE_GETFINALPATHNAME, 'need _getfinalpathname')
def test_realpath_nul(self):
tester("ntpath.realpath('NUL')", r'\\.\NUL')
    @unittest.skipUnless(HAVE_GETFINALPATHNAME, 'need _getfinalpathname')
    @unittest.skipUnless(HAVE_GETSHORTPATHNAME, 'need _getshortpathname')
    def test_realpath_cwd(self):
        """realpath expands an 8.3 short name to the long form, including
        when the short name comes in via the current working directory."""
        ABSTFN = ntpath.abspath(support.TESTFN)
        support.unlink(ABSTFN)
        support.rmtree(ABSTFN)
        os.mkdir(ABSTFN)
        self.addCleanup(support.rmtree, ABSTFN)
        test_dir_long = ntpath.join(ABSTFN, "MyVeryLongDirectoryName")
        os.mkdir(test_dir_long)
        test_dir_short = _getshortpathname(test_dir_long)
        test_file_long = ntpath.join(test_dir_long, "file.txt")
        test_file_short = ntpath.join(test_dir_short, "file.txt")
        with open(test_file_long, "wb") as f:
            f.write(b"content")
        self.assertPathEqual(test_file_long, ntpath.realpath(test_file_short))
        # Resolve a bare relative name from long, lowercased-long, and
        # short spellings of the cwd — all must yield the long path.
        with support.change_cwd(test_dir_long):
            self.assertPathEqual(test_file_long, ntpath.realpath("file.txt"))
        with support.change_cwd(test_dir_long.lower()):
            self.assertPathEqual(test_file_long, ntpath.realpath("file.txt"))
        with support.change_cwd(test_dir_short):
            self.assertPathEqual(test_file_long, ntpath.realpath("file.txt"))
    def test_expandvars(self):
        """expandvars substitutes $var, ${var}, and %var% forms; unknown
        names and malformed references are left untouched."""
        with support.EnvironmentVarGuard() as env:
            env.clear()
            env["foo"] = "bar"
            env["{foo"] = "baz1"
            env["{foo}"] = "baz2"
            tester('ntpath.expandvars("foo")', "foo")
            tester('ntpath.expandvars("$foo bar")', "bar bar")
            tester('ntpath.expandvars("${foo}bar")', "barbar")
            tester('ntpath.expandvars("$[foo]bar")', "$[foo]bar")
            tester('ntpath.expandvars("$bar bar")', "$bar bar")
            tester('ntpath.expandvars("$?bar")', "$?bar")
            tester('ntpath.expandvars("$foo}bar")', "bar}bar")
            tester('ntpath.expandvars("${foo")', "${foo")
            tester('ntpath.expandvars("${{foo}}")', "baz1}")
            tester('ntpath.expandvars("$foo$foo")', "barbar")
            tester('ntpath.expandvars("$bar$bar")', "$bar$bar")
            tester('ntpath.expandvars("%foo% bar")', "bar bar")
            tester('ntpath.expandvars("%foo%bar")', "barbar")
            tester('ntpath.expandvars("%foo%%foo%")', "barbar")
            tester('ntpath.expandvars("%%foo%%foo%foo%")', "%foo%foobar")
            tester('ntpath.expandvars("%?bar%")', "%?bar%")
            tester('ntpath.expandvars("%foo%%bar")', "bar%bar")
            tester('ntpath.expandvars("\'%foo%\'%bar")', "\'%foo%\'%bar")
            tester('ntpath.expandvars("bar\'%foo%")', "bar\'%foo%")
    @unittest.skipUnless(support.FS_NONASCII, 'need support.FS_NONASCII')
    def test_expandvars_nonascii(self):
        # Same expansion forms as test_expandvars, but with a non-ASCII
        # variable name and value that must survive filesystem encoding.
        def check(value, expected):
            tester('ntpath.expandvars(%r)' % value, expected)
        with support.EnvironmentVarGuard() as env:
            env.clear()
            nonascii = support.FS_NONASCII
            env['spam'] = nonascii
            env[nonascii] = 'ham' + nonascii
            check('$spam bar', '%s bar' % nonascii)
            check('$%s bar' % nonascii, '$%s bar' % nonascii)
            check('${spam}bar', '%sbar' % nonascii)
            check('${%s}bar' % nonascii, 'ham%sbar' % nonascii)
            check('$spam}bar', '%s}bar' % nonascii)
            check('$%s}bar' % nonascii, '$%s}bar' % nonascii)
            check('%spam% bar', '%s bar' % nonascii)
            check('%{}% bar'.format(nonascii), 'ham%s bar' % nonascii)
            check('%spam%bar', '%sbar' % nonascii)
            check('%{}%bar'.format(nonascii), 'ham%sbar' % nonascii)
    def test_expanduser(self):
        # expanduser resolution order on Windows: HOMEDRIVE+HOMEPATH, then
        # USERPROFILE; HOME must be ignored (bpo-36264).  "~user" swaps the
        # last component of the home directory for the user name.
        tester('ntpath.expanduser("test")', 'test')
        with support.EnvironmentVarGuard() as env:
            env.clear()
            tester('ntpath.expanduser("~test")', '~test')
            env['HOMEPATH'] = 'eric\\idle'
            env['HOMEDRIVE'] = 'C:\\'
            tester('ntpath.expanduser("~test")', 'C:\\eric\\test')
            tester('ntpath.expanduser("~")', 'C:\\eric\\idle')
            del env['HOMEDRIVE']
            tester('ntpath.expanduser("~test")', 'eric\\test')
            tester('ntpath.expanduser("~")', 'eric\\idle')
            env.clear()
            env['USERPROFILE'] = 'C:\\eric\\idle'
            tester('ntpath.expanduser("~test")', 'C:\\eric\\test')
            tester('ntpath.expanduser("~")', 'C:\\eric\\idle')
            tester('ntpath.expanduser("~test\\foo\\bar")',
                   'C:\\eric\\test\\foo\\bar')
            tester('ntpath.expanduser("~test/foo/bar")',
                   'C:\\eric\\test/foo/bar')
            tester('ntpath.expanduser("~\\foo\\bar")',
                   'C:\\eric\\idle\\foo\\bar')
            tester('ntpath.expanduser("~/foo/bar")',
                   'C:\\eric\\idle/foo/bar')
            # bpo-36264: ignore `HOME` when set on windows
            env.clear()
            env['HOME'] = 'F:\\'
            env['USERPROFILE'] = 'C:\\eric\\idle'
            tester('ntpath.expanduser("~test")', 'C:\\eric\\test')
            tester('ntpath.expanduser("~")', 'C:\\eric\\idle')
    @unittest.skipUnless(nt, "abspath requires 'nt' module")
    def test_abspath(self):
        # abspath must keep trailing spaces/question marks (bpo-31047) and
        # resolve rooted paths against the drive of the cwd.
        tester('ntpath.abspath("C:\\")', "C:\\")
        with support.temp_cwd(support.TESTFN) as cwd_dir:  # bpo-31047
            tester('ntpath.abspath("")', cwd_dir)
            tester('ntpath.abspath(" ")', cwd_dir + "\\ ")
            tester('ntpath.abspath("?")', cwd_dir + "\\?")
            drive, _ = ntpath.splitdrive(cwd_dir)
            tester('ntpath.abspath("/abc/")', drive + "\\abc")
    def test_relpath(self):
        # relpath with relative, absolute, drive-qualified and UNC inputs;
        # drive comparison is case-insensitive (last case).
        tester('ntpath.relpath("a")', 'a')
        tester('ntpath.relpath(ntpath.abspath("a"))', 'a')
        tester('ntpath.relpath("a/b")', 'a\\b')
        tester('ntpath.relpath("../a/b")', '..\\a\\b')
        with support.temp_cwd(support.TESTFN) as cwd_dir:
            currentdir = ntpath.basename(cwd_dir)
            tester('ntpath.relpath("a", "../b")', '..\\'+currentdir+'\\a')
            tester('ntpath.relpath("a/b", "../c")', '..\\'+currentdir+'\\a\\b')
        tester('ntpath.relpath("a", "b/c")', '..\\..\\a')
        tester('ntpath.relpath("c:/foo/bar/bat", "c:/x/y")', '..\\..\\foo\\bar\\bat')
        tester('ntpath.relpath("//conky/mountpoint/a", "//conky/mountpoint/b/c")', '..\\..\\a')
        tester('ntpath.relpath("a", "a")', '.')
        tester('ntpath.relpath("/foo/bar/bat", "/x/y/z")', '..\\..\\..\\foo\\bar\\bat')
        tester('ntpath.relpath("/foo/bar/bat", "/foo/bar")', 'bat')
        tester('ntpath.relpath("/foo/bar/bat", "/")', 'foo\\bar\\bat')
        tester('ntpath.relpath("/", "/foo/bar/bat")', '..\\..\\..')
        tester('ntpath.relpath("/foo/bar/bat", "/x")', '..\\foo\\bar\\bat')
        tester('ntpath.relpath("/x", "/foo/bar/bat")', '..\\..\\..\\x')
        tester('ntpath.relpath("/", "/")', '.')
        tester('ntpath.relpath("/a", "/a")', '.')
        tester('ntpath.relpath("/a/b", "/a/b")', '.')
        tester('ntpath.relpath("c:/foo", "C:/FOO")', '.')
    def test_commonpath(self):
        # commonpath: rejects empty input and mixtures of absolute /
        # relative / drive-relative paths (ValueError) or of str and bytes
        # (TypeError); compares drives case-insensitively; normalizes
        # separators and '.' components.
        def check(paths, expected):
            tester(('ntpath.commonpath(%r)' % paths).replace('\\\\', '\\'),
                   expected)
        def check_error(exc, paths):
            # Each bad input must fail identically for str and bytes paths.
            self.assertRaises(exc, ntpath.commonpath, paths)
            self.assertRaises(exc, ntpath.commonpath,
                              [os.fsencode(p) for p in paths])
        self.assertRaises(ValueError, ntpath.commonpath, [])
        check_error(ValueError, ['C:\\Program Files', 'Program Files'])
        check_error(ValueError, ['C:\\Program Files', 'C:Program Files'])
        check_error(ValueError, ['\\Program Files', 'Program Files'])
        check_error(ValueError, ['Program Files', 'C:\\Program Files'])
        check(['C:\\Program Files'], 'C:\\Program Files')
        check(['C:\\Program Files', 'C:\\Program Files'], 'C:\\Program Files')
        check(['C:\\Program Files\\', 'C:\\Program Files'],
              'C:\\Program Files')
        check(['C:\\Program Files\\', 'C:\\Program Files\\'],
              'C:\\Program Files')
        check(['C:\\\\Program Files', 'C:\\Program Files\\\\'],
              'C:\\Program Files')
        check(['C:\\.\\Program Files', 'C:\\Program Files\\.'],
              'C:\\Program Files')
        check(['C:\\', 'C:\\bin'], 'C:\\')
        check(['C:\\Program Files', 'C:\\bin'], 'C:\\')
        check(['C:\\Program Files', 'C:\\Program Files\\Bar'],
              'C:\\Program Files')
        check(['C:\\Program Files\\Foo', 'C:\\Program Files\\Bar'],
              'C:\\Program Files')
        check(['C:\\Program Files', 'C:\\Projects'], 'C:\\')
        check(['C:\\Program Files\\', 'C:\\Projects'], 'C:\\')
        check(['C:\\Program Files\\Foo', 'C:/Program Files/Bar'],
              'C:\\Program Files')
        check(['C:\\Program Files\\Foo', 'c:/program files/bar'],
              'C:\\Program Files')
        check(['c:/program files/bar', 'C:\\Program Files\\Foo'],
              'c:\\program files')
        check_error(ValueError, ['C:\\Program Files', 'D:\\Program Files'])
        check(['spam'], 'spam')
        check(['spam', 'spam'], 'spam')
        check(['spam', 'alot'], '')
        check(['and\\jam', 'and\\spam'], 'and')
        check(['and\\\\jam', 'and\\spam\\\\'], 'and')
        check(['and\\.\\jam', '.\\and\\spam'], 'and')
        check(['and\\jam', 'and\\spam', 'alot'], '')
        check(['and\\jam', 'and\\spam', 'and'], 'and')
        check(['C:and\\jam', 'C:and\\spam'], 'C:and')
        check([''], '')
        check(['', 'spam\\alot'], '')
        check_error(ValueError, ['', '\\spam\\alot'])
        self.assertRaises(TypeError, ntpath.commonpath,
                          [b'C:\\Program Files', 'C:\\Program Files\\Foo'])
        self.assertRaises(TypeError, ntpath.commonpath,
                          [b'C:\\Program Files', 'Program Files\\Foo'])
        self.assertRaises(TypeError, ntpath.commonpath,
                          [b'Program Files', 'C:\\Program Files\\Foo'])
        self.assertRaises(TypeError, ntpath.commonpath,
                          ['C:\\Program Files', b'C:\\Program Files\\Foo'])
        self.assertRaises(TypeError, ntpath.commonpath,
                          ['C:\\Program Files', b'Program Files\\Foo'])
        self.assertRaises(TypeError, ntpath.commonpath,
                          ['Program Files', b'C:\\Program Files\\Foo'])
    def test_sameopenfile(self):
        # sameopenfile compares the underlying OS file of two descriptors.
        with TemporaryFile() as tf1, TemporaryFile() as tf2:
            # Make sure the same file is really the same
            self.assertTrue(ntpath.sameopenfile(tf1.fileno(), tf1.fileno()))
            # Make sure different files are really different
            self.assertFalse(ntpath.sameopenfile(tf1.fileno(), tf2.fileno()))
            # Make sure invalid values don't cause issues on win32
            if sys.platform == "win32":
                with self.assertRaises(OSError):
                    # Invalid file descriptors shouldn't display assert
                    # dialogs (#4804)
                    ntpath.sameopenfile(-1, -1)
    def test_ismount(self):
        # Drive roots (both separator styles, str and bytes, and via the
        # \\.\ device namespace) and UNC share roots are mount points;
        # ordinary directories and drive-relative paths are not.
        self.assertTrue(ntpath.ismount("c:\\"))
        self.assertTrue(ntpath.ismount("C:\\"))
        self.assertTrue(ntpath.ismount("c:/"))
        self.assertTrue(ntpath.ismount("C:/"))
        self.assertTrue(ntpath.ismount("\\\\.\\c:\\"))
        self.assertTrue(ntpath.ismount("\\\\.\\C:\\"))
        self.assertTrue(ntpath.ismount(b"c:\\"))
        self.assertTrue(ntpath.ismount(b"C:\\"))
        self.assertTrue(ntpath.ismount(b"c:/"))
        self.assertTrue(ntpath.ismount(b"C:/"))
        self.assertTrue(ntpath.ismount(b"\\\\.\\c:\\"))
        self.assertTrue(ntpath.ismount(b"\\\\.\\C:\\"))
        with support.temp_dir() as d:
            self.assertFalse(ntpath.ismount(d))
        if sys.platform == "win32":
            #
            # Make sure the current folder isn't the root folder
            # (or any other volume root). The drive-relative
            # locations below cannot then refer to mount points
            #
            drive, path = ntpath.splitdrive(sys.executable)
            with support.change_cwd(ntpath.dirname(sys.executable)):
                self.assertFalse(ntpath.ismount(drive.lower()))
                self.assertFalse(ntpath.ismount(drive.upper()))
            self.assertTrue(ntpath.ismount("\\\\localhost\\c$"))
            self.assertTrue(ntpath.ismount("\\\\localhost\\c$\\"))
            self.assertTrue(ntpath.ismount(b"\\\\localhost\\c$"))
            self.assertTrue(ntpath.ismount(b"\\\\localhost\\c$\\"))
def assertEqualCI(self, s1, s2):
"""Assert that two strings are equal ignoring case differences."""
self.assertEqual(s1.lower(), s2.lower())
    @unittest.skipUnless(nt, "OS helpers require 'nt' module")
    def test_nt_helpers(self):
        # Trivial validation that the helpers do not break, and support both
        # unicode and bytes (UTF-8) paths
        executable = nt._getfinalpathname(sys.executable)
        for path in executable, os.fsencode(executable):
            volume_path = nt._getvolumepathname(path)
            path_drive = ntpath.splitdrive(path)[0]
            volume_path_drive = ntpath.splitdrive(volume_path)[0]
            # Drives may differ only in case between the two helpers.
            self.assertEqualCI(path_drive, volume_path_drive)
        cap, free = nt._getdiskusage(sys.exec_prefix)
        self.assertGreater(cap, 0)
        self.assertGreater(free, 0)
        b_cap, b_free = nt._getdiskusage(sys.exec_prefix.encode())
        # Free space may change, so only test the capacity is equal
        self.assertEqual(b_cap, cap)
        self.assertGreater(b_free, 0)
        for path in [sys.prefix, sys.executable]:
            # str input yields str, bytes input yields bytes.
            final_path = nt._getfinalpathname(path)
            self.assertIsInstance(final_path, str)
            self.assertGreater(len(final_path), 0)
            b_final_path = nt._getfinalpathname(path.encode())
            self.assertIsInstance(b_final_path, bytes)
            self.assertGreater(len(b_final_path), 0)
class NtCommonTest(test_genericpath.CommonTest, unittest.TestCase):
    # Run the generic-path common tests against ntpath; ``attributes``
    # lists the extra pathmodule members the common tests should check.
    pathmodule = ntpath
    attributes = ['relpath']
class PathLikeTests(NtpathTestCase):
    # Verify each ntpath function accepts os.PathLike objects by comparing
    # its result on a FakePath wrapper with its result on the plain string.
    path = ntpath
    def setUp(self):
        self.file_name = support.TESTFN.lower()
        self.file_path = FakePath(support.TESTFN)
        self.addCleanup(support.unlink, self.file_name)
        # 'xb' with buffering=0: fail if the file already exists.
        with open(self.file_name, 'xb', 0) as file:
            file.write(b"test_ntpath.PathLikeTests")
    def _check_function(self, func):
        # func(PathLike) must match func(str) (paths compared
        # case-insensitively by assertPathEqual).
        self.assertPathEqual(func(self.file_path), func(self.file_name))
    def test_path_normcase(self):
        self._check_function(self.path.normcase)
    def test_path_isabs(self):
        self._check_function(self.path.isabs)
    def test_path_join(self):
        self.assertEqual(self.path.join('a', FakePath('b'), 'c'),
                         self.path.join('a', 'b', 'c'))
    def test_path_split(self):
        self._check_function(self.path.split)
    def test_path_splitext(self):
        self._check_function(self.path.splitext)
    def test_path_splitdrive(self):
        self._check_function(self.path.splitdrive)
    def test_path_basename(self):
        self._check_function(self.path.basename)
    def test_path_dirname(self):
        self._check_function(self.path.dirname)
    def test_path_islink(self):
        self._check_function(self.path.islink)
    def test_path_lexists(self):
        self._check_function(self.path.lexists)
    def test_path_ismount(self):
        self._check_function(self.path.ismount)
    def test_path_expanduser(self):
        self._check_function(self.path.expanduser)
    def test_path_expandvars(self):
        self._check_function(self.path.expandvars)
    def test_path_normpath(self):
        self._check_function(self.path.normpath)
    def test_path_abspath(self):
        self._check_function(self.path.abspath)
    def test_path_realpath(self):
        self._check_function(self.path.realpath)
    def test_path_relpath(self):
        self._check_function(self.path.relpath)
    def test_path_commonpath(self):
        common_path = self.path.commonpath([self.file_path, self.file_name])
        self.assertPathEqual(common_path, self.file_name)
    def test_path_isdir(self):
        self._check_function(self.path.isdir)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| 44.957447
| 95
| 0.549845
|
4a02dc20518bbc61340d97d696d69e3e4d948f33
| 3,878
|
py
|
Python
|
Learn/RawPacketSniffing.py
|
Skadiia/Scripts
|
9d6c04b2ef84144bc7015d317e4e8c75e9cebd22
|
[
"MIT"
] | null | null | null |
Learn/RawPacketSniffing.py
|
Skadiia/Scripts
|
9d6c04b2ef84144bc7015d317e4e8c75e9cebd22
|
[
"MIT"
] | null | null | null |
Learn/RawPacketSniffing.py
|
Skadiia/Scripts
|
9d6c04b2ef84144bc7015d317e4e8c75e9cebd22
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# coding: utf-8
"""
Simple raw packet sniffer for learning
Actually supprt TCP & ICMP unpacking
"""
import struct
import socket
import binascii
import signal
# ANSI escape codes used by logger() to colour the log-level prefixes;
# "blank" resets the terminal colour.
colors = {"red": "\033[31m", "green": "\033[32m", "yellow": "\033[33m", "blue": "\033[34m", "blank": "\033[0m"}
def ctrlc_handler(signum, frm):
    """Handle SIGINT (Ctrl-C): log a shutdown message and exit with status 0.

    ``signum`` and ``frm`` are the standard signal-handler arguments
    (unused here).
    """
    logger("Shutting down program...", "err")
    # ``exit()`` is injected by the ``site`` module for interactive use and
    # may be absent when Python runs with -S; raising SystemExit directly is
    # the reliable, behaviourally identical way to terminate a program.
    raise SystemExit(0)
def formatmac(macAddr):
    """Format a 12-hex-digit MAC address as colon-separated octet pairs.

    Accepts either ``bytes`` (as produced by ``binascii.hexlify``) or ``str``.
    The previous implementation called ``str()`` on the bytes object, which
    kept the ``b'`` repr prefix and shifted every octet boundary
    (e.g. ``"b':aa:bb:cc:dd:ee"`` instead of ``"aa:bb:cc:dd:ee:ff"``).
    """
    digits = macAddr.decode("ascii") if isinstance(macAddr, bytes) else str(macAddr)
    return ':'.join(digits[i:i + 2] for i in range(0, 12, 2))
def logger(msg, msgtype=''):
    """Print *msg*, prefixed with a coloured tag selected by *msgtype*.

    Known types: 'info' -> [*], 'err' -> [X], 'success' -> [+];
    anything else prints the message unadorned.
    """
    prefixes = {
        'info': colors["yellow"] + "[*] " + colors["blank"],
        'err': colors["red"] + "[X] " + colors["blank"],
        'success': colors["green"] + "[+] " + colors["blank"],
    }
    print(prefixes.get(msgtype, '') + msg)
def unpackICMP(pkt):
    # Decode the ICMP header from the raw Ethernet frame in pkt[0]:
    # bytes 34-38 (after the 14-byte Ethernet + 20-byte IP headers) hold
    # type (B), code (B) and checksum (H), all big-endian.
    # NOTE(review): assumes a 20-byte IP header with no options — confirm.
    icmpHeader = pkt[0][34:38]
    icmp_hdr = struct.unpack("!BBH", icmpHeader)
    icmpType = icmp_hdr[0]
    icmpCode = icmp_hdr[1]
    icmpCRC = icmp_hdr[2]
    Data = pkt[0][38:]
    logger(''' ICMP Type : %d
    ICMP Code : %d
    CRC : %d
    Data = %s
    ''' % (icmpType, icmpCode, icmpCRC, Data))
def unpackTCP(pkt):
    # Decode the first 20 bytes of the TCP header (offset 34 in the frame):
    # source port (H), destination port (H); the remaining 16 header bytes
    # (seq/ack/flags/window/checksum/urgent) are read but not interpreted.
    # NOTE(review): Data at offset 54 assumes no IP or TCP options — confirm.
    tcpHeader = pkt[0][34:54]
    tcp_hdr = struct.unpack("!HH16s", tcpHeader)
    sourcePort = tcp_hdr[0]
    destPort = tcp_hdr[1]
    Data = pkt[0][54:]
    logger(''' Source port : %d
    Dest port : %d
    Data = %s
    ''' % (sourcePort, destPort, Data))
def main():
    """Sniff raw IP packets forever, dumping Ethernet/IP and TCP/ICMP info.

    Requires a PF_PACKET raw socket (Linux; typically needs root or
    CAP_NET_RAW).  Runs until interrupted; Ctrl-C is handled by
    ``ctrlc_handler``.
    """
    signal.signal(signal.SIGINT, ctrlc_handler)
    count = 0
    # htons reference the type of packet we want to intercept here Internet Protocol Packet (IP) all are in
    # /usr/include/linux/if_ether.h
    logger("Creating RAW socket...", "info")
    try:
        rawsocket = socket.socket(socket.PF_PACKET, socket.SOCK_RAW, socket.htons(0x0800))
        logger("RAW socket created successfully !", "success")
    except OSError:
        # The original bare ``except`` swallowed the failure and then
        # crashed below with a NameError on the undefined ``rawsocket``;
        # terminate cleanly instead.
        logger("Failed to create RAW socket!", "err")
        raise SystemExit(1)
    while True:
        pkt = rawsocket.recvfrom(65535)
        print(colors["yellow"] + "========== Packet n°%d ==========" % count + colors["blank"])
        #####################################################
        # Ethernet Info
        #####################################################
        # First 14 bytes: destination MAC (6s), source MAC (6s), EtherType (2s).
        ethHeader = pkt[0][:14]
        eth_hdr = struct.unpack("!6s6s2s", ethHeader)
        macSource = formatmac(binascii.hexlify(eth_hdr[0]))
        macDest = formatmac(binascii.hexlify(eth_hdr[1]))
        ethType = binascii.hexlify(eth_hdr[2])
        logger("Source ETH : %s Dest ETH : %s Type ETH : %s" % (macSource, macDest, ethType))
        #####################################################
        # IP Info
        #####################################################
        # Next 20 bytes: version/IHL (B), TOS (B), total length (H),
        # 5 skipped bytes, protocol (B), checksum (2s), src IP, dst IP.
        ipHeader = pkt[0][14:34]
        ip_hdr = struct.unpack("!BBH5sB2s4s4s", ipHeader)
        ipVersion = ip_hdr[0] >> 4
        ipHdrLen = ip_hdr[0] & 0x0f
        ipLen = ip_hdr[2]
        ipSource = socket.inet_ntoa(ip_hdr[6])
        ipDest = socket.inet_ntoa(ip_hdr[7])
        proto = ip_hdr[4]
        print('IP LENGTH = %d HEADER LENGTH = %d' % (ipLen, ipHdrLen))
        print("IP VERSION = %d" % ipVersion)
        logger("Source IP : %s Dest IP : %s Proto type : %s" % (ipSource, ipDest, proto))
        # IP protocol numbers: 1 = ICMP, 6 = TCP.
        if proto == 1:
            logger("ICMP packet found ! Unpacking...", "info")
            unpackICMP(pkt)
            logger("ICMP information successfully retrieved !", "success")
        elif proto == 6:
            logger("TCP packet found ! Unpacking...", "info")
            unpackTCP(pkt)
            logger("TCP information successfully retrieved !", "success")
        else:
            print("No specific protocol detected !")
        count += 1
# Entry point; the raw socket typically requires root privileges.
if __name__ == '__main__':
    main()
| 28.940299
| 111
| 0.523466
|
4a02dcb5eeb8cb0aa8ec660fe20b18600a431599
| 10,265
|
py
|
Python
|
pyzoo/zoo/orca/automl/xgboost/XGBoost.py
|
jiaxinying/analytics-zoo
|
c3669b1736088df911c84b38fde3e90a571f51b7
|
[
"Apache-2.0"
] | null | null | null |
pyzoo/zoo/orca/automl/xgboost/XGBoost.py
|
jiaxinying/analytics-zoo
|
c3669b1736088df911c84b38fde3e90a571f51b7
|
[
"Apache-2.0"
] | null | null | null |
pyzoo/zoo/orca/automl/xgboost/XGBoost.py
|
jiaxinying/analytics-zoo
|
c3669b1736088df911c84b38fde3e90a571f51b7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pickle
import pandas as pd
from xgboost.sklearn import XGBRegressor
from xgboost.sklearn import XGBClassifier
from zoo.automl.common.metrics import Evaluator
from zoo.automl.model.abstract import BaseModel, ModelBuilder
import logging
logger = logging.getLogger(__name__)
# Metric names XGBoost understands natively via ``eval_metric``; any other
# metric passed to fit_eval is assumed to be a zoo ``Evaluator`` metric.
XGB_METRIC_NAME = {"rmse", "rmsle", "mae", "mape", "mphe", "logloss", "error", "error@t", "merror",
                   "mlogloss", "auc", "aucpr", "ndcg", "map", "ndcg@n", "map@n", "ndcg-", "map-",
                   "ndcg@n-", "map@n-", "poisson-nloglik", "gamma-nloglik", "cox-nloglik",
                   "gamma-deviance", "tweedie-nloglik", "aft-nloglik",
                   "interval-regression-accuracy"}
class XGBoost(BaseModel):
    """AutoML wrapper around xgboost's sklearn-style estimators.

    Stores the hyper-parameters as attributes so they can be updated between
    tuning trials, lazily builds the underlying ``XGBRegressor`` /
    ``XGBClassifier``, and exposes fit/predict/evaluate/save/restore in the
    ``BaseModel`` API.
    """

    def __init__(self, model_type="regressor", config=None):
        """
        Initialize hyper parameters.

        :param model_type: "regressor" or "classifier".
        :param config: optional dict overriding the default hyper-parameters.
        """
        # models
        if not config:
            config = {}
        valid_model_type = ('regressor', 'classifier')
        if model_type not in valid_model_type:
            raise ValueError(f"model_type must be between {valid_model_type}. Got {model_type}")
        self.model_type = model_type
        self.n_estimators = config.get('n_estimators', 1000)
        self.max_depth = config.get('max_depth', 5)
        self.tree_method = config.get('tree_method', 'hist')
        self.n_jobs = config.get('n_jobs', -1)
        self.random_state = config.get('random_state', 2)
        self.learning_rate = config.get('learning_rate', 0.1)
        self.min_child_weight = config.get('min_child_weight', 1)
        self.seed = config.get('seed', 0)
        self.subsample = config.get('subsample', 0.8)
        self.colsample_bytree = config.get('colsample_bytree', 0.8)
        self.gamma = config.get('gamma', 0)
        self.reg_alpha = config.get('reg_alpha', 0)
        self.reg_lambda = config.get('reg_lambda', 1)
        self.verbosity = config.get('verbosity', 0)
        self.metric = config.get('metric')
        self.model = None
        self.model_init = False

    def set_params(self, **config):
        """Update hyper-parameters in place; unspecified keys keep their value."""
        self.n_estimators = config.get('n_estimators', self.n_estimators)
        self.max_depth = config.get('max_depth', self.max_depth)
        self.tree_method = config.get('tree_method', self.tree_method)
        self.n_jobs = config.get('n_jobs', self.n_jobs)
        self.random_state = config.get('random_state', self.random_state)
        self.learning_rate = config.get('learning_rate', self.learning_rate)
        self.min_child_weight = config.get('min_child_weight', self.min_child_weight)
        self.seed = config.get('seed', self.seed)
        self.subsample = config.get('subsample', self.subsample)
        self.colsample_bytree = config.get('colsample_bytree', self.colsample_bytree)
        self.gamma = config.get('gamma', self.gamma)
        self.reg_alpha = config.get('reg_alpha', self.reg_alpha)
        self.reg_lambda = config.get('reg_lambda', self.reg_lambda)
        self.verbosity = config.get('verbosity', self.verbosity)

    def _build(self, **config):
        """
        Build the underlying estimator and mark the model as initialized.

        :param config: hyper parameters for building the model
        """
        self.set_params(**config)
        if self.model_type == "regressor":
            self.model = XGBRegressor(n_estimators=self.n_estimators, max_depth=self.max_depth,
                                      n_jobs=self.n_jobs, tree_method=self.tree_method,
                                      random_state=self.random_state,
                                      learning_rate=self.learning_rate,
                                      min_child_weight=self.min_child_weight, seed=self.seed,
                                      subsample=self.subsample,
                                      colsample_bytree=self.colsample_bytree,
                                      gamma=self.gamma, reg_alpha=self.reg_alpha,
                                      reg_lambda=self.reg_lambda, verbosity=self.verbosity)
        elif self.model_type == "classifier":
            self.model = XGBClassifier(n_estimators=self.n_estimators, max_depth=self.max_depth,
                                       n_jobs=self.n_jobs, tree_method=self.tree_method,
                                       random_state=self.random_state,
                                       learning_rate=self.learning_rate,
                                       min_child_weight=self.min_child_weight, seed=self.seed,
                                       subsample=self.subsample,
                                       colsample_bytree=self.colsample_bytree,
                                       gamma=self.gamma, reg_alpha=self.reg_alpha,
                                       objective='binary:logistic',
                                       reg_lambda=self.reg_lambda, verbosity=self.verbosity)
        else:
            raise ValueError("model_type can only be \"regressor\" or \"classifier\"")
        self.model_init = True

    def fit_eval(self, data, validation_data=None, metric=None, **config):
        """
        Fit on the training data from scratch and report one metric value.

        :param data: (x, y) training tuple.
        :param validation_data: one (x, y) tuple or a list of such tuples.
        :param metric: metric name, either an XGBoost builtin or an
            ``Evaluator`` metric; defaults to rmse/logloss by model type.
        :return: dict mapping the metric name to its evaluated value.
        """
        x, y = data[0], data[1]
        if not self.model_init:
            self._build(**config)
        # isinstance (rather than ``type(...) is list``) also accepts list
        # subclasses as a list of validation sets.
        if validation_data is not None and not isinstance(validation_data, list):
            eval_set = [validation_data]
        else:
            eval_set = validation_data
        self.metric = metric or self.metric
        valid_metric_names = XGB_METRIC_NAME | Evaluator.metrics_func.keys()
        default_metric = 'rmse' if self.model_type == 'regressor' else 'logloss'
        if not self.metric:
            self.metric = default_metric
        elif self.metric not in valid_metric_names:
            raise ValueError(f"Got invalid metric name of {self.metric} for XGBoost. Valid metrics "
                             f"are {valid_metric_names}")
        if self.metric in XGB_METRIC_NAME:
            # Metric understood natively by xgboost: let fit() track it and
            # return the last value recorded for the first validation set.
            self.model.fit(x, y, eval_set=eval_set, eval_metric=self.metric)
            vals = self.model.evals_result_.get("validation_0").get(self.metric)
            return {self.metric: vals[-1]}
        else:
            # Evaluator metric: train with the default xgboost metric, then
            # score the first validation set with Evaluator.
            if isinstance(validation_data, list):
                validation_data = validation_data[0]
            self.model.fit(x, y, eval_set=eval_set, eval_metric=default_metric)
            eval_result = self.evaluate(
                validation_data[0],
                validation_data[1],
                metrics=[self.metric])[0]
            return {self.metric: eval_result}

    def predict(self, x):
        """
        Predict on input x with the fitted model.

        :param x: feature matrix to predict on; must not be None.
        :return: model predictions for x.
        :raises Exception: if x is None or the model has not been fitted.
        """
        if x is None:
            raise Exception("Input invalid x of None")
        if self.model is None:
            raise Exception("Needs to call fit_eval or restore first before calling predict")
        self.model.n_jobs = self.n_jobs
        out = self.model.predict(x)
        return out

    def evaluate(self, x, y, metrics=None):
        """
        Evaluate the model's predictions on x against the target y.

        :param x: feature matrix; must not be None.
        :param y: target values; a DataFrame is converted to its ndarray.
        :param metrics: list of metric names understood by ``Evaluator``;
            defaults to ['mse'].  (Default moved to None to avoid a mutable
            default argument; behaviour is unchanged.)
        :return: a list of metric evaluation results, one per metric.
        """
        if metrics is None:
            metrics = ['mse']
        if x is None:
            raise ValueError("Input invalid x of None")
        if y is None:
            raise ValueError("Input invalid y of None")
        if self.model is None:
            raise Exception("Needs to call fit_eval or restore first before calling predict")
        if isinstance(y, pd.DataFrame):
            y = y.values
        self.model.n_jobs = self.n_jobs
        y_pred = self.predict(x)
        return [Evaluator.evaluate(m, y, y_pred) for m in metrics]

    def save(self, checkpoint):
        """Pickle the fitted model to the *checkpoint* path."""
        # ``with`` closes the handle even if pickling fails (the original
        # left the file object dangling).
        with open(checkpoint, "wb") as f:
            pickle.dump(self.model, f)

    def restore(self, checkpoint):
        """Load a previously pickled model from *checkpoint*."""
        with open(checkpoint, 'rb') as f:
            self.model = pickle.load(f)
        self.model_init = True

    def _get_required_parameters(self):
        """No parameters are required by this model."""
        return {}

    def _get_optional_parameters(self):
        """Return the model's xgboost parameters as a dict."""
        # get_xgb_params is a method; the original returned the bound method
        # object instead of its result.
        return self.model.get_xgb_params()
class XGBoostModelBuilder(ModelBuilder):
    """ModelBuilder that produces :class:`XGBoost` models for tuning trials."""

    def __init__(self, model_type="regressor", cpus_per_trial=1, **xgb_configs):
        self.model_type = model_type
        self.model_config = dict(xgb_configs)
        # cpus_per_trial always wins over a user-supplied n_jobs; warn when
        # the two disagree so the override is visible.
        user_n_jobs = xgb_configs.get('n_jobs')
        if 'n_jobs' in xgb_configs and user_n_jobs != cpus_per_trial:
            logger.warning(f"Found n_jobs={user_n_jobs} in xgb_configs. It will not take "
                           f"effect since we assign cpus_per_trials(={cpus_per_trial}) to xgboost "
                           f"n_jobs. Please raise an issue if you do need different values for "
                           f"xgboost n_jobs and cpus_per_trials.")
        self.model_config['n_jobs'] = cpus_per_trial

    def build(self, config):
        """Create an XGBoost model and build it with the trial's *config*."""
        built = XGBoost(model_type=self.model_type, config=self.model_config)
        built._build(**config)
        return built
| 44.437229
| 100
| 0.610424
|
4a02de230a9b9c47aec307ecc3769a1fc04d382a
| 5,870
|
py
|
Python
|
taurex/opacity/ktables/nemesisktables.py
|
ucl-exoplanets/TauREx3_public
|
cf8da465448df44c3c4dcc2cd0002ef34edd3920
|
[
"BSD-3-Clause"
] | 10
|
2019-12-18T09:19:16.000Z
|
2021-06-21T11:02:06.000Z
|
taurex/opacity/ktables/nemesisktables.py
|
ucl-exoplanets/TauREx3_public
|
cf8da465448df44c3c4dcc2cd0002ef34edd3920
|
[
"BSD-3-Clause"
] | 10
|
2020-03-24T18:02:15.000Z
|
2021-08-23T20:32:09.000Z
|
taurex/opacity/ktables/nemesisktables.py
|
ucl-exoplanets/TauREx3_public
|
cf8da465448df44c3c4dcc2cd0002ef34edd3920
|
[
"BSD-3-Clause"
] | 8
|
2020-03-26T14:16:42.000Z
|
2021-12-18T22:11:25.000Z
|
from ..interpolateopacity import InterpolatingOpacity
import numpy as np
import pathlib
from .ktable import KTable
from taurex.util.util import sanitize_molecule_string
class NemesisKTables(KTable, InterpolatingOpacity):
    """
    Correlated-k opacity table loaded from a NEMESIS binary ``.kta`` file.
    """
    @classmethod
    def discover(cls):
        # Find every *.kta file under the configured ktable_path and pair
        # the molecule name (first '_'-separated token of the file stem)
        # with the loader arguments (path, interpolation mode).
        import os
        import glob
        import pathlib
        from taurex.cache import GlobalCache
        path = GlobalCache()['ktable_path']
        if path is None:
            return []
        path = os.path.join(path, '*.kta')
        files = glob.glob(path)
        discovery = []
        interp = GlobalCache()['xsec_interpolation'] or 'linear'
        for f in files:
            splits = pathlib.Path(f).stem.split('_')
            mol_name = sanitize_molecule_string(splits[0])
            discovery.append((mol_name, [f, interp]))
        return discovery
    def __init__(self, filename, interpolation_mode='linear'):
        # The display name embeds (up to) the first 10 characters of the
        # file stem; the molecule name is the stem's first '_' token.
        super().__init__('NemesisKtable:{}'.format(pathlib.Path(filename).stem[0:10]),
                         interpolation_mode=interpolation_mode)
        self._filename = filename
        splits = pathlib.Path(filename).stem.split('_')
        mol_name = sanitize_molecule_string(splits[0])
        self._molecule_name = mol_name
        self._spec_dict = None
        self._resolution = None
        # Eagerly parse the binary file; fills all grids used below.
        self._decode_ktables(filename)
    @property
    def moleculeName(self):
        return self._molecule_name
    @property
    def xsecGrid(self):
        return self._xsec_grid
    def _decode_ktables(self, filename):
        # Read the whole file as 32-bit floats and reinterpret the same
        # buffer as 32-bit ints for the integer header fields.
        self.debug('Reading NEMESIS FORMAT')
        nem_file_float = np.fromfile(filename, dtype=np.float32)
        nem_file_int = nem_file_float.view(np.int32)
        array_counter = 0
        self.debug('MAGIC NUMBER: %s', nem_file_int[0])
        wncount = nem_file_int[1]
        self.debug('WNCOUNT = %s', wncount)
        wnstart = nem_file_float[2]
        self.debug('WNSTART = %s um',wnstart)
        float_num = nem_file_float[3]
        self.debug('FLOAT: %s INT: %s',float_num, nem_file_int[4])
        num_pressure = nem_file_int[5]
        num_temperature = nem_file_int[6]
        num_quads = nem_file_int[7]
        self.debug('NP: %s NT: %s NQ: %s', num_pressure, num_temperature,
                   num_quads)
        self.debug('UNKNOWN VALUES: %s %s', nem_file_int[8], nem_file_int[9])
        # Quadrature sample points and weights: 2*num_quads floats from
        # offset 10.
        array_counter += 10+num_quads*2
        self._samples, self._weights = \
            nem_file_float[10:array_counter].reshape(2, -1).astype(np.float64)
        self.debug('Samples: %s, Weights: %s', self._samples, self._weights)
        # Two header values of unclear meaning are skipped here.
        self.debug('%s', nem_file_int[array_counter])
        array_counter += 1
        self.debug('%s', nem_file_int[array_counter])
        array_counter += 1
        # Pressure grid; *1e5 presumably converts bar to Pa — TODO confirm.
        self._pressure_grid = nem_file_float[array_counter:array_counter+num_pressure].astype(np.float64)*1e5
        self.debug('Pgrid: %s',self._pressure_grid)
        array_counter+=num_pressure
        self._temperature_grid = nem_file_float[array_counter:array_counter+num_temperature].astype(np.float64)
        array_counter += num_temperature
        self.debug('Tgrid: %s',self._temperature_grid)
        # The file stores wavelengths (um, per the WNSTART log); convert to
        # wavenumber and reverse so the grid is ascending.
        self._wavenumber_grid = 10000/nem_file_float[array_counter:array_counter+wncount].astype(np.float64)
        self._wavenumber_grid = self._wavenumber_grid[::-1]
        array_counter += wncount
        self.debug('Wngrid: %s',self._wavenumber_grid)
        # k-coefficients are stored scaled by 1e20; reorder the axes from
        # (wn, P, T, quad) to (P, T, wn, quad) and flip the wavenumber axis
        # to match the reversed grid above.
        self._xsec_grid=(nem_file_float[array_counter:].reshape(wncount, num_pressure,num_temperature,num_quads) * 1e-20).astype(np.float64)
        self._xsec_grid = self._xsec_grid.transpose((1,2,0,3))
        self._xsec_grid = self._xsec_grid[::,::,::-1,:]
        self._min_pressure = self._pressure_grid.min()
        self._max_pressure = self._pressure_grid.max()
        self._min_temperature = self._temperature_grid.min()
        self._max_temperature = self._temperature_grid.max()
    # 0
    # 0
    # PRESSURE POINTS
    # TEMPERATUREPOINTS
    # WNGRID
    # KCOEFFS SCALED 1e-20 SHAPE(WNGRID,????)
    # def _load_pickle_file(self, filename):
    #     #Load the pickle file
    #     self.info('Loading opacity from {}'.format(filename))
    #     try:
    #         with open(filename, 'rb') as f:
    #             self._spec_dict = pickle.load(f)
    #     except UnicodeDecodeError:
    #         with open(filename, 'rb') as f:
    #             self._spec_dict = pickle.load(f, encoding='latin1')
    #     self._wavenumber_grid = self._spec_dict['bin_centers']
    #     self._ngauss = self._spec_dict['ngauss']
    #     self._temperature_grid = self._spec_dict['t']
    #     self._pressure_grid = self._spec_dict['p']*1e5
    #     self._xsec_grid = self._spec_dict['kcoeff']
    #     self._weights = self._spec_dict['weights']
    #     self._molecule_name = self._spec_dict['name']
    #     self._min_pressure = self._pressure_grid.min()
    #     self._max_pressure = self._pressure_grid.max()
    #     self._min_temperature = self._temperature_grid.min()
    #     self._max_temperature = self._temperature_grid.max()
    #     self.clean_molecule_name()
    def clean_molecule_name(self):
        # Strip any '_'-suffixed qualifier from the molecule name.
        splits = self.moleculeName.split('_')
        self._molecule_name = splits[0]
    @property
    def wavenumberGrid(self):
        return self._wavenumber_grid
    @property
    def temperatureGrid(self):
        return self._temperature_grid
    @property
    def pressureGrid(self):
        return self._pressure_grid
    @property
    def resolution(self):
        # Never set by the NEMESIS decoder; always None here.
        return self._resolution
    @property
    def weights(self):
        return self._weights
    #return factor*(q_11*(Pmax-P)*(Tmax-T) + q_21*(P-Pmin)*(Tmax-T) + q_12*(Pmax-P)*(T-Tmin) + q_22*(P-Pmin)*(T-Tmin))
| 35.575758
| 140
| 0.634242
|
4a02de40a803e354141e828179ae1bc4061915ac
| 361
|
py
|
Python
|
archive.py
|
cedadev/ingest_web_monitor
|
0ee30ae99ddbc988300331c02569870c925c4c25
|
[
"BSD-2-Clause"
] | null | null | null |
archive.py
|
cedadev/ingest_web_monitor
|
0ee30ae99ddbc988300331c02569870c925c4c25
|
[
"BSD-2-Clause"
] | 3
|
2020-07-20T15:33:58.000Z
|
2020-11-04T18:18:36.000Z
|
archive.py
|
cedadev/ingest_web_monitor
|
0ee30ae99ddbc988300331c02569870c925c4c25
|
[
"BSD-2-Clause"
] | null | null | null |
from jinja2 import Environment, FileSystemLoader
import os

# Render the ingest archive graph: convert the dot file to SVG with
# Graphviz, then inline the SVG into the HTML template.
# NOTE(review): the return value of os.system is ignored, so a missing
# `dot` binary fails silently — consider checking it.
os.system("dot -Tsvg archive.dot -o graph1.svg")
root = os.path.dirname(os.path.abspath(__file__))  # NOTE(review): unused here
env = Environment(loader=FileSystemLoader("."))
template = env.get_template('archive_temp.html')
# Context managers close both handles; the original leaked the input
# handle and never closed (or flushed) the output file.
with open("graph1.svg") as svg_file:
    svg = svg_file.read()
with open("archive2.html", "w") as out:
    out.write(template.render(svg=svg))
| 19
| 49
| 0.728532
|
4a02df2e759aeb057b4b58b78fad30e2f5f8e693
| 2,488
|
py
|
Python
|
scripts/util/get_y_class.py
|
gwaygenomics/nf1_inactivation
|
09c6292448cb121b3077a3df1399fc6d4d56d5d8
|
[
"BSD-3-Clause"
] | 11
|
2016-07-25T21:53:23.000Z
|
2019-10-30T12:58:45.000Z
|
scripts/util/get_y_class.py
|
gwaygenomics/nf1_inactivation
|
09c6292448cb121b3077a3df1399fc6d4d56d5d8
|
[
"BSD-3-Clause"
] | 10
|
2016-07-26T18:02:05.000Z
|
2016-12-06T13:54:55.000Z
|
scripts/util/get_y_class.py
|
greenelab/nf1_inactivation
|
09c6292448cb121b3077a3df1399fc6d4d56d5d8
|
[
"BSD-3-Clause"
] | 6
|
2016-08-03T20:34:33.000Z
|
2021-03-03T23:50:37.000Z
|
"""
Gregory Way 2016
GBM NF1 Classifier
scripts/util/get_y_class.py
Description:
Function to return Y matrix as well as different classes of samples that
indicate if the sample is mutated, not mutated, or not assayed.
Usage:
Import only by build_y.py
"""
import os
import pandas as pd
def get_y_class(mut, tis, gene, mut_filt, x_base='data/X/normalized/'):
    """
    Output classification for samples in X

    Arguments:
    :param mut: pandas dataframe with mutation info
    :param tis: the tissue to load X
    :param gene: list of genes in mutation dataframe
    :param mut_filt: list of classes of mutations to filter
    :param x_base: string of where the X matrices are stored

    Output:
    list of classifications for each sample
    0 - Not mutated
    1 - Mutated
    2 - Not assessed
    list of samples belonging to each assignment
    """
    # Read in X: pick the expression matrix whose filename prefix (before
    # the first underscore) matches the requested tissue.
    x_files = os.listdir(x_base)
    x_file = [x for x in x_files if x.split('_')[0] == tis]
    X = pd.read_csv(x_base + x_file[0], delimiter='\t', index_col=0)

    y_class = []
    zero_samp = []
    one_samp = []
    two_samp = []
    for samp in X.columns:
        # Subset mutation file to only consider that sample
        m = mut[mut['#sample'] == samp]

        # If the sample is not in the mutation file
        # NOTE(review): the docstring says class 2 means "not assessed", yet
        # this branch appends 0 to y_class while tracking the sample in
        # two_samp — possibly deliberate (binary labels, separate bookkeeping)
        # but worth confirming against downstream consumers.
        if m.shape[0] == 0:
            y_class.append(0)
            two_samp.append(samp)
        # If the sample is in the mutation file
        else:
            # Subset the subsetted mutation file to only consider the gene
            m = m.loc[m['gene'].isin(gene), :]

            # If the gene is not in the file, append 0
            if m.shape[0] == 0:
                y_class.append(0)
                zero_samp.append(samp)
            # Gene is in the file, but test if not filtered and append status
            else:
                if mut_filt is None:
                    filt = m
                else:
                    # Subset the mutation file again to silent mutations
                    filt = m.loc[m['effect'].isin(mut_filt), :]

                # If the shape is the same as the filtered, then append 0
                # (every recorded mutation was of a filtered/ignored class)
                if m.shape[0] == filt.shape[0]:
                    y_class.append(0)
                    zero_samp.append(samp)
                # If not, then there is least one non-filtered mutation
                else:
                    y_class.append(1)
                    one_samp.append(samp)
    return y_class, zero_samp, one_samp, two_samp, X
| 29.619048
| 77
| 0.581994
|
4a02e0c70e7e3be14a3aab64ac1191e5989cd57e
| 6,064
|
py
|
Python
|
train_test01.py
|
chulminkw/keras-yolo3
|
bae367fa1ddf26c0ff8ba20ddc0b141f852e6e82
|
[
"MIT"
] | null | null | null |
train_test01.py
|
chulminkw/keras-yolo3
|
bae367fa1ddf26c0ff8ba20ddc0b141f852e6e82
|
[
"MIT"
] | null | null | null |
train_test01.py
|
chulminkw/keras-yolo3
|
bae367fa1ddf26c0ff8ba20ddc0b141f852e6e82
|
[
"MIT"
] | null | null | null |
"""
Retrain the YOLO model for your own dataset.
"""
import numpy as np
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Input, Lambda
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss
from yolo3.utils import get_random_data
from generator2 import YoloSequence
import albumentations as A
def _main():
    """Train a YOLOv3 model on the raccoon annotation CSV.

    Builds an untrained model (no pretrained weights, nothing frozen),
    splits the annotations 90/10 into train/validation, and fits with
    checkpointing, LR reduction, and early stopping callbacks.
    """
    annotation_path = r'C:\Users\q\raccoon\annotations\raccoon_anno.csv'
    log_dir = 'logs/000/'
    classes_path = 'model_data/voc_classes.txt'
    anchors_path = 'model_data/yolo_anchors.txt'
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)

    input_shape = (416, 416)  # multiple of 32, hw

    model = create_model(input_shape, anchors, num_classes, load_pretrained=False,
                         freeze_body=2, weights_path=None)  # make sure you know what you freeze
    print('after create_model')

    logging = TensorBoard(log_dir=log_dir)
    # Save weights every 3 epochs, keeping only the best by val_loss.
    checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
                                 monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)

    # Shuffle with a fixed seed so the train/val split is reproducible,
    # then restore nondeterminism for downstream randomness.
    val_split = 0.1
    with open(annotation_path) as f:
        lines = f.readlines()
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines) * val_split)
    num_train = len(lines) - num_val

    batch_size = 32
    print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))

    # Horizontal flip augmentation applied to the training set only.
    augmentor = A.Compose([
        A.HorizontalFlip(p=1.0)
    ], bbox_params=A.BboxParams(format='pascal_voc'))

    # gen_result = data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes)
    # result1, result2 = next(gen_result)
    train_set = YoloSequence(lines[:num_train], input_shape, batch_size, num_classes, anchors, max_boxes=20, augmentor=augmentor)
    valid_set = YoloSequence(lines[num_train:], input_shape, batch_size, num_classes, anchors, max_boxes=20, augmentor=None)

    model.compile(optimizer=Adam(lr=1e-3), loss={
        # use custom yolo_loss Lambda layer.
        'yolo_loss': lambda y_true, y_pred: y_pred})
    model.fit_generator(train_set,
                        steps_per_epoch=max(1, num_train // batch_size),
                        validation_data=valid_set,
                        validation_steps=max(1, num_val // batch_size),
                        epochs=50,
                        initial_epoch=0,
                        callbacks=[checkpoint, reduce_lr, early_stopping])
def get_classes(classes_path):
    """Read class names from a text file, one name per line.

    Surrounding whitespace is stripped from every entry.
    """
    with open(classes_path) as handle:
        return [line.strip() for line in handle.readlines()]
def get_anchors(anchors_path):
    """Read comma-separated anchor values from the first line of a file.

    Returns an (N, 2) float array of (width, height) anchor pairs.
    """
    with open(anchors_path) as handle:
        first_line = handle.readline()
    values = [float(token) for token in first_line.split(',')]
    return np.array(values).reshape(-1, 2)
def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
                 weights_path='model_data/yolo_weights.h5'):
    '''create the training model

    Builds the YOLOv3 body plus a Lambda layer computing the loss, so the
    returned model is trained with a dummy loss that simply passes through
    the Lambda output.

    :param input_shape: (h, w) input resolution; each must be a multiple of 32
    :param anchors: (N, 2) anchor array; split into 3 groups of N//3 per scale
    :param num_classes: number of object classes
    :param load_pretrained: if True, load weights from weights_path
    :param freeze_body: 1 freezes the darknet53 backbone (first 185 layers),
        2 freezes everything except the 3 output layers
    :param weights_path: path to pretrained weights (by-name, mismatches skipped)
    '''
    K.clear_session()  # get a new session
    image_input = Input(shape=(None, None, 3))
    h, w = input_shape
    num_anchors = len(anchors)

    # One y_true input per detection scale; grid sizes are input/32, /16, /8.
    y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \
        num_anchors//3, num_classes+5)) for l in range(3)]

    model_body = yolo_body(image_input, num_anchors//3, num_classes)
    print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))

    if load_pretrained:
        model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
        print('Load weights {}.'.format(weights_path))
        if freeze_body in [1, 2]:
            # Freeze darknet53 body or freeze all but 3 output layers.
            num = (185, len(model_body.layers)-3)[freeze_body-1]
            for i in range(num): model_body.layers[i].trainable = False
            print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))

    # Loss is computed inside the graph via a Lambda layer fed with the
    # model outputs and the ground-truth inputs.
    model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
        arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(
        [*model_body.output, *y_true])
    print('model_body.output type:', type(model_body.output))
    model = Model([model_body.input, *y_true], model_loss)

    return model
def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes):
    '''data generator for fit_generator

    Yields ([image_batch, y_true_per_scale...], dummy_zeros) forever,
    reshuffling the annotation list each time a full pass completes.
    '''
    n = len(annotation_lines)
    i = 0
    while True:
        image_data = []
        box_data = []
        for b in range(batch_size):
            # Reshuffle at the start of each epoch (when the cursor wraps).
            if i == 0:
                np.random.shuffle(annotation_lines)
            # get_random_data applies random augmentation per sample.
            image, box = get_random_data(annotation_lines[i], input_shape, random=True)
            image_data.append(image)
            box_data.append(box)
            i = (i+1) % n
        image_data = np.array(image_data)
        box_data = np.array(box_data)
        y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)
        # Targets are baked into the inputs; the "labels" are dummy zeros
        # because the model's loss is computed by an internal Lambda layer.
        yield [image_data, *y_true], np.zeros(batch_size)
def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):
    """Guarded entry point: return None for empty data or a non-positive
    batch size, otherwise hand off to the infinite data_generator."""
    if not annotation_lines or batch_size <= 0:
        return None
    return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)
if __name__ == '__main__':
_main()
| 40.972973
| 130
| 0.664908
|
4a02e1a6fc7d7609d924767b51d8cc9cd018a468
| 5,326
|
py
|
Python
|
var/spack/repos/builtin/packages/py-pytest/package.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360
|
2017-11-06T08:47:01.000Z
|
2022-03-31T14:45:33.000Z
|
var/spack/repos/builtin/packages/py-pytest/package.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838
|
2017-11-04T07:49:45.000Z
|
2022-03-31T23:38:39.000Z
|
var/spack/repos/builtin/packages/py-pytest/package.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793
|
2017-11-04T07:45:50.000Z
|
2022-03-30T14:31:53.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPytest(PythonPackage):
    """pytest: simple powerful testing with Python."""

    homepage = "https://pytest.org/"
    pypi = "pytest/pytest-5.2.1.tar.gz"

    version('6.2.5', sha256='131b36680866a76e6781d13f101efb86cf674ebb9762eb70d3082b6f29889e89')
    version('6.2.4', sha256='50bcad0a0b9c5a72c8e4e7c9855a3ad496ca6a881a3641b4260605450772c54b')
    version('6.2.1', sha256='66e419b1899bc27346cb2c993e12c5e5e8daba9073c1fbce33b9807abc95c306')
    version('6.1.1', sha256='8f593023c1a0f916110285b6efd7f99db07d59546e3d8c36fc60e2ab05d3be92')
    version('5.3.4', sha256='1d122e8be54d1a709e56f82e2d85dcba3018313d64647f38a91aec88c239b600')
    version('5.2.1', sha256='ca563435f4941d0cb34767301c27bc65c510cb82e90b9ecf9cb52dc2c63caaa0')
    version('5.1.1', sha256='c3d5020755f70c82eceda3feaf556af9a341334414a8eca521a18f463bcead88')
    version('4.6.9', sha256='19e8f75eac01dd3f211edd465b39efbcbdc8fc5f7866d7dd49fedb30d8adf339')
    version('4.6.5', sha256='8fc39199bdda3d9d025d3b1f4eb99a192c20828030ea7c9a0d2840721de7d347')
    version('4.6.2', sha256='bea27a646a3d74cbbcf8d3d4a06b2dfc336baf3dc2cc85cf70ad0157e73e8322')
    version('4.4.0', sha256='f21d2f1fb8200830dcbb5d8ec466a9c9120e20d8b53c7585d180125cce1d297a')
    version('4.3.0', sha256='067a1d4bf827ffdd56ad21bd46674703fce77c5957f6c1eef731f6146bfcef1c')
    version('3.7.2', sha256='3459a123ad5532852d36f6f4501dfe1acf4af1dd9541834a164666aa40395b02')
    version('3.7.1', sha256='86a8dbf407e437351cef4dba46736e9c5a6e3c3ac71b2e942209748e76ff2086')
    version('3.5.1', sha256='54713b26c97538db6ff0703a12b19aeaeb60b5e599de542e7fca0ec83b9038e8')
    version('3.0.7', sha256='b70696ebd1a5e6b627e7e3ac1365a4bc60aaf3495e843c1e70448966c5224cab')
    version('3.0.2', sha256='64d8937626dd2a4bc15ef0edd307d26636a72a3f3f9664c424d78e40efb1e339')

    # python_requires — supported interpreter ranges per pytest release line.
    depends_on('python@3.6:', when='@6.2:', type=('build', 'run'))
    depends_on('python@3.5:', when='@5:6.1', type=('build', 'run'))
    depends_on('python@2.7:2.8,3.4:', when='@3.3:4', type=('build', 'run'))
    depends_on('python@2.6:2.8,3.3:', when='@:3.2', type=('build', 'run'))

    # setup_requires
    depends_on('py-setuptools@42.0:', when='@6.2:', type=('build', 'run'))
    depends_on('py-setuptools@40.0:', when='@3.9.2:6.1', type=('build', 'run'))
    depends_on('py-setuptools@30.3:', when='@3.9.0:3.9.1', type=('build', 'run'))
    depends_on('py-setuptools', type=('build', 'run'))
    depends_on('py-setuptools-scm@3.4: +toml', when='@6.2:', type='build')
    depends_on('py-setuptools-scm', when='@3.1:', type='build')

    # install_requires — version constraints mirror each release's setup.cfg.
    depends_on('py-attrs@19.2.0:', when='@6.2:', type=('build', 'run'))
    depends_on('py-attrs@17.4.0:', when='@3.5:6.1', type=('build', 'run'))
    depends_on('py-attrs@17.2.0:', when='@3.3:3.4', type=('build', 'run'))
    depends_on('py-iniconfig', when='@6.0:', type=('build', 'run'))
    depends_on('py-packaging', when='@4.6:', type=('build', 'run'))
    depends_on('py-pluggy@0.12:1', when='@6.2:', type=('build', 'run'))
    depends_on('py-pluggy@0.12:0', when='@4.6:6.1', type=('build', 'run'))
    depends_on('py-pluggy@0.9.0:0.9,0.11:0', when='@4.5.0:4.5', type=('build', 'run'))
    depends_on('py-pluggy@0.11:', when='@4.4.2:4.4', type=('build', 'run'))
    depends_on('py-pluggy@0.9:', when='@4.4.0:4.4.1', type=('build', 'run'))
    depends_on('py-pluggy@0.7:', when='@3.7:4.3', type=('build', 'run'))
    depends_on('py-pluggy@0.5:0.7', when='@3.6.4:3.6', type=('build', 'run'))
    depends_on('py-pluggy@0.5:0.6', when='@:3.6.3', type=('build', 'run'))
    depends_on('py-py@1.8.2:', when='@6:', type=('build', 'run'))
    depends_on('py-py@1.5.0:', when='@3.3:5', type=('build', 'run'))
    depends_on('py-py@1.4.33:', when='@3.1.2:3.2.3,3.2.5:3.2', type=('build', 'run'))
    depends_on('py-py@1.4.33:1.4', when='@3.2.4', type=('build', 'run'))
    depends_on('py-py@1.4.29:', when='@:3.1.1', type=('build', 'run'))
    depends_on('py-toml', when='@6.0:', type=('build', 'run'))
    depends_on('py-atomicwrites@1.0:', when='@5.3: platform=windows', type=('build', 'run'))
    depends_on('py-atomicwrites@1.0:', when='@3.6:5.2', type=('build', 'run'))
    depends_on('py-colorama', when='platform=windows', type=('build', 'run'))
    depends_on('py-importlib-metadata@0.12:', when='@4.6:5.0', type=('build', 'run'))
    depends_on('py-importlib-metadata@0.12:', when='@5.1: ^python@:3.7', type=('build', 'run'))

    # Historic dependencies — only needed by older release lines / Python 2.
    depends_on('py-six@1.10.0:', when='@3.3:4', type=('build', 'run'))
    depends_on('py-more-itertools@4.0.0:', when='@3.5.1:5', type=('build', 'run'))
    depends_on('py-more-itertools@4.0.0:6.0.0', when='@4.2.1:4.6.9 ^python@:2', type=('build', 'run'))
    depends_on('py-funcsigs@1.0:', when='@4.4: ^python@:2', type=('build', 'run'))
    depends_on('py-funcsigs', when='@3.3:4.3 ^python@:2', type=('build', 'run'))
    depends_on('py-pathlib2@2.2.0:', when='@3.7.1: ^python@:3.5', type=('build', 'run'))
    depends_on('py-pathlib2', when='@3.7.0 ^python@:3.5', type=('build', 'run'))
    depends_on('py-wcwidth', when='@4.5:5', type=('build', 'run'))
| 64.95122
| 102
| 0.660158
|
4a02e245c18c0c065a12ca4cc004e99eee348d31
| 2,290
|
py
|
Python
|
aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/QueryDeviceFileListRequest.py
|
LittleJober/aliyun-openapi-python-sdk
|
f45cfa2248a5c8c47b2cebc1d4d1c2516b94df76
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/QueryDeviceFileListRequest.py
|
LittleJober/aliyun-openapi-python-sdk
|
f45cfa2248a5c8c47b2cebc1d4d1c2516b94df76
|
[
"Apache-2.0"
] | 1
|
2020-05-31T14:51:47.000Z
|
2020-05-31T14:51:47.000Z
|
aliyun-python-sdk-iot/aliyunsdkiot/request/v20180120/QueryDeviceFileListRequest.py
|
LittleJober/aliyun-openapi-python-sdk
|
f45cfa2248a5c8c47b2cebc1d4d1c2516b94df76
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkiot.endpoint import endpoint_data
class QueryDeviceFileListRequest(RpcRequest):
    """RPC request for the Aliyun IoT ``QueryDeviceFileList`` API (2018-01-20).

    Getter/setter pairs mirror the query parameters required by the SDK's
    generated request surface; do not rename them.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Iot', '2018-01-20', 'QueryDeviceFileList','Iot')
        self.set_method('POST')
        # Route through region/endpoint maps when the core supports them.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_IotId(self):
        # Device id (unique within the IoT instance).
        return self.get_query_params().get('IotId')

    def set_IotId(self,IotId):
        self.add_query_param('IotId',IotId)

    def get_IotInstanceId(self):
        return self.get_query_params().get('IotInstanceId')

    def set_IotInstanceId(self,IotInstanceId):
        self.add_query_param('IotInstanceId',IotInstanceId)

    def get_PageSize(self):
        return self.get_query_params().get('PageSize')

    def set_PageSize(self,PageSize):
        self.add_query_param('PageSize',PageSize)

    def get_CurrentPage(self):
        return self.get_query_params().get('CurrentPage')

    def set_CurrentPage(self,CurrentPage):
        self.add_query_param('CurrentPage',CurrentPage)

    def get_ProductKey(self):
        return self.get_query_params().get('ProductKey')

    def set_ProductKey(self,ProductKey):
        self.add_query_param('ProductKey',ProductKey)

    def get_DeviceName(self):
        return self.get_query_params().get('DeviceName')

    def set_DeviceName(self,DeviceName):
        self.add_query_param('DeviceName',DeviceName)
| 33.676471
| 78
| 0.762445
|
4a02e261ba20aea20683793146688d7fc1ae2f50
| 1,368
|
py
|
Python
|
domains/ava.py
|
sutekina/osu-gulag
|
c5eed521dfae282cd0cf95d02017d0f9654ceb11
|
[
"MIT"
] | null | null | null |
domains/ava.py
|
sutekina/osu-gulag
|
c5eed521dfae282cd0cf95d02017d0f9654ceb11
|
[
"MIT"
] | 3
|
2021-02-01T14:46:05.000Z
|
2021-08-30T16:09:34.000Z
|
domains/ava.py
|
sutekina/osu-gulag
|
c5eed521dfae282cd0cf95d02017d0f9654ceb11
|
[
"MIT"
] | null | null | null |
import re
from pathlib import Path
from typing import Optional
from typing import Union
from cmyui.web import Connection
from cmyui.web import Domain
from objects import glob
HTTPResponse = Optional[Union[bytes, tuple[int, bytes]]]
""" ava: avatar server (for both ingame & external) """
BASE_DOMAIN = glob.config.domain
domain = Domain({f"a.{BASE_DOMAIN}", "a.ppy.sh"})
AVATARS_PATH = Path.cwd() / ".data/avatars"
DEFAULT_AVATAR = AVATARS_PATH / "default.png"
@domain.route(re.compile(r"^/(?:\d{1,10}(?:\.(?:jpg|jpeg|png))?|favicon\.ico)?$"))
async def get_avatar(conn: Connection) -> HTTPResponse:
    """Serve a user's avatar, falling back to the default image.

    Path forms (enforced by the route regex):
      - ``/<id>.<ext>``       explicit file; default avatar if missing
      - ``/<id>``             extension probed in order jpg, jpeg, png
      - ``/`` or ``/favicon.ico``  default avatar
    """
    filename = conn.path[1:]

    if "." in filename:
        # user id & file extension provided
        path = AVATARS_PATH / filename
        if not path.exists():
            path = DEFAULT_AVATAR
    elif filename not in ("", "favicon.ico"):
        # user id provided - determine file extension.
        # BUG FIX: the original probed a hard-coded "(unknown).<ext>" name
        # instead of the requested user's id, so extension-less lookups
        # always fell through to the default avatar.
        for ext in ("jpg", "jpeg", "png"):
            path = AVATARS_PATH / f"{filename}.{ext}"
            if path.exists():
                break
        else:
            # no file exists for any probed extension
            path = DEFAULT_AVATAR
    else:
        # empty path or favicon, serve default avatar
        path = DEFAULT_AVATAR

    ext = "png" if path.suffix == ".png" else "jpeg"
    conn.resp_headers["Content-Type"] = f"image/{ext}"
    return path.read_bytes()
| 28.5
| 82
| 0.625
|
4a02e27327c716ac58c7a3e1dd7f4dac14fc3ace
| 1,654
|
py
|
Python
|
plugins/start.py
|
ali-mmagneto/VideoSesDonustur
|
2afa54cde27a27c47f558ebbeb41fedd885febe4
|
[
"MIT"
] | null | null | null |
plugins/start.py
|
ali-mmagneto/VideoSesDonustur
|
2afa54cde27a27c47f558ebbeb41fedd885febe4
|
[
"MIT"
] | null | null | null |
plugins/start.py
|
ali-mmagneto/VideoSesDonustur
|
2afa54cde27a27c47f558ebbeb41fedd885febe4
|
[
"MIT"
] | null | null | null |
from pyrogram import Client, filters
from config import quee, SUDO_USERS
from functions.utils import add_task
from pyrogram.types.bots_and_keyboards import InlineKeyboardButton, InlineKeyboardMarkup
from translation import Translation
# MIME types accepted as video input.
# BUG FIX: the original list had no comma between "video/mpeg" and
# "video/TS", so Python's implicit string concatenation produced the single
# bogus entry "video/mpegvideo/TS" and neither real type ever matched.
video_mimetype = [
    "video/x-flv",
    "video/mp4",
    "video/avi",
    "video/mkv",
    "application/x-mpegURL",
    "video/mp2t",
    "video/3gpp",
    "video/quicktime",
    "video/x-msvideo",
    "video/x-ms-wmv",
    "video/x-matroska",
    "video/webm",
    "video/x-m4v",
    "video/quicktime",
    "video/mpeg",
    "video/TS",
]
@Client.on_message(filters.user(SUDO_USERS) & filters.incoming & filters.command(['start', 'help']))
async def help_message(app, message):
    """Reply to /start and /help (sudo users only) with the welcome text
    and an inline button linking to the support chat."""
    await message.reply_text(
        text=Translation.START_TEXT.format(message.from_user.mention()),
        reply_markup=InlineKeyboardMarkup(
            [
                [
                    InlineKeyboardButton(
                        "Destek", url="https://t.me/botsohbet"
                    )
                ]
            ]
        ),
        reply_to_message_id=message.id
    )
@Client.on_message(filters.user(SUDO_USERS) & filters.incoming & (filters.video | filters.document))
async def encode_video(app, message):
    """Queue an incoming video (or video document) for conversion.

    Documents with a non-video MIME type are rejected.  The message is
    appended to the shared `quee` list; if it is the only item, processing
    starts immediately via add_task.
    """
    if message.document:
        # Only accept documents whose MIME type is a known video type.
        if not message.document.mime_type in video_mimetype:
            message.reply_text("```Geçersiz Video !\nBu video dosyasına benzemiyor.```", quote=True)
            return
    await message.reply_text(f"`✔️ Sıraya Eklendi...\nSıra: {len(quee)}\n\nSabırlı olun...\n\n#kuyruk`", quote=True)
    quee.append(message)
    # Kick off processing only when the queue was empty before this item;
    # otherwise the running task will pick it up.
    if len(quee) == 1:
        await add_task(message)
| 30.62963
| 116
| 0.631802
|
4a02e2cb6051b96c77902c14666310ad5fe3e4b4
| 1,825
|
py
|
Python
|
drfs/filesystems/memory.py
|
datarevenue-berlin/drfs
|
d44274b0ae6e1b802b7763b5088825a83cc12fa6
|
[
"MIT"
] | 2
|
2021-07-29T10:38:30.000Z
|
2021-09-08T11:48:39.000Z
|
drfs/filesystems/memory.py
|
datarevenue-berlin/drfs
|
d44274b0ae6e1b802b7763b5088825a83cc12fa6
|
[
"MIT"
] | 2
|
2020-10-07T07:47:31.000Z
|
2021-11-15T17:52:33.000Z
|
drfs/filesystems/memory.py
|
datarevenue-berlin/drfs
|
d44274b0ae6e1b802b7763b5088825a83cc12fa6
|
[
"MIT"
] | null | null | null |
from fsspec.implementations import memory as memfs
from drfs.filesystems.base import FILESYSTEMS, FileSystemBase
from drfs.filesystems.util import allow_pathlib, maybe_remove_scheme
class MemoryFileSystem(FileSystemBase):
    """drfs wrapper around fsspec's in-memory filesystem.

    Mostly thin delegation to the underlying fsspec implementation, with
    extra handling for pseudo-directories (fsspec's MemoryFileSystem has no
    real directory entries).
    """

    fs_cls = memfs.MemoryFileSystem
    scheme = "memory"
    is_remote = True
    supports_scheme = False

    @allow_pathlib
    def touch(self, *args, **kwargs):
        """Create an empty file (delegates to fsspec)."""
        return self.fs.touch(*args, **kwargs)

    @allow_pathlib
    def makedirs(self, *args, **kwargs):
        """Create directories (delegates to fsspec)."""
        self.fs.makedirs(*args, **kwargs)

    @allow_pathlib
    def rmdir(self, path, **kwargs):
        """Remove a directory (delegates to fsspec)."""
        self.fs.rmdir(path, **kwargs)

    @allow_pathlib
    @maybe_remove_scheme
    def exists(self, path):
        """Return True if `path` is a file, a pseudo-directory, or a prefix
        of any stored key (i.e. an implicit parent directory)."""
        if self.fs.exists(path):
            return True
        elif path in self.fs.pseudo_dirs:
            return True
        else:
            # Fallback: treat any key prefix as an existing directory.
            for k in self.fs.store.keys():
                if k.startswith(path):
                    return True
        return False

    @allow_pathlib
    @maybe_remove_scheme
    def rm(self, path, recursive=False):
        """Remove a file, or a whole subtree when recursive=True."""
        if recursive:
            self._recursive_rm(path)
        else:
            self.fs.rm(path)

    def _recursive_rm(self, path):
        # Depth-first delete: recurse into directories, remove files.
        for res in self.fs.ls(path, detail=True):
            if res["type"] == "directory":
                self._recursive_rm(res["name"])
            else:
                self.fs.rm(res["name"])

    def put(self, filename, path, **kwargs):
        """Copy a local file into the in-memory store."""
        from drfs.path import asstr

        filename, path = asstr(filename), asstr(path)
        return self.fs.put(filename, path, **kwargs)

    def get(self, path, filename, **kwargs):
        """Copy an in-memory file out to the local filesystem."""
        from drfs.path import asstr

        path, filename = asstr(path), asstr(filename)
        return self.fs.get(path, filename, **kwargs)
FILESYSTEMS["memory"] = MemoryFileSystem
| 27.238806
| 68
| 0.610411
|
4a02e570bc79eed86dc32844d3958bdfda26ce9b
| 2,845
|
py
|
Python
|
Menulib.py
|
spacesanjeet/Library-Management
|
a604e639ac2da9702341690ddd5772f2fdbd98e9
|
[
"MIT"
] | 1
|
2021-11-17T05:56:05.000Z
|
2021-11-17T05:56:05.000Z
|
Menulib.py
|
spacesanjeet/Library-Management
|
a604e639ac2da9702341690ddd5772f2fdbd98e9
|
[
"MIT"
] | null | null | null |
Menulib.py
|
spacesanjeet/Library-Management
|
a604e639ac2da9702341690ddd5772f2fdbd98e9
|
[
"MIT"
] | null | null | null |
#PYTHN MODULE: MENULIB
import Book
import Member
import Issue
def Menubook():
    """Interactive loop for the Book record sub-menu.

    Shows the menu, dispatches to the matching routine in the Book module,
    and repeats until the user picks option 5 (return to main menu).
    """
    while True:
        Book.clrscreen()
        print("\t\t\t Book Record Management\n")
        print("==========================================================")
        print("1. Add Book Record")
        print("2. Search Book Record")
        print("3. Delete Book Record")
        print("4. Update Book Record")
        print("5. Return to Main Menu")
        print("==========================================================")
        # Robustness fix: non-numeric input used to crash int() with
        # ValueError; treat it like any other invalid choice instead.
        try:
            choice = int(input("Enter Choice between 1 to 5 -------> : "))
        except ValueError:
            choice = 0
        if choice == 1:
            Book.insertData()
        elif choice == 2:
            Book.SearchBookRec()
        elif choice == 3:
            Book.deleteBook()
        elif choice == 4:
            Book.UpdateBook()
        elif choice == 5:
            return
        else:
            print("Wrong Choice.....Enter Your Choice again")
        input("Enter any key to continue")
def MenuMember():
    """Interactive loop for the Member record sub-menu.

    Shows the menu, dispatches to the matching routine in the Member module,
    and repeats until the user picks option 5 (return to main menu).
    """
    while True:
        Book.clrscreen()
        print("\t\t\t Member Record Management\n")
        print("==========================================================")
        print("1. Add Member Record")
        print("2. Search Member Record")
        print("3. Delete Member Record")
        print("4. Update Member Record")
        print("5. Return to Main Menu")
        print("==========================================================")
        # Robustness fix: non-numeric input used to crash int() with
        # ValueError; treat it like any other invalid choice instead.
        try:
            choice = int(input("Enter Choice between 1 to 5 ------> : "))
        except ValueError:
            choice = 0
        if choice == 1:
            Member.insertMember()
        elif choice == 2:
            Member.SearchMember()
        elif choice == 3:
            Member.deleteMember()
        elif choice == 4:
            Member.UpdateMember()
        elif choice == 5:
            return
        else:
            print("Wrong Choice.....Enter Your Choice again")
        input("Enter any key to continue")
def MenuIssueReturn():
    """Interactive loop for the Issue/Return sub-menu.

    Shows the menu, dispatches to the matching routine in the Issue module,
    and repeats until the user picks option 4 (return to main menu).
    """
    while True:
        Book.clrscreen()
        print("\t\t\t Member Record Management\n")
        print("==========================================================")
        print("1. Issue Book")
        print("2. Search Issue Book Record")
        print("3. Return Issued Book")
        print("4. Return to Main Menu")
        print("==========================================================")
        # Robustness fix: non-numeric input used to crash int() with
        # ValueError; treat it like any other invalid choice instead.
        try:
            choice = int(input("Enter Choice between 1 to 4 ------> : "))
        except ValueError:
            choice = 0
        if choice == 1:
            Issue.issueBook()
        elif choice == 2:
            Issue.SearchIssuedBooks()
        elif choice == 3:
            Issue.returnBook()
        elif choice == 4:
            return
        else:
            print("Wrong Choice.....Enter Your Choice again")
        input("Enter any key to continue")
| 35.5625
| 76
| 0.43761
|
4a02e57acf3742e49ab673c906857f8df7a24988
| 2,432
|
py
|
Python
|
programs/decisiontree/checker/semanticschecker.py
|
lsrcz/SyGuS
|
5aab1b2c324d8a3c20e51f8acb2866190a1431d3
|
[
"MIT"
] | 1
|
2021-07-11T08:32:32.000Z
|
2021-07-11T08:32:32.000Z
|
programs/decisiontree/checker/semanticschecker.py
|
lsrcz/SyGuS
|
5aab1b2c324d8a3c20e51f8acb2866190a1431d3
|
[
"MIT"
] | null | null | null |
programs/decisiontree/checker/semanticschecker.py
|
lsrcz/SyGuS
|
5aab1b2c324d8a3c20e51f8acb2866190a1431d3
|
[
"MIT"
] | 1
|
2020-12-20T16:08:10.000Z
|
2020-12-20T16:08:10.000Z
|
from z3 import Int, Bool
from semantics.semantics import Func
class SemChecker:
    """Semantic checker for a synthesized function against SyGuS constraints.

    On construction it walks the constraint tree collecting every distinct
    argument binding with which the target function is applied ("usage"),
    so candidate expressions can later be evaluated under each binding.
    """

    def __init__(self, funcproto, constraint, inputlist, inputtylist):
        # funcproto: prototype of the function being synthesized
        # constraint: root of the constraint expression tree
        # inputlist / inputtylist: variable names and their sorts ("Int"/"Bool")
        self.funcproto = funcproto
        self.inputlist = inputlist
        self.inputtylist = inputtylist
        self.constraint = constraint
        self.usage = []
        # Deduplicate argument bindings by value-wise comparison.
        # NOTE(review): the equality check assumes every binding dict has the
        # same key set (the prototype's arglist) — confirm if constraints can
        # produce heterogeneous bindings.
        for u in self.searchconstraint(self.constraint):
            distinct = True
            for oldu in self.usage:
                eq = True
                for k in u:
                    if oldu[k] != u[k]:
                        eq = False
                        break
                if eq:
                    distinct = False
            if len(self.usage) == 0:
                distinct = True
            if distinct:
                self.usage.append(u)

    def searchconstraint(self, constraintlist):
        """Recursively collect argument bindings of innermost Func applications.

        Returns a list of dicts mapping prototype argument names to the
        (sub)expressions they are bound to.  Only applications whose
        arguments contain no further Func applications yield a binding.
        """
        ret = []
        op = constraintlist.op
        if isinstance(op, Func):
            # Recurse into the application's arguments first; if none of
            # them contain a nested application, record this binding.
            ret1 = []
            for a in constraintlist.arg1:
                ret1 += self.searchconstraint(a)
            if len(ret1) == 0:
                nxtret = {}
                for a, r in zip(self.funcproto.arglist, constraintlist.arg1):
                    nxtret[a] = r
                ret.append(nxtret)
            else:
                ret.extend(ret1)
            return ret
        elif isinstance(op, int) or isinstance(op, bool):
            # Literal leaf: nothing to collect.
            return []
        # Generic operator node: scan whichever child slots are populated.
        if constraintlist.arg1 is not None:
            ret.extend(self.searchconstraint(constraintlist.arg1))
        if constraintlist.arg2 is not None:
            ret.extend(self.searchconstraint(constraintlist.arg2))
        if constraintlist.arg3 is not None:
            ret.extend(self.searchconstraint(constraintlist.arg3))
        return ret

    def getSymtab(self, outersymtab):
        """Evaluate every recorded usage binding under `outersymtab`,
        returning one concrete symbol table per usage."""
        ret = []
        for t in self.usage:
            nsym = {}
            for k in t:
                nsym[k] = t[k].eval(outersymtab)
            ret.append(nsym)
        return ret

    def check(self, expr, symtab):
        """Install `expr` as the candidate body and evaluate the constraint
        under `symtab`; truthy result means the constraint is satisfied."""
        self.funcproto.expr = expr
        return self.constraint.eval(symtab)

    def getz3vartab(self):
        """Build a dict of z3 variables (Int/Bool) for every declared input."""
        def DeclareVar(sort, name):
            if sort == "Int":
                return Int(name)
            if sort == 'Bool':
                return Bool(name)
        vartab = {}
        for i, t in zip(self.inputlist, self.inputtylist):
            vartab[i] = DeclareVar(t, i)
        return vartab
| 30.024691
| 77
| 0.514803
|
4a02e64920dfeaa37728932f6ab8efb7766ead27
| 376
|
py
|
Python
|
day06/python/day6.py
|
mattvperry/AoC_2016
|
045a687788a8d689b7fd258dc1cfebb8adcbfc41
|
[
"MIT"
] | null | null | null |
day06/python/day6.py
|
mattvperry/AoC_2016
|
045a687788a8d689b7fd258dc1cfebb8adcbfc41
|
[
"MIT"
] | null | null | null |
day06/python/day6.py
|
mattvperry/AoC_2016
|
045a687788a8d689b7fd258dc1cfebb8adcbfc41
|
[
"MIT"
] | null | null | null |
from collections import Counter
def frequencies(words):
    """Yield the per-column character frequency rankings of a word grid.

    Each element is Counter.most_common() for one column: a list of
    (char, count) pairs sorted most-frequent first.
    """
    columns = zip(*words)
    return (Counter(column).most_common() for column in columns)

def frequent(input, index):
    """Build a message by taking the character at `index` of every column's
    frequency ranking (0 = most common, -1 = least common)."""
    return "".join(ranking[index][0] for ranking in frequencies(input))

def day6(input):
    """Return both puzzle answers: (most-common message, least-common message)."""
    return frequent(input, 0), frequent(input, -1)
# Read the puzzle input and print both answers.
# Fixes: the variable was named `input`, shadowing the builtin, and the
# file handle was never closed — use a context manager and a neutral name.
with open("../input.txt") as puzzle_file:
    puzzle = [line.strip() for line in puzzle_file.read().split("\n")]
print(day6(puzzle))
| 26.857143
| 59
| 0.678191
|
4a02e75399d2182cca868f69fe1e400df8556a03
| 1,919
|
py
|
Python
|
userbot/modules/salam.py
|
nimascnay/Ice-Userbot
|
726ba2480231189fb50bde54cb79a1c314b4ab77
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/salam.py
|
nimascnay/Ice-Userbot
|
726ba2480231189fb50bde54cb79a1c314b4ab77
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/salam.py
|
nimascnay/Ice-Userbot
|
726ba2480231189fb50bde54cb79a1c314b4ab77
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2021-10-18T02:05:38.000Z
|
2021-10-18T02:05:38.000Z
|
from platform import uname
from userbot import ALIVE_NAME, CMD_HELP
from userbot.events import register
# ================= CONSTANT =================
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else uname().node
# ============================================
@register(outgoing=True, pattern="^.p(?: |$)(.*)")
async def typewriter(typew):
    """`.p` command: replace the outgoing message with a short greeting."""
    typew.pattern_match.group(1)
    await typew.edit("**Assalamualaikum**")
@register(outgoing=True, pattern=r"^\.pe(?: |$)(.*)")
async def typewriter(typew):
    """`.pe` command: replace the outgoing message with the full greeting."""
    typew.pattern_match.group(1)
    await typew.edit("**Assalamualaikum Warahmatullahi Wabarakatuh**")
@register(outgoing=True, pattern="^.P(?: |$)(.*)")
async def typewriter(typew):
    """`.P` command: introduce the user, then greet after short pauses.

    Bug fix: the original called `sleep(...)`, which was never imported and
    raised NameError at runtime; a blocking time.sleep would also have
    stalled the event loop, so use awaited asyncio.sleep instead.
    """
    import asyncio

    typew.pattern_match.group(1)
    await asyncio.sleep(1)
    await typew.edit(f"**Haii Salken Saya {DEFAULTUSER}**")
    await asyncio.sleep(2)
    await typew.edit("**Assalamualaikum...**")
@register(outgoing=True, pattern=r"^\.l(?: |$)(.*)")
async def typewriter(typew):
    """`.l` command: replace the outgoing message with the greeting reply."""
    typew.pattern_match.group(1)
    await typew.edit("**Wa'alaikumsalam**")
# Register this plugin's help text (Turkish) in the global help index.
CMD_HELP.update(
    {
        "salam": "**Plugin : **`salam`\
\n\n • **Syntax :** `.p`\
\n • **Function : **Assalamualaikum Dulu Biar Sopan..\
\n\n • **Syntax :** `.pe`\
\n • **Function : **salam Kenal dan salam\
\n\n • **Syntax :** `.l`\
\n • **Function : **Untuk Menjawab salam\
\n\n • **Syntax :** `.ass`\
\n • **Function : **Salam Bahas arab\
\n\n • **Syntax :** `.semangat`\
\n • **Function : **Memberikan Semangat.\
\n\n • **Syntax :** `.ywc`\
\n • **Function : **nMenampilkan Sama sama\
\n\n • **Syntax :** `.sayang`\
\n • **Function : **Kata I Love You.\
\n\n • **Syntax :** `.k`\
\n • **Function : **LU SEMUA NGENTOT 🔥\
\n\n • **Syntax :** `.j`\
\n • **Function : **NIMBRUNG GOBLOKK!!!🔥\
"
    }
)
| 30.951613
| 70
| 0.524752
|
4a02e83de76d7eefc5d0d86b8188e09df92edd21
| 7,186
|
py
|
Python
|
homeassistant/components/plum_lightpad/light.py
|
krisoshau/core
|
e3b90ea3f794256159d1405fd4c391c78446e1db
|
[
"Apache-2.0"
] | 4
|
2020-07-29T17:47:10.000Z
|
2020-09-16T13:39:13.000Z
|
homeassistant/components/plum_lightpad/light.py
|
krisoshau/core
|
e3b90ea3f794256159d1405fd4c391c78446e1db
|
[
"Apache-2.0"
] | 2
|
2020-08-13T21:02:26.000Z
|
2020-08-25T15:33:14.000Z
|
homeassistant/components/plum_lightpad/light.py
|
krisoshau/core
|
e3b90ea3f794256159d1405fd4c391c78446e1db
|
[
"Apache-2.0"
] | 3
|
2021-05-18T16:42:18.000Z
|
2021-07-19T22:04:21.000Z
|
"""Support for Plum Lightpad lights."""
import logging
from typing import Callable, List
from plumlightpad import Plum
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.entity import Entity
import homeassistant.util.color as color_util
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistant,
    entry: ConfigEntry,
    async_add_entities: Callable[[List[Entity]], None],
) -> None:
    """Set up Plum Lightpad dimmer lights and glow rings."""
    plum: Plum = hass.data[DOMAIN][entry.entry_id]

    def setup_entities(device) -> None:
        # A discovered device may carry a lightpad id ("lpid"), a logical
        # load id ("llid"), or both; create one entity per id present.
        entities = []

        if "lpid" in device:
            lightpad = plum.get_lightpad(device["lpid"])
            entities.append(GlowRing(lightpad=lightpad))

        if "llid" in device:
            logical_load = plum.get_load(device["llid"])
            entities.append(PlumLight(load=logical_load))

        if entities:
            async_add_entities(entities)

    async def new_load(device):
        setup_entities(device)

    async def new_lightpad(device):
        setup_entities(device)

    # Device endpoints use self-signed certs, hence verify_ssl=False.
    device_web_session = async_get_clientsession(hass, verify_ssl=False)
    # Discovery runs in the background and registers entities as devices
    # appear via the two listener callbacks above.
    hass.loop.create_task(
        plum.discover(
            hass.loop,
            loadListener=new_load,
            lightpadListener=new_lightpad,
            websession=device_web_session,
        )
    )
class PlumLight(LightEntity):
    """Representation of a Plum Lightpad dimmer."""

    def __init__(self, load):
        """Initialize the light."""
        self._load = load
        # Cache the current level; kept in sync via dimmerchange events.
        self._brightness = load.level

    async def async_added_to_hass(self):
        """Subscribe to dimmerchange events."""
        self._load.add_event_listener("dimmerchange", self.dimmerchange)

    def dimmerchange(self, event):
        """Change event handler updating the brightness."""
        self._brightness = event["level"]
        self.schedule_update_ha_state()

    @property
    def should_poll(self):
        """No polling needed."""
        # State is pushed by the device via dimmerchange events.
        return False

    @property
    def unique_id(self):
        """Combine logical load ID with .light to guarantee it is unique."""
        return f"{self._load.llid}.light"

    @property
    def name(self):
        """Return the name of the switch if any."""
        return self._load.name

    @property
    def device_info(self):
        """Return the device info."""
        return {
            "name": self.name,
            "identifiers": {(DOMAIN, self.unique_id)},
            "model": "Dimmer",
            "manufacturer": "Plum",
        }

    @property
    def brightness(self) -> int:
        """Return the brightness of this switch between 0..255."""
        return self._brightness

    @property
    def is_on(self) -> bool:
        """Return true if light is on."""
        return self._brightness > 0

    @property
    def supported_features(self):
        """Flag supported features."""
        if self._load.dimmable:
            return SUPPORT_BRIGHTNESS
        return 0

    async def async_turn_on(self, **kwargs):
        """Turn the light on."""
        if ATTR_BRIGHTNESS in kwargs:
            await self._load.turn_on(kwargs[ATTR_BRIGHTNESS])
        else:
            # No brightness supplied: restore the device's own last level.
            await self._load.turn_on()

    async def async_turn_off(self, **kwargs):
        """Turn the light off."""
        await self._load.turn_off()
class GlowRing(LightEntity):
    """A Plum Lightpad's configurable glow ring, exposed as a light."""

    def __init__(self, lightpad):
        """Initialize from the lightpad's current glow configuration."""
        self._lightpad = lightpad
        self._name = f"{lightpad.friendly_name} Glow Ring"

        self._state = lightpad.glow_enabled
        self._glow_intensity = lightpad.glow_intensity
        color = lightpad.glow_color
        self._rgb = (color["red"], color["green"], color["blue"])

    async def async_added_to_hass(self):
        """Subscribe to configchange events."""
        self._lightpad.add_event_listener("configchange", self.configchange_event)

    def configchange_event(self, event):
        """Apply a configuration-change event pushed by the lightpad."""
        changes = event["changes"]
        self._state = changes["glowEnabled"]
        self._glow_intensity = changes["glowIntensity"]
        glow_color = changes["glowColor"]
        self._rgb = (glow_color["red"], glow_color["green"], glow_color["blue"])
        self.schedule_update_ha_state()

    @property
    def hs_color(self):
        """Return the glow color as hue/saturation [float, float]."""
        return color_util.color_RGB_to_hs(*self._rgb)

    @property
    def should_poll(self):
        """Updates are pushed by the device; no polling needed."""
        return False

    @property
    def unique_id(self):
        """Combine LightPad ID with .glow to guarantee it is unique."""
        return f"{self._lightpad.lpid}.glow"

    @property
    def name(self):
        """Return the glow ring's display name."""
        return self._name

    @property
    def device_info(self):
        """Return the device registry info."""
        return {
            "name": self.name,
            "identifiers": {(DOMAIN, self.unique_id)},
            "model": "Glow Ring",
            "manufacturer": "Plum",
        }

    @property
    def brightness(self) -> int:
        """Return the glow intensity scaled to 0..255 and clamped."""
        scaled = int(round(self._glow_intensity * 255, 0))
        return min(max(scaled, 0), 255)

    @property
    def glow_intensity(self):
        """Glow intensity as the device's native 0..1 float."""
        return self._glow_intensity

    @property
    def is_on(self) -> bool:
        """Return True when the glow ring is enabled."""
        return self._state

    @property
    def icon(self):
        """Return the crop-portrait icon representing the glow ring."""
        return "mdi:crop-portrait"

    @property
    def supported_features(self):
        """Glow rings support both brightness and color."""
        return SUPPORT_BRIGHTNESS | SUPPORT_COLOR

    async def async_turn_on(self, **kwargs):
        """Turn the glow ring on, optionally adjusting intensity or color."""
        if ATTR_BRIGHTNESS in kwargs:
            await self._lightpad.set_config(
                {"glowIntensity": kwargs[ATTR_BRIGHTNESS] / 255.0}
            )
        elif ATTR_HS_COLOR in kwargs:
            red, green, blue = color_util.color_hs_to_RGB(*kwargs[ATTR_HS_COLOR])
            await self._lightpad.set_glow_color(red, green, blue, 0)
        else:
            await self._lightpad.set_config({"glowEnabled": True})

    async def async_turn_off(self, **kwargs):
        """Turn the glow ring off (a brightness kwarg only adjusts intensity)."""
        if ATTR_BRIGHTNESS in kwargs:
            await self._lightpad.set_config(
                {"glowIntensity": kwargs[ATTR_BRIGHTNESS] / 255.0}
            )
        else:
            await self._lightpad.set_config({"glowEnabled": False})
| 29.694215
| 82
| 0.629001
|
4a02e8c6d7bb65a200b1192b6a397b295a0b7b10
| 671
|
py
|
Python
|
leet_code_array/mid_questions/3sum_smaller.py
|
IvanFan/leetcode-python
|
72a12a107681cc5f09f1f88537c5b0741f0818a4
|
[
"MIT"
] | null | null | null |
leet_code_array/mid_questions/3sum_smaller.py
|
IvanFan/leetcode-python
|
72a12a107681cc5f09f1f88537c5b0741f0818a4
|
[
"MIT"
] | null | null | null |
leet_code_array/mid_questions/3sum_smaller.py
|
IvanFan/leetcode-python
|
72a12a107681cc5f09f1f88537c5b0741f0818a4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class Solution:
    def threeSumSmaller(self, nums, target):
        """Count index triples (i, j, k), i < j < k, with
        nums[i] + nums[j] + nums[k] < target.

        Sorts nums in place, then uses a two-pointer sweep per anchor.

        :type nums: List[int]
        :type target: int
        :rtype: int
        """
        nums.sort()
        total = 0
        n = len(nums)
        for anchor in range(n - 2):
            # After sorting, every later element is >= nums[anchor], so once
            # 3 * nums[anchor] >= target no remaining triplet can qualify.
            if nums[anchor] * 3 >= target:
                break
            lo, hi = anchor + 1, n - 1
            while lo < hi:
                if nums[anchor] + nums[lo] + nums[hi] < target:
                    # All pairs (lo, lo+1..hi) work with this anchor.
                    total += hi - lo
                    lo += 1
                else:
                    hi -= 1
        return total
| 24.851852
| 62
| 0.390462
|
4a02e8cf3f6c5e04ad865a266c17d93218467627
| 1,392
|
py
|
Python
|
setup.py
|
fossabot/StatDP
|
91cf5a646acdd3b332979211d3b6db89dffe3153
|
[
"MIT"
] | null | null | null |
setup.py
|
fossabot/StatDP
|
91cf5a646acdd3b332979211d3b6db89dffe3153
|
[
"MIT"
] | null | null | null |
setup.py
|
fossabot/StatDP
|
91cf5a646acdd3b332979211d3b6db89dffe3153
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
from codecs import open
# Read the long description for PyPI from the README (markdown).
with open('README.md', encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='StatDP',
    version='0.1',
    description='Counterexample Detection Using Statistical Methods for Incorrect Differential-Privacy Algorithms.',
    long_description=long_description,
    # Declare the README's format so PyPI renders it as Markdown instead of
    # falling back to reST/plain text (which would garble the project page).
    long_description_content_type='text/markdown',
    url='',
    author='Yuin Wang/Ding Ding/Danfeng Zhang/Daniel Kifer',
    author_email='{yxwang,dkifer,zhang}@cse.psu.edu,dxd437@psu.edu',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        # NOTE(review): "Topic :: Differential Privacy :: Statistics" is not a
        # registered trove classifier — confirm against the PyPI classifier list.
        'Topic :: Differential Privacy :: Statistics',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7'
    ],
    keywords='Differential Privacy, Hypothesis Test, Statistics',
    packages=find_packages(exclude=['tests']),
    install_requires=['numpy', 'scipy', 'intervals', 'z3-solver'],
    extras_require={
        'test': ['pytest-cov', 'pytest', 'coverage'],
    },
    entry_points={
        'console_scripts': [
            'statdp=statdp.__main__:main',
        ],
    },
)
| 34.8
| 116
| 0.637213
|
4a02ea2f48b2e3c1e4ed48bfd34e10b05070b4ba
| 4,498
|
py
|
Python
|
swagger_client/models/available_payment_method.py
|
yusong-shen/ecommerce-checkout-api-client-python
|
0cfe9cd0120d3453f5efec2814b367a14a703b12
|
[
"Apache-2.0"
] | null | null | null |
swagger_client/models/available_payment_method.py
|
yusong-shen/ecommerce-checkout-api-client-python
|
0cfe9cd0120d3453f5efec2814b367a14a703b12
|
[
"Apache-2.0"
] | null | null | null |
swagger_client/models/available_payment_method.py
|
yusong-shen/ecommerce-checkout-api-client-python
|
0cfe9cd0120d3453f5efec2814b367a14a703b12
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
ECommerce Checkout Flow API
Registration, Address Information, Delivery Options, Payment, Confirmation
OpenAPI spec version: 0.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class AvailablePaymentMethod(object):
    """A payment method a shopper may select during checkout.

    NOTE: This class was generated by the swagger code generator; the
    equality check and dict iteration below have been fixed by hand.
    """

    def __init__(self, code=None, name=None, type=None):
        """
        AvailablePaymentMethod - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Swagger metadata: attribute name -> declared type.
        self.swagger_types = {
            'code': 'str',
            'name': 'str',
            'type': 'str'
        }

        # Swagger metadata: attribute name -> JSON key in the API definition.
        self.attribute_map = {
            'code': 'code',
            'name': 'name',
            'type': 'type'
        }

        self._code = code
        self._name = name
        self._type = type

    @property
    def code(self):
        """
        Gets the code of this AvailablePaymentMethod.

        :return: The code of this AvailablePaymentMethod.
        :rtype: str
        """
        return self._code

    @code.setter
    def code(self, code):
        """
        Sets the code of this AvailablePaymentMethod.

        :param code: The code of this AvailablePaymentMethod.
        :type: str
        """
        self._code = code

    @property
    def name(self):
        """
        Gets the name of this AvailablePaymentMethod.

        :return: The name of this AvailablePaymentMethod.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """
        Sets the name of this AvailablePaymentMethod.

        :param name: The name of this AvailablePaymentMethod.
        :type: str
        """
        self._name = name

    @property
    def type(self):
        """
        Gets the type of this AvailablePaymentMethod.

        :return: The type of this AvailablePaymentMethod.
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """
        Sets the type of this AvailablePaymentMethod.

        :param type: The type of this AvailablePaymentMethod.
        :type: str
        """
        self._type = type

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # dict.items() works on both Python 2 and 3; the generated code
        # needlessly depended on six.iteritems here.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # Bug fix: the generated code compared __dict__ unconditionally, which
        # raised AttributeError for objects without a __dict__ (e.g. str) and
        # treated unrelated types with identical attributes as equal.
        if not isinstance(other, AvailablePaymentMethod):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 25.128492
| 78
| 0.555136
|
4a02ea3fc9c15af7c599f335881e069c3397c3d5
| 262,069
|
py
|
Python
|
internal/core/build-support/cpplint.py
|
xige-16/milvus
|
69087ff8dd400ff4ce0ea29a7f1c2c4b3e1d0c3d
|
[
"Apache-2.0"
] | 2
|
2021-09-05T15:00:49.000Z
|
2022-01-05T06:42:23.000Z
|
internal/core/build-support/cpplint.py
|
xige-16/milvus
|
69087ff8dd400ff4ce0ea29a7f1c2c4b3e1d0c3d
|
[
"Apache-2.0"
] | 38
|
2021-11-22T11:15:27.000Z
|
2022-03-30T08:14:12.000Z
|
internal/core/build-support/cpplint.py
|
Bennu-Li/milvus
|
35612881e33ce19a7407628769f6b51a7518bfe9
|
[
"Apache-2.0"
] | 3
|
2021-11-17T09:21:42.000Z
|
2021-11-22T11:54:09.000Z
|
#!/usr/bin/env python
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. It also does not
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import copy
import getopt
import glob
import itertools
import math # for log
import os
import re
import sre_compile
import string
import sys
import sysconfig
import unicodedata
import xml.etree.ElementTree
# if empty, use defaults
_valid_extensions = set([])
__VERSION__ = '1.5.4'
try:
xrange # Python 2
except NameError:
# -- pylint: disable=redefined-builtin
xrange = range # Python 3
_USAGE = """
Syntax: cpplint.py [--verbose=#] [--output=emacs|eclipse|vs7|junit|sed|gsed]
[--filter=-x,+y,...]
[--counting=total|toplevel|detailed] [--root=subdir]
[--repository=path]
[--linelength=digits] [--headers=x,y,...]
[--recursive]
[--exclude=path]
[--extensions=hpp,cpp,...]
[--includeorder=default|standardcfirst]
[--quiet]
[--version]
<file> [file] ...
Style checker for C/C++ source files.
This is a fork of the Google style checker with minor extensions.
The style guidelines this tries to follow are those in
https://google.github.io/styleguide/cppguide.html
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
Default linted extensions are %s.
Other file types will be ignored.
Change the extensions with the --extensions flag.
Flags:
output=emacs|eclipse|vs7|junit|sed|gsed
By default, the output is formatted to ease emacs parsing. Visual Studio
compatible output (vs7) may also be used. Further support exists for
eclipse (eclipse), and JUnit (junit). XML parsers such as those used
in Jenkins and Bamboo may also be used.
The sed format outputs sed commands that should fix some of the errors.
Note that this requires gnu sed. If that is installed as gsed on your
system (common e.g. on macOS with homebrew) you can use the gsed output
format. Sed commands are written to stdout, not stderr, so you should be
able to pipe output straight to a shell to run the fixes.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
Errors with lower verbosity levels have lower confidence and are more
likely to be false positives.
quiet
Don't print anything if no errors are found.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
the top-level categories like 'build' and 'whitespace' will
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
repository=path
The top level directory of the repository, used to derive the header
guard CPP variable. By default, this is determined by searching for a
path that contains .git, .hg, or .svn. When this flag is specified, the
given path is used instead. This option allows the header guard CPP
variable to remain consistent even if members of a team have different
repository root directories (such as when checking out a subdirectory
with SVN). In addition, users of non-mainstream version control systems
can use this flag to ensure readable header guard CPP variables.
Examples:
Assuming that Alice checks out ProjectName and Bob checks out
ProjectName/trunk and trunk contains src/chrome/ui/browser.h, then
with no --repository flag, the header guard CPP variable will be:
Alice => TRUNK_SRC_CHROME_BROWSER_UI_BROWSER_H_
Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_
If Alice uses the --repository=trunk flag and Bob omits the flag or
uses --repository=. then the header guard CPP variable will be:
Alice => SRC_CHROME_BROWSER_UI_BROWSER_H_
Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_
root=subdir
The root directory used for deriving header guard CPP variable.
This directory is relative to the top level directory of the repository
which by default is determined by searching for a directory that contains
.git, .hg, or .svn but can also be controlled with the --repository flag.
If the specified directory does not exist, this flag is ignored.
Examples:
Assuming that src is the top level directory of the repository (and
cwd=top/src), the header guard CPP variables for
src/chrome/browser/ui/browser.h are:
No flag => CHROME_BROWSER_UI_BROWSER_H_
--root=chrome => BROWSER_UI_BROWSER_H_
--root=chrome/browser => UI_BROWSER_H_
--root=.. => SRC_CHROME_BROWSER_UI_BROWSER_H_
linelength=digits
This is the allowed line length for the project. The default value is
80 characters.
Examples:
--linelength=120
recursive
Search for files to lint recursively. Each directory given in the list
of files to be linted is replaced by all files that descend from that
directory. Files with extensions not in the valid extensions list are
excluded.
exclude=path
Exclude the given path from the list of files to be linted. Relative
paths are evaluated relative to the current directory and shell globbing
is performed. This flag can be provided multiple times to exclude
multiple files.
Examples:
--exclude=one.cc
--exclude=src/*.cc
--exclude=src/*.cc --exclude=test/*.cc
extensions=extension,extension,...
The allowed file extensions that cpplint will check
Examples:
--extensions=%s
includeorder=default|standardcfirst
For the build/include_order rule, the default is to blindly assume angle
bracket includes with file extension are c-system-headers (default),
even knowing this will have false classifications.
The default is established at google.
standardcfirst means to instead use an allow-list of known c headers and
treat all others as separate group of "other system headers". The C headers
included are those of the C-standard lib and closely related ones.
headers=x,y,...
The header extensions that cpplint will treat as .h in checks. Values are
automatically added to --extensions list.
(by default, only files with extensions %s will be assumed to be headers)
Examples:
--headers=%s
--headers=hpp,hxx
--headers=hpp
cpplint.py supports per-directory configurations specified in CPPLINT.cfg
files. CPPLINT.cfg file can contain a number of key=value pairs.
Currently the following options are supported:
set noparent
filter=+filter1,-filter2,...
exclude_files=regex
linelength=80
root=subdir
headers=x,y,...
"set noparent" option prevents cpplint from traversing directory tree
upwards looking for more .cfg files in parent directories. This option
is usually placed in the top-level project directory.
The "filter" option is similar in function to --filter flag. It specifies
message filters in addition to the |_DEFAULT_FILTERS| and those specified
through --filter command-line flag.
"exclude_files" allows to specify a regular expression to be matched against
a file name. If the expression matches, the file is skipped and not run
through the linter.
"linelength" allows to specify the allowed line length for the project.
The "root" option is similar in function to the --root flag (see example
above). Paths are relative to the directory of the CPPLINT.cfg.
The "headers" option is similar in function to the --headers flag
(see example above).
CPPLINT.cfg has an effect on files in the same directory and all
sub-directories, unless overridden by a nested configuration file.
Example file:
filter=-build/include_order,+build/include_alpha
exclude_files=.*\\.cc
The above example disables build/include_order warning and enables
build/include_alpha as well as excludes all .cc from being
processed by linter, in the current directory (where the .cfg
file is located) and all sub-directories.
"""
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
_ERROR_CATEGORIES = [
'build/class',
'build/c++11',
'build/c++14',
'build/c++tr1',
'build/deprecated',
'build/endif_comment',
'build/explicit_make_pair',
'build/forward_decl',
'build/header_guard',
'build/include',
'build/include_subdir',
'build/include_alpha',
'build/include_order',
'build/include_what_you_use',
'build/namespaces_headers',
'build/namespaces_literals',
'build/namespaces',
'build/printf_format',
'build/storage_class',
'legal/copyright',
'readability/alt_tokens',
'readability/braces',
'readability/casting',
'readability/check',
'readability/constructors',
'readability/fn_size',
'readability/inheritance',
'readability/multiline_comment',
'readability/multiline_string',
'readability/namespace',
'readability/nolint',
'readability/nul',
'readability/strings',
'readability/todo',
'readability/utf8',
'runtime/arrays',
'runtime/casting',
'runtime/explicit',
'runtime/int',
'runtime/init',
'runtime/invalid_increment',
'runtime/member_string_references',
'runtime/memset',
'runtime/indentation_namespace',
'runtime/operator',
'runtime/printf',
'runtime/printf_format',
'runtime/references',
'runtime/string',
'runtime/threadsafe_fn',
'runtime/vlog',
'whitespace/blank_line',
'whitespace/braces',
'whitespace/comma',
'whitespace/comments',
'whitespace/empty_conditional_body',
'whitespace/empty_if_body',
'whitespace/empty_loop_body',
'whitespace/end_of_line',
'whitespace/ending_newline',
'whitespace/forcolon',
'whitespace/indent',
'whitespace/line_length',
'whitespace/newline',
'whitespace/operators',
'whitespace/parens',
'whitespace/semicolon',
'whitespace/tab',
'whitespace/todo',
]
# keywords to use with --outputs which generate stdout for machine processing
_MACHINE_OUTPUTS = [
'junit',
'sed',
'gsed'
]
# These error categories are no longer enforced by cpplint, but for backwards-
# compatibility they may still appear in NOLINT comments.
_LEGACY_ERROR_CATEGORIES = [
'readability/streams',
'readability/function',
]
# The default state of the category filter. This is overridden by the --filter=
# flag. By default all errors are on, so only add here categories that should be
# off by default (i.e., categories that must be enabled by the --filter= flags).
# All entries here should start with a '-' or '+', as in the --filter= flag.
_DEFAULT_FILTERS = ['-build/include_alpha']
# The default list of categories suppressed for C (not C++) files.
_DEFAULT_C_SUPPRESSED_CATEGORIES = [
'readability/casting',
]
# The default list of categories suppressed for Linux Kernel files.
_DEFAULT_KERNEL_SUPPRESSED_CATEGORIES = [
'whitespace/tab',
]
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.
# C++ headers
_CPP_HEADERS = frozenset([
# Legacy
'algobase.h',
'algo.h',
'alloc.h',
'builtinbuf.h',
'bvector.h',
'complex.h',
'defalloc.h',
'deque.h',
'editbuf.h',
'fstream.h',
'function.h',
'hash_map',
'hash_map.h',
'hash_set',
'hash_set.h',
'hashtable.h',
'heap.h',
'indstream.h',
'iomanip.h',
'iostream.h',
'istream.h',
'iterator.h',
'list.h',
'map.h',
'multimap.h',
'multiset.h',
'ostream.h',
'pair.h',
'parsestream.h',
'pfstream.h',
'procbuf.h',
'pthread_alloc',
'pthread_alloc.h',
'rope',
'rope.h',
'ropeimpl.h',
'set.h',
'slist',
'slist.h',
'stack.h',
'stdiostream.h',
'stl_alloc.h',
'stl_relops.h',
'streambuf.h',
'stream.h',
'strfile.h',
'strstream.h',
'tempbuf.h',
'tree.h',
'type_traits.h',
'vector.h',
# 17.6.1.2 C++ library headers
'algorithm',
'array',
'atomic',
'bitset',
'chrono',
'codecvt',
'complex',
'condition_variable',
'deque',
'exception',
'forward_list',
'fstream',
'functional',
'future',
'initializer_list',
'iomanip',
'ios',
'iosfwd',
'iostream',
'istream',
'iterator',
'limits',
'list',
'locale',
'map',
'memory',
'mutex',
'new',
'numeric',
'ostream',
'queue',
'random',
'ratio',
'regex',
'scoped_allocator',
'set',
'sstream',
'stack',
'stdexcept',
'streambuf',
'string',
'strstream',
'system_error',
'thread',
'tuple',
'typeindex',
'typeinfo',
'type_traits',
'unordered_map',
'unordered_set',
'utility',
'valarray',
'vector',
# 17.6.1.2 C++14 headers
'shared_mutex',
# 17.6.1.2 C++17 headers
'any',
'charconv',
'codecvt',
'execution',
'filesystem',
'memory_resource',
'optional',
'string_view',
'variant',
# 17.6.1.2 C++ headers for C library facilities
'cassert',
'ccomplex',
'cctype',
'cerrno',
'cfenv',
'cfloat',
'cinttypes',
'ciso646',
'climits',
'clocale',
'cmath',
'csetjmp',
'csignal',
'cstdalign',
'cstdarg',
'cstdbool',
'cstddef',
'cstdint',
'cstdio',
'cstdlib',
'cstring',
'ctgmath',
'ctime',
'cuchar',
'cwchar',
'cwctype',
])
# C headers
_C_HEADERS = frozenset([
# System C headers
'assert.h',
'complex.h',
'ctype.h',
'errno.h',
'fenv.h',
'float.h',
'inttypes.h',
'iso646.h',
'limits.h',
'locale.h',
'math.h',
'setjmp.h',
'signal.h',
'stdalign.h',
'stdarg.h',
'stdatomic.h',
'stdbool.h',
'stddef.h',
'stdint.h',
'stdio.h',
'stdlib.h',
'stdnoreturn.h',
'string.h',
'tgmath.h',
'threads.h',
'time.h',
'uchar.h',
'wchar.h',
'wctype.h',
# additional POSIX C headers
'aio.h',
'arpa/inet.h',
'cpio.h',
'dirent.h',
'dlfcn.h',
'fcntl.h',
'fmtmsg.h',
'fnmatch.h',
'ftw.h',
'glob.h',
'grp.h',
'iconv.h',
'langinfo.h',
'libgen.h',
'monetary.h',
'mqueue.h',
'ndbm.h',
'net/if.h',
'netdb.h',
'netinet/in.h',
'netinet/tcp.h',
'nl_types.h',
'poll.h',
'pthread.h',
'pwd.h',
'regex.h',
'sched.h',
'search.h',
'semaphore.h',
'setjmp.h',
'signal.h',
'spawn.h',
'strings.h',
'stropts.h',
'syslog.h',
'tar.h',
'termios.h',
'trace.h',
'ulimit.h',
'unistd.h',
'utime.h',
'utmpx.h',
'wordexp.h',
# additional GNUlib headers
'a.out.h',
'aliases.h',
'alloca.h',
'ar.h',
'argp.h',
'argz.h',
'byteswap.h',
'crypt.h',
'endian.h',
'envz.h',
'err.h',
'error.h',
'execinfo.h',
'fpu_control.h',
'fstab.h',
'fts.h',
'getopt.h',
'gshadow.h',
'ieee754.h',
'ifaddrs.h',
'libintl.h',
'mcheck.h',
'mntent.h',
'obstack.h',
'paths.h',
'printf.h',
'pty.h',
'resolv.h',
'shadow.h',
'sysexits.h',
'ttyent.h',
# Additional linux glibc headers
'dlfcn.h',
'elf.h',
'features.h',
'gconv.h',
'gnu-versions.h',
'lastlog.h',
'libio.h',
'link.h',
'malloc.h',
'memory.h',
'netash/ash.h',
'netatalk/at.h',
'netax25/ax25.h',
'neteconet/ec.h',
'netipx/ipx.h',
'netiucv/iucv.h',
'netpacket/packet.h',
'netrom/netrom.h',
'netrose/rose.h',
'nfs/nfs.h',
'nl_types.h',
'nss.h',
're_comp.h',
'regexp.h',
'sched.h',
'sgtty.h',
'stab.h',
'stdc-predef.h',
'stdio_ext.h',
'syscall.h',
'termio.h',
'thread_db.h',
'ucontext.h',
'ustat.h',
'utmp.h',
'values.h',
'wait.h',
'xlocale.h',
# Hardware specific headers
'arm_neon.h',
'emmintrin.h',
'xmmintin.h',
])
# Folders of C libraries so commonly used in C++,
# that they have parity with standard C libraries.
C_STANDARD_HEADER_FOLDERS = frozenset([
# standard C library
"sys",
# glibc for linux
"arpa",
"asm-generic",
"bits",
"gnu",
"net",
"netinet",
"protocols",
"rpc",
"rpcsvc",
"scsi",
# linux kernel header
"drm",
"linux",
"misc",
"mtd",
"rdma",
"sound",
"video",
"xen",
])
# Type names
_TYPES = re.compile(
r'^(?:'
# [dcl.type.simple]
r'(char(16_t|32_t)?)|wchar_t|'
r'bool|short|int|long|signed|unsigned|float|double|'
# [support.types]
r'(ptrdiff_t|size_t|max_align_t|nullptr_t)|'
# [cstdint.syn]
r'(u?int(_fast|_least)?(8|16|32|64)_t)|'
r'(u?int(max|ptr)_t)|'
r')$')
# These headers are excluded from [build/include] and [build/include_order]
# checks:
# - Anything not following google file name conventions (containing an
# uppercase character, such as Python.h or nsStringAPI.h, for example).
# - Lua headers.
_THIRD_PARTY_HEADERS_PATTERN = re.compile(
r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$')
# Pattern for matching FileInfo.BaseName() against test file name
_test_suffixes = ['_test', '_regtest', '_unittest']
_TEST_FILE_SUFFIX = '(' + '|'.join(_test_suffixes) + r')$'
# Pattern that matches only complete whitespace, possibly across multiple lines.
_EMPTY_CONDITIONAL_BODY_PATTERN = re.compile(r'^\s*$', re.DOTALL)
# Assertion macros. These are defined in base/logging.h and
# testing/base/public/gunit.h.
_CHECK_MACROS = [
'DCHECK', 'CHECK',
'EXPECT_TRUE', 'ASSERT_TRUE',
'EXPECT_FALSE', 'ASSERT_FALSE',
]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(macro_var, {}) for macro_var in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
('>=', 'GE'), ('>', 'GT'),
('<=', 'LE'), ('<', 'LT')]:
_CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
_CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
('>=', 'LT'), ('>', 'LE'),
('<=', 'GT'), ('<', 'GE')]:
_CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
# Alternative tokens and their replacements. For full list, see section 2.5
# Alternative tokens [lex.digraph] in the C++ standard.
#
# Digraphs (such as '%:') are not included here since it's a mess to
# match those on a word boundary.
_ALT_TOKEN_REPLACEMENT = {
'and': '&&',
'bitor': '|',
'or': '||',
'xor': '^',
'compl': '~',
'bitand': '&',
'and_eq': '&=',
'or_eq': '|=',
'xor_eq': '^=',
'not': '!',
'not_eq': '!='
}
# Compile regular expression that matches all the above keywords. The "[ =()]"
# bit is meant to avoid matching these keywords outside of boolean expressions.
#
# False positives include C-style multi-line comments and multi-line strings
# but those have always been troublesome for cpplint.
_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_OTHER_SYS_HEADER = 3
_LIKELY_MY_HEADER = 4
_POSSIBLE_MY_HEADER = 5
_OTHER_HEADER = 6
# These constants define the current inline assembly state
_NO_ASM = 0 # Outside of inline assembly block
_INSIDE_ASM = 1 # Inside inline assembly block
_END_ASM = 2 # Last line of inline assembly block
_BLOCK_ASM = 3 # The whole block is an inline assembly block
# Match start of assembly blocks
_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
r'(?:\s+(volatile|__volatile__))?'
r'\s*[{(]')
# Match strings that indicate we're working on a C (not C++) file.
_SEARCH_C_FILE = re.compile(r'\b(?:LINT_C_FILE|'
r'vim?:\s*.*(\s*|:)filetype=c(\s*|:|$))')
# Match string that indicates we're working on a Linux Kernel file.
_SEARCH_KERNEL_FILE = re.compile(r'\b(?:LINT_KERNEL_FILE)')
# Commands for sed to fix the problem
_SED_FIXUPS = {
'Remove spaces around =': r's/ = /=/',
'Remove spaces around !=': r's/ != /!=/',
'Remove space before ( in if (': r's/if (/if(/',
'Remove space before ( in for (': r's/for (/for(/',
'Remove space before ( in while (': r's/while (/while(/',
'Remove space before ( in switch (': r's/switch (/switch(/',
'Should have a space between // and comment': r's/\/\//\/\/ /',
'Missing space before {': r's/\([^ ]\){/\1 {/',
'Tab found, replace by spaces': r's/\t/ /g',
'Line ends in whitespace. Consider deleting these extra spaces.': r's/\s*$//',
'You don\'t need a ; after a }': r's/};/}/',
'Missing space after ,': r's/,\([^ ]\)/, \1/g',
}
_regexp_compile_cache = {}
# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}
# The root directory used for deriving header guard CPP variable.
# This is set by --root flag.
_root = None
_root_debug = False
# The top level repository directory. If set, _root is calculated relative to
# this directory instead of the directory containing version control artifacts.
# This is set by the --repository flag.
_repository = None
# Files to exclude from linting. This is set by the --exclude flag.
_excludes = None
# Whether to suppress all PrintInfo messages, UNRELATED to --quiet flag
_quiet = False
# The allowed line length of files.
# This is set by --linelength flag.
_line_length = 80
# This allows to use different include order rule than default
_include_order = "default"
try:
unicode
except NameError:
# -- pylint: disable=redefined-builtin
basestring = unicode = str
try:
long
except NameError:
# -- pylint: disable=redefined-builtin
long = int
if sys.version_info < (3,):
# -- pylint: disable=no-member
# BINARY_TYPE = str
itervalues = dict.itervalues
iteritems = dict.iteritems
else:
# BINARY_TYPE = bytes
itervalues = dict.values
iteritems = dict.items
def unicode_escape_decode(x):
  """Decodes unicode-escape sequences on Python 2; identity on Python 3."""
  if sys.version_info >= (3,):
    return x
  return codecs.unicode_escape_decode(x)[0]
# Treat all headers starting with 'h' equally: .h, .hpp, .hxx etc.
# This is set by --headers flag.
# An empty set means the header extensions are derived from
# _valid_extensions (or the built-in default) in GetHeaderExtensions().
_hpp_headers = set([])
# {str, bool}: a map from error categories to booleans which indicate if the
# category should be suppressed for every line.
_global_error_suppressions = {}
def ProcessHppHeadersOption(val):
  """Parses the --headers flag value into the global header-extension set.

  Args:
    val: comma separated list of header extensions (e.g. 'hpp,hxx').
  """
  global _hpp_headers
  try:
    stripped = (ext.strip() for ext in val.split(','))
    _hpp_headers = set(stripped)
  except ValueError:
    PrintUsage('Header extensions must be comma separated list.')
def ProcessIncludeOrderOption(val):
  """Parses the --includeorder flag value.

  Valid values are None/'default' (keep the default ordering rule) and
  'standardcfirst' (store it in the global _include_order); anything else
  reports a usage error via PrintUsage.

  Args:
    val: the flag value, or None if the flag was not given.
  """
  if val is None or val == "default":
    pass
  elif val == "standardcfirst":
    global _include_order
    _include_order = val
  else:
    # Bug fix: the original passed the literal '%s' through uninterpolated,
    # so the offending value never appeared in the usage message.
    PrintUsage('Invalid includeorder value %s. Expected default|standardcfirst'
               % val)
def IsHeaderExtension(file_extension):
  """Returns True if file_extension (without dot) names a header file."""
  header_extensions = GetHeaderExtensions()
  return file_extension in header_extensions
def GetHeaderExtensions():
  """Returns the set of extensions (without dot) treated as headers.

  Precedence: the --headers flag, then the header-like entries of the
  --extensions flag, then the built-in default set.
  """
  if _hpp_headers:
    return _hpp_headers
  if _valid_extensions:
    return set(ext for ext in _valid_extensions if 'h' in ext)
  return {'h', 'hh', 'hpp', 'hxx', 'h++', 'cuh'}
# The allowed extensions for file names
# This is set by --extensions flag
def GetAllExtensions():
  """Returns every extension (headers plus sources) that cpplint accepts."""
  source_extensions = _valid_extensions or {'c', 'cc', 'cpp', 'cxx', 'c++', 'cu'}
  return GetHeaderExtensions().union(source_extensions)
def ProcessExtensionsOption(val):
  """Parses the --extensions flag value into the global extension set.

  Args:
    val: comma separated list of file extensions (e.g. 'cc,h').
  """
  global _valid_extensions
  try:
    extensions = [ext.strip() for ext in val.split(',')]
    _valid_extensions = set(extensions)
  except ValueError:
    # Bug fix: the two adjacent string fragments previously concatenated to
    # 'values;for example' with no separating whitespace.
    PrintUsage('Extensions should be a comma-separated list of values; '
               'for example: extensions=hpp,cpp\n'
               'This could not be parsed: "%s"' % (val,))
def GetNonHeaderExtensions():
  """Returns the set of extensions that cpplint treats as source files."""
  return GetAllExtensions() - GetHeaderExtensions()
def ParseNolintSuppressions(filename, raw_line, linenum, error):
  """Updates the global list of line error-suppressions.

  Parses any NOLINT comments on the current line, updating the global
  error_suppressions store.  Reports an error if the NOLINT comment
  was malformed.

  Args:
    filename: str, the name of the input file.
    raw_line: str, the line of input text, with comments.
    linenum: int, the number of the current line.
    error: function, an error handler.
  """
  matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
  if not matched:
    return
  # NOLINTNEXTLINE suppresses the line after the comment, not this one.
  suppressed_line = linenum + 1 if matched.group(1) else linenum
  category = matched.group(2)
  if category in (None, '(*)'):  # => "suppress all"
    _error_suppressions.setdefault(None, set()).add(suppressed_line)
    return
  if category.startswith('(') and category.endswith(')'):
    category = category[1:-1]
    if category in _ERROR_CATEGORIES:
      _error_suppressions.setdefault(category, set()).add(suppressed_line)
    elif category not in _LEGACY_ERROR_CATEGORIES:
      error(filename, linenum, 'readability/nolint', 5,
            'Unknown NOLINT error category: %s' % category)
def ProcessGlobalSuppresions(lines):
  """Updates the list of global error suppressions.

  Parses any lint directives in the file that have global effect.

  Args:
    lines: An array of strings, each representing a line of the file, with the
           last element being empty if the file is terminated with a newline.
  """
  for line in lines:
    if _SEARCH_C_FILE.search(line):
      for category in _DEFAULT_C_SUPPRESSED_CATEGORIES:
        _global_error_suppressions[category] = True
    if _SEARCH_KERNEL_FILE.search(line):
      for category in _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES:
        _global_error_suppressions[category] = True


# The public name above is misspelled; expose a correctly-spelled alias
# while keeping the original for backward compatibility with existing
# callers.
ProcessGlobalSuppressions = ProcessGlobalSuppresions
def ResetNolintSuppressions():
  """Resets the set of NOLINT suppressions to empty."""
  # Clear both the whole-file and the per-line suppression maps.
  _global_error_suppressions.clear()
  _error_suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
  """Returns true if the specified error category is suppressed on this line.

  Consults the global error_suppressions map populated by
  ParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions.

  Args:
    category: str, the category of the error.
    linenum: int, the current line number.

  Returns:
    bool, True iff the error should be suppressed due to a NOLINT comment or
    global suppression.
  """
  if _global_error_suppressions.get(category, False):
    return True
  if linenum in _error_suppressions.get(category, set()):
    return True
  # A None key holds the lines on which *every* category is suppressed.
  return linenum in _error_suppressions.get(None, set())
def Match(pattern, s):
  """Matches the string with the pattern, caching the compiled regexp."""
  # The regexp compilation caching is inlined in both Match and Search for
  # performance reasons; factoring it out into a separate function turns out
  # to be noticeably expensive.
  compiled = _regexp_compile_cache.get(pattern)
  if compiled is None:
    compiled = sre_compile.compile(pattern)
    _regexp_compile_cache[pattern] = compiled
  return compiled.match(s)
def ReplaceAll(pattern, rep, s):
  """Replaces instances of pattern in a string with a replacement.

  The compiled regex is kept in a cache shared by Match and Search.

  Args:
    pattern: regex pattern
    rep: replacement text
    s: search string

  Returns:
    string with replacements made (or original string if no replacements)
  """
  compiled = _regexp_compile_cache.get(pattern)
  if compiled is None:
    compiled = sre_compile.compile(pattern)
    _regexp_compile_cache[pattern] = compiled
  return compiled.sub(rep, s)
def Search(pattern, s):
  """Searches the string for the pattern, caching the compiled regexp."""
  compiled = _regexp_compile_cache.get(pattern)
  if compiled is None:
    compiled = sre_compile.compile(pattern)
    _regexp_compile_cache[pattern] = compiled
  return compiled.search(s)
def _IsSourceExtension(s):
  """File extension (excluding dot) matches a source file extension."""
  source_extensions = GetNonHeaderExtensions()
  return s in source_extensions
class _IncludeState(object):
  """Tracks line numbers for includes, and the order in which includes appear.

  include_list contains list of lists of (header, line number) pairs.
  It's a lists of lists rather than just one flat list to make it
  easier to update across preprocessor boundaries.

  Call CheckNextIncludeOrder() once for each header in the file, passing
  in the type constants defined above. Calls in an illegal order will
  raise an _IncludeError with an appropriate error message.
  """
  # self._section will move monotonically through this set. If it ever
  # needs to move backwards, CheckNextIncludeOrder will raise an error.
  _INITIAL_SECTION = 0
  _MY_H_SECTION = 1
  _C_SECTION = 2
  _CPP_SECTION = 3
  _OTHER_SYS_SECTION = 4
  _OTHER_H_SECTION = 5
  # Human-readable names for the _XXX_HEADER type constants (declared
  # earlier in this file), used when composing error messages.
  _TYPE_NAMES = {
      _C_SYS_HEADER: 'C system header',
      _CPP_SYS_HEADER: 'C++ system header',
      _OTHER_SYS_HEADER: 'other system header',
      _LIKELY_MY_HEADER: 'header this file implements',
      _POSSIBLE_MY_HEADER: 'header this file may implement',
      _OTHER_HEADER: 'other header',
      }
  # Human-readable names for the _XXX_SECTION constants above.
  _SECTION_NAMES = {
      _INITIAL_SECTION: "... nothing. (This can't be an error.)",
      _MY_H_SECTION: 'a header this file implements',
      _C_SECTION: 'C system header',
      _CPP_SECTION: 'C++ system header',
      _OTHER_SYS_SECTION: 'other system header',
      _OTHER_H_SECTION: 'other header',
      }
  def __init__(self):
    self.include_list = [[]]
    self._section = None
    self._last_header = None
    # ResetSection initializes _section and _last_header to real values.
    self.ResetSection('')
  def FindHeader(self, header):
    """Check if a header has already been included.

    Args:
      header: header to check.

    Returns:
      Line number of previous occurrence, or -1 if the header has not
      been seen before.
    """
    for section_list in self.include_list:
      for f in section_list:
        if f[0] == header:
          return f[1]
    return -1
  def ResetSection(self, directive):
    """Reset section checking for preprocessor directive.

    Args:
      directive: preprocessor directive (e.g. "if", "else").
    """
    # The name of the current section.
    self._section = self._INITIAL_SECTION
    # The path of last found header.
    self._last_header = ''
    # Update list of includes.  Note that we never pop from the
    # include list.
    if directive in ('if', 'ifdef', 'ifndef'):
      self.include_list.append([])
    elif directive in ('else', 'elif'):
      self.include_list[-1] = []
  def SetLastHeader(self, header_path):
    # Records the most recent header path for alphabetical-order checks.
    self._last_header = header_path
  def CanonicalizeAlphabeticalOrder(self, header_path):
    """Returns a path canonicalized for alphabetical comparison.

    - replaces "-" with "_" so they both cmp the same.
    - removes '-inl' since we don't require them to be after the main header.
    - lowercase everything, just in case.

    Args:
      header_path: Path to be canonicalized.

    Returns:
      Canonicalized path.
    """
    return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
  def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
    """Check if a header is in alphabetical order with the previous header.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      header_path: Canonicalized header to be checked.

    Returns:
      Returns true if the header is in alphabetical order.
    """
    # If previous section is different from current section, _last_header will
    # be reset to empty string, so it's always less than current header.
    #
    # If previous line was a blank line, assume that the headers are
    # intentionally sorted the way they are.
    if (self._last_header > header_path and
        Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])):
      return False
    return True
  def CheckNextIncludeOrder(self, header_type):
    """Returns a non-empty error message if the next header is out of order.

    This function also updates the internal state to be ready to check
    the next include.

    Args:
      header_type: One of the _XXX_HEADER constants defined above.

    Returns:
      The empty string if the header is in the right order, or an
      error message describing what's wrong.
    """
    # Compose the message up front; it is only returned when the section
    # would have to move backwards, which is the error condition.
    error_message = ('Found %s after %s' %
                     (self._TYPE_NAMES[header_type],
                      self._SECTION_NAMES[self._section]))
    last_section = self._section
    if header_type == _C_SYS_HEADER:
      if self._section <= self._C_SECTION:
        self._section = self._C_SECTION
      else:
        self._last_header = ''
        return error_message
    elif header_type == _CPP_SYS_HEADER:
      if self._section <= self._CPP_SECTION:
        self._section = self._CPP_SECTION
      else:
        self._last_header = ''
        return error_message
    elif header_type == _OTHER_SYS_HEADER:
      if self._section <= self._OTHER_SYS_SECTION:
        self._section = self._OTHER_SYS_SECTION
      else:
        self._last_header = ''
        return error_message
    elif header_type == _LIKELY_MY_HEADER:
      if self._section <= self._MY_H_SECTION:
        self._section = self._MY_H_SECTION
      else:
        self._section = self._OTHER_H_SECTION
    elif header_type == _POSSIBLE_MY_HEADER:
      if self._section <= self._MY_H_SECTION:
        self._section = self._MY_H_SECTION
      else:
        # This will always be the fallback because we're not sure
        # enough that the header is associated with this file.
        self._section = self._OTHER_H_SECTION
    else:
      assert header_type == _OTHER_HEADER
      self._section = self._OTHER_H_SECTION
    # Moving to a new section resets the alphabetical-order tracking.
    if last_section != self._section:
      self._last_header = ''
    return ''
class _CppLintState(object):
  """Maintains module-wide state (verbosity, filters, error counts, output)."""
  def __init__(self):
    self.verbose_level = 1  # global setting.
    self.error_count = 0    # global count of reported errors
    # filters to apply when emitting error messages
    self.filters = _DEFAULT_FILTERS[:]
    # backup of filter list. Used to restore the state after each file.
    self._filters_backup = self.filters[:]
    self.counting = 'total'  # In what way are we counting errors?
    self.errors_by_category = {}  # string to int dict storing error counts
    self.quiet = False  # Suppress non-error messages?
    # output format:
    # "emacs" - format that emacs can parse (default)
    # "eclipse" - format that eclipse can parse
    # "vs7" - format that Microsoft Visual Studio 7 can parse
    # "junit" - format that Jenkins, Bamboo, etc can parse
    # "sed" - returns a gnu sed command to fix the problem
    # "gsed" - like sed, but names the command gsed, e.g. for macOS homebrew users
    self.output_format = 'emacs'
    # For JUnit output, save errors and failures until the end so that they
    # can be written into the XML
    self._junit_errors = []
    self._junit_failures = []
  def SetOutputFormat(self, output_format):
    """Sets the output format for errors."""
    self.output_format = output_format
  def SetQuiet(self, quiet):
    """Sets the module's quiet settings, and returns the previous setting."""
    last_quiet = self.quiet
    self.quiet = quiet
    return last_quiet
  def SetVerboseLevel(self, level):
    """Sets the module's verbosity, and returns the previous setting."""
    last_verbose_level = self.verbose_level
    self.verbose_level = level
    return last_verbose_level
  def SetCountingStyle(self, counting_style):
    """Sets the module's counting options."""
    self.counting = counting_style
  def SetFilters(self, filters):
    """Sets the error-message filters.

    These filters are applied when deciding whether to emit a given
    error message.

    Args:
      filters: A string of comma-separated filters (eg "+whitespace/indent").
               Each filter should start with + or -; else we die.

    Raises:
      ValueError: The comma-separated filters did not all start with '+' or '-'.
                  E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
    """
    # Default filters always have less priority than the flag ones.
    self.filters = _DEFAULT_FILTERS[:]
    self.AddFilters(filters)
  def AddFilters(self, filters):
    """ Adds more filters to the existing list of error-message filters. """
    for filt in filters.split(','):
      clean_filt = filt.strip()
      if clean_filt:
        self.filters.append(clean_filt)
    # Validate the whole list, including any pre-existing entries.
    for filt in self.filters:
      if not (filt.startswith('+') or filt.startswith('-')):
        raise ValueError('Every filter in --filters must start with + or -'
                         ' (%s does not)' % filt)
  def BackupFilters(self):
    """ Saves the current filter list to backup storage."""
    self._filters_backup = self.filters[:]
  def RestoreFilters(self):
    """ Restores filters previously backed up."""
    self.filters = self._filters_backup[:]
  def ResetErrorCounts(self):
    """Sets the module's error statistic back to zero."""
    self.error_count = 0
    self.errors_by_category = {}
  def IncrementErrorCount(self, category):
    """Bumps the module's error statistic."""
    self.error_count += 1
    if self.counting in ('toplevel', 'detailed'):
      # 'toplevel' counting collapses 'whitespace/indent' into 'whitespace'.
      if self.counting != 'detailed':
        category = category.split('/')[0]
      if category not in self.errors_by_category:
        self.errors_by_category[category] = 0
      self.errors_by_category[category] += 1
  def PrintErrorCounts(self):
    """Print a summary of errors by category, and the total."""
    for category, count in sorted(iteritems(self.errors_by_category)):
      self.PrintInfo('Category \'%s\' errors found: %d\n' %
                     (category, count))
    if self.error_count > 0:
      self.PrintInfo('Total errors found: %d\n' % self.error_count)
  def PrintInfo(self, message):
    # _quiet does not represent --quiet flag.
    # Hide infos from stdout to keep stdout pure for machine consumption
    if not _quiet and self.output_format not in _MACHINE_OUTPUTS:
      sys.stdout.write(message)
  def PrintError(self, message):
    # JUnit errors are buffered and emitted at the end inside the XML.
    if self.output_format == 'junit':
      self._junit_errors.append(message)
    else:
      sys.stderr.write(message)
  def AddJUnitFailure(self, filename, linenum, message, category, confidence):
    # Buffers one lint failure for later inclusion in the JUnit XML report.
    self._junit_failures.append((filename, linenum, message, category,
        confidence))
  def FormatJUnitXML(self):
    # Renders the buffered errors/failures as a JUnit-style XML document.
    num_errors = len(self._junit_errors)
    num_failures = len(self._junit_failures)
    testsuite = xml.etree.ElementTree.Element('testsuite')
    testsuite.attrib['errors'] = str(num_errors)
    testsuite.attrib['failures'] = str(num_failures)
    testsuite.attrib['name'] = 'cpplint'
    if num_errors == 0 and num_failures == 0:
      # JUnit consumers expect at least one testcase; emit a synthetic pass.
      testsuite.attrib['tests'] = str(1)
      xml.etree.ElementTree.SubElement(testsuite, 'testcase', name='passed')
    else:
      testsuite.attrib['tests'] = str(num_errors + num_failures)
      if num_errors > 0:
        testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase')
        testcase.attrib['name'] = 'errors'
        error = xml.etree.ElementTree.SubElement(testcase, 'error')
        error.text = '\n'.join(self._junit_errors)
      if num_failures > 0:
        # Group failures by file
        failed_file_order = []
        failures_by_file = {}
        for failure in self._junit_failures:
          failed_file = failure[0]
          if failed_file not in failed_file_order:
            failed_file_order.append(failed_file)
            failures_by_file[failed_file] = []
          failures_by_file[failed_file].append(failure)
        # Create a testcase for each file
        for failed_file in failed_file_order:
          failures = failures_by_file[failed_file]
          testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase')
          testcase.attrib['name'] = failed_file
          failure = xml.etree.ElementTree.SubElement(testcase, 'failure')
          template = '{0}: {1} [{2}] [{3}]'
          texts = [template.format(f[1], f[2], f[3], f[4]) for f in failures]
          failure.text = '\n'.join(texts)
    xml_decl = '<?xml version="1.0" encoding="UTF-8" ?>\n'
    return xml_decl + xml.etree.ElementTree.tostring(testsuite, 'utf-8').decode('utf-8')
# The module-wide singleton holding all lint state, plus thin module-level
# convenience wrappers around it (used by flag parsing and the checkers).
_cpplint_state = _CppLintState()
def _OutputFormat():
  """Gets the module's output format."""
  return _cpplint_state.output_format
def _SetOutputFormat(output_format):
  """Sets the module's output format."""
  _cpplint_state.SetOutputFormat(output_format)
def _Quiet():
  """Returns the module's quiet setting."""
  return _cpplint_state.quiet
def _SetQuiet(quiet):
  """Set the module's quiet status, and return previous setting."""
  return _cpplint_state.SetQuiet(quiet)
def _VerboseLevel():
  """Returns the module's verbosity setting."""
  return _cpplint_state.verbose_level
def _SetVerboseLevel(level):
  """Sets the module's verbosity, and returns the previous setting."""
  return _cpplint_state.SetVerboseLevel(level)
def _SetCountingStyle(level):
  """Sets the module's counting options."""
  _cpplint_state.SetCountingStyle(level)
def _Filters():
  """Returns the module's list of output filters, as a list."""
  return _cpplint_state.filters
def _SetFilters(filters):
  """Sets the module's error-message filters.

  These filters are applied when deciding whether to emit a given
  error message.

  Args:
    filters: A string of comma-separated filters (eg "whitespace/indent").
             Each filter should start with + or -; else we die.
  """
  _cpplint_state.SetFilters(filters)
def _AddFilters(filters):
  """Adds more filter overrides.

  Unlike _SetFilters, this function does not reset the current list of filters
  available.

  Args:
    filters: A string of comma-separated filters (eg "whitespace/indent").
             Each filter should start with + or -; else we die.
  """
  _cpplint_state.AddFilters(filters)
def _BackupFilters():
  """ Saves the current filter list to backup storage."""
  _cpplint_state.BackupFilters()
def _RestoreFilters():
  """ Restores filters previously backed up."""
  _cpplint_state.RestoreFilters()
class _FunctionState(object):
"""Tracks current function name and the number of lines in its body."""
_NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
_TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER.
def __init__(self):
self.in_a_function = False
self.lines_in_function = 0
self.current_function = ''
def Begin(self, function_name):
"""Start analyzing function body.
Args:
function_name: The name of the function being tracked.
"""
self.in_a_function = True
self.lines_in_function = 0
self.current_function = function_name
def Count(self):
"""Count line in current function body."""
if self.in_a_function:
self.lines_in_function += 1
def Check(self, error, filename, linenum):
"""Report if too many lines in function body.
Args:
error: The function to call with any errors found.
filename: The name of the current file.
linenum: The number of the line to check.
"""
if not self.in_a_function:
return
if Match(r'T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
trigger = base_trigger * 2**_VerboseLevel()
if self.lines_in_function > trigger:
error_level = int(math.log(self.lines_in_function / base_trigger, 2))
# 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
if error_level > 5:
error_level = 5
error(filename, linenum, 'readability/fn_size', error_level,
'Small and focused functions are preferred:'
' %s has %d non-comment lines'
' (error triggered by exceeding %d lines).' % (
self.current_function, self.lines_in_function, trigger))
def End(self):
"""Stop analyzing function body."""
self.in_a_function = False
class _IncludeError(Exception):
"""Indicates a problem with the include order in a file."""
pass
class FileInfo(object):
  """Provides utility functions for filenames.

  FileInfo provides easy access to the components of a file's path
  relative to the project root.
  """
  def __init__(self, filename):
    self._filename = filename
  def FullName(self):
    """Make Windows paths like Unix."""
    return os.path.abspath(self._filename).replace('\\', '/')
  def RepositoryName(self):
    r"""FullName after removing the local path to the repository.

    If we have a real absolute path name here we can try to do something smart:
    detecting the root of the checkout and truncating /path/to/checkout from
    the name so that we get header guards that don't include things like
    "C:\\Documents and Settings\\..." or "/home/username/..." in them and thus
    people on different computers who have checked the source out to different
    locations won't see bogus errors.
    """
    fullname = self.FullName()
    if os.path.exists(fullname):
      project_dir = os.path.dirname(fullname)
      # If the user specified a repository path, it exists, and the file is
      # contained in it, use the specified repository path
      if _repository:
        repo = FileInfo(_repository).FullName()
        root_dir = project_dir
        while os.path.exists(root_dir):
          # allow case insensitive compare on Windows
          if os.path.normcase(root_dir) == os.path.normcase(repo):
            return os.path.relpath(fullname, root_dir).replace('\\', '/')
          one_up_dir = os.path.dirname(root_dir)
          # Stop once dirname() reaches a fixed point (filesystem root).
          if one_up_dir == root_dir:
            break
          root_dir = one_up_dir
      if os.path.exists(os.path.join(project_dir, ".svn")):
        # If there's a .svn file in the current directory, we recursively look
        # up the directory tree for the top of the SVN checkout
        root_dir = project_dir
        one_up_dir = os.path.dirname(root_dir)
        while os.path.exists(os.path.join(one_up_dir, ".svn")):
          root_dir = os.path.dirname(root_dir)
          one_up_dir = os.path.dirname(one_up_dir)
        prefix = os.path.commonprefix([root_dir, project_dir])
        return fullname[len(prefix) + 1:]
      # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
      # searching up from the current path.
      root_dir = current_dir = os.path.dirname(fullname)
      while current_dir != os.path.dirname(current_dir):
        if (os.path.exists(os.path.join(current_dir, ".git")) or
            os.path.exists(os.path.join(current_dir, ".hg")) or
            os.path.exists(os.path.join(current_dir, ".svn"))):
          root_dir = current_dir
        current_dir = os.path.dirname(current_dir)
      if (os.path.exists(os.path.join(root_dir, ".git")) or
          os.path.exists(os.path.join(root_dir, ".hg")) or
          os.path.exists(os.path.join(root_dir, ".svn"))):
        prefix = os.path.commonprefix([root_dir, project_dir])
        return fullname[len(prefix) + 1:]
    # Don't know what to do; header guard warnings may be wrong...
    return fullname
  def Split(self):
    """Splits the file into the directory, basename, and extension.

    For 'chrome/browser/browser.cc', Split() would
    return ('chrome/browser', 'browser', '.cc')

    Returns:
      A tuple of (directory, basename, extension).
    """
    googlename = self.RepositoryName()
    project, rest = os.path.split(googlename)
    return (project,) + os.path.splitext(rest)
  def BaseName(self):
    """File base name - text after the final slash, before the final period."""
    return self.Split()[1]
  def Extension(self):
    """File extension - text following the final period, includes that period."""
    return self.Split()[2]
  def NoExtension(self):
    """File has no source file extension."""
    return '/'.join(self.Split()[0:2])
  def IsSource(self):
    """File has a source file extension."""
    # Extension() includes the leading dot; strip it before the lookup.
    return _IsSourceExtension(self.Extension()[1:])
def _ShouldPrintError(category, confidence, linenum):
  """If confidence >= verbose, category passes filter and is not suppressed."""
  # There are three ways we might decide not to print an error message:
  # a "NOLINT(category)" comment appears in the source,
  # the verbosity level isn't high enough, or the filters filter it out.
  if IsErrorSuppressedByNolint(category, linenum):
    return False
  if confidence < _cpplint_state.verbose_level:
    return False
  # Filters are applied in order; the last matching one wins.
  is_filtered = False
  for one_filter in _Filters():
    if one_filter.startswith('-'):
      filtered_state = True
    elif one_filter.startswith('+'):
      filtered_state = False
    else:
      assert False  # should have been checked for in SetFilter.
    if category.startswith(one_filter[1:]):
      is_filtered = filtered_state
  return not is_filtered
def Error(filename, linenum, category, confidence, message):
  """Logs the fact we've found a lint error.

  We log where the error was found, and also our confidence in the error,
  that is, how certain we are this is a legitimate style regression, and
  not a misidentification or a use that's sometimes justified.

  False positives can be suppressed by the use of
  "cpplint(category)" comments on the offending line.  These are
  parsed into _error_suppressions.

  Args:
    filename: The name of the file containing the error.
    linenum: The number of the line containing the error.
    category: A string used to describe the "category" this bug
              falls under: "whitespace", say, or "runtime". Categories
              may have a hierarchy separated by slashes: "whitespace/indent".
    confidence: A number from 1-5 representing a confidence score for
                the error, with 5 meaning that we are certain of the problem,
                and 1 meaning that it could be a legitimate construct.
    message: The error message.
  """
  if _ShouldPrintError(category, confidence, linenum):
    _cpplint_state.IncrementErrorCount(category)
    # Each output format has its own line layout; vs7/junit/sed go through
    # the state object, the others write directly to stderr.
    if _cpplint_state.output_format == 'vs7':
      _cpplint_state.PrintError('%s(%s): error cpplint: [%s] %s [%d]\n' % (
          filename, linenum, category, message, confidence))
    elif _cpplint_state.output_format == 'eclipse':
      sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
          filename, linenum, message, category, confidence))
    elif _cpplint_state.output_format == 'junit':
      _cpplint_state.AddJUnitFailure(filename, linenum, message, category,
          confidence)
    elif _cpplint_state.output_format in ['sed', 'gsed']:
      # Emit a runnable sed command when a mechanical fix is known for this
      # message; otherwise emit the error as a shell comment on stderr.
      if message in _SED_FIXUPS:
        sys.stdout.write(_cpplint_state.output_format + " -i '%s%s' %s # %s  [%s] [%d]\n" % (
            linenum, _SED_FIXUPS[message], filename, message, category, confidence))
      else:
        sys.stderr.write('# %s:%s:  "%s"  [%s] [%d]\n' % (
            filename, linenum, message, category, confidence))
    else:
      final_message = '%s:%s:  %s  [%s] [%d]\n' % (
          filename, linenum, message, category, confidence)
      sys.stderr.write(final_message)
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
    r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Match a single C style comment on the same line.
_RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
# Matches multi-line C style comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care of space removals tools so we can handle comments inside
# statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side,
# if this doesn't work we try on left side but only if there's a non-character
# on the right.
# Used by CleanseComments() to strip /* ... */ comments from a single line.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
    r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
    _RE_PATTERN_C_COMMENTS + r'\s+|' +
    r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
    _RE_PATTERN_C_COMMENTS + r')')
def IsCppString(line):
  """Does line terminate so, that the next symbol is in string constant.

  This function does not consider single-line nor multi-line comments.

  Args:
    line: is a partial line of code starting from the 0..n.

  Returns:
    True, if next character appended to 'line' is inside a
    string constant.
  """
  # Collapse escaped backslashes first so that \\" is not mistaken for \".
  line = line.replace(r'\\', 'XX')
  # Count quotes that actually open or close a string: all double quotes,
  # minus escaped ones, minus double quotes that are char literals ('"').
  open_quotes = line.count('"') - line.count(r'\"') - line.count("'\"'")
  return (open_quotes & 1) == 1
def CleanseRawStrings(raw_lines):
  """Removes C++11 raw strings from lines.

    Before:
      static const char kData[] = R"(
          multi-line string
          )";

    After:
      static const char kData[] = ""
          (replaced by blank line)
          "";

  Args:
    raw_lines: list of raw lines.

  Returns:
    list of lines with C++11 raw strings replaced by empty strings.
  """
  # Non-None while scanning the interior of a multi-line raw string; holds
  # the closing token, e.g. ')delim"'.
  delimiter = None
  lines_without_raw_strings = []
  for line in raw_lines:
    if delimiter:
      # Inside a raw string, look for the end
      end = line.find(delimiter)
      if end >= 0:
        # Found the end of the string, match leading space for this
        # line and resume copying the original lines, and also insert
        # a "" on the last line.
        leading_space = Match(r'^(\s*)\S', line)
        line = leading_space.group(1) + '""' + line[end + len(delimiter):]
        delimiter = None
      else:
        # Haven't found the end yet, append a blank line.
        line = '""'
    # Look for beginning of a raw string, and replace them with
    # empty strings.  This is done in a loop to handle multiple raw
    # strings on the same line.
    while delimiter is None:
      # Look for beginning of a raw string.
      # See 2.14.15 [lex.string] for syntax.
      #
      # Once we have matched a raw string, we check the prefix of the
      # line to make sure that the line is not part of a single line
      # comment.  It's done this way because we remove raw strings
      # before removing comments as opposed to removing comments
      # before removing raw strings.  This is because there are some
      # cpplint checks that requires the comments to be preserved, but
      # we don't want to check comments that are inside raw strings.
      matched = Match(r'^(.*?)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
      if (matched and
          not Match(r'^([^\'"]|\'(\\.|[^\'])*\'|"(\\.|[^"])*")*//',
                    matched.group(1))):
        delimiter = ')' + matched.group(2) + '"'
        end = matched.group(3).find(delimiter)
        if end >= 0:
          # Raw string ended on same line
          line = (matched.group(1) + '""' +
                  matched.group(3)[end + len(delimiter):])
          delimiter = None
        else:
          # Start of a multi-line raw string
          line = matched.group(1) + '""'
      else:
        break
    lines_without_raw_strings.append(line)
  # TODO(unknown): if delimiter is not None here, we might want to
  # emit a warning for unterminated string.
  return lines_without_raw_strings
def FindNextMultiLineCommentStart(lines, lineix):
  """Find the beginning marker for a multiline comment."""
  total = len(lines)
  while lineix < total:
    stripped = lines[lineix].strip()
    if stripped.startswith('/*'):
      # Only return this marker if the comment goes beyond this line
      if stripped.find('*/', 2) < 0:
        return lineix
    lineix += 1
  return total
def FindNextMultiLineCommentEnd(lines, lineix):
  """We are inside a comment, find the end marker."""
  total = len(lines)
  while lineix < total:
    if lines[lineix].strip().endswith('*/'):
      return lineix
    lineix += 1
  # No end marker found; callers treat len(lines) as "not found".
  return total
def RemoveMultiLineCommentsFromRange(lines, begin, end):
  """Clears a range of lines for multi-line comments."""
  # Having // <empty> comments makes the lines non-empty, so we will not get
  # unnecessary blank line warnings later in the code.
  lines[begin:end] = ['/**/'] * (end - begin)
def RemoveMultiLineComments(filename, lines, error):
  """Removes multiline (c-style) comments from lines."""
  lineix = 0
  while lineix < len(lines):
    begin = FindNextMultiLineCommentStart(lines, lineix)
    if begin >= len(lines):
      return
    end = FindNextMultiLineCommentEnd(lines, begin)
    if end >= len(lines):
      # Report once and bail; the remainder of the file is inside the comment.
      error(filename, begin + 1, 'readability/multiline_comment', 5,
            'Could not find end of multi-line comment')
      return
    RemoveMultiLineCommentsFromRange(lines, begin, end + 1)
    lineix = end + 1
def CleanseComments(line):
  """Removes //-comments and single-line C-style /* */ comments.

  Args:
    line: A line of C++ source.

  Returns:
    The line with single-line comments removed.
  """
  comment_start = line.find('//')
  if comment_start != -1:
    # A '//' inside a string literal is not a comment.
    if not IsCppString(line[:comment_start]):
      line = line[:comment_start].rstrip()
  # get rid of /* ... */
  return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
class CleansedLines(object):
  """Holds 4 copies of all lines with different preprocessing applied to them.

  1) elided member contains lines without strings and comments.
  2) lines member contains lines without comments.
  3) raw_lines member contains all the lines without processing.
  4) lines_without_raw_strings member is same as raw_lines, but with C++11 raw
     strings removed.
  All these members are of <type 'list'>, and of the same length, so entry N
  in each list is a different view of the same physical source line N.
  """
  def __init__(self, lines):
    # lines: the raw source lines; stored as-is in raw_lines.
    self.elided = []
    self.lines = []
    self.raw_lines = lines
    self.num_lines = len(lines)
    self.lines_without_raw_strings = CleanseRawStrings(lines)
    for linenum in range(len(self.lines_without_raw_strings)):
      self.lines.append(CleanseComments(
          self.lines_without_raw_strings[linenum]))
      # Collapse string literals before stripping comments so that '//' or
      # '/*' inside a quoted string is not mistaken for a comment marker.
      elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
      self.elided.append(CleanseComments(elided))
  def NumLines(self):
    """Returns the number of lines represented."""
    return self.num_lines
  @staticmethod
  def _CollapseStrings(elided):
    """Collapses strings and chars on a line to simple "" or '' blocks.

    We nix strings first so we're not fooled by text like '"http://"'

    Args:
      elided: The line being processed.

    Returns:
      The line with collapsed strings.
    """
    if _RE_PATTERN_INCLUDE.match(elided):
      # #include lines may legitimately contain quotes or angle brackets;
      # leave them untouched.
      return elided
    # Remove escaped characters first to make quote/single quote collapsing
    # basic. Things that look like escaped characters shouldn't occur
    # outside of strings and chars.
    elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
    # Replace quoted strings and digit separators. Both single quotes
    # and double quotes are processed in the same loop, otherwise
    # nested quotes wouldn't work.
    collapsed = ''
    while True:
      # Find the first quote character
      match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
      if not match:
        # No quotes remain; keep the rest of the line verbatim.
        collapsed += elided
        break
      head, quote, tail = match.groups()
      if quote == '"':
        # Collapse double quoted strings
        second_quote = tail.find('"')
        if second_quote >= 0:
          collapsed += head + '""'
          elided = tail[second_quote + 1:]
        else:
          # Unmatched double quote, don't bother processing the rest
          # of the line since this is probably a multiline string.
          collapsed += elided
          break
      else:
        # Found single quote, check nearby text to eliminate digit separators.
        #
        # There is no special handling for floating point here, because
        # the integer/fractional/exponent parts would all be parsed
        # correctly as long as there are digits on both sides of the
        # separator. So we are fine as long as we don't see something
        # like "0.'3" (gcc 4.9.0 will not allow this literal).
        if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
          # C++14 digit separator (e.g. 1'000'000): strip the quotes from
          # the numeric literal rather than collapsing it as a char.
          match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
          collapsed += head + match_literal.group(1).replace("'", '')
          elided = match_literal.group(2)
        else:
          second_quote = tail.find('\'')
          if second_quote >= 0:
            collapsed += head + "''"
            elided = tail[second_quote + 1:]
          else:
            # Unmatched single quote
            collapsed += elided
            break
    return collapsed
def FindEndOfExpressionInLine(line, startpos, stack):
  """Find the position just after the end of current parenthesized expression.

  Walks the line character by character, maintaining `stack` of open
  brackets.  '<' is handled tentatively since it may be either a template
  argument list opener or a comparison/shift operator.

  Args:
    line: a CleansedLines line.
    startpos: start searching at this position.
    stack: nesting stack at startpos.

  Returns:
    On finding matching end: (index just after matching end, None)
    On finding an unclosed expression: (-1, None)
    Otherwise: (-1, new stack at end of this line)
  """
  for i in xrange(startpos, len(line)):
    char = line[i]
    if char in '([{':
      # Found start of parenthesized expression, push to expression stack
      stack.append(char)
    elif char == '<':
      # Found potential start of template argument list
      if i > 0 and line[i - 1] == '<':
        # Left shift operator
        if stack and stack[-1] == '<':
          stack.pop()
          if not stack:
            return (-1, None)
      elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
        # operator<, don't add to stack
        continue
      else:
        # Tentative start of template argument list
        stack.append('<')
    elif char in ')]}':
      # Found end of parenthesized expression.
      #
      # If we are currently expecting a matching '>', the pending '<'
      # must have been an operator.  Remove them from expression stack.
      while stack and stack[-1] == '<':
        stack.pop()
      if not stack:
        return (-1, None)
      if ((stack[-1] == '(' and char == ')') or
          (stack[-1] == '[' and char == ']') or
          (stack[-1] == '{' and char == '}')):
        stack.pop()
        if not stack:
          # This close matched the bracket the caller started from.
          return (i + 1, None)
      else:
        # Mismatched parentheses
        return (-1, None)
    elif char == '>':
      # Found potential end of template argument list.
      # Ignore "->" and operator functions
      if (i > 0 and
          (line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
        continue
      # Pop the stack if there is a matching '<'.  Otherwise, ignore
      # this '>' since it must be an operator.
      if stack:
        if stack[-1] == '<':
          stack.pop()
          if not stack:
            return (i + 1, None)
    elif char == ';':
      # Found something that look like end of statements.  If we are currently
      # expecting a '>', the matching '<' must have been an operator, since
      # template argument list should not contain statements.
      while stack and stack[-1] == '<':
        stack.pop()
      if not stack:
        return (-1, None)
  # Did not find end of expression or unbalanced parentheses on this line
  return (-1, stack)
def CloseExpression(clean_lines, linenum, pos):
  """If input points to ( or { or [ or <, finds the position that closes it.

  If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
  linenum/pos that correspond to the closing of the expression.

  TODO(unknown): cpplint spends a fair bit of time matching parentheses.
  Ideally we would want to index all opening and closing parentheses once
  and have CloseExpression be just a simple lookup, but due to preprocessor
  tricks, this is not so easy.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    pos: A position on the line.

  Returns:
    A tuple (line, linenum, pos) pointer *past* the closing brace, or
    (line, len(lines), -1) if we never find a close.  Note we ignore
    strings and comments when matching; and the line we return is the
    'cleansed' line at linenum.
  """
  line = clean_lines.elided[linenum]
  # '<<' and '<=' are operators, never expression openers.
  if line[pos] not in '({[<' or Match(r'<[<=]', line[pos:]):
    return (line, clean_lines.NumLines(), -1)
  # Scan the first line starting at pos, then subsequent lines from column 0,
  # carrying the nesting stack forward until the match is found or we give up.
  end_pos, stack = FindEndOfExpressionInLine(line, pos, [])
  while end_pos <= -1:
    if not stack or linenum >= clean_lines.NumLines() - 1:
      # Unclosed expression, or ran out of lines: give up.
      return (line, clean_lines.NumLines(), -1)
    linenum += 1
    line = clean_lines.elided[linenum]
    end_pos, stack = FindEndOfExpressionInLine(line, 0, stack)
  return (line, linenum, end_pos)
def FindStartOfExpressionInLine(line, endpos, stack):
  """Find position at the matching start of current expression.

  This is almost the reverse of FindEndOfExpressionInLine, but note
  that the input position and returned position differs by 1.

  Args:
    line: a CleansedLines line.
    endpos: start searching at this position.
    stack: nesting stack at endpos.

  Returns:
    On finding matching start: (index at matching start, None)
    On finding an unclosed expression: (-1, None)
    Otherwise: (-1, new stack at beginning of this line)
  """
  i = endpos
  while i >= 0:
    char = line[i]
    if char in ')]}':
      # Found end of expression, push to expression stack
      stack.append(char)
    elif char == '>':
      # Found potential end of template argument list.
      #
      # Ignore it if it's a "->" or ">=" or "operator>"
      if (i > 0 and
          (line[i - 1] == '-' or
           Match(r'\s>=\s', line[i - 1:]) or
           Search(r'\boperator\s*$', line[0:i]))):
        i -= 1
      else:
        stack.append('>')
    elif char == '<':
      # Found potential start of template argument list
      if i > 0 and line[i - 1] == '<':
        # Left shift operator
        i -= 1
      else:
        # If there is a matching '>', we can pop the expression stack.
        # Otherwise, ignore this '<' since it must be an operator.
        if stack and stack[-1] == '>':
          stack.pop()
          if not stack:
            return (i, None)
    elif char in '([{':
      # Found start of expression.
      #
      # If there are any unmatched '>' on the stack, they must be
      # operators.  Remove those.
      while stack and stack[-1] == '>':
        stack.pop()
      if not stack:
        return (-1, None)
      if ((char == '(' and stack[-1] == ')') or
          (char == '[' and stack[-1] == ']') or
          (char == '{' and stack[-1] == '}')):
        stack.pop()
        if not stack:
          # This opener matched the bracket the caller started from.
          return (i, None)
      else:
        # Mismatched parentheses
        return (-1, None)
    elif char == ';':
      # Found something that look like end of statements.  If we are currently
      # expecting a '<', the matching '>' must have been an operator, since
      # template argument list should not contain statements.
      while stack and stack[-1] == '>':
        stack.pop()
      if not stack:
        return (-1, None)
    i -= 1
  return (-1, stack)
def ReverseCloseExpression(clean_lines, linenum, pos):
  """If input points to ) or } or ] or >, finds the position that opens it.

  If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
  linenum/pos that correspond to the opening of the expression.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    pos: A position on the line.

  Returns:
    A tuple (line, linenum, pos) pointer *at* the opening brace, or
    (line, 0, -1) if we never find the matching opening brace.  Note
    we ignore strings and comments when matching; and the line we
    return is the 'cleansed' line at linenum.
  """
  line = clean_lines.elided[linenum]
  if line[pos] not in ')}]>':
    return (line, 0, -1)
  # Scan this line backward from pos, then earlier lines from their last
  # column, carrying the nesting stack until the opener is found.
  start_pos, stack = FindStartOfExpressionInLine(line, pos, [])
  while start_pos <= -1:
    if not stack or linenum <= 0:
      # Unbalanced expression, or reached the top of the file: give up.
      return (line, 0, -1)
    linenum -= 1
    line = clean_lines.elided[linenum]
    start_pos, stack = FindStartOfExpressionInLine(line, len(line) - 1, stack)
  return (line, linenum, start_pos)
def CheckForCopyright(filename, lines, error):
  """Logs an error if no Copyright message appears at the top of the file."""
  # Only the first ten real lines are searched; index 0 of `lines` is a
  # placeholder inserted by the caller, so the scan covers lines 1..10.
  header = lines[1:11]
  if not any(re.search(r'Copyright', candidate, re.I) for candidate in header):
    error(filename, 0, 'legal/copyright', 5,
          'No copyright message found. '
          'You should have a line: "Copyright [year] <Copyright Owner>"')
def GetIndentLevel(line):
  """Return the number of leading spaces in line.

  Args:
    line: A string to check.

  Returns:
    An integer count of leading spaces, possibly zero.  Lines that are
    blank or contain only spaces report an indent of zero.
  """
  leading = re.match(r'^( *)\S', line)
  return len(leading.group(1)) if leading else 0
def PathSplitToList(path):
  """Returns the path split into a list by the separator.

  Args:
    path: An absolute or relative path (e.g. '/a/b/c/' or '../a')

  Returns:
    A list of path components (e.g. ['a', 'b', 'c']).
  """
  components = []
  head, tail = os.path.split(path)
  # os.path.split reaches a fixed point at an absolute root (head == path)
  # or at a single relative component (tail == path); stop there.
  while head != path and tail != path:
    components.append(tail)
    path = head
    head, tail = os.path.split(path)
  components.append(head if head == path else tail)
  components.reverse()
  return components
def GetHeaderGuardCPPVariable(filename):
  """Returns the CPP variable that should be used as a header guard.

  Args:
    filename: The name of a C++ header file.

  Returns:
    The CPP variable that should be used as a header guard in the
    named file (path components upper-cased, non-alphanumerics mapped
    to '_', with a trailing '_').
  """
  # Restores original filename in case that cpplint is invoked from Emacs's
  # flymake.
  filename = re.sub(r'_flymake\.h$', '.h', filename)
  filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
  # Replace 'c++' with 'cpp'.
  filename = filename.replace('C++', 'cpp').replace('c++', 'cpp')
  fileinfo = FileInfo(filename)
  file_path_from_root = fileinfo.RepositoryName()
  def FixupPathFromRoot():
    # Applies the global --root flag (module-level _root) to the
    # repository-relative path; _root_debug enables stderr tracing.
    if _root_debug:
      sys.stderr.write("\n_root fixup, _root = '%s', repository name = '%s'\n"
          % (_root, fileinfo.RepositoryName()))
    # Process the file path with the --root flag if it was set.
    if not _root:
      if _root_debug:
        sys.stderr.write("_root unspecified\n")
      return file_path_from_root
    def StripListPrefix(lst, prefix):
      # f(['x', 'y'], ['w, z']) -> None  (not a valid prefix)
      if lst[:len(prefix)] != prefix:
        return None
      # f(['a, 'b', 'c', 'd'], ['a', 'b']) -> ['c', 'd']
      return lst[(len(prefix)):]
    # root behavior:
    #   --root=subdir , lstrips subdir from the header guard
    maybe_path = StripListPrefix(PathSplitToList(file_path_from_root),
                                 PathSplitToList(_root))
    if _root_debug:
      sys.stderr.write(("_root lstrip (maybe_path=%s, file_path_from_root=%s," +
          " _root=%s)\n") % (maybe_path, file_path_from_root, _root))
    if maybe_path:
      return os.path.join(*maybe_path)
    # --root=.. , will prepend the outer directory to the header guard
    full_path = fileinfo.FullName()
    # adapt slashes for windows
    root_abspath = os.path.abspath(_root).replace('\\', '/')
    maybe_path = StripListPrefix(PathSplitToList(full_path),
                                 PathSplitToList(root_abspath))
    if _root_debug:
      sys.stderr.write(("_root prepend (maybe_path=%s, full_path=%s, " +
          "root_abspath=%s)\n") % (maybe_path, full_path, root_abspath))
    if maybe_path:
      return os.path.join(*maybe_path)
    if _root_debug:
      sys.stderr.write("_root ignore, returning %s\n" % (file_path_from_root))
    #   --root=FAKE_DIR is ignored
    return file_path_from_root
  file_path_from_root = FixupPathFromRoot()
  return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_'
def CheckForHeaderGuard(filename, clean_lines, error):
  """Checks that the file contains a header guard.

  Logs an error if no #ifndef header guard is present.  For other
  headers, checks that the full pathname is used.

  Args:
    filename: The name of the C++ header file.
    clean_lines: A CleansedLines instance containing the file.
    error: The function to call with any errors found.
  """
  # Don't check for header guards if there are error suppression
  # comments somewhere in this file.
  #
  # Because this is silencing a warning for a nonexistent line, we
  # only support the very specific NOLINT(build/header_guard) syntax,
  # and not the general NOLINT or NOLINT(*) syntax.
  raw_lines = clean_lines.lines_without_raw_strings
  for i in raw_lines:
    if Search(r'//\s*NOLINT\(build/header_guard\)', i):
      return
  # Allow pragma once instead of header guards
  for i in raw_lines:
    if Search(r'^\s*#pragma\s+once', i):
      return
  cppvar = GetHeaderGuardCPPVariable(filename)
  ifndef = ''
  ifndef_linenum = 0
  define = ''
  endif = ''
  endif_linenum = 0
  for linenum, line in enumerate(raw_lines):
    linesplit = line.split()
    if len(linesplit) >= 2:
      # find the first occurrence of #ifndef and #define, save arg
      if not ifndef and linesplit[0] == '#ifndef':
        # set ifndef to the header guard presented on the #ifndef line.
        ifndef = linesplit[1]
        ifndef_linenum = linenum
      if not define and linesplit[0] == '#define':
        define = linesplit[1]
    # find the last occurrence of #endif, save entire line
    if line.startswith('#endif'):
      endif = line
      endif_linenum = linenum
  # A guard requires a matching #ifndef/#define pair on the same symbol.
  if not ifndef or not define or ifndef != define:
    error(filename, 0, 'build/header_guard', 5,
          'No #ifndef header guard found, suggested CPP variable is: %s' %
          cppvar)
    return
  # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
  # for backward compatibility.
  if ifndef != cppvar:
    error_level = 0
    if ifndef != cppvar + '_':
      error_level = 5
    ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum,
                            error)
    error(filename, ifndef_linenum, 'build/header_guard', error_level,
          '#ifndef header guard has wrong style, please use: %s' % cppvar)
  # Check for "//" comments on endif line.
  ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum,
                          error)
  match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif)
  if match:
    if match.group(1) == '_':
      # Issue low severity warning for deprecated double trailing underscore
      error(filename, endif_linenum, 'build/header_guard', 0,
            '#endif line should be "#endif // %s"' % cppvar)
    return
  # Didn't find the corresponding "//" comment.  If this file does not
  # contain any "//" comments at all, it could be that the compiler
  # only wants "/**/" comments, look for those instead.
  no_single_line_comments = True
  for i in xrange(1, len(raw_lines) - 1):
    line = raw_lines[i]
    if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line):
      no_single_line_comments = False
      break
  if no_single_line_comments:
    match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif)
    if match:
      if match.group(1) == '_':
        # Low severity warning for double trailing underscore
        error(filename, endif_linenum, 'build/header_guard', 0,
              '#endif line should be "#endif /* %s */"' % cppvar)
      return
  # Didn't find anything
  error(filename, endif_linenum, 'build/header_guard', 5,
        '#endif line should be "#endif // %s"' % cppvar)
def CheckHeaderFileIncluded(filename, include_state, error):
  """Logs an error if a source file does not include its header.

  Args:
    filename: The name of the source file being processed.
    include_state: Object whose include_list holds sections of
        (include_text, linenum) entries -- presumably an _IncludeState;
        only include_list is read here.
    error: The function to call with any errors found.
  """
  # Do not check test files
  fileinfo = FileInfo(filename)
  if Search(_TEST_FILE_SUFFIX, fileinfo.BaseName()):
    return
  # The stem is identical for every candidate header extension, so compute
  # it once instead of recomputing inside the loop.
  basefilename = filename[0:len(filename) - len(fileinfo.Extension())]
  for ext in GetHeaderExtensions():
    headerfile = basefilename + '.' + ext
    if not os.path.exists(headerfile):
      continue
    headername = FileInfo(headerfile).RepositoryName()
    first_include = None
    include_uses_unix_dir_aliases = False
    for section_list in include_state.include_list:
      for f in section_list:
        include_text = f[0]
        if "./" in include_text:
          include_uses_unix_dir_aliases = True
        # Substring match in either direction tolerates partial paths.
        if headername in include_text or include_text in headername:
          return
        if not first_include:
          first_include = f[1]
    message = '%s should include its header file %s' % (fileinfo.RepositoryName(), headername)
    if include_uses_unix_dir_aliases:
      message += ". Relative paths like . and .. are not allowed."
    error(filename, first_include, 'build/include', 5, message)
def CheckForBadCharacters(filename, lines, error):
  """Logs an error for each line containing bad characters.

  Two kinds of bad characters:

  1. Unicode replacement characters: These indicate that either the file
  contained invalid UTF-8 (likely) or Unicode replacement characters (which
  it shouldn't).  Note that it's possible for this to throw off line
  numbering if the invalid UTF-8 occurred adjacent to a newline.

  2. NUL bytes.  These are problematic for some tools.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  # The decode is constant across lines; compute it once up front.
  replacement_char = unicode_escape_decode('\ufffd')
  for linenum, line in enumerate(lines):
    if replacement_char in line:
      error(filename, linenum, 'readability/utf8', 5,
            'Line contains invalid UTF-8 (or Unicode replacement character).')
    if '\0' in line:
      error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
def CheckForNewlineAtEOF(filename, lines, error):
  """Logs an error if there is no newline char at the end of the file.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  # The caller built `lines` by appending two newlines to the file text and
  # splitting on '\n'; a file that ends in '\n' therefore yields an empty
  # string at index -2.  A short array or a non-empty lines[-2] means the
  # final newline is missing.
  ends_with_newline = len(lines) >= 3 and not lines[-2]
  if not ends_with_newline:
    error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
          'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
  """Logs an error if we see /* ... */ or "..." that extend past one line.

  /* ... */ comments are legit inside macros, for one line.  Otherwise, we
  prefer // comments, so it's ok to warn about the other.  Likewise, it's ok
  for strings to extend across multiple lines, as long as a line continuation
  character (backslash) terminates each line.  Although not currently
  prohibited by the C++ style guide, it's ugly and unnecessary.  We don't do
  well with either in this lint program, so we warn about both.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Strip escaped backslashes first so a stray second (escaped) slash does
  # not make the later \" detection misfire.
  line = clean_lines.elided[linenum].replace('\\\\', '')
  if line.count('/*') > line.count('*/'):
    error(filename, linenum, 'readability/multiline_comment', 5,
          'Complex multi-line /*...*/-style comment found. '
          'Lint may give bogus warnings. '
          'Consider replacing these with //-style comments, '
          'with #if 0...#endif, '
          'or with more clearly structured multi-line comments.')
  unescaped_quotes = line.count('"') - line.count('\\"')
  if unescaped_quotes % 2:
    error(filename, linenum, 'readability/multiline_string', 5,
          'Multi-line string ("...") found. This lint script doesn\'t '
          'do well with such strings, and may give bogus warnings. '
          'Use C++11 raw strings or concatenation instead.')
# (non-threadsafe name, thread-safe alternative, validation pattern)
#
# The validation pattern is used to eliminate false positives such as:
#  _rand();  // false positive due to substring match.
#  ->rand();  // some member function rand().
#  ACMRandom rand(seed);  // some variable named rand.
#  ISAACRandom rand();  // another variable named rand.
#
# Basically we require the return value of these functions to be used
# in some expression context on the same line by matching on some
# operator before the function name.  This eliminates constructors and
# member function calls.
#
# Each entry's first two fields deliberately end with '(' so the error
# message can append '...)' to show a call form.
_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'
_THREADING_LIST = (
    ('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'),
    ('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'),
    ('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'),
    ('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'),
    ('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'),
    ('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'),
    ('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'),
    ('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
    ('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
    ('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
    ('strtok(', 'strtok_r(',
     _UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
    ('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
    )
def CheckPosixThreading(filename, clean_lines, linenum, error):
  """Checks for calls to thread-unsafe functions.

  Much code has been originally written without consideration of
  multi-threading.  Also, engineers are relying on their old experience;
  they have learned posix before threading extensions were added.  These
  tests guide the engineers to use thread-safe functions (when using
  posix directly).

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST:
    # The pattern requires an operator before the name, which filters out
    # constructors, member calls, and substring matches.
    if not Search(pattern, line):
      continue
    error(filename, linenum, 'runtime/threadsafe_fn', 2,
          'Consider using ' + multithread_safe_func +
          '...) instead of ' + single_thread_func +
          '...) for improved thread safety.')
def CheckVlogArguments(filename, clean_lines, linenum, error):
  """Checks that VLOG() is only used for defining a logging level.

  For example, VLOG(2) is correct.  VLOG(INFO), VLOG(WARNING), VLOG(ERROR),
  and VLOG(FATAL) are not.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  current = clean_lines.elided[linenum]
  if re.search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', current):
    error(filename, linenum, 'runtime/vlog', 5,
          'VLOG() should be used with numeric verbosity level. '
          'Use LOG() if you want symbolic severity levels.')
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.  Anchored at line start so only whole statements
# of the form '*ident++;' or '*ident--;' match.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
    r'^\s*\*\w+(\+\+|--);')
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
  """Checks for invalid increment *count++.

  For example following function:
    void increment_counter(int* count) {
      *count++;
    }
  is invalid, because it effectively does count++, moving pointer, and should
  be replaced with ++*count, (*count)++ or *count += 1.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Same pattern as _RE_PATTERN_INVALID_INCREMENT: '*ident++;' / '*ident--;'.
  if re.match(r'^\s*\*\w+(\+\+|--);', clean_lines.elided[linenum]):
    error(filename, linenum, 'runtime/invalid_increment', 5,
          'Changing pointer instead of value (or unused value of operator*).')
def IsMacroDefinition(clean_lines, linenum):
  """Return True if this line starts or continues a #define.

  A line continues a macro definition when the previous line ends with a
  backslash continuation.
  """
  if re.search(r'^#define', clean_lines[linenum]):
    return True
  if linenum > 0 and re.search(r'\\$', clean_lines[linenum - 1]):
    return True
  return False
def IsForwardClassDeclaration(clean_lines, linenum):
  """Match a forward class declaration such as 'class Foo;'.

  Returns the match object (truthy) when the line is a (possibly
  templated) forward declaration, otherwise None.
  """
  line = clean_lines[linenum]
  return re.match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', line)
class _BlockInfo(object):
  """Stores information about a generic block of code."""
  def __init__(self, linenum, seen_open_brace):
    # Line where this block starts.
    self.starting_linenum = linenum
    # Whether the opening '{' of the block has been encountered yet.
    self.seen_open_brace = seen_open_brace
    # NOTE(review): presumably a running count of unclosed '(' maintained
    # by the caller while parsing the block header -- confirm at call sites.
    self.open_parentheses = 0
    # _NO_ASM is a module-level constant marking "no inline asm seen".
    self.inline_asm = _NO_ASM
    # Subclasses that participate in namespace-indentation checks set this.
    self.check_namespace_indentation = False
  def CheckBegin(self, filename, clean_lines, linenum, error):
    """Run checks that applies to text up to the opening brace.

    This is mostly for checking the text after the class identifier
    and the "{", usually where the base class is specified.  For other
    blocks, there isn't much to check, so we always pass.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    pass
  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Run checks that applies to text after the closing brace.

    This is mostly used for checking end of namespace comments.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    pass
  def IsBlockInfo(self):
    """Return True only for direct _BlockInfo instances.

    Convenient for verifying an object is a plain _BlockInfo rather than
    one of the derived classes.
    """
    return self.__class__ is _BlockInfo
class _ExternCInfo(_BlockInfo):
  """Stores information about an 'extern "C"' block."""
  def __init__(self, linenum):
    # The opening brace is treated as already seen for extern "C" blocks.
    super(_ExternCInfo, self).__init__(linenum, True)
class _ClassInfo(_BlockInfo):
  """Stores information about a class."""
  def __init__(self, name, class_or_struct, clean_lines, linenum):
    _BlockInfo.__init__(self, linenum, False)
    self.name = name
    self.is_derived = False
    self.check_namespace_indentation = True
    # Structs default to public access; classes to private.
    if class_or_struct == 'struct':
      self.access = 'public'
      self.is_struct = True
    else:
      self.access = 'private'
      self.is_struct = False
    # Remember initial indentation level for this class.  Using raw_lines here
    # instead of elided to account for leading comments.
    self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])
    # Try to find the end of the class.  This will be confused by things like:
    #   class A {
    #   } *x = { ...
    #
    # But it's still good enough for CheckSectionSpacing.
    self.last_line = 0
    depth = 0
    for i in range(linenum, clean_lines.NumLines()):
      line = clean_lines.elided[i]
      depth += line.count('{') - line.count('}')
      if not depth:
        self.last_line = i
        break
  def CheckBegin(self, filename, clean_lines, linenum, error):
    """Mark the class as derived if its declaration has a base-class list."""
    # Look for a bare ':'
    if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
      self.is_derived = True
  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Check DISALLOW macro placement and closing-brace alignment."""
    # If there is a DISALLOW macro, it should appear near the end of
    # the class.
    seen_last_thing_in_class = False
    for i in xrange(linenum - 1, self.starting_linenum, -1):
      match = Search(
          r'\b(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)\(' +
          self.name + r'\)',
          clean_lines.elided[i])
      if match:
        if seen_last_thing_in_class:
          error(filename, i, 'readability/constructors', 3,
                match.group(1) + ' should be the last thing in the class')
        break
      if not Match(r'^\s*$', clean_lines.elided[i]):
        # A non-blank line below the macro means it was not last.
        seen_last_thing_in_class = True
    # Check that closing brace is aligned with beginning of the class.
    # Only do this if the closing brace is indented by only whitespaces.
    # This means we will not check single-line class definitions.
    indent = Match(r'^( *)\}', clean_lines.elided[linenum])
    if indent and len(indent.group(1)) != self.class_indent:
      if self.is_struct:
        parent = 'struct ' + self.name
      else:
        parent = 'class ' + self.name
      error(filename, linenum, 'whitespace/indent', 3,
            'Closing brace should be aligned with beginning of %s' % parent)
class _NamespaceInfo(_BlockInfo):
  """Stores information about a namespace."""
  def __init__(self, name, linenum):
    _BlockInfo.__init__(self, linenum, False)
    # Empty string denotes an anonymous namespace.
    self.name = name or ''
    self.check_namespace_indentation = True
  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Check end of namespace comments."""
    line = clean_lines.raw_lines[linenum]
    # Check how many lines is enclosed in this namespace.  Don't issue
    # warning for missing namespace comments if there aren't enough
    # lines.  However, do apply checks if there is already an end of
    # namespace comment and it's incorrect.
    #
    # TODO(unknown): We always want to check end of namespace comments
    # if a namespace is large, but sometimes we also want to apply the
    # check if a short namespace contained nontrivial things (something
    # other than forward declarations).  There is currently no logic on
    # deciding what these nontrivial things are, so this check is
    # triggered by namespace size only, which works most of the time.
    if (linenum - self.starting_linenum < 10
        and not Match(r'^\s*};*\s*(//|/\*).*\bnamespace\b', line)):
      return
    # Look for matching comment at end of namespace.
    #
    # Note that we accept C style "/* */" comments for terminating
    # namespaces, so that code that terminate namespaces inside
    # preprocessor macros can be cpplint clean.
    #
    # We also accept stuff like "// end of namespace <name>." with the
    # period at the end.
    #
    # Besides these, we don't accept anything else, otherwise we might
    # get false negatives when existing comment is a substring of the
    # expected namespace.
    if self.name:
      # Named namespace
      if not Match((r'^\s*};*\s*(//|/\*).*\bnamespace\s+' +
                    re.escape(self.name) + r'[\*/\.\\\s]*$'),
                   line):
        error(filename, linenum, 'readability/namespace', 5,
              'Namespace should be terminated with "// namespace %s"' %
              self.name)
    else:
      # Anonymous namespace
      if not Match(r'^\s*};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
        # If "// namespace anonymous" or "// anonymous namespace (more text)",
        # mention "// anonymous namespace" as an acceptable form
        if Match(r'^\s*}.*\b(namespace anonymous|anonymous namespace)\b', line):
          error(filename, linenum, 'readability/namespace', 5,
                'Anonymous namespace should be terminated with "// namespace"'
                ' or "// anonymous namespace"')
        else:
          error(filename, linenum, 'readability/namespace', 5,
                'Anonymous namespace should be terminated with "// namespace"')
class _PreprocessorInfo(object):
"""Stores checkpoints of nesting stacks when #if/#else is seen."""
def __init__(self, stack_before_if):
# The entire nesting stack before #if
self.stack_before_if = stack_before_if
# The entire nesting stack up to #else
self.stack_before_else = []
# Whether we have already seen #else or #elif
self.seen_else = False
class NestingState(object):
  """Holds states related to parsing braces."""
  def __init__(self):
    """Initializes an empty nesting state."""
    # Stack for tracking all braces.  An object is pushed whenever we
    # see a "{", and popped when we see a "}".  Only 3 types of
    # objects are possible:
    # - _ClassInfo: a class or struct.
    # - _NamespaceInfo: a namespace.
    # - _BlockInfo: some other type of block.
    self.stack = []
    # Top of the previous stack before each Update().
    #
    # Because the nesting_stack is updated at the end of each line, we
    # had to do some convoluted checks to find out what is the current
    # scope at the beginning of the line.  This check is simplified by
    # saving the previous top of nesting stack.
    #
    # We could save the full stack, but we only need the top.  Copying
    # the full nesting stack would slow down cpplint by ~10%.
    self.previous_stack_top = []
    # Stack of _PreprocessorInfo objects, one per currently-open #if.
    self.pp_stack = []
def SeenOpenBrace(self):
"""Check if we have seen the opening brace for the innermost block.
Returns:
True if we have seen the opening brace, False if the innermost
block is still expecting an opening brace.
"""
return (not self.stack) or self.stack[-1].seen_open_brace
def InNamespaceBody(self):
"""Check if we are currently one level inside a namespace body.
Returns:
True if top of the stack is a namespace block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
def InExternC(self):
"""Check if we are currently one level inside an 'extern "C"' block.
Returns:
True if top of the stack is an extern block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _ExternCInfo)
def InClassDeclaration(self):
"""Check if we are currently one level inside a class or struct declaration.
Returns:
True if top of the stack is a class/struct, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _ClassInfo)
def InAsmBlock(self):
"""Check if we are currently one level inside an inline ASM block.
Returns:
True if the top of the stack is a block containing inline ASM.
"""
return self.stack and self.stack[-1].inline_asm != _NO_ASM
def InTemplateArgumentList(self, clean_lines, linenum, pos):
"""Check if current position is inside template argument list.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: position just after the suspected template argument.
Returns:
True if (linenum, pos) is inside template arguments.
"""
while linenum < clean_lines.NumLines():
# Find the earliest character that might indicate a template argument
line = clean_lines.elided[linenum]
match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
if not match:
linenum += 1
pos = 0
continue
token = match.group(1)
pos += len(match.group(0))
# These things do not look like template argument list:
# class Suspect {
# class Suspect x; }
if token in ('{', '}', ';'): return False
# These things look like template argument list:
# template <class Suspect>
# template <class Suspect = default_value>
# template <class Suspect[]>
# template <class Suspect...>
if token in ('>', '=', '[', ']', '.'): return True
# Check if token is an unmatched '<'.
# If not, move on to the next character.
if token != '<':
pos += 1
if pos >= len(line):
linenum += 1
pos = 0
continue
# We can't be sure if we just find a single '<', and need to
# find the matching '>'.
(_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
if end_pos < 0:
# Not sure if template argument list or syntax error in file
return False
linenum = end_line
pos = end_pos
return False
def UpdatePreprocessor(self, line):
"""Update preprocessor stack.
We need to handle preprocessors due to classes like this:
#ifdef SWIG
struct ResultDetailsPageElementExtensionPoint {
#else
struct ResultDetailsPageElementExtensionPoint : public Extension {
#endif
We make the following assumptions (good enough for most files):
- Preprocessor condition evaluates to true from #if up to first
#else/#elif/#endif.
- Preprocessor condition evaluates to false from #else/#elif up
to #endif. We still perform lint checks on these lines, but
these do not affect nesting stack.
Args:
line: current line to check.
"""
if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
# Beginning of #if block, save the nesting stack here. The saved
# stack will allow us to restore the parsing state in the #else case.
self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
elif Match(r'^\s*#\s*(else|elif)\b', line):
# Beginning of #else block
if self.pp_stack:
if not self.pp_stack[-1].seen_else:
# This is the first #else or #elif block. Remember the
# whole nesting stack up to this point. This is what we
# keep after the #endif.
self.pp_stack[-1].seen_else = True
self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
# Restore the stack to how it was before the #if
self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
else:
# TODO(unknown): unexpected #else, issue warning?
pass
elif Match(r'^\s*#\s*endif\b', line):
# End of #if or #else blocks.
if self.pp_stack:
# If we saw an #else, we will need to restore the nesting
# stack to its former state before the #else, otherwise we
# will just continue from where we left off.
if self.pp_stack[-1].seen_else:
# Here we can just use a shallow copy since we are the last
# reference to it.
self.stack = self.pp_stack[-1].stack_before_else
# Drop the corresponding #if
self.pp_stack.pop()
else:
# TODO(unknown): unexpected #endif, issue warning?
pass
# TODO(unknown): Update() is too long, but we will refactor later.
def Update(self, filename, clean_lines, linenum, error):
"""Update nesting state with current line.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remember top of the previous nesting stack.
#
# The stack is always pushed/popped and not modified in place, so
# we can just do a shallow copy instead of copy.deepcopy. Using
# deepcopy would slow down cpplint by ~28%.
if self.stack:
self.previous_stack_top = self.stack[-1]
else:
self.previous_stack_top = None
# Update pp_stack
self.UpdatePreprocessor(line)
# Count parentheses. This is to avoid adding struct arguments to
# the nesting stack.
if self.stack:
inner_block = self.stack[-1]
depth_change = line.count('(') - line.count(')')
inner_block.open_parentheses += depth_change
# Also check if we are starting or ending an inline assembly block.
if inner_block.inline_asm in (_NO_ASM, _END_ASM):
if (depth_change != 0 and
inner_block.open_parentheses == 1 and
_MATCH_ASM.match(line)):
# Enter assembly block
inner_block.inline_asm = _INSIDE_ASM
else:
# Not entering assembly block. If previous line was _END_ASM,
# we will now shift to _NO_ASM state.
inner_block.inline_asm = _NO_ASM
elif (inner_block.inline_asm == _INSIDE_ASM and
inner_block.open_parentheses == 0):
# Exit assembly block
inner_block.inline_asm = _END_ASM
# Consume namespace declaration at the beginning of the line. Do
# this in a loop so that we catch same line declarations like this:
# namespace proto2 { namespace bridge { class MessageSet; } }
while True:
# Match start of namespace. The "\b\s*" below catches namespace
# declarations even if it weren't followed by a whitespace, this
# is so that we don't confuse our namespace checker. The
# missing spaces will be flagged by CheckSpacing.
namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
if not namespace_decl_match:
break
new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
self.stack.append(new_namespace)
line = namespace_decl_match.group(2)
if line.find('{') != -1:
new_namespace.seen_open_brace = True
line = line[line.find('{') + 1:]
# Look for a class declaration in whatever is left of the line
# after parsing namespaces. The regexp accounts for decorated classes
# such as in:
# class LOCKABLE API Object {
# };
class_decl_match = Match(
r'^(\s*(?:template\s*<[\w\s<>,:=]*>\s*)?'
r'(class|struct)\s+(?:[a-zA-Z0-9_]+\s+)*(\w+(?:::\w+)*))'
r'(.*)$', line)
if (class_decl_match and
(not self.stack or self.stack[-1].open_parentheses == 0)):
# We do not want to accept classes that are actually template arguments:
# template <class Ignore1,
# class Ignore2 = Default<Args>,
# template <Args> class Ignore3>
# void Function() {};
#
# To avoid template argument cases, we scan forward and look for
# an unmatched '>'. If we see one, assume we are inside a
# template argument list.
end_declaration = len(class_decl_match.group(1))
if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
self.stack.append(_ClassInfo(
class_decl_match.group(3), class_decl_match.group(2),
clean_lines, linenum))
line = class_decl_match.group(4)
# If we have not yet seen the opening brace for the innermost block,
# run checks here.
if not self.SeenOpenBrace():
self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
# Update access control if we are inside a class/struct
if self.stack and isinstance(self.stack[-1], _ClassInfo):
classinfo = self.stack[-1]
access_match = Match(
r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
r':(?:[^:]|$)',
line)
if access_match:
classinfo.access = access_match.group(2)
# Check that access keywords are indented +1 space. Skip this
# check if the keywords are not preceded by whitespaces.
indent = access_match.group(1)
if (len(indent) != classinfo.class_indent + 1 and
Match(r'^\s*$', indent)):
if classinfo.is_struct:
parent = 'struct ' + classinfo.name
else:
parent = 'class ' + classinfo.name
slots = ''
if access_match.group(3):
slots = access_match.group(3)
error(filename, linenum, 'whitespace/indent', 3,
'%s%s: should be indented +1 space inside %s' % (
access_match.group(2), slots, parent))
# Consume braces or semicolons from what's left of the line
while True:
# Match first brace, semicolon, or closed parenthesis.
matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
if not matched:
break
token = matched.group(1)
if token == '{':
# If namespace or class hasn't seen an opening brace yet, mark
# namespace/class head as complete. Push a new block onto the
# stack otherwise.
if not self.SeenOpenBrace():
self.stack[-1].seen_open_brace = True
elif Match(r'^extern\s*"[^"]*"\s*\{', line):
self.stack.append(_ExternCInfo(linenum))
else:
self.stack.append(_BlockInfo(linenum, True))
if _MATCH_ASM.match(line):
self.stack[-1].inline_asm = _BLOCK_ASM
elif token == ';' or token == ')':
# If we haven't seen an opening brace yet, but we already saw
# a semicolon, this is probably a forward declaration. Pop
# the stack for these.
#
# Similarly, if we haven't seen an opening brace yet, but we
# already saw a closing parenthesis, then these are probably
# function arguments with extra "class" or "struct" keywords.
# Also pop these stack for these.
if not self.SeenOpenBrace():
self.stack.pop()
else: # token == '}'
# Perform end of block checks and pop the stack.
if self.stack:
self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
self.stack.pop()
line = matched.group(2)
def InnermostClass(self):
"""Get class info on the top of the stack.
Returns:
A _ClassInfo object if we are inside a class, or None otherwise.
"""
for i in range(len(self.stack), 0, -1):
classinfo = self.stack[i - 1]
if isinstance(classinfo, _ClassInfo):
return classinfo
return None
def CheckCompletedBlocks(self, filename, error):
"""Checks that all classes and namespaces have been completely parsed.
Call this when all lines in a file have been processed.
Args:
filename: The name of the current file.
error: The function to call with any errors found.
"""
# Note: This test can result in false positives if #ifdef constructs
# get in the way of brace matching. See the testBuildClass test in
# cpplint_unittest.py for an example of this.
for obj in self.stack:
if isinstance(obj, _ClassInfo):
error(filename, obj.starting_linenum, 'build/class', 5,
'Failed to find complete declaration of class %s' %
obj.name)
elif isinstance(obj, _NamespaceInfo):
error(filename, obj.starting_linenum, 'build/namespaces', 5,
'Failed to find complete declaration of namespace %s' %
obj.name)
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
                                  nesting_state, error):
  r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.

  Complain about several constructs which gcc-2 accepts, but which are
  not standard C++. Warning about these in lint is one way to ease the
  transition to new compilers.
  - put storage class first (e.g. "static const" instead of "const static").
  - "%lld" instead of %qd" in printf-type functions.
  - "%1$d" is non-standard in printf-type functions.
  - "\%" is an undefined character escape sequence.
  - text after #endif is not allowed.
  - invalid inner-style forward declaration.
  - >? and <? operators, and their >?= and <?= cousins.

  Additionally, check for constructor/destructor style violations and reference
  members, as it is very convenient to do so while checking for
  gcc-2 compliance.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
  """
  # Remove comments from the line, but leave in strings for now.
  line = clean_lines.lines[linenum]

  if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
    error(filename, linenum, 'runtime/printf_format', 3,
          '%q in format strings is deprecated. Use %ll instead.')

  if Search(r'printf\s*\(.*".*%\d+\$', line):
    error(filename, linenum, 'runtime/printf_format', 2,
          '%N$ formats are unconventional. Try rewriting to avoid them.')

  # Remove escaped backslashes before looking for undefined escapes.
  line = line.replace('\\\\', '')

  if Search(r'("|\').*\\(%|\[|\(|{)', line):
    error(filename, linenum, 'build/printf_format', 3,
          '%, [, (, and { are undefined character escapes. Unescape them.')

  # For the rest, work with both comments and strings removed.
  line = clean_lines.elided[linenum]

  if Search(r'\b(const|volatile|void|char|short|int|long'
            r'|float|double|signed|unsigned'
            r'|schar|u?int8|u?int16|u?int32|u?int64)'
            r'\s+(register|static|extern|typedef)\b',
            line):
    error(filename, linenum, 'build/storage_class', 5,
          'Storage-class specifier (static, extern, typedef, etc) should be '
          'at the beginning of the declaration.')

  if Match(r'\s*#\s*endif\s*[^/\s]+', line):
    error(filename, linenum, 'build/endif_comment', 5,
          'Uncommented text after #endif is non-standard. Use a comment.')

  if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
    error(filename, linenum, 'build/forward_decl', 5,
          'Inner-style forward declarations are invalid. Remove this line.')

  if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
            line):
    error(filename, linenum, 'build/deprecated', 3,
          '>? and <? (max and min) operators are non-standard and deprecated.')

  if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
    # TODO(unknown): Could it be expanded safely to arbitrary references,
    # without triggering too many false positives? The first
    # attempt triggered 5 warnings for mostly benign code in the regtest, hence
    # the restriction.
    # Here's the original regexp, for the reference:
    # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
    # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
    error(filename, linenum, 'runtime/member_string_references', 2,
          'const string& members are dangerous. It is much better to use '
          'alternatives, such as pointers or simple constants.')

  # Everything else in this function operates on class declarations.
  # Return early if the top of the nesting stack is not a class, or if
  # the class head is not completed yet.
  classinfo = nesting_state.InnermostClass()
  if not classinfo or not classinfo.seen_open_brace:
    return

  # The class may have been declared with namespace or classname qualifiers.
  # The constructor and destructor will not have those qualifiers.
  base_classname = classinfo.name.split('::')[-1]

  # Look for single-argument constructors that aren't marked explicit.
  # Technically a valid construct, but against style.
  explicit_constructor_match = Match(
      r'\s+(?:(?:inline|constexpr)\s+)*(explicit\s+)?'
      r'(?:(?:inline|constexpr)\s+)*%s\s*'
      r'\(((?:[^()]|\([^()]*\))*)\)'
      % re.escape(base_classname),
      line)

  if explicit_constructor_match:
    is_marked_explicit = explicit_constructor_match.group(1)

    if not explicit_constructor_match.group(2):
      constructor_args = []
    else:
      constructor_args = explicit_constructor_match.group(2).split(',')

    # collapse arguments so that commas in template parameter lists and function
    # argument parameter lists don't split arguments in two
    i = 0
    while i < len(constructor_args):
      constructor_arg = constructor_args[i]
      while (constructor_arg.count('<') > constructor_arg.count('>') or
             constructor_arg.count('(') > constructor_arg.count(')')):
        constructor_arg += ',' + constructor_args[i + 1]
        del constructor_args[i + 1]
      constructor_args[i] = constructor_arg
      i += 1

    # Classify the argument list to decide whether "explicit" is required.
    variadic_args = [arg for arg in constructor_args if '&&...' in arg]
    defaulted_args = [arg for arg in constructor_args if '=' in arg]
    noarg_constructor = (not constructor_args or  # empty arg list
                         # 'void' arg specifier
                         (len(constructor_args) == 1 and
                          constructor_args[0].strip() == 'void'))
    onearg_constructor = ((len(constructor_args) == 1 and  # exactly one arg
                           not noarg_constructor) or
                          # all but at most one arg defaulted
                          (len(constructor_args) >= 1 and
                           not noarg_constructor and
                           len(defaulted_args) >= len(constructor_args) - 1) or
                          # variadic arguments with zero or one argument
                          (len(constructor_args) <= 2 and
                           len(variadic_args) >= 1))
    initializer_list_constructor = bool(
        onearg_constructor and
        Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
    copy_constructor = bool(
        onearg_constructor and
        Match(r'((const\s+(volatile\s+)?)?|(volatile\s+(const\s+)?))?'
              r'%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
              % re.escape(base_classname), constructor_args[0].strip()))

    if (not is_marked_explicit and
        onearg_constructor and
        not initializer_list_constructor and
        not copy_constructor):
      if defaulted_args or variadic_args:
        error(filename, linenum, 'runtime/explicit', 5,
              'Constructors callable with one argument '
              'should be marked explicit.')
      else:
        error(filename, linenum, 'runtime/explicit', 5,
              'Single-parameter constructors should be marked explicit.')
    elif is_marked_explicit and not onearg_constructor:
      if noarg_constructor:
        error(filename, linenum, 'runtime/explicit', 5,
              'Zero-parameter constructors should not be marked explicit.')
def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
  """Checks for the correctness of various spacing around function calls.

  Flags extra spaces just inside parens of a function call, a space
  before the opening paren of a call, and misplaced closing parens.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Since function calls often occur inside if/for/while/switch
  # expressions - which have their own, more liberal conventions - we
  # first see if we should be looking inside such an expression for a
  # function call, to which we can apply more strict standards.
  fncall = line    # if there's no control flow construct, look at whole line
  for pattern in (r'\bif\s*\((.*)\)\s*{',
                  r'\bfor\s*\((.*)\)\s*{',
                  r'\bwhile\s*\((.*)\)\s*[{;]',
                  r'\bswitch\s*\((.*)\)\s*{'):
    match = Search(pattern, line)
    if match:
      fncall = match.group(1)    # look inside the parens for function calls
      break

  # Except in if/for/while/switch, there should never be space
  # immediately inside parens (eg "f( 3, 4 )"). We make an exception
  # for nested parens ( (a+b) + c ). Likewise, there should never be
  # a space before a ( when it's a function argument. I assume it's a
  # function argument when the char before the whitespace is legal in
  # a function name (alnum + _) and we're not starting a macro. Also ignore
  # pointers and references to arrays and functions coz they're too tricky:
  # we use a very simple way to recognize these:
  # " (something)(maybe-something)" or
  # " (something)(maybe-something," or
  # " (something)[something]"
  # Note that we assume the contents of [] to be short enough that
  # they'll never need to wrap.
  if (  # Ignore control structures.
      not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
                 fncall) and
      # Ignore pointers/references to functions.
      not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
      # Ignore pointers/references to arrays.
      not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
    if Search(r'\w\s*\(\s(?!\s*\\$)', fncall):      # a ( used for a fn call
      error(filename, linenum, 'whitespace/parens', 4,
            'Extra space after ( in function call')
    elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
      error(filename, linenum, 'whitespace/parens', 2,
            'Extra space after (')
    if (Search(r'\w\s+\(', fncall) and
        not Search(r'_{0,2}asm_{0,2}\s+_{0,2}volatile_{0,2}\s+\(', fncall) and
        not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
        not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and
        not Search(r'\bcase\s+\(', fncall)):
      # TODO(unknown): Space after an operator function seem to be a common
      # error, silence those for now by restricting them to highest verbosity.
      if Search(r'\boperator_*\b', line):
        error(filename, linenum, 'whitespace/parens', 0,
              'Extra space before ( in function call')
      else:
        error(filename, linenum, 'whitespace/parens', 4,
              'Extra space before ( in function call')
  # If the ) is followed only by a newline or a { + newline, assume it's
  # part of a control statement (if/while/etc), and don't complain
  if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
    # If the closing parenthesis is preceded by only whitespaces,
    # try to give a more descriptive error message.
    if Search(r'^\s+\)', fncall):
      error(filename, linenum, 'whitespace/parens', 2,
            'Closing ) should be moved to the previous line')
    else:
      error(filename, linenum, 'whitespace/parens', 2,
            'Extra space before )')
def IsBlankLine(line):
  """Tell whether a line contains nothing but whitespace.

  Args:
    line: A single line of text.

  Returns:
    True when the line is empty or consists only of whitespace,
    False otherwise.
  """
  # str.strip() removes exactly the characters str.isspace() accepts,
  # so an empty result means the line is blank.
  return not line.strip()
def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
                                 error):
  """Flags items that are indented inside a namespace body.

  Args:
    filename: The name of the current file.
    nesting_state: A NestingState instance with the current block stack.
    clean_lines: A CleansedLines instance containing the file.
    line: The number of the line to check.
    error: The function to call with any errors found.
  """
  stack = nesting_state.stack
  prev_top = nesting_state.previous_stack_top
  # The current item sits directly inside a namespace when the previous
  # top-of-stack is that namespace and it is one level below the top.
  is_namespace_indent_item = (
      len(stack) > 1 and
      stack[-1].check_namespace_indentation and
      isinstance(prev_top, _NamespaceInfo) and
      prev_top == stack[-2])

  if not ShouldCheckNamespaceIndentation(nesting_state,
                                         is_namespace_indent_item,
                                         clean_lines.elided, line):
    return
  CheckItemIndentationInNamespace(filename, clean_lines.elided, line, error)
def CheckForFunctionLengths(filename, clean_lines, linenum,
                            function_state, error):
  """Reports for long function bodies.

  For an overview why this is done, see:
  https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions

  Uses a simplistic algorithm assuming other style guidelines
  (especially spacing) are followed.
  Only checks unindented functions, so class members are unchecked.
  Trivial bodies are unchecked, so constructors with huge initializer lists
  may be missed.
  Blank/comment lines are not counted so as to avoid encouraging the removal
  of vertical space and comments just to get through a lint check.
  NOLINT *on the last line of a function* disables this check.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    function_state: Current function name and lines in body so far.
    error: The function to call with any errors found.
  """
  lines = clean_lines.lines
  line = lines[linenum]
  joined_line = ''

  starting_func = False
  regexp = r'(\w(\w|::|\*|\&|\s)*)\('  # decls * & space::name( ...
  match_result = Match(regexp, line)
  if match_result:
    # If the name is all caps and underscores, figure it's a macro and
    # ignore it, unless it's TEST or TEST_F.
    function_name = match_result.group(1).split()[-1]
    if function_name == 'TEST' or function_name == 'TEST_F' or (
        not Match(r'[A-Z_]+$', function_name)):
      starting_func = True

  if starting_func:
    # Scan forward to find the start of the function body (the opening
    # brace), joining continuation lines as we go.
    body_found = False
    for start_linenum in xrange(linenum, clean_lines.NumLines()):
      start_line = lines[start_linenum]
      joined_line += ' ' + start_line.lstrip()
      if Search(r'(;|})', start_line):  # Declarations and trivial functions
        body_found = True
        break                           # ... ignore
      if Search(r'{', start_line):
        body_found = True
        function = Search(r'((\w|:)*)\(', line).group(1)
        if Match(r'TEST', function):    # Handle TEST... macros
          parameter_regexp = Search(r'(\(.*\))', joined_line)
          if parameter_regexp:          # Ignore bad syntax
            function += parameter_regexp.group(1)
        else:
          function += '()'
        function_state.Begin(function)
        break
    if not body_found:
      # No body for the function (or evidence of a non-function) was found.
      error(filename, linenum, 'readability/fn_size', 5,
            'Lint failed to find start of function body.')
  elif Match(r'^\}\s*$', line):  # function end
    function_state.Check(error, filename, linenum)
    function_state.End()
  elif not Match(r'^\s*$', line):
    function_state.Count()  # Count non-blank/non-comment lines.
# Matches "// TODO" comments: group 1 captures whitespace between "//" and
# "TODO", group 2 the optional "(username)", group 3 the character (a space
# or end-of-string) following the optional colon. Used by CheckComment.
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
def CheckComment(line, filename, linenum, next_line_start, error):
  """Checks for common mistakes in comments.

  Args:
    line: The line in question.
    filename: The name of the current file.
    linenum: The number of the line to check.
    next_line_start: The first non-whitespace column of the next line.
    error: The function to call with any errors found.
  """
  commentpos = line.find('//')
  if commentpos != -1:
    # Check if the // may be in quotes. If so, ignore it
    # (an even number of unescaped quotes before it means it is not).
    if re.sub(r'\\.', '', line[0:commentpos]).count('"') % 2 == 0:
      # Allow one space for new scopes, two spaces otherwise:
      if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
          ((commentpos >= 1 and
            line[commentpos-1] not in string.whitespace) or
           (commentpos >= 2 and
            line[commentpos-2] not in string.whitespace))):
        error(filename, linenum, 'whitespace/comments', 2,
              'At least two spaces is best between code and comments')

      # Checks for common mistakes in TODO comments.
      comment = line[commentpos:]
      match = _RE_PATTERN_TODO.match(comment)
      if match:
        # One whitespace is correct; zero whitespace is handled elsewhere.
        leading_whitespace = match.group(1)
        if len(leading_whitespace) > 1:
          error(filename, linenum, 'whitespace/todo', 2,
                'Too many spaces before TODO')

        username = match.group(2)
        if not username:
          error(filename, linenum, 'readability/todo', 2,
                'Missing username in TODO; it should look like '
                '"// TODO(my_username): Stuff."')

        middle_whitespace = match.group(3)
        # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
        if middle_whitespace != ' ' and middle_whitespace != '':
          error(filename, linenum, 'whitespace/todo', 2,
                'TODO(my_username) should be followed by a space')

      # If the comment contains an alphanumeric character, there
      # should be a space somewhere between it and the // unless
      # it's a /// or //! Doxygen comment.
      if (Match(r'//[^ ]*\w', comment) and
          not Match(r'(///|//\!)(\s+|$)', comment)):
        error(filename, linenum, 'whitespace/comments', 4,
              'Should have a space between // and comment')
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
  """Checks for the correctness of various spacing issues in the code.

  Things we check for: spaces around operators, spaces after
  if/for/while/switch, no spaces around parens in function calls, two
  spaces between code and comment, don't start a block with a blank
  line, don't end a function with a blank line, don't add a blank line
  after public/protected/private, don't have too many blank lines in a row.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """

  # Don't use "elided" lines here, otherwise we can't check commented lines.
  # Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings,
  raw = clean_lines.lines_without_raw_strings
  line = raw[linenum]

  # Before nixing comments, check if the line is blank for no good
  # reason. This includes the first line after a block is opened, and
  # blank lines at the end of a function (ie, right before a line like '}'
  #
  # Skip all the blank line checks if we are immediately inside a
  # namespace body. In other words, don't issue blank line warnings
  # for this block:
  #   namespace {
  #
  #   }
  #
  # A warning about missing end of namespace comments will be issued instead.
  #
  # Also skip blank line checks for 'extern "C"' blocks, which are formatted
  # like namespaces.
  if (IsBlankLine(line) and
      not nesting_state.InNamespaceBody() and
      not nesting_state.InExternC()):
    elided = clean_lines.elided
    prev_line = elided[linenum - 1]
    prevbrace = prev_line.rfind('{')
    # TODO(unknown): Don't complain if line before blank line, and line after,
    #                both start with alnums and are indented the same amount.
    #                This ignores whitespace at the start of a namespace block
    #                because those are not usually indented.
    if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
      # OK, we have a blank line at the start of a code block. Before we
      # complain, we check if it is an exception to the rule: The previous
      # non-empty line has the parameters of a function header that are indented
      # 4 spaces (because they did not fit in a 80 column line when placed on
      # the same line as the function name). We also check for the case where
      # the previous line is indented 6 spaces, which may happen when the
      # initializers of a constructor do not fit into a 80 column line.
      exception = False
      if Match(r' {6}\w', prev_line):  # Initializer list?
        # We are looking for the opening column of initializer list, which
        # should be indented 4 spaces to cause 6 space indentation afterwards.
        search_position = linenum-2
        while (search_position >= 0
               and Match(r' {6}\w', elided[search_position])):
          search_position -= 1
        exception = (search_position >= 0
                     and elided[search_position][:5] == '    :')
      else:
        # Search for the function arguments or an initializer list. We use a
        # simple heuristic here: If the line is indented 4 spaces; and we have a
        # closing paren, without the opening paren, followed by an opening brace
        # or colon (for initializer lists) we assume that it is the last line of
        # a function header. If we have a colon indented 4 spaces, it is an
        # initializer list.
        exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
                           prev_line)
                     or Match(r' {4}:', prev_line))

      if not exception:
        error(filename, linenum, 'whitespace/blank_line', 2,
              'Redundant blank line at the start of a code block '
              'should be deleted.')
    # Ignore blank lines at the end of a block in a long if-else
    # chain, like this:
    #   if (condition1) {
    #     // Something followed by a blank line
    #
    #   } else if (condition2) {
    #     // Something else
    #   }
    if linenum + 1 < clean_lines.NumLines():
      next_line = raw[linenum + 1]
      if (next_line
          and Match(r'\s*}', next_line)
          and next_line.find('} else ') == -1):
        error(filename, linenum, 'whitespace/blank_line', 3,
              'Redundant blank line at the end of a code block '
              'should be deleted.')

    matched = Match(r'\s*(public|protected|private):', prev_line)
    if matched:
      error(filename, linenum, 'whitespace/blank_line', 3,
            'Do not leave a blank line after "%s:"' % matched.group(1))

  # Next, check comments
  next_line_start = 0
  if linenum + 1 < clean_lines.NumLines():
    next_line = raw[linenum + 1]
    next_line_start = len(next_line) - len(next_line.lstrip())
  CheckComment(line, filename, linenum, next_line_start, error)

  # get rid of comments and strings
  line = clean_lines.elided[linenum]

  # You shouldn't have spaces before your brackets, except for C++11 attributes
  # or maybe after 'delete []', 'return []() {};', or 'auto [abc, ...] = ...;'.
  if (Search(r'\w\s+\[(?!\[)', line) and
      not Search(r'(?:auto&?|delete|return)\s+\[', line)):
    error(filename, linenum, 'whitespace/braces', 5,
          'Extra space before [')

  # In range-based for, we wanted spaces before and after the colon, but
  # not around "::" tokens that might appear.
  if (Search(r'for *\(.*[^:]:[^: ]', line) or
      Search(r'for *\(.*[^: ]:[^:]', line)):
    error(filename, linenum, 'whitespace/forcolon', 2,
          'Missing space around colon in range-based for loop')
def CheckOperatorSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing around operators.

  Flags missing spaces around binary operators (=, ==, <=, <<, >>, ...) and
  extra spaces around unary operators (!, ~, ++, --).

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  # Don't try to do spacing checks for operator methods.  Do this by
  # replacing the troublesome characters with something else,
  # preserving column position for all other characters.
  #
  # The replacement is done repeatedly to avoid false positives from
  # operators that call operators.
  while True:
    match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
    if match:
      # Blank out the operator symbol with underscores of the same length
      # so downstream checks never see it.
      line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
    else:
      break
  # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
  # Otherwise not.  Note we only check for non-spaces on *both* sides;
  # sometimes people put non-spaces on one side when aligning ='s among
  # many lines (not that this is behavior that I approve of...)
  if ((Search(r'[\w.]=', line) or
       Search(r'=[\w.]', line))
      and not Search(r'\b(if|while|for) ', line)
      # Operators taken from [lex.operators] in C++11 standard.
      and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line)
      and not Search(r'operator=', line)):
    error(filename, linenum, 'whitespace/operators', 4,
          'Missing spaces around =')
  # It's ok not to have spaces around binary operators like + - * /, but if
  # there's too little whitespace, we get concerned.  It's hard to tell,
  # though, so we punt on this one for now.  TODO.
  # You should always have whitespace around binary operators.
  #
  # Check <= and >= first to avoid false positives with < and >, then
  # check non-include lines for spacing around < and >.
  #
  # If the operator is followed by a comma, assume it's being used in a
  # macro context and don't do any checks.  This avoids false
  # positives.
  #
  # Note that && is not included here.  This is because there are too
  # many false positives due to RValue references.
  match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around %s' % match.group(1))
  elif not Match(r'#.*include', line):
    # Look for < that is not surrounded by spaces.  This is only
    # triggered if both sides are missing spaces, even though
    # technically we should flag if at least one side is missing a
    # space.  This is done to avoid some false positives with shifts.
    match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
    if match:
      (_, _, end_pos) = CloseExpression(
          clean_lines, linenum, len(match.group(1)))
      # end_pos <= -1 means "<" had no matching ">", so it's an operator
      # rather than a template bracket.
      if end_pos <= -1:
        error(filename, linenum, 'whitespace/operators', 3,
              'Missing spaces around <')
    # Look for > that is not surrounded by spaces.  Similar to the
    # above, we only trigger if both sides are missing spaces to avoid
    # false positives with shifts.
    match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
    if match:
      (_, _, start_pos) = ReverseCloseExpression(
          clean_lines, linenum, len(match.group(1)))
      if start_pos <= -1:
        error(filename, linenum, 'whitespace/operators', 3,
              'Missing spaces around >')
  # We allow no-spaces around << when used like this: 10<<20, but
  # not otherwise (particularly, not when used as streams)
  #
  # We also allow operators following an opening parenthesis, since
  # those tend to be macros that deal with operators.
  match = Search(r'(operator|[^\s(<])(?:L|UL|LL|ULL|l|ul|ll|ull)?<<([^\s,=<])', line)
  if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and
      not (match.group(1) == 'operator' and match.group(2) == ';')):
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around <<')
  # We allow no-spaces around >> for almost anything.  This is because
  # C++11 allows ">>" to close nested templates, which accounts for
  # most cases when ">>" is not followed by a space.
  #
  # We still warn on ">>" followed by alpha character, because that is
  # likely due to ">>" being used for right shifts, e.g.:
  #   value >> alpha
  #
  # When ">>" is used to close templates, the alphanumeric letter that
  # follows would be part of an identifier, and there should still be
  # a space separating the template type and the identifier.
  #   type<type<type>> alpha
  match = Search(r'>>[a-zA-Z_]', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around >>')
  # There shouldn't be space around unary operators
  match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 4,
          'Extra space for operator %s' % match.group(1))
def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing around parentheses.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  # Control-flow keywords (if/for/while/switch) must have a space before
  # their opening parenthesis.
  keyword_match = Search(r' (if\(|for\(|while\(|switch\()', line)
  if keyword_match:
    error(filename, linenum, 'whitespace/parens', 5,
          'Missing space before ( in %s' % keyword_match.group(1))
  # For if/for/while/switch, the spaces just inside the left and right
  # parens should match, and there should be zero or one of them.
  # We don't want: "if ( foo)" or "if ( foo )".
  # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
  paren_match = Search(r'\b(if|for|while|switch)\s*'
                       r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
                       line)
  if not paren_match:
    return
  keyword = paren_match.group(1)
  leading_spaces = paren_match.group(2)
  first_char = paren_match.group(3)
  trailing_spaces = paren_match.group(4)
  if len(leading_spaces) != len(trailing_spaces):
    # Allow the "for" empty-clause forms shown above.
    empty_first_clause = (first_char == ';' and
                          len(leading_spaces) == 1 + len(trailing_spaces))
    empty_last_clause = (not leading_spaces and
                         Search(r'\bfor\s*\(.*; \)', line))
    if not (empty_first_clause or empty_last_clause):
      error(filename, linenum, 'whitespace/parens', 5,
            'Mismatching spaces inside () in %s' % keyword)
  if len(leading_spaces) > 1:
    error(filename, linenum, 'whitespace/parens', 5,
          'Should have zero or one spaces inside ( and ) in %s' %
          keyword)
def CheckCommaSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing near commas and semicolons.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  raw_line = clean_lines.lines_without_raw_strings[linenum]
  elided_line = clean_lines.elided[linenum]
  # A comma should always be followed by a space, unless the next character
  # is another comma (which only happens for empty macro arguments).
  #
  # "operator,(" is masked out first so comma-operator overloads don't
  # trigger.  The check runs on the elided line to find the missing space,
  # then is confirmed on the raw line so whitespace that was only removed
  # by comment elision does not produce false positives.
  masked_line = ReplaceAll(r'\boperator\s*,\s*\(', 'F(', elided_line)
  if Search(r',[^,\s]', masked_line) and Search(r',[^,\s]', raw_line):
    error(filename, linenum, 'whitespace/comma', 3,
          'Missing space after ,')
  # A semicolon should also be followed by a space, except for a few corner
  # cases (closing braces, further semicolons, line continuations, close
  # parens, comment starts).
  # TODO(unknown): clarify if 'if (1) { return 1;}' requires one more
  # space after ;
  if Search(r';[^\s};\\)/]', elided_line):
    error(filename, linenum, 'whitespace/semicolon', 3,
          'Missing space after ;')
def _IsType(clean_lines, nesting_state, expr):
  """Check if expression looks like a type name, returns true if so.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    expr: The expression to check.
  Returns:
    True, if token looks like a type.
  """
  # Keep only the last token in the expression
  last_word = Match(r'^.*(\b\S+)$', expr)
  if last_word:
    token = last_word.group(1)
  else:
    token = expr
  # Match native types and stdint types
  if _TYPES.match(token):
    return True
  # Try a bit harder to match templated types.  Walk up the nesting
  # stack until we find something that resembles a typename
  # declaration for what we are looking for.
  typename_pattern = (r'\b(?:typename|class|struct)\s+' + re.escape(token) +
                      r'\b')
  block_index = len(nesting_state.stack) - 1
  while block_index >= 0:
    # Hitting a namespace means we've left any scope where a template
    # parameter with this name could have been declared.
    if isinstance(nesting_state.stack[block_index], _NamespaceInfo):
      return False
    # Found where the opening brace is.  We want to scan from this
    # line up to the beginning of the function, minus a few lines.
    #   template <typename Type1,  // stop scanning here
    #             ...>
    #   class C
    #     : public ... {  // start scanning here
    last_line = nesting_state.stack[block_index].starting_linenum
    next_block_start = 0
    if block_index > 0:
      next_block_start = nesting_state.stack[block_index - 1].starting_linenum
    # Walk backwards from the block's opening line looking for the start of
    # a "template <...>" clause, stopping at the enclosing block's start.
    first_line = last_line
    while first_line >= next_block_start:
      if clean_lines.elided[first_line].find('template') >= 0:
        break
      first_line -= 1
    if first_line < next_block_start:
      # Didn't find any "template" keyword before reaching the next block,
      # there are probably no template things to check for this block
      block_index -= 1
      continue
    # Look for typename in the specified range
    for i in xrange(first_line, last_line + 1, 1):
      if Search(typename_pattern, clean_lines.elided[i]):
        return True
    block_index -= 1
  return False
def CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error):
  """Checks for horizontal spacing near commas.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  # Except after an opening paren, or after another opening brace (in case of
  # an initializer list, for instance), you should have spaces before your
  # braces when they are delimiting blocks, classes, namespaces etc.
  # And since you should never have braces at the beginning of a line,
  # this is an easy test.  Except that braces used for initialization don't
  # follow the same rule; we often don't want spaces before those.
  match = Match(r'^(.*[^ ({>]){', line)
  if match:
    # Try a bit harder to check for brace initialization.  This
    # happens in one of the following forms:
    #   Constructor() : initializer_list_{} { ... }
    #   Constructor{}.MemberFunction()
    #   Type variable{};
    #   FunctionCall(type{}, ...);
    #   LastArgument(..., type{});
    #   LOG(INFO) << type{} << " ...";
    #   map_of_type[{...}] = ...;
    #   ternary = expr ? new type{} : nullptr;
    #   OuterTemplate<InnerTemplateConstructor<Type>{}>
    #
    # We check for the character following the closing brace, and
    # silence the warning if it's one of those listed above, i.e.
    # "{.;,)<>]:".
    #
    # To account for nested initializer list, we allow any number of
    # closing braces up to "{;,)<".  We can't simply silence the
    # warning on first sight of closing brace, because that would
    # cause false negatives for things that are not initializer lists.
    #   Silence this:         But not this:
    #     Outer{                if (...) {
    #       Inner{...}            if (...){  // Missing space before {
    #     };                    }
    #
    # There is a false negative with this approach if people inserted
    # spurious semicolons, e.g. "if (cond){};", but we will catch the
    # spurious semicolon with a separate check.
    leading_text = match.group(1)
    # Find where this brace pair closes, then collect up to two following
    # lines of text so we can inspect what comes after the closing brace.
    (endline, endlinenum, endpos) = CloseExpression(
        clean_lines, linenum, len(match.group(1)))
    trailing_text = ''
    if endpos > -1:
      trailing_text = endline[endpos:]
    for offset in xrange(endlinenum + 1,
                         min(endlinenum + 3, clean_lines.NumLines() - 1)):
      trailing_text += clean_lines.elided[offset]
    # We also suppress warnings for `uint64_t{expression}` etc., as the style
    # guide recommends brace initialization for integral types to avoid
    # overflow/truncation.
    if (not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text)
        and not _IsType(clean_lines, nesting_state, leading_text)):
      error(filename, linenum, 'whitespace/braces', 5,
            'Missing space before {')
  # Make sure '} else {' has spaces.
  if Search(r'}else', line):
    error(filename, linenum, 'whitespace/braces', 5,
          'Missing space before else')
  # You shouldn't have a space before a semicolon at the end of the line.
  # There's a special case for "for" since the style guide allows space before
  # the semicolon there.
  if Search(r':\s*;\s*$', line):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Semicolon defining empty statement. Use {} instead.')
  elif Search(r'^\s*;\s*$', line):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Line contains only semicolon. If this should be an empty statement, '
          'use {} instead.')
  elif (Search(r'\s+;\s*$', line) and
        not Search(r'\bfor\b', line)):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Extra space before last semicolon. If this should be an empty '
          'statement, use {} instead.')
def IsDecltype(clean_lines, linenum, column):
  """Check if the token ending on (linenum, column) is decltype().

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: the number of the line to check.
    column: end column of the token to check.
  Returns:
    True if this token is decltype() expression, False otherwise.
  """
  # Walk backwards to the opening parenthesis that matches the close paren
  # ending at `column`.
  (text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column)
  if start_col < 0:
    # No matching open paren was found; can't be a decltype() call.
    return False
  # decltype() iff the token immediately before the open paren is "decltype".
  return bool(Search(r'\bdecltype\s*$', text[0:start_col]))
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
  """Checks for additional blank line issues related to sections.

  Currently the only thing checked here is blank line before protected/private.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    class_info: A _ClassInfo objects.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Skip small classes (25 lines or less -- roughly one terminal screen)
  # and the class's own first line (one-liners like "class Foo { public: };").
  # If the end of the class was never found, last_line is zero and the size
  # test below also bails out.
  if (class_info.last_line - class_info.starting_linenum <= 24 or
      linenum <= class_info.starting_linenum):
    return
  access_match = Match(r'\s*(public|protected|private):',
                       clean_lines.lines[linenum])
  if not access_match:
    return
  # Don't warn if the previous line is blank, mentions class/struct (we may
  # be at the top of the class, or next to a forward-declared inner class
  # that is semantically private but public for implementation reasons), or
  # ends with a backslash (classes defined inside C macros).
  prev_line = clean_lines.lines[linenum - 1]
  if (IsBlankLine(prev_line) or
      Search(r'\b(class|struct)\b', prev_line) or
      Search(r'\\$', prev_line)):
    return
  # Try a bit harder to find the beginning of the class.  This is to
  # account for multi-line base-specifier lists, e.g.:
  #   class Derived
  #       : public Base {
  end_class_head = class_info.starting_linenum
  for i in range(class_info.starting_linenum, linenum):
    if Search(r'\{\s*$', clean_lines.lines[i]):
      end_class_head = i
      break
  if end_class_head < linenum - 1:
    error(filename, linenum, 'whitespace/blank_line', 3,
          '"%s:" should be preceded by a blank line' % access_match.group(1))
def GetPreviousNonBlankLine(clean_lines, linenum):
  """Return the most recent non-blank line and its line number.

  Args:
    clean_lines: A CleansedLines instance containing the file contents.
    linenum: The number of the line to check.
  Returns:
    A tuple with two elements.  The first element is the contents of the last
    non-blank line before the current line, or the empty string if this is the
    first non-blank line.  The second is the line number of that line, or -1
    if this is the first non-blank line.
  """
  # Walk upward from the line just above `linenum` until something
  # non-blank is found.
  for prev_linenum in range(linenum - 1, -1, -1):
    prev_line = clean_lines.elided[prev_linenum]
    if not IsBlankLine(prev_line):  # found a line with content
      return (prev_line, prev_linenum)
  # Every earlier line was blank (or we started at the top of the file).
  return ('', -1)
def CheckBraces(filename, clean_lines, linenum, error):
  """Looks for misplaced braces (e.g. at the end of line).

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]        # get rid of comments and strings
  if Match(r'\s*{\s*$', line):
    # We allow an open brace to start a line in the case where someone is using
    # braces in a block to explicitly create a new scope, which is commonly used
    # to control the lifetime of stack-allocated variables.  Braces are also
    # used for brace initializers inside function calls.  We don't detect this
    # perfectly: we just don't complain if the last non-whitespace character on
    # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
    # previous line starts a preprocessor block.  We also allow a brace on the
    # following line if it is part of an array initialization and would not fit
    # within the 80 character limit of the preceding line.
    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
    if (not Search(r'[,;:}{(]\s*$', prevline) and
        not Match(r'\s*#', prevline) and
        not (GetLineWidth(prevline) > _line_length - 2 and '[]' in prevline)):
      error(filename, linenum, 'whitespace/braces', 4,
            '{ should almost always be at the end of the previous line')
  # An else clause should be on the same line as the preceding closing brace.
  if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
    if Match(r'\s*}\s*$', prevline):
      error(filename, linenum, 'whitespace/newline', 4,
            'An else should appear on the same line as the preceding }')
  # If braces come on one side of an else, they should be on both.
  # However, we have to worry about "else if" that spans multiple lines!
  if Search(r'else if\s*\(', line):       # could be multi-line if
    brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
    # find the ( after the if
    pos = line.find('else if')
    pos = line.find('(', pos)
    if pos > 0:
      # Find where the "else if" condition closes, then look for its brace.
      (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
      brace_on_right = endline[endpos:].find('{') != -1
      if brace_on_left != brace_on_right:    # must be brace after if
        error(filename, linenum, 'readability/braces', 5,
              'If an else has a brace on one side, it should have it on both')
  elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
    error(filename, linenum, 'readability/braces', 5,
          'If an else has a brace on one side, it should have it on both')
  # Likewise, an else should never have the else clause on the same line
  if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
    error(filename, linenum, 'whitespace/newline', 4,
          'Else clause should never be on same line as else (use 2 lines)')
  # In the same way, a do/while should never be on one line
  if Match(r'\s*do [^\s{]', line):
    error(filename, linenum, 'whitespace/newline', 4,
          'do/while clauses should not be on a single line')
  # Check single-line if/else bodies. The style guide says 'curly braces are not
  # required for single-line statements'. We additionally allow multi-line,
  # single statements, but we reject anything with more than one semicolon in
  # it. This means that the first semicolon after the if should be at the end of
  # its line, and the line after that should have an indent level equal to or
  # lower than the if. We also check for ambiguous if/else nesting without
  # braces.
  if_else_match = Search(r'\b(if\s*(|constexpr)\s*\(|else\b)', line)
  if if_else_match and not Match(r'\s*#', line):
    if_indent = GetIndentLevel(line)
    endline, endlinenum, endpos = line, linenum, if_else_match.end()
    if_match = Search(r'\bif\s*(|constexpr)\s*\(', line)
    if if_match:
      # This could be a multiline if condition, so find the end first.
      pos = if_match.end() - 1
      (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
    # Check for an opening brace, either directly after the if or on the next
    # line. If found, this isn't a single-statement conditional.
    if (not Match(r'\s*{', endline[endpos:])
        and not (Match(r'\s*$', endline[endpos:])
                 and endlinenum < (len(clean_lines.elided) - 1)
                 and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
      # Scan forward to the first semicolon after the condition; that should
      # terminate the (possibly multi-line) single-statement body.
      while (endlinenum < len(clean_lines.elided)
             and ';' not in clean_lines.elided[endlinenum][endpos:]):
        endlinenum += 1
        endpos = 0
      if endlinenum < len(clean_lines.elided):
        endline = clean_lines.elided[endlinenum]
        # We allow a mix of whitespace and closing braces (e.g. for one-liner
        # methods) and a single \ after the semicolon (for macros)
        endpos = endline.find(';')
        if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
          # Semicolon isn't the last character, there's something trailing.
          # Output a warning if the semicolon is not contained inside
          # a lambda expression.
          if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
                       endline):
            error(filename, linenum, 'readability/braces', 4,
                  'If/else bodies with multiple statements require braces')
        elif endlinenum < len(clean_lines.elided) - 1:
          # Make sure the next line is dedented
          next_line = clean_lines.elided[endlinenum + 1]
          next_indent = GetIndentLevel(next_line)
          # With ambiguous nested if statements, this will error out on the
          # if that *doesn't* match the else, regardless of whether it's the
          # inner one or outer one.
          if (if_match and Match(r'\s*else\b', next_line)
              and next_indent != if_indent):
            error(filename, linenum, 'readability/braces', 4,
                  'Else clause should be indented at the same level as if. '
                  'Ambiguous nested if/else chains require braces.')
          elif next_indent > if_indent:
            error(filename, linenum, 'readability/braces', 4,
                  'If/else bodies with multiple statements require braces')
def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
  """Looks for redundant trailing semicolon.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  # Block bodies should not be followed by a semicolon.  Due to C++11
  # brace initialization, there are more places where semicolons are
  # required than not, so we explicitly list the allowed rules rather
  # than listing the disallowed ones.  These are the places where "};"
  # should be replaced by just "}":
  # 1. Some flavor of block following closing parenthesis:
  #    for (;;) {};
  #    while (...) {};
  #    switch (...) {};
  #    Function(...) {};
  #    if (...) {};
  #    if (...) else if (...) {};
  #
  # 2. else block:
  #    if (...) else {};
  #
  # 3. const member function:
  #    Function(...) const {};
  #
  # 4. Block following some statement:
  #    x = 42;
  #    {};
  #
  # 5. Block at the beginning of a function:
  #    Function(...) {
  #      {};
  #    }
  #
  #    Note that naively checking for the preceding "{" will also match
  #    braces inside multi-dimensional arrays, but this is fine since
  #    that expression will not contain semicolons.
  #
  # 6. Block following another block:
  #    while (true) {}
  #    {};
  #
  # 7. End of namespaces:
  #    namespace {};
  #
  #    These semicolons seems far more common than other kinds of
  #    redundant semicolons, possibly due to people converting classes
  #    to namespaces.  For now we do not warn for this case.
  #
  # Try matching case 1 first.
  match = Match(r'^(.*\)\s*)\{', line)
  if match:
    # Matched closing parenthesis (case 1).  Check the token before the
    # matching opening parenthesis, and don't warn if it looks like a
    # macro.  This avoids these false positives:
    #  - macro that defines a base class
    #  - multi-line macro that defines a base class
    #  - macro that defines the whole class-head
    #
    # But we still issue warnings for macros that we know are safe to
    # warn, specifically:
    #  - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
    #  - TYPED_TEST
    #  - INTERFACE_DEF
    #  - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
    #
    # We implement a list of safe macros instead of a list of
    # unsafe macros, even though the latter appears less frequently in
    # google code and would have been easier to implement.  This is because
    # the downside for getting the allowed checks wrong means some extra
    # semicolons, while the downside for getting disallowed checks wrong
    # would result in compile errors.
    #
    # In addition to macros, we also don't want to warn on
    #  - Compound literals
    #  - Lambdas
    #  - alignas specifier with anonymous structs
    #  - decltype
    closing_brace_pos = match.group(1).rfind(')')
    opening_parenthesis = ReverseCloseExpression(
        clean_lines, linenum, closing_brace_pos)
    if opening_parenthesis[2] > -1:
      line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
      macro = Search(r'\b([A-Z_][A-Z0-9_]*)\s*$', line_prefix)
      func = Match(r'^(.*\])\s*$', line_prefix)
      if ((macro and
           macro.group(1) not in (
               'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
               'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
               'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
          (func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
          Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or
          Search(r'\bdecltype$', line_prefix) or
          Search(r'\s+=\s*$', line_prefix)):
        # One of the suppressed constructs above; don't treat as case 1.
        match = None
    if (match and
        opening_parenthesis[1] > 1 and
        Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
      # Multi-line lambda-expression
      match = None
  else:
    # Try matching cases 2-3.
    match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
    if not match:
      # Try matching cases 4-6.  These are always matched on separate lines.
      #
      # Note that we can't simply concatenate the previous line to the
      # current line and do a single match, otherwise we may output
      # duplicate warnings for the blank line case:
      #   if (cond) {
      #     // blank line
      #   }
      prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
      if prevline and Search(r'[;{}]\s*$', prevline):
        match = Match(r'^(\s*)\{', line)
  # Check matching closing brace
  if match:
    (endline, endlinenum, endpos) = CloseExpression(
        clean_lines, linenum, len(match.group(1)))
    if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
      # Current {} pair is eligible for semicolon check, and we have found
      # the redundant semicolon, output warning here.
      #
      # Note: because we are scanning forward for opening braces, and
      # outputting warnings for the matching closing brace, if there are
      # nested blocks with trailing semicolons, we will get the error
      # messages in reversed order.
      # We need to check the line forward for NOLINT
      raw_lines = clean_lines.raw_lines
      ParseNolintSuppressions(filename, raw_lines[endlinenum-1], endlinenum-1,
                              error)
      ParseNolintSuppressions(filename, raw_lines[endlinenum], endlinenum,
                              error)
      error(filename, endlinenum, 'readability/braces', 4,
            "You don't need a ; after a }")
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
  """Look for empty loop/conditional body with only a single semicolon.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Search for loop keywords at the beginning of the line.  Because only
  # whitespaces are allowed before the keywords, this will also ignore most
  # do-while-loops, since those lines should start with closing brace.
  #
  # We also check "if" blocks here, since an empty conditional block
  # is likely an error.
  line = clean_lines.elided[linenum]
  matched = Match(r'\s*(for|while|if)\s*\(', line)
  if matched:
    # Find the end of the conditional expression.
    (end_line, end_linenum, end_pos) = CloseExpression(
        clean_lines, linenum, line.find('('))
    # Output warning if what follows the condition expression is a semicolon.
    # No warning for all other cases, including whitespace or newline, since we
    # have a separate check for semicolons preceded by whitespace.
    if end_pos >= 0 and Match(r';', end_line[end_pos:]):
      if matched.group(1) == 'if':
        error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
              'Empty conditional bodies should use {}')
      else:
        error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
              'Empty loop bodies should use {} or continue')
    # Check for if statements that have completely empty bodies (no comments)
    # and no else clauses.
    if end_pos >= 0 and matched.group(1) == 'if':
      # Find the position of the opening { for the if statement.
      # Return without logging an error if it has no brackets.
      opening_linenum = end_linenum
      opening_line_fragment = end_line[end_pos:]
      # Loop until EOF or find anything that's not whitespace or opening {.
      while not Search(r'^\s*\{', opening_line_fragment):
        if Search(r'^(?!\s*$)', opening_line_fragment):
          # Conditional has no brackets.
          return
        opening_linenum += 1
        if opening_linenum == len(clean_lines.elided):
          # Couldn't find conditional's opening { or any code before EOF.
          return
        opening_line_fragment = clean_lines.elided[opening_linenum]
      # Set opening_line (opening_line_fragment may not be entire opening line).
      opening_line = clean_lines.elided[opening_linenum]
      # Find the position of the closing }.
      opening_pos = opening_line_fragment.find('{')
      if opening_linenum == end_linenum:
        # We need to make opening_pos relative to the start of the entire line.
        opening_pos += end_pos
      (closing_line, closing_linenum, closing_pos) = CloseExpression(
          clean_lines, opening_linenum, opening_pos)
      if closing_pos < 0:
        # Unbalanced braces; nothing more we can check here.
        return
      # Now construct the body of the conditional. This consists of the portion
      # of the opening line after the {, all lines until the closing line,
      # and the portion of the closing line before the }.
      if (clean_lines.raw_lines[opening_linenum] !=
          CleanseComments(clean_lines.raw_lines[opening_linenum])):
        # Opening line ends with a comment, so conditional isn't empty.
        return
      if closing_linenum > opening_linenum:
        # Opening line after the {. Ignore comments here since we checked above.
        bodylist = list(opening_line[opening_pos+1:])
        # All lines until closing line, excluding closing line, with comments.
        bodylist.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum])
        # Closing line before the }. Won't (and can't) have comments.
        bodylist.append(clean_lines.elided[closing_linenum][:closing_pos-1])
        body = '\n'.join(bodylist)
      else:
        # If statement has brackets and fits on a single line.
        body = opening_line[opening_pos+1:closing_pos-1]
      # Check if the body is empty
      if not _EMPTY_CONDITIONAL_BODY_PATTERN.search(body):
        return
      # The body is empty. Now make sure there's not an else clause.
      current_linenum = closing_linenum
      current_line_fragment = closing_line[closing_pos:]
      # Loop until EOF or find anything that's not whitespace or else clause.
      while Search(r'^\s*$|^(?=\s*else)', current_line_fragment):
        if Search(r'^(?=\s*else)', current_line_fragment):
          # Found an else clause, so don't log an error.
          return
        current_linenum += 1
        if current_linenum == len(clean_lines.elided):
          break
        current_line_fragment = clean_lines.elided[current_linenum]
      # The body is empty and there's no else clause until EOF or other code.
      error(filename, end_linenum, 'whitespace/empty_if_body', 4,
            ('If statement had no body and no else clause'))
def FindCheckMacro(line):
  """Find a replaceable CHECK-like macro.

  Args:
    line: line to search on.
  Returns:
    (macro name, start position), or (None, -1) if no replaceable
    macro is found.
  """
  for macro in _CHECK_MACROS:
    if macro not in line:
      continue
    # A plain substring hit isn't enough.  Require the macro name to be a
    # whole token immediately followed by an opening parenthesis, so that
    # unrelated identifiers that merely contain the CHECK substring are
    # not mistaken for the macro.
    matched = Match(r'^(.*\b' + macro + r'\s*)\(', line)
    if matched:
      return (macro, len(matched.group(1)))
  return (None, -1)
def CheckCheck(filename, clean_lines, linenum, error):
  """Checks the use of CHECK and EXPECT macros.

  Suggests the two-argument comparison form (e.g. CHECK_EQ(a, b)) when a
  CHECK-like macro wraps a single binary comparison (e.g. CHECK(a == b)).

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Decide the set of replacement macros that should be suggested
  lines = clean_lines.elided
  (check_macro, start_pos) = FindCheckMacro(lines[linenum])
  if not check_macro:
    return
  # Find end of the boolean expression by matching parentheses
  (last_line, end_line, end_pos) = CloseExpression(
      clean_lines, linenum, start_pos)
  if end_pos < 0:
    return
  # If the check macro is followed by something other than a
  # semicolon, assume users will log their own custom error messages
  # and don't suggest any replacements.
  if not Match(r'\s*;', last_line[end_pos:]):
    return
  if linenum == end_line:
    expression = lines[linenum][start_pos + 1:end_pos - 1]
  else:
    # Multi-line macro argument: stitch the pieces together, dropping the
    # surrounding parentheses.
    expression = lines[linenum][start_pos + 1:]
    for i in xrange(linenum + 1, end_line):
      expression += lines[i]
    expression += last_line[0:end_pos - 1]
  # Parse expression so that we can take parentheses into account.
  # This avoids false positives for inputs like "CHECK((a < 4) == b)",
  # which is not replaceable by CHECK_LE.
  lhs = ''
  rhs = ''
  operator = None
  while expression:
    matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
                    r'==|!=|>=|>|<=|<|\()(.*)$', expression)
    if matched:
      token = matched.group(1)
      if token == '(':
        # Parenthesized operand
        expression = matched.group(2)
        (end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
        if end < 0:
          return  # Unmatched parenthesis
        lhs += '(' + expression[0:end]
        expression = expression[end:]
      elif token in ('&&', '||'):
        # Logical and/or operators. This means the expression
        # contains more than one term, for example:
        #   CHECK(42 < a && a < b);
        #
        # These are not replaceable with CHECK_LE, so bail out early.
        return
      elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
        # Non-relational operator
        lhs += token
        expression = matched.group(2)
      else:
        # Relational operator: everything after it is the right-hand side.
        operator = token
        rhs = matched.group(2)
        break
    else:
      # Unparenthesized operand. Instead of appending to lhs one character
      # at a time, we do another regular expression match to consume several
      # characters at once if possible. Trivial benchmark shows that this
      # is more efficient when the operands are longer than a single
      # character, which is generally the case.
      matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
      if not matched:
        matched = Match(r'^(\s*\S)(.*)$', expression)
        if not matched:
          break
      lhs += matched.group(1)
      expression = matched.group(2)
  # Only apply checks if we got all parts of the boolean expression
  if not (lhs and operator and rhs):
    return
  # Check that rhs do not contain logical operators. We already know
  # that lhs is fine since the loop above parses out && and ||.
  if rhs.find('&&') > -1 or rhs.find('||') > -1:
    return
  # At least one of the operands must be a constant literal. This is
  # to avoid suggesting replacements for unprintable things like
  # CHECK(variable != iterator)
  #
  # The following pattern matches decimal, hex integers, strings, and
  # characters (in that order).
  lhs = lhs.strip()
  rhs = rhs.strip()
  match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
  if Match(match_constant, lhs) or Match(match_constant, rhs):
    # Note: since we know both lhs and rhs, we can provide a more
    # descriptive error message like:
    #   Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
    # Instead of:
    #   Consider using CHECK_EQ instead of CHECK(a == b)
    #
    # We are still keeping the less descriptive message because if lhs
    # or rhs gets long, the error message might become unreadable.
    error(filename, linenum, 'readability/check', 2,
          'Consider using %s instead of %s(a %s b)' % (
              _CHECK_REPLACEMENT[check_macro][operator],
              check_macro, operator))
def CheckAltTokens(filename, clean_lines, linenum, error):
  """Check alternative keywords being used in boolean expressions.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Preprocessor directives are exempt from this check.
  if Match(r'^\s*#', line):
    return

  # Last ditch effort to avoid multi-line comments. This will not help
  # if the comment started before the current line or ended after the
  # current line, but it catches most of the false positives. At least,
  # it provides a way to workaround this warning for people who use
  # multi-line comments in preprocessor macros.
  #
  # TODO(unknown): remove this once cpplint has better support for
  # multi-line comments.
  if '/*' in line or '*/' in line:
    return

  for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
    suggested = _ALT_TOKEN_REPLACEMENT[match.group(1)]
    error(filename, linenum, 'readability/alt_tokens', 2,
          'Use operator %s instead of %s' % (suggested, match.group(1)))
def GetLineWidth(line):
  """Determines the width of the line in column positions.

  Args:
    line: A string, which may be a Unicode string.

  Returns:
    The width of the line in column positions, accounting for Unicode
    combining characters and wide characters.
  """
  # Byte strings have no combining/wide characters to account for.
  if not isinstance(line, unicode):
    return len(line)
  width = 0
  for uc in unicodedata.normalize('NFC', line):
    if unicodedata.east_asian_width(uc) in ('W', 'F'):
      # Wide and fullwidth characters occupy two columns.
      width += 2
    elif not unicodedata.combining(uc):
      # Issue 337
      # https://mail.python.org/pipermail/python-list/2012-August/628809.html
      if (sys.version_info.major, sys.version_info.minor) <= (3, 2):
        # On narrow builds astral characters are stored as surrogate pairs;
        # don't count the low surrogate as an extra column.
        # https://github.com/python/cpython/blob/2.7/Include/unicodeobject.h#L81
        is_wide_build = sysconfig.get_config_var("Py_UNICODE_SIZE") >= 4
        # https://github.com/python/cpython/blob/2.7/Objects/unicodeobject.c#L564
        is_low_surrogate = 0xDC00 <= ord(uc) <= 0xDFFF
        if not is_wide_build and is_low_surrogate:
          width -= 1
      width += 1
  return width
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
               error):
  """Checks rules from the 'C++ style rules' section of cppguide.html.

  Most of these rules are hard to test (naming, comment style), but we
  do what we can. In particular we check for 2-space indents, line lengths,
  tab usage, spaces inside code, etc.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    file_extension: The extension (without the dot) of the filename.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Don't use "elided" lines here, otherwise we can't check commented lines.
  # Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings,
  raw_lines = clean_lines.lines_without_raw_strings
  line = raw_lines[linenum]
  prev = raw_lines[linenum - 1] if linenum > 0 else ''

  if line.find('\t') != -1:
    error(filename, linenum, 'whitespace/tab', 1,
          'Tab found; better to use spaces')

  # One or three blank spaces at the beginning of the line is weird; it's
  # hard to reconcile that with 2-space indents.
  # NOTE: here are the conditions rob pike used for his tests. Mine aren't
  # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
  # if(RLENGTH > 20) complain = 0;
  # if(match($0, " +(error|private|public|protected):")) complain = 0;
  # if(match(prev, "&& *$")) complain = 0;
  # if(match(prev, "\\|\\| *$")) complain = 0;
  # if(match(prev, "[\",=><] *$")) complain = 0;
  # if(match($0, " <<")) complain = 0;
  # if(match(prev, " +for \\(")) complain = 0;
  # if(prevodd && match(prevprev, " +for \\(")) complain = 0;
  scope_or_label_pattern = r'\s*(?:public|private|protected|signals)(?:\s+(?:slots\s*)?)?:\s*\\?$'
  initial_spaces = 0
  cleansed_line = clean_lines.elided[linenum]
  while initial_spaces < len(line) and line[initial_spaces] == ' ':
    initial_spaces += 1
  # There are certain situations we allow one space, notably for
  # section labels, and also lines containing multi-line raw strings.
  # We also don't check for lines that look like continuation lines
  # (of lines ending in double quotes, commas, equals, or angle brackets)
  # because the rules for how to indent those are non-trivial.
  if (not Search(r'[",=><] *$', prev) and
      (initial_spaces == 1 or initial_spaces == 3) and
      not Match(scope_or_label_pattern, cleansed_line) and
      not (clean_lines.raw_lines[linenum] != line and
           Match(r'^\s*""', line))):
    error(filename, linenum, 'whitespace/indent', 3,
          'Weird number of spaces at line-start. '
          'Are you using a 2-space indent?')

  if line and line[-1].isspace():
    error(filename, linenum, 'whitespace/end_of_line', 4,
          'Line ends in whitespace. Consider deleting these extra spaces.')

  # Check if the line is a header guard.
  is_header_guard = False
  if IsHeaderExtension(file_extension):
    cppvar = GetHeaderGuardCPPVariable(filename)
    if (line.startswith('#ifndef %s' % cppvar) or
        line.startswith('#define %s' % cppvar) or
        line.startswith('#endif // %s' % cppvar)):
      is_header_guard = True
  # #include lines and header guards can be long, since there's no clean way to
  # split them.
  #
  # URLs can be long too. It's possible to split these, but it makes them
  # harder to cut&paste.
  #
  # The "$Id:...$" comment may also get very long without it being the
  # developers fault.
  #
  # Doxygen documentation copying can get pretty long when using an overloaded
  # function declaration
  if (not line.startswith('#include') and not is_header_guard and
      not Match(r'^\s*//.*http(s?)://\S*$', line) and
      not Match(r'^\s*//\s*[^\s]*$', line) and
      not Match(r'^// \$Id:.*#[0-9]+ \$$', line) and
      not Match(r'^\s*/// [@\\](copydoc|copydetails|copybrief) .*$', line)):
    line_width = GetLineWidth(line)
    if line_width > _line_length:
      error(filename, linenum, 'whitespace/line_length', 2,
            'Lines should be <= %i characters long' % _line_length)

  if (cleansed_line.count(';') > 1 and
      # allow simple single line lambdas
      not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}\n\r]*\}',
                line) and
      # for loops are allowed two ;'s (and may run over two lines).
      cleansed_line.find('for') == -1 and
      (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
       GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
      # It's ok to have many commands in a switch case that fits in 1 line
      not ((cleansed_line.find('case ') != -1 or
            cleansed_line.find('default:') != -1) and
           cleansed_line.find('break;') != -1)):
    error(filename, linenum, 'whitespace/newline', 0,
          'More than one command on the same line')

  # Some more style checks
  CheckBraces(filename, clean_lines, linenum, error)
  CheckTrailingSemicolon(filename, clean_lines, linenum, error)
  CheckEmptyBlockBody(filename, clean_lines, linenum, error)
  CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
  CheckOperatorSpacing(filename, clean_lines, linenum, error)
  CheckParenthesisSpacing(filename, clean_lines, linenum, error)
  CheckCommaSpacing(filename, clean_lines, linenum, error)
  CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error)
  CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)
  CheckCheck(filename, clean_lines, linenum, error)
  CheckAltTokens(filename, clean_lines, linenum, error)
  # Section spacing only applies inside a class body.
  classinfo = nesting_state.InnermostClass()
  if classinfo:
    CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
# Matches an #include directive, capturing the opening delimiter (< or ")
# in group 1 and the header path in group 2.
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
# Matches the first component of a filename delimited by -s and _s. That is:
#  _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
#  _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
#  _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
#  _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
  """Drops common suffixes like _test.cc or -inl.h from filename.

  For example:
    >>> _DropCommonSuffixes('foo/foo-inl.h')
    'foo/foo'
    >>> _DropCommonSuffixes('foo/bar/foo.cc')
    'foo/bar/foo'
    >>> _DropCommonSuffixes('foo/foo_internal.h')
    'foo/foo'
    >>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
    'foo/foo_unusualinternal'

  Args:
    filename: The input filename.

  Returns:
    The filename with the common suffix removed.
  """
  # Candidate suffixes: test suffixes paired with non-header extensions
  # first, then implementation-header suffixes paired with header extensions.
  candidate_suffixes = []
  for test_suffix, ext in itertools.product(_test_suffixes,
                                            GetNonHeaderExtensions()):
    candidate_suffixes.append('%s.%s' % (test_suffix.lstrip('_'), ext))
  for impl_suffix, ext in itertools.product(('inl', 'imp', 'internal'),
                                            GetHeaderExtensions()):
    candidate_suffixes.append('%s.%s' % (impl_suffix, ext))

  for suffix in candidate_suffixes:
    # Only strip the suffix when it is preceded by '-' or '_', so that
    # names like 'foo_unusualinternal.h' are left alone.
    if (filename.endswith(suffix) and len(filename) > len(suffix) and
        filename[-len(suffix) - 1] in ('-', '_')):
      return filename[:-len(suffix) - 1]
  return os.path.splitext(filename)[0]
def _ClassifyInclude(fileinfo, include, used_angle_brackets, include_order="default"):
  """Figures out what kind of header 'include' is.

  Args:
    fileinfo: The current file cpplint is running over. A FileInfo instance.
    include: The path to a #included file.
    used_angle_brackets: True if the #include used <> rather than "".
    include_order: "default" or other value allowed in program arguments

  Returns:
    One of the _XXX_HEADER constants.

  For example:
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
    _C_SYS_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
    _CPP_SYS_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', True, "standardcfirst")
    _OTHER_SYS_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
    _LIKELY_MY_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
    ...                  'bar/foo_other_ext.h', False)
    _POSSIBLE_MY_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
    _OTHER_HEADER
  """
  # This is a list of all standard c++ header files, except
  # those already checked for above.
  is_cpp_header = include in _CPP_HEADERS
  # Mark include as C header if in list or in a known folder for standard-ish C headers.
  # Note: in "default" ordering mode any system header not classified as C++
  # falls through to the C bucket unconditionally.
  is_std_c_header = (include_order == "default") or (include in _C_HEADERS
            # additional linux glibc header folders
            or Search(r'(?:%s)\/.*\.h' % "|".join(C_STANDARD_HEADER_FOLDERS), include))
  # Headers with C++ extensions shouldn't be considered C system headers
  is_system = used_angle_brackets and not os.path.splitext(include)[1] in ['.hpp', '.hxx', '.h++']
  if is_system:
    if is_cpp_header:
      return _CPP_SYS_HEADER
    if is_std_c_header:
      return _C_SYS_HEADER
    else:
      return _OTHER_SYS_HEADER
  # If the target file and the include we're checking share a
  # basename when we drop common extensions, and the include
  # lives in . , then it's likely to be owned by the target file.
  target_dir, target_base = (
      os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
  include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
  target_dir_pub = os.path.normpath(target_dir + '/../public')
  target_dir_pub = target_dir_pub.replace('\\', '/')
  if target_base == include_base and (
      include_dir == target_dir or
      include_dir == target_dir_pub):
    return _LIKELY_MY_HEADER
  # If the target and include share some initial basename
  # component, it's possible the target is implementing the
  # include, so it's allowed to be first, but we'll never
  # complain if it's not there.
  target_first_component = _RE_FIRST_COMPONENT.match(target_base)
  include_first_component = _RE_FIRST_COMPONENT.match(include_base)
  if (target_first_component and include_first_component and
      target_first_component.group(0) ==
      include_first_component.group(0)):
    return _POSSIBLE_MY_HEADER
  return _OTHER_HEADER
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
  """Check rules that are applicable to #include lines.

  Strings on #include lines are NOT removed from elided line, to make
  certain tasks easier. However, to prevent false positives, checks
  applicable to #include lines in CheckLanguage must be put here.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    include_state: An _IncludeState instance in which the headers are inserted.
    error: The function to call with any errors found.
  """
  fileinfo = FileInfo(filename)
  line = clean_lines.lines[linenum]

  # "include" should use the new style "foo/bar.h" instead of just "bar.h"
  # Only do this check if the included header follows google naming
  # conventions. If not, assume that it's a 3rd party API that
  # requires special include conventions.
  #
  # We also make an exception for Lua headers, which follow google
  # naming convention but not the include convention.
  match = Match(r'#include\s*"([^/]+\.h)"', line)
  if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
    error(filename, linenum, 'build/include_subdir', 4,
          'Include the directory when naming .h files')

  # we shouldn't include a file more than once. actually, there are a
  # handful of instances where doing so is okay, but in general it's
  # not.
  match = _RE_PATTERN_INCLUDE.search(line)
  if match:
    include = match.group(2)
    used_angle_brackets = (match.group(1) == '<')
    duplicate_line = include_state.FindHeader(include)
    if duplicate_line >= 0:
      error(filename, linenum, 'build/include', 4,
            '"%s" already included at %s:%s' %
            (include, filename, duplicate_line))
      return

    for extension in GetNonHeaderExtensions():
      if (include.endswith('.' + extension) and
          os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)):
        error(filename, linenum, 'build/include', 4,
              'Do not include .' + extension + ' files from other packages')
        return

    # We DO want to include a 3rd party looking header if it matches the
    # filename. Otherwise we get an erroneous error "...should include its
    # header" error later.
    # The base filename does not depend on the candidate extension, so
    # compute it once before the loop instead of on every iteration.
    basefilename = filename[0:len(filename) - len(fileinfo.Extension())]
    third_src_header = False
    for ext in GetHeaderExtensions():
      headerfile = basefilename + '.' + ext
      headername = FileInfo(headerfile).RepositoryName()
      if headername in include or include in headername:
        third_src_header = True
        break

    if third_src_header or not _THIRD_PARTY_HEADERS_PATTERN.match(include):
      include_state.include_list[-1].append((include, linenum))

      # We want to ensure that headers appear in the right order:
      # 1) for foo.cc, foo.h (preferred location)
      # 2) c system files
      # 3) cpp system files
      # 4) for foo.cc, foo.h (deprecated location)
      # 5) other google headers
      #
      # We classify each include statement as one of those 5 types
      # using a number of techniques. The include_state object keeps
      # track of the highest type seen, and complains if we see a
      # lower type after that.
      error_message = include_state.CheckNextIncludeOrder(
          _ClassifyInclude(fileinfo, include, used_angle_brackets, _include_order))
      if error_message:
        error(filename, linenum, 'build/include_order', 4,
              '%s. Should be: %s.h, c system, c++ system, other.' %
              (error_message, fileinfo.BaseName()))
      canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
      if not include_state.IsInAlphabeticalOrder(
          clean_lines, linenum, canonical_include):
        error(filename, linenum, 'build/include_alpha', 4,
              'Include "%s" not in alphabetical order' % include)
      include_state.SetLastHeader(canonical_include)
def _GetTextInside(text, start_pattern):
r"""Retrieves all the text between matching open and close parentheses.
Given a string of lines and a regular expression string, retrieve all the text
following the expression and between opening punctuation symbols like
(, [, or {, and the matching close-punctuation symbol. This properly nested
occurrences of the punctuations, so for the text like
printf(a(), b(c()));
a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
start_pattern must match string having an open punctuation symbol at the end.
Args:
text: The lines to extract text. Its comments and strings must be elided.
It can be single line and can span multiple lines.
start_pattern: The regexp string indicating where to start extracting
the text.
Returns:
The extracted text.
None if either the opening string or ending punctuation could not be found.
"""
# TODO(unknown): Audit cpplint.py to see what places could be profitably
# rewritten to use _GetTextInside (and use inferior regexp matching today).
# Give opening punctuations to get the matching close-punctuations.
matching_punctuation = {'(': ')', '{': '}', '[': ']'}
closing_punctuation = set(itervalues(matching_punctuation))
# Find the position to start extracting text.
match = re.search(start_pattern, text, re.M)
if not match: # start_pattern not found in text.
return None
start_position = match.end(0)
assert start_position > 0, (
'start_pattern must ends with an opening punctuation.')
assert text[start_position - 1] in matching_punctuation, (
'start_pattern must ends with an opening punctuation.')
# Stack of closing punctuations we expect to have in text after position.
punctuation_stack = [matching_punctuation[text[start_position - 1]]]
position = start_position
while punctuation_stack and position < len(text):
if text[position] == punctuation_stack[-1]:
punctuation_stack.pop()
elif text[position] in closing_punctuation:
# A closing punctuation without matching opening punctuations.
return None
elif text[position] in matching_punctuation:
punctuation_stack.append(matching_punctuation[text[position]])
position += 1
if punctuation_stack:
# Opening punctuations left without matching close-punctuations.
return None
# punctuations match.
return text[start_position:position - 1]
# Patterns for matching call-by-reference parameters.
#
# Supports nested templates up to 2 levels deep using this messy pattern:
#   < (?: < (?: < [^<>]*
#               >
#           | [^<>] )*
#         >
#     | [^<>] )*
#   >
# A C++ identifier: letter or underscore, then word characters.
_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]*
# A type name, optionally const-qualified, elaborated (class/struct/...),
# templated (up to 2 nesting levels, per the diagram above), or
# namespace-qualified with '::'.
_RE_PATTERN_TYPE = (
    r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
    r'(?:\w|'
    r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
    r'::)+')
# A call-by-reference parameter ends with '& identifier'.
_RE_PATTERN_REF_PARAM = re.compile(
    r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
    r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
# A call-by-const-reference parameter either ends with 'const& identifier'
# or looks like 'const type& identifier' when 'type' is atomic.
_RE_PATTERN_CONST_REF_PARAM = (
    r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
    r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
# Stream types: matches any '...stream& identifier' parameter.
_RE_PATTERN_REF_STREAM_PARAM = (
    r'(?:.*stream\s*&\s*' + _RE_PATTERN_IDENT + r')')
def CheckLanguage(filename, clean_lines, linenum, file_extension,
                  include_state, nesting_state, error):
  """Checks rules from the 'C++ language rules' section of cppguide.html.

  Some of these rules are hard to test (function overloading, using
  uint32 inappropriately), but we do the best we can.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    file_extension: The extension (without the dot) of the filename.
    include_state: An _IncludeState instance in which the headers are inserted.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # If the line is empty or consists of entirely a comment, no need to
  # check it.
  line = clean_lines.elided[linenum]
  if not line:
    return

  match = _RE_PATTERN_INCLUDE.search(line)
  if match:
    CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
    return

  # Reset include state across preprocessor directives. This is meant
  # to silence warnings for conditional includes.
  match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
  if match:
    include_state.ResetSection(match.group(1))

  # Perform other checks now that we are sure that this is not an include line
  CheckCasts(filename, clean_lines, linenum, error)
  CheckGlobalStatic(filename, clean_lines, linenum, error)
  CheckPrintf(filename, clean_lines, linenum, error)

  if IsHeaderExtension(file_extension):
    # TODO(unknown): check that 1-arg constructors are explicit.
    #                How to tell it's a constructor?
    #                (handled in CheckForNonStandardConstructs for now)
    # TODO(unknown): check that classes declare or disable copy/assign
    #                (level 1 error)
    pass

  # Check if people are using the verboten C basic types. The only exception
  # we regularly allow is "unsigned short port" for port.
  if Search(r'\bshort port\b', line):
    if not Search(r'\bunsigned short port\b', line):
      error(filename, linenum, 'runtime/int', 4,
            'Use "unsigned short" for ports, not "short"')
  else:
    match = Search(r'\b(short|long(?! +double)|long long)\b', line)
    if match:
      error(filename, linenum, 'runtime/int', 4,
            'Use int16/int64/etc, rather than the C type %s' % match.group(1))

  # Check if some verboten operator overloading is going on
  # TODO(unknown): catch out-of-line unary operator&:
  #   class X {};
  #   int operator&(const X& x) { return 42; } // unary operator&
  # The trick is it's hard to tell apart from binary operator&:
  #   class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
  if Search(r'\boperator\s*&\s*\(\s*\)', line):
    error(filename, linenum, 'runtime/operator', 4,
          'Unary operator& is dangerous. Do not use it.')

  # Check for suspicious usage of "if" like
  # } if (a == b) {
  if Search(r'\}\s*if\s*\(', line):
    error(filename, linenum, 'readability/braces', 4,
          'Did you mean "else if"? If not, start a new line for "if".')

  # Check for potential format string bugs like printf(foo).
  # We constrain the pattern not to pick things like DocidForPrintf(foo).
  # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
  # TODO(unknown): Catch the following case. Need to change the calling
  # convention of the whole function to process multiple line to handle it.
  #   printf(
  #       boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
  printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
  if printf_args:
    match = Match(r'([\w.\->()]+)$', printf_args)
    if match and match.group(1) != '__VA_ARGS__':
      function_name = re.search(r'\b((?:string)?printf)\s*\(',
                                line, re.I).group(1)
      error(filename, linenum, 'runtime/printf', 4,
            'Potential format string bug. Do %s("%%s", %s) instead.'
            % (function_name, match.group(1)))

  # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
  match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
  if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
    error(filename, linenum, 'runtime/memset', 4,
          'Did you mean "memset(%s, 0, %s)"?'
          % (match.group(1), match.group(2)))

  if Search(r'\busing namespace\b', line):
    if Search(r'\bliterals\b', line):
      error(filename, linenum, 'build/namespaces_literals', 5,
            'Do not use namespace using-directives. '
            'Use using-declarations instead.')
    else:
      error(filename, linenum, 'build/namespaces', 5,
            'Do not use namespace using-directives. '
            'Use using-declarations instead.')

  # Detect variable-length arrays.
  match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
  if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
      match.group(3).find(']') == -1):
    # Split the size using space and arithmetic operators as delimiters.
    # If any of the resulting tokens are not compile time constants then
    # report the error.
    # (Fixed a stray ']' that used to follow '>>' in this pattern; it made
    # '>>' a delimiter only when immediately followed by ']'.)
    tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>', match.group(3))
    is_const = True
    skip_next = False
    for tok in tokens:
      if skip_next:
        skip_next = False
        continue

      if Search(r'sizeof\(.+\)', tok): continue
      if Search(r'arraysize\(\w+\)', tok): continue

      tok = tok.lstrip('(')
      tok = tok.rstrip(')')
      if not tok: continue
      if Match(r'\d+', tok): continue
      if Match(r'0[xX][0-9a-fA-F]+', tok): continue
      if Match(r'k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
      # A catch all for tricky sizeof cases, including 'sizeof expression',
      # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
      # requires skipping the next token because we split on ' ' and '*'.
      if tok.startswith('sizeof'):
        skip_next = True
        continue
      is_const = False
      break
    if not is_const:
      error(filename, linenum, 'runtime/arrays', 1,
            'Do not use variable-length arrays. Use an appropriately named '
            "('k' followed by CamelCase) compile-time constant for the size.")

  # Check for use of unnamed namespaces in header files. Registration
  # macros are typically OK, so we allow use of "namespace {" on lines
  # that end with backslashes.
  if (IsHeaderExtension(file_extension)
      and Search(r'\bnamespace\s*{', line)
      and line[-1] != '\\'):
    error(filename, linenum, 'build/namespaces_headers', 4,
          'Do not use unnamed namespaces in header files. See '
          'https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
          ' for more information.')
def CheckGlobalStatic(filename, clean_lines, linenum, error):
  """Check for unsafe global or static objects.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Match two lines at a time to support multiline declarations
  if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
    line += clean_lines.elided[linenum + 1].strip()

  # Check for people declaring static/global STL strings at the top level.
  # This is dangerous because the C++ language does not guarantee that
  # globals with constructors are initialized before the first access, and
  # also because globals can be destroyed when some threads are still running.
  # TODO(unknown): Generalize this to also find static unique_ptr instances.
  # TODO(unknown): File bugs for clang-tidy to find these.
  match = Match(
      r'((?:|static +)(?:|const +))(?::*std::)?string( +const)? +'
      r'([a-zA-Z0-9_:]+)\b(.*)',
      line)

  # Remove false positives:
  # - String pointers (as opposed to values).
  #    string *pointer
  #    const string *pointer
  #    string const *pointer
  #    string *const pointer
  #
  # - Functions and template specializations.
  #    string Function<Type>(...
  #    string Class<Type>::Method(...
  #
  # - Operators. These are matched separately because operator names
  #   cross non-word boundaries, and trying to match both operators
  #   and functions at the same time would decrease accuracy of
  #   matching identifiers.
  #    string Class::operator*()
  if (match and
      not Search(r'\bstring\b(\s+const)?\s*[\*\&]\s*(const\s+)?\w', line) and
      not Search(r'\boperator\W', line) and
      not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(4))):
    if Search(r'\bconst\b', line):
      error(filename, linenum, 'runtime/string', 4,
            'For a static/global string constant, use a C style string '
            'instead: "%schar%s %s[]".' %
            (match.group(1), match.group(2) or '', match.group(3)))
    else:
      error(filename, linenum, 'runtime/string', 4,
            'Static/global string variables are not permitted.')

  # Initializing a member variable with itself, e.g. foo_(foo_) or
  # foo_(CHECK_NOTNULL(foo_)), is almost certainly a typo.
  if (Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line) or
      Search(r'\b([A-Za-z0-9_]*_)\(CHECK_NOTNULL\(\1\)\)', line)):
    error(filename, linenum, 'runtime/init', 4,
          'You seem to be initializing a member variable with itself.')
def CheckPrintf(filename, clean_lines, linenum, error):
  """Check for printf related issues.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # snprintf's size argument should not be a numeric literal; a literal 0
  # is allowed, since that idiom computes the required buffer size.
  snprintf_match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
  if snprintf_match and snprintf_match.group(2) != '0':
    error(filename, linenum, 'runtime/printf', 3,
          'If you can, use sizeof(%s) instead of %s as the 2nd arg '
          'to snprintf.' % (snprintf_match.group(1), snprintf_match.group(2)))

  # sprintf is banned outright.
  if Search(r'\bsprintf\s*\(', line):
    error(filename, linenum, 'runtime/printf', 5,
          'Never use sprintf. Use snprintf instead.')

  # Flag the classic unbounded string functions too.
  unsafe_match = Search(r'\b(strcpy|strcat)\s*\(', line)
  if unsafe_match:
    error(filename, linenum, 'runtime/printf', 4,
          'Almost always, snprintf is better than %s' % unsafe_match.group(1))
def IsDerivedFunction(clean_lines, linenum):
  """Check if current line contains an inherited function.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.

  Returns:
    True if current line contains a function with "override"
    virt-specifier.
  """
  # Scan back a few lines for start of current function; the first line
  # matching a declarator ("name(") decides the answer.
  for i in xrange(linenum, max(-1, linenum - 10), -1):
    match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i])
    if match:
      # Look for "override" after the matching closing parenthesis of the
      # parameter list (the list may span multiple lines).
      line, _, closing_paren = CloseExpression(
          clean_lines, i, len(match.group(1)))
      # closing_paren is -1 when the parenthesis was never closed.
      return (closing_paren >= 0 and
              Search(r'\boverride\b', line[closing_paren:]))
  return False
def IsOutOfLineMethodDefinition(clean_lines, linenum):
  """Check if current line contains an out-of-line method definition.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.

  Returns:
    True if current line contains an out-of-line method definition.
  """
  # Walk backwards (at most 10 lines) to the start of the current function.
  # The first line that looks like a declarator decides the answer.
  lower_bound = max(-1, linenum - 10)
  for candidate in xrange(linenum, lower_bound, -1):
    candidate_line = clean_lines.elided[candidate]
    if Match(r'^([^()]*\w+)\(', candidate_line):
      # A "Class::method(" declarator marks an out-of-line definition.
      return Match(r'^[^()]*\w+::\w+\(', candidate_line) is not None
  return False
def IsInitializerList(clean_lines, linenum):
  """Check if current line is inside constructor initializer list.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.

  Returns:
    True if current line appears to be inside constructor initializer
    list, False otherwise.
  """
  # Scan backwards looking for the ": member(...)" that opens an
  # initializer list, stopping at anything that ends a prior scope.
  for i in xrange(linenum, 1, -1):
    line = clean_lines.elided[i]
    if i == linenum:
      # Strip a trailing "{" from the current line so the body opener does
      # not trip the "[{};]" scope-end test below.
      remove_function_body = Match(r'^(.*)\{\s*$', line)
      if remove_function_body:
        line = remove_function_body.group(1)
    if Search(r'\s:\s*\w+[({]', line):
      # A lone colon tend to indicate the start of a constructor
      # initializer list.  It could also be a ternary operator, which
      # also tend to appear in constructor initializer lists as
      # opposed to parameter lists.
      return True
    if Search(r'\}\s*,\s*$', line):
      # A closing brace followed by a comma is probably the end of a
      # brace-initialized member in constructor initializer list.
      return True
    if Search(r'[{};]\s*$', line):
      # Found one of the following:
      # - A closing brace or semicolon, probably the end of the previous
      #   function.
      # - An opening brace, probably the start of current class or namespace.
      #
      # Current line is probably not inside an initializer list since
      # we saw one of those things without seeing the starting colon.
      return False
  # Got to the beginning of the file without seeing the start of
  # constructor initializer list.
  return False
def CheckForNonConstReference(filename, clean_lines, linenum,
                              nesting_state, error):
  """Check for non-const references.

  Separate from CheckLanguage since it scans backwards from current
  line, instead of scanning forward.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Do nothing if there is no '&' on current line.
  line = clean_lines.elided[linenum]
  if '&' not in line:
    return

  # If a function is inherited, current function doesn't have much of
  # a choice, so any non-const references should not be blamed on
  # derived function.
  if IsDerivedFunction(clean_lines, linenum):
    return

  # Don't warn on out-of-line method definitions, as we would warn on the
  # in-line declaration, if it isn't marked with 'override'.
  if IsOutOfLineMethodDefinition(clean_lines, linenum):
    return

  # Long type names may be broken across multiple lines, usually in one
  # of these forms:
  #   LongType
  #       ::LongTypeContinued &identifier
  #   LongType::
  #       LongTypeContinued &identifier
  #   LongType<
  #       ...>::LongTypeContinued &identifier
  #
  # If we detected a type split across two lines, join the previous
  # line to current line so that we can match const references
  # accordingly.
  #
  # Note that this only scans back one line, since scanning back
  # arbitrary number of lines would be expensive.  If you have a type
  # that spans more than 2 lines, please use a typedef.
  if linenum > 1:
    previous = None
    if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
      # previous_line\n + ::current_line
      previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
                        clean_lines.elided[linenum - 1])
    elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
      # previous_line::\n + current_line
      previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
                        clean_lines.elided[linenum - 1])
    if previous:
      line = previous.group(1) + line.lstrip()
    else:
      # Check for templated parameter that is split across multiple lines
      endpos = line.rfind('>')
      if endpos > -1:
        (_, startline, startpos) = ReverseCloseExpression(
            clean_lines, linenum, endpos)
        if startpos > -1 and startline < linenum:
          # Found the matching < on an earlier line, collect all
          # pieces up to current line.
          line = ''
          for i in xrange(startline, linenum + 1):
            line += clean_lines.elided[i].strip()

  # Check for non-const references in function parameters.  A single '&' may
  # found in the following places:
  #   inside expression: binary & for bitwise AND
  #   inside expression: unary & for taking the address of something
  #   inside declarators: reference parameter
  # We will exclude the first two cases by checking that we are not inside a
  # function body, including one that was just introduced by a trailing '{'.
  # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
  if (nesting_state.previous_stack_top and
      not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
           isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
    # Not at toplevel, not within a class, and not within a namespace
    return

  # Avoid initializer lists.  We only need to scan back from the
  # current line for something that starts with ':'.
  #
  # We don't need to check the current line, since the '&' would
  # appear inside the second set of parentheses on the current line as
  # opposed to the first set.
  if linenum > 0:
    for i in xrange(linenum - 1, max(0, linenum - 10), -1):
      previous_line = clean_lines.elided[i]
      if not Search(r'[),]\s*$', previous_line):
        break
      if Match(r'^\s*:\s+\S', previous_line):
        return

  # Avoid preprocessors (a trailing backslash means a macro continuation).
  if Search(r'\\\s*$', line):
    return

  # Avoid constructor initializer lists
  if IsInitializerList(clean_lines, linenum):
    return

  # We allow non-const references in a few standard places, like functions
  # called "swap()" or iostream operators like "<<" or ">>".  Do not check
  # those function parameters.
  #
  # We also accept & in static_assert, which looks like a function but
  # it's actually a declaration expression.
  allowed_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
                       r'operator\s*[<>][<>]|'
                       r'static_assert|COMPILE_ASSERT'
                       r')\s*\(')
  if Search(allowed_functions, line):
    return
  elif not Search(r'\S+\([^)]*$', line):
    # Don't see an allowed function on this line.  Actually we
    # didn't see any function name on this line, so this is likely a
    # multi-line parameter list.  Try a bit harder to catch this case.
    for i in xrange(2):
      if (linenum > i and
          Search(allowed_functions, clean_lines.elided[linenum - i - 1])):
        return

  decls = ReplaceAll(r'{[^}]*}', ' ', line)  # exclude function body
  for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
    if (not Match(_RE_PATTERN_CONST_REF_PARAM, parameter) and
        not Match(_RE_PATTERN_REF_STREAM_PARAM, parameter)):
      error(filename, linenum, 'runtime/references', 2,
            'Is this a non-const reference? '
            'If so, make const or use a pointer: ' +
            ReplaceAll(' *<', '<', parameter))
def CheckCasts(filename, clean_lines, linenum, error):
  """Various cast related checks.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Check to see if they're using an conversion function cast.
  # I just try to capture the most common basic types, though there are more.
  # Parameterless conversion functions, such as bool(), are allowed as they are
  # probably a member operator declaration or default constructor.
  match = Search(
      r'(\bnew\s+(?:const\s+)?|\S<\s*(?:const\s+)?)?\b'
      r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
      r'(\([^)].*)', line)
  expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
  if match and not expecting_function:
    matched_type = match.group(2)

    # matched_new_or_template is used to silence two false positives:
    # - New operators
    # - Template arguments with function types
    #
    # For template arguments, we match on types immediately following
    # an opening bracket without any spaces.  This is a fast way to
    # silence the common case where the function type is the first
    # template argument.  False negative with less-than comparison is
    # avoided because those operators are usually followed by a space.
    #
    #   function<double(double)>   // bracket + no space = false positive
    #   value < double(42)         // bracket + space = true positive
    matched_new_or_template = match.group(1)

    # Avoid arrays by looking for brackets that come after the closing
    # parenthesis.
    if Match(r'\([^()]+\)\s*\[', match.group(3)):
      return

    # Other things to ignore:
    # - Function pointers
    # - Casts to pointer types
    # - Placement new
    # - Alias declarations
    matched_funcptr = match.group(3)
    if (matched_new_or_template is None and
        not (matched_funcptr and
             (Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
                    matched_funcptr) or
              matched_funcptr.startswith('(*)'))) and
        not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
        not Search(r'new\(\S+\)\s*' + matched_type, line)):
      error(filename, linenum, 'readability/casting', 4,
            'Using deprecated casting style. '
            'Use static_cast<%s>(...) instead' %
            matched_type)

  if not expecting_function:
    CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
                    r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)

  # This doesn't catch all cases. Consider (const char * const)"hello".
  #
  # (char *) "foo" should always be a const_cast (reinterpret_cast won't
  # compile).
  if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',
                     r'\((char\s?\*+\s?)\)\s*"', error):
    pass
  else:
    # Check pointer casts for other than string constants
    CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',
                    r'\((\w+\s?\*+\s?)\)', error)

  # In addition, we look for people taking the address of a cast.  This
  # is dangerous -- casts can assign to temporaries, so the pointer doesn't
  # point where you think.
  #
  # Some non-identifier character is required before the '&' for the
  # expression to be recognized as a cast.  These are casts:
  #   expression = &static_cast<int*>(temporary());
  #   function(&(int*)(temporary()));
  #
  # This is not a cast:
  #   reference_type&(int* function_param);
  match = Search(
      r'(?:[^\w]&\(([^)*][^)]*)\)[\w(])|'
      r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
  if match:
    # Try a better error message when the & is bound to something
    # dereferenced by the casted pointer, as opposed to the casted
    # pointer itself.
    parenthesis_error = False
    match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
    if match:
      _, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
      if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
        _, y2, x2 = CloseExpression(clean_lines, y1, x1)
        if x2 >= 0:
          extended_line = clean_lines.elided[y2][x2:]
          if y2 < clean_lines.NumLines() - 1:
            extended_line += clean_lines.elided[y2 + 1]
          # "->" or "[" right after the cast means the address was taken of
          # a dereferenced member, not of the cast result itself.
          if Match(r'\s*(?:->|\[)', extended_line):
            parenthesis_error = True

    if parenthesis_error:
      error(filename, linenum, 'readability/casting', 4,
            ('Are you taking an address of something dereferenced '
             'from a cast? Wrapping the dereferenced expression in '
             'parentheses will make the binding more obvious'))
    else:
      error(filename, linenum, 'runtime/casting', 4,
            ('Are you taking an address of a cast? '
             'This is dangerous: could be a temp var. '
             'Take the address before doing the cast, rather than after'))
def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
  """Checks for a C-style cast by looking for the pattern.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    cast_type: The string for the C++ cast to recommend.  This is either
      reinterpret_cast, static_cast, or const_cast, depending.
    pattern: The regular expression used to find C-style casts.
    error: The function to call with any errors found.

  Returns:
    True if an error was emitted.
    False otherwise.
  """
  line = clean_lines.elided[linenum]
  match = Search(pattern, line)
  if not match:
    return False

  # Exclude lines with keywords that tend to look like casts
  context = line[0:match.start(1) - 1]
  if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
    return False

  # Try expanding current context to see if we are one level of
  # parentheses inside a macro.
  if linenum > 0:
    for i in xrange(linenum - 1, max(0, linenum - 5), -1):
      context = clean_lines.elided[i] + context
      if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
        return False

  # operator++(int) and operator--(int)
  if context.endswith(' operator++') or context.endswith(' operator--'):
    return False

  # A single unnamed argument for a function tends to look like old style cast.
  # If we see those, don't issue warnings for deprecated casts.
  remainder = line[match.end(0):]
  if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)',
           remainder):
    return False

  # At this point, all that should be left is actual casts.
  error(filename, linenum, 'readability/casting', 4,
        'Using C-style cast.  Use %s<%s>(...) instead' %
        (cast_type, match.group(1)))
  return True
def ExpectingFunctionArgs(clean_lines, linenum):
  """Checks whether function type arguments are expected.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.

  Returns:
    True if the line at 'linenum' is inside something that expects arguments
    of function types.
  """
  line = clean_lines.elided[linenum]
  # MOCK_METHOD macros take a function type argument, possibly opened on
  # the same line...
  if Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line):
    return True
  if linenum < 2:
    return False
  # ...or opened on one of the two preceding lines; std::function templates
  # likewise expect a function type right after the '<'.
  previous = clean_lines.elided[linenum - 1]
  two_back = clean_lines.elided[linenum - 2]
  return bool(
      Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$', previous)
      or Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$', two_back)
      or Search(r'\bstd::m?function\s*\<\s*$', previous))
# Mapping of STL headers to the templated entities they declare; used by the
# include-what-you-use check to suggest a header for each detected use.
_HEADERS_CONTAINING_TEMPLATES = (
    ('<deque>', ('deque',)),
    ('<functional>', ('unary_function', 'binary_function',
                      'plus', 'minus', 'multiplies', 'divides', 'modulus',
                      'negate',
                      'equal_to', 'not_equal_to', 'greater', 'less',
                      'greater_equal', 'less_equal',
                      'logical_and', 'logical_or', 'logical_not',
                      'unary_negate', 'not1', 'binary_negate', 'not2',
                      'bind1st', 'bind2nd',
                      'pointer_to_unary_function',
                      'pointer_to_binary_function',
                      'ptr_fun',
                      'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
                      'mem_fun_ref_t',
                      'const_mem_fun_t', 'const_mem_fun1_t',
                      'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
                      'mem_fun_ref',
                     )),
    ('<limits>', ('numeric_limits',)),
    ('<list>', ('list',)),
    ('<map>', ('multimap',)),
    ('<memory>', ('allocator', 'make_shared', 'make_unique', 'shared_ptr',
                  'unique_ptr', 'weak_ptr')),
    ('<queue>', ('queue', 'priority_queue',)),
    ('<set>', ('multiset',)),
    ('<stack>', ('stack',)),
    ('<string>', ('char_traits', 'basic_string',)),
    ('<tuple>', ('tuple',)),
    ('<unordered_map>', ('unordered_map', 'unordered_multimap')),
    ('<unordered_set>', ('unordered_set', 'unordered_multiset')),
    ('<utility>', ('pair',)),
    ('<vector>', ('vector',)),

    # gcc extensions.
    # Note: std::hash is their hash, ::hash is our hash
    ('<hash_map>', ('hash_map', 'hash_multimap',)),
    ('<hash_set>', ('hash_set', 'hash_multiset',)),
    ('<slist>', ('slist',)),
    )

# Names that are only suggestive of a header: common free functions whose
# names also occur as plain identifiers, so matching is best-effort.
_HEADERS_MAYBE_TEMPLATES = (
    ('<algorithm>', ('copy', 'max', 'min', 'min_element', 'sort',
                     'transform',
                    )),
    ('<utility>', ('forward', 'make_pair', 'move', 'swap')),
    )

# Matches uses of the 'string' type outside any non-std namespace.
_RE_PATTERN_STRING = re.compile(r'\bstring\b')

# Pre-compiled (pattern, template name, header) triples built from the
# tables above; compiled once at import time for speed.
_re_pattern_headers_maybe_templates = []
for _header, _templates in _HEADERS_MAYBE_TEMPLATES:
  for _template in _templates:
    # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
    # 'type::max()'.
    _re_pattern_headers_maybe_templates.append(
        (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
         _template,
         _header))
# Match set<type>, but not foo->set<type>, foo.set<type>
_re_pattern_headers_maybe_templates.append(
    (re.compile(r'[^>.]\bset\s*\<'),
     'set<>',
     '<set>'))
# Match 'map<type> var' and 'std::map<type>(...)', but not 'map<type>(...)''
_re_pattern_headers_maybe_templates.append(
    (re.compile(r'(std\b::\bmap\s*\<)|(^(std\b::\b)map\b\(\s*\<)'),
     'map<>',
     '<map>'))

# Other scripts may reach in and modify this pattern.
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
  for _template in _templates:
    _re_pattern_templates.append(
        (re.compile(r'(\<|\b)' + _template + r'\s*\<'),
         _template + '<>',
         _header))
def FilesBelongToSameModule(filename_cc, filename_h):
  """Check if these two filenames belong to the same module.

  The concept of a 'module' here is as follows:
  foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
  same 'module' if they are in the same directory.
  some/path/public/xyzzy and some/path/internal/xyzzy are also considered
  to belong to the same module here.

  If the filename_cc contains a longer path than the filename_h, for example,
  '/absolute/path/to/base/sysinfo.cc', and this file would include
  'base/sysinfo.h', this function also produces the prefix needed to open the
  header. This is used by the caller of this function to more robustly open the
  header file. We don't have access to the real include paths in this context,
  so we need this guesswork here.

  Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
  according to this implementation. Because of this, this function gives
  some false positives. This should be sufficiently rare in practice.

  Args:
    filename_cc: is the path for the source (e.g. .cc) file
    filename_h: is the path for the header path

  Returns:
    Tuple with a bool and a string:
    bool: True if filename_cc and filename_h belong to the same module.
    string: the additional prefix needed to open the header file.
  """
  # Bail out early unless the pair really is a source file plus a header.
  fileinfo_cc = FileInfo(filename_cc)
  if fileinfo_cc.Extension().lstrip('.') not in GetNonHeaderExtensions():
    return (False, '')

  fileinfo_h = FileInfo(filename_h)
  if not IsHeaderExtension(fileinfo_h.Extension().lstrip('.')):
    return (False, '')

  # Strip the extension and any test suffix (_test, _unittest, ...) from the
  # source file, so that foo_test.cc matches foo.h.
  filename_cc = filename_cc[:-(len(fileinfo_cc.Extension()))]
  matched_test_suffix = Search(_TEST_FILE_SUFFIX, fileinfo_cc.BaseName())
  if matched_test_suffix:
    filename_cc = filename_cc[:-len(matched_test_suffix.group(1))]

  # public/ and internal/ subdirectories count as part of the parent module.
  filename_cc = filename_cc.replace('/public/', '/')
  filename_cc = filename_cc.replace('/internal/', '/')

  # Normalize the header path the same way, also dropping an "-inl" suffix.
  filename_h = filename_h[:-(len(fileinfo_h.Extension()))]
  if filename_h.endswith('-inl'):
    filename_h = filename_h[:-len('-inl')]
  filename_h = filename_h.replace('/public/', '/')
  filename_h = filename_h.replace('/internal/', '/')

  # The source belongs to the header's module if its (possibly longer)
  # normalized path ends with the header's path; the leftover prefix is what
  # the caller needs to prepend to open the header.
  files_belong_to_same_module = filename_cc.endswith(filename_h)
  common_path = ''
  if files_belong_to_same_module:
    common_path = filename_cc[:-len(filename_h)]
  return files_belong_to_same_module, common_path
def UpdateIncludeState(filename, include_dict, io=codecs):
  """Fill up the include_dict with new includes found from the file.

  Args:
    filename: the name of the header to read.
    include_dict: a dictionary in which the headers are inserted.
    io: The io factory to use to read the file. Provided for testability.

  Returns:
    True if a header was successfully added. False otherwise.
  """
  try:
    with io.open(filename, 'r', 'utf8', 'replace') as headerfile:
      for linenum, line in enumerate(headerfile, 1):
        clean_line = CleanseComments(line)
        match = _RE_PATTERN_INCLUDE.search(clean_line)
        if match:
          include = match.group(2)
          # setdefault keeps the first (lowest) line number when a header
          # is included more than once.
          include_dict.setdefault(include, linenum)
      return True
  except IOError:
    return False
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
                              io=codecs):
  """Reports for missing stl includes.

  This function will output warnings to make sure you are including the headers
  necessary for the stl containers and functions that you use. We only give one
  reason to include a header. For example, if you use both equal_to<> and
  less<> in a .h file, only one (the latter in the file) of these will be
  reported as a reason to include the <functional>.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    include_state: An _IncludeState instance.
    error: The function to call with any errors found.
    io: The IO factory to use to read the header file. Provided for unittest
        injection.
  """
  required = {}  # A map of header name to linenumber and the template entity.
                 # Example of required: { '<functional>': (1219, 'less<>') }

  for linenum in xrange(clean_lines.NumLines()):
    line = clean_lines.elided[linenum]
    if not line or line[0] == '#':
      continue

    # String is special -- it is a non-templatized type in STL.
    matched = _RE_PATTERN_STRING.search(line)
    if matched:
      # Don't warn about strings in non-STL namespaces:
      # (We check only the first match per line; good enough.)
      prefix = line[:matched.start()]
      if prefix.endswith('std::') or not prefix.endswith('::'):
        required['<string>'] = (linenum, 'string')

    for pattern, template, header in _re_pattern_headers_maybe_templates:
      if pattern.search(line):
        required[header] = (linenum, template)

    # The following check is just a speed up, no semantics are changed.
    if '<' not in line:  # Reduces the cpu time usage by skipping lines.
      continue

    for pattern, template, header in _re_pattern_templates:
      matched = pattern.search(line)
      if matched:
        # Don't warn about IWYU in non-STL namespaces:
        # (We check only the first match per line; good enough.)
        prefix = line[:matched.start()]
        if prefix.endswith('std::') or not prefix.endswith('::'):
          required[header] = (linenum, template)

  # The policy is that if you #include something in foo.h you don't need to
  # include it again in foo.cc. Here, we will look at possible includes.
  # Let's flatten the include_state include_list and copy it into a dictionary.
  include_dict = dict(item for sublist in include_state.include_list
                      for item in sublist)

  # Did we find the header for this file (if any) and successfully load it?
  header_found = False

  # Use the absolute path so that matching works properly.
  abs_filename = FileInfo(filename).FullName()

  # For Emacs's flymake.
  # If cpplint is invoked from Emacs's flymake, a temporary file is generated
  # by flymake and that file name might end with '_flymake.cc'. In that case,
  # restore original file name here so that the corresponding header file can
  # be found.
  # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
  # instead of 'foo_flymake.h'
  abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)

  # include_dict is modified during iteration, so we iterate over a copy of
  # the keys.
  header_keys = list(include_dict.keys())
  for header in header_keys:
    (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
    fullpath = common_path + header
    if same_module and UpdateIncludeState(fullpath, include_dict, io):
      header_found = True

  # If we can't find the header file for a .cc, assume it's because we don't
  # know where to look. In that case we'll give up as we're not sure they
  # didn't include it in the .h file.
  # TODO(unknown): Do a better job of finding .h files so we are confident that
  # not having the .h file means there isn't one.
  if not header_found:
    for extension in GetNonHeaderExtensions():
      if filename.endswith('.' + extension):
        return

  # All the lines have been processed, report the errors found.
  for required_header_unstripped in sorted(required, key=required.__getitem__):
    template = required[required_header_unstripped][1]
    if required_header_unstripped.strip('<>"') not in include_dict:
      error(filename, required[required_header_unstripped][0],
            'build/include_what_you_use', 4,
            'Add #include ' + required_header_unstripped + ' for ' + template)
# Matches make_pair calls that spell out explicit template arguments.
_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')


def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
  """Check that make_pair's template arguments are deduced.

  G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
  specified explicitly, and such use isn't intended in any case.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  if _RE_PATTERN_EXPLICIT_MAKEPAIR.search(clean_lines.elided[linenum]):
    error(filename, linenum, 'build/explicit_make_pair',
          4,  # 4 = high confidence
          'For C++11-compatibility, omit template arguments from make_pair'
          ' OR use pair directly OR if appropriate, construct a pair directly')
def CheckRedundantVirtual(filename, clean_lines, linenum, error):
  """Check if line contains a redundant "virtual" function-specifier.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Look for "virtual" on current line.
  line = clean_lines.elided[linenum]
  virtual = Match(r'^(.*)(\bvirtual\b)(.*)$', line)
  if not virtual: return

  # Ignore "virtual" keywords that are near access-specifiers.  These
  # are only used in class base-specifier and do not apply to member
  # functions.
  if (Search(r'\b(public|protected|private)\s+$', virtual.group(1)) or
      Match(r'^\s+(public|protected|private)\b', virtual.group(3))):
    return

  # Ignore the "virtual" keyword from virtual base classes.  Usually
  # there is a column on the same line in these cases (virtual base
  # classes are rare in google3 because multiple inheritance is rare).
  if Match(r'^.*[^:]:[^:].*$', line): return

  # Look for the next opening parenthesis.  This is the start of the
  # parameter list (possibly on the next line shortly after virtual).
  # TODO(unknown): doesn't work if there are virtual functions with
  # decltype() or other things that use parentheses, but csearch suggests
  # that this is rare.
  end_col = -1
  end_line = -1
  start_col = len(virtual.group(2))
  for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())):
    line = clean_lines.elided[start_line][start_col:]
    parameter_list = Match(r'^([^(]*)\(', line)
    if parameter_list:
      # Match parentheses to find the end of the parameter list
      (_, end_line, end_col) = CloseExpression(
          clean_lines, start_line, start_col + len(parameter_list.group(1)))
      break
    # After the first line, scan subsequent lines from their beginning.
    start_col = 0

  if end_col < 0:
    return  # Couldn't find end of parameter list, give up

  # Look for "override" or "final" after the parameter list
  # (possibly on the next few lines).
  for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())):
    line = clean_lines.elided[i][end_col:]
    match = Search(r'\b(override|final)\b', line)
    if match:
      error(filename, linenum, 'readability/inheritance', 4,
            ('"virtual" is redundant since function is '
             'already declared as "%s"' % match.group(1)))

    # Set end_col to check whole lines after we are done with the
    # first line.
    end_col = 0
    # Stop once the declaration appears to have ended.
    if Search(r'[^\w]\s*$', line):
      break
def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):
  """Check if line contains a redundant "override" or "final" virt-specifier.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Anchor on a nearby closing parenthesis: virt-specifiers can only appear
  # after the declarator ends, and requiring one avoids false positives.
  line = clean_lines.elided[linenum]
  paren_pos = line.rfind(')')
  if paren_pos >= 0:
    fragment = line[paren_pos:]
  elif linenum > 1 and clean_lines.elided[linenum - 1].rfind(')') >= 0:
    # The parameter list closed on the previous line, so the entire current
    # line may hold the virt-specifiers.
    fragment = line
  else:
    return

  # "final" already implies "override", so having both is redundant.
  if Search(r'\boverride\b', fragment) and Search(r'\bfinal\b', fragment):
    error(filename, linenum, 'readability/inheritance', 4,
          ('"override" is redundant since function is '
           'already declared as "final"'))
# Returns true if we are at a new block, and it is directly
# inside of a namespace.
def IsBlockInNameSpace(nesting_state, is_forward_declaration):
  """Checks that the new block is directly in a namespace.

  Args:
    nesting_state: The _NestingState object that contains info about our state.
    is_forward_declaration: If the class is a forward declared class.
  Returns:
    Whether or not the new block is directly in a namespace.
  """
  stack = nesting_state.stack
  if is_forward_declaration:
    # A forward declaration does not push its own entry, so the innermost
    # scope must itself be the namespace.
    return len(stack) >= 1 and isinstance(stack[-1], _NamespaceInfo)
  # A real block pushes its own entry onto the stack, so the namespace
  # (if any) is the second entry from the top.
  return (len(stack) > 1 and
          stack[-1].check_namespace_indentation and
          isinstance(stack[-2], _NamespaceInfo))
def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
                                    raw_lines_no_comments, linenum):
  """This method determines if we should apply our namespace indentation check.

  Args:
    nesting_state: The current nesting state.
    is_namespace_indent_item: If we just put a new class on the stack, True.
      If the top of the stack is not a class, or we did not recently
      add the class, False.
    raw_lines_no_comments: The lines without the comments.
    linenum: The current line number we are processing.

  Returns:
    True if we should apply our namespace indentation check. Currently, it
    only works for classes and namespaces inside of a namespace.
  """
  forward_decl = IsForwardClassDeclaration(raw_lines_no_comments, linenum)
  # Only freshly-opened blocks and forward declarations are candidates.
  if not is_namespace_indent_item and not forward_decl:
    return False

  # Indentation inside macro definitions is exempt from the check.
  if IsMacroDefinition(raw_lines_no_comments, linenum):
    return False

  return IsBlockInNameSpace(nesting_state, forward_decl)
# Call this method if the line is directly inside of a namespace.
# If the line above is blank (excluding comments) or the start of
# an inner namespace, it cannot be indented.
def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum,
                                    error):
  """Flag any indentation on a line that sits directly inside a namespace."""
  if Match(r'^\s+', raw_lines_no_comments[linenum]):
    error(filename, linenum, 'runtime/indentation_namespace', 4,
          'Do not indent within a namespace')
def ProcessLine(filename, file_extension, clean_lines, line,
                include_state, function_state, nesting_state, error,
                extra_check_functions=None):
  """Processes a single line in the file.

  Args:
    filename: Filename of the file that is being processed.
    file_extension: The extension (dot not included) of the file.
    clean_lines: An array of strings, each representing a line of the file,
                 with comments stripped.
    line: Number of line being processed.
    include_state: An _IncludeState instance in which the headers are inserted.
    function_state: A _FunctionState instance which counts function lines, etc.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error
  """
  raw_lines = clean_lines.raw_lines
  # NOLINT suppressions on this line must be registered before any check runs.
  ParseNolintSuppressions(filename, raw_lines[line], line, error)
  nesting_state.Update(filename, clean_lines, line, error)
  CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
                               error)
  # No style checks apply inside inline assembly blocks.
  if nesting_state.InAsmBlock(): return
  CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
  CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
  CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
  CheckLanguage(filename, clean_lines, line, file_extension, include_state,
                nesting_state, error)
  CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
  CheckForNonStandardConstructs(filename, clean_lines, line,
                                nesting_state, error)
  CheckVlogArguments(filename, clean_lines, line, error)
  CheckPosixThreading(filename, clean_lines, line, error)
  CheckInvalidIncrement(filename, clean_lines, line, error)
  CheckMakePairUsesDeduction(filename, clean_lines, line, error)
  CheckRedundantVirtual(filename, clean_lines, line, error)
  CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
  # Caller-supplied checks run last, after all built-in checks.
  if extra_check_functions:
    for check_fn in extra_check_functions:
      check_fn(filename, clean_lines, line, error)
def FlagCxx11Features(filename, clean_lines, linenum, error):
  """Flag those c++11 features that we only allow in certain places.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  elided_line = clean_lines.elided[linenum]

  include_match = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', elided_line)
  if include_match:
    header = include_match.group(1)
    # Flag unapproved C++ TR1 headers.
    if header.startswith('tr1/'):
      error(filename, linenum, 'build/c++tr1', 5,
            ('C++ TR1 headers such as <%s> are unapproved.') % header)
    # Flag unapproved C++11 headers.
    if header in ('cfenv',
                  'condition_variable',
                  'fenv.h',
                  'future',
                  'mutex',
                  'thread',
                  'chrono',
                  'ratio',
                  'regex',
                  'system_error',
                 ):
      error(filename, linenum, 'build/c++11', 5,
            ('<%s> is an unapproved C++11 header.') % header)

  # The only place where we need to worry about C++11 keywords and library
  # features in preprocessor directives is in macro definitions.
  if Match(r'\s*#', elided_line) and not Match(r'\s*#\s*define\b', elided_line):
    return

  # These are classes and free functions.  The classes are always
  # mentioned as std::*, but we only catch the free functions if
  # they're not found by ADL.  They're alphabetical by header.
  for banned_name in (
      # type_traits
      'alignment_of',
      'aligned_union',
      ):
    if Search(r'\bstd::%s\b' % banned_name, elided_line):
      error(filename, linenum, 'build/c++11', 5,
            ('std::%s is an unapproved C++11 class or function. Send c-style '
             'an example of where it would make your code more readable, and '
             'they may let you use it.') % banned_name)
def FlagCxx14Features(filename, clean_lines, linenum, error):
  """Flag those C++14 features that we restrict.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  elided_line = clean_lines.elided[linenum]

  include_match = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', elided_line)
  if include_match:
    header = include_match.group(1)
    # Flag unapproved C++14 headers.
    if header in ('scoped_allocator', 'shared_mutex'):
      error(filename, linenum, 'build/c++14', 5,
            ('<%s> is an unapproved C++14 header.') % header)
def ProcessFileData(filename, file_extension, lines, error,
                    extra_check_functions=None):
  """Performs lint checks and reports any errors to the given error function.

  Args:
    filename: Filename of the file that is being processed.
    file_extension: The extension (dot not included) of the file.
    lines: An array of strings, each representing a line of the file, with the
           last element being empty if the file is terminated with a newline.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error
  """
  # Pad both ends so line numbers are 1-based and the per-line checks never
  # need to special-case the first or last line of the file.
  lines = (['// marker so line numbers and indices both start at 1'] + lines +
           ['// marker so line numbers end in a known way'])
  # Fresh per-file state: include tracking, function-length accounting, and
  # the stack of nested blocks being parsed.
  include_state = _IncludeState()
  function_state = _FunctionState()
  nesting_state = NestingState()
  ResetNolintSuppressions()
  CheckForCopyright(filename, lines, error)
  ProcessGlobalSuppresions(lines)
  RemoveMultiLineComments(filename, lines, error)
  clean_lines = CleansedLines(lines)
  if IsHeaderExtension(file_extension):
    CheckForHeaderGuard(filename, clean_lines, error)
  # Per-line checks run on the comment-stripped (cleansed) lines.
  for line in xrange(clean_lines.NumLines()):
    ProcessLine(filename, file_extension, clean_lines, line,
                include_state, function_state, nesting_state, error,
                extra_check_functions)
    FlagCxx11Features(filename, clean_lines, line, error)
  nesting_state.CheckCompletedBlocks(filename, error)
  CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
  # Check that the .cc file has included its header if it exists.
  if _IsSourceExtension(file_extension):
    CheckHeaderFileIncluded(filename, include_state, error)
  # We check here rather than inside ProcessLine so that we see raw
  # lines rather than "cleaned" lines.
  CheckForBadCharacters(filename, lines, error)
  CheckForNewlineAtEOF(filename, lines, error)
def ProcessConfigOverrides(filename):
  """ Loads the configuration files and processes the config overrides.

  Walks upward from the file's directory toward the filesystem root, reading
  every CPPLINT.cfg found on the way ('set noparent' stops the walk).  Option
  values mutate module-level linter state (_line_length, _root, filters, ...).

  Args:
    filename: The name of the file being processed by the linter.

  Returns:
    False if the current |filename| should not be processed further.
  """
  abs_filename = os.path.abspath(filename)
  cfg_filters = []
  keep_looking = True
  while keep_looking:
    abs_path, base_name = os.path.split(abs_filename)
    if not base_name:
      break  # Reached the root directory.
    cfg_file = os.path.join(abs_path, "CPPLINT.cfg")
    abs_filename = abs_path
    if not os.path.isfile(cfg_file):
      continue
    try:
      with open(cfg_file) as file_handle:
        for line in file_handle:
          line, _, _ = line.partition('#')  # Remove comments.
          if not line.strip():
            continue
          name, _, val = line.partition('=')
          name = name.strip()
          val = val.strip()
          if name == 'set noparent':
            # Stop the upward walk after finishing this config file.
            keep_looking = False
          elif name == 'filter':
            cfg_filters.append(val)
          elif name == 'exclude_files':
            # When matching exclude_files pattern, use the base_name of
            # the current file name or the directory name we are processing.
            # For example, if we are checking for lint errors in /foo/bar/baz.cc
            # and we found the .cfg file at /foo/CPPLINT.cfg, then the config
            # file's "exclude_files" filter is meant to be checked against "bar"
            # and not "baz" nor "bar/baz.cc".
            if base_name:
              pattern = re.compile(val)
              if pattern.match(base_name):
                if _cpplint_state.quiet:
                  # Suppress "Ignoring file" warning when using --quiet.
                  return False
                _cpplint_state.PrintInfo('Ignoring "%s": file excluded by "%s". '
                                         'File path component "%s" matches '
                                         'pattern "%s"\n' %
                                         (filename, cfg_file, base_name, val))
                return False
          elif name == 'linelength':
            global _line_length
            try:
              _line_length = int(val)
            except ValueError:
              _cpplint_state.PrintError('Line length must be numeric.')
          elif name == 'extensions':
            ProcessExtensionsOption(val)
          elif name == 'root':
            global _root
            # root directories are specified relative to CPPLINT.cfg dir.
            _root = os.path.join(os.path.dirname(cfg_file), val)
          elif name == 'headers':
            ProcessHppHeadersOption(val)
          elif name == 'includeorder':
            ProcessIncludeOrderOption(val)
          else:
            _cpplint_state.PrintError(
                'Invalid configuration option (%s) in file %s\n' %
                (name, cfg_file))
    except IOError:
      _cpplint_state.PrintError(
          "Skipping config file '%s': Can't open for reading\n" % cfg_file)
      keep_looking = False
  # Apply all the accumulated filters in reverse order (top-level directory
  # config options having the least priority).
  for cfg_filter in reversed(cfg_filters):
    _AddFilters(cfg_filter)
  return True
def ProcessFile(filename, vlevel, extra_check_functions=None):
  """Does google-lint on a single file.

  Args:
    filename: The name of the file to parse.
    vlevel: The level of errors to report.  Every error of confidence
            >= verbose_level will be reported.  0 is a good default.
    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error
  """
  _SetVerboseLevel(vlevel)
  _BackupFilters()
  old_errors = _cpplint_state.error_count
  if not ProcessConfigOverrides(filename):
    _RestoreFilters()
    return
  # Track which 1-based line numbers end in LF vs CR-LF so mixed line
  # endings can be warned about after processing.
  lf_lines = []
  crlf_lines = []
  try:
    # Support the UNIX convention of using "-" for stdin.  Note that
    # we are not opening the file with universal newline support
    # (which codecs doesn't support anyway), so the resulting lines do
    # contain trailing '\r' characters if we are reading a file that
    # has CRLF endings.
    # If after the split a trailing '\r' is present, it is removed
    # below.
    if filename == '-':
      lines = codecs.StreamReaderWriter(sys.stdin,
                                        codecs.getreader('utf8'),
                                        codecs.getwriter('utf8'),
                                        'replace').read().split('\n')
    else:
      with codecs.open(filename, 'r', 'utf8', 'replace') as target_file:
        lines = target_file.read().split('\n')
    # Remove trailing '\r'.
    # The -1 accounts for the extra trailing blank line we get from split()
    for linenum in range(len(lines) - 1):
      if lines[linenum].endswith('\r'):
        lines[linenum] = lines[linenum].rstrip('\r')
        crlf_lines.append(linenum + 1)
      else:
        lf_lines.append(linenum + 1)
  except IOError:
    _cpplint_state.PrintError(
        "Skipping input '%s': Can't open for reading\n" % filename)
    _RestoreFilters()
    return
  # Note, if no dot is found, this will give the entire filename as the ext.
  file_extension = filename[filename.rfind('.') + 1:]
  # When reading from stdin, the extension is unknown, so no cpplint tests
  # should rely on the extension.
  if filename != '-' and file_extension not in GetAllExtensions():
    _cpplint_state.PrintError('Ignoring %s; not a valid file name '
                              '(%s)\n' % (filename, ', '.join(GetAllExtensions())))
  else:
    ProcessFileData(filename, file_extension, lines, Error,
                    extra_check_functions)
    # If end-of-line sequences are a mix of LF and CR-LF, issue
    # warnings on the lines with CR.
    #
    # Don't issue any warnings if all lines are uniformly LF or CR-LF,
    # since critique can handle these just fine, and the style guide
    # doesn't dictate a particular end of line sequence.
    #
    # We can't depend on os.linesep to determine what the desired
    # end-of-line sequence should be, since that will return the
    # server-side end-of-line sequence.
    if lf_lines and crlf_lines:
      # Warn on every line with CR.  An alternative approach might be to
      # check whether the file is mostly CRLF or just LF, and warn on the
      # minority, we bias toward LF here since most tools prefer LF.
      for linenum in crlf_lines:
        Error(filename, linenum, 'whitespace/newline', 1,
              'Unexpected \\r (^M) found; better to use only \\n')
  # Suppress printing anything if --quiet was passed unless the error
  # count has increased after processing this file.
  if not _cpplint_state.quiet or old_errors != _cpplint_state.error_count:
    _cpplint_state.PrintInfo('Done processing %s\n' % filename)
  _RestoreFilters()
def PrintUsage(message):
  """Prints a brief usage string and exits, optionally with an error message.

  Args:
    message: The optional error message.
  """
  all_extensions = list(GetAllExtensions())
  header_extensions = GetHeaderExtensions()
  sys.stderr.write(_USAGE % (all_extensions,
                             ','.join(all_extensions),
                             header_extensions,
                             ','.join(header_extensions)))
  # A truthy message means a fatal error: exit non-zero with the message.
  if message:
    sys.exit('\nFATAL ERROR: ' + message)
  sys.exit(0)
def PrintVersion():
  """Prints cpplint fork, cpplint, and Python version info, then exits 0."""
  for info_line in ('Cpplint fork (https://github.com/cpplint/cpplint)',
                    'cpplint ' + __VERSION__,
                    'Python ' + sys.version):
    sys.stdout.write(info_line + '\n')
  sys.exit(0)
def PrintCategories():
  """Prints a list of all the error-categories used by error messages.

  These are the categories used to filter messages via --filter.
  """
  for category in _ERROR_CATEGORIES:
    sys.stderr.write(' %s\n' % category)
  sys.exit(0)
def ParseArguments(args):
  """Parses the command line arguments.

  This may set the output format and verbosity level as side-effects.

  Args:
    args: The command line arguments:

  Returns:
    The list of filenames to lint.
  """
  try:
    (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
                                                 'v=',
                                                 'version',
                                                 'counting=',
                                                 'filter=',
                                                 'root=',
                                                 'repository=',
                                                 'linelength=',
                                                 'extensions=',
                                                 'exclude=',
                                                 'recursive',
                                                 'headers=',
                                                 'includeorder=',
                                                 'quiet'])
  except getopt.GetoptError:
    PrintUsage('Invalid arguments.')
  # Start from the current global state so repeated calls accumulate.
  verbosity = _VerboseLevel()
  output_format = _OutputFormat()
  filters = ''
  quiet = _Quiet()
  counting_style = ''
  recursive = False
  for (opt, val) in opts:
    if opt == '--help':
      PrintUsage(None)
    if opt == '--version':
      PrintVersion()
    elif opt == '--output':
      if val not in ('emacs', 'vs7', 'eclipse', 'junit', 'sed', 'gsed'):
        PrintUsage('The only allowed output formats are emacs, vs7, eclipse '
                   'sed, gsed and junit.')
      output_format = val
    elif opt == '--quiet':
      quiet = True
    elif opt == '--verbose' or opt == '--v':
      verbosity = int(val)
    elif opt == '--filter':
      filters = val
      if not filters:
        # An empty --filter lists the available categories and exits.
        PrintCategories()
    elif opt == '--counting':
      if val not in ('total', 'toplevel', 'detailed'):
        PrintUsage('Valid counting options are total, toplevel, and detailed')
      counting_style = val
    elif opt == '--root':
      global _root
      _root = val
    elif opt == '--repository':
      global _repository
      _repository = val
    elif opt == '--linelength':
      global _line_length
      try:
        _line_length = int(val)
      except ValueError:
        PrintUsage('Line length must be digits.')
    elif opt == '--exclude':
      global _excludes
      if not _excludes:
        _excludes = set()
      _excludes.update(glob.glob(val))
    elif opt == '--extensions':
      ProcessExtensionsOption(val)
    elif opt == '--headers':
      ProcessHppHeadersOption(val)
    elif opt == '--recursive':
      recursive = True
    elif opt == '--includeorder':
      ProcessIncludeOrderOption(val)
  if not filenames:
    PrintUsage('No files were specified.')
  if recursive:
    filenames = _ExpandDirectories(filenames)
  if _excludes:
    filenames = _FilterExcludedFiles(filenames)
  # Commit the parsed options to the shared linter state.
  _SetOutputFormat(output_format)
  _SetQuiet(quiet)
  _SetVerboseLevel(verbosity)
  _SetFilters(filters)
  _SetCountingStyle(counting_style)
  filenames.sort()
  return filenames
def _ExpandDirectories(filenames):
  """Searches a list of filenames and replaces directories in the list with
  all files descending from those directories. Files with extensions not in
  the valid extensions list are excluded.

  Args:
    filenames: A list of files or directories

  Returns:
    A list of all files that are members of filenames or descended from a
    directory in filenames
  """
  dot_prefix = '.' + os.path.sep
  expanded = set()
  for name in filenames:
    if not os.path.isdir(name):
      expanded.add(name)
      continue
    for dirpath, _, entries in os.walk(name):
      for entry in entries:
        fullname = os.path.join(dirpath, entry)
        # Strip a leading "./" so paths look the way the user typed them.
        if fullname.startswith(dot_prefix):
          fullname = fullname[len(dot_prefix):]
        expanded.add(fullname)
  # Keep only files whose extension cpplint knows how to lint.
  return [name for name in expanded
          if os.path.splitext(name)[1][1:] in GetAllExtensions()]
def _FilterExcludedFiles(fnames):
  """Filters out files listed in the --exclude command line switch. File paths
  in the switch are evaluated relative to the current working directory
  """
  excluded_roots = [os.path.abspath(path) for path in _excludes]
  # Globbing is not recursive, so treat every excluded entry as a subtree
  # root and drop anything at or below it.
  kept = []
  for fname in fnames:
    abs_fname = os.path.abspath(fname)
    if not any(_IsParentOrSame(root, abs_fname) for root in excluded_roots):
      kept.append(fname)
  return kept
def _IsParentOrSame(parent, child):
"""Return true if child is subdirectory of parent.
Assumes both paths are absolute and don't contain symlinks.
"""
parent = os.path.normpath(parent)
child = os.path.normpath(child)
if parent == child:
return True
prefix = os.path.commonprefix([parent, child])
if prefix != parent:
return False
# Note: os.path.commonprefix operates on character basis, so
# take extra care of situations like '/foo/ba' and '/foo/bar/baz'
child_suffix = child[len(prefix):]
child_suffix = child_suffix.lstrip(os.sep)
return child == os.path.join(prefix, child_suffix)
def main():
  """Entry point: lint every file named on the command line.

  Exits with status 1 if any lint errors were found, 0 otherwise.
  """
  targets = ParseArguments(sys.argv[1:])
  original_stderr = sys.stderr
  try:
    # Change stderr to write with replacement characters so we don't die
    # if we try to print something containing non-ASCII characters.
    sys.stderr = codecs.StreamReader(original_stderr, 'replace')
    _cpplint_state.ResetErrorCounts()
    for target in targets:
      ProcessFile(target, _cpplint_state.verbose_level)
    # If --quiet is passed, suppress printing error count unless there are errors.
    if _cpplint_state.error_count > 0 or not _cpplint_state.quiet:
      _cpplint_state.PrintErrorCounts()
    if _cpplint_state.output_format == 'junit':
      sys.stderr.write(_cpplint_state.FormatJUnitXML())
  finally:
    sys.stderr = original_stderr
  sys.exit(_cpplint_state.error_count > 0)
# Script entry point: lint the files given on the command line.
if __name__ == '__main__':
  main()
| 37.970009
| 98
| 0.650489
|
4a02eb5f63f0774490381932d3956873ad692a6a
| 1,476
|
bzl
|
Python
|
packer/dependencies.bzl
|
ChrisCummins/bazel-distribution
|
6865042c2dcfb0e877adb9ba999c9dfa855e4613
|
[
"Apache-2.0"
] | null | null | null |
packer/dependencies.bzl
|
ChrisCummins/bazel-distribution
|
6865042c2dcfb0e877adb9ba999c9dfa855e4613
|
[
"Apache-2.0"
] | null | null | null |
packer/dependencies.bzl
|
ChrisCummins/bazel-distribution
|
6865042c2dcfb0e877adb9ba999c9dfa855e4613
|
[
"Apache-2.0"
] | null | null | null |
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
def deploy_packer_dependencies():
    """Registers pinned Packer 1.4.0 release archives as external repositories.

    One repository per platform (macOS and Linux); each exposes the `packer`
    executable via an exports_files() build file.
    """
    http_archive(
        name = "packer_osx",
        url = "https://releases.hashicorp.com/packer/1.4.0/packer_1.4.0_darwin_amd64.zip",
        # sha256 pins the exact release artifact for reproducible builds.
        sha256 = "475a2a63d37c5bbd27a4b836ffb1ac85d1288f4d55caf04fde3e31ca8e289773",
        build_file_content = 'exports_files(["packer"])'
    )
    http_archive(
        name = "packer_linux",
        url = "https://releases.hashicorp.com/packer/1.4.0/packer_1.4.0_linux_amd64.zip",
        sha256 = "7505e11ce05103f6c170c6d491efe3faea1fb49544db0278377160ffb72721e4",
        build_file_content = 'exports_files(["packer"])'
    )
| 41
| 91
| 0.733062
|
4a02ebdafe5a5ceae46164cd055f4c5a786da3f2
| 4,715
|
py
|
Python
|
setup.py
|
aznotins/allennlp
|
9db004224adf74267cc923d45e8ecb22b53d7066
|
[
"Apache-2.0"
] | 1
|
2021-04-08T09:53:17.000Z
|
2021-04-08T09:53:17.000Z
|
setup.py
|
aznotins/allennlp
|
9db004224adf74267cc923d45e8ecb22b53d7066
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
aznotins/allennlp
|
9db004224adf74267cc923d45e8ecb22b53d7066
|
[
"Apache-2.0"
] | null | null | null |
"""
In order to create a package for pypi, you need to follow several steps.
1. Create a .pypirc in your home directory. It should look like this:
```
[distutils]
index-servers =
pypi
pypitest
[pypi]
username=allennlp
password= Get the password from LastPass.
[pypitest]
repository=https://test.pypi.org/legacy/
username=allennlp
password= Get the password from LastPass.
```
run chmod 600 ./pypirc so only you can read/write.
1. Change the version in docs/conf.py and setup.py.
2. Commit these changes with the message: "Release: VERSION"
3. Add a tag in git to mark the release: "git tag VERSION -m'Adds tag VERSION for pypi' "
Push the tag to git: git push --tags origin master
4. Build both the sources and the wheel. Do not change anything in setup.py between
creating the wheel and the source distribution (obviously).
For the wheel, run: "python setup.py bdist_wheel" in the top level allennlp directory.
(this will build a wheel for the python version you use to build it - make sure you use python 3.x).
For the sources, run: "python setup.py sdist"
You should now have a /dist directory with both .whl and .tar.gz source versions of allennlp.
5. Check that everything looks correct by uploading the package to the pypi test server:
twine upload dist/* -r pypitest
(pypi suggest using twine as other methods upload files via plaintext.)
Check that you can install it in a virtualenv by running:
pip install -i https://testpypi.python.org/pypi allennlp
6. Upload the final version to actual pypi:
twine upload dist/* -r pypi
7. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory.
"""
from setuptools import setup, find_packages
import sys
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
# version.py defines the VERSION and VERSION_SHORT variables.
# We use exec here so we don't import allennlp whilst setting up.
# Load VERSION from allennlp/version.py without importing the package
# (importing would pull in heavy dependencies during setup).
VERSION = {}
with open("allennlp/version.py", "r") as version_file:
    exec(version_file.read(), VERSION)

# make pytest-runner a conditional requirement,
# per: https://github.com/pytest-dev/pytest-runner#considerations
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []

setup_requirements = [
    # add other setup requirements as necessary
] + pytest_runner

# Read the long description with a context manager and an explicit encoding:
# the original `open("README.md").read()` leaked the file handle and relied
# on the platform default encoding, which breaks on non-UTF-8 locales.
with open("README.md", encoding="utf-8") as readme_file:
    LONG_DESCRIPTION = readme_file.read()

setup(name='allennlp',
      version=VERSION["VERSION"],
      description='An open-source NLP research library, built on PyTorch.',
      long_description=LONG_DESCRIPTION,
      long_description_content_type="text/markdown",
      classifiers=[
          'Intended Audience :: Science/Research',
          'Development Status :: 3 - Alpha',
          'License :: OSI Approved :: Apache Software License',
          'Programming Language :: Python :: 3.6',
          'Topic :: Scientific/Engineering :: Artificial Intelligence',
      ],
      keywords='allennlp NLP deep learning machine reading',
      url='https://github.com/allenai/allennlp',
      author='Allen Institute for Artificial Intelligence',
      author_email='allennlp@allenai.org',
      license='Apache',
      packages=find_packages(exclude=["*.tests", "*.tests.*",
                                      "tests.*", "tests"]),
      install_requires=[
          'torch>=0.4.1',
          "jsonnet>=0.10.0 ; sys.platform != 'win32'",
          'overrides',
          'nltk',
          'spacy>=2.0.18,<2.2',
          'numpy',
          'tensorboardX>=1.2',
          'boto3',
          'flask>=1.0.2',
          'flask-cors>=3.0.7',
          'gevent>=1.3.6',
          'requests>=2.18',
          'tqdm>=4.19',
          'editdistance',
          'h5py',
          'scikit-learn',
          'scipy',
          'pytz>=2017.3',
          'unidecode',
          'matplotlib>=2.2.3',
          'pytest',
          'flaky',
          'responses>=0.7',
          'numpydoc>=0.8.0',
          'conllu==1.3.1',
          'parsimonious>=0.8.0',
          'ftfy',
          'sqlparse>=0.2.4',
          'word2number>=1.1',
          'pytorch-pretrained-bert>=0.6.0',
          'jsonpickle',
      ],
      entry_points={
          'console_scripts': [
              "allennlp=allennlp.run:run"
          ]
      },
      setup_requires=setup_requirements,
      tests_require=[
          'pytest',
          'flaky',
          'responses>=0.7',
      ],
      include_package_data=True,
      python_requires='>=3.6.1',
      zip_safe=False)
| 31.433333
| 103
| 0.62948
|
4a02ec36e9ce0dde5eb3aac7dad7f7e0d3b8894a
| 1,071
|
py
|
Python
|
pacote-download/d045 - jogar jokenpo.py
|
Carlos-DOliveira/cursoemvideo-python3
|
4546c8a7360155243e2f7ecbbb80c57868f770a2
|
[
"MIT"
] | null | null | null |
pacote-download/d045 - jogar jokenpo.py
|
Carlos-DOliveira/cursoemvideo-python3
|
4546c8a7360155243e2f7ecbbb80c57868f770a2
|
[
"MIT"
] | null | null | null |
pacote-download/d045 - jogar jokenpo.py
|
Carlos-DOliveira/cursoemvideo-python3
|
4546c8a7360155243e2f7ecbbb80c57868f770a2
|
[
"MIT"
] | null | null | null |
'''045 Create a program that makes the computer play rock-paper-scissors
(jokenpo) with you.'''
import random,time

# Computer's move: 1 = pedra (rock), 2 = tessoura (scissors), 3 = papel (paper).
lista = [1, 2, 3]
n = random.choice(lista)
if n == 1:
    pc = 'pedra'
elif n == 2:
    pc = 'tessoura'
else:
    pc = 'papel'
# Player's move; any value other than 1 or 2 is treated as papel (paper).
escolha = int(input('Escolha um número: 1 - pedra. 2 - tersosura. 3 - papel: '))
if escolha == 1:
    vc = 'pedra'
elif escolha == 2:
    vc = 'tessoura'
else:
    vc = 'papel'
# Countdown for suspense before revealing the result.
time.sleep(1)
print('JO')
time.sleep(1)
print('KEN')
time.sleep(1)
print('PO')
print('')
print(f'compudaror: {pc} X {vc} : você')
print('')
# Decide the winner by comparing the two moves.
if n == escolha:
    print('EMPATE')
elif n == 1 and escolha == 2:
    print(f'O Pc escolheu pedra, ele ganhou!!!')
elif n == 1 and escolha == 3:
    print(f'Você ganhou, papel ganha de pedra')
elif n == 2 and escolha == 1:
    print('Pedra ganha de tessoura, você ganhou.')
elif n == 2 and escolha == 3:
    print('O PC ganhou, tessou ganha de papel')
elif n == 3 and escolha == 1:
    # Bug fix: paper (PC) beats rock (player), so the computer wins here;
    # the original message wrongly congratulated the player ("Parabéns!!").
    print('O PC escolheu papel, papel ganha de pedra. O PC ganhou!')
else:
    print('Você escolheu tessoura e ela ganha de papel. Parabéns!!!')
| 22.3125
| 80
| 0.616246
|
4a02ecb8979029940b9a5550e3a2356b3b772fcb
| 153
|
py
|
Python
|
sample/WRS2018/T5-AizuSpiderSS-RTM.py
|
jun0/choreonoid
|
37167e52bfa054088272e1924d2062604104ac08
|
[
"MIT"
] | 91
|
2015-01-29T11:03:42.000Z
|
2022-02-13T07:34:04.000Z
|
sample/WRS2018/T5-AizuSpiderSS-RTM.py
|
jun0/choreonoid
|
37167e52bfa054088272e1924d2062604104ac08
|
[
"MIT"
] | 213
|
2015-01-26T06:21:15.000Z
|
2020-07-23T05:51:30.000Z
|
sample/WRS2018/T5-AizuSpiderSS-RTM.py
|
jun0/choreonoid
|
37167e52bfa054088272e1924d2062604104ac08
|
[
"MIT"
] | 71
|
2015-01-06T02:32:05.000Z
|
2020-12-01T03:42:25.000Z
|
# Load the WRS2018 Task 5 project: AizuSpiderSS in the AISTSimulator with
# vision simulation enabled, using "RTM" as the remote controller type.
import WRSUtil
WRSUtil.loadProject(
    "SingleSceneView", "T5", "AISTSimulator", "AizuSpiderSS",
    enableVisionSimulation = True, remoteType = "RTM")
| 30.6
| 61
| 0.732026
|
4a02edb831af2614af7508003894c11988c41d75
| 2,885
|
py
|
Python
|
bika/lims/skins/bika/guard_auto_preservation_required.py
|
hocinebendou/bika.gsoc
|
85bc0c587de7f52073ae0e89bddbc77bf875f295
|
[
"MIT"
] | null | null | null |
bika/lims/skins/bika/guard_auto_preservation_required.py
|
hocinebendou/bika.gsoc
|
85bc0c587de7f52073ae0e89bddbc77bf875f295
|
[
"MIT"
] | null | null | null |
bika/lims/skins/bika/guard_auto_preservation_required.py
|
hocinebendou/bika.gsoc
|
85bc0c587de7f52073ae0e89bddbc77bf875f295
|
[
"MIT"
] | null | null | null |
## Script (Python) "guard_auto_preservation_required"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=type_name=None
##title=
##
# Automatic transition that fires when object enters "Sampled" state
# If this guard returns True, the object will transition to to_be_preserved.
# If the guard returns False, the object will be transitioned to sample_due.
# Pre-preserved containers are short-circuited here.
# Returning a value other than True or False will leave the context in
# "sampled" state
from DateTime import DateTime
workflow = context.portal_workflow
if context.portal_type == 'Sample':
parts = context.objectValues("SamplePartition")
if not parts:
# AR is being created - AR Add will transition us.
return None
# If none of this sample's partitions require preservation, then we return
# false.
preservation_required = False
for part in parts:
if part.getPreservation():
if part.getContainer() \
and part.getContainer().getPrePreserved():
preservation_required = False
else:
preservation_required = True
break
return preservation_required
elif context.portal_type == 'AnalysisRequest':
sample = context.getSample()
if not sample:
# AR is being created - AR Add will transition us.
return None
# If none of this sample's partitions require preservation, then we return
# false.
preservation_required = False
for part in sample.objectValues("SamplePartition"):
if part.getPreservation():
if part.getContainer() \
and part.getContainer().getPrePreserved():
preservation_required = False
else:
preservation_required = True
break
return preservation_required
elif context.portal_type == 'SamplePartition':
analyses = context.getBackReferences('AnalysisSamplePartition')
if not analyses:
# AR is being created - AR Add will transition us.
return None
if context.getPreservation():
if context.getContainer() \
and context.getContainer().getPrePreserved():
return False
else:
return True
else:
return False
elif context.portal_type == 'Analysis':
part = context.getSamplePartition()
if not part:
# AR is being created - AR Add will transition us.
return None
part = context.getSamplePartition()
if part.getPreservation():
if part.getContainer() \
and part.getContainer().getPrePreserved():
return False
else:
return True
else:
return False
| 31.703297
| 79
| 0.638475
|
4a02ee481bec323558f6ccfec81319e8bfaf1a62
| 830
|
py
|
Python
|
server/opendp_apps/analysis/migrations/0013_auto_20210919_2254.py
|
mikephelan/opendp-ux
|
80c65da0ed17adc01c69b05dbc9cbf3a5973a016
|
[
"MIT"
] | 6
|
2021-05-25T18:50:58.000Z
|
2022-03-23T19:52:15.000Z
|
server/opendp_apps/analysis/migrations/0013_auto_20210919_2254.py
|
mikephelan/opendp-ux
|
80c65da0ed17adc01c69b05dbc9cbf3a5973a016
|
[
"MIT"
] | 298
|
2021-05-19T17:34:09.000Z
|
2022-03-29T18:45:22.000Z
|
server/opendp_apps/analysis/migrations/0013_auto_20210919_2254.py
|
opendp/dpcreator
|
6ba3c58ecdcd81ca1f4533a14ce7604eccf6a646
|
[
"MIT"
] | 2
|
2020-10-16T22:03:24.000Z
|
2020-11-15T22:45:19.000Z
|
# Generated by Django 3.1.12 on 2021-09-19 22:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Renames ReleaseInfo.dp_release_json to dp_release_json_file and
    replaces the dp_release_pdf field with the FileField dp_release_pdf_file.
    """
    dependencies = [
        ('analysis', '0012_auto_20210919_2233'),
    ]
    operations = [
        migrations.RenameField(
            model_name='releaseinfo',
            old_name='dp_release_json',
            new_name='dp_release_json_file',
        ),
        migrations.RemoveField(
            model_name='releaseinfo',
            name='dp_release_pdf',
        ),
        migrations.AddField(
            model_name='releaseinfo',
            name='dp_release_pdf_file',
            # NOTE(review): 'storage' was serialized as a hard-coded absolute
            # path from the generating developer's machine ('/Users/...');
            # this cannot exist on other hosts — confirm and regenerate the
            # migration with a portable storage setting.
            field=models.FileField(blank=True, null=True, storage='/Users/ramanprasad/Documents/github-rp/dpcreator/server/test_setup/public_release_files', upload_to='release-files/%Y/%m/%d/'),
        ),
    ]
| 29.642857
| 194
| 0.621687
|
4a02ee560e5647c9b0e735d8b59f1c731ae83902
| 475
|
py
|
Python
|
tests/type/builtin_type/test_low.py
|
llambeau/finitio.py
|
27c2799709993c6edb9d9038290792ed90a97346
|
[
"0BSD"
] | 1
|
2016-02-06T17:16:22.000Z
|
2016-02-06T17:16:22.000Z
|
tests/type/builtin_type/test_low.py
|
llambeau/finitio.py
|
27c2799709993c6edb9d9038290792ed90a97346
|
[
"0BSD"
] | null | null | null |
tests/type/builtin_type/test_low.py
|
llambeau/finitio.py
|
27c2799709993c6edb9d9038290792ed90a97346
|
[
"0BSD"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_equality
----------------------------------
Tests for the `BuiltinType` low() method
"""
import unittest
from numbers import Number
from finitio.types import BuiltinType
class TestBuiltinTypeLow(unittest.TestCase):
    """Tests for the BuiltinType.low() method."""
    # Shared fixture: a BuiltinType wrapping numbers.Number.
    subject = BuiltinType(Number)
    def test_equals_itself(self):
        # low() should return the very same object (identity, not equality).
        self.assertIs(self.subject.low(), self.subject)
if __name__ == '__main__':
    import sys
    # Propagate unittest's result via the process exit code.
    sys.exit(unittest.main())
| 17.592593
| 55
| 0.652632
|
4a02ee814e7bd2a1367f9ef189c086ab08c4d8f2
| 4,101
|
py
|
Python
|
scripts/lane_detection.py
|
deltaautonomy/delta_perception
|
7b40f005b9753464016207d142969890ff6ec031
|
[
"BSD-3-Clause"
] | 1
|
2021-05-15T13:05:09.000Z
|
2021-05-15T13:05:09.000Z
|
scripts/lane_detection.py
|
deltaautonomy/delta_perception
|
7b40f005b9753464016207d142969890ff6ec031
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/lane_detection.py
|
deltaautonomy/delta_perception
|
7b40f005b9753464016207d142969890ff6ec031
|
[
"BSD-3-Clause"
] | 3
|
2020-02-20T21:59:02.000Z
|
2021-05-15T13:05:10.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Author : Heethesh Vhavle, Apoorv Singh
Email : heethesh@cmu.edu
Version : 1.0.0
Date : Apr 07, 2019
'''
# Python 2/3 compatibility
from __future__ import print_function, absolute_import, division
# Handle paths and OpenCV import
from init_paths import *
# External modules
import matplotlib.pyplot as plt
# ROS modules
import tf
import rospy
import message_filters
from cv_bridge import CvBridge, CvBridgeError
# ROS messages
from sensor_msgs.msg import Image
from diagnostic_msgs.msg import DiagnosticArray
from delta_msgs.msg import LaneMarking, LaneMarkingArray
# Local python modules
from utils import *
from erfnet.lane_detection import ERFNetLaneDetector
from ipm.ipm import InversePerspectiveMapping
# Frames
# TF frame ids used in published message headers.
CAMERA_FRAME = 'ego_vehicle/camera/rgb/front'
EGO_VEHICLE_FRAME = 'ego_vehicle'
# Classes
# Module-level singletons shared by the callbacks below.
lane_detector = ERFNetLaneDetector()  # ERFNet-based lane marking detector
ipm = InversePerspectiveMapping()  # NOTE(review): constructed but unused in this file
lane_fps = FPSLogger('LaneNet')  # frame-rate tracker reported in diagnostics
########################### Functions ###########################
def publish_diagnostics(publishers):
    """Publish a diagnostics message carrying the lane detector's FPS."""
    status = make_diagnostics_status('lane_detection', 'perception_', str(lane_fps.fps))
    diag_msg = DiagnosticArray()
    diag_msg.status.append(status)
    publishers['diag_pub'].publish(diag_msg)
def lane_detection(image_msg, publishers, vis=True, **kwargs):
    """Run lane detection on one camera frame and publish the results.

    Publishes a LaneMarkingArray (one slope/intercept pair per lane) plus the
    annotated output image, and emits a diagnostics message each call.
    """
    # Read image message
    img = message_to_cv2(image_msg)
    if img is None:
        return
    # Time the detector run for the FPS diagnostics.
    lane_fps.lap()
    output, lanes = lane_detector.run(img, rospy.Time.now())
    lane_fps.tick()
    # Publish diagnostics status
    publish_diagnostics(publishers)
    if output is None or lanes is None:
        return
    # Convert the (slope, intercept) pairs into a LaneMarkingArray message.
    lane_array = LaneMarkingArray()
    lane_array.header.stamp = image_msg.header.stamp
    lane_array.header.frame_id = EGO_VEHICLE_FRAME
    for slope, intercept in lanes:
        marking = LaneMarking()
        marking.slope = slope
        marking.intercept = intercept
        lane_array.lanes.append(marking)
    # Publish the lane data
    publishers['lane_pub'].publish(lane_array)
    # Visualize and publish image message
    cv2_to_message(output, publishers['image_pub'])
def callback(image_msg, publishers, **kwargs):
    """Synchronized-subscriber callback: hand the frame to the perception pipeline."""
    lane_detection(image_msg, publishers)
def shutdown_hook():
    """Print a shutdown banner and release the lane detector's resources."""
    banner = '*' * 30
    print('\n\033[95m' + banner + ' Lane Detection Shutdown ' + banner + '\033[00m\n')
    lane_detector.close()
def run(**kwargs):
    """Set up the lane detection ROS node, wire up topics, and spin until shutdown."""
    # Start the ROS node
    rospy.init_node('lane_detection', anonymous=True)
    rospy.loginfo('Current PID: [%d]' % os.getpid())

    # Load the lane detection model before any frames arrive
    lane_detector.setup()

    # Resolve topic names from the parameter server (with defaults)
    image_topic = rospy.get_param('~image_color', '/carla/ego_vehicle/camera/rgb/front/image_color')
    marking_topic = rospy.get_param('~lane_output', '/delta/perception/lane_detection/markings')
    overlay_topic = rospy.get_param('~output_image', '/delta/perception/lane_detection/image')
    diag_topic = rospy.get_param('~diagnostics', '/delta/perception/lane_detection/diagnostics')

    # Log the resolved topics
    rospy.loginfo('Image topic: %s' % image_topic)
    rospy.loginfo('Lane marking topic: %s' % marking_topic)
    rospy.loginfo('Output topic: %s' % overlay_topic)

    # Create the output publishers
    publishers = {
        'lane_pub': rospy.Publisher(marking_topic, LaneMarkingArray, queue_size=5),
        'image_pub': rospy.Publisher(overlay_topic, Image, queue_size=5),
        'diag_pub': rospy.Publisher(diag_topic, DiagnosticArray, queue_size=5),
    }

    # Subscribe to the camera topic and register the time-synchronized callback
    image_sub = message_filters.Subscriber(image_topic, Image)
    synchronizer = message_filters.ApproximateTimeSynchronizer([image_sub], queue_size=1, slop=0.1)
    synchronizer.registerCallback(callback, publishers, **kwargs)

    # Release detector resources when the node is stopped
    rospy.on_shutdown(shutdown_hook)

    # Keep python from exiting until this node is stopped
    try:
        rospy.spin()
    except rospy.ROSInterruptException:
        rospy.loginfo('Shutting down')
# Entry point: launch the lane detection node when executed as a script
if __name__ == '__main__':
    # Start perception node
    run()
| 28.880282
| 100
| 0.715191
|
4a02f0192386447c5674b8210e925df0b4a87729
| 11,267
|
py
|
Python
|
homeassistant/components/modbus/modbus.py
|
mikan-megane/core
|
837220cce40890e296920d33a623adbc11bd15a6
|
[
"Apache-2.0"
] | 2
|
2020-03-29T05:32:57.000Z
|
2021-06-13T06:55:05.000Z
|
homeassistant/components/modbus/modbus.py
|
mikan-megane/core
|
837220cce40890e296920d33a623adbc11bd15a6
|
[
"Apache-2.0"
] | 79
|
2020-07-23T07:13:37.000Z
|
2022-03-22T06:02:37.000Z
|
homeassistant/components/modbus/modbus.py
|
mikan-megane/core
|
837220cce40890e296920d33a623adbc11bd15a6
|
[
"Apache-2.0"
] | 1
|
2020-11-18T21:04:18.000Z
|
2020-11-18T21:04:18.000Z
|
"""Support for Modbus."""
import asyncio
import logging
from pymodbus.client.sync import ModbusSerialClient, ModbusTcpClient, ModbusUdpClient
from pymodbus.constants import Defaults
from pymodbus.exceptions import ModbusException
from pymodbus.transaction import ModbusRtuFramer
from homeassistant.const import (
CONF_DELAY,
CONF_HOST,
CONF_METHOD,
CONF_NAME,
CONF_PORT,
CONF_TIMEOUT,
CONF_TYPE,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import callback
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.event import async_call_later
from .const import (
ATTR_ADDRESS,
ATTR_HUB,
ATTR_STATE,
ATTR_UNIT,
ATTR_VALUE,
CALL_TYPE_COIL,
CALL_TYPE_DISCRETE,
CALL_TYPE_REGISTER_HOLDING,
CALL_TYPE_REGISTER_INPUT,
CALL_TYPE_WRITE_COIL,
CALL_TYPE_WRITE_COILS,
CALL_TYPE_WRITE_REGISTER,
CALL_TYPE_WRITE_REGISTERS,
CONF_BAUDRATE,
CONF_BYTESIZE,
CONF_CLOSE_COMM_ON_ERROR,
CONF_PARITY,
CONF_STOPBITS,
DEFAULT_HUB,
MODBUS_DOMAIN as DOMAIN,
PLATFORMS,
SERVICE_WRITE_COIL,
SERVICE_WRITE_REGISTER,
)
# Keys of the per-call-type entries in ModbusHub._call_type:
# ENTRY_FUNC holds the bound pymodbus client method, ENTRY_ATTR the name of
# the attribute on the pymodbus response object that carries the payload.
ENTRY_FUNC = "func"
ENTRY_ATTR = "attr"
_LOGGER = logging.getLogger(__name__)
async def async_modbus_setup(
    hass, config, service_write_register_schema, service_write_coil_schema
):
    """Set up Modbus component.

    Creates one ModbusHub per configured hub, loads the dependent
    platforms, registers the write_register/write_coil services, and
    installs a listener that closes every hub on Home Assistant stop.
    """
    hass.data[DOMAIN] = hub_collect = {}

    for conf_hub in config[DOMAIN]:
        my_hub = ModbusHub(hass, conf_hub)
        hub_collect[conf_hub[CONF_NAME]] = my_hub

        # modbus needs to be activated before components are loaded
        # to avoid a racing problem
        await my_hub.async_setup()

        # load platforms
        for component, conf_key in PLATFORMS:
            if conf_key in conf_hub:
                hass.async_create_task(
                    async_load_platform(hass, component, DOMAIN, conf_hub, config)
                )

    async def async_stop_modbus(event):
        """Stop Modbus service."""
        for client in hub_collect.values():
            await client.async_close()
            del client

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_stop_modbus)

    async def async_write_register(service):
        """Write Modbus registers (service handler)."""
        unit = int(float(service.data[ATTR_UNIT]))
        address = int(float(service.data[ATTR_ADDRESS]))
        value = service.data[ATTR_VALUE]
        # Fall back to the default hub when no hub name was given
        client_name = service.data.get(ATTR_HUB, DEFAULT_HUB)
        if isinstance(value, list):
            # A list value maps to a multi-register write
            await hub_collect[client_name].async_pymodbus_call(
                unit, address, [int(float(i)) for i in value], CALL_TYPE_WRITE_REGISTERS
            )
        else:
            await hub_collect[client_name].async_pymodbus_call(
                unit, address, int(float(value)), CALL_TYPE_WRITE_REGISTER
            )

    hass.services.async_register(
        DOMAIN,
        SERVICE_WRITE_REGISTER,
        async_write_register,
        schema=service_write_register_schema,
    )

    async def async_write_coil(service):
        """Write Modbus coil (service handler)."""
        unit = service.data[ATTR_UNIT]
        address = service.data[ATTR_ADDRESS]
        state = service.data[ATTR_STATE]
        # Fall back to the default hub when no hub name was given
        client_name = service.data.get(ATTR_HUB, DEFAULT_HUB)
        if isinstance(state, list):
            # A list state maps to a multi-coil write
            await hub_collect[client_name].async_pymodbus_call(
                unit, address, state, CALL_TYPE_WRITE_COILS
            )
        else:
            await hub_collect[client_name].async_pymodbus_call(
                unit, address, state, CALL_TYPE_WRITE_COIL
            )

    hass.services.async_register(
        DOMAIN, SERVICE_WRITE_COIL, async_write_coil, schema=service_write_coil_schema
    )
    return True
class ModbusHub:
    """Thread safe wrapper class for pymodbus.

    All pymodbus calls are executed in an executor job while holding an
    asyncio lock, so only one synchronous pymodbus operation runs at a time.
    """

    def __init__(self, hass, client_config):
        """Initialize the Modbus hub."""

        # generic configuration
        self._client = None
        self._async_cancel_listener = None
        # True after a pymodbus error was logged; repeat errors go to debug
        self._in_error = False
        # Serializes all pymodbus calls (pymodbus sync clients are not thread safe)
        self._lock = asyncio.Lock()
        self.hass = hass
        self._config_name = client_config[CONF_NAME]
        self._config_type = client_config[CONF_TYPE]
        self._config_port = client_config[CONF_PORT]
        self._config_timeout = client_config[CONF_TIMEOUT]
        # Startup delay in seconds; while non-zero all calls are dropped
        self._config_delay = client_config[CONF_DELAY]
        self._config_reset_socket = client_config[CONF_CLOSE_COMM_ON_ERROR]
        # NOTE(review): mutates the pymodbus global default timeout -- affects
        # every pymodbus client in the process, not just this hub.
        Defaults.Timeout = client_config[CONF_TIMEOUT]
        if self._config_type == "serial":
            # serial configuration
            self._config_method = client_config[CONF_METHOD]
            self._config_baudrate = client_config[CONF_BAUDRATE]
            self._config_stopbits = client_config[CONF_STOPBITS]
            self._config_bytesize = client_config[CONF_BYTESIZE]
            self._config_parity = client_config[CONF_PARITY]
        else:
            # network configuration
            self._config_host = client_config[CONF_HOST]

        # Maps each call type to the response attribute carrying the payload
        # (ENTRY_ATTR) and the pymodbus client method (ENTRY_FUNC).  The
        # functions are bound in async_setup, after the client is created.
        self._call_type = {
            CALL_TYPE_COIL: {
                ENTRY_ATTR: "bits",
                ENTRY_FUNC: None,
            },
            CALL_TYPE_DISCRETE: {
                ENTRY_ATTR: "bits",
                ENTRY_FUNC: None,
            },
            CALL_TYPE_REGISTER_HOLDING: {
                ENTRY_ATTR: "registers",
                ENTRY_FUNC: None,
            },
            CALL_TYPE_REGISTER_INPUT: {
                ENTRY_ATTR: "registers",
                ENTRY_FUNC: None,
            },
            CALL_TYPE_WRITE_COIL: {
                ENTRY_ATTR: "value",
                ENTRY_FUNC: None,
            },
            CALL_TYPE_WRITE_COILS: {
                ENTRY_ATTR: "count",
                ENTRY_FUNC: None,
            },
            CALL_TYPE_WRITE_REGISTER: {
                ENTRY_ATTR: "value",
                ENTRY_FUNC: None,
            },
            CALL_TYPE_WRITE_REGISTERS: {
                ENTRY_ATTR: "count",
                ENTRY_FUNC: None,
            },
        }

    def _log_error(self, exception_error: ModbusException, error_state=True):
        # Log the first occurrence at ERROR, subsequent ones at DEBUG until
        # a successful call resets self._in_error.
        log_text = "Pymodbus: " + str(exception_error)
        if self._in_error:
            _LOGGER.debug(log_text)
        else:
            _LOGGER.error(log_text)
            self._in_error = error_state

    async def async_setup(self):
        """Set up pymodbus client."""
        try:
            # Build the right pymodbus client for the configured transport
            if self._config_type == "serial":
                self._client = ModbusSerialClient(
                    method=self._config_method,
                    port=self._config_port,
                    baudrate=self._config_baudrate,
                    stopbits=self._config_stopbits,
                    bytesize=self._config_bytesize,
                    parity=self._config_parity,
                    timeout=self._config_timeout,
                    retry_on_empty=True,
                    reset_socket=self._config_reset_socket,
                )
            elif self._config_type == "rtuovertcp":
                self._client = ModbusTcpClient(
                    host=self._config_host,
                    port=self._config_port,
                    framer=ModbusRtuFramer,
                    timeout=self._config_timeout,
                    reset_socket=self._config_reset_socket,
                )
            elif self._config_type == "tcp":
                self._client = ModbusTcpClient(
                    host=self._config_host,
                    port=self._config_port,
                    timeout=self._config_timeout,
                    reset_socket=self._config_reset_socket,
                )
            elif self._config_type == "udp":
                self._client = ModbusUdpClient(
                    host=self._config_host,
                    port=self._config_port,
                    timeout=self._config_timeout,
                    reset_socket=self._config_reset_socket,
                )
        except ModbusException as exception_error:
            self._log_error(exception_error, error_state=False)
            return

        # Connect in an executor job; pymodbus connect() is blocking
        async with self._lock:
            await self.hass.async_add_executor_job(self._pymodbus_connect)

        # Bind the client's read/write methods into the call-type table
        self._call_type[CALL_TYPE_COIL][ENTRY_FUNC] = self._client.read_coils
        self._call_type[CALL_TYPE_DISCRETE][
            ENTRY_FUNC
        ] = self._client.read_discrete_inputs
        self._call_type[CALL_TYPE_REGISTER_HOLDING][
            ENTRY_FUNC
        ] = self._client.read_holding_registers
        self._call_type[CALL_TYPE_REGISTER_INPUT][
            ENTRY_FUNC
        ] = self._client.read_input_registers
        self._call_type[CALL_TYPE_WRITE_COIL][ENTRY_FUNC] = self._client.write_coil
        self._call_type[CALL_TYPE_WRITE_COILS][ENTRY_FUNC] = self._client.write_coils
        self._call_type[CALL_TYPE_WRITE_REGISTER][
            ENTRY_FUNC
        ] = self._client.write_register
        self._call_type[CALL_TYPE_WRITE_REGISTERS][
            ENTRY_FUNC
        ] = self._client.write_registers

        # Start counting down to allow modbus requests.
        if self._config_delay:
            self._async_cancel_listener = async_call_later(
                self.hass, self._config_delay, self.async_end_delay
            )

    @callback
    def async_end_delay(self, args):
        """End startup delay."""
        # Clearing _config_delay re-enables async_pymodbus_call
        self._async_cancel_listener = None
        self._config_delay = 0

    def _pymodbus_close(self):
        """Close sync. pymodbus."""
        if self._client:
            try:
                self._client.close()
            except ModbusException as exception_error:
                self._log_error(exception_error)
        self._client = None

    async def async_close(self):
        """Disconnect client."""
        # Cancel a pending startup-delay timer, if any
        if self._async_cancel_listener:
            self._async_cancel_listener()
            self._async_cancel_listener = None

        async with self._lock:
            return await self.hass.async_add_executor_job(self._pymodbus_close)

    def _pymodbus_connect(self):
        """Connect client."""
        try:
            self._client.connect()
        except ModbusException as exception_error:
            self._log_error(exception_error, error_state=False)

    def _pymodbus_call(self, unit, address, value, use_call):
        """Call sync. pymodbus."""
        # NOTE(review): unit 0 is falsy and is sent without a unit id -- confirm intended
        kwargs = {"unit": unit} if unit else {}
        try:
            result = self._call_type[use_call][ENTRY_FUNC](address, value, **kwargs)
        except ModbusException as exception_error:
            self._log_error(exception_error)
            result = exception_error
        # A valid response must expose the payload attribute for this call type;
        # error responses (and the exception object above) do not.
        if not hasattr(result, self._call_type[use_call][ENTRY_ATTR]):
            self._log_error(result)
            return None
        # Successful call: reset the error-dedup flag
        self._in_error = False
        return result

    async def async_pymodbus_call(self, unit, address, value, use_call):
        """Convert async to sync pymodbus call."""
        # Drop calls while the configured startup delay is still running
        if self._config_delay:
            return None
        async with self._lock:
            return await self.hass.async_add_executor_job(
                self._pymodbus_call, unit, address, value, use_call
            )
| 34.35061
| 88
| 0.612497
|
4a02f09160fb96daf8afeb0546fd8f4f407861be
| 41,975
|
py
|
Python
|
precious.py
|
antonvino/precious_mac
|
a20897ebc134f04d0e1da08e8d8f0d338a576c55
|
[
"MIT"
] | null | null | null |
precious.py
|
antonvino/precious_mac
|
a20897ebc134f04d0e1da08e8d8f0d338a576c55
|
[
"MIT"
] | null | null | null |
precious.py
|
antonvino/precious_mac
|
a20897ebc134f04d0e1da08e8d8f0d338a576c55
|
[
"MIT"
] | null | null | null |
"""
Main app controller file
Uses PyObjC wrappers
Uses Requests (communicates with a Django web app)
Author:
The original author of the Precious app is Anton Vinokurov (www.antonvino.com)
"""
from Cocoa import *
from Foundation import NSObject
import json, os, signal, time
from datetime import datetime, timedelta, date
# Precious Web site URL
# Base URL for every REST API request made by this app.
SITE_URL = 'http://mytimeisprecious.com/'
# Local development server alternative:
#SITE_URL = 'http://127.0.0.1:8000/'
class PreciousUser():
    """The signed-in user of the Precious web service.

    Holds the DRF API token obtained from the Django backend; the sync
    code uses it to authorize requests.
    """

    def __init__(self):
        # No token until authenticate() succeeds
        self.token = None

    def authenticate(self, email, password):
        """Obtain an API token for the given credentials.

        Sets self.token and self.email on success.

        Raises:
            ValueError: with the server's error message when the
                e-mail/password pair is rejected.
        """
        print('authenticating...')
        import requests
        # construct auth data using the fields
        auth_data = {
            'username': email,
            'password': password
        }
        # get token for the user
        url = SITE_URL + 'api/token-auth/'
        print(url)
        print(auth_data)
        r = requests.post(url, data=auth_data)
        response = r.json()
        if 'token' in response:
            self.token = response['token']
            self.email = auth_data['username']
        else:
            print('[Auth error] {0}'.format(r.text))
            # Prefer the server-provided message when available
            error = 'E-mail or password do not match.'
            if 'non_field_errors' in response:
                error = response['non_field_errors'][0]
            raise ValueError(error)

    def create(self, email, username, password):
        """Create a new account on the Precious web service.

        Sets self.email on success.

        Raises:
            ValueError: (email_error, username_error) when sign-up fails.
        """
        print('creating an account...')
        import requests
        # construct auth data using the fields
        auth_data = {
            'email': email,
            'username': username,
            'password': password
        }
        # create the account
        url = SITE_URL + 'api/sign-up/'
        print(url)
        print(auth_data)
        r = requests.post(url, data=auth_data)
        print(r.text)
        response = r.json()
        if 'id' not in response:
            # Django sends an error list per field; default to empty messages
            email_errors = response.get('email', [''])
            username_errors = response.get('username', [''])
            raise ValueError(email_errors[0], username_errors[0])
        self.email = response['email']
class PreciousData():
    """Local time-log storage plus sync with the Precious web API.

    Entries are kept in precious_mytime.js as nested JSON:
    {year: {month: {day: {'reflection': str, hour: {'activity': str,
    'productive': int}}}}} -- all keys are decimal strings.
    """

    def __init__(self):
        pass

    def _read_log(self):
        """Read and decode precious_mytime.js; return {} if it is missing."""
        try:
            # Context manager guarantees the file is closed (the old code
            # referenced fr.close without calling it, leaking the handle)
            with open('precious_mytime.js', 'r') as fr:
                return json.load(fr)
        except IOError:
            print('[Data:Error] File not found')
            return {}

    def load(self, year = None, month = None, day = None, hour = None):
        """
        Loads the data from JSON file according to given date
        :return: A tuple: reflection, activity, productive
        """
        # getting the system date and time if they are not set
        # (hour == 0, i.e. midnight, counts as "set")
        if not year or not month or not day or (not hour and hour != 0):
            today = datetime.now()
            year = today.year
            month = today.month
            day = today.day
            hour = today.hour

        # convert date and hour to strings (JSON keys are strings)
        year = str(year)
        month = str(month)
        day = str(day)
        hour = str(hour)

        json_data = self._read_log()

        reflection = None
        activity = None
        productive = None
        if year in json_data and month in json_data[year] and day in json_data[year][month]:
            day_node = json_data[year][month][day]
            if 'reflection' in day_node:
                reflection = day_node['reflection']
            if hour in day_node:
                activity = day_node[hour]['activity']
                productive = day_node[hour]['productive']
        return reflection, activity, productive

    def save(self, type, productive = 1, activity = None, reflection = None, year = None, month = None, day = None, hour = None):
        """
        Saves the data for Hour or Day in a JSON file
        :param type: `day` or `hour`
        :param productive: 1/2/3 as in low/med/high
        :param activity: text about activity
        :param reflection: reflection of the day
        other parameters are self-explanatory and should be numbers
        """
        # getting the system date and time if they are not set
        # (hour is irrelevant for day entries; hour == 0 counts as set)
        if not year or not month or not day or (not hour and hour != 0 and type != 'day'):
            today = datetime.now()
            year = today.year
            month = today.month
            day = today.day
            hour = int(today.strftime('%H'))  # need a 24 hour

        # convert date and hour to strings (JSON keys are strings)
        year = str(year)
        month = str(month)
        day = str(day)
        hour = str(hour)

        json_data = self._read_log()

        # create the year/month/day nodes if they do not exist yet
        if year not in json_data:
            json_data[year] = {}
        if month not in json_data[year]:
            json_data[year][month] = {}
        if day not in json_data[year][month]:
            json_data[year][month][day] = {}

        if type == 'hour':
            # logging hour: create the hour node and merge the new values in
            if hour not in json_data[year][month][day]:
                json_data[year][month][day][hour] = {}
            json_data[year][month][day][hour].update({
                'productive': productive,
                'activity': activity
            })
        elif type == 'day':
            # logging day: merge the reflection into the day node
            json_data[year][month][day].update({
                'reflection': reflection
            })

        try:
            # rewrite the whole file (fw.close was previously never called)
            with open('precious_mytime.js', 'w') as fw:
                json.dump(json_data, fw)
            print('[Data] {0} saved'.format(type))
        except IOError:
            print('[Data:Error] Could not open the file precious_mytime.js')

    def sync(self, all = False):
        """
        Syncs the data with Web API of Precious Web
        Loads data from local JSON file
        Saves data on the web
        Using Python Requests
        Requires user instance to be authenticated (i.e. to have a valid token)
        Syncs all data only if all=True, otherwise syncs last 3 days
        :return: a short description of what was synced
        """
        assert(user.token is not None)
        import requests
        print('[Data] Syncing start...')

        # what has been synced
        word = 'All data'
        headers = {'Authorization': 'Token {0}'.format(user.token), 'user-agent': 'precious-app/1.0.0'}

        # resolve the authorized user's id/username/email from the API
        url = SITE_URL + 'api/users?email={0}'.format(user.email)
        print('[API] Authorized user {0}'.format(url))
        r = requests.get(url, headers=headers)
        users = r.json()
        print(r.text)
        user_data = users.pop()
        url = SITE_URL + 'api/users/{0}'.format(user_data['id'])
        print('[API] Authorized user detailed {0}'.format(url))
        r = requests.get(url, headers=headers)
        user_data = r.json()
        user.username = user_data['username']
        user.email = user_data['email']
        user.id = user_data['id']

        # a new app version first pulls the old data down from the server
        if precious_settings.is_new_version is True:
            self.back_sync()
            word = 'Old data'
        else:
            # 3 days ago datetime
            dt = datetime.now() - timedelta(days=3)

            # collect days/hours already logged on the server in the last
            # 3 days.  Both lists are initialized BEFORE the loop: the old
            # code reset recent_hours per day (losing earlier days) and left
            # both undefined when the server returned no days.
            recent_days = []
            recent_hours = []
            url = SITE_URL + 'api/days?synced_after={0}&author={1}'.format(dt, user.id)
            r = requests.get(url, headers=headers)
            days = r.json()
            print('[API] {0}'.format(url))
            for day in days:
                recent_days.append(day['date'])
                # request recently logged hours for this day
                url = SITE_URL + 'api/hours?synced_after={0}&day={1}&author={2}'.format(dt, day['id'], user.id)
                r = requests.get(url, headers=headers)
                hours = r.json()
                print('[API] {0}'.format(url))
                for hour in hours:
                    recent_hours.append('{0}-{1}'.format(day['date'], hour['hour']))

            # load the local log
            with open('precious_mytime.js', 'r') as fr:
                json_data = json.load(fr)

            # flag and arrays not to redo the already synced stuff
            # in case the connection breaks and the loop has to restart
            all_done = False
            days_done = []
            hours_done = []
            # retry the whole upload until it completes without an exception;
            # days/hours already uploaded are skipped on the next pass
            while not all_done:
                try:
                    print('Wait for 3 seconds...')
                    time.sleep(3)
                    for year in json_data:
                        for month in json_data[year]:
                            for day in json_data[year][month]:
                                day_date = date(day=int(day), month=int(month), year=int(year))
                                # sync only the last 3 days unless all==True
                                if all == True or day_date >= dt.date():
                                    # skip days already uploaded in a previous pass
                                    if str(day_date) not in days_done:
                                        print('[API] Day POST/PUT')
                                        # construct the day payload
                                        day_data = {'author': user.id, 'date': day_date}
                                        if 'reflection' in json_data[year][month][day]:
                                            day_data['day_text'] = json_data[year][month][day]['reflection']
                                        this_day = {}
                                        if str(day_date) not in recent_days:
                                            # day unknown to the server: POST a new one
                                            url = SITE_URL + 'api/days/'
                                            print('[API] POST {0}'.format(url))
                                            r = requests.post(url, data=day_data, headers=headers)
                                        else:
                                            # day exists: look it up and PUT the update
                                            url = SITE_URL + 'api/days/?date={0}&author={1}'.format(day_date, user.id)
                                            print('[API] GET {0}'.format(url))
                                            r = requests.get(url, headers=headers)
                                            this_day = r.json()
                                            this_day = this_day.pop()
                                            url = SITE_URL + 'api/days/{0}/'.format(this_day['id'])
                                            print('[API] PUT {0}'.format(url))
                                            r = requests.put(url, data=day_data, headers=headers)

                                        # fetch the day's server id if we do not have it yet
                                        if 'id' not in this_day:
                                            url = SITE_URL + 'api/days/?date={0}&author={1}'.format(day_date, user.id)
                                            print('[API] GET {0}'.format(url))
                                            r = requests.get(url, headers=headers)
                                            this_day = r.json()
                                            this_day = this_day.pop()

                                        # upload every hour entry of this day
                                        for hour in json_data[year][month][day]:
                                            if hour != 'reflection':
                                                print('[API] Hour POST/PUT')
                                                hour_data = {'author': user.id, 'day': this_day['id'], 'hour': hour}
                                                if 'activity' in json_data[year][month][day][hour]:
                                                    hour_data['hour_text'] = json_data[year][month][day][hour]['activity']
                                                if 'productive' in json_data[year][month][day][hour]:
                                                    hour_data['productive'] = json_data[year][month][day][hour]['productive']
                                                hour_date = '{0}-{1}'.format(day_date, hour)
                                                # skip hours already uploaded in a previous pass
                                                if hour_date not in hours_done:
                                                    if hour_date not in recent_hours:
                                                        # hour unknown to the server: POST a new one
                                                        url = SITE_URL + 'api/hours/'
                                                        print('[API] POST {0}'.format(url))
                                                        r = requests.post(url, data=hour_data, headers=headers)
                                                    else:
                                                        # hour exists: look it up and PUT the update
                                                        url = SITE_URL + 'api/hours/?day={0}&hour={1}&author={2}'.format(this_day['id'], hour, user.id)
                                                        print('[API] GET {0}'.format(url))
                                                        r = requests.get(url, headers=headers)
                                                        this_hour = r.json()
                                                        this_hour = this_hour.pop()
                                                        url = SITE_URL + 'api/hours/{0}/'.format(this_hour['id'])
                                                        print('[API] PUT {0}'.format(url))
                                                        r = requests.put(url, data=hour_data, headers=headers)
                                                    hours_done.append(hour_date)
                                        days_done.append(day_date)
                    # set the flag - loop will not be entered anymore
                    all_done = True
                except Exception as e:
                    print('[API] Error in sync loop: {0}'.format(e))

        # return what has been synced
        if not all:
            word = 'Past 3 days'
        return word

    def back_sync(self):
        """
        Loads the data with Web API of Precious Web
        Saves the data in local JSON file
        Using Python Requests
        Requires user instance to be authenticated (i.e. to have a valid token)
        """
        assert(user.token is not None)
        assert(user.email is not None)
        import requests
        print('[Data] Syncing start...')
        headers = {'Authorization': 'Token {0}'.format(user.token), 'user-agent': 'precious-app/1.0.0'}

        # request all logged days for this user
        url = SITE_URL + 'api/days?author={0}'.format(user.id)
        r = requests.get(url, headers=headers)
        days = r.json()
        if len(days) > 0:
            for day_data in days:
                day_date = datetime.strptime(day_data['date'], '%Y-%m-%d')
                year = day_date.year
                month = day_date.month
                day = day_date.day
                print('day:{0}-{1}-{2}'.format(day, month, year))
                # log the day locally (self.save, not the module-level
                # precious_data global the old code relied on)
                self.save(
                    type='day',
                    reflection=day_data['day_text'],
                    year=year,
                    month=month,
                    day=day)
                # fetch and log each hour of this day
                url = SITE_URL + 'api/hours?author={0}&day={1}'.format(user.id, day_data['id'])
                r = requests.get(url, headers=headers)
                hours = r.json()
                for hour in hours:
                    self.save(
                        type='hour',
                        productive=hour['productive'],
                        activity=hour['hour_text'],
                        year=year,
                        month=month,
                        day=day,
                        hour=hour['hour'])
        # if everything went successfully, clear the new-version flag
        precious_settings.is_new_version = False
        precious_settings.save()
class PreciousSettings():
    """App preferences persisted in precious_mysettings.js.

    Attributes (set by load() or assigned by the caller before save()):
        is_new_version: True while old data still needs to be re-synced.
        activate_each_hour: bring the app to the front at the end of each hour.
    """

    def __init__(self):
        pass

    def load(self):
        """
        Loads the settings data from a JSON file.
        Leaves the attributes untouched when the file is missing.
        """
        try:
            # Context manager guarantees the file is closed (the old code
            # referenced fr.close without calling it, leaking the handle)
            with open('precious_mysettings.js', 'r') as fr:
                json_data = json.load(fr)
            # is it a new app version?
            self.is_new_version = bool(json_data['is_new_version'])
            # activate app each hour or not?
            self.activate_each_hour = bool(json_data['activate_each_hour'])
            print('[Data] Settings loaded')
        except IOError:
            # file does not exist yet
            print('[Data:Error] Could not open the file precious_mysettings.js')

    def save(self):
        """
        Saves the settings data into a JSON file.
        Booleans are stored as 0/1 integers.
        """
        json_data = {
            'is_new_version': int(self.is_new_version),
            'activate_each_hour': int(self.activate_each_hour),
        }
        try:
            # rewrite the file (fw.close was previously never called)
            with open('precious_mysettings.js', 'w') as fw:
                json.dump(json_data, fw)
            print('[Data] Settings saved')
        except IOError:
            print('[Data:Error] Could not open the file precious_mysettings.js')
class PreciousController(NSWindowController):
# Hour window
hourLabel = objc.IBOutlet()
hourField = objc.IBOutlet()
hourButton = objc.IBOutlet()
hourProgress = objc.IBOutlet()
hourSegment = objc.IBOutlet()
# Day window
dayLabel = objc.IBOutlet()
dayField = objc.IBOutlet()
dayButton = objc.IBOutlet()
dayProgress = objc.IBOutlet()
# Sign up window
signUpWindow = objc.IBOutlet()
signUpEmailField = objc.IBOutlet()
signUpUsernameField = objc.IBOutlet()
signUpPasswordField = objc.IBOutlet()
signUpProgress = objc.IBOutlet()
signUpButton = objc.IBOutlet()
signUpEmailError = objc.IBOutlet()
signUpUsernameError = objc.IBOutlet()
signUpError = objc.IBOutlet()
# sync window
syncWindow = objc.IBOutlet()
usernameField = objc.IBOutlet()
passwordField = objc.IBOutlet()
syncProgress = objc.IBOutlet()
syncButton = objc.IBOutlet()
syncError = objc.IBOutlet()
statsButton = objc.IBOutlet()
# Miscellaneous items
helpText = objc.IBOutlet()
settMenuActivate = objc.IBOutlet()
def windowDidLoad(self):
"""
Initializing the main window controller
Setting default field values and resetting everything
"""
NSWindowController.windowDidLoad(self)
# default data values in the window
self.productive = 1
self.activity = None
self.reflection = None
self.syncAllFlag = False
# attempt to load current day/hour data
self.loadData()
# init the badge
self.badge = NSApp.dockTile()
# stop animation of the progress indicators to hide them
self.hourProgress.stopAnimation_(self)
self.dayProgress.stopAnimation_(self)
# set current datetime object and current timestamp
self.curr_timestamp = time.time()
self.reloadTime()
# update displayed hour
self.updateDisplayHour()
self.updateDisplayDay()
# init the help text
self.setHelpText()
# set the timer
self.pending = 0
self.pending_hours = []
self.setPyTimer()
# User PREFERENCES / SETTINGS
precious_settings.load()
# update settings states in the menu
self.settMenuActivate.setState_(precious_settings.activate_each_hour)
# check if it's the new version of the app
if precious_settings.is_new_version is True:
# bring sync window up
self.requireSync()
def requireSync(self):
"""
Brings sync window up and asks to re-sync
It's a cap until the re-sync is done
"""
self.syncWindow.makeKeyAndOrderFront_(self)
self.syncError.setStringValue_('Please log in to re-sync old data')
self.syncError.setHidden_(False)
self.syncAllFlag = True
def setHelpText(self):
"""
Reads help text from a faq.txt local file and puts it in the help form
TODO: Webview of the HTML page instead?
"""
try:
# open the file to read data
fw = open('faq.txt', 'r')
# update help text
self.helpText.setStringValue_(fw.read())
# close the file
fw.close
except IOError:
print '[File] File faq.txt was not found.'
def reloadTime(self):
"""
Takes current timestamp and updates the date/time data
"""
self.curr_time = datetime.fromtimestamp(self.curr_timestamp)
self.year = self.curr_time.year
self.month = self.curr_time.month
self.day = self.curr_time.day
self.hour = int(self.curr_time.strftime('%H')) # need a 24 hour
def updateDisplayHour(self):
"""
Updates the displayed date & hour in the interface
"""
self.hourLabel.setStringValue_(self.curr_time.strftime('%a %d %b, %I %p'))
self.dayLabel.setStringValue_(self.curr_time.strftime('%a %d %b'))
self.dayButton.setStringValue_(self.curr_time.strftime('%a %d %b'))
if self.activity:
self.hourField.setStringValue_(self.activity)
self.hourLabel.setTextColor_(NSColor.blackColor())
else:
self.hourField.setStringValue_('')
# if not self.productive and self.productive != 0:
self.hourLabel.setTextColor_(NSColor.redColor())
if self.productive or self.productive == 0:
self.hourSegment.setSelected_forSegment_(1, self.productive)
else:
self.hourSegment.setSelected_forSegment_(1, 1)
def updateDisplayDay(self):
"""
Updates the displayed date in the interface
"""
self.hourLabel.setStringValue_(self.curr_time.strftime('%a %d %b, %I %p'))
self.dayLabel.setStringValue_(self.curr_time.strftime('%a %d %b'))
self.dayButton.setAttributedTitle_(self.curr_time.strftime('%a %d %b'))
if self.reflection:
self.dayField.setStringValue_(self.reflection)
self.dayLabel.setTextColor_(NSColor.blackColor())
else:
self.dayField.setStringValue_('')
self.dayLabel.setTextColor_(NSColor.redColor())
def switchDate(self):
"""
Loads the hour & day data and calls display update
"""
# get the time data
self.reloadTime()
# load the data
self.clearData()
self.loadData(
year = self.curr_time.year,
month = self.curr_time.month,
day = self.curr_time.day,
hour = self.curr_time.hour)
# update the interface
self.updateDisplayDay()
self.updateDisplayHour()
def clearData(self):
"""
Sets default data for Day & Hour fields
"""
self.activity = None
self.reflection = None
self.productive = 1
def loadData(self, year = None, month = None, day = None, hour = None):
"""
Loads data for Day & Hour fields
"""
self.reflection, self.activity, self.productive = precious_data.load(year, month, day, hour)
####
# Timed things
def endOfHour(self):
print '[Timer] Timer routine called'
self.setPyTimer() # reset the timer
now = datetime.now()
if(now.minute == 59):
print '[Timer] End of hour'
# if activate each hour is On - bring the app to attention
if precious_settings.activate_each_hour is True:
# Bring app to top
NSApp.activateIgnoringOtherApps_(True)
# is this hour logged?
reflection, activity, productive = precious_data.load(year = now.year, month = now.month, day = now.day, hour = now.hour)
pending_hour = '{0}-{1}-{2}-{3}'.format(now.year, now.month, now.day, now.hour)
# if not - add it to pending hours and increase the amount of pending hours
if productive is None and pending_hour not in self.pending_hours:
print '[Timer] Increase pending hours'
self.pending += 1
self.pending_hours.append(pending_hour)
# if there are pending hours - update the badge
if self.pending > 0:
# Set badge icon to the current hour
self.badge.setBadgeLabel_(str(self.pending))
else:
self.badge.setBadgeLabel_(None)
# nc = NSNotificationCenter.defaultCenter()
# nc.postNotificationName_object_userInfo_('love_note', None, {'path':'xyz'})
    def setPyTimer(self):
        """
        Arms a one-shot 30-second background timer calling endOfHour.
        endOfHour re-arms it, so the check runs twice per minute (this
        guarantees at least one firing lands inside minute 59).
        """
        from threading import Timer
        # today = datetime.now() HERE WE NEED TO SET TIMER FOR APPROPRIATE TIME!
        Timer(30, self.endOfHour, ()).start() # 30 second one-shot timer
####
# Interface elements actions
    @objc.IBAction
    def productive_(self, sender):
        """
        Handler for the productivity segmented control: stores the chosen
        segment index, later persisted by submitHour_.
        """
        self.productive = sender.selectedSegment()
    @objc.IBAction
    def prevHour_(self, sender):
        """
        Steps the displayed hour one hour back and refreshes the UI.
        """
        # decrement time by one hour (3600 s)
        self.curr_timestamp -= 3600
        self.switchDate()
        print '[Action] prev hour'
    @objc.IBAction
    def nextHour_(self, sender):
        """
        Steps the displayed hour one hour forward and refreshes the UI.
        """
        # increment the time by one hour (3600 s)
        self.curr_timestamp += 3600
        self.switchDate()
        print '[Action] next hour'
    @objc.IBAction
    def prevDay_(self, sender):
        """
        Steps the displayed day one day back and refreshes the UI.
        """
        # decrement the time by one day (86400 s)
        self.curr_timestamp -= 86400
        self.switchDate()
        print '[Action] prev day'
    @objc.IBAction
    def nextDay_(self, sender):
        """
        Steps the displayed day one day forward and refreshes the UI.
        """
        # increment the time by one day (86400 s)
        self.curr_timestamp += 86400
        self.switchDate()
        print '[Action] next day'
# submit the data
@objc.IBAction
def submitHour_(self, sender):
"""
Submits the hour log
Removes the app icon badge
Makes the hour label black
Starts and stops the spinny thing
"""
# check if it's the new version of the app
if precious_settings.is_new_version is True:
# bring sync window up
self.requireSync()
# start the progress spin
self.hourProgress.startAnimation_(self)
# getting the text from text fields
self.activity = self.hourField.stringValue()
# self.reflection = self.dayField.stringValue()
# log the hour
precious_data.save(
type='hour',
productive=self.productive,
activity=self.activity,
year=self.curr_time.year,
month=self.curr_time.month,
day=self.curr_time.day,
hour=self.curr_time.hour)
# set the hour label colour to black
self.hourLabel.setTextColor_(NSColor.blackColor())
# go for the next hour
# self.curr_timestamp += 3600
# self.switchHour()
if '{0}-{1}-{2}-{3}'.format(self.curr_time.year,
self.curr_time.month,
self.curr_time.day,
self.curr_time.hour) in self.pending_hours:
print '[Timer] Decrease pending hours'
self.pending -= 1
if self.pending > 0:
label = str(self.pending)
else:
label = None
# update the badge
self.badge.setBadgeLabel_(label)
# stop the progress spin
self.hourProgress.stopAnimation_(self)
    # submit the day data
    @objc.IBAction
    def submitDay_(self, sender):
        """
        Submits the day reflection for the currently displayed day.

        Persists the reflection text, turns the day label black and
        animates the progress spinner. On new app versions the sync
        (login) window is raised first.
        """
        # check if it's the new version of the app
        if precious_settings.is_new_version is True:
            # bring sync window up
            self.requireSync()
        # start the progress spin
        self.dayProgress.startAnimation_(self)
        # getting the text from text field
        self.reflection = self.dayField.stringValue()
        precious_data.save(
            type='day',
            reflection=self.reflection,
            year=self.curr_time.year,
            month=self.curr_time.month,
            day=self.curr_time.day)
        # set the day label colour to black
        self.dayLabel.setTextColor_(NSColor.blackColor())
        # stop the progress spin
        self.dayProgress.stopAnimation_(self)
@objc.IBAction
def authenticate_(self, sender):
"""
Authenticates user
Syncs the Hour & Day data with the web API if authenticated
Shows errors or success message
Starts and stops the spinny thing
"""
# play intro sound
# sound = NSSound.soundNamed_('Frog')
# sound.play()
# start the spin
self.syncProgress.startAnimation_(self)
# hide the stats and result
self.syncError.setHidden_(True)
# self.statsButton.setEnabled_(False)
self.statsButton.setHidden_(True)
email = self.usernameField.stringValue()
password = self.passwordField.stringValue()
auth_success = False
# print email
try:
user.authenticate(
email=email,
password=password)
auth_success = True
except ValueError, e:
print '[Action:Error] Halt auth flow'
print e
self.syncError.setTextColor_(NSColor.redColor())
self.syncError.setStringValue_(str(e))
self.syncError.setHidden_(False)
# stop the spin
self.syncProgress.stopAnimation_(self)
# if authenticated - sync data
if user.token is not None and auth_success:
word = precious_data.sync(all=self.syncAllFlag)
try:
# precious_data.sync()
# success!
self.syncError.setTextColor_(NSColor.blackColor())
self.syncError.setStringValue_('{0} synced.'.format(word))
self.syncError.setHidden_(False)
# play success sound
sound = NSSound.soundNamed_('Pop')
sound.play()
# self.statsButton.setEnabled_(True)
self.statsButton.setHidden_(False)
# stop the spin
self.syncProgress.stopAnimation_(self)
# clear the syncAll flag if it was set
if self.syncAllFlag is True:
self.switchDate() # also update the display
self.syncAllFlag = False
except Exception, e:
print '[Action:Error] Could not sync: {0}'.format(e)
self.syncError.setTextColor_(NSColor.redColor())
self.syncError.setStringValue_('Could not sync.')
self.syncError.setHidden_(False)
# stop the spin
self.syncProgress.stopAnimation_(self)
    @objc.IBAction
    def signUp_(self, sender):
        """
        Registers a new account from the sign-up form fields.

        On success shows a confirmation message, opens the sync (login)
        window and pre-fills the e-mail field; on failure shows per-field
        and general error labels. Animates the progress spinner.

        NOTE(review): the except clause indexes the ValueError as e[0]
        (e-mail error) and e[1] (username error) - presumably user.create
        raises ValueError with a 2-tuple of field errors; confirm.
        """
        # start the spin
        self.signUpProgress.startAnimation_(self)
        email = self.signUpEmailField.stringValue()
        username = self.signUpUsernameField.stringValue()
        password = self.signUpPasswordField.stringValue()
        # hide any stale error labels from a previous attempt
        self.signUpEmailError.setHidden_(True)
        self.signUpUsernameError.setHidden_(True)
        self.signUpError.setHidden_(True)
        print email
        try:
            user.create(
                email=email,
                username=username,
                password=password
            )
            # auth after
            # self.user.authenticate(
            #     email=email,
            #     password=password
            # )
            # self.user.email = email
            # self.user.password = password
            # stop the spin
            self.signUpProgress.stopAnimation_(self)
            # tell that user needs to confirm his e-mail
            print '[Action] User registered'
            self.signUpError.setStringValue_('Done! A confirmation request has been sent to your e-mail.')
            self.signUpError.setTextColor_(NSColor.blackColor())
            self.signUpError.setHidden_(False)
            # minimize the window and show login
            # self.signUpWindow.close()
            self.syncWindow.makeKeyAndOrderFront_(self)
            # fill in the email field
            self.usernameField.setStringValue_(email)
            # self.passwordField.setStringValue(password)
        except ValueError, e:
            print e
            # email error
            if e[0]:
                # self.signUpEmailField.setTextColor_(NSColor.redColor())
                self.signUpEmailError.setStringValue_(str(e[0]))
                self.signUpEmailError.setHidden_(False)
            # username error
            if e[1]:
                # self.signUpUsernameField.setTextColor_(NSColor.redColor())
                self.signUpUsernameError.setStringValue_(str(e[1]))
                self.signUpUsernameError.setHidden_(False)
            # general error conclusion
            print '[Action:Error] Could not create a new account.'
            self.signUpError.setStringValue_('Could not create an account.')
            self.signUpError.setTextColor_(NSColor.redColor())
            self.signUpError.setHidden_(False)
        # stop the spin
        self.signUpProgress.stopAnimation_(self)
    @objc.IBAction
    def syncAll_(self, sender):
        """
        Flags a full (rather than incremental) sync and raises the sync
        window; authenticate_ consumes and clears the flag.
        """
        self.syncAllFlag = True
        self.syncWindow.makeKeyAndOrderFront_(self)
####
# Settings
    @objc.IBAction
    def settActivate_(self, sender):
        """
        Toggles the 'activate each hour' preference, persists it and
        mirrors the new state onto the settings menu checkbox.
        """
        precious_settings.activate_each_hour = not precious_settings.activate_each_hour
        precious_settings.save()
        self.settMenuActivate.setState_(precious_settings.activate_each_hour)
####
# Links
    @objc.IBAction
    def openStats_(self, sender):
        """
        Opens the stats web page (SITE_URL) in the default browser.
        Assumes the user is already logged in on the web app.
        """
        print '[WEB] Opening stats...'
        sharedWorkspace = NSWorkspace.sharedWorkspace()
        sharedWorkspace.openURL_(NSURL.URLWithString_(SITE_URL))
    @objc.IBAction
    def openWebApp_(self, sender):
        """
        Opens the precious web app main page in the default browser.
        """
        print '[WEB] Opening web app...'
        sharedWorkspace = NSWorkspace.sharedWorkspace()
        sharedWorkspace.openURL_(NSURL.URLWithString_('http://www.antonvino.com/precious/'))
    @objc.IBAction
    def openPasswordReset_(self, sender):
        """
        Opens the web app's password-reset page in the default browser.
        Called when the user forgot their password on the login window.
        """
        print '[WEB] Opening web app password reset...'
        sharedWorkspace = NSWorkspace.sharedWorkspace()
        sharedWorkspace.openURL_(NSURL.URLWithString_(SITE_URL + 'accounts/password-reset/'))
    @objc.IBAction
    def openPortfolio_(self, sender):
        """
        Opens the author's portfolio site in the default browser.
        """
        print '[WEB] Opening portfolio...'
        sharedWorkspace = NSWorkspace.sharedWorkspace()
        sharedWorkspace.openURL_(NSURL.URLWithString_('http://www.antonvino.com'))
if __name__ == "__main__":
    app = NSApplication.sharedApplication()
    # Initiate the controller with a XIB
    viewController = PreciousController.alloc().initWithWindowNibName_("Precious")
    # module-level singletons referenced directly by the controller methods
    user = PreciousUser()
    precious_data = PreciousData()
    precious_settings = PreciousSettings()
    # Show the window
    viewController.showWindow_(viewController)
    # viewController.badge = app.dockTile()
    # viewController.badge.setBadgeLabel_('1')
    # Bring app to top
    NSApp.activateIgnoringOtherApps_(True)
    # hand control to the Cocoa event loop (blocks until the app quits)
    from PyObjCTools import AppHelper
    AppHelper.runEventLoop()
| 37.511171
| 151
| 0.529005
|
4a02f0ec984296e39c25814d45231f174432859d
| 9,602
|
py
|
Python
|
todone/backend/tests/test_db.py
|
safnuk/todone
|
e6373b69b8159cffb8d1b7ed7b1f5810c4e1e7df
|
[
"Apache-2.0"
] | null | null | null |
todone/backend/tests/test_db.py
|
safnuk/todone
|
e6373b69b8159cffb8d1b7ed7b1f5810c4e1e7df
|
[
"Apache-2.0"
] | null | null | null |
todone/backend/tests/test_db.py
|
safnuk/todone
|
e6373b69b8159cffb8d1b7ed7b1f5810c4e1e7df
|
[
"Apache-2.0"
] | null | null | null |
from unittest import skip, TestCase
from unittest.mock import patch, Mock
import peewee
from todone.backend import DEFAULT_FOLDERS
from todone.backend.db import (
Database, Folder, SavedList,
Todo
)
from todone.backend.db import ListItem, MOST_RECENT_SEARCH
from todone.tests.base import DB_Backend
class TestTodoModel(DB_Backend):
    """Unit tests for the Todo model: fields, defaults and queries."""

    def test_class_is_importable(self):
        t = Todo(action='Blank')
        self.assertEqual(type(t), Todo)

    def test_todo_stores_action(self):
        t = Todo(action='New todo item')
        self.assertEqual(t.action, 'New todo item')
        t.save()

    def test_todo_raises_with_empty_action(self):
        # an empty action string is rejected at the database layer
        with self.assertRaises(peewee.IntegrityError):
            t = Todo(action='')
            t.save()

    def test_todo_stores_valid_folder(self):
        for folder in [
            x for x in DEFAULT_FOLDERS['folders']
        ]:
            t = Todo(action='Test todo', folder=folder)
            t.save()
            self.assertEqual(t.folder.name, folder)

    def test_todo_default_folder_is_inbox(self):
        t = Todo(action='Test')
        t.save()
        self.assertEqual(
            t.folder.name, DEFAULT_FOLDERS['inbox'])

    def test_active_todos_restricts_select(self):
        # one todo per default folder; active_todos() must return exactly
        # those in the folders listed under DEFAULT_FOLDERS['active']
        todos = {}
        for n, folder in enumerate(
            DEFAULT_FOLDERS['folders']
        ):
            todos[folder] = Todo.create(
                action='Item {}'.format(n), folder=folder
            )
        active = Todo.active_todos()
        active_todos = [t for t in active]
        test_active = DEFAULT_FOLDERS['active']
        test_inactive = [
            x for x in DEFAULT_FOLDERS['folders']
            if x not in test_active
        ]
        for folder in test_inactive:
            self.assertNotIn(todos[folder], active_todos)
        for folder in test_active:
            self.assertIn(todos[folder], active_todos)

    @skip
    def test_todo_raises_with_invalid_folder(self):
        # TODO: @skip carries no reason - folder validation is apparently
        # not enforced yet; un-skip once it is
        with self.assertRaises(ValueError):
            t = Todo(action='Test', folder='invalid')
            t.save()

    def test_get_projects_parses_folder_and_todo(self):
        t1 = Todo.create(action='Todo 1', folder='next')
        t2 = Todo.create(action='Todo 2', folder='today')
        query = Todo.get_projects('next/Todo')
        self.assertIn(t1, query)
        self.assertNotIn(t2, query)
        # the ' next/ todo' query checks case-insensitive, space-tolerant matching
        query = Todo.get_projects(' next/ todo')
        self.assertIn(t1, query)
        self.assertNotIn(t2, query)
        query = Todo.get_projects('Todo')
        self.assertIn(t1, query)
        self.assertIn(t2, query)
class TestFolder(DB_Backend):
    """Unit tests for the Folder model: creation, rename and removal."""

    def test_folder_raises_with_empty_name(self):
        with self.assertRaises(peewee.IntegrityError):
            Folder.create(name='')

    def test_folder_raises_with_non_unique_name(self):
        Folder.create(name='name')
        with self.assertRaises(peewee.IntegrityError):
            Folder.create(name='name')

    def test_new_creates_valid_name(self):
        # compare against the pre-existing count: DB_Backend seeds defaults
        old_list_length = len(Folder.select())
        Folder.new('test')
        Folder.new('folder')
        self.assertEqual(len(Folder.select()) - old_list_length, 2)
        f1 = Folder.get(Folder.name == 'test')
        self.assertEqual(f1.name, 'test')
        f2 = Folder.get(Folder.name == 'folder')
        self.assertEqual(f2.name, 'folder')

    def test_rename_changes_folder_name(self):
        Folder.create(name='test')
        Folder.rename('test', 'foo')
        Folder.get(Folder.name == 'foo')  # does not raise
        with self.assertRaises(peewee.DoesNotExist):
            Folder.get(Folder.name == 'test')

    def test_rename_renames_folder_fields_for_todos(self):
        # renaming a folder must follow through to todos referencing it
        Folder.create(name='test')
        Todo.create(action='Todo 1', folder='test')
        Folder.rename('test', 'foo')
        t1 = Todo.get(Todo.action == 'Todo 1')
        self.assertEqual(t1.folder.name, 'foo')

    def test_delete_removes_folder(self):
        Folder.remove('today')
        with self.assertRaises(peewee.DoesNotExist):
            Folder.get(name='today')

    def test_delete_moves_subtodos_to_default_inbox(self):
        # removing a folder must not orphan its todos
        Todo.create(action='Foo', folder='today')
        Todo.create(action='Bar', folder='today')
        Folder.remove('today')
        for t in Todo.select():
            self.assertEqual(t.folder.name, 'inbox')
class TestSavedList(DB_Backend):
    """Unit tests for SavedList and its named-search persistence helpers."""

    def test_class_is_importable(self):
        t = SavedList(name='test')
        self.assertEqual(type(t), SavedList)

    def test_savedlist_raises_with_empty_name(self):
        with self.assertRaises(peewee.IntegrityError):
            SavedList.create(name='')

    def test_raises_with_duplicate_name(self):
        SavedList.create(name='test')
        with self.assertRaises(peewee.IntegrityError):
            SavedList.create(name='test')

    def test_get_most_recent_does_as_advertised(self):
        # first call creates the MOST_RECENT_SEARCH list, second reuses it
        s1 = SavedList.get_most_recent()
        self.assertEqual(s1.name, MOST_RECENT_SEARCH)
        s2 = SavedList.get_most_recent()
        self.assertEqual(s2.name, MOST_RECENT_SEARCH)

    def test_get_todos_in_list_returns_empty_list_if_list_not_exists(self):
        s = SavedList.create(name='test')
        t = Todo.create(action='Test todo')
        ListItem.create(savedlist=s, todo=t)
        # querying a non-existent list name is not an error
        self.assertEqual(SavedList.get_todos_in_list('foo'), [])

    def test_get_todos_in_list_returns_list_todos(self):
        s = SavedList.create(name='test')
        t1 = Todo.create(action='Test todo')
        t2 = Todo.create(action='Test another todo')
        ListItem.create(savedlist=s, todo=t1)
        ListItem.create(savedlist=s, todo=t2)
        self.assertEqual(SavedList.get_todos_in_list('test'), [t1, t2])

    def test_get_todos_in_list_defaults_to_MOST_RECENT_SEARCH(self):
        # an empty name falls back to the most-recent-search list
        s1 = SavedList.create(name='test')
        s2 = SavedList.create(name=MOST_RECENT_SEARCH)
        t1 = Todo.create(action='Test todo')
        t2 = Todo.create(action='Test another todo')
        ListItem.create(savedlist=s1, todo=t1)
        ListItem.create(savedlist=s2, todo=t2)
        self.assertEqual(SavedList.get_todos_in_list(''), [t2])

    def test_delete_items_erases_all_items_in_list(self):
        s = SavedList.create(name='test')
        t1 = Todo.create(action='Test todo')
        t2 = Todo.create(action='Test another todo')
        ListItem.create(savedlist=s, todo=t1)
        ListItem.create(savedlist=s, todo=t2)
        s.delete_items()
        self.assertEqual(SavedList.get_todos_in_list('test'), [])

    def test_save_search_with_empty_name_does_not_save(self):
        t1 = Todo.create(action='Test todo')
        t2 = Todo.create(action='Test another todo')
        SavedList.save_search('', [t1, t2])
        self.assertEqual(len(SavedList.select()), 0)

    def test_save_search_clears_old_items(self):
        # save_search replaces the list contents, it does not append
        s = SavedList.create(name='test')
        t1 = Todo.create(action='Test todo')
        t2 = Todo.create(action='Test another todo')
        ListItem.create(savedlist=s, todo=t1)
        SavedList.save_search('test', [t2])
        self.assertNotIn(t1, SavedList.get_todos_in_list('test'))

    def test_save_search_saves_all_passed_todos(self):
        t1 = Todo.create(action='Test todo')
        t2 = Todo.create(action='Test another todo')
        SavedList.save_search('test', [t1, t2])
        self.assertEqual(
            SavedList.get_todos_in_list('test'), [t1, t2])

    def test_save_most_recent_search_saves_all_passed_todos(self):
        t1 = Todo.create(action='Test todo')
        t2 = Todo.create(action='Test another todo')
        SavedList.save_most_recent_search([t1, t2])
        self.assertEqual(
            SavedList.get_todos_in_list(MOST_RECENT_SEARCH), [t1, t2])
class TestListItem(DB_Backend):
    """Constraint and ordering tests for the ListItem join model."""

    def test_raises_without_list(self):
        """A ListItem cannot be saved without its SavedList side."""
        orphan_todo = Todo.create(action='Test todo')
        with self.assertRaises(peewee.IntegrityError):
            ListItem.create(todo=orphan_todo)

    def test_raises_without_todo(self):
        """A ListItem cannot be saved without its Todo side."""
        saved_list = SavedList.create(name='List')
        with self.assertRaises(peewee.IntegrityError):
            ListItem.create(savedlist=saved_list)

    def test_listitems_ordered_by_insertion_order(self):
        """Items read back from a list preserve the order they were added."""
        saved_list = SavedList.create(name='List')
        todos = [
            Todo.create(action='Todo 1'),
            Todo.create(action='Another todo'),
            Todo.create(action='Random todo', folder='today'),
        ]
        for todo in todos:
            ListItem.create(savedlist=saved_list, todo=todo)
        items = ListItem.select().where(ListItem.savedlist == saved_list)
        for item, todo in zip(items, todos):
            self.assertEqual(item.todo, todo)
class TestDatabase(TestCase):
    """Guard-clause tests for Database connect/update (no real DB touched)."""

    @patch('todone.backend.db.config.settings')
    @patch('todone.backend.db.Database.database')
    def test_database_should_ignore_connect_request_to_empty_db_name(
        self, mock_database, mock_settings
    ):
        mock_database.init = Mock()
        mock_database.connect = Mock()
        # an empty db name means "not configured": connect must be a no-op
        mock_settings.__getitem__.return_value = {'name': ''}
        Database.connect()
        mock_database.init.assert_not_called()
        mock_database.connect.assert_not_called()

    @patch('todone.backend.db.config.settings')
    @patch('todone.backend.db.Database.close')
    def test_update_should_not_close_uninitialized_db(
            self, mock_close, mock_settings):
        # likewise, update must not try to close an uninitialized database
        mock_settings.__getitem__.return_value = {'name': ''}
        Database.update()
        mock_close.assert_not_called()
| 36.097744
| 75
| 0.649969
|
4a02f3623996842d670ca0ca0c9a434c42cff043
| 755
|
py
|
Python
|
python/collectAblation.py
|
jstraub/tdp
|
dcab53662be5b88db1538cf831707b07ab96e387
|
[
"MIT-feh"
] | 1
|
2017-10-17T19:25:47.000Z
|
2017-10-17T19:25:47.000Z
|
python/collectAblation.py
|
jstraub/tdp
|
dcab53662be5b88db1538cf831707b07ab96e387
|
[
"MIT-feh"
] | 1
|
2018-05-02T06:04:06.000Z
|
2018-05-02T06:04:06.000Z
|
python/collectAblation.py
|
jstraub/tdp
|
dcab53662be5b88db1538cf831707b07ab96e387
|
[
"MIT-feh"
] | 5
|
2017-09-17T18:46:20.000Z
|
2019-03-11T12:52:57.000Z
|
# Collect per-run ablation results: walk the working directory for the
# single-float CSVs each run writes (avgFrameTime, trajectoryError,
# surfRegErr) and print them keyed by run-directory name.
# NOTE: Python 2 script (print statements, list-returning dict.keys()).
import numpy as np  # NOTE(review): appears unused here - confirm before removing
import os

times = dict()
errs = dict()
surfRegErrs = dict()
for root, dirs, files in os.walk("."):
  # d is the last path component of the run directory
  r, d = os.path.split(root)
  for f in files:
    if f == "avgFrameTime.csv":
      with open(os.path.join(root,f)) as i:
        # each file holds one float on its first line (strip the newline)
        times[os.path.split(d)[1]] = float(i.readline()[:-1])
    if f == "trajectoryError.csv":
      with open(os.path.join(root,f)) as i:
        errs[os.path.split(d)[1]] = float(i.readline()[:-1])
    if f == "surfRegErr.csv":
      with open(os.path.join(root,f)) as i:
        surfRegErrs[os.path.split(d)[1]] = float(i.readline()[:-1])

# NOTE(review): times[key] raises KeyError if a run wrote
# trajectoryError.csv but no avgFrameTime.csv - confirm that is intended.
keys = errs.keys()
keys.sort()
for key in keys:
  print key, errs[key], times[key]
keys = surfRegErrs.keys()
keys.sort()
for key in keys:
  print key, surfRegErrs[key]
| 26.964286
| 67
| 0.609272
|
4a02f4be78b4e56bce9231b149f6329364adb294
| 1,527
|
py
|
Python
|
test/rules/functions/test_find_in_map.py
|
LukasMusebrink/cfn-python-lint
|
bc3e6e5a9453f8b532a05f87e51d095635dd3b9f
|
[
"MIT-0"
] | null | null | null |
test/rules/functions/test_find_in_map.py
|
LukasMusebrink/cfn-python-lint
|
bc3e6e5a9453f8b532a05f87e51d095635dd3b9f
|
[
"MIT-0"
] | null | null | null |
test/rules/functions/test_find_in_map.py
|
LukasMusebrink/cfn-python-lint
|
bc3e6e5a9453f8b532a05f87e51d095635dd3b9f
|
[
"MIT-0"
] | 1
|
2019-12-17T14:27:07.000Z
|
2019-12-17T14:27:07.000Z
|
"""
Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from cfnlint.rules.functions.FindInMap import FindInMap # pylint: disable=E0401
from .. import BaseRuleTestCase
class TestRulesFindInMap(BaseRuleTestCase):
    """Tests for the cfn-lint Fn::FindInMap rule."""

    def setUp(self):
        """Register the FindInMap rule with the shared rule collection."""
        super(TestRulesFindInMap, self).setUp()
        self.collection.register(FindInMap())

    def test_file_positive(self):
        """Known-good templates must produce no rule matches."""
        self.helper_file_positive()

    def test_file_negative(self):
        """The bad FindInMap template must produce exactly 5 matches."""
        self.helper_file_negative('templates/bad/functions_findinmap.yaml', 5)
| 43.628571
| 87
| 0.740013
|
4a02f51704e2a05ff3ba40f85d210180cb2b8e32
| 675
|
py
|
Python
|
tests/extmod/ujson_dumps.py
|
84KaliPleXon3/micropython-esp32
|
a64dc82742749cf4a4bbe5688dde05122fb38f56
|
[
"MIT"
] | 8
|
2017-01-08T19:45:01.000Z
|
2020-09-07T04:39:10.000Z
|
tests/extmod/ujson_dumps.py
|
84KaliPleXon3/micropython-esp32
|
a64dc82742749cf4a4bbe5688dde05122fb38f56
|
[
"MIT"
] | null | null | null |
tests/extmod/ujson_dumps.py
|
84KaliPleXon3/micropython-esp32
|
a64dc82742749cf4a4bbe5688dde05122fb38f56
|
[
"MIT"
] | 2
|
2017-07-27T19:45:05.000Z
|
2020-08-02T19:00:33.000Z
|
try:
    import ujson as json
except ImportError:
    try:
        import json
    except ImportError:
        # Neither JSON implementation is available on this port: skip.
        import sys
        print("SKIP")
        sys.exit()

# Exercise json.dumps over each scalar/container shape, printing one result
# per line in the same order as before (so any expected-output file matches).
for value in (
    False,
    True,
    None,
    1,
    'abc',
    '\x00\x01\x7e',
    [],
    [1],
    [1, 2],
    [1, True],
    (),
    (1,),
    (1, 2),
    (1, (2, 3)),
    {},
    {"a": 1},
    {"a": (2, [3, None])},
    '"quoted"',
    'space\n\r\tspace',
):
    print(json.dumps(value))
| 22.5
| 37
| 0.631111
|
4a02f6c4427b1f8cc3b8a15fdf9d91034fcfcdda
| 1,326
|
py
|
Python
|
pyaz/batch/application/summary/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/batch/application/summary/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/batch/application/summary/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | 1
|
2022-02-03T09:12:01.000Z
|
2022-02-03T09:12:01.000Z
|
'''
View a summary of Batch application packages.
'''
from .... pyaz_utils import _call_az
def list(account_endpoint=None, account_key=None, account_name=None):
    '''
    Lists all of the applications available in the specified account.

    Optional Parameters:
    - account_endpoint -- Batch service endpoint. Alternatively, set by environment variable: AZURE_BATCH_ENDPOINT
    - account_key -- Batch account key. Alternatively, set by environment variable: AZURE_BATCH_ACCESS_KEY
    - account_name -- Batch account name. Alternatively, set by environment variable: AZURE_BATCH_ACCOUNT
    '''
    # locals() is the argument payload: parameter names map 1:1 onto `az`
    # CLI flags, so do not rename parameters or add local variables before
    # this call. (The name `list` shadows the builtin deliberately - it
    # mirrors the `az batch application summary list` subcommand.)
    return _call_az("az batch application summary list", locals())
def show(application_id, account_endpoint=None, account_key=None, account_name=None):
    '''
    Gets information about the specified application.

    Required Parameters:
    - application_id -- The ID of the Application.

    Optional Parameters:
    - account_endpoint -- Batch service endpoint. Alternatively, set by environment variable: AZURE_BATCH_ENDPOINT
    - account_key -- Batch account key. Alternatively, set by environment variable: AZURE_BATCH_ACCESS_KEY
    - account_name -- Batch account name. Alternatively, set by environment variable: AZURE_BATCH_ACCOUNT
    '''
    # locals() is the argument payload: parameter names map 1:1 onto `az`
    # CLI flags, so do not rename parameters or add locals before this call.
    return _call_az("az batch application summary show", locals())
| 41.4375
| 114
| 0.757919
|
4a02f75b90396f6adbe2b8207d9f967f8a777253
| 432
|
py
|
Python
|
problems/construct-quad-tree/construct_quad_tree.py
|
jianpingbadao/leetcode
|
0ddead0b7820c05e353e08c551418fc8e318ee96
|
[
"MIT"
] | 198
|
2018-12-14T11:11:21.000Z
|
2021-11-22T17:31:48.000Z
|
problems/construct-quad-tree/construct_quad_tree.py
|
rohini-raja/leetcode
|
e4250e67a76cf26813fc7b7bfef523c427a2b928
|
[
"MIT"
] | 779
|
2018-12-15T11:04:06.000Z
|
2021-11-19T08:42:40.000Z
|
problems/construct-quad-tree/construct_quad_tree.py
|
rohini-raja/leetcode
|
e4250e67a76cf26813fc7b7bfef523c427a2b928
|
[
"MIT"
] | 85
|
2019-01-10T02:20:04.000Z
|
2021-11-26T08:01:42.000Z
|
#!/usr/bin/env python
"""
# Definition for a QuadTree node.
class Node:
def __init__(self, val, isLeaf, topLeft, topRight, bottomLeft, bottomRight):
self.val = val
self.isLeaf = isLeaf
self.topLeft = topLeft
self.topRight = topRight
self.bottomLeft = bottomLeft
self.bottomRight = bottomRight
"""
class Solution:
def construct(self, grid: List[List[int]]) -> 'Node':
| 27
| 80
| 0.62963
|
4a02f7fc3001646c42e34cd8c4c9a0cf2369e7e4
| 2,508
|
py
|
Python
|
__Lagou.py
|
tosone/Crawler
|
eb3bbd9583aa0fcb55c723f77097e38bc045b332
|
[
"MIT"
] | null | null | null |
__Lagou.py
|
tosone/Crawler
|
eb3bbd9583aa0fcb55c723f77097e38bc045b332
|
[
"MIT"
] | null | null | null |
__Lagou.py
|
tosone/Crawler
|
eb3bbd9583aa0fcb55c723f77097e38bc045b332
|
[
"MIT"
] | null | null | null |
import logging
import math
import re
import time
import uuid
from datetime import datetime
from os import path
import requests
from config import Config
class Run(Config):
    """Crawler for lagou.com company listings.

    Instantiating the class immediately starts an endless crawl loop
    (``main`` is invoked from ``__init__``): it pages through the company
    JSON endpoint and upserts every company record into MongoDB keyed by
    ``companyId``.
    """

    def __init__(self):
        super(Run, self).__init__()
        self.collection = self.Collection(__file__)
        self.exploitUrl = 'https://www.lagou.com/gongsi/0-0-0.json'
        self.protocal = 'https'
        self.host = 'www.lagou.com'
        # Conservative paging defaults; both are overwritten from the first
        # successful server response inside main().
        self.totalCount = 2000
        self.pageSize = 16
        self.key = 'companyId'
        self.main()

    def cookie(self):
        """Return a synthetic Cookie header with a live JSESSIONID.

        Fetches the lagou.com landing page until Set-Cookie yields a
        session id, then pairs it with a fabricated user_trace_token
        derived from the current timestamp plus a random UUID.
        """
        # Retry in a loop (previously this recursed on failure, which could
        # exhaust the stack during a long outage).
        while True:
            try:
                res = re.search(
                    "^JSESSIONID=([0-9A-Z]{32}).*$",
                    requests.get("https://www.lagou.com").headers["Set-Cookie"]
                ).group(1)
                break
            except Exception:
                time.sleep(self.RandomLimit())
        now = datetime.now()
        year = str(now.year)
        month = str(now.month).rjust(2, "0")
        day = str(now.day).rjust(2, "0")
        hour = str(now.hour).rjust(2, "0")
        minute = str(now.minute).rjust(2, "0")
        second = str(now.second).rjust(2, "0")
        return "JSESSIONID=" + res + "; user_trace_token=" + year + month + day + hour + minute + second + "-" + "".join(str(uuid.uuid4()).split("-"))

    def main(self):
        """Crawl company pages forever, upserting each record by companyId."""
        cookie = self.cookie()
        while True:
            # Pages are 1-indexed; the "+ 1" includes the final (possibly
            # partial) page, which the original range() stopped short of.
            for num in range(1, int(math.ceil(self.totalCount / self.pageSize)) + 1):
                try:
                    header = self.Header(self.protocal, self.host)
                    # Refresh the session cookie every 10 pages.
                    if num % 10 == 0:
                        cookie = self.cookie()
                    header["Cookie"] = cookie
                    requestBody = {"pn": num, "first": "false", "sortField": 0, "havemark": 0}
                    res = requests.post(self.exploitUrl, headers=header, timeout=60, data=requestBody)
                    response = res.json()
                    # Trust the server's reported paging figures from now on.
                    self.totalCount = int(response.get("totalCount"))
                    self.pageSize = int(response.get("pageSize"))
                    for data in response.get("result"):
                        logging.info(data["companyFullName"])
                        self.MGO[self.collection].find_one_and_replace({'companyId': data["companyId"]}, data, upsert=True)
                except Exception as e:
                    # Reset paging to the defaults so one bad response cannot
                    # wedge the loop with totalCount/pageSize of 0.
                    self.totalCount = 2000
                    self.pageSize = 16
                    logging.error("Post " + str(num) + " with error.")
                    logging.error(e)
                    time.sleep(self.RandomLimit())
| 38
| 150
| 0.534689
|
4a02f8b98b505740aca5fe56cb6518890444d120
| 692
|
py
|
Python
|
cvxpy/cvxcore/tests/python/364A_scripts/portfolio.py
|
jasondark/cvxpy
|
56aaa01b0e9d98ae5a91a923708129a7b37a6f18
|
[
"ECL-2.0",
"Apache-2.0"
] | 38
|
2015-10-16T16:55:28.000Z
|
2022-02-16T05:06:01.000Z
|
cvxpy/cvxcore/tests/python/364A_scripts/portfolio.py
|
h-vetinari/cvxpy
|
86307f271819bb78fcdf64a9c3a424773e8269fa
|
[
"ECL-2.0",
"Apache-2.0"
] | 28
|
2015-09-16T16:33:23.000Z
|
2021-11-23T07:31:44.000Z
|
cvxpy/cvxcore/tests/python/364A_scripts/portfolio.py
|
h-vetinari/cvxpy
|
86307f271819bb78fcdf64a9c3a424773e8269fa
|
[
"ECL-2.0",
"Apache-2.0"
] | 21
|
2015-09-16T14:56:16.000Z
|
2022-02-16T05:06:03.000Z
|
# Worst-case covariance (EE364A portfolio exercise): find the covariance
# matrix Sigma - consistent with the fixed variances and the known signs of
# selected correlations - that maximizes the risk x' Sigma x of portfolio x.
import numpy as np
from cvxpy import *
import matplotlib.pyplot as pyplot
import time

TIME = 0
ANSWERS = []
# Sigma is a 4x4 positive-semidefinite variable
Sigma = Semidef(4)
# fixed portfolio weights
x = np.array([.1, .2, -.05, .1])

# symmetry, sign constraints on selected off-diagonal entries, and the
# known variances on the diagonal
constraints = [Sigma == Sigma.T]
constraints += [Sigma[0,1] >= 0, Sigma[0,2] >= 0,\
 Sigma[1,2] <= 0, Sigma[2,3] <= 0, Sigma[1,3]<=0,\
 Sigma[0,0] == .2, Sigma[1,1] == .1, Sigma[2,2] == .3,\
 Sigma[3,3] == .1]

# maximize the quadratic risk over the feasible covariance set
objective = Maximize(quad_form(x,Sigma))
prob = Problem(objective, constraints)
tic = time.time()
risk = prob.solve()
toc = time.time()
TIME += toc - tic
ANSWERS.append(risk)
pass #print "Risk:, ", risk
pass #print "Sigma: ", Sigma.value
pass #print "Diagonal risk: ", x.T.dot(np.diag(np.diag(Sigma.value)).dot(x))
| 20.352941
| 76
| 0.622832
|
4a02f9ee7228814bae991148bde06837f0dc5bb5
| 5,825
|
py
|
Python
|
tiled/trees/utils.py
|
dylanmcreynolds/tiled
|
a77cf233ff4827ac03faa66448b29a84e3de02bf
|
[
"BSD-3-Clause"
] | 1
|
2021-02-09T20:27:23.000Z
|
2021-02-09T20:27:23.000Z
|
tiled/trees/utils.py
|
dylanmcreynolds/tiled
|
a77cf233ff4827ac03faa66448b29a84e3de02bf
|
[
"BSD-3-Clause"
] | 1
|
2021-02-15T18:24:41.000Z
|
2021-02-15T18:24:41.000Z
|
tiled/trees/utils.py
|
dylanmcreynolds/tiled
|
a77cf233ff4827ac03faa66448b29a84e3de02bf
|
[
"BSD-3-Clause"
] | 2
|
2021-02-06T18:02:24.000Z
|
2021-02-10T19:05:03.000Z
|
import operator
from ..utils import Sentinel
class IndexCallable:
    """Adapter that exposes a one-argument callable through ``obj[key]``.

    >>> half = IndexCallable(lambda x: x + 1)
    >>> half[3]
    4

    Vendored from dask
    """

    __slots__ = ("fn",)

    def __init__(self, fn):
        # the wrapped callable; invoked once per subscript access
        self.fn = fn

    def __getitem__(self, key):
        return self.fn(key)
class IndexersMixin:
    """
    Provides slicable attributes keys_indexer, items_indexer, values_indexer.

    Must be mixed in with a class that defines methods:

    * ``_item_by_index``
    * ``_keys_slice``
    * ``_items_slice``
    """

    __slots__ = ("keys_indexer", "items_indexer", "values_indexer")

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.keys_indexer = IndexCallable(self._keys_indexer)
        self.items_indexer = IndexCallable(self._items_indexer)
        self.values_indexer = IndexCallable(self._values_indexer)

    def _resolve(self, index_or_slice, by_index, by_slice):
        """
        Dispatch an ``obj[...]`` argument to the int or slice handler.

        A negative integer index is normalized to a non-negative offset
        plus direction -1, matching the convention expected by
        ``_item_by_index``.  Factors out the boilerplate previously
        repeated verbatim across the three indexers.
        """
        if isinstance(index_or_slice, int):
            if index_or_slice < 0:
                return by_index(-1 - index_or_slice, -1)
            return by_index(index_or_slice, 1)
        elif isinstance(index_or_slice, slice):
            start, stop, direction = slice_to_interval(index_or_slice)
            return by_slice(start, stop, direction)
        else:
            raise TypeError(
                f"{index_or_slice} must be an int or slice, not {type(index_or_slice)}"
            )

    def _keys_indexer(self, index_or_slice):
        """Return the key at an int index, or a list of keys for a slice."""
        return self._resolve(
            index_or_slice,
            by_index=lambda i, direction: self._item_by_index(i, direction)[0],
            by_slice=lambda start, stop, direction: list(
                self._keys_slice(start, stop, direction)
            ),
        )

    def _items_indexer(self, index_or_slice):
        """Return the (key, value) at an int index, or a list for a slice."""
        return self._resolve(
            index_or_slice,
            by_index=self._item_by_index,
            by_slice=lambda start, stop, direction: list(
                self._items_slice(start, stop, direction)
            ),
        )

    def _values_indexer(self, index_or_slice):
        """Return the value at an int index, or a list of values for a slice."""
        return self._resolve(
            index_or_slice,
            by_index=lambda i, direction: self._item_by_index(i, direction)[1],
            by_slice=lambda start, stop, direction: [
                value for _key, value in self._items_slice(start, stop, direction)
            ],
        )
def slice_to_interval(slice_):
    """
    Convert slice object to (start, stop, direction).

    Only steps of 1 and -1 are supported, and the sign of ``start`` must
    match the direction (non-negative for forward, negative for backward).
    Backward slices are normalized into non-negative from-the-end offsets.
    """
    begin = slice_.start or 0  # None -> 0
    stride = slice_.step or 1  # None -> 1
    if stride not in (1, -1):
        raise ValueError(
            "Only step of 1 or -1 is supported in a Tree sequence slice. "
            f"Step {slice_.step} is disallowed."
        )
    if stride == 1:
        if begin < 0:
            raise ValueError(
                "Tree sequence slices with start < 0 must have step=-1. "
                f"Use for example [{slice_.start}:{slice_.stop}:-1]"
                "(This is a limitation of slicing on Tree sequences "
                "that does not apply to Python sequences in general.)"
            )
        if (slice_.stop is not None) and (slice_.stop < begin):
            raise ValueError(
                "Tree sequence slices with step=1 must have stop >= start. "
                "(This is a limitation of slicing on Tree sequences "
                "that does not apply to Python sequences in general.)"
            )
        interval = (begin, slice_.stop, 1)
    else:  # stride == -1: walk backwards from a negative start.
        if begin >= 0:
            raise ValueError(
                "Tree sequence slices with start >= 0 must have step=1. "
                "(This is a limitation of slicing on Tree sequences "
                "that does not apply to Python sequences in general.)"
            )
        if slice_.stop is None:
            end = None
        elif slice_.stop > begin:
            raise ValueError(
                "Tree sequence slices with step=-1 must have stop <= start."
            )
        else:
            end = 1 - slice_.stop
        interval = (1 - begin, end, -1)
    first, last, direction = interval
    assert first >= 0
    assert (last is None) or (last >= first)
    return interval
# Module-level sentinel object; the name/repr suggests it marks a value that
# should be left as-is.  NOTE(review): ``Sentinel`` is defined elsewhere in
# this module — confirm its semantics there.
UNCHANGED = Sentinel("UNCHANGED")
def tree_repr(tree, sample):
    """Build a compact one-line repr for *tree*, previewing items from *sample*.

    At least the first sample item is always shown; further items are added
    while the line stays under ~60 characters.  If the (cheap) length hint of
    *tree* exceeds what was shown, an ellipsis with the approximate entry
    count is appended.
    """
    rendered = [repr(item) for item in sample]
    text = f"<{type(tree).__name__} {{"
    if rendered:
        text += rendered[0]
    # NOTE: shown starts at 1 even for an empty sample, matching the
    # "always show at least one" accounting of the truncation check below.
    shown = 1
    for piece in rendered[1:]:
        if len(text) + len(piece) > 60:  # character budget for one line
            break
        text += ", " + piece
        shown += 1
    # length_hint is cheaper than len(tree) and good enough for a repr.
    estimated = operator.length_hint(tree)
    if estimated > shown:
        text += f", ...}} ~{estimated} entries>"
    else:
        text += "}>"
    return text
| 33.477011
| 87
| 0.577511
|
4a02fa9159f3fd02552bfb5167d84cbfcbabaa91
| 4,496
|
py
|
Python
|
drishtypy/viz/do_plots.py
|
Abhinavl3v3l/Drishtipy
|
5956afe24916bf40dde8d857ed6030de57dea353
|
[
"MIT"
] | null | null | null |
drishtypy/viz/do_plots.py
|
Abhinavl3v3l/Drishtipy
|
5956afe24916bf40dde8d857ed6030de57dea353
|
[
"MIT"
] | null | null | null |
drishtypy/viz/do_plots.py
|
Abhinavl3v3l/Drishtipy
|
5956afe24916bf40dde8d857ed6030de57dea353
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch.autograd import Variable
# functions to show an image
def imshow(img):
    """Undo the (x/2 + 0.5) normalization and display a CHW tensor via pyplot."""
    restored = img / 2 + 0.5  # invert normalization back to [0, 1]
    # Convert channel-first (C, H, W) to channel-last (H, W, C) for pyplot.
    plt.imshow(np.transpose(restored.numpy(), (1, 2, 0)))
def plot_model_history(title_name, train_acc_hist, test_acc_hist, train_loss_hist, test_loss_hist, save_filename):
    """Plot train/test accuracy and loss curves side by side, show, and save.

    Args:
        title_name: prefix used in both subplot titles.
        train_acc_hist / test_acc_hist: per-epoch accuracy sequences.
        train_loss_hist / test_loss_hist: per-epoch loss sequences.
        save_filename: basename for the saved "<save_filename>.png" figure.
    """
    fig, axes = plt.subplots(1, 2, figsize=(20, 5))
    n_epochs = len(train_acc_hist)
    epochs = range(1, n_epochs + 1)
    legend_labels = ['train', 'test']
    panels = (
        (axes[0], '{} - Accuracy'.format(title_name), 'Accuracy', train_acc_hist, test_acc_hist),
        (axes[1], '{} - Losses'.format(title_name), 'Loss', train_loss_hist, test_loss_hist),
    )
    for ax, panel_title, y_label, train_series, test_series in panels:
        ax.plot(epochs, train_series)
        ax.plot(epochs, test_series)
        ax.set_title(panel_title)
        ax.set_ylabel(y_label)
        ax.set_xlabel('Epoch')
        # NOTE(review): set_xticks' second positional argument is `labels`;
        # passing n_epochs / 10 here looks unintended — confirm against the
        # matplotlib version in use.
        ax.set_xticks(np.arange(1, n_epochs + 1), n_epochs / 10)
        ax.legend(legend_labels, loc='best')
    plt.show()
    fig.savefig("{}.png".format(save_filename))
def plot_model_comparison(legend_list, model1_acc_hist, model1_loss_hist,
                          model2_acc_hist, model2_loss_hist,
                          model3_acc_hist, model3_loss_hist,
                          model4_acc_hist, model4_loss_hist, ):
    """Overlay accuracy and loss curves of four models, show, and save.

    Args:
        legend_list: one legend label per model, in model order.
        modelN_acc_hist / modelN_loss_hist: per-epoch metric sequences.
    Saves the figure as "model_compare.png".
    """
    fig, axes = plt.subplots(1, 2, figsize=(20, 5))
    n_epochs = len(model1_acc_hist)
    epochs = range(1, n_epochs + 1)
    acc_histories = (model1_acc_hist, model2_acc_hist, model3_acc_hist, model4_acc_hist)
    loss_histories = (model1_loss_hist, model2_loss_hist, model3_loss_hist, model4_loss_hist)
    panels = (
        (axes[0], 'Model Accuracy', 'Accuracy', acc_histories),
        (axes[1], 'Model Losses', 'Loss', loss_histories),
    )
    for ax, panel_title, y_label, histories in panels:
        for history in histories:
            ax.plot(epochs, history)
        ax.set_title(panel_title)
        ax.set_ylabel(y_label)
        ax.set_xlabel('Epoch')
        # NOTE(review): set_xticks' second positional argument is `labels`;
        # passing n_epochs / 10 here looks unintended — confirm.
        ax.set_xticks(np.arange(1, n_epochs + 1), n_epochs / 10)
        ax.legend(legend_list, loc='best')
    plt.show()
    fig.savefig("model_compare.png")
def miss_classification(typ, model, device, classes, testloader, num_of_images=20, save_filename="misclassified"):
    """Plot up to `num_of_images` misclassified test images in a grid and save.

    Args:
        typ: prefix for the saved "<typ>_missclassified_images.jpg" file.
        model: trained torch model (switched to eval mode here).
        device: torch device the model/data live on.
        classes: index -> human-readable class name.
        testloader: iterable of (data, target) batches.
        num_of_images: stop after this many misclassified samples.
        save_filename: NOTE(review): currently unused; the output name is
            derived from `typ` instead — kept for interface compatibility.
    """
    model.eval()
    n_cols = 5
    # Generalize the previously hard-coded 4x5 grid: with num_of_images > 20,
    # add_subplot(4, 5, i) would raise for i > 20.
    n_rows = (num_of_images + n_cols - 1) // n_cols
    misclassified_cnt = 0
    fig = plt.figure(figsize=(10, 9))
    # Inference only: disable autograd bookkeeping (saves memory/time).
    with torch.no_grad():
        for data, target in testloader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            pred = output.argmax(dim=1, keepdim=True)  # index of max log-probability
            pred_marker = pred.eq(target.view_as(pred))
            wrong_idx = (~pred_marker).nonzero()  # indices of wrong predictions
            for idx in wrong_idx:
                index = idx[0].item()
                title = "T:{}, P:{}".format(classes[target[index].item()], classes[pred[index][0].item()])
                ax = fig.add_subplot(n_rows, n_cols, misclassified_cnt + 1, xticks=[], yticks=[])
                ax.set_title(title)
                imshow(data[index].cpu())
                misclassified_cnt += 1
                if misclassified_cnt == num_of_images:
                    break
            if misclassified_cnt == num_of_images:
                break
    fig.savefig(typ + '_missclassified_images' + '.jpg')
    return
def plot_dataset_images(device, classes, data_loader, num_of_images=20):
    """Show the first `num_of_images` samples from `data_loader` in a 4x5 grid.

    Args:
        device: torch device to move batches onto.
        classes: index -> human-readable class name (used as subplot title).
        data_loader: iterable of (data, target) batches.
        num_of_images: number of images to display before returning.
    """
    fig = plt.figure(figsize=(10, 9))
    shown = 0
    for batch, labels in data_loader:
        batch, labels = batch.to(device), labels.to(device)
        for position, label in enumerate(labels):
            ax = fig.add_subplot(4, 5, shown + 1, xticks=[], yticks=[])
            ax.set_title("{}".format(classes[label.item()]))
            imshow(batch[position].cpu())
            shown += 1
            # Early return replaces the original's nested double-break.
            if shown == num_of_images:
                return
    return
| 35.401575
| 114
| 0.61766
|
4a02fb0189d45a645dcd58d49e31b566fac7abfd
| 3,750
|
py
|
Python
|
pkg/suggestion/v1beta1/nas/enas/Operation.py
|
alexeykaplin/katib
|
195db292374dcf3b39b55dcb3fcd14b3a55d5942
|
[
"Apache-2.0"
] | 1,177
|
2018-04-23T08:45:19.000Z
|
2022-03-23T19:09:13.000Z
|
pkg/suggestion/v1beta1/nas/enas/Operation.py
|
alexeykaplin/katib
|
195db292374dcf3b39b55dcb3fcd14b3a55d5942
|
[
"Apache-2.0"
] | 1,791
|
2018-04-20T00:10:17.000Z
|
2022-03-31T18:18:36.000Z
|
pkg/suggestion/v1beta1/nas/enas/Operation.py
|
alexeykaplin/katib
|
195db292374dcf3b39b55dcb3fcd14b3a55d5942
|
[
"Apache-2.0"
] | 349
|
2018-04-20T01:03:28.000Z
|
2022-03-30T16:11:35.000Z
|
# Copyright 2021 The Kubeflow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import numpy as np
from pkg.apis.manager.v1beta1.python import api_pb2
class Operation(object):
    """A single candidate NAS operation: an id, a type, and concrete parameters."""

    def __init__(self, opt_id, opt_type, opt_params):
        self.opt_id = opt_id          # unique integer id within the search space
        self.opt_type = opt_type      # operation type string from the spec
        self.opt_params = opt_params  # dict: parameter name -> chosen value

    def get_dict(self):
        """Return this operation as a plain dict (id, type, params)."""
        return {
            'opt_id': self.opt_id,
            'opt_type': self.opt_type,
            'opt_params': self.opt_params,
        }

    def print_op(self, logger):
        """Log a human-readable description of this operation."""
        logger.info("Operation ID: \n\t{}".format(self.opt_id))
        logger.info("Operation Type: \n\t{}".format(self.opt_type))
        logger.info("Operations Parameters:")
        for param_name, param_value in self.opt_params.items():
            logger.info("\t{}: {}".format(param_name, param_value))
        logger.info("")
class SearchSpace(object):
    """Expand NAS operation specs into a flat list of concrete `Operation`s.

    Attributes:
        operation_list: raw operation protos from the experiment spec.
        search_space: list of `Operation`, one per feasible parameter combination.
        num_operations: total number of expanded operations.
    """

    def __init__(self, operations):
        self.operation_list = list(operations.operation)
        self.search_space = list()
        self._parse_operations()
        self.num_operations = len(self.search_space)

    def _parse_operations(self):
        """Populate self.search_space with one Operation per parameter combo."""
        # search_space is a list of Operation class
        operation_id = 0
        for operation_dict in self.operation_list:
            opt_type = operation_dict.operation_type
            opt_spec = list(operation_dict.parameter_specs.parameters)
            # avail_space is a dict with the format {"spec_name": [feasible values]}
            avail_space = dict()
            num_spec = len(opt_spec)
            for ispec in opt_spec:
                spec_name = ispec.name
                if ispec.parameter_type == api_pb2.CATEGORICAL:
                    avail_space[spec_name] = list(ispec.feasible_space.list)
                elif ispec.parameter_type == api_pb2.INT:
                    spec_min = int(ispec.feasible_space.min)
                    spec_max = int(ispec.feasible_space.max)
                    spec_step = int(ispec.feasible_space.step)
                    avail_space[spec_name] = range(
                        spec_min, spec_max + 1, spec_step)
                elif ispec.parameter_type == api_pb2.DOUBLE:
                    spec_min = float(ispec.feasible_space.min)
                    spec_max = float(ispec.feasible_space.max)
                    spec_step = float(ispec.feasible_space.step)
                    double_list = np.arange(
                        spec_min, spec_max + spec_step, spec_step)
                    # BUG FIX: the original did `del double_list[-1]`, which
                    # raises ValueError on a numpy array (fixed-size storage);
                    # trim with a slice instead, and guard the empty case to
                    # avoid IndexError on double_list[-1].
                    if double_list.size and double_list[-1] > spec_max:
                        double_list = double_list[:-1]
                    avail_space[spec_name] = double_list

            # Cartesian product of all feasible values -> one Operation each.
            key_avail_space = list(avail_space.keys())
            val_avail_space = list(avail_space.values())
            for this_opt_vector in itertools.product(*val_avail_space):
                opt_params = dict()
                for i in range(num_spec):
                    opt_params[key_avail_space[i]] = this_opt_vector[i]
                this_opt_class = Operation(operation_id, opt_type, opt_params)
                self.search_space.append(this_opt_class)
                operation_id += 1
| 40.76087
| 86
| 0.6232
|
4a02fc13ba0f07ea228cc93e6ff4c28f9945eea8
| 1,077
|
py
|
Python
|
aspc/context_processors.py
|
aspc/mainsite
|
a6ccee0bb921147b7f630d65e01371e451aa3c54
|
[
"MIT"
] | 8
|
2015-09-27T07:57:32.000Z
|
2018-10-28T06:08:40.000Z
|
aspc/context_processors.py
|
aspc/mainsite
|
a6ccee0bb921147b7f630d65e01371e451aa3c54
|
[
"MIT"
] | 132
|
2015-01-17T01:22:09.000Z
|
2018-11-13T22:05:32.000Z
|
aspc/context_processors.py
|
aspc/mainsite
|
a6ccee0bb921147b7f630d65e01371e451aa3c54
|
[
"MIT"
] | 20
|
2015-01-16T04:32:30.000Z
|
2018-09-03T22:55:05.000Z
|
from django.contrib.sites.models import Site
from django.contrib.sites.requests import RequestSite
from django.conf import settings as aspc_settings
import re
# Auxiliary request data to pass to templates
# Compiled once at import time instead of on every request (the original
# rebuilt the pattern per call; behavior is identical).
_MOBILE_UA_RE = re.compile(
    'Android|BlackBerry|iPhone|iPad|iPod|Opera Mini|IEMobile|SymbianOS|Windows Phone|Mobile',
    re.IGNORECASE,
)


def request(request):
    """Context processor: expose protocol/port/URI/domain/mobile info to templates.

    Returns a dict with:
        protocol: 'https' or 'http' depending on request.is_secure().
        port: the server port string, or None when it is "80".
        absolute_uri: the fully-qualified request URI.
        domain: current Site domain, or the request's host as a fallback.
        is_mobile: True when the user agent matches a known mobile token.
    """
    port_string = request.META.get('SERVER_PORT')
    user_agent = request.META.get('HTTP_USER_AGENT') or request.META.get('HTTP_AGENT')
    site_info = {
        # `cond and a or b` replaced with the explicit conditional expression.
        'protocol': 'https' if request.is_secure() else 'http',
        'port': port_string if port_string != "80" else None,
        'absolute_uri': request.build_absolute_uri(),
        'domain': (Site.objects.get_current().domain
                   if Site._meta.installed
                   else RequestSite(request).domain),
        'is_mobile': bool(_MOBILE_UA_RE.search(user_agent)) if user_agent else False,
    }
    return site_info
# settings.py data to pass to templates
def settings(request):
    """Context processor: expose announcement-banner settings to templates."""
    return dict(
        is_announcement_banner_visible=aspc_settings.ANNOUNCEMENT_BANNER_ACTIVE,
        announcement_banner_content=aspc_settings.ANNOUNCEMENT_BANNER_CONTENT,
    )
| 35.9
| 103
| 0.779944
|
4a02fd7d7f70116864ca8af36eaee43613892449
| 2,750
|
py
|
Python
|
core/modules/website_archive.py
|
AlexisAhmed/operative-framework
|
4b3a09e563fd6fec2443aa12240cc1cc54f88df4
|
[
"MIT"
] | 1
|
2020-10-16T22:56:18.000Z
|
2020-10-16T22:56:18.000Z
|
core/modules/website_archive.py
|
AlexisAhmed/operative-framework
|
4b3a09e563fd6fec2443aa12240cc1cc54f88df4
|
[
"MIT"
] | null | null | null |
core/modules/website_archive.py
|
AlexisAhmed/operative-framework
|
4b3a09e563fd6fec2443aa12240cc1cc54f88df4
|
[
"MIT"
] | 1
|
2020-10-16T22:56:19.000Z
|
2020-10-16T22:56:19.000Z
|
#!/usr/bin/env python
#description:Search archive of website domain (archive.org)#
from colorama import Fore,Back,Style
from core import load
import os,sys
import json
import datetime
import requests
class module_element(object):
    """operative-framework module: list archive.org (Wayback CDX) snapshots of a domain.

    Options (see self.require):
        domain    (required) target domain, with or without scheme/trailing slash.
        from_date / to_date  snapshot year range (defaults 2010 / 2017).
        limit     maximum number of CDX rows to fetch (default 100).

    Note: the original file used Python 2 print statements; they are written
    here as parenthesized print() calls, which produce identical output on
    Python 2 (single expression) and also parse on Python 3.
    """

    def __init__(self):
        self.title = "Archive.org Gathering : \n"
        self.require = {"domain": [{"value": "", "required": "yes"}], "from_date": [{"value": "2010", "required": "no"}], "to_date": [{"value": "2017", "required": "no"}], "limit": [{"value": "100", "required": "no"}]}
        self.export = []          # collected result lines for export
        self.export_file = ""
        self.export_status = False

    def set_agv(self, argv):
        # NOTE(review): method name kept for interface compatibility
        # ("agv" looks like a typo for "argv").
        self.argv = argv

    def show_options(self):
        """Delegate option display to the framework loader."""
        return load.show_options(self.require)

    def export_data(self, argv=False):
        """Delegate export of collected results to the framework loader."""
        return load.export_data(self.export, self.export_file, self.export_status, self.title, argv)

    def set_options(self, name, value):
        """Delegate setting a single option to the framework loader."""
        return load.set_options(self.require, name, value)

    def check_require(self):
        """Delegate required-option validation to the framework loader."""
        return load.check_require(self.require)

    def get_options(self, name):
        """Return the current value of option *name*, or False if unknown."""
        if name in self.require:
            return self.require[name][0]["value"]
        else:
            return False

    def run_module(self):
        """Validate required options, then run main()."""
        ret = self.check_require()
        # Kept as == False to preserve the loader's original truthiness contract.
        if ret == False:
            print(Back.YELLOW + Fore.BLACK + "Please set the required parameters" + Style.RESET_ALL)
        else:
            self.main()

    def main(self):
        """Query the archive.org CDX API and print/collect snapshot links."""
        error = 0
        domain_name = str(self.get_options('domain'))
        from_date = str(self.get_options('from_date'))
        to_date = str(self.get_options('to_date'))
        limit_result = str(self.get_options('limit'))
        # Normalize: strip trailing slash (endswith also guards the empty
        # string, where the original domain_name[-1] raised IndexError) and
        # drop any scheme prefix.
        if domain_name.endswith('/'):
            domain_name = domain_name[:-1]
        if "://" in domain_name:
            domain_name = domain_name.split('://')[1]
        url = "http://web.archive.org/cdx/search/cdx?url=" + domain_name + "&matchType=domain&limit=" + limit_result + "&output=json&from=" + from_date + "&to=" + to_date
        try:
            req = requests.get(url)
            json_data = json.loads(req.text)
            if len(json_data) == 0:
                print(Fore.YELLOW + "output: " + Style.RESET_ALL + "No result found")
                self.export.append("no result in archive")
                error = 1
        except Exception:  # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
            print(Fore.RED + "error: " + Style.RESET_ALL + " Can't open url")
            error = 1
        if error == 0:
            try:
                # First CDX row is the header; filter it out by its 'original' column.
                result = [x for x in json_data if x[2] != 'original']
                result.sort(key=lambda x: x[1])  # sort by timestamp
                for line in result:
                    timestamp = line[1]
                    website = line[2]
                    total_link = "https://web.archive.org/web/" + str(timestamp) + "/" + str(website)
                    string_date = str(timestamp[:4]) + "/" + str(timestamp[4:6]) + "/" + str(timestamp[6:8])
                    self.export.append(total_link)
                    print(" {} {} {}({}{}{}){}".format(string_date, website, Fore.YELLOW,
                                                       Style.RESET_ALL, total_link, Fore.YELLOW, Style.RESET_ALL))
            except Exception:  # narrowed from bare except
                print(Fore.RED + "error: " + Style.RESET_ALL + "Error found please retry")
| 33.13253
| 193
| 0.667273
|
4a02fe16fa15344ad55814655ed3c5b6460ec53e
| 90,205
|
py
|
Python
|
com/sxl/txtdownloader/icon.py
|
sxl77/txtDownloader
|
0cb3905391eec609591fdedb1d8743bbb794563c
|
[
"Apache-2.0"
] | null | null | null |
com/sxl/txtdownloader/icon.py
|
sxl77/txtDownloader
|
0cb3905391eec609591fdedb1d8743bbb794563c
|
[
"Apache-2.0"
] | null | null | null |
com/sxl/txtdownloader/icon.py
|
sxl77/txtDownloader
|
0cb3905391eec609591fdedb1d8743bbb794563c
|
[
"Apache-2.0"
] | null | null | null |
img = b'AAABAAEAgIAAAAEAIAAoCAEAFgAAACgAAACAAAAAAAEAAAEAIAAAAAAAAAAAAMQOAADEDgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiAVMHkiEnB6IhOveiIU0XoiE+h6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE+h6IhTReiITr3kiEnB6IBUwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeCQVMXoiEqd6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhOfeCAQIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiEoB6IhP1eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/
eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhPfeiMUWAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhKAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eyETbAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eSETvwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHsjE6J6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/
eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eyMTogAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB7IxRmeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eyMUZgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHsiE8x6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/F6IhPLeSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3oiE8t6IhPxeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/97IhPMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB7IxRmeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3shE2x2HxMpAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB2HxMpeyETbHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/97IxRmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE7p6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3siE9R7IxIdAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeyMSHXsiE9R6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE60AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB4JBUxeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhPtAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/p6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/4AaGgoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHsjE4V6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3kjE1AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSMTdnoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eCESRgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeyIUnHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhJ/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiETuHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/95IRNdAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhK0eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3shElUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAIxcWeyITrHoiE+J6IhPzeiITxXsiETwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB7IRN6eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3khFHQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eSQSKgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeCAUQHoiE/96IhP/eiIT/3oiE9l6IhP/
eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhFT16IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJIkJAd6IhPieiIT/3oiEoB4IxQzAAAAAHoiE0N6IhPveiIT/3oiE0MAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAfCIVJXoiE/96IhP/eCAQIAAAAAAAAAAAAAAAAHoiE696IhP/fCISYQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAB7IBI4eiIT/3oiE98AAAAAAAAAAAAAAAAAAAAAeiITlXoiE/95IhN4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHUgFRh6IhP/eiIT/3ojE2AAAAAAAAAAAAAAAAB7IhPUeiIT/3ojFFgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHsjEpp6IhP/eyIT1HoiEoB6IRNceiITzXoiE/96IhTRdicUDQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgCYNFHoiE7J6IhP/eiIT/3oiE/96IhP/eyITzHghEkYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHogEkd6IhKAeSISpnojFFgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeCMUM3ggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRA
eCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHsiFDSAKwAGAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB4Hg8ReiITrHoiE/F6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/l6IRPGfSATNwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeCQVMXoiE/V6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISYgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHggFEB6IhP/eiIT/3oiE/96IhP/
eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISYgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB0FxcLeiMS3XoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/95IhPjdBcXCwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHgiEzV6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/94IhM1AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAeSESY3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3khEmMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhKAeiIT/3oiE/96IhP/eiIT/3oiE/95IRO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiEoB6IhP/eiIT/3oiE/96IhP/eiIT/3khE78AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/
eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiISgHoiE/96IhP/eiIT/3oiE/96IhP/eSETvwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhKAeiIT/3oiE/96IhP/eiIT/3oiE/95IRO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiEoB6IhP/eiIT/3oiE/96IhP/eiIT/3khE78AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiISgHoiE/96IhP/eiIT/3oiE/96IhP/eSETvwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhKAeiIT/3oiE/96IhP/eiIT/3oiE/95IRO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiEoB6IhP/eiIT/3oiE/96IhP/eiIT/3khE78AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiISgHoiE/96IhP/eiIT/3oiE/96IhP/eSETvwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhKAeiIT/3oiE/96IhP/eiIT/3oiE/95IRO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiEoB6IhP/eiIT/3oiE/96IhP/eiIT/3khE78AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/
eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiISgHoiE/96IhP/eiIT/3oiE/96IhP/eSETvwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhKAeiIT/3oiE/96IhP/eiIT/3oiE/95IRO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiEoB6IhP/eiIT/3oiE/96IhP/eiIT/3khE78AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiISgHoiE/96IhP/eiIT/3oiE/96IhP/eSETvwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhKAeiIT/3oiE/96IhP/eiIT/3oiE/95IRO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiEoB6IhP/eiIT/3oiE/96IhP/eiIT/3khE78AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiISgHoiE/96IhP/eiIT/3oiE/96IhP/eSETvwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhKAeiIT/3oiE/96IhP/eiIT/3oiE/95IRO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiEoB6IhP/eiIT/3oiE/96IhP/eiIT/3khE78AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiISgHoiE/96IhP/eiIT/3oiE/96IhP/eSETvwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhKAeiIT/3oiE/96IhP/eiIT/3oiE/95IRO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeyMSHXshE2x6IhSOeSMTonsiFLV6IhOreiEUgnsiFFl6IBUwgCAgCAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiEoB6IhP/eiIT/3oiE/96IhP/eiIT/3khE78AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeyMUZnoiE7p6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP3eiISz3ojE4R8IhUlAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiISgHoiE/96IhP/eiIT/3oiE/96IhP/eSETvwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeyMUZnsiE8x6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/d6IhOVeyETNgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhKAeiIT/3oiE/96IhP/eiIT/3oiE/95IRO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHojFKp6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiITq3ojEUkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiEoB6IhP/eiIT/3oiE/96IhP/eiIT/3khE78AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhLPeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/
eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP4eSQSKgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiISgHoiE/96IhP/eiIT/3oiE/96IhP/eSETvwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT8XoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT2XoiE9d6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3siE4kAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhKAeiIT/3oiE/96IhP/eiIT/3oiE/95IRO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHojE7h6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3siE6x6IRNreCAUQHYnFA0AAAAAAAAAAIAgIAh7IBI4eiISYnoiEph6IhP3eiIT/3oiE/96IxLddSAVGAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiEoB6IhP/eiIT/3oiE/96IhP/eiIT/3khE78AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhKAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/
eiIT/3khE797IhQ0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkkEip7IxKLeiIT6HsiEnIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiISgHoiE/96IhP/eiIT/3oiE/96IhP/eSETvwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeh8UGXojE+V6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/97IhPMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB/IhEeZjMABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhKAeiIT/3oiE/96IhP/eiIT/3oiE/95IRO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhKAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eyIT1AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiEoB6IhP/eiIT/3oiE/96IhP/eiIT/3khE78AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
gCAQEHojE+V6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/95IhNSAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiISgHoiE/96IhP/eiIT/3oiE/96IhP/eSETvwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IxNQeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiITygAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhKAeiIT/3oiE/96IhP/eiIT/3oiE/95IRO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE5V6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/97IhOHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiEoB6IhP/eiIT/3oiE/96IhP/eiIT/3khE78AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT6noiE/96IhP/eiIT/3oiE/96IhP/eiIT/3gjEkgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiISgHoiE/96IhP/eiIT/3oiE/96IhP/eSETvwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIArFQx6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhPfgCAgCAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhKAeiIT/3oiE/96IhP/eiIT/3oiE/95IRO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAdiQSHHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE58AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiEoB6IhP/eiIT/3oiE/96IhP/eiIT/3khE78AAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IxEseiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3shElUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiISgHoiE/96IhP/eiIT/3oiE/96IhP/eSETvwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHsiETx6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIScQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhKAeiIT/3oiE/96IhP/eiIT/3oiE/95IRO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeyIUNHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhOVAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAHoiEoB6IhP/eiIT/3oiE/96IhP/eiIT/3khE78AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB4IxUkeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3khE78AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiISgHoiE/96IhP/eiIT/3oiE/96IhP/eSETvwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAmDRR6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiITrQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhKAeiIT/3oiE/96IhP/eiIT/3oiE/95IRO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgEAABHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3ohEpkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB8IhRMeiIT/3oiE/96IhP/eiIT/3oiE/96IxSbAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/
eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiEoB6IhP/eiIT/3oiE/96IhP/eiIT/3khE78AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeyITzHoiE/96IhP/eiIT/3oiE/96IhP/eiIT0wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiEoB6IhP/eiIT/3oiE/96IhP/eiIT/3kiE4kAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiISgHoiE/96IhP/eiIT/3oiE/96IhP/eSETvwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhOIeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eCMUMwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeyMSmnoiE/96IhP/eiIT/3oiE/96IhP/eiISYgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhKAeiIT/3oiE/96IhP/eiIT/3oiE/95IRO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHwiE0R6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/97IhJwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB7IhK1eiIT/3oiE/96IhP/eiIT/3oiE/98IRQnAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiEoB6IhP/eiIT/3oiE/96IhP/eiIT/3khE78AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHsiE8x6IhP/eiIT/3oiE/96IhP/eiIT/3oiE9uAIBAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeCMUM3oiE/96IhP/eiIT/3oiE/96IhP/eiMT1gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiISgHoiE/96IhP/eiIT/3oiE/96IhP/eSETvwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeyMUZnoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3ohE5IAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IRKZeiIT/3oiE/96IhP/eiIT/3oiE/96IhSOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhKAeiIT/3oiE/96IhP/eiIT/3oiE/95IRO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeyITzHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3ojEUkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeCMUM3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3ggFEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiEoB6IhP/eiIT/3oiE/96IhP/eiIT/3khE78AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IxRYeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3kiE6QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHojE216IhP2eiIT/3oiE/96IhP/eiIT/3oiE/96IhPJAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiISgHoiE/96IhP/eiIT/3oiE/96IhP/eSETvwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB4IhNqeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE8h7IRNseiESRXsjEh10FxcLeCITNXojE2B6IxOheiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3kjE1AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhKAeiIT/3oiE/96IhP/eiIT/3oiE/95IRO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhK0eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/97IhSPAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/
eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiEoB6IhP/eiIT/3oiE/96IhP/eiIT/3khE78AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhOyeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiMUqgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiISgHoiE/96IhP/eiIT/3oiE/96IhP/eSETvwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IRKKeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3ojFKoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhKAeiIT/3oiE/96IhP/eiIT/3oiE/95IRO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IRJFeiITunoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiFN56IhJvAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiEoB6IhP/eiIT/3oiE/96IhP/eiIT/3khE78AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeyIUW3kiEqZ6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3khE796IxNtgCMXFgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiISgHoiE/96IhP/eiIT/3oiE/96IhP/eSETvwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAcDhJ8IhVKeiMTYHojFHV5IxN2eiISYnwhFE55IhQmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhKAeiIT/3oiE/96IhP/eiIT/3oiE/95IRO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
eSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiEoB6IhP/eiIT/3oiE/96IhP/eiIT/3khE78AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiISgHoiE/96IhP/eiIT/3oiE/96IhP/eSETvwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhKAeiIT/3oiE/96IhP/eiIT/3oiE/95IRO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiEoB6IhP/eiIT/3oiE/96IhP/eiIT/3khE78AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiISgHoiE/96IhP/eiIT/3oiE/96IhP/eSETvwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhKAeiIT/3oiE/96IhP/eiIT/3oiE/95IRO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiEoB6IhP/eiIT/3oiE/96IhP/eiIT/3khE78AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiISgHoiE/96IhP/eiIT/3oiE/96IhP/eSETvwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhKAeiIT/3oiE/96IhP/eiIT/3oiE/95IRO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiEoB6IhP/eiIT/3oiE/96IhP/eiIT/3khE78AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiISgHoiE/96IhP/eiIT/3oiE/96IhP/eSETvwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhKAeiIT/3oiE/96IhP/eiIT/3oiE/95IRO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiEoB6IhP/eiIT/3oiE/96IhP/eiIT/3khE78AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiISgHoiE/96IhP/eiIT/3oiE/96IhP/eSETvwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhKAeiIT/3oiE/96IhP/eiIT/3oiE/95IRO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiEoB6IhP/eiIT/3oiE/96IhP/eiIT/3khE78AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiISgHoiE/96IhP/eiIT/3oiE/96IhP/eSETvwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhKAeiIT/3oiE/96IhP/eiIT/3oiE/95IRO/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiEoB6IhP/eiIT/3oiE/96IhP/eiIT/3khE78AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiISgHoiE/96IhP/eiIT/3oiE/96IhP/eSETvwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB7IhRoeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/
eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eyIUaAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHsjEjp6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/97IxI6AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgCsVDHoiE9h6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT3IArFQwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSETv3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
eSEVPXoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/97IxNRAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IRO/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiISgHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIUtgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHkhE796IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiERLgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeSATN3oiE7J6IhPveiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP4eiITxXsjFGYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHghEkZ6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiISgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiIStHoiE/96IhP/eiIT/3oiE/96IhP/
eiIT/3oiE/97IhRbAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IBUweCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeCAUQHggFEB4IBRAeiQUMpkzAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiMThHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/95IRR0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB7IhSceiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiEoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5IhO5eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3khE10AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHsjE4V6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3gkFTEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAfCIUTHoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eCESRgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAeyMSOnoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eSIT4wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IhP3eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/+AKxUMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeiITxXoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT5HogFTAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6IBUweiIT5HoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIUtgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB7IxRmeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3sjE3Z8IhUlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB8IhUleyMTdnoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/97IxRmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB7IhPMeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhPweiITyXkhE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE795IRO/eSETv3khE796IhPJeiIT8HoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eyITzAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHsjFGZ6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/97IxRmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiFI56IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIUjgAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHojE+V6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3ojE+UAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE9x6IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhLPAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHoiE656IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/
eiIT/3oiE/96IhP/eyIUjwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHwiFEx6IhOyeiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiITrHkhFT0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB5JBIqeSESY3oiE6x6IhTReiIT6HoiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT/3oiE/96IhP/eiIT6HoiFNF6IhOseSESY3kkEioAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA////AAAAAAAAAAAAAP//////4AAAAAAAAAAAAAAH/////8AAAAAAAAAAAAAAA/////8AAAAAAAAAAAAAAAD////+AAAAAAAAAAAAAAAAf////AAAAAAAAAAAAAAAAD////gAAAAAAAAAAAAAAAAf///4AP////////////8AH///8AP/////////////wA////AP//////////////AP///gH//////////////4B///4B//////////////+Af//+A///////////////wH///gP//////////////8B///4D//////+J///////Af//+A///////vv//////wH///gP//////z7//////8B///4D//////9+///////Af//+A///////Pv//////wH///gP//////73//////8B///4D///////D///////Af//+A///////////////wH///gP//////////////8B///4D///////////////Af//+A//AAAAAAAAAAA
P/wH///gP/AAAAAAAAAAAAf8B///4D/AAAAAAAAAAAAD/Af//+A/wAAAAAAAAAAAA/wH///gP4AAAAAAAAAAAAH8B///4D+AAAAAAAAAAAAB/Af//+A/g///////////wfwH///gP4P//////////8H8B///4D+D///////////B/Af//+A/g///////////wfwH///gP4P//////////8H8B///4D+D///////////B/Af//+A/g///////////wfwH///gP4P//////////8H8B///4D+D///////////B/Af//+A/g///////////wfwH///gP4P//////////8H8B///4D+D///////////B/Af//+A/g///////////wfwH///gP4P//////////8H8B///4D+D///////////B/Af//+A/g///////////wfwH///gP4P//////////8H8B///4D+D///////////B/Af//+A/g///////////wfwH///gP4P//////////8H8B///4D+D///////////B/Af//+A/g///////////wfwH///gP4P////4A////8H8B///4D+D////4AB////B/Af//+A/g////4AAD///wfwH///gP4P///8AAA///8H8B///4D+D///+ADAP///B/Af//+A/g////AP/n///wfwH///gP4P///gP/////8H8B///4D+D///4H//////B/Af//+A/g///8D//////wfwH///gP4P///A//////8H8B///4D+D///gf//////B/Af//+A/g///4H//////wfwH///gP4P//+B//////8H8B///4D+D///A///////B/Af//+A/g///wP//////wfwH///gP4P//8AAAAP//8H8B///4D+D///AAAAD///B/Af//+A/g///wAAAA///wfwH///gP4P//8AAAAP//8H8B///4D+D///AAAAD///B/Af//+A/g///wP//g///wfwH///gP4P//+D//4P//8H8B///4D+D///gf/+D///B/Af//+A/g///4H//g///wfwH///gP4P///B//wf//8H8B///4D+D///wP/8H///B/Af//+A/g///+B/+B///wfwH///gP4P///gP/g///8H8B///4D+D///8B/gP///B/Af//+A/g////gAAH///wfwH///gP4P///8AAD///8H8B///4D+D////gAB////B/Af//+A/g////+AB////wfwH///gP4P////4B////8H8B///4D+D///////////B/Af//+A/g///////////wfwH///gP4P//////////8H8B///4D+D///////////B/Af//+A/g///////////wfwH///gP4P//////////8H8B///4D+D///////////B/Af//+A/g///////////wfwH///gP4P//////////8H8B///4D+D///////////B/Af//+A/g///////////wfwH///gP4P//////////8H8B///4D+D///////////B/Af//+A/g///////////wfwH///gP4P//////////8H8B///4D+D///////////B/Af//+A/g///////////wfwH///gP4P//////////8H8B///4D+D///////////B/Af//+A/g///////////wfwH///gP4P//////////8H8B///4D+D///////////B/Af//+A/gAAAAAAAAAAAAfwH///gP4AAAAAAAAAAAAH8B///4D/AAAAAAAAAAAAD/Af//+A/wAAAAAAAAAAAA/wH///gP+AAAAAAAAAAAAf8B///4D/8AAAAAAAAAAA//Af//+A///////////////wH///gP//////////////8B///4B//////////////+Af//+Af//////////////gH///wD///////
///////wD///8AP/////////////wA////gA/////////////wAf///4AAAAAAAAAAAAAAAAH////AAAAAAAAAAAAAAAAD////4AAAAAAAAAAAAAAAB/////AAAAAAAAAAAAAAAA/////4AAAAAAAAAAAAAAAf/////gAAAAAAAAAAAAAAf//////wAAAAAAAAAAAAD///8='
| 90,205
| 90,205
| 0.924173
|
4a02fe6e29905750fdf9fa71cb53ad7139e28a69
| 6,628
|
py
|
Python
|
regression_tests/parsers/fileinfo_output_parser.py
|
mehrdad-shokri/retdec-regression-tests-framework
|
9c3edcd0a7bc292a0d5b5cbfb4315010c78d3bc3
|
[
"MIT"
] | 21
|
2017-12-12T20:38:43.000Z
|
2019-04-14T12:46:10.000Z
|
regression_tests/parsers/fileinfo_output_parser.py
|
mehrdad-shokri/retdec-regression-tests-framework
|
9c3edcd0a7bc292a0d5b5cbfb4315010c78d3bc3
|
[
"MIT"
] | 6
|
2018-01-06T13:32:23.000Z
|
2018-09-14T15:09:11.000Z
|
regression_tests/parsers/fileinfo_output_parser.py
|
mehrdad-shokri/retdec-regression-tests-framework
|
9c3edcd0a7bc292a0d5b5cbfb4315010c78d3bc3
|
[
"MIT"
] | 11
|
2017-12-12T20:38:46.000Z
|
2018-07-19T03:12:03.000Z
|
"""
Parsing of the output from fileinfo.
"""
import json
import re
from regression_tests.parsers.text_parser import Text
def parse(output):
    """Parse raw fileinfo output (`str`).

    :returns: A :class:`FileinfoOutput` wrapping the given output.
    """
    parsed_output = FileinfoOutput(output)
    return parsed_output
class FileinfoOutput(Text):
    """Parsed output from fileinfo.

    Instances of this class behave like strings (:class:`.Text`) enriched
    with dictionary-like access to the parsed content. Which keys exist
    depends on the format fileinfo produced:

    * Plain text (the ``-p/--plain`` parameter, the default): keys are the
      words before ``:`` on each output line. For the line
      ``Input file : file.exe`` the key is ``'Input file'``.
    * JSON (the ``-j/--json`` parameter): keys are the keys of the top-level
      JSON object, e.g. ``'architecture'``.

    Values are obtained by subscripting:

    .. code-block:: python

        self.assertEqual(fileinfo_output['File format'], 'PE')

    When a key occurs on several plain-text lines, its value is the list of
    all its values, in order of appearance. The key
    ``'Detected compiler/packer'`` is special: its value is always a list,
    even when the key occurs once or not at all.

    Since instances are string subclasses, ordinary string operations and
    :class:`.Text` methods keep working, e.g.
    ``fileinfo_output.contains('.*GCC.*4\\.7.*')``.

    The ``in`` operator is format-dependent. For plain-text output it is a
    plain substring search, so both ``'Input file'`` and ``'file.exe'`` are
    "in" an output containing ``Input file : file.exe``. For JSON output it
    tests key membership in the top-level JSON object, so for
    ``{"inputFile": "file.exe"}`` the key ``'inputFile'`` is "in" the output
    but the value ``'file.exe'`` is not.
    """

    def _parse_output_unless_already_parsed(self):
        """Parse the output if it has not been parsed yet."""
        if not self._output_parsed():
            self._parse_output()

    def _output_parsed(self):
        """Return True when the parsed representation is already cached."""
        return hasattr(self, '_parsed_output')

    def _parse_output(self):
        """Parse the output and cache the result in ``self._parsed_output``."""
        if self._is_output_in_json_format():
            parsed = self._parse_json_output()
        else:
            parsed = self._parse_plain_text_output()
        self._parsed_output = parsed

    def _is_output_in_json_format(self):
        """Does the output look like JSON (first non-blank char is ``{``)?"""
        return self.lstrip().startswith('{')

    def _parse_json_output(self):
        """Parse the output as JSON and return the resulting dictionary."""
        return json.loads(self)

    def _parse_plain_text_output(self):
        """Parse the output as plain text and return it as a dictionary."""
        # 'Detected compiler/packer' has to map to a list even when the key
        # never occurs in the output.
        parsed = {'Detected compiler/packer': []}
        line_re = re.compile(r'([^:]+):(.+)')
        for line in self.split('\n'):
            m = line_re.fullmatch(line)
            if m is None:
                continue
            key = m.group(1).strip()
            value = m.group(2).strip()
            if key not in parsed:
                parsed[key] = value
            elif isinstance(parsed[key], list):
                parsed[key].append(value)
            else:
                # Second occurrence of this key: switch to a list of values.
                parsed[key] = [parsed[key], value]
        return parsed

    def __contains__(self, key):
        # Plain text: ordinary substring search, as for any string.
        # JSON: membership of a key in the top-level JSON object.
        if not self._is_output_in_json_format():
            return super().__contains__(key)
        self._parse_output_unless_already_parsed()
        return key in self._parsed_output

    def __getitem__(self, key):
        self._parse_output_unless_already_parsed()
        return self._parsed_output[key]
| 31.561905
| 79
| 0.619644
|
4a02fec8b96e25228e6e0467d646c26995f944fc
| 10,371
|
py
|
Python
|
official/benchmark/models/resnet_cifar_main.py
|
bamdada/UdacityProj10FinaltfModels
|
4701bfbc924539860f610fa4ceae484a7bf194c6
|
[
"Apache-2.0"
] | 153
|
2020-10-25T13:58:04.000Z
|
2022-03-07T06:01:54.000Z
|
official/benchmark/models/resnet_cifar_main.py
|
akshayjaryal603/models
|
db39ef826193d0802f644ba30397242a7272676e
|
[
"Apache-2.0"
] | 11
|
2020-07-13T08:29:00.000Z
|
2022-03-24T07:21:09.000Z
|
official/benchmark/models/resnet_cifar_main.py
|
akshayjaryal603/models
|
db39ef826193d0802f644ba30397242a7272676e
|
[
"Apache-2.0"
] | 23
|
2020-10-25T14:44:47.000Z
|
2021-03-31T02:12:13.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Runs a ResNet model on the Cifar-10 dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
from official.benchmark.models import cifar_preprocessing
from official.benchmark.models import resnet_cifar_model
from official.benchmark.models import synthetic_util
from official.utils.flags import core as flags_core
from official.utils.misc import distribution_utils
from official.utils.misc import keras_utils
from official.vision.image_classification.resnet import common
# Learning-rate decay schedule, sorted by epoch: from each "epoch to start"
# onwards, the base learning rate is scaled by the paired multiplier.
LR_SCHEDULE = [  # (multiplier, epoch to start) tuples
    (0.1, 91), (0.01, 136), (0.001, 182)
]
def learning_rate_schedule(current_epoch,
                           current_batch,
                           batches_per_epoch,
                           batch_size):
  """Handles linear scaling rule and LR decay.

  The base learning rate is linearly scaled with the batch size, then
  multiplied by the factor of the last LR_SCHEDULE boundary already reached.

  Args:
    current_epoch: integer, current epoch indexed from 0.
    current_batch: integer, current batch in the current epoch, indexed from 0.
    batches_per_epoch: integer, number of steps in an epoch.
    batch_size: integer, total batch sized.

  Returns:
    Adjusted learning rate.
  """
  del current_batch, batches_per_epoch  # not used
  base_lr = common.BASE_LEARNING_RATE * batch_size / 128
  adjusted_lr = base_lr
  for multiplier, start_epoch in LR_SCHEDULE:
    if current_epoch < start_epoch:
      # Boundaries are sorted; no later boundary can apply either.
      break
    adjusted_lr = base_lr * multiplier
  return adjusted_lr
class LearningRateBatchScheduler(tf.keras.callbacks.Callback):
  """Callback that updates the learning rate at every batch boundary.

  N.B. Only support Keras optimizers, not TF optimizers.

  Attributes:
    schedule: a function that takes an epoch index and a batch index as input
      (both integer, indexed from 0) and returns a new learning rate as
      output (float).
  """

  def __init__(self, schedule, batch_size, steps_per_epoch):
    super().__init__()
    self.schedule = schedule
    self.steps_per_epoch = steps_per_epoch
    self.batch_size = batch_size
    # -1 so the first real epoch/LR update is always applied.
    self.epochs = -1
    self.prev_lr = -1

  def on_epoch_begin(self, epoch, logs=None):
    if not hasattr(self.model.optimizer, 'learning_rate'):
      raise ValueError('Optimizer must have a "learning_rate" attribute.')
    self.epochs += 1

  def on_batch_begin(self, batch, logs=None):
    """Executes before step begins."""
    new_lr = self.schedule(self.epochs, batch, self.steps_per_epoch,
                           self.batch_size)
    if not isinstance(new_lr, (float, np.float32, np.float64)):
      raise ValueError('The output of the "schedule" function should be float.')
    if new_lr == self.prev_lr:
      return  # nothing to do; avoid touching the optimizer every step
    self.model.optimizer.learning_rate = new_lr  # lr should be a float here
    self.prev_lr = new_lr
    logging.debug(
        'Epoch %05d Batch %05d: LearningRateBatchScheduler '
        'change learning rate to %s.', self.epochs, batch, new_lr)
def run(flags_obj):
  """Run ResNet Cifar-10 training and eval loop using native Keras APIs.

  Args:
    flags_obj: An object containing parsed flag values.

  Raises:
    ValueError: If fp16 is passed as it is not currently supported.

  Returns:
    Dictionary of training and eval stats.
  """
  keras_utils.set_session_config(
      enable_xla=flags_obj.enable_xla)

  # Execute flag override logic for better model performance
  if flags_obj.tf_gpu_thread_mode:
    keras_utils.set_gpu_thread_mode_and_count(
        per_gpu_thread_count=flags_obj.per_gpu_thread_count,
        gpu_thread_mode=flags_obj.tf_gpu_thread_mode,
        num_gpus=flags_obj.num_gpus,
        datasets_num_private_threads=flags_obj.datasets_num_private_threads)
  common.set_cudnn_batchnorm_mode()

  dtype = flags_core.get_tf_dtype(flags_obj)
  if dtype == 'fp16':
    raise ValueError('dtype fp16 is not supported in Keras. Use the default '
                     'value(fp32).')

  # Pick the image data format: channels_first when a GPU is present,
  # channels_last otherwise, unless overridden by the flag.
  data_format = flags_obj.data_format
  if data_format is None:
    data_format = ('channels_first' if tf.config.list_physical_devices('GPU')
                   else 'channels_last')
  tf.keras.backend.set_image_data_format(data_format)

  # Build the (possibly None) distribution strategy from the flags.
  strategy = distribution_utils.get_distribution_strategy(
      distribution_strategy=flags_obj.distribution_strategy,
      num_gpus=flags_obj.num_gpus,
      all_reduce_alg=flags_obj.all_reduce_alg,
      num_packs=flags_obj.num_packs)

  if strategy:
    # flags_obj.enable_get_next_as_optional controls whether enabling
    # get_next_as_optional behavior in DistributedIterator. If true, last
    # partial batch can be supported.
    strategy.extended.experimental_enable_get_next_as_optional = (
        flags_obj.enable_get_next_as_optional
    )

  strategy_scope = distribution_utils.get_strategy_scope(strategy)

  # Input pipelines: synthetic data for benchmarking, else real CIFAR-10.
  if flags_obj.use_synthetic_data:
    synthetic_util.set_up_synthetic_data()
    input_fn = common.get_synth_input_fn(
        height=cifar_preprocessing.HEIGHT,
        width=cifar_preprocessing.WIDTH,
        num_channels=cifar_preprocessing.NUM_CHANNELS,
        num_classes=cifar_preprocessing.NUM_CLASSES,
        dtype=flags_core.get_tf_dtype(flags_obj),
        drop_remainder=True)
  else:
    synthetic_util.undo_set_up_synthetic_data()
    input_fn = cifar_preprocessing.input_fn

  train_input_dataset = input_fn(
      is_training=True,
      data_dir=flags_obj.data_dir,
      batch_size=flags_obj.batch_size,
      parse_record_fn=cifar_preprocessing.parse_record,
      datasets_num_private_threads=flags_obj.datasets_num_private_threads,
      dtype=dtype,
      # Setting drop_remainder to avoid the partial batch logic in normalization
      # layer, which triggers tf.where and leads to extra memory copy of input
      # sizes between host and GPU.
      drop_remainder=(not flags_obj.enable_get_next_as_optional))

  eval_input_dataset = None
  if not flags_obj.skip_eval:
    eval_input_dataset = input_fn(
        is_training=False,
        data_dir=flags_obj.data_dir,
        batch_size=flags_obj.batch_size,
        parse_record_fn=cifar_preprocessing.parse_record)

  steps_per_epoch = (
      cifar_preprocessing.NUM_IMAGES['train'] // flags_obj.batch_size)
  # Either a graph-side piecewise-constant schedule (use_tensor_lr) or a
  # constant that the LearningRateBatchScheduler callback overrides per batch.
  lr_schedule = 0.1
  if flags_obj.use_tensor_lr:
    initial_learning_rate = common.BASE_LEARNING_RATE * flags_obj.batch_size / 128
    lr_schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
        boundaries=list(p[1] * steps_per_epoch for p in LR_SCHEDULE),
        values=[initial_learning_rate] +
        list(p[0] * initial_learning_rate for p in LR_SCHEDULE))

  # Model creation and compilation must happen under the strategy scope.
  with strategy_scope:
    optimizer = common.get_optimizer(lr_schedule)
    model = resnet_cifar_model.resnet56(classes=cifar_preprocessing.NUM_CLASSES)
    model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=optimizer,
        metrics=(['sparse_categorical_accuracy']
                 if flags_obj.report_accuracy_metrics else None),
        run_eagerly=flags_obj.run_eagerly)

  train_epochs = flags_obj.train_epochs

  callbacks = common.get_callbacks()

  if not flags_obj.use_tensor_lr:
    lr_callback = LearningRateBatchScheduler(
        schedule=learning_rate_schedule,
        batch_size=flags_obj.batch_size,
        steps_per_epoch=steps_per_epoch)
    callbacks.append(lr_callback)

  # if multiple epochs, ignore the train_steps flag.
  if train_epochs <= 1 and flags_obj.train_steps:
    steps_per_epoch = min(flags_obj.train_steps, steps_per_epoch)
    train_epochs = 1

  num_eval_steps = (cifar_preprocessing.NUM_IMAGES['validation'] //
                    flags_obj.batch_size)

  validation_data = eval_input_dataset
  if flags_obj.skip_eval:
    if flags_obj.set_learning_phase_to_train:
      # TODO(haoyuzhang): Understand slowdown of setting learning phase when
      # not using distribution strategy.
      tf.keras.backend.set_learning_phase(1)
    num_eval_steps = None
    validation_data = None

  if not strategy and flags_obj.explicit_gpu_placement:
    # TODO(b/135607227): Add device scope automatically in Keras training loop
    # when not using distribution strategy.
    no_dist_strat_device = tf.device('/device:GPU:0')
    no_dist_strat_device.__enter__()

  history = model.fit(train_input_dataset,
                      epochs=train_epochs,
                      steps_per_epoch=steps_per_epoch,
                      callbacks=callbacks,
                      validation_steps=num_eval_steps,
                      validation_data=validation_data,
                      validation_freq=flags_obj.epochs_between_evals,
                      verbose=2)
  eval_output = None
  if not flags_obj.skip_eval:
    eval_output = model.evaluate(eval_input_dataset,
                                 steps=num_eval_steps,
                                 verbose=2)

  if not strategy and flags_obj.explicit_gpu_placement:
    # NOTE(review): context managers are normally exited as
    # __exit__(None, None, None); confirm tf.device's manager accepts no args.
    no_dist_strat_device.__exit__()

  stats = common.build_stats(history, eval_output, callbacks)
  return stats
def define_cifar_flags():
  """Registers the command-line flags used by this script and sets defaults."""
  common.define_keras_flags(dynamic_loss_scale=False)

  flags_core.set_defaults(data_dir='/tmp/cifar10_data/cifar-10-batches-bin',
                          model_dir='/tmp/cifar10_model',
                          epochs_between_evals=10,
                          batch_size=128)
def main(_):
  """absl.app entry point; runs training/eval with the parsed global flags."""
  return run(flags.FLAGS)
if __name__ == '__main__':
  # Enable INFO logs and register the CIFAR-10 flags before handing control
  # to absl, which parses argv into flags.FLAGS and then calls main().
  logging.set_verbosity(logging.INFO)
  define_cifar_flags()
  app.run(main)
| 36.389474
| 82
| 0.714589
|
4a02fefcd3965199d58f09229cca8bcd9dc6d3b0
| 8,599
|
py
|
Python
|
old/calculations.py
|
fernandolucasaa/project_vision
|
5cd1355949b47d9e8686123a930c5dc7bd2d475d
|
[
"Unlicense"
] | null | null | null |
old/calculations.py
|
fernandolucasaa/project_vision
|
5cd1355949b47d9e8686123a930c5dc7bd2d475d
|
[
"Unlicense"
] | null | null | null |
old/calculations.py
|
fernandolucasaa/project_vision
|
5cd1355949b47d9e8686123a930c5dc7bd2d475d
|
[
"Unlicense"
] | null | null | null |
from copy import deepcopy
# --- Global variables ---
# Finger names, ordered thumb to pinky.
HAND_FINGERS = ["thumb", "index-finger", "middle-finger", "ring-finger", "pinky"]
# Parameters used in the computations.
DIFF_VALUE = 20  # Used to decide whether a finger is stretched or bent
DIFF_VALUE_2 = 49  # Used to decide whether adjacent fingers are close or not
# -------------------------
def str2float(list_):
    """Convert every element of *list_* to float, in place, and return it."""
    # Slice assignment keeps the mutation visible to the caller's reference.
    list_[:] = [float(item) for item in list_]
    return list_
def verify_hand_direction(hand):
    """Return "up" when the hand points upward, otherwise "down".

    Compares the y coordinate of every fingertip landmark against the wrist
    landmark. The image origin (0, 0) is the top-left corner, so a smaller y
    value means a higher position in the image.
    """
    wrist_y = hand.landmark[0].y  # wrist is the reference point
    # Fingertip landmark indices: thumb, index, middle, ring, pinky.
    fingertip_ys = [hand.landmark[idx].y for idx in (4, 8, 12, 16, 20)]
    # "up" only when every single fingertip sits above the wrist.
    if all(tip_y < wrist_y for tip_y in fingertip_ys):
        return "up"
    return "down"
def extract_finger_points(hand, img_height, img_width):
    """Build a dict with the pixel coordinates of the 4 points of each finger.

    Landmark coordinates are normalised to [0, 1] by the detector and are
    scaled here to pixel units using the image dimensions.

    :param hand: detected hand exposing a ``landmark`` sequence.
    :param img_height: image height in pixels.
    :param img_width: image width in pixels.
    :return: ``{finger: {'x': [4 floats], 'y': [4 floats]}}`` where index 0
        is the finger base and index 3 the fingertip.
    """
    # First landmark index of each finger (each finger spans 4 consecutive
    # landmarks). A mapping replaces the previous if/elif chain and its
    # unreachable "finger not found" branch; the dead `points_z` local was
    # removed as well.
    first_landmark = {
        "thumb": 1,
        "index-finger": 5,
        "middle-finger": 9,
        "ring-finger": 13,
        "pinky": 17,
    }
    len_finger = 4  # number of landmarks per finger

    hand_fingers_points = dict()
    for finger, init in first_landmark.items():
        points_x = []
        points_y = []
        # TODO: also extract the z coordinate?
        for i in range(init, init + len_finger):
            points_x.append(hand.landmark[i].x * img_width)
            points_y.append(hand.landmark[i].y * img_height)
        hand_fingers_points.update({finger: {'x': points_x, 'y': points_y}})
    return hand_fingers_points
def verify_hand_finger_condition(finger, points_y, hand_direction):
    """Classify one finger as "stretched" or "bent".

    :param finger: finger name (e.g. "thumb").
    :param points_y: y pixel coordinates of the 4 finger points
        (index 0 = base, index 3 = tip).
    :param hand_direction: "up" or "down" (anything else is treated as down).
    :return: "stretched" or "bent".
    """
    # TODO: the thumb threshold (DIFF_VALUE) is in pixels; it should be
    # normalised so the result does not depend on how far the hand is from
    # the camera.
    base_y = points_y[1]
    mid_y = points_y[2]
    tip_y = points_y[3]

    if hand_direction == "up":
        if finger == "thumb":
            # Stretched only when the tip is clearly above point 2.
            if tip_y <= mid_y and mid_y - tip_y >= DIFF_VALUE:
                return "stretched"
            return "bent"
        # Other fingers: bent when the tip sits below point 1.
        return "bent" if tip_y > base_y else "stretched"

    # Hand facing down: all vertical comparisons are mirrored.
    if finger == "thumb":
        if tip_y >= mid_y and tip_y - mid_y >= DIFF_VALUE:
            return "stretched"
        return "bent"
    return "bent" if tip_y < base_y else "stretched"
def compute_distance_adjacent_finger(finger_name, adjacent_finger, hand_fingers_points):
    """Return the normalised horizontal distance between two fingertips.

    The raw pixel distance between the fingertips is divided by the index
    finger's length (base-to-tip, y axis) so the value is independent of how
    close the hand is to the camera, then scaled by 100.
    """
    tip_gap = abs(hand_fingers_points[adjacent_finger]['x'][3]
                  - hand_fingers_points[finger_name]['x'][3])
    index_ys = hand_fingers_points['index-finger']['y']
    reference_length = abs(index_ys[0] - index_ys[3])
    return tip_gap / reference_length * 100
def verify_adjacent_finger(states_, finger_name, adjacent_finger, hand_fingers_points):
    """Return ``[finger state, proximity to the adjacent finger]``.

    Two fingers are reported as "next to" each other only when they share the
    same state ("stretched"/"bent") and their fingertips are horizontally
    close; in every other case "away from" is reported.
    """
    proximity = f"away from {adjacent_finger}"
    if states_[finger_name] == states_[adjacent_finger]:
        # TODO: the thumb probably needs a dedicated distance computation;
        # for now the same fingertip distance is used for every finger.
        tip_gap = abs(hand_fingers_points[adjacent_finger]['x'][3]
                      - hand_fingers_points[finger_name]['x'][3])
        # Normalise by the index-finger length so the threshold does not
        # depend on how close the hand is to the camera.
        index_ys = hand_fingers_points['index-finger']['y']
        reference_distance = abs(index_ys[0] - index_ys[3])
        if tip_gap / reference_distance * 100 < DIFF_VALUE_2:
            proximity = f"next to {adjacent_finger}"
    return [states_[finger_name], proximity]
def verify_hand_fingers_states(hand, img_height, hand_direction, hand_fingers_points):
    """Compute each finger's state and its proximity to the adjacent finger.

    :param hand: detected hand (unused here; kept for interface stability).
    :param img_height: image height in pixels (unused here).
    :param hand_direction: "up" or "down".
    :param hand_fingers_points: output of ``extract_finger_points``.
    :return: dict mapping each finger name to
        ``[state ("stretched"/"bent"), "next to ..."/"away from ..."]``.
    """
    # Note: the image origin (0, 0) is the top-left corner.
    # Pass 1 - classify every finger as stretched or bent.
    finger_states = {
        finger: verify_hand_finger_condition(
            finger=finger,
            points_y=hand_fingers_points[finger]['y'],
            hand_direction=hand_direction)
        for finger in HAND_FINGERS
    }

    # Pass 2 - attach the proximity to the adjacent finger. The snapshot lets
    # pass 2 keep reading plain pass-1 states while finger_states is being
    # overwritten with [state, proximity] lists.
    states_snapshot = deepcopy(finger_states)
    last = len(HAND_FINGERS) - 1
    for idx, finger in enumerate(HAND_FINGERS):
        # The pinky (last finger) is compared against the ring finger.
        adjacent = HAND_FINGERS[idx + 1] if idx < last else HAND_FINGERS[last - 1]
        finger_states[finger] = verify_adjacent_finger(
            states_=states_snapshot, finger_name=finger,
            adjacent_finger=adjacent, hand_fingers_points=hand_fingers_points)
    return finger_states
| 33.589844
| 146
| 0.626236
|
4a02ff01914fa53ce08fbccea60e4b76f6b88d61
| 539
|
py
|
Python
|
implicit_solver/lib/system/time_integrators/variational.py
|
vincentbonnetcg/Numerical-Bric-a-Brac
|
e71f2305d7452de985e5e9fa8935da611b6d9992
|
[
"MIT"
] | 14
|
2019-05-04T00:42:47.000Z
|
2021-09-07T09:57:44.000Z
|
implicit_solver/lib/system/time_integrators/variational.py
|
vincentbonnetcg/Numerical-Bric-a-Brac
|
e71f2305d7452de985e5e9fa8935da611b6d9992
|
[
"MIT"
] | null | null | null |
implicit_solver/lib/system/time_integrators/variational.py
|
vincentbonnetcg/Numerical-Bric-a-Brac
|
e71f2305d7452de985e5e9fa8935da611b6d9992
|
[
"MIT"
] | 5
|
2020-12-07T21:44:41.000Z
|
2021-09-13T05:29:54.000Z
|
"""
@author: Vincent Bonnet
@description : Variational time integrator (placeholder)
"""
import core
from lib.system.time_integrators import TimeIntegrator
class VariationalIntegrator(TimeIntegrator):
    # Placeholder variational time integrator: all three solver phases are
    # stubs and currently do nothing (see module docstring).

    def __init__(self):
        TimeIntegrator.__init__(self)

    @core.timeit
    def prepare_system(self, scene, details, dt):
        """Prepare solver state for a step of size ``dt`` (not implemented)."""
        # TODO
        pass

    @core.timeit
    def assemble_system(self, details, dt):
        """Assemble the system to be solved (not implemented)."""
        # TODO
        pass

    @core.timeit
    def solve_system(self, details, dt):
        """Solve the assembled system and advance the state (not implemented)."""
        # TODO
        pass
| 19.25
| 56
| 0.656772
|
4a02ff15173948497ca8220db425cd10d4ffbeaf
| 1,436
|
py
|
Python
|
config.py
|
Roseoketch/My-Blog
|
04caad7f80e3be2d43b888ecab9f4920422735e3
|
[
"MIT"
] | null | null | null |
config.py
|
Roseoketch/My-Blog
|
04caad7f80e3be2d43b888ecab9f4920422735e3
|
[
"MIT"
] | null | null | null |
config.py
|
Roseoketch/My-Blog
|
04caad7f80e3be2d43b888ecab9f4920422735e3
|
[
"MIT"
] | null | null | null |
import os
class Config:
    '''
    General configuration parent class

    Shared settings inherited by the per-environment subclasses below.
    '''
    # Secret key for session/CSRF signing — read from the environment so it
    # is never committed to source control.
    SECRET_KEY = os.environ.get('SECRET_KEY')
    SQLALCHEMY_TRACK_MODIFICATIONS = False

    # simplemde configurations (markdown editor widget)
    SIMPLEMDE_JS_IIFE = True
    SIMPLEMDE_USE_CDN = True

    # email configurations — Gmail SMTP over STARTTLS (port 587);
    # credentials come from the environment.
    MAIL_SERVER = 'smtp.googlemail.com'
    MAIL_PORT = 587
    MAIL_USE_TLS = True
    MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
    MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
    SUBJECT_PREFIX = 'My Blog'
    SENDER_EMAIL = 'oketchrose@gmail.com'
class ProdConfig(Config):
    '''
    Pruduction configuration child class

    Args:
        Config: The parent configuration class with General configuration settings
    '''
    # Production database URL is injected by the platform (e.g. Heroku).
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
class TestConfig(Config):
    '''
    Testing configuration child class

    Args:
        Config: The parent configuration class with General configuration settings
    '''
    # NOTE(review): credentials are hard-coded and identical to DevConfig —
    # tests hit the same database as development; consider a separate test DB
    # and environment-sourced credentials.
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://rose:justin/@localhost/blog'
class DevConfig(Config):
    '''
    Development configuration child class

    Args:
        Config: The parent configuration class with General configuration settings
    '''
    # NOTE(review): hard-coded local credentials — fine for dev only.
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://rose:justin/@localhost/blog'
    DEBUG = True  # enables the interactive debugger and auto-reload
# Lookup used by the app factory to select a configuration class by name.
config_options = {
'development': DevConfig,
'production': ProdConfig,
'test': TestConfig
}
| 23.540984
| 82
| 0.697075
|
4a030002ecb9f2f6a38292a4e588fa9415caea6b
| 3,301
|
py
|
Python
|
flex/ussd/core.py
|
centergy/flex_ussd
|
ddc0ccd192e3a0a82e8b7705f088862d59656c28
|
[
"MIT"
] | null | null | null |
flex/ussd/core.py
|
centergy/flex_ussd
|
ddc0ccd192e3a0a82e8b7705f088862d59656c28
|
[
"MIT"
] | null | null | null |
flex/ussd/core.py
|
centergy/flex_ussd
|
ddc0ccd192e3a0a82e8b7705f088862d59656c28
|
[
"MIT"
] | null | null | null |
from collections import OrderedDict
from flex.utils.decorators import cached_property
from flex.utils.module_loading import import_if_string, import_strings
from .config import Config, ConfigAttribute, ussd_config
from .wrappers import UssdCode
from .exc import ImproperlyConfigured
from . import signals
class UssdApp(object):
    """A USSD application: its configuration plus lazily created services.

    The ``cache``, ``session_manager`` and ``handler`` properties are built on
    first access from factories named in the config (dotted import strings or
    callables), each filtered through the corresponding signal.
    """

    config_class = Config

    # Descriptors that proxy reads/writes through self.config.
    name = ConfigAttribute('name')
    inital_screen = ConfigAttribute('inital_screen')

    default_config = dict(
        cache_backend='flex.ussd.cache.CacheBackend',
        session_manager='flex.ussd.sessions.SessionManager',
        middleware=[]
    )

    def __init__(self, name, *, inital_screen=None, **config):
        # self.config must exist before the ConfigAttribute descriptors
        # (name, inital_screen) are assigned, since they write into it.
        self.config = self.make_config(config)
        self.name = name
        self.inital_screen = inital_screen

    @cached_property
    def cache(self):
        """Cache backend instance, created on first access."""
        return self._create_cache_backend()

    @cached_property
    def session_manager(self):
        """Session manager instance, created on first access."""
        return self._create_session_manager()

    @cached_property
    def handler(self):
        """Request handler instance, created on first access."""
        return self._create_request_handler()

    @cached_property
    def middleware(self):
        """Resolved middleware list: global middleware first, then app-local."""
        rv = list(self.config.get('global_middleware', []))
        rv.extend(self.config.get('middleware', []))
        # rv = signals.middleware_list.pipe(self, rv)
        return import_strings(rv)

    def make_config(self, config=None):
        """Build the layered config: explicit values > defaults > ussd_config."""
        return self.config_class(config or {}, self.default_config, ussd_config)

    def _create_component(self, signal, config_key, kind):
        """Shared factory logic for the three components below.

        Pipes the configured factory through *signal*, imports it when given
        as a dotted string, validates it is callable, and instantiates it
        with this app. *kind* is the human-readable name used in errors.
        """
        factory = signal.pipe(self, self.config.get(config_key))
        factory = import_if_string(factory)
        if not callable(factory):
            raise ImproperlyConfigured('%s must be a type or callable. In app %s.' % (kind, self.name))
        return factory(self)

    def _create_cache_backend(self):
        return self._create_component(signals.cache_backend_factory,
                                      'cache_backend', 'Cache backend')

    def _create_session_manager(self):
        return self._create_component(signals.session_manager_factory,
                                      'session_manager', 'Session manager')

    def _create_request_handler(self):
        return self._create_component(signals.request_handler_factory,
                                      'request_handler', 'Request handler')
return factory(self)
class UssdAppRouter(object):
    """Maps USSD codes to handlers and resolves incoming requests.

    Routes are stored keyed by ``tuple(code)`` so lookups are hashable
    regardless of the (project-defined) ``UssdCode`` type.
    """

    __slots__ = ('routes', 'base_code')

    def __init__(self, code=None):
        # OrderedDict preserves registration order when iterating routes.
        self.routes = OrderedDict()
        # None when no base code is configured (`code and ...` short-circuit).
        self.base_code = code and UssdCode(code)

    def route(self, code, handler):
        """Register *handler* under *code*; raises ValueError on duplicates."""
        code = code if isinstance(code, UssdCode) else UssdCode(code)
        key_code = tuple(code)
        if key_code in self.routes:
            raise ValueError('Ussd code %s already registered in ussd router.' % (code,))
        self.routes[key_code] = (code, handler)

    def resolve(self, request):
        """Find the longest registered code matching ``request.ussd_string``.

        Returns ``((code, handler), route_code)`` on a match and records the
        matched code on ``request.route_code``; returns ``(None, None)``
        when nothing matches.
        """
        ussd_string = request.ussd_string
        # Fix: previously startswith(self.base_code) ran unconditionally and
        # raised TypeError whenever the router was built without a base code
        # (base_code is None in that case, as the ternary below acknowledges).
        if self.base_code and ussd_string.startswith(self.base_code):
            ussd_string = ussd_string[len(self.base_code):]
        # NOTE(review): route keys are tuples, so str.startswith here means
        # "any element is a prefix" — presumably UssdCode iterates to string
        # segments; confirm against UssdCode's definition.
        codes = sorted(filter(ussd_string.startswith, self.routes.keys()))
        if codes:
            code = codes[-1]  # lexicographically greatest == longest match
            route_code = '%s*%s' % (self.base_code, code) if self.base_code else code
            request.route_code = route_code
            return self.routes[code], route_code
        return None, None

    def __len__(self):
        return self.routes.__len__()

    def __contains__(self, code):
        return self.routes.__contains__(code)
apps = dict()
| 27.974576
| 99
| 0.75159
|
4a030095b6b0f602404bc38f7d784fd254c40366
| 1,076
|
py
|
Python
|
userbot/plugins/alive.py
|
SeekingLegend/LegendBot
|
86d4e1db09f9940d836ee05fb43f0eba19044a3f
|
[
"MIT"
] | null | null | null |
userbot/plugins/alive.py
|
SeekingLegend/LegendBot
|
86d4e1db09f9940d836ee05fb43f0eba19044a3f
|
[
"MIT"
] | null | null | null |
userbot/plugins/alive.py
|
SeekingLegend/LegendBot
|
86d4e1db09f9940d836ee05fb43f0eba19044a3f
|
[
"MIT"
] | 1
|
2021-01-30T05:38:53.000Z
|
2021-01-30T05:38:53.000Z
|
"""Check if userbot alive. If you change these, you become the gayest gay such that even the gay world will disown you."""
import asyncio
from telethon import events
from telethon.tl.types import ChannelParticipantsAdmins
from platform import uname
from userbot import ALIVE_NAME
from userbot.utils import admin_cmd
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "Set ALIVE_NAME in config vars in Heroku"
#@command(outgoing=True, pattern="^.alive$")
@borg.on(admin_cmd(pattern=r"alive"))
async def amireallyalive(alive):
    """ For .alive command, check if the bot is running. """
    # Edits the triggering message in place with a markdown status banner.
    # NOTE(review): the version numbers below are hard-coded strings, not
    # probed from the runtime — they may be stale.
    await alive.edit("**Me Iz Alive Bro ^.^** \n`🇮🇳BOT Status : ` **☣Hot**\n\n"
                     f"`My peru owner`: {DEFAULTUSER}\n\n"
                     "`Telethon version:` **6.0.9**\n`Python:` **3.7.4**\n"
                     "`Database Status:` **😀ALL OK**\n\n`Always with you, my master!\n`"
                     "**Bot Creator:** [🇮🇳Seeking Legend](t.me/SeekingLegend)\n"
                     " [🇮🇳Deploy This LegendBot🇮🇳](https://github.com/SeekingLegend/LegendBot)")
| 46.782609
| 122
| 0.640335
|
4a03015d90829c6013e917b2c3afc24a69f78e35
| 397
|
py
|
Python
|
djangoappengine/tests/__init__.py
|
yeraydiazdiaz/nonrel-blog
|
fc5179561a15ada8cd30fd178aa5356d38d937ff
|
[
"BSD-3-Clause"
] | null | null | null |
djangoappengine/tests/__init__.py
|
yeraydiazdiaz/nonrel-blog
|
fc5179561a15ada8cd30fd178aa5356d38d937ff
|
[
"BSD-3-Clause"
] | null | null | null |
djangoappengine/tests/__init__.py
|
yeraydiazdiaz/nonrel-blog
|
fc5179561a15ada8cd30fd178aa5356d38d937ff
|
[
"BSD-3-Clause"
] | null | null | null |
from .backend import BackendTest
from .field_db_conversion import FieldDBConversionTest
from .field_options import FieldOptionsTest
from .filter import FilterTest
from .keys import KeysTest
from .mapreduce_input_readers import DjangoModelInputReaderTest, DjangoModelIteratorTest
from .not_return_sets import NonReturnSetsTest
from .order import OrderTest
from .transactions import TransactionTest
| 39.7
| 88
| 0.881612
|
4a0301c4a230c56ab4012a6d7dd9b4702165cb0b
| 1,501
|
py
|
Python
|
pandora-hub/pandora/hub/forms.py
|
williamlagos/django-coding
|
246dc1aba32eae0b035c407de3e8fe954606b776
|
[
"MIT"
] | null | null | null |
pandora-hub/pandora/hub/forms.py
|
williamlagos/django-coding
|
246dc1aba32eae0b035c407de3e8fe954606b776
|
[
"MIT"
] | 21
|
2020-03-24T18:18:22.000Z
|
2021-03-31T20:18:53.000Z
|
pandora-hub/pandora/hub/forms.py
|
efforia/dashboard
|
246dc1aba32eae0b035c407de3e8fe954606b776
|
[
"MIT"
] | null | null | null |
#
# This file is part of Efforia project.
#
# Copyright (C) 2011-2013 William Oliveira de Lagos <william@efforia.com.br>
#
# Efforia is free software: you can redistribute it and/or modify
# it under the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Efforia is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Efforia. If not, see <http://www.gnu.org/licenses/>.
#
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div, Hidden, HTML, Field
class PhotoForm(forms.Form):
    """Photo upload form rendered with crispy-forms.

    The real file input is hidden (zero size, opacity 0) and an upload glyph
    is shown instead — presumably front-end script forwards clicks from the
    glyph to the hidden input; TODO confirm against the template JS.
    """
    file = forms.FileField(label='')
    redirect = forms.CharField(label='')

    def __init__(self, *args, **kwargs):
        self.helper = FormHelper()
        # POST target for the upload.
        self.helper.form_action = '/efforia/photo'
        self.helper.layout = Layout(
            Hidden('redirect',value='1'),
            # Invisible but still present in the DOM so it can receive files.
            Field('file',style='opacity:0; width:0; height:0',css_class='file'),
            Div(HTML("<span class='glyphicon glyphicon-cloud-upload icon-glyphicon'></span>"),css_class='upload')
        )
        super(PhotoForm, self).__init__(*args, **kwargs)
| 41.694444
| 114
| 0.692871
|
4a030283e1d12e8c4791e3ddd4cd3462c8cf49a0
| 1,166
|
py
|
Python
|
connectedbytcp_control/connected_controls.py
|
LBNL-ETA/LPDM-Drivers
|
0190ecb1348b10d5fb7c5b60ca30ebbbbebe094e
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
connectedbytcp_control/connected_controls.py
|
LBNL-ETA/LPDM-Drivers
|
0190ecb1348b10d5fb7c5b60ca30ebbbbebe094e
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
connectedbytcp_control/connected_controls.py
|
LBNL-ETA/LPDM-Drivers
|
0190ecb1348b10d5fb7c5b60ca30ebbbbebe094e
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
################################################################################################################################
# *** Copyright Notice ***
#
# "Price Based Local Power Distribution Management System (Local Power Distribution Manager) v1.0"
# Copyright (c) 2016, The Regents of the University of California, through Lawrence Berkeley National Laboratory
# (subject to receipt of any required approvals from the U.S. Dept. of Energy). All rights reserved.
#
# If you have questions about your rights to use or distribute this software, please contact
# Berkeley Lab's Innovation & Partnerships Office at IPO@lbl.gov.
################################################################################################################################
from tcplights import TCPLights
class ConnectedLight:
    """Thin wrapper around a TCPLights controller for a single light device.

    :param lights: controller exposing ``TCPSetLightValue(did, level)``
        (typically a :class:`TCPLights` instance).
    :param did: device id of this light on the controller.
    :param ipaddr: controller IP address. Fix: previously this argument was
        accepted but silently discarded; it is now stored for reference.
    """

    def __init__(self, lights, did, ipaddr=None):
        self.lights = lights
        self.did = did
        self.ipaddr = ipaddr  # kept so callers can inspect which controller was used

    def on(self, brightness=100):
        "Turns lights on at brightness passed in"
        # brightness is forwarded verbatim; presumably a 0-100 percentage —
        # TODO confirm against TCPLights.TCPSetLightValue.
        self.lights.TCPSetLightValue(self.did, brightness)

    def off(self):
        "Turns light off"
        self.lights.TCPSetLightValue(self.did, 0)
|
4a0302bd981d1e6cd79f463bd91102444b2bbaf5
| 1,361
|
py
|
Python
|
duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/18_features/numtrees_45/rule_38.py
|
apcarrik/kaggle
|
6e2d4db58017323e7ba5510bcc2598e01a4ee7bf
|
[
"MIT"
] | null | null | null |
duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/18_features/numtrees_45/rule_38.py
|
apcarrik/kaggle
|
6e2d4db58017323e7ba5510bcc2598e01a4ee7bf
|
[
"MIT"
] | null | null | null |
duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/18_features/numtrees_45/rule_38.py
|
apcarrik/kaggle
|
6e2d4db58017323e7ba5510bcc2598e01a4ee7bf
|
[
"MIT"
] | null | null | null |
def findDecision(obj):
    """Decision rule extracted from a random-forest tree (18 input features).

    Only five features influence this tree:
    obj[0] Passanger, obj[3] Coupon, obj[11] Income,
    obj[12] Bar, obj[13] Coffeehouse.
    Returns the string 'True' or 'False'.
    """
    passanger = obj[0]
    coupon = obj[3]
    income = obj[11]
    bar = obj[12]
    coffeehouse = obj[13]

    # Root split on Bar: anything not <= 2.0 is rejected outright
    # (both original sibling branches returned 'False').
    if not bar <= 2.0:
        return 'False'
    if income <= 4:
        # Coupon feature must be positive; otherwise reject.
        if not coupon > 0:
            return 'False'
        # Reject only when Coffeehouse is non-positive AND Passanger > 1;
        # every other combination (including the generated fallbacks)
        # accepted in the original tree.
        if coffeehouse <= 0.0 and passanger > 1:
            return 'False'
        return 'True'
    if income > 4:
        return 'False' if passanger <= 1 else 'True'
    return 'False'
| 41.242424
| 378
| 0.601763
|
4a0303450f0fa6b4ae08fdbdcad599ef6768d2b8
| 15,329
|
py
|
Python
|
testcases/I2C.py
|
apopple/op-test-framework
|
94687c73bbc09d897350e9e799ea51703050befd
|
[
"Apache-2.0"
] | null | null | null |
testcases/I2C.py
|
apopple/op-test-framework
|
94687c73bbc09d897350e9e799ea51703050befd
|
[
"Apache-2.0"
] | null | null | null |
testcases/I2C.py
|
apopple/op-test-framework
|
94687c73bbc09d897350e9e799ea51703050befd
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python2
# IBM_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.
#
# OpenPOWER Automated Test Project
#
# Contributors Listed Below - COPYRIGHT 2015,2017
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# IBM_PROLOG_END_TAG
'''
I2C tests
---------
This class will test functionality of following drivers:
I2C Driver(Inter-Integrated Circuit) driver
'''
import time
import subprocess
import re
import sys
from common.OpTestConstants import OpTestConstants as BMC_CONST
import unittest
import OpTestConfiguration
from common.OpTestSystem import OpSystemState
from common.Exceptions import CommandFailed, KernelModuleNotLoaded
from common.Exceptions import KernelConfigNotSet
import logging
import OpTestLogger
log = OpTestLogger.optest_logger_glob.get_logger(__name__)
class I2CDetectUnsupported(Exception):
    """Asked to do i2c detect on a bus that doesn't support detection.

    Raised by ``I2C.query_i2c_bus`` when ``i2cdetect -F`` reports the bus
    cannot do Send/Receive Bytes; callers treat it as "skip this bus".
    """
    pass
class I2C():
    '''
    Base class for I2C tests.

    Provides setup plus host-side helpers that drive the i2c-tools
    utilities (i2cdetect/i2cdump/i2cget/i2cset) over a console/ssh
    connection stored in ``self.c``.
    '''

    def setUp(self):
        # Grab the global test configuration and the host/BMC/system handles.
        conf = OpTestConfiguration.conf
        self.cv_HOST = conf.host()
        self.cv_IPMI = conf.ipmi()
        self.cv_SYSTEM = conf.system()

    def set_up(self):
        # Boot the system into the environment selected by self.test
        # ("skiroot" = petitboot shell, "host" = full OS) and return the
        # command connection to use.
        if self.test == "skiroot":
            self.cv_SYSTEM.goto_state(OpSystemState.PETITBOOT_SHELL)
            self.c = self.cv_SYSTEM.console
        elif self.test == "host":
            self.cv_SYSTEM.goto_state(OpSystemState.OS)
            self.c = self.cv_HOST.get_ssh_connection()
        else:
            raise Exception("Unknown test type")
        return self.c

    def i2c_init(self):
        # Verify host prerequisites (tools + kernel modules) and sanity-check
        # EEPROM visibility against what OpTestSystem expects.
        self.cv_HOST.host_get_OS_Level()
        self.cv_HOST.host_check_command("i2cdump", "i2cdetect", "hexdump",
                                        "i2cget", "i2cset")
        l_kernel = self.cv_HOST.host_get_kernel_version()
        mods = {"CONFIG_I2C_OPAL": "i2c_opal",
                "CONFIG_I2C_CHARDEV": "i2c_dev",
                "CONFIG_EEPROM_AT24": "at24"}
        try:
            for (c, m) in mods.items():
                self.cv_HOST.host_load_module_based_on_config(l_kernel, c, m)
        except KernelConfigNotSet as ns:
            self.assertTrue(False, str(ns))
        except KernelModuleNotLoaded as km:
            if km.module == "at24":
                # We can fail if we don't load it, not all systems have it
                pass
            else:
                self.assertTrue(False, str(km))

        # Get information of EEPROM chips
        eeprom_info = self.host_get_info_of_eeprom_chips()
        if self.cv_SYSTEM.has_host_accessible_eeprom():
            self.assertNotEqual(eeprom_info, None)
        else:
            self.assertEqual(eeprom_info, None)

    def host_get_list_of_i2c_buses(self):
        '''
        This function will return the list of installed i2c buses on host in
        two formats:

        list-by number, e.g. ::

            ["0","1","2",....]

        list-by-name, eg ::

            ["i2c-0","i2c-1","i2c-2"....]

        :returns: (l_list, l_list1) : list of i2c buses by number,
            list of i2c buses by name
        :raises: :class:`common.Exceptions.CommandFailed`
        '''
        self.c.run_command("i2cdetect -l")
        # First column of `i2cdetect -l` is the bus name ("i2c-N").
        l_res = self.c.run_command("i2cdetect -l | awk '{print $1}'")
        # list by number Ex: ["0","1","2",....]
        l_list = []
        # list by name Ex: ["i2c-0","i2c-1"...]
        l_list1 = []
        for l_bus in l_res:
            matchObj = re.search("(i2c)-(\d{1,})", l_bus)
            if matchObj:
                l_list.append(matchObj.group(2))
                l_list1.append(l_bus)
            else:
                # Non-matching lines (headers etc.) are ignored.
                pass
        return l_list, l_list1

    def host_get_list_of_eeprom_chips(self):
        '''
        It will return list with elements having pairs of eeprom chip
        addresses and corresponding i2c bus where the chip is attached.
        This information is getting through sysfs interface. format is ::

            ["0 0x50","0 0x51","1 0x51","1 0x52"....]

        :returns: list -- list having pairs of i2c bus number and eeprom chip
            address.
        :raises: :class:`common.Exceptions.CommandFailed`
        '''
        # sysfs path component looks like "<bus>-00<addr>/eeprom";
        # the regex captures "<bus>-00<addr>" and the substitution below
        # rewrites "00" into the "0x" hex prefix.
        l_res = self.c.run_command("find /sys/ -name eeprom", timeout=3600)
        l_chips = []
        for l_line in l_res:
            if l_line.__contains__("eeprom"):
                matchObj = re.search("/(\d{1,}-\d{4})/eeprom", l_line)
                if matchObj:
                    l_line = matchObj.group(1)
                    i_args = (l_line.replace("-", " "))
                    log.debug(i_args)
                else:
                    continue
                i_args = re.sub(" 00", " 0x", i_args)
                l_chips.append(i_args)
                log.debug(i_args)
        return l_chips

    def host_get_info_of_eeprom_chips(self):
        '''
        This function will get information of EEPROM chips attached to the i2c
        buses

        :returns: str EEPROM chips information
        :raises: :class:`common.Exceptions.CommandFailed`
        '''
        log.debug("Getting the information of EEPROM chips")
        l_res = None
        try:
            l_res = self.c.run_command("cat /sys/bus/i2c/drivers/at24/*/name")
        except CommandFailed as cf:
            # First attempt failed: clear dmesg, then reload the at24 driver
            # and retry once; any failure here is treated as "no EEPROMs".
            l_res = self.c.run_command("dmesg -C")
            try:
                self.c.run_command("rmmod at24")
                self.cv_HOST.host_load_module("at24")
                l_res = self.c.run_command(
                    "cat /sys/bus/i2c/drivers/at24/*/name")
            except CommandFailed as cf:
                pass
            except KernelModuleNotLoaded as km:
                pass
        return l_res

    def host_hexdump(self, i_dev):
        '''
        The hexdump utility is used to display the specified files.
        This function will display in both ASCII+hexadecimal format.

        :param i_dev: this is the file used as a input to hexdump for display
            info
        :type i_dev: str
        :raises: :class:`common.Exceptions.CommandFailed`
        '''
        l_res = self.c.run_command("hexdump -C %s" % i_dev)

    def query_i2c_bus(self, i_bus):
        '''
        This function query's the i2c bus for devices attached to it.
        i2cdetect is a utility to scan an I2C bus for devices

        :param i_bus: i2c bus numer
        :type i_bus: str
        :raises: :class:`common.Exceptions.CommandFailed`,
            :class:`I2CDetectUnsupported` when the bus cannot be probed
        '''
        rc = 0
        log.debug("Querying the i2c bus %s for devices attached to it" % i_bus)
        try:
            l_res = self.c.run_command("i2cdetect -y %i" % int(i_bus))
        except CommandFailed as cf:
            rc = cf.exitcode
        if rc != 0:
            # Plain probe failed — check the bus actually supports
            # Send/Receive Bytes before retrying with read-byte mode (-r).
            try:
                l_res = self.c.run_command(
                    "i2cdetect -F %i|egrep '(Send|Receive) Bytes'|grep yes"
                    % int(i_bus))
            except CommandFailed as cf:
                log.debug("i2c bus %i doesn't support query" % int(i_bus))
                raise I2CDetectUnsupported
            try:
                l_res = self.c.run_command("i2cdetect -y -r %i" % int(i_bus))
            except CommandFailed as cf:
                self.assertEqual(cf.exitcode, 0,
                                 "Querying the i2cbus for devices failed"
                                 ":{}\n{}".format(i_bus, str(cf)))

    def i2c_dump(self, i_args):
        '''
        This i2cdump function takes arguments in pair of a string like
        "i2cbus address". i2cbus indicates the number or name of the I2C bus to
        be scanned. This number should correspond to one of the busses
        listed by i2cdetect -l. address indicates the address to be scanned on
        that bus, and is an integer between 0x03 and 0x77 i2cdump is a program
        to examine registers visible through the I2C bus.

        The command may fail due to problems with the device, hardware, or
        firmware.

        :param i_args: this is the argument to i2cdump utility. Arguments are
            in the form of "i2c-bus-number eeprom-chip-address"
            e.g. ``0 0x51``, ``3 0x52``
        :type i_args: str
        '''
        try:
            l_res = self.c.run_command("i2cdump -f -y %s" % i_args)
        except CommandFailed as cf:
            self.assertEqual(cf.exitcode, 0,
                             "i2cdump failed for the device: %s\n%s"
                             % (i_args, str(cf)))

    def i2c_get(self, i_args, i_addr):
        '''
        This function i2cget read from I2C/SMBus chip registers.

        command usage: ::

            i2cget [-f] [-y] i2cbus chip-address [data-address [mode]]

        :param i_args: this is the argument to i2cget utility. Arguments are
            in the form of "i2c-bus-number eeprom-chip-address"
            e.g. ``0 0x51``, ``3 0x52``
        :type i_args: str
        :param i_addr: this is the data-address on chip, from where data will
            be read.
            e.g. "0x00","0x10","0x20"...
        '''
        # NOTE(review): despite the docstring's historical claim of returning
        # the read value, nothing is returned here — l_res is discarded; the
        # method only asserts that the read succeeds.
        try:
            l_res = self.c.run_command("i2cget -f -y %s %s" % (i_args, i_addr))
        except CommandFailed as cf:
            self.assertEqual(cf.exitcode, 0,
                             "i2cget: Getting data from address %s failed: %s"
                             % (i_addr, str(cf)))

    def i2c_set(self, i_args, i_addr, i_val):
        '''
        This function i2cset will be used for setting I2C registers.

        command usage: ::

            i2cset [-f] [-y] [-m mask] [-r] i2cbus chip-address data-address [value] ... [mode]

        :param i_args: this is the argument to i2cset utility. Arguments are
            in the form of "i2c-bus-number eeprom-chip-address"
            e.g. ``0 0x51``, ``3 0x52`` ....etc
        :type i_args: str
        :param i_addr: this is the data-address on chip, where data will be set
            e.g. 0x00","0x10","0x20"...
        :type i_addr: str
        :param i_val: this is the value which will be set into
            data-address i_addr
        :type i_val: str
        '''
        try:
            l_res = self.c.run_command(
                "i2cset -f -y %s %s %s" % (i_args, i_addr, i_val))
        except CommandFailed as cf:
            self.assertEqual(cf.exitcode, 0,
                             "i2cset: Setting the data to a address %s failed:"
                             " %s" % (i_addr, str(cf)))
class FullI2C(I2C, unittest.TestCase):
    '''
    This test has following test steps:

    1. Getting host information (OS and kernel info)
    2. Checking the required utilities are present on host or not
    3. Loading the necessary modules to test I2C device driver functionality
       (i2c_dev, i2c_opal and at24)
    4. Getting the list of i2c buses
    5. Querying the i2c bus for devices
    6. Getting the list of i2c buses and eeprom chip addresses
    7. Accessing the registers visible through the i2cbus using i2cdump utility
    8. Listing the i2c adapter contents and i2c bus entries to make sure
       sysfs entries created for each bus.
    9. Testing i2cget functionality for limited samples

    Avoiding i2cset functionality, it may damage the system.
    '''
    # Subclasses set this True to restrict the run to the first bus only.
    BASIC_TEST = False

    def setUp(self):
        self.test = "host"
        super(FullI2C, self).setUp()

    def runTest(self):
        self.set_up()
        if self.test == "host":
            self.i2c_init()
        # Get list of i2c buses available on host,
        # l_list=["0","1"....]
        # l_list1=["i2c-0","i2c-1","i2c-2"....]
        l_list, l_list1 = self.host_get_list_of_i2c_buses()
        if self.BASIC_TEST:
            # For the basic test, just go for the first of everything.
            l_list = l_list[:1]
            l_list1 = l_list1[:1]
        # Scanning i2c bus for devices attached to it.
        for l_bus in l_list:
            try:
                self.query_i2c_bus(l_bus)
            except I2CDetectUnsupported:
                log.debug("Unsupported i2cdetect on bus %s" % l_bus)
        # Get list of pairs of i2c bus and EEPROM device addresses in the host
        l_chips = self.host_get_list_of_eeprom_chips()
        if self.cv_SYSTEM.has_host_accessible_eeprom():
            self.assertNotEqual(len(l_chips), 0,
                                "No EEPROMs detected, while OpTestSystem says "
                                "there should be")
            for l_args in l_chips:
                # Accessing the registers visible through the i2cbus using
                # i2cdump utility
                # l_args format: "0 0x51","1 0x53",.....etc
                self.i2c_dump(l_args)
        else:
            self.assertEqual(len(l_chips), 0,
                             "Detected EEPROM where OpTestSystem said there "
                             "should be none")
        if self.cv_SYSTEM.has_host_accessible_eeprom():
            self.assertGreater(len(l_chips), 0,
                               "Expected to find EEPROM chips")
            # Currently testing only getting the data from a data address,
            # avoiding setting data.
            # Only four samples are gathered to check whether reading eeprom
            # data is working or not.
            # Setting eeprom data is dangerous and make your system UNBOOTABLE
            l_addrs = ["0x00", "0x10", "0x20", "0x30", "0x40", "0x50", "0x60",
                       "0x70", "0x80", "0x90", "0xa0", "0xb0", "0xc0", "0xd0",
                       "0xe0", "0xf0"]
            for l_addr in l_addrs:
                # NOTE(review): l_chips[1] reads the *second* chip — raises
                # IndexError if exactly one EEPROM is present; confirm intent.
                l_val = self.i2c_get(l_chips[1], l_addr)
                # self.i2c_set(l_list2[1], l_addr, "0x50")
        if self.test == "skiroot":
            # sysfs checks below only make sense on the full host OS.
            return
        # list i2c adapter contents
        try:
            l_res = self.c.run_command(
                "ls --color=never -l /sys/class/i2c-adapter")
        except CommandFailed as cf:
            self.assertEqual(cf.exitcode, 0, str(cf))
        # Checking the sysfs entry of each i2c bus
        for l_bus in l_list1:
            try:
                l_res = self.c.run_command(
                    "ls --color=never -l /sys/class/i2c-adapter/%s" % l_bus)
            except CommandFailed as cf:
                self.assertEqual(cf.exitcode, 0, str(cf))
        return BMC_CONST.FW_SUCCESS
class BasicI2C(FullI2C, unittest.TestCase):
    # Host-OS variant of the basic (first-bus-only) I2C test.
    BASIC_TEST = True

    def setUp(self):
        self.test = "host"
        super(BasicI2C, self).setUp()
class BasicSkirootI2C(FullI2C, unittest.TestCase):
    # Skiroot (petitboot shell) variant of the basic I2C test.
    BASIC_TEST = True

    def setUp(self):
        self.test = "skiroot"
        # Deliberately starts super() lookup *after* FullI2C (i.e. calls
        # I2C.setUp directly) so FullI2C.setUp cannot overwrite
        # self.test back to "host".
        super(FullI2C, self).setUp()
| 35.899297
| 94
| 0.571401
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.