hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
95937b6c655000e3afd6bcd53cd133cbcf86b347 | 447 | py | Python | experiment/get_xml.py | juliapochynok/LearnKorean_project | 60c53c1422379bbfc0fbba4c32f74105430326e8 | [
"Apache-2.0"
] | 1 | 2020-07-19T11:35:14.000Z | 2020-07-19T11:35:14.000Z | experiment/get_xml.py | juliapochynok/LearnKorean_project | 60c53c1422379bbfc0fbba4c32f74105430326e8 | [
"Apache-2.0"
] | 1 | 2019-03-04T14:18:03.000Z | 2019-03-04T14:18:03.000Z | experiment/get_xml.py | juliapochynok/LearnKorean_project | 60c53c1422379bbfc0fbba4c32f74105430326e8 | [
"Apache-2.0"
] | null | null | null | import requests
import xml.etree.ElementTree
def make_wordimfo_file(needed_word, file):
    """Query the krdict.korean.go.kr open API for *needed_word* and save the
    raw XML response to *file*.

    Parameters
    ----------
    needed_word : str
        Korean word to look up.
    file : str
        Path of the output file the XML response is written to.
    """
    # NOTE(review): the API key is hard-coded; consider loading it from
    # configuration or an environment variable instead of source control.
    parameters = {
        'key': "84AD3BB0C4BF3809A9CF3CCA68FAF946",
        'q': needed_word,
        'part': 'word',
        'translated': 'y',
        'trans_lang': '1',
    }
    url = "https://krdict.korean.go.kr/api/search"
    response = requests.get(url, params=parameters)
    # Context manager guarantees the handle is closed even if write() raises
    # (the original left the file open on error).
    with open(file, 'w') as fl:
        fl.write(response.text)
57b5ef063b41516ff438cb500ffa0794aec1b17b | 192 | py | Python | src/pypine/do/__init__.py | jkpubsrc/PyPine | e0144fabd8d7cd061f14ac4478ba2d3bc8fafe03 | [
"Apache-1.1"
] | null | null | null | src/pypine/do/__init__.py | jkpubsrc/PyPine | e0144fabd8d7cd061f14ac4478ba2d3bc8fafe03 | [
"Apache-1.1"
] | null | null | null | src/pypine/do/__init__.py | jkpubsrc/PyPine | e0144fabd8d7cd061f14ac4478ba2d3bc8fafe03 | [
"Apache-1.1"
] | null | null | null |
__version__ = "0.2021.3.19"
from .AbstractDataObjectBase import AbstractDataObjectBase
from .DiskFile import DiskFile
from .InMemoryFile import InMemoryFile
from .URLFile import URLFile | 17.454545 | 58 | 0.817708 |
5387117729af21b503fde095a5c82b13c3330208 | 2,970 | py | Python | src/01_basic_model_creation.py | abhishekvarma12345/Transfer_Learning | 2e583a544d41944e8efc88018c9a20c91ae24db8 | [
"MIT"
] | null | null | null | src/01_basic_model_creation.py | abhishekvarma12345/Transfer_Learning | 2e583a544d41944e8efc88018c9a20c91ae24db8 | [
"MIT"
] | null | null | null | src/01_basic_model_creation.py | abhishekvarma12345/Transfer_Learning | 2e583a544d41944e8efc88018c9a20c91ae24db8 | [
"MIT"
] | null | null | null | import argparse
import os
import numpy as np
from tqdm import tqdm
import logging
from src.utils.common import read_yaml, create_directories
import tensorflow as tf
import io
import time
STAGE = "basic model" ## <<< change stage name

# Append run logs for this stage to logs/basic_running_logs.log
# (filemode="a" preserves logs from previous runs).
logging.basicConfig(
    filename=os.path.join("logs", 'basic_running_logs.log'),
    level=logging.INFO,
    format="[%(asctime)s: %(levelname)s: %(module)s]: %(message)s",
    filemode="a"
)
def main(config_path):
    """Train a simple feed-forward MNIST classifier and save it under
    artifacts/models/basic_model.h5.

    Parameters
    ----------
    config_path : str
        Path to the YAML configuration file (read via read_yaml).
    """
    ## read config files
    config = read_yaml(config_path)

    ## get data
    (X_train_full, y_train_full), (X_test, y_test) = tf.keras.datasets.mnist.load_data()
    # Scale pixel values from [0, 255] into [0, 1]
    X_train_full = X_train_full / 255.0
    X_test = X_test / 255.0
    # Hold out the first 5000 training images for validation
    X_valid, X_train = X_train_full[:5000], X_train_full[5000:]
    y_valid, y_train = y_train_full[:5000], y_train_full[5000:]

    ## set the seeds -- read from config when available, falling back to the
    ## previously hard-coded value (the original had a TODO to do this)
    seed = (config or {}).get("seed", 2021)
    tf.random.set_seed(seed)
    np.random.seed(seed)

    ## define layers: 28x28 input, two hidden layers with LeakyReLU,
    ## 10-way softmax output
    LAYERS = [
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(300),
        tf.keras.layers.LeakyReLU(),  ## alternate way
        tf.keras.layers.Dense(100),
        tf.keras.layers.LeakyReLU(),
        tf.keras.layers.Dense(10, activation="softmax")
    ]

    ## define model
    model = tf.keras.models.Sequential(LAYERS)

    LOSS = "sparse_categorical_crossentropy"
    OPTIMIZER = tf.keras.optimizers.SGD(learning_rate=1e-3)
    METRICS = ["accuracy"]

    ## compiling model
    model.compile(loss=LOSS, optimizer=OPTIMIZER, metrics=METRICS)

    # log our model summary info in logs
    def _log_model_summary(model):
        # Capture Keras' printed summary into a string for the log file.
        with io.StringIO() as stream:
            model.summary(print_fn=lambda x: stream.write(f"{x}\n"))
            summary_string = stream.getvalue()
        return summary_string

    ## model summary
    logging.info(f"{STAGE} summary: \n{_log_model_summary(model)}")

    # training model
    start = time.time()
    model.fit(X_train, y_train,
              epochs=10,
              validation_data=(X_valid, y_valid),
              verbose=2)
    end = time.time()
    logging.info(f"time taken to train {STAGE} is {end - start}")

    # save the base model
    model_dir_path = os.path.join("artifacts", "models")
    create_directories([model_dir_path])
    model_file_path = os.path.join(model_dir_path, "basic_model.h5")
    model.save(model_file_path)
    logging.info(f"{STAGE} saved at {model_file_path}")
    logging.info(f"{STAGE} evaluation metrics {model.evaluate(X_test, y_test)}")
if __name__ == '__main__':
    # CLI entry point: run this training stage with an optional config path.
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", "-c", default="configs/config.yaml")
    cli_args = parser.parse_args()
    try:
        logging.info("\n********************")
        logging.info(f">>>>> stage {STAGE} started <<<<<")
        main(config_path=cli_args.config)
        logging.info(f">>>>> stage {STAGE} completed!<<<<<\n")
    except Exception as e:
        # Record the full traceback before propagating the failure.
        logging.exception(e)
        raise e
e5d690564bb509f5c5fd2324f295e31a4b5c3929 | 16,648 | py | Python | ale/util.py | tthatcher95/ale | 6bebf2172c7ffde91e09b0f138c6aebb5b5bca24 | [
"Unlicense"
] | null | null | null | ale/util.py | tthatcher95/ale | 6bebf2172c7ffde91e09b0f138c6aebb5b5bca24 | [
"Unlicense"
] | null | null | null | ale/util.py | tthatcher95/ale | 6bebf2172c7ffde91e09b0f138c6aebb5b5bca24 | [
"Unlicense"
] | null | null | null | import os
from os import path
from glob import glob
from itertools import filterfalse, groupby
import warnings
import pvl
import collections
from collections import OrderedDict
from itertools import chain
import subprocess
import re
import networkx as nx
from networkx.algorithms.shortest_paths.generic import shortest_path
import spiceypy as spice
from ale import spice_root
def get_metakernels(spice_dir=spice_root, missions=None, years=None, versions=None):
    """
    Given a root directory, get any subdirectory containing metakernels,
    assume spice directory structure.

    Mostly doing filtering here, might be worth using Pandas?

    Parameters
    ----------
    spice_dir : str
        Path containing Spice directories downlaoded from NAIF's website

    missions : set, str
        Mission or set of missions to search for

    years : set, str, int
        year or set of years to search for

    versions : set, str
        version or set of versions to search for

    Returns
    -------
    : dict
        {'count': <number of matching metakernels>, 'data': <list of metakernel dicts>}
    """
    # None (the new defaults) and the sentinel "all" both mean "no filter".
    # The defaults were previously mutable set() literals (flake8 B006).
    if not missions or missions == "all":
        missions = set()
    if not years or years == "all":
        years = set()
    if not versions or versions == "all":
        versions = set()

    if isinstance(missions, str):
        missions = {missions}

    if isinstance(years, (str, int)):
        years = {str(years)}
    else:
        years = {str(year) for year in years}

    avail = {
        'count': 0,
        'data': []
    }

    missions = [m.lower() for m in missions]
    mission_dirs = list(filter(path.isdir, glob(path.join(spice_dir, '*'))))

    for md in mission_dirs:
        # Assuming spice root has the same name as the original on NAIF website
        mission = os.path.basename(md).split('-')[0].split('_')[0]

        if missions and all(m not in mission.lower() for m in missions):
            continue

        metakernel_keys = ['mission', 'year', 'version', 'path']

        # recursive glob to make metakernel search more robust to subtle directory structure differences
        metakernel_paths = sorted(glob(os.path.join(md, '**', '*.tm'), recursive=True))

        metakernels = []
        for k in metakernel_paths:
            components = path.splitext(path.basename(k))[0].split('_') + [k]
            if len(components) == 3:
                # No year component in the filename; keep the columns aligned.
                components.insert(1, 'N/A')
            metakernels.append(dict(zip(metakernel_keys, components)))

        # naive filter, do we really need anything else?
        if years:
            metakernels = [mk for mk in metakernels if mk['year'] in years or mk['year'] == 'N/A']

        if versions:
            if versions == 'latest':
                # Keep only the highest version string per year.
                latest = []
                for _, group in groupby(metakernels, lambda x: x['year']):
                    latest.append(max(group, key=lambda x: x['version']))
                metakernels = latest
            else:
                metakernels = [mk for mk in metakernels if mk['version'] in versions]

        avail['data'].extend(metakernels)

    avail['count'] = len(avail['data'])
    return avail
def find_latest_metakernel(path, year):
    """Return the last (lexicographically) *.tm/*.TM file under *path* whose
    basename contains *year*; raise if none exists."""
    candidates = sorted(glob(os.path.join(path, '*.[Tt][Mm]')))
    if not candidates:
        raise Exception(f'No metakernels found in {path}.')

    latest = None
    for candidate in candidates:
        if str(year) in os.path.basename(candidate):
            latest = candidate

    if latest is None:
        raise Exception(f'No metakernels found in {path} for {year}.')
    return latest
def dict_merge(dct, merge_dct):
    """Recursively merge *merge_dct* into *dct* (in place) and return *dct*.

    Nested dicts are merged key-by-key; any other value in *merge_dct*
    overwrites the corresponding value in *dct*.
    """
    # collections.Mapping was removed in Python 3.10; the abc module is the
    # supported home for the ABC.
    from collections.abc import Mapping

    for k, v in merge_dct.items():
        if (k in dct and isinstance(dct[k], dict)
                and isinstance(v, Mapping)):
            dict_merge(dct[k], v)
        else:
            dct[k] = v

    return dct
def get_isis_preferences(isis_preferences=None):
    """
    Returns ISIS Preference file as a pvl object

    Merges (in increasing precedence): $ISISROOT/IsisPreferences,
    ~/.Isis/IsisPreferences, and the explicit *isis_preferences* argument
    (a dict or a path to a preference file).
    """
    def read_pref(pref_path):
        # ISIS preference files use 'EndGroup' where pvl expects 'End_Group'.
        with open(pref_path) as handle:
            text = handle.read().replace('EndGroup', 'End_Group')
            parsed = pvl.loads(text)
        return parsed

    argprefs = {}
    if isis_preferences:
        argprefs = isis_preferences if isinstance(isis_preferences, dict) else read_pref(isis_preferences)

    try:
        homeprefs = read_pref(os.path.join(os.path.expanduser("~"), '.Isis', 'IsisPreferences'))
    except FileNotFoundError:
        homeprefs = {}

    try:
        isisrootprefs = read_pref(os.path.join(os.environ["ISISROOT"], 'IsisPreferences'))
    except (FileNotFoundError, KeyError):
        isisrootprefs = {}

    # Later sources win: root < home < explicit argument.
    return dict_merge(dict_merge(isisrootprefs, homeprefs), argprefs)
def dict_to_lower(d):
    """Return a copy of *d* with every key lower-cased, recursing into nested dicts."""
    lowered = {}
    for key, value in d.items():
        lowered[key.lower()] = dict_to_lower(value) if isinstance(value, dict) else value
    return lowered
def expandvars(path, env_dict=os.environ, default=None, case_sensative=True):
    """Expand $VAR and ${VAR} references in *path* using *env_dict*.

    Unknown variables are left verbatim unless *default* is given, in which
    case they are replaced by *default*. With case_sensative=False, lookups
    are performed against a lower-cased copy of *env_dict*.
    """
    lookup = env_dict if case_sensative else dict_to_lower(env_dict)

    def substitute(match):
        whole = match.group(0)
        name = match.group(1)
        if not case_sensative:
            whole = whole.lower()
            name = name.lower()
        fallback = whole if default is None else default
        # group(2) is the inner name for the ${...} form.
        return lookup.get(match.group(2) or name, fallback)

    return re.sub(r'\$(\w+|\{([^}]*)\})', substitute, path)
def generate_kernels_from_cube(cube, expand=False, format_as='list'):
    """
    Parses a cube label to obtain the kernels from the Kernels group.

    Parameters
    ----------
    cube : cube
        Path to the cube to pull the kernels from.
    expand : bool, optional
        Whether or not to expand variables within kernel paths based on your IsisPreferences file.
    format_as : str, optional {'list', 'dict'}
        How to return the kernels: either as a one-demensional ordered list, or as a dictionary
        of kernel lists.

    Returns
    -------
    : list
        One-dimensional ordered list of all kernels from the Kernels group in the cube.
    : Dictionary
        Dictionary of lists of kernels with the keys being the Keywords from the Kernels group of
        cube itself, and the values being the values associated with that Keyword in the cube.
    """
    # (The unused local copy of the mk_paths key template was removed; the
    # authoritative copy lives in get_kernels_from_isis_pvl.)

    # just work with full path
    cube = os.path.abspath(cube)
    cubelabel = pvl.load(cube)

    try:
        kernel_group = cubelabel['IsisCube']
    except KeyError:
        raise KeyError(f'{cubelabel}, Could not find kernels group, input cube [{cube}] may not be spiceinited')

    # NOTE(review): kernel_group here is the IsisCube group, not its Kernels
    # subgroup; get_kernels_from_isis_pvl only indexes ["Kernels"] for string
    # input -- confirm pvl group .get() resolves the nested keys for this path.
    return get_kernels_from_isis_pvl(kernel_group, expand, format_as)
def get_kernels_from_isis_pvl(kernel_group, expand=True, format_as="list"):
    """Extract kernel file paths from an ISIS 'Kernels' PVL group.

    Parameters
    ----------
    kernel_group : str or pvl group
        Either a PVL string (parsed here, then its "Kernels" group is taken)
        or an already-extracted group supporting .get().
    expand : bool
        If True, expand $variables using ISIS DataDirectory preferences.
    format_as : str {'list', 'dict'}
        Return a flat ordered list of kernel paths, or the keyword->paths dict.

    Raises
    ------
    Exception
        If *format_as* is neither 'list' nor 'dict'.
    """
    # enforce key order
    mk_paths = OrderedDict.fromkeys(
        ['TargetPosition', 'InstrumentPosition',
         'InstrumentPointing', 'Frame', 'TargetAttitudeShape',
         'Instrument', 'InstrumentAddendum', 'LeapSecond',
         'SpacecraftClock', 'Extra'])

    if isinstance(kernel_group, str):
        kernel_group = pvl.loads(kernel_group)
        kernel_group = kernel_group["Kernels"]

    def load_table_data(key):
        # Normalize the entry to a list and strip ISIS 'Table' placeholders.
        # NOTE(review): if *key* is absent this leaves None and the 'in' test
        # below would raise TypeError -- presumably spiceinited cubes always
        # carry these four keys; verify.
        mk_paths[key] = kernel_group.get(key, None)
        if isinstance(mk_paths[key], str):
            mk_paths[key] = [mk_paths[key]]
        while 'Table' in mk_paths[key]: mk_paths[key].remove('Table')

    load_table_data('TargetPosition')
    load_table_data('InstrumentPosition')
    load_table_data('InstrumentPointing')
    load_table_data('TargetAttitudeShape')

    # the rest
    mk_paths['Frame'] = [kernel_group.get('Frame', None)]
    mk_paths['Instrument'] = [kernel_group.get('Instrument', None)]
    mk_paths['InstrumentAddendum'] = [kernel_group.get('InstrumentAddendum', None)]
    mk_paths['SpacecraftClock'] = [kernel_group.get('SpacecraftClock', None)]
    mk_paths['LeapSecond'] = [kernel_group.get('LeapSecond', None)]
    # NOTE(review): 'Clock' is not in the ordered key template above, so it
    # ends up after 'Extra' in iteration order -- confirm intended.
    mk_paths['Clock'] = [kernel_group.get('Clock', None)]
    mk_paths['Extra'] = [kernel_group.get('Extra', None)]

    if (format_as == 'list'):
        # get kernels as 1-d string list
        kernels = [kernel for kernel in chain.from_iterable(mk_paths.values()) if isinstance(kernel, str)]
        if expand:
            isisprefs = get_isis_preferences()
            kernels = [expandvars(expandvars(k, dict_to_lower(isisprefs['DataDirectory']))) for k in kernels]
        return kernels
    elif (format_as == 'dict'):
        # return created dict
        if expand:
            isisprefs = get_isis_preferences()
            for kern_list in mk_paths:
                for index, kern in enumerate(mk_paths[kern_list]):
                    if kern is not None:
                        mk_paths[kern_list][index] = expandvars(expandvars(kern, dict_to_lower(isisprefs['DataDirectory'])))
        return mk_paths
    else:
        raise Exception(f'{format_as} is not a valid return format')
def write_metakernel_from_cube(cube, mkpath=None):
    """Build NAIF metakernel text from the kernels listed in an ISIS cube label.

    If *mkpath* is given, the metakernel text is also written to that file.
    Returns the metakernel text.
    """
    # add ISISPREF paths as path_symbols and path_values to avoid custom expand logic
    prefs = get_isis_preferences()

    # make sure kernels are mk strings
    kernel_lines = ["'" + k + "'" for k in generate_kernels_from_cube(cube)]

    data_dirs = OrderedDict(prefs['DataDirectory'])
    value_lines = ["'" + os.path.expandvars(p) + "'" for p in data_dirs.values()]
    symbol_lines = ["'" + symbol.lower() + "'" for symbol in data_dirs.keys()]

    sections = [
        'KPL/MK',
        f'Metakernel Generated from an ISIS cube: {cube}',
        '\\begindata',
        'PATH_VALUES = (',
        '\n'.join(value_lines),
        ')',
        'PATH_SYMBOLS = (',
        '\n'.join(symbol_lines),
        ')',
        'KERNELS_TO_LOAD = (',
        '\n'.join(kernel_lines),
        ')',
        '\\begintext'
    ]
    body = '\n\n'.join(sections)

    if mkpath is not None:
        with open(mkpath, 'w') as out:
            out.write(body)

    return body
def get_ck_frames(kernel):
    """
    Get all of the reference frames defined in a kernel.

    Parameters
    ----------
    kernel : str
        The path to the kernel

    Returns
    -------
    ids : list
        The set of reference frames IDs defined in the kernel
    """
    # NOTE(review): "-t {}".format(kernel) is passed as a single argv element
    # ("-t /path/..."); confirm NAIF's ckbrief accepts the flag and the path
    # fused into one argument.
    ckbrief = subprocess.run(["ckbrief", "-t {}".format(kernel)],
                             capture_output=True,
                             check=True,
                             text=True)
    ids = set()
    # Frame IDs appear at line starts as (possibly negative) integers.
    for id in re.findall(r'^(-?[0-9]+)', ckbrief.stdout, flags=re.MULTILINE):
        ids.add(int(id))

    # Sort the output list for testability
    return sorted(list(ids))
def create_spk_dependency_tree(kernels):
    """
    construct the dependency tree for the body states in a set of kernels.

    Parameters
    ----------
    kernels : list
        The list of kernels to evaluate the dependencies in. If two
        kernels in this list contain the same information for the same
        pair of bodies, then the later kernel in the list will be
        identified in the kernel property for that edge in dep_tree.

    Returns
    -------
    dep_tree : nx.DiGraph
        The dependency tree for the kernels. Nodes are bodies. There is
        an edge from one node to another if the state of the body of the
        source node is defined relative to the state of the body of the
        destination node. The kernel edge property identifies what kernel
        the information for the edge is defined in.
    """
    dep_tree = nx.DiGraph()

    for kernel in kernels:
        # (leftover debug print(kernel) removed)
        # NOTE(review): "-c {}".format(kernel) is a single fused argv element,
        # matching get_ck_frames' usage -- verify NAIF's brief accepts it.
        brief = subprocess.run(["brief", "-c {}".format(kernel)],
                               capture_output=True,
                               check=True,
                               text=True)
        # Parse "<body> w.r.t. <relative body>" lines from brief's output.
        for body, rel_body in re.findall(r'([^()].\d+.*).w.r.t..+(..\d+)', brief.stdout):
            dep_tree.add_edge(int(body.strip('( )')), int(rel_body.strip('( )')), kernel=kernel)

    return dep_tree
def spkmerge_config_string(dep_tree, output_spk, bodies, lsk, start, stop):
    """
    Create the contents of an spkmerge config file that will produce a spk that
    completely defines the state of a list of bodies for a time range.

    Parameters
    ----------
    dep_tree : nx.DiGraph
        Dependency tree from create_kernel_dependency_tree that contains
        information about what the state of different bodies are relative
        to and where that information is stored.
    output_spk : str
        The path to the SPK that will be output by spkmerge
    bodies : list
        The list of body ID codes that need to be defined in the kernel
        created by spkmerge
    lsk : str
        The absolute path to the leap second kernel to use
    start : str
        The UTC start time for the kernel created by spkmerge
    stop : str
        The UTC stop time for the kernel created by spkmerge

    Returns
    -------
    : str
        The contents of an spkmerge config file that will produce a kernel that
        defines the state of the input bodies for the input time range.
    """
    input_kernels = set()
    all_bodies = set(bodies)
    for body in bodies:
        # Everything is ultimately defined relative to
        # SOLAR SYSTEM BARYCENTER (0) so find the path to it
        # (leftover debug prints of dep_tree.nodes() and body removed)
        dep_path = shortest_path(dep_tree, body, 0)
        all_bodies.update(dep_path)
        # Collect the kernel that defines each hop along the path.
        for i in range(len(dep_path) - 1):
            input_kernels.add(dep_tree[dep_path[i]][dep_path[i+1]]['kernel'])

    config = f"LEAPSECONDS_KERNEL = {lsk}\n"
    config += f"SPK_KERNEL = {output_spk}\n"
    config += f" BODIES = {', '.join([str(b) for b in all_bodies])}\n"
    config += f" BEGIN_TIME = {start}\n"
    config += f" END_TIME = {stop}\n"
    for kernel in input_kernels:
        config += f" SOURCE_SPK_KERNEL = {kernel}\n"
        config += f" INCLUDE_COMMENTS = no\n"
    return config
def write_metakernel_from_kernel_list(kernels):
    """
    Parameters
    ----------
    kernels : str
        list of kernel paths

    Returns
    -------
    : str
        Returns string representation of a Naif Metakernel file
    """
    absolute = [os.path.abspath(k) for k in kernels]
    prefix = os.path.commonprefix(absolute)
    # Express every kernel relative to the shared prefix via the PREFIX symbol.
    quoted = ["'$PREFIX{}'".format(k[len(prefix):]) for k in absolute]

    sections = [
        'KPL/MK',
        'Metakernel Generated from a kernel list by Ale',
        '\\begindata',
        'PATH_VALUES = (',
        "'" + prefix + "'",
        ')',
        'PATH_SYMBOLS = (',
        "'PREFIX'",
        ')',
        'KERNELS_TO_LOAD = (',
        '\n'.join(quoted),
        ')',
        '\\begintext'
    ]
    return '\n\n'.join(sections)
def duckpool(naifvar, start=0, length=10, default=None):
    """
    Duck typing friendly version of spiceypy kernel pool functions.

    Tries the double, character and integer pool getters in turn and
    returns the first successful result.

    Parameters
    ----------
    naifvar : str
        naif var string to query pool for

    start : int
        Index of first value

    length : int
        max number of values returned

    default : obj
        Default value to return if key is not found in kernel pool

    Returns
    -------
    : obj
        Spice value returned from spiceypy if found, default value otherwise
    """
    for f in [spice.gdpool, spice.gcpool, spice.gipool]:
        try:
            val = f(naifvar, start, length)
            # Unwrap single-element results for convenience.
            return val[0] if len(val) == 1 else val
        except Exception:
            # was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt
            continue
    return default
def query_kernel_pool(matchstr="*"):
    """
    Collect multiple keywords from the naif kernel pool based on a
    template string

    Parameters
    ----------
    matchstr : str
        matchi_c formatted str

    Returns
    -------
    : dict
        python dictionary of naif keywords in {keyword:value} format.
    """
    try:
        names = spice.gnpool(matchstr, 0, 100)
    except Exception as e:
        warnings.warn(f"kernel search for {matchstr} failed with {e}")
        names = []

    # Resolve each matched keyword through the duck-typed pool getter.
    return {name: duckpool(name) for name in names}
| 32.515625 | 124 | 0.606379 |
9961490c6fbb427bcea3d9d8bc556439f64e2f3c | 5,144 | py | Python | src/transform/transformer.py | aquariumbio/experiment-request | 026e3eb767c47f980a35004e9ded5e4e33553693 | [
"MIT"
] | null | null | null | src/transform/transformer.py | aquariumbio/experiment-request | 026e3eb767c47f980a35004e9ded5e4e33553693 | [
"MIT"
] | null | null | null | src/transform/transformer.py | aquariumbio/experiment-request | 026e3eb767c47f980a35004e9ded5e4e33553693 | [
"MIT"
] | null | null | null | from __future__ import annotations
import abc
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from cp_request.attribute import Attribute
from cp_request.measurement import Control, Measurement, Sample
from cp_request.named_entity import NamedEntity
from cp_request.treatment import Treatment
from cp_request.unit import Unit
from cp_request.value import Value
from cp_request.version import Version
from cp_request.experimental_request import ExperimentalRequest
from cp_request.design.design_block import DesignBlock
from cp_request.design.block_reference import BlockReference
from cp_request.design.generate_block import GenerateBlock
from cp_request.design.product_block import ProductBlock
from cp_request.design.replicate_block import ReplicateBlock
from cp_request.design.sum_block import SumBlock
from cp_request.design.subject_reference import SubjectReference
from cp_request.design.treatment_reference import (
TreatmentReference,
TreatmentValueReference
)
class RequestTransformer(abc.ABC):
    """
    Abstract transformer for structured request classes.

    Includes stubbed methods for each class, with each returning a deep copy.
    To create a transformer, inherit from this class, define an initializer, and
    each appropriate visit method.
    """
    @abc.abstractmethod
    def __init__(self, symbol_table):
        # Shared lookup of already-transformed blocks/entities/treatments so
        # repeated references resolve to a single transformed object.
        self.__symbol_table = symbol_table

    def transform_design_block(self, block: DesignBlock):
        """Transform a design block, rebuilding it around its transformed definition."""
        # NOTE(review): this calls block.transform(self), not
        # block.definition.transform(self) -- confirm DesignBlock.transform
        # dispatches to the definition rather than back to this method.
        return DesignBlock(
            label=block.label,
            definition=block.transform(self)
        )

    def transform_product_block(self, block: ProductBlock):
        """Transform each operand of a product block."""
        # (the loop variable previously shadowed the `block` parameter)
        transformed_blocks = [sub_block.transform(self) for sub_block in block.block_list]
        return ProductBlock(block_list=transformed_blocks)

    def transform_block_reference(self, reference: BlockReference):
        """Resolve a block reference, transforming its target at most once."""
        if reference.block_label in self.__symbol_table:
            block = self.__symbol_table[reference.block_label]
            # TODO: check type of object?
        else:
            block = reference.block.transform(self)
            self.__symbol_table[reference.block_label] = block
        return BlockReference(block=block)

    def transform_sum_block(self, block: SumBlock):
        """Transform each operand of a sum block."""
        # (the loop variable previously shadowed the `block` parameter)
        transformed_blocks = [sub_block.transform(self) for sub_block in block.block_list]
        return SumBlock(block_list=transformed_blocks)

    def transform_subject_reference(self, reference: SubjectReference):
        """Resolve a subject reference, transforming its entity at most once."""
        if reference.entity.name in self.__symbol_table:
            entity = self.__symbol_table[reference.entity.name]
            # TODO: check type of object?
        else:
            entity = reference.entity.transform(self)
            self.__symbol_table[reference.entity.name] = entity
        return SubjectReference(entity=entity)

    def transform_treatment_reference(self, reference: TreatmentReference):
        """Resolve a treatment reference, transforming its treatment at most once."""
        if reference.treatment_name in self.__symbol_table:
            treatment = self.__symbol_table[reference.treatment_name]
            # TODO: check type of object?
        else:
            treatment = reference.treatment.transform(self)
            self.__symbol_table[reference.treatment_name] = treatment
        return TreatmentReference(treatment=treatment)

    def transform_treatment_value_reference(
            self,
            reference: TreatmentValueReference):
        """Resolve a treatment-value reference, preserving the attached value."""
        if reference.treatment_name in self.__symbol_table:
            treatment = self.__symbol_table[reference.treatment_name]
            # TODO: check type of object?
        else:
            treatment = reference.treatment.transform(self)
            self.__symbol_table[reference.treatment_name] = treatment
        return TreatmentValueReference(
            treatment=treatment,
            value=reference.value
        )

    def transform_replicate_block(self, block: ReplicateBlock):
        """Transform the replicated block, keeping the replicate count."""
        return ReplicateBlock(
            count=block.count,
            block=block.block.transform(self)
        )

    def transform_generate_block(self, block: GenerateBlock):
        """Transform a generate block's treatment and each of its values."""
        transformed_values = [value.transform(self) for value in block.values]
        return GenerateBlock(
            treatment=block.treatment.transform(self),
            attribute_name=block.attribute_name,
            values=transformed_values
        )

    # The remaining hooks are no-ops by default; subclasses override the ones
    # they care about. Each implicitly returns None.
    def transform_attribute(self, attribute: Attribute):
        return

    def transform_version(self, version: Version):
        return

    def transform_treatment(self, treatment: Treatment):
        return

    def transform_sample(self, sample: Sample):
        return

    def transform_control(self, control: Control):
        return

    def transform_measurement(self, measurement: Measurement):
        return

    def transform_unit(self, unit: Unit):
        return

    def transform_value(self, value: Value):
        return

    def transform_named_entity(self, entity: NamedEntity):
        return

    def transform_experiment(self, experiment: ExperimentalRequest):
        return
| 35.232877 | 80 | 0.697512 |
53f462e06c422ce134e510cda70e5c860519e21d | 125 | py | Python | buglab/data/modelsync/__init__.py | microsoft/neurips21-self-supervised-bug-detection-and-repair | 4e51184a63aecd19174ee40fc6433260ab73d56e | [
"MIT"
] | 47 | 2021-10-19T16:15:41.000Z | 2022-03-21T11:51:43.000Z | buglab/data/modelsync/__init__.py | microsoft/neurips21-self-supervised-bug-detection-and-repair | 4e51184a63aecd19174ee40fc6433260ab73d56e | [
"MIT"
] | 2 | 2022-01-10T09:41:44.000Z | 2022-03-09T12:54:55.000Z | buglab/data/modelsync/__init__.py | microsoft/neurips21-self-supervised-bug-detection-and-repair | 4e51184a63aecd19174ee40fc6433260ab73d56e | [
"MIT"
] | 11 | 2021-11-30T13:25:03.000Z | 2022-03-16T11:38:08.000Z | from .client import MockModelSyncClient, ModelSyncClient
from .data import ModelSyncData
from .server import ModelSyncServer
| 31.25 | 56 | 0.864 |
f65e4cc6c07a73bc43977ec69bff34f542a64e5c | 127 | py | Python | control_gpio_pi/92high.py | Rahul14singh/remote_raspberrypigpio_control | 315bba85024a6c2612ac84e21858fe93cbb3cf8f | [
"MIT"
] | 10 | 2017-06-28T17:35:43.000Z | 2022-02-22T22:57:27.000Z | control_gpio_pi/92high.py | Rahul14singh/Remote_raspberrypi_GPIO_Control | 315bba85024a6c2612ac84e21858fe93cbb3cf8f | [
"MIT"
] | null | null | null | control_gpio_pi/92high.py | Rahul14singh/Remote_raspberrypi_GPIO_Control | 315bba85024a6c2612ac84e21858fe93cbb3cf8f | [
"MIT"
] | null | null | null | import RPi.GPIO as ir
# Drive board pin 24 high. (Python 3 fix: print is a function, not a
# statement; the call form also runs unchanged under Python 2.)
print("PIN 24 High")
ir.setwarnings(False)
ir.setmode(ir.BOARD)   # use physical BOARD pin numbering
ir.setup(24, ir.OUT)
ir.output(24, ir.HIGH)
7cd759e8b66f959231a1eaf82bb30bbdaa4fadbd | 395 | py | Python | gh_action/wsgi.py | SmartC2016/github_action | 42f3e1d7f9f15c4253812388a37360ee403a304d | [
"MIT"
] | null | null | null | gh_action/wsgi.py | SmartC2016/github_action | 42f3e1d7f9f15c4253812388a37360ee403a304d | [
"MIT"
] | null | null | null | gh_action/wsgi.py | SmartC2016/github_action | 42f3e1d7f9f15c4253812388a37360ee403a304d | [
"MIT"
] | null | null | null | """
WSGI config for gh_action project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gh_action.settings')

# Module-level WSGI callable picked up by WSGI servers.
application = get_wsgi_application()
| 23.235294 | 78 | 0.787342 |
a02af4cf8a1c1bcd44269410cf8fee8b3191c2a7 | 11,669 | py | Python | annotation/regexes.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | 5 | 2021-01-14T03:34:42.000Z | 2022-03-07T15:34:18.000Z | annotation/regexes.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | 551 | 2020-10-19T00:02:38.000Z | 2022-03-30T02:18:22.000Z | annotation/regexes.py | SACGF/variantgrid | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | [
"RSA-MD"
] | null | null | null | import re
from enum import Enum
from operator import attrgetter
from re import RegexFlag
from typing import List, Union, Match, Dict, Optional
from annotation.models.models import Citation
from annotation.models.models_enums import CitationSource
from library.log_utils import report_message
from ontology.models import OntologyService, OntologyTerm
class MatchType(Enum):
    """How the identifier text following a DbRefRegex prefix is matched
    (each member corresponds to one of the compiled patterns below)."""
    NUMERIC = 1
    ALPHA_NUMERIC = 2
    SIMPLE_NUMBERS = 3
    ENTIRE_UNTIL_SPACE = 4
class DbRefRegex:
    """Describes how to detect one kind of external database reference in text
    and how to turn a detected id into a URL. Instances self-register."""

    _all_db_ref_regexes: List['DbRefRegex'] = []

    def __init__(self,
                 db: str,
                 prefixes: Union[str, List[str]],
                 link: str,
                 match_type: MatchType = MatchType.NUMERIC,
                 min_length: int = 3,
                 expected_length: Optional[int] = None):
        """
        Creates an instance of a external id/link detection and automatically registers it with the complete collection.
        The end result allowing us to scan text for any number of kinds of links.
        :param db: An identifier uniquely associated with the DB
        :param prefixes: A single string or array of strings that will be scanned for in text - IMPORTANT - these will be interpreted in regex
        :param link: The URL the link will go to with ${1} being replaced with the value found after the prefix
        :param match_type: Determines if the link is a series of numbers, alpha-numeric etc - be specific to avoid false positives
        :param min_length: How long the ID part must be after the prefix, helps avoid false positives such as the gene rs1 being mistaken for SNP
        """
        if isinstance(prefixes, str):
            prefixes = [prefixes]
        self.db = db
        self.prefixes = prefixes
        self.link = link
        self.match_type = match_type
        self.min_length = min_length or 1
        self.expected_length = expected_length
        # Auto-register so text scanning can consider every known reference kind.
        self._all_db_ref_regexes.append(self)

    def link_for(self, idx: int) -> str:
        """Return the URL for *idx*, zero-padding the id when an expected length is set."""
        id_str = self.fix_id(str(idx))
        return self.link.replace("${1}", id_str)

    def fix_id(self, id_str: str) -> str:
        """Left-pad *id_str* with zeros to the configured expected length (if any)."""
        if self.expected_length:
            id_str = id_str.rjust(self.expected_length, '0')
        return id_str

    def __eq__(self, other):
        # db should be unique in DbRefRegex
        if not isinstance(other, DbRefRegex):
            # was: AttributeError when compared against a foreign type
            return NotImplemented
        return self.db == other.db

    def __hash__(self):
        return hash(self.db)
class DbRegexes:
    """Registry of all known external-database reference patterns.
    Each constant is a DbRefRegex, which also self-registers on construction."""
    CLINGEN = DbRefRegex(db="ClinGen", prefixes="CA", link="http://reg.clinicalgenome.org/redmine/projects/registry/genboree_registry/by_caid?caid=CA${1}", match_type=MatchType.SIMPLE_NUMBERS)
    CLINVAR = DbRefRegex(db="Clinvar", prefixes="VariationID", link="https://www.ncbi.nlm.nih.gov/clinvar/variation/${1}")
    COSMIC = DbRefRegex(db="COSMIC", prefixes="COSM", link="https://cancer.sanger.ac.uk/cosmic/mutation/overview?id=${1}")
    DOID = DbRefRegex(db="DOID", prefixes="DOID", link=OntologyService.URLS[OntologyService.DOID], min_length=OntologyService.EXPECTED_LENGTHS[OntologyService.DOID], expected_length=OntologyService.EXPECTED_LENGTHS[OntologyService.DOID])
    GTR = DbRefRegex(db="GTR", prefixes="GTR", link="https://www.ncbi.nlm.nih.gov/gtr/tests/${1}/overview/")
    HP = DbRefRegex(db="HP", prefixes=["HPO", "HP"], link=OntologyService.URLS[OntologyService.HPO], expected_length=OntologyService.EXPECTED_LENGTHS[OntologyService.HPO])
    HGNC = DbRefRegex(db="HGNC", prefixes="HGNC", link=OntologyService.URLS[OntologyService.HGNC], expected_length=OntologyService.EXPECTED_LENGTHS[OntologyService.HGNC])
    MEDGEN = DbRefRegex(db="MedGen", prefixes="MedGen", link="https://www.ncbi.nlm.nih.gov/medgen/?term=${1}", match_type=MatchType.ALPHA_NUMERIC)
    MONDO = DbRefRegex(db="MONDO", prefixes="MONDO", link=OntologyService.URLS[OntologyService.MONDO], expected_length=OntologyService.EXPECTED_LENGTHS[OntologyService.MONDO])
    NCBIBookShelf = DbRefRegex(db="NCBIBookShelf", prefixes=["NCBIBookShelf"], link="https://www.ncbi.nlm.nih.gov/books/${1}", match_type=MatchType.ALPHA_NUMERIC)
    NIHMS = DbRefRegex(db="NIHMS", prefixes="NIHMS", link="https://www.ncbi.nlm.nih.gov/pubmed/?term=NIHMS${1}")
    # smallest OMIM starts with a 1, so there's no 0 padding there, expect min length
    OMIM = DbRefRegex(db="OMIM", prefixes=["OMIM", "MIM"], link=OntologyService.URLS[OntologyService.OMIM], min_length=OntologyService.EXPECTED_LENGTHS[OntologyService.OMIM], expected_length=OntologyService.EXPECTED_LENGTHS[OntologyService.OMIM])
    ORPHA = DbRefRegex(db="Orphanet", prefixes=["ORPHANET", "ORPHA"], link=OntologyService.URLS[OntologyService.ORPHANET], expected_length=OntologyService.EXPECTED_LENGTHS[OntologyService.ORPHANET])
    PMC = DbRefRegex(db="PMC", prefixes="PMCID", link="https://www.ncbi.nlm.nih.gov/pubmed/?term=PMC${1}")
    PUBMED = DbRefRegex(db="PubMed", prefixes=["PubMed", "PMID", "PubMedCentral"], link="https://www.ncbi.nlm.nih.gov/pubmed/?term=${1}")
    SNP = DbRefRegex(db="SNP", prefixes="rs", link="https://www.ncbi.nlm.nih.gov/snp/${1}", match_type=MatchType.SIMPLE_NUMBERS)
    SNOMEDCT = DbRefRegex(db="SNOMED-CT", prefixes=["SNOMED-CT", "SNOMEDCT"], link="https://snomedbrowser.com/Codes/Details/${1}")
    UNIPROTKB = DbRefRegex(db="UniProtKB", prefixes="UniProtKB", link="https://www.uniprot.org/uniprot/${1}", match_type=MatchType.ALPHA_NUMERIC)
    # Raw URLs are captured whole (everything up to whitespace).
    HTTP = DbRefRegex(db="HTTP", prefixes="http:", link="http:${1}", match_type=MatchType.ENTIRE_UNTIL_SPACE)
    HTTPS = DbRefRegex(db="HTTPS", prefixes="https:", link="https:${1}", match_type=MatchType.ENTIRE_UNTIL_SPACE)
    FTP = DbRefRegex(db="FTP", prefixes="ftp:", link="ftp:${1}", match_type=MatchType.ENTIRE_UNTIL_SPACE)
class DbRefRegexResult:
    """A single external database reference extracted from free text.

    Holds the :class:`DbRefRegex` that matched, the normalised id and the
    regex ``Match`` object, and attempts to resolve the reference against
    local ontology / citation records for a display summary and internal pk.
    """

    def __init__(self, cregx: "DbRefRegex", idx: str, match: "Match"):
        self.cregx = cregx
        self.idx = cregx.fix_id(idx)
        self.match = match
        self.internal_id = None  # pk of the matching local Citation, if resolvable
        self.summary = None  # name of the locally-known ontology term, if any
        # this is where we check our database to see if we know what this reference is about
        if self.db in OntologyService.LOCAL_ONTOLOGY_PREFIXES:
            term_id = f"{self.db}:{self.idx}"
            if term := OntologyTerm.objects.filter(id=term_id).first():
                self.summary = term.name
        try:
            if source := CitationSource.CODES.get(self.db):
                citation, _ = Citation.objects.get_or_create(citation_source=source, citation_id=idx)
                self.internal_id = citation.pk
        except Exception:
            # Was a bare ``except:`` - narrowed so SystemExit/KeyboardInterrupt
            # are no longer swallowed. Resolution failures are reported, not raised.
            report_message(message=f"Could not resolve external DB reference for {self.db}:{self.idx}")

    @property
    def id_fixed(self):
        """The reference formatted as '<db>:<normalised id>'."""
        return f"{self.db}:{self.cregx.fix_id(self.idx)}"

    @property
    def url(self):
        """Link for this reference, from the regex's URL template ('${1}' = id)."""
        return self.cregx.link.replace('${1}', self.idx)

    @property
    def idx_num(self):
        """
        Attempt to convert the id to a number, only use for sorting.
        Some ids have a version suffix, so using float for the sake of decimals
        """
        try:
            return float(self.idx)
        except (TypeError, ValueError):
            # Non-numeric ids (e.g. alphanumeric accessions) all sort first.
            return 0

    @property
    def db(self):
        """Name of the database this reference points into."""
        return self.cregx.db

    def to_json(self):
        """Serializable summary of this reference for API responses."""
        jsonny = {'id': f'{self.db}: {self.idx}', 'db': self.db, 'idx': self.idx, 'url': self.url}
        if self.summary:
            jsonny['summary'] = self.summary
        if self.internal_id:
            jsonny['internal_id'] = self.internal_id
        return jsonny

    def __str__(self):
        return f'{self.cregx.db}:{self.idx}'
# Reusable id-extraction patterns, applied at the position immediately after
# a matched database prefix. Each captures the id in group(1).
_simple_numbers = re.compile('([0-9]{3,})')
# Optional separator characters (colon/hash/whitespace) followed by digits.
_num_regex = re.compile('[:#\\s]*([0-9]+)')
# Continuation element of a comma-separated list of numeric ids.
_num_repeat_regex = re.compile('\\s*,[:#\\s]*([0-9]+)')
_word_regex = re.compile('[:# ]*([A-Za-z0-9_-]+)')  # no repeats for words, too risky
# Lazily grab everything up to a closing paren, whitespace, end of string or '. '.
_entire_until_space = re.compile('(.*?)(?:[)]|\\s|$|[.] )')
class DbRefRegexes:
    """Finds external database references in free text.

    Built from a list of :class:`DbRefRegex` patterns; a single
    case-insensitive alternation over every known prefix drives the scan,
    then the per-database match_type selects how the id itself is extracted.
    """

    def __init__(self, regexes: List[DbRefRegex]):
        self.regexes = regexes
        self.prefix_map: Dict[str, DbRefRegex] = dict()
        prefixes: List[str] = list()
        for regex in self.regexes:
            for prefix in regex.prefixes:
                # Lower-cased keys so lookups after an IGNORECASE match succeed.
                prefix = prefix.lower()
                self.prefix_map[prefix] = regex
                prefixes.append(prefix)
        self.prefix_regex = re.compile('(' + '|'.join(prefixes) + ')', RegexFlag.IGNORECASE)

    def link_html(self, text: str) -> str:
        """Return ``text`` with each recognised reference wrapped in an <a> tag."""
        # Process matches right-to-left so earlier spans stay valid while the
        # string grows as anchor markup is inserted.
        db_matches = reversed(self.search(text, sort=False))
        for db_match in db_matches:
            span = db_match.match.span()
            # Keep a single leading separator character outside the link text.
            # NOTE(review): only one separator is skipped - confirm ids are
            # never preceded by more than one separator character here.
            if text[span[0]] in (':', ',', ' ', '#'):
                span = [span[0]+1, span[1]]
            before, middle, after = text[0:span[0]], text[span[0]:span[1]], text[span[1]:]
            text = f"{before}<a href='{db_match.url}'>{middle}</a>{after}"
        return text

    def search(self, text: str, default_regex: DbRefRegex = None, sort: bool = True) -> List[DbRefRegexResult]:
        """
        @param text The text to be searched for ID patterns
        @param default_regex If the field is expected to be a specific kind of id
        (e.g. db_rs_id should default to SNP). Only gets used if no match can be found
        and will look for just the number part, e.g. if db_rs_id is "23432" instead of "rs23432"
        it will still work).
        @param sort If true sorts the results by database and id, otherwise leaves them in order of discovery
        """
        results: List[DbRefRegexResult] = list()

        def append_result_if_length(db_regex: DbRefRegex, match: Optional[Match]) -> bool:
            """
            :param db_regex: The Database Regex we were searching for
            :param match: The regex match
            :return: True if the ID looked valid and was recorded, False otherwise
            """
            nonlocal results
            # min_length guards against short incidental digit runs.
            if match and len(match.group(1)) >= db_regex.min_length:
                results.append(DbRefRegexResult(cregx=db_regex, idx=match.group(1), match=match))
                return True
            return False

        for match in re.finditer(self.prefix_regex, text):
            prefix = match.group(1).lower()
            db_regex = self.prefix_map[prefix]
            # Id extraction resumes right where the prefix match ended.
            find_from = match.end(0)
            if db_regex.match_type == MatchType.SIMPLE_NUMBERS:
                match = _simple_numbers.match(text, find_from)
                append_result_if_length(db_regex, match)
            elif db_regex.match_type == MatchType.ALPHA_NUMERIC:
                match = _word_regex.match(text, find_from)
                append_result_if_length(db_regex, match)
            elif db_regex.match_type == MatchType.ENTIRE_UNTIL_SPACE:
                match = _entire_until_space.match(text, find_from)
                append_result_if_length(db_regex, match)
            else:
                # Default numeric ids: also consume a comma-separated list of
                # additional ids following the same prefix.
                match = _num_regex.match(text, find_from)
                if append_result_if_length(db_regex, match):
                    find_from = match.end(0)
                    while True:
                        match = _num_repeat_regex.match(text, find_from)
                        if append_result_if_length(db_regex, match):
                            find_from = match.end(0)
                        else:
                            break
        if not results and default_regex:
            # No prefixed match anywhere: try to read the whole field as a
            # bare id of the expected kind.
            match = None
            if default_regex.match_type == MatchType.SIMPLE_NUMBERS:
                # NOTE(review): this branch uses the alphanumeric _word_regex
                # rather than _simple_numbers - confirm the asymmetry is intended.
                match = _word_regex.match(text)
            else:
                match = _num_regex.match(text)
            append_result_if_length(default_regex, match)
        if sort:
            results.sort(key=attrgetter('db', 'idx_num', 'idx'))
        return results
db_ref_regexes = DbRefRegexes(DbRefRegex._all_db_ref_regexes)
| 48.620833 | 246 | 0.645128 |
e17c4c36199e125584120783d3cca398b8f1e4d5 | 1,182 | py | Python | grow/commands/subcommands/inspect_stats.py | matthiasrohmer/grow | 88fae5026040ad0f7dd9260ee290cebbe49b39d7 | [
"MIT"
] | null | null | null | grow/commands/subcommands/inspect_stats.py | matthiasrohmer/grow | 88fae5026040ad0f7dd9260ee290cebbe49b39d7 | [
"MIT"
] | null | null | null | grow/commands/subcommands/inspect_stats.py | matthiasrohmer/grow | 88fae5026040ad0f7dd9260ee290cebbe49b39d7 | [
"MIT"
] | null | null | null | """Subcommand for displaying pod stats."""
import os
import click
from grow.commands import shared
from grow.common import rc_config
from grow.deployments import stats as stats_lib
from grow.pods import pods
from grow import storage
CFG = rc_config.RC_CONFIG.prefixed('grow.inspect.stats')
@click.command(name='stats')
@shared.pod_path_argument
# NOTE(review): ``is_flag`` is being fed the RC-config value while ``default``
# is hard-coded False; this looks swapped (default=CFG.get('full', False)
# seems more likely intended) - confirm before changing.
@click.option('--full/--no-full', '-f', is_flag=CFG.get('full', True), default=False,
              help='Whether to show full stats. By default, only '
              'short stats are displayed. Short stats do not '
              'require a build and may be faster to generate.')
@shared.reroute_option(CFG)
def inspect_stats(pod_path, full, use_reroute):
    """Displays statistics about the pod."""
    # Pod paths given on the command line are resolved relative to the cwd.
    root = os.path.abspath(os.path.join(os.getcwd(), pod_path))
    pod = pods.Pod(root, storage=storage.FileStorage, use_reroute=use_reroute)
    try:
        with pod.profile.timer('grow_inspect_stats'):
            pod_stats = stats_lib.Stats(pod, full=full)
        click.echo_via_pager('\n\n'.join(pod_stats.to_tables()))
    except pods.Error as err:
        # Surface pod errors as clean CLI errors rather than tracebacks.
        raise click.ClickException(str(err))
    return pod
| 35.818182 | 85 | 0.692893 |
cf9cb82140636e09defc032445dc361a3cdf2aaf | 897 | py | Python | cmdb/ipmi/urls.py | longgeek/muop_v1 | e1dda2261384afb51429cfe1efbabdf17c2bbba0 | [
"Apache-2.0"
] | null | null | null | cmdb/ipmi/urls.py | longgeek/muop_v1 | e1dda2261384afb51429cfe1efbabdf17c2bbba0 | [
"Apache-2.0"
] | null | null | null | cmdb/ipmi/urls.py | longgeek/muop_v1 | e1dda2261384afb51429cfe1efbabdf17c2bbba0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Longgeek <longgeek@gmail.com>
from django.conf.urls import url
from cmdb.ipmi.actions import bmc, fan, power, event, bootdev, temperature
# IPMI management routes. Every pattern captures a numeric target id in
# group 1 and hands it to the corresponding action view.
# NOTE(review): '([0-9]*)' also matches an empty id (zero digits) - confirm
# whether '[0-9]+' was intended.
urlpatterns = [
    url(r'^fan/([0-9]*)/$', fan.status, name='fan_status'),
    url(r'^time/([0-9]*)/$', bmc.time, name='bmc_time'),
    url(r'^restart/([0-9]*)/$', bmc.restart, name='bmc_restart'),
    url(r'^version/([0-9]*)/$', bmc.version, name='bmc_version'),
    url(r'^event/([0-9]*)/$', event.log, name='event_log'),
    url(r'^bootdev/([0-9]*)/$', bootdev.from_disk, name='bootdev_fromdisk'),
    url(r'^power/on/([0-9]*)/$', power.on, name='power_on'),
    url(r'^power/status/([0-9]*)/$', power.status, name='power_status'),
    url(r'^power/hardware/([0-9]*)/$', power.hardware, name='power_hardware'),
    url(r'^temperature/([0-9]*)/$', temperature.inlet_exhaust, name='temperature'),
]
| 44.85 | 83 | 0.615385 |
79bc123c7b9a8ab995cefe34416776b6a58ee585 | 1,462 | py | Python | components/monoprice_10761/number/__init__.py | jhenkens/monoprice-6ch-esphome | f49bb7b34cb35670fe9513d7e9d18eb598e4d3bc | [
"MIT"
] | 4 | 2021-12-01T05:22:18.000Z | 2022-02-22T07:26:17.000Z | components/monoprice_10761/number/__init__.py | jhenkens/monoprice-6ch-esphome | f49bb7b34cb35670fe9513d7e9d18eb598e4d3bc | [
"MIT"
] | null | null | null | components/monoprice_10761/number/__init__.py | jhenkens/monoprice-6ch-esphome | f49bb7b34cb35670fe9513d7e9d18eb598e4d3bc | [
"MIT"
] | null | null | null | from esphome.components import number
import esphome.config_validation as cv
import esphome.codegen as cg
from esphome.const import CONF_ID
from .. import monoprice_10761_ns, CONF_MONOPRICE_10761_ID, CONF_ZONE, CONF_COMMAND, Monoprice10761
# ESPHome platform metadata: this number platform requires the
# monoprice_10761 hub component to be configured.
DEPENDENCIES = ["monoprice_10761"]
CODEOWNERS = ["@jhenkens"]
# Generated C++ class backing each configured number entity.
Monoprice10761Number = monoprice_10761_ns.class_("Monoprice10761Number", number.Number, cg.Component)
# Schema: a zone id (11-36) plus one of the four supported command codes;
# the value range/offset for each command is applied in to_code below.
CONFIG_SCHEMA = number.NUMBER_SCHEMA.extend(
    {
        cv.GenerateID(): cv.declare_id(Monoprice10761Number),
        cv.GenerateID(CONF_MONOPRICE_10761_ID): cv.use_id(Monoprice10761),
        cv.Required(CONF_ZONE): cv.int_range(min=11, max=36),
        cv.Required(CONF_COMMAND): cv.one_of("VO", "TR", "BS", "BL", upper=True),
    }
).extend(cv.COMPONENT_SCHEMA)
async def to_code(config):
    """Generate the C++ setup code for one Monoprice 10761 number entity.

    Registers the component and number entity, wires it to the parent hub,
    and applies the per-command value range:
      * "VO": 0..38 with no offset;
      * "BL": -10..10, shifted onto the device range via offset -10;
      * "TR"/"BS": -7..7, shifted via offset -7.
    (Presumably VO/TR/BS/BL map to volume/treble/bass/balance - confirm
    against the hub component.)
    """
    var = cg.new_Pvariable(config[CONF_ID])
    await cg.register_component(var, config)
    # Renamed from ``min``/``max``, which shadowed the Python builtins.
    min_value = 0
    max_value = 38
    offset = 0
    command = config[CONF_COMMAND]
    if command == "BL":
        min_value = -10
        max_value = 10
        offset = -10
    elif command in ("TR", "BS"):
        min_value = -7
        max_value = 7
        offset = -7
    await number.register_number(var, config, min_value=min_value, max_value=max_value, step=1)
    paren = await cg.get_variable(config[CONF_MONOPRICE_10761_ID])
    cg.add(var.set_parent(paren))
    cg.add(var.set_zone(config[CONF_ZONE]))
    cg.add(var.set_data_type(config[CONF_COMMAND]))
    cg.add(var.set_offset(offset))
| 32.488889 | 101 | 0.695622 |
04413abe9b41273c1d064c5183a530f5397610ff | 21,417 | py | Python | ibeis/main_module.py | brmscheiner/ibeis | 9bb93a6cd74ac47921e734c80917a38609dfe661 | [
"Apache-2.0"
] | null | null | null | ibeis/main_module.py | brmscheiner/ibeis | 9bb93a6cd74ac47921e734c80917a38609dfe661 | [
"Apache-2.0"
] | null | null | null | ibeis/main_module.py | brmscheiner/ibeis | 9bb93a6cd74ac47921e734c80917a38609dfe661 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
This module defines the entry point into the IBEIS system
ibeis.opendb and ibeis.main are the main entry points
"""
from __future__ import absolute_import, division, print_function
#from six.moves import builtins
import sys
import multiprocessing
#try:
import utool as ut
profile = ut.profile
#profile = getattr(builtins, 'profile')
#except AttributeError:
#def profile(func):
#    return func
# Coarse CLI-derived flags, read directly from sys.argv before the full
# argument parser runs.
QUIET = '--quiet' in sys.argv
NOT_QUIET = not QUIET
USE_GUI = '--gui' in sys.argv or '--nogui' not in sys.argv
# utool-injected print/reload/profile helpers for this module (this rebinds
# ``profile`` assigned above).
(print, rrr, profile) = ut.inject2(__name__)
def _on_ctrl_c(signal, frame):
proc_name = multiprocessing.current_process().name
print('[ibeis.main_module] Caught ctrl+c in %s' % (proc_name,))
sys.exit(0)
# try:
# _close_parallel()
# except Exception as ex:
# print('Something very bad happened' + repr(ex))
# finally:
# print('[ibeis.main_module] sys.exit(0)')
# sys.exit(0)
#-----------------------
# private init functions
def _init_signals():
    """Install the ibeis ctrl+c handler for SIGINT."""
    from signal import SIGINT, signal as set_handler
    set_handler(SIGINT, _on_ctrl_c)
def _reset_signals():
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL) # reset ctrl+c behavior
def _parse_args():
    """Parse ibeis command line arguments via the ``ibeis.params`` module
    (results are read elsewhere as ``params.args``)."""
    from ibeis import params
    params.parse_args()
def _init_matplotlib():
    """Initialize the matplotlib backend through plottool's startup helper."""
    from plottool import __MPL_INIT__
    __MPL_INIT__.init_matplotlib()
def _init_gui(activate=True):
    """Create the Qt application and the main IBEIS window backend.

    Args:
        activate (bool): when True, raise/show the main window immediately.

    Returns:
        guiback.MainWindowBackend: handle for the GUI backend.
    """
    import guitool
    if NOT_QUIET:
        print('[main] _init_gui()')
    guitool.ensure_qtapp()
    from ibeis.gui import guiback
    main_back = guiback.MainWindowBackend()
    if activate:
        guitool.activate_qwindow(main_back.mainwin)
    return main_back
def _init_ibeis(dbdir=None, verbose=None, use_cache=True, web=None, **kwargs):
    """
    Private function that calls code to create an ibeis controller

    Args:
        dbdir (str): database directory; when None no controller is built
        verbose (bool): defaults to ut.VERBOSE when None
        use_cache (bool): reuse a previously constructed controller if possible
        web (bool): start the web app; when None, derived from CLI flags
        **kwargs: 'request_dbversion' is consumed for the controller and
            'force_serial' is read (but left in); the remainder is forwarded
            to the web app starter

    Returns:
        IBEISController or None
    """
    import utool as ut
    from ibeis import params
    from ibeis.control import IBEISControl
    if verbose is None:
        verbose = ut.VERBOSE
    if verbose and NOT_QUIET:
        print('[main] _init_ibeis()')
    # Use command line dbdir unless user specifies it
    if dbdir is None:
        ibs = None
        print('[main!] WARNING: args.dbdir is None')
    else:
        # Copy before popping so the caller's kwargs dict is not mutated.
        kwargs = kwargs.copy()
        request_dbversion = kwargs.pop('request_dbversion', None)
        force_serial = kwargs.get('force_serial', None)
        ibs = IBEISControl.request_IBEISController(
            dbdir=dbdir, use_cache=use_cache,
            request_dbversion=request_dbversion,
            force_serial=force_serial)
        if web is None:
            web = ut.get_argflag(('--webapp', '--webapi', '--web', '--browser'),
                                 help_='automatically launch the web app / web api')
        #web = params.args.webapp
        if web:
            from ibeis.web import app
            port = params.args.webport
            app.start_from_ibeis(ibs, port=port, **kwargs)
    return ibs
def _init_parallel():
    """Configure utool's parallel worker count from the parsed CLI arguments."""
    import utool as ut
    if ut.VERBOSE:
        print('_init_parallel')
    from utool import util_parallel
    from ibeis import params
    # Import any modules which parallel process will use here
    # so they are accessible when the program forks
    #from utool import util_sysreq
    #util_sysreq.ensure_in_pythonpath('hesaff')
    #util_sysreq.ensure_in_pythonpath('pyrf')
    #util_sysreq.ensure_in_pythonpath('code')
    #import pyhesaff # NOQA
    #import pyrf # NOQA
    from ibeis import core_annots  # NOQA
    #.algo.preproc import preproc_chip # NOQA
    util_parallel.set_num_procs(params.args.num_procs)
    #if PREINIT_MULTIPROCESSING_POOLS:
    #    util_parallel.init_pool(params.args.num_procs)
# def _close_parallel():
# #if ut.VERBOSE:
# # print('_close_parallel')
# try:
# from utool import util_parallel
# util_parallel.close_pool(terminate=True)
# except Exception as ex:
# import utool as ut
# ut.printex(ex, 'error closing parallel')
# raise
def _init_numpy():
    """Configure numpy floating point error handling (all errors ignored)."""
    import utool as ut
    import numpy as np
    if ut.VERBOSE:
        print('_init_numpy')
    # First entry of ['ignore', 'warn', 'raise', 'call', 'print', 'log'].
    on_err = 'ignore'
    np.seterr(divide=on_err, over=on_err, under=on_err, invalid=on_err)
#-----------------------
# private loop functions
def _guitool_loop(main_locals, ipy=False):
    """Run the Qt application loop for the GUI backend in ``main_locals``.

    Args:
        main_locals (dict): expected to contain the 'back' GUI backend
        ipy (bool): IPython-embed mode; the qt loop will not block
    """
    import guitool
    from ibeis import params
    print('[main] guitool loop')
    back = main_locals.get('back', None)
    if back is not None:
        loop_freq = params.args.loop_freq
        ipy = ipy or params.args.cmd
        guitool.qtapp_loop(qwin=back.mainwin, ipy=ipy, frequency=loop_freq, init_signals=False)
        if ipy:  # If we're in IPython, the qtapp loop won't block, so we need to refresh
            back.refresh_state()
    else:
        if NOT_QUIET:
            print('WARNING: back was not expected to be None')
def set_newfile_permissions():
    r"""
    sets this processes default permission bits when creating new files

    Applies the most permissive umask (0o000) so newly created files keep
    all of the mode bits requested at creation time.

    Returns:
        int: the previously active umask

    CommandLine:
        python -m ibeis.main_module --test-set_newfile_permissions

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.main_module import *  # NOQA
        >>> import os
        >>> import utool as ut
        >>> # write before umask
        >>> ut.delete('tempfile1.txt')
        >>> ut.write_to('tempfile1.txt', 'foo')
        >>> stat_result1 = os.stat('tempfile1.txt')
        >>> # apply umask
        >>> set_newfile_permissions()
        >>> ut.delete('tempfile2.txt')
        >>> ut.write_to('tempfile2.txt', 'foo')
        >>> stat_result2 = os.stat('tempfile2.txt')
        >>> # verify results
        >>> print('old masked all bits = %o' % (stat_result1.st_mode))
        >>> print('new masked all bits = %o' % (stat_result2.st_mode))
    """
    import os
    # The umask subtracts its bits from the mode requested at file creation;
    # 0o000 subtracts nothing, i.e. the most permissive setting.
    return os.umask(0o000)
def main(gui=True, dbdir=None, defaultdb='cache',
         allow_newdir=False, db=None,
         delete_ibsdir=False,
         **kwargs):
    """
    Program entry point
    Inits the system environment, an IBEISControl, and a GUI if requested

    Args:
        gui (bool): (default=True) If gui is False a gui instance will not be created
        dbdir (None): full directory of a database to load
        db (None): name of database to load relative to the workdir
        allow_newdir (bool): (default=False) if False an error is raised if a
            a new database is created
        delete_ibsdir (bool): (default=False) delete the database dir first;
            requires allow_newdir
        defaultdb (str): codename of database to load if db and dbdir is None. a value
            of 'cache' will open the last database opened with the GUI.
        **kwargs: forwarded to the preload commands; 'activate' controls
            whether the GUI window is raised

    Returns:
        dict: main_locals
    """
    # Environment setup must happen before any controller/GUI work.
    _preload()
    set_newfile_permissions()
    from ibeis.init import main_commands
    from ibeis.init import sysres
    # Display a visible intro message
    msg = '''
    _____ ______ _______ _____ _______
    | |_____] |______ | |______
    __|__ |_____] |______ __|__ ______|
    '''
    if NOT_QUIET:
        print(msg)
    # Init the only two main system api handles
    ibs = None
    back = None
    if NOT_QUIET:
        print('[main] ibeis.main_module.main()')
    DIAGNOSTICS = NOT_QUIET
    if DIAGNOSTICS:
        import os
        import utool as ut
        import ibeis
        print('[main] MAIN DIAGNOSTICS')
        print('[main] * username = %r' % (ut.get_user_name()))
        print('[main] * ibeis.__version__ = %r' % (ibeis.__version__,))
        print('[main] * computername = %r' % (ut.get_computer_name()))
        print('[main] * cwd = %r' % (os.getcwd(),))
        print('[main] * sys.argv = %r' % (sys.argv,))
    # Parse directory to be loaded from command line args
    # and explicit kwargs
    dbdir = sysres.get_args_dbdir(defaultdb=defaultdb,
                                  allow_newdir=allow_newdir, db=db,
                                  dbdir=dbdir)
    if delete_ibsdir is True:
        from ibeis.other import ibsfuncs
        assert allow_newdir, 'must be making new directory if you are deleting everything!'
        ibsfuncs.delete_ibeis_database(dbdir)
    #limit = sys.getrecursionlimit()
    #if limit == 1000:
    #    print('Setting Recursion Limit to 3000')
    #    sys.setrecursionlimit(3000)
    # Execute preload commands
    main_commands.preload_commands(dbdir, **kwargs)  # PRELOAD CMDS
    try:
        # Build IBEIS Control object
        ibs = _init_ibeis(dbdir)
        if gui and USE_GUI:
            back = _init_gui(activate=kwargs.get('activate', True))
            back.connect_ibeis_control(ibs)
    except Exception as ex:
        print('[main()] IBEIS LOAD encountered exception: %s %s' % (type(ex), ex))
        raise
    main_commands.postload_commands(ibs, back)  # POSTLOAD CMDS
    main_locals = {'ibs': ibs, 'back': back}
    return main_locals
def opendb_in_background(*args, **kwargs):
    """
    Starts ``opendb`` in a background process and returns the process handle.

    Args:
        *args, **kwargs: forwarded to :func:`opendb`.

    Returns:
        the spawned background process handle.

    Raises:
        AssertionError: if the deprecated ``wait`` kwarg is given a
            nonzero value.
    """
    # Cleanup of the removed 'wait' feature: the duplicated (unreachable)
    # raise, the unreachable time.sleep and the misleading
    # 'waiting 0 seconds for startup' print are all gone.
    sec = kwargs.pop('wait', 0)
    if sec != 0:
        raise AssertionError('wait is deprecated')
    import utool as ut
    return ut.spawn_background_process(opendb, *args, **kwargs)
def opendb_bg_web(*args, **kwargs):
    """
    Wrapper around opendb_in_background, returns a nice web_ibs
    object to execute web calls using normal python-like syntax
    Args:
        *args: passed to opendb_in_background
        **kwargs:
            port (int):
            domain (str): if specified assumes server is already running
                somewhere otherwise kwargs is passed to opendb_in_background
            start_job_queue (bool)
    Returns:
        web_ibs - this is a KillableProcess object with special functions
    CommandLine:
        python -m ibeis.main_module opendb_bg_web
    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.main_module import * # NOQA
        >>> args = tuple()
        >>> kwargs = {}
        >>> print('Opening a web_ibs')
        >>> web_ibs = opendb_bg_web()
        >>> print('SUCESS Opened a web_ibs!')
        >>> print(web_ibs)
        >>> print('Now kill the web_ibs')
        >>> web_ibs.terminate2()
    """
    import utool as ut
    from ibeis.web import appfuncs
    domain = kwargs.pop('domain', ut.get_argval('--domain', type_=str, default=None))
    port = kwargs.pop('port', appfuncs.DEFAULT_WEB_API_PORT)
    if 'wait' in kwargs:
        print('NOTE: No need to specify wait param anymore. '
              'This is automatically taken care of.')
    if domain is None:
        # Requesting a local test server
        _kw = dict(web=True, browser=False)
        _kw.update(kwargs)
        web_ibs = opendb_in_background(*args, **_kw)
    else:
        # Using a remote controller, no need to spin up anything
        web_ibs = ut.DynStruct()
        web_ibs.terminate2 = lambda: None
    # Augment web instance with usefull test functions
    if domain is None:
        domain = 'http://127.0.1.1'
    if not domain.startswith('http://'):
        domain = 'http://' + domain
    baseurl = domain + ':' + str(port)
    web_ibs.domain = domain
    web_ibs.port = port
    web_ibs.baseurl = baseurl
    # The closures below all capture ``baseurl``/``web_ibs`` and are attached
    # as methods on the returned object.
    def get(suffix, **kwargs):
        import requests
        return requests.get(baseurl + suffix)
    def post(suffix, **kwargs):
        import requests
        return requests.post(baseurl + suffix)
    def send_ibeis_request(suffix, type_='post', **kwargs):
        """
        Posts a request to a url suffix
        """
        import requests
        import utool as ut
        if not suffix.endswith('/'):
            raise Exception('YOU PROBABLY WANT A / AT THE END OF YOUR URL')
        payload = ut.map_dict_vals(ut.to_json, kwargs)
        if type_ == 'post':
            resp = requests.post(baseurl + suffix, data=payload)
            # NOTE(review): reads the private requests attribute ``_content``
            # on the POST path but public ``content`` on GET - confirm the
            # asymmetry is intentional.
            json_content = resp._content
        elif type_ == 'get':
            resp = requests.get(baseurl + suffix, data=payload)
            json_content = resp.content
        try:
            content = ut.from_json(json_content)
        except ValueError:
            raise Exception('Expected JSON string but got json_content=%r' % (json_content,))
        else:
            # print('content = %r' % (content,))
            if content['status']['code'] != 200:
                print(content['status']['message'])
                raise Exception(content['status']['message'])
        request_response = content['response']
        return request_response
    def wait_for_results(jobid, timeout=None, delays=[1, 3, 10]):
        """
        Waits for results from an engine
        """
        # NOTE(review): mutable default ``delays`` is only iterated, never
        # mutated, so the shared default is harmless here.
        for _ in ut.delayed_retry_gen(delays):
            print('Waiting for jobid = %s' % (jobid,))
            status_response = web_ibs.send_ibeis_request('/api/engine/job/status/', jobid=jobid)
            if status_response['jobstatus'] == 'completed':
                break
        return status_response
    def read_engine_results(jobid):
        result_response = web_ibs.send_ibeis_request('/api/engine/job/result/', jobid=jobid)
        return result_response
    def send_request_and_wait(suffix, type_='post', timeout=None, **kwargs):
        jobid = web_ibs.send_ibeis_request(suffix, type_=type_, **kwargs)
        status_response = web_ibs.wait_for_results(jobid, timeout)  # NOQA
        result_response = web_ibs.read_engine_results(jobid)
        #>>> cmdict = ut.from_json(result_response['json_result'])[0]
        return result_response
    web_ibs.send_ibeis_request = send_ibeis_request
    web_ibs.wait_for_results = wait_for_results
    web_ibs.read_engine_results = read_engine_results
    web_ibs.send_request_and_wait = send_request_and_wait
    web_ibs.get = get
    web_ibs.post = post
    def wait_until_started():
        """ waits until the web server responds to a request """
        import requests
        for count in ut.delayed_retry_gen([1], timeout=15):
            if True or ut.VERBOSE:
                print('Waiting for server to be up. count=%r' % (count,))
            try:
                web_ibs.send_ibeis_request('/api/test/heartbeat/', type_='get')
                break
            except requests.ConnectionError:
                pass
    wait_until_started()
    return web_ibs
def opendb_fg_web(*args, **kwargs):
    """
    Opens a database with the web API enabled but without entering the web
    loop, which gives you context inside the web app for testing.

    Example:
        >>> from ibeis.main_module import * # NOQA
        >>> kwargs = {'db': 'testdb1'}
        >>> args = tuple()
        >>> import ibeis
        >>> ibs = ibeis.opendb_fg_web()
    """
    kwargs.update(start_web_loop=False, web=True, browser=False)
    ibs = opendb(*args, **kwargs)
    # Attach the flask app so tests can poke at web endpoints directly.
    from ibeis.control import controller_inject
    ibs.app = controller_inject.get_flask_app()
    return ibs
def opendb(db=None, dbdir=None, defaultdb='cache', allow_newdir=False,
           delete_ibsdir=False, verbose=False, use_cache=True,
           web=None, **kwargs):
    """
    main without the preload (except for option to delete database before
    opening)

    Args:
        db (str): database name in your workdir used only if dbdir is None
        dbdir (None): full database path
        defaultdb (str): dbdir search strategy when db is None and dbdir is
            None
        allow_newdir (bool): (default=True) if True errors when opening a
            nonexisting database
        delete_ibsdir (bool): BE CAREFUL! (default=False) if True deletes the
            entire
        verbose (bool): verbosity flag
        web (bool): starts webserver if True (default=param specification)
        use_cache (bool): if True will try to return a previously loaded
            controller

    Returns:
        ibeis.IBEISController: ibs

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.main_module import * # NOQA
        >>> db = None
        >>> dbdir = None
        >>> defaultdb = 'cache'
        >>> allow_newdir = False
        >>> delete_ibsdir = False
        >>> verbose = False
        >>> use_cache = True
        >>> ibs = opendb(db, dbdir, defaultdb, allow_newdir, delete_ibsdir,
        >>>              verbose, use_cache)
        >>> result = str(ibs)
        >>> print(result)
    """
    from ibeis.init import sysres
    from ibeis.other import ibsfuncs
    dbdir = sysres.get_args_dbdir(defaultdb=defaultdb,
                                  allow_newdir=allow_newdir, db=db,
                                  dbdir=dbdir)
    if delete_ibsdir is True:
        # Deleting is only allowed when we are permitted to create a new dir.
        assert allow_newdir, (
            'must be making new directory if you are deleting everything!')
        ibsfuncs.delete_ibeis_database(dbdir)
    ibs = _init_ibeis(dbdir, verbose=verbose, use_cache=use_cache, web=web,
                      **kwargs)
    return ibs
def start(*args, **kwargs):
    """Convenience alias for :func:`main`."""
    return main(*args, **kwargs)
def opendb_test(gui=True, dbdir=None, defaultdb='cache', allow_newdir=False,
                db=None):
    """Test-oriented entry point: preload the environment and return only
    the controller (no GUI is created)."""
    from ibeis.init import sysres
    _preload()
    resolved_dbdir = sysres.get_args_dbdir(defaultdb=defaultdb,
                                           allow_newdir=allow_newdir, db=db,
                                           dbdir=dbdir)
    return _init_ibeis(resolved_dbdir)
def _preload(mpl=True, par=True, logging=True):
    """ Sets up python environment

    Args:
        mpl (bool): initialize the matplotlib backend
        par (bool): configure the parallel worker count
        logging (bool): currently unused (logging setup is commented out)
    """
    import utool as ut
    #from ibeis.init import main_helpers
    # from ibeis import params
    # from ibeis.init import sysres
    # Only the main process performs environment setup; forked workers skip it.
    if multiprocessing.current_process().name != 'MainProcess':
        return
    if ut.VERBOSE:
        print('[ibeis] _preload')
    _parse_args()
    # mpl backends
    # if logging and not params.args.nologging:
    #     if params.args.logdir is not None:
    #         sysres.set_logdir(params.args.logdir)
    #     else:
    #         # Log in the configured ibeis log dir (which is maintained by utool)
    #         # fix this to be easier to figure out where the logs actually are
    #         ut.start_logging(appname='ibeis')
    if mpl:
        _init_matplotlib()
    # numpy print settings
    _init_numpy()
    # parallel server processes
    if par:
        _init_parallel()
    # ctrl+c
    _init_signals()
    # inject colored exceptions
    ut.util_inject.inject_colored_exceptions()
    # register type aliases for debugging
    #main_helpers.register_utool_aliases()
    #return params.args
def main_loop(main_locals, rungui=True, ipy=False, persist=True):
    """
    Runs the qt loop if the GUI was initialized and returns an executable string
    for embedding an IPython terminal if requested.
    If rungui is False the gui will not loop even if back has been created
    the main locals dict must be called main_locals in the scope you call this
    function in.

    Args:
        main_locals (dict_):
        rungui (bool):
        ipy (bool):
        persist (bool): currently unused (close-on-exit is commented out)

    Returns:
        str: execstr
    """
    print('[main] ibeis.main_module.main_loop()')
    from ibeis import params
    import utool as ut
    #print('current process = %r' % (multiprocessing.current_process().name,))
    #== 'MainProcess':
    if rungui and not params.args.nogui:
        try:
            _guitool_loop(main_locals, ipy=ipy)
        except Exception as ex:
            ut.printex(ex, 'error in main_loop')
            raise
    #if not persist or params.args.cmd:
    #    main_close()
    # Put locals in the exec namespace
    ipycmd_execstr = ut.ipython_execstr()
    locals_execstr = ut.execstr_dict(main_locals, 'main_locals')
    execstr = locals_execstr + '\n' + ipycmd_execstr
    return execstr
def main_close(main_locals=None):
    """Tear down process-level state installed by :func:`main`
    (currently just the SIGINT handler)."""
    _reset_signals()
#if __name__ == '__main__':
# multiprocessing.freeze_support()
if __name__ == '__main__':
    """
    CommandLine:
        python -m ibeis.main_module
        python -m ibeis.main_module --allexamples
        python -m ibeis.main_module --allexamples --noface --nosrc
    """
    multiprocessing.freeze_support()  # for win32
    import utool as ut  # NOQA
    # Running the module directly executes this module's doctests.
    ut.doctest_funcs()
| 32.797856 | 96 | 0.619228 |
964ee80a4a1f064a7c78aafcd653905351a294ba | 1,357 | py | Python | timemory/gperf/heap_profiler.py | jrmadsen/TiMEmory | 8df2055e68da56e2fe57f716ca9b6d27f7eb4407 | [
"MIT"
] | 5 | 2018-01-19T06:18:00.000Z | 2019-07-19T16:08:46.000Z | timemory/gperf/heap_profiler.py | jrmadsen/TiMEmory | 8df2055e68da56e2fe57f716ca9b6d27f7eb4407 | [
"MIT"
] | 1 | 2018-02-09T21:33:08.000Z | 2018-02-11T23:39:47.000Z | timemory/gperf/heap_profiler.py | jrmadsen/TiMEmory | 8df2055e68da56e2fe57f716ca9b6d27f7eb4407 | [
"MIT"
] | 2 | 2019-06-30T00:46:54.000Z | 2019-07-09T18:35:45.000Z | #!@PYTHON_EXECUTABLE@
# MIT License
#
# Copyright (c) 2020, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory (subject to receipt of any
# required approvals from the U.S. Dept. of Energy). All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
from . import general
| 45.233333 | 80 | 0.77745 |
bac01a8aa79ff45d34a268ccea7c6dec2f9e4841 | 728 | py | Python | get_scannet_part.py | dsw-jlu-rgzn/gcn_votenet | 5d39861c9861f822f39dc3747ccfc17b5377c9e8 | [
"MIT"
] | null | null | null | get_scannet_part.py | dsw-jlu-rgzn/gcn_votenet | 5d39861c9861f822f39dc3747ccfc17b5377c9e8 | [
"MIT"
] | null | null | null | get_scannet_part.py | dsw-jlu-rgzn/gcn_votenet | 5d39861c9861f822f39dc3747ccfc17b5377c9e8 | [
"MIT"
] | null | null | null | # -*- coding: UTF8 -*-
import os
import shutil
# Collect every "<scan_id>_vh_clean_2.ply" mesh under the ScanNet scans
# directory, copy each into a flat visualization directory and report the
# total size of the copied files in MiB.
size = 0  # running total of copied bytes
scannet_dir = "/home/dongshuwei/3Ddetection/scannet/scans"
ids = os.listdir(scannet_dir)
dir_id = []  # absolute paths of every mesh file that was found
# Renamed from the misspelled 'dist_dile'; the unused 'file_name' list
# was removed.
dist_dir = "/home/dongshuwei/3Ddetection/scannet_vis"
for scan_id in ids:  # was 'id', which shadows the builtin
    the_file_name = scan_id + "_vh_clean_2.ply"
    the_dir_id = os.path.join(scannet_dir, scan_id, the_file_name)
    if os.path.isfile(the_dir_id):
        print("the dir the_dir_id: {} is a correct file".format(the_dir_id))
        dir_id.append(the_dir_id)
        size += os.path.getsize(the_dir_id)
        dist_file = os.path.join(dist_dir, the_file_name)
        shutil.copyfile(the_dir_id, dist_file)
print(dir_id)
print(size/1024/1024)
| 29.12 | 76 | 0.701923 |
506b24c27619b3f1aebb275b08bcc929e094f3f4 | 977 | py | Python | pgdrive/tests/vis_funtionality/vis_render_msg.py | decisionforce/pgdrive | 19af5d09a40a68a2a5f8b3ac8b40f109e71c26ee | [
"Apache-2.0"
] | 97 | 2020-12-25T06:02:17.000Z | 2022-01-16T06:58:39.000Z | pgdrive/tests/vis_funtionality/vis_render_msg.py | decisionforce/pgdrive | 19af5d09a40a68a2a5f8b3ac8b40f109e71c26ee | [
"Apache-2.0"
] | 192 | 2020-12-25T07:58:17.000Z | 2021-08-28T10:13:59.000Z | pgdrive/tests/vis_funtionality/vis_render_msg.py | decisionforce/pgdrive | 19af5d09a40a68a2a5f8b3ac8b40f109e71c26ee | [
"Apache-2.0"
] | 11 | 2020-12-29T11:23:44.000Z | 2021-12-06T23:25:49.000Z | from pgdrive.component.map.base_map import BaseMap, MapGenerateMethod
from pgdrive.envs.pgdrive_env import PGDriveEnv
from pgdrive.utils import setup_logger
setup_logger(debug=True)
if __name__ == "__main__":
    # Manual-control rendering smoke test: a 4-map environment with the
    # mini-map camera as the image source and on-screen rendering enabled.
    env = PGDriveEnv(
        {
            "environment_num": 4,
            "traffic_density": 0.1,
            "start_seed": 3,
            "image_source": "mini_map",
            "manual_control": True,
            "use_render": True,
            "offscreen_render": False,
            "decision_repeat": 5,
            "rgb_clip": True,
            "map_config": {
                BaseMap.GENERATE_TYPE: MapGenerateMethod.BIG_BLOCK_NUM,
                BaseMap.GENERATE_CONFIG: 12,
                BaseMap.LANE_WIDTH: 3.5,
                BaseMap.LANE_NUM: 3,
            }
        }
    )
    env.reset()
    for i in range(1, 100000):
        # Step with a constant action [0, 1] and draw frame/speed debug text.
        # NOTE(review): with manual_control=True, keyboard input presumably
        # overrides this action - confirm against PGDriveEnv.
        o, r, d, info = env.step([0, 1])
        env.render(text={"Frame": i, "Speed": env.vehicle.speed})
    env.close()
| 29.606061 | 71 | 0.554759 |
c0a85a62a5f5699e9543f85e0432908cc9a7a78a | 13,916 | py | Python | python/ray/tune/sample.py | jody3t/ray | 6a78ba9752dc7f17b0e4b7423898c0facf777d3d | [
"Apache-2.0"
] | null | null | null | python/ray/tune/sample.py | jody3t/ray | 6a78ba9752dc7f17b0e4b7423898c0facf777d3d | [
"Apache-2.0"
] | null | null | null | python/ray/tune/sample.py | jody3t/ray | 6a78ba9752dc7f17b0e4b7423898c0facf777d3d | [
"Apache-2.0"
] | null | null | null | import logging
import random
from copy import copy
from inspect import signature
from numbers import Number
from typing import Any, Callable, Dict, List, Optional, Sequence, Union
import numpy as np
logger = logging.getLogger(__name__)
class Domain:
    """Describes a parameter search space and how to draw values from it.

    Concrete subclasses (``Float``, ``Integer``, ``Categorical``, ...)
    define the set of valid values, while an attached :class:`Sampler`
    (e.g. uniform or log-uniform) controls how samples are produced.
    """
    sampler = None
    default_sampler_cls = None

    def cast(self, value):
        """Convert ``value`` to this domain's native type (identity here)."""
        return value

    def set_sampler(self, sampler, allow_override=False):
        """Attach ``sampler``; refuse to replace an existing one unless
        ``allow_override`` is set."""
        if self.sampler and not allow_override:
            raise ValueError(
                "You can only choose one sampler for parameter domains. "
                f"Existing sampler for parameter {self.__class__.__name__}: "
                f"{self.sampler}. Tried to add {sampler}")
        self.sampler = sampler

    def get_sampler(self):
        """Return the attached sampler, or a fresh default-class instance."""
        return self.sampler or self.default_sampler_cls()

    def sample(self, spec=None, size=1):
        """Draw ``size`` values from this domain via its sampler."""
        return self.get_sampler().sample(self, spec=spec, size=size)

    def is_grid(self):
        """True when this domain is configured for grid search."""
        return isinstance(self.sampler, Grid)

    def is_function(self):
        """True for function-backed domains (overridden by ``Function``)."""
        return False
class Sampler:
    """Abstract strategy for drawing values from a :class:`Domain`."""
    def sample(self,
               domain: Domain,
               spec: Optional[Union[List[Dict], Dict]] = None,
               size: int = 1):
        # Subclasses implement the actual sampling logic.
        raise NotImplementedError
class BaseSampler(Sampler):
    """Generic sampler marker with no particular distribution semantics."""
    def __str__(self):
        return "Base"
class Uniform(Sampler):
    """Marker for uniform sampling over a bounded domain."""
    def __str__(self):
        return "Uniform"
class LogUniform(Sampler):
    """Marker for log-uniform sampling with logarithm base ``base``."""
    def __init__(self, base: float = 10):
        self.base = base
        # The base must be positive for the log-space math to be defined.
        assert self.base > 0, "Base has to be strictly greater than 0"
    def __str__(self):
        return "LogUniform"
class Normal(Sampler):
    """Marker for sampling from a normal (Gaussian) distribution.

    Args:
        mean: Mean of the distribution. Defaults to 0.
        sd: Standard deviation; must be strictly positive. Defaults to 1.
            (The previous default of ``0.`` always failed the assertion
            below, so ``Normal()`` could never be constructed; explicit
            callers such as ``Float.normal()`` are unaffected.)
    """
    def __init__(self, mean: float = 0., sd: float = 1.):
        self.mean = mean
        self.sd = sd
        assert self.sd > 0, "SD has to be strictly greater than 0"
    def __str__(self):
        return "Normal"
class Grid(Sampler):
    """Dummy sampler used for grid search.

    Grid values are enumerated elsewhere; drawing random samples from a
    grid domain is a usage error, so ``sample()`` always raises.
    """
    def sample(self,
               domain: Domain,
               spec: Optional[Union[List[Dict], Dict]] = None,
               size: int = 1):
        # BUG FIX: the exception was previously *returned* instead of
        # raised, so misuse went completely unnoticed by callers.
        raise RuntimeError("Do not call `sample()` on grid.")
class Float(Domain):
    """A float-valued search space, optionally bounded by ``lower``/``upper``.

    Use ``uniform()``, ``loguniform()``, ``normal()`` or ``quantized()`` to
    select a sampling strategy; ``None`` bounds mean unbounded.
    """
    class _Uniform(Uniform):
        def sample(self,
                   domain: "Float",
                   spec: Optional[Union[List[Dict], Dict]] = None,
                   size: int = 1):
            assert domain.lower > float("-inf"), \
                "Uniform needs a lower bound"
            assert domain.upper < float("inf"), \
                "Uniform needs a upper bound"
            items = np.random.uniform(domain.lower, domain.upper, size=size)
            # Single draws are unwrapped and cast to a plain float.
            return items if len(items) > 1 else domain.cast(items[0])

    class _LogUniform(LogUniform):
        def sample(self,
                   domain: "Float",
                   spec: Optional[Union[List[Dict], Dict]] = None,
                   size: int = 1):
            assert domain.lower > 0, \
                "LogUniform needs a lower bound greater than 0"
            assert 0 < domain.upper < float("inf"), \
                "LogUniform needs a upper bound greater than 0"
            # Sample uniformly in log space, then exponentiate back.
            logmin = np.log(domain.lower) / np.log(self.base)
            logmax = np.log(domain.upper) / np.log(self.base)
            items = self.base**(np.random.uniform(logmin, logmax, size=size))
            return items if len(items) > 1 else domain.cast(items[0])

    class _Normal(Normal):
        def sample(self,
                   domain: "Float",
                   spec: Optional[Union[List[Dict], Dict]] = None,
                   size: int = 1):
            # Normal sampling is incompatible with finite bounds.
            assert not domain.lower or domain.lower == float("-inf"), \
                "Normal sampling does not allow a lower value bound."
            assert not domain.upper or domain.upper == float("inf"), \
                "Normal sampling does not allow a upper value bound."
            items = np.random.normal(self.mean, self.sd, size=size)
            return items if len(items) > 1 else domain.cast(items[0])

    default_sampler_cls = _Uniform

    def __init__(self, lower: Optional[float], upper: Optional[float]):
        # Need to explicitly check for None (0 is a valid but falsy bound).
        self.lower = lower if lower is not None else float("-inf")
        self.upper = upper if upper is not None else float("inf")

    def cast(self, value):
        """Cast sampled values to ``float``."""
        return float(value)

    def uniform(self):
        """Return a copy of this domain configured for uniform sampling."""
        if not self.lower > float("-inf"):
            raise ValueError(
                "Uniform requires a lower bound. Make sure to set the "
                "`lower` parameter of `Float()`.")
        if not self.upper < float("inf"):
            raise ValueError(
                "Uniform requires a upper bound. Make sure to set the "
                "`upper` parameter of `Float()`.")
        new = copy(self)
        new.set_sampler(self._Uniform())
        return new

    def loguniform(self, base: float = 10):
        """Return a copy of this domain configured for log-uniform sampling."""
        if not self.lower > 0:
            raise ValueError(
                "LogUniform requires a lower bound greater than 0."
                f"Got: {self.lower}. Did you pass a variable that has "
                "been log-transformed? If so, pass the non-transformed value "
                "instead.")
        if not 0 < self.upper < float("inf"):
            raise ValueError(
                "LogUniform requires a upper bound greater than 0. "
                # BUG FIX: this message previously reported ``self.lower``,
                # hiding the actual offending upper bound from the user.
                f"Got: {self.upper}. Did you pass a variable that has "
                "been log-transformed? If so, pass the non-transformed value "
                "instead.")
        new = copy(self)
        new.set_sampler(self._LogUniform(base))
        return new

    def normal(self, mean=0., sd=1.):
        """Return a copy that samples from Normal(``mean``, ``sd``)."""
        new = copy(self)
        new.set_sampler(self._Normal(mean, sd))
        return new

    def quantized(self, q: Number):
        """Return a copy whose samples are rounded to increments of ``q``."""
        new = copy(self)
        new.set_sampler(Quantized(new.get_sampler(), q), allow_override=True)
        return new
class Integer(Domain):
    """An integer-valued search space over ``[lower, upper)``.

    ``upper`` is exclusive, matching ``np.random.randint`` semantics.
    """
    class _Uniform(Uniform):
        def sample(self,
                   domain: "Integer",
                   spec: Optional[Union[List[Dict], Dict]] = None,
                   size: int = 1):
            items = np.random.randint(domain.lower, domain.upper, size=size)
            # Single draws are unwrapped and cast to a plain int.
            return items if len(items) > 1 else domain.cast(items[0])
    default_sampler_cls = _Uniform
    def __init__(self, lower, upper):
        self.lower = lower
        self.upper = upper
    def cast(self, value):
        """Cast sampled values to ``int``."""
        return int(value)
    def quantized(self, q: Number):
        """Return a copy whose samples are rounded to increments of ``q``."""
        new = copy(self)
        new.set_sampler(Quantized(new.get_sampler(), q), allow_override=True)
        return new
    def uniform(self):
        """Return a copy of this domain configured for uniform sampling."""
        new = copy(self)
        new.set_sampler(self._Uniform())
        return new
class Categorical(Domain):
    """A search space over a fixed, finite sequence of categories."""
    class _Uniform(Uniform):
        def sample(self,
                   domain: "Categorical",
                   spec: Optional[Union[List[Dict], Dict]] = None,
                   size: int = 1):
            # random.choices samples with replacement.
            items = random.choices(domain.categories, k=size)
            return items if len(items) > 1 else domain.cast(items[0])
    default_sampler_cls = _Uniform
    def __init__(self, categories: Sequence):
        self.categories = list(categories)
    def uniform(self):
        """Return a copy of this domain configured for uniform sampling."""
        new = copy(self)
        new.set_sampler(self._Uniform())
        return new
    def grid(self):
        """Return a copy of this domain configured for grid search."""
        new = copy(self)
        new.set_sampler(Grid())
        return new
    def __len__(self):
        return len(self.categories)
    def __getitem__(self, item):
        return self.categories[item]
class Function(Domain):
    """A search space whose values come from a user-supplied callable.

    The callable may take zero arguments, or exactly one (the resolved
    spec), which is forwarded at sample time.
    """
    class _CallSampler(BaseSampler):
        def sample(self,
                   domain: "Function",
                   spec: Optional[Union[List[Dict], Dict]] = None,
                   size: int = 1):
            # Only forward the spec when the user function declares a param.
            pass_spec = len(signature(domain.func).parameters) > 0
            if pass_spec:
                items = [
                    domain.func(spec[i] if isinstance(spec, list) else spec)
                    for i in range(size)
                ]
            else:
                items = [domain.func() for i in range(size)]
            return items if len(items) > 1 else domain.cast(items[0])
    default_sampler_cls = _CallSampler
    def __init__(self, func: Callable):
        # Reject functions that cannot be called with just the spec.
        if len(signature(func).parameters) > 1:
            raise ValueError(
                "The function passed to a `Function` parameter must accept "
                "either 0 or 1 parameters.")
        self.func = func
    def is_function(self):
        """Function-backed domains report True here."""
        return True
class Quantized(Sampler):
    """Wraps another sampler and rounds its output to multiples of ``q``."""
    def __init__(self, sampler: Sampler, q: Number):
        self.sampler = sampler
        self.q = q
        assert self.sampler, "Quantized() expects a sampler instance"
    def get_sampler(self):
        """Return the wrapped (inner) sampler."""
        return self.sampler
    def sample(self,
               domain: Domain,
               spec: Optional[Union[List[Dict], Dict]] = None,
               size: int = 1):
        values = self.sampler.sample(domain, spec, size)
        # Round each value to the nearest multiple of q.
        quantized = np.round(np.divide(values, self.q)) * self.q
        if not isinstance(quantized, np.ndarray):
            return domain.cast(quantized)
        return list(quantized)
# TODO (krfricke): Remove tune.function
def function(func):
    """Deprecated no-op kept for backwards compatibility; returns ``func``."""
    logger.warning(
        "DeprecationWarning: wrapping {} with tune.function() is no "
        "longer needed".format(func))
    return func
def sample_from(func: Callable[[Dict], Any]):
    """Specify that tune should sample configuration values from this function.
    Arguments:
        func: A callable to draw a sample from; may optionally accept the
            resolved spec dict as its single argument.
    """
    return Function(func)
def uniform(lower: float, upper: float):
    """Sample a float value uniformly between ``lower`` and ``upper``.
    Sampling from ``tune.uniform(1, 10)`` is equivalent to sampling from
    ``np.random.uniform(1, 10)``
    """
    return Float(lower, upper).uniform()
def quniform(lower: float, upper: float, q: float):
    """Sample a quantized float value uniformly between ``lower`` and ``upper``.
    Sampling from ``tune.quniform(1, 10, 0.1)`` is equivalent to sampling
    from ``np.random.uniform(1, 10)`` and rounding to multiples of 0.1.
    The value will be quantized, i.e. rounded to an integer increment of ``q``.
    Quantization makes the upper bound inclusive.
    """
    return Float(lower, upper).uniform().quantized(q)
def loguniform(lower: float, upper: float, base: float = 10):
    """Sugar for sampling in different orders of magnitude.
    Both bounds must be strictly positive (pass the raw values, not their
    logarithms).
    Args:
        lower (float): Lower boundary of the output interval (e.g. 1e-4)
        upper (float): Upper boundary of the output interval (e.g. 1e-2)
        base (int): Base of the log. Defaults to 10.
    """
    return Float(lower, upper).loguniform(base)
def qloguniform(lower: float, upper: float, q: float, base: float = 10):
    """Sugar for sampling in different orders of magnitude.
    The value will be quantized, i.e. rounded to an integer increment of ``q``.
    Quantization makes the upper bound inclusive.
    Args:
        lower (float): Lower boundary of the output interval (e.g. 1e-4)
        upper (float): Upper boundary of the output interval (e.g. 1e-2)
        q (float): Quantization number. The result will be rounded to an
            integer increment of this value.
        base (int): Base of the log. Defaults to 10.
    """
    return Float(lower, upper).loguniform(base).quantized(q)
def choice(categories: List):
    """Sample a categorical value.
    Sampling from ``tune.choice([1, 2])`` is equivalent to sampling from
    ``random.choice([1, 2])``
    """
    return Categorical(categories).uniform()
def randint(lower: int, upper: int):
    """Sample an integer value uniformly between ``lower`` and ``upper``.
    ``lower`` is inclusive, ``upper`` is exclusive.
    Sampling from ``tune.randint(0, 10)`` is equivalent to sampling from
    ``np.random.randint(0, 10)``
    """
    return Integer(lower, upper).uniform()
def qrandint(lower: int, upper: int, q: int = 1):
    """Sample an integer value uniformly between ``lower`` and ``upper``.
    ``lower`` is inclusive, ``upper`` is also inclusive (!).
    The value will be quantized, i.e. rounded to an integer increment of ``q``.
    Quantization makes the upper bound inclusive.
    Sampling from ``tune.qrandint(0, 10)`` is equivalent to sampling from
    ``np.random.randint(0, 10)`` and rounding to multiples of ``q``.
    """
    return Integer(lower, upper).uniform().quantized(q)
def randn(mean: float = 0., sd: float = 1.):
    """Sample a float value normally with ``mean`` and ``sd``.
    The resulting domain is unbounded (no lower/upper limits).
    Args:
        mean (float): Mean of the normal distribution. Defaults to 0.
        sd (float): SD of the normal distribution. Defaults to 1.
    """
    return Float(None, None).normal(mean, sd)
def qrandn(mean: float, sd: float, q: float):
    """Sample a float value normally with ``mean`` and ``sd``.
    The value will be quantized, i.e. rounded to an integer increment of ``q``.
    Args:
        mean (float): Mean of the normal distribution.
        sd (float): SD of the normal distribution.
        q (float): Quantization number. The result will be rounded to an
            integer increment of this value.
    """
    return Float(None, None).normal(mean, sd).quantized(q)
| 31.627273 | 80 | 0.593202 |
914db75793ba4af530c08d097936783044495342 | 810 | py | Python | setup.py | bruno-janota/lambdata-bjanota | 4e4d9c0a3a2a1ddaaa7857d694bc14f33175ce2b | [
"MIT"
] | null | null | null | setup.py | bruno-janota/lambdata-bjanota | 4e4d9c0a3a2a1ddaaa7857d694bc14f33175ce2b | [
"MIT"
] | 3 | 2020-03-24T17:40:48.000Z | 2021-06-02T00:32:37.000Z | setup.py | bruno-janota/lambdata-bjanota | 4e4d9c0a3a2a1ddaaa7857d694bc14f33175ce2b | [
"MIT"
] | 1 | 2019-09-05T03:44:55.000Z | 2019-09-05T03:44:55.000Z | """
lambdata-bruno-janota - A Collection of Data Science helper functions.
"""
import setuptools
# Runtime dependencies declared to pip at install time.
REQUIRED = [
    "numpy",
    "pandas",
    "sklearn"
]
# The long description shown on PyPI is the README, verbatim.
with open("README.md", "r") as fh:
    LONG_DESCRIPTION = fh.read()
setuptools.setup(
    name="lambdata-bruno-janota",
    version="0.0.1",
    author="bruno-janota",
    description="A collection of Data Science helper functions",
    long_description=LONG_DESCRIPTION,
    long_description_content_type="text/markdown",
    url="https://github.com/bruno-janota/lambdata-bjanota",
    packages=setuptools.find_packages(),
    python_requires=">=3.5",
    # BUG FIX: the keyword is `install_requires`; the previous
    # `install_required` was silently ignored by setuptools, so the
    # package declared no dependencies at all.
    install_requires=REQUIRED,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
876a312426e46326a622afbbed72da0d69106814 | 1,876 | py | Python | app/user/serializers.py | dragonzite/recipe-app-api | 3409afe997b0b1a6e2c82225d345562f20c2e5f6 | [
"MIT"
] | null | null | null | app/user/serializers.py | dragonzite/recipe-app-api | 3409afe997b0b1a6e2c82225d345562f20c2e5f6 | [
"MIT"
] | null | null | null | app/user/serializers.py | dragonzite/recipe-app-api | 3409afe997b0b1a6e2c82225d345562f20c2e5f6 | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model, authenticate
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
    """Serializer for creating and updating user accounts.

    The password is write-only (never echoed back in responses) and must
    be at least 5 characters long.
    """
    class Meta:
        model = get_user_model()
        fields = ('email', 'password', 'name')
        extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}
    def create(self, validated_data):
        """Create and return a new user with an encrypted (hashed) password.
        Args:
            validated_data (dict): validated user data passed in to serializer
        """
        return get_user_model().objects.create_user(**validated_data)
    def update(self, instance, validated_data):
        """Update a user, setting the password correctly, and return it"""
        # Pop the password so the parent update() does not write it as
        # plain text; hash it via set_password() instead.
        password = validated_data.pop('password', None)
        user = super().update(instance, validated_data)
        if password:
            user.set_password(password)
            user.save()
        return user
class AuthTokenSerializer(serializers.Serializer):
    """Serializer for the user authentication (token login) object"""
    email = serializers.CharField()
    # Whitespace is preserved so passwords with leading/trailing spaces work.
    password = serializers.CharField(
        style={'input_type': 'password'},
        trim_whitespace=False
    )
    def validate(self, attrs):
        """Validate and authenticate the user; raises on bad credentials."""
        email = attrs.get('email')
        password = attrs.get('password')
        # authenticate() returns None when the credentials are invalid.
        user = authenticate(
            request=self.context.get('request'),
            username=email,
            password=password
        )
        if not user:
            msg = _('Unable to authenticate with provided credentials')
            raise serializers.ValidationError(msg, code='authorization')
        # Downstream views read the authenticated user from attrs['user'].
        attrs['user'] = user
        return attrs
| 29.777778 | 77 | 0.63806 |
3ef025e78d2f3a783aea393d5908fd44601000e7 | 10,635 | py | Python | attic/key.py | AaronWebster/attic | be7e636b6de677776d57b788c9a435070063575d | [
"BSD-3-Clause"
] | 1 | 2022-02-11T21:46:40.000Z | 2022-02-11T21:46:40.000Z | attic/key.py | AaronWebster/attic | be7e636b6de677776d57b788c9a435070063575d | [
"BSD-3-Clause"
] | 20 | 2022-02-14T07:54:49.000Z | 2022-02-14T07:57:38.000Z | attic/key.py | AaronWebster/attic | be7e636b6de677776d57b788c9a435070063575d | [
"BSD-3-Clause"
] | null | null | null | from binascii import hexlify, a2b_base64, b2a_base64
from getpass import getpass
import os
import msgpack
import textwrap
import hmac
from hashlib import sha256
import zlib
from attic.crypto import pbkdf2_sha256, get_random_bytes, AES, bytes_to_long, long_to_bytes, bytes_to_int, num_aes_blocks
from attic.helpers import IntegrityError, get_keys_dir, Error
PREFIX = b'\0' * 8
class UnsupportedPayloadError(Error):
    """Unsupported payload type {}.
    A newer version is required to access this repository.
    """
    # Raised by key_factory() when the manifest's leading type byte is
    # not one of the known key TYPE values.
class KeyfileNotFoundError(Error):
    """No key file for repository {} found in {}."""
    # Raised by KeyfileKey.find_key_file() when no key file in the keys
    # directory matches the repository id.
class HMAC(hmac.HMAC):
    """hmac.HMAC that also accepts memoryview messages.

    Python < 3.4 rejected memoryview arguments to ``update``. The old
    workaround reached into the private ``self.inner`` attribute, which no
    longer exists on modern Python (AttributeError). Converting a
    memoryview to bytes and delegating to the base class works on all
    versions.
    """
    def update(self, msg):
        # bytes(...) copies only when a memoryview was actually passed.
        super().update(bytes(msg) if isinstance(msg, memoryview) else msg)
def key_creator(repository, args):
    """Create a new key of the type selected via ``args.encryption``."""
    if args.encryption == 'keyfile':
        return KeyfileKey.create(repository, args)
    elif args.encryption == 'passphrase':
        return PassphraseKey.create(repository, args)
    else:
        # Any other value (including 'none') means no encryption.
        return PlaintextKey.create(repository, args)
def key_factory(repository, manifest_data):
    """Instantiate the right key class from the manifest's leading type byte."""
    if manifest_data[0] == KeyfileKey.TYPE:
        return KeyfileKey.detect(repository, manifest_data)
    elif manifest_data[0] == PassphraseKey.TYPE:
        return PassphraseKey.detect(repository, manifest_data)
    elif manifest_data[0] == PlaintextKey.TYPE:
        return PlaintextKey.detect(repository, manifest_data)
    else:
        raise UnsupportedPayloadError(manifest_data[0])
class KeyBase(object):
    """Interface for repository keys.

    Subclasses must define a ``TYPE`` class attribute (one byte) that
    identifies the payload format, and implement the three methods below.
    """
    def __init__(self):
        # TYPE_STR is the single-byte prefix written before every payload.
        self.TYPE_STR = bytes([self.TYPE])
    def id_hash(self, data):
        """Return HMAC hash using the "id" HMAC key"""
    def encrypt(self, data):
        pass
    def decrypt(self, id, data):
        pass
class PlaintextKey(KeyBase):
    """Key type that compresses chunk data but does not encrypt it."""
    TYPE = 0x02
    chunk_seed = 0
    @classmethod
    def create(cls, repository, args):
        print(
            'Encryption NOT enabled.\nUse the "--encryption=passphrase|keyfile" to enable encryption.'
        )
        return cls()
    @classmethod
    def detect(cls, repository, manifest_data):
        return cls()
    def id_hash(self, data):
        # Without a secret key, the chunk id is a plain SHA-256 digest.
        return sha256(data).digest()
    def encrypt(self, data):
        # Payload layout: TYPE(1) + zlib-compressed data.
        return b''.join([self.TYPE_STR, zlib.compress(data)])
    def decrypt(self, id, data):
        if data[0] != self.TYPE:
            raise IntegrityError('Invalid encryption envelope')
        data = zlib.decompress(memoryview(data)[1:])
        if id and sha256(data).digest() != id:
            raise IntegrityError('Chunk id verification failed')
        return data
class AESKeyBase(KeyBase):
    """Common base class shared by KeyfileKey and PassphraseKey
    Chunks are encrypted using 256bit AES in Counter Mode (CTR)
    Payload layout: TYPE(1) + HMAC(32) + NONCE(8) + CIPHERTEXT
    To reduce payload size only 8 bytes of the 16 bytes nonce is saved
    in the payload, the first 8 bytes are always zeros. This does not
    affect security but limits the maximum repository capacity to
    only 295 exabytes!
    """
    PAYLOAD_OVERHEAD = 1 + 32 + 8  # TYPE + HMAC + NONCE
    def id_hash(self, data):
        """Return HMAC hash using the "id" HMAC key"""
        return HMAC(self.id_key, data, sha256).digest()
    def encrypt(self, data):
        data = zlib.compress(data)
        # NOTE(review): reset() presumably advances/refreshes the cipher IV
        # so each chunk gets a fresh nonce — see attic.crypto.AES.
        self.enc_cipher.reset()
        data = b''.join((self.enc_cipher.iv[8:], self.enc_cipher.encrypt(data)))
        # Encrypt-then-MAC: the HMAC covers nonce + ciphertext.
        hmac = HMAC(self.enc_hmac_key, data, sha256).digest()
        return b''.join((self.TYPE_STR, hmac, data))
    def decrypt(self, id, data):
        if data[0] != self.TYPE:
            raise IntegrityError('Invalid encryption envelope')
        hmac = memoryview(data)[1:33]
        # Verify the MAC before attempting any decryption.
        if memoryview(
                HMAC(self.enc_hmac_key,
                     memoryview(data)[33:], sha256).digest()) != hmac:
            raise IntegrityError('Encryption envelope checksum mismatch')
        self.dec_cipher.reset(iv=PREFIX + data[33:41])
        data = zlib.decompress(self.dec_cipher.decrypt(
            data[41:]))  # should use memoryview
        if id and HMAC(self.id_key, data, sha256).digest() != id:
            raise IntegrityError('Chunk id verification failed')
        return data
    def extract_nonce(self, payload):
        # Nonce occupies bytes 33..41 (after TYPE and the 32-byte HMAC).
        if payload[0] != self.TYPE:
            raise IntegrityError('Invalid encryption envelope')
        nonce = bytes_to_long(payload[33:41])
        return nonce
    def init_from_random_data(self, data):
        # 100 random bytes: three 32-byte keys plus a 4-byte chunk seed.
        self.enc_key = data[0:32]
        self.enc_hmac_key = data[32:64]
        self.id_key = data[64:96]
        self.chunk_seed = bytes_to_int(data[96:100])
        # Convert to signed int32
        if self.chunk_seed & 0x80000000:
            self.chunk_seed = self.chunk_seed - 0xffffffff - 1
    def init_ciphers(self, enc_iv=b''):
        self.enc_cipher = AES(self.enc_key, enc_iv)
        self.dec_cipher = AES(self.enc_key)
class PassphraseKey(AESKeyBase):
    """Key material derived directly from a passphrase via PBKDF2.

    Nothing is stored on disk; the passphrase (or ATTIC_PASSPHRASE env
    var) must be re-entered every time the repository is opened.
    """
    TYPE = 0x01
    iterations = 100000
    @classmethod
    def create(cls, repository, args):
        key = cls()
        passphrase = os.environ.get('ATTIC_PASSPHRASE')
        if passphrase is not None:
            passphrase2 = passphrase
        else:
            # 1 != 2 forces at least one pass through the prompt loop.
            passphrase, passphrase2 = 1, 2
        while passphrase != passphrase2:
            passphrase = getpass('Enter passphrase: ')
            if not passphrase:
                print('Passphrase must not be blank')
                continue
            passphrase2 = getpass('Enter same passphrase again: ')
            if passphrase != passphrase2:
                print('Passphrases do not match')
        key.init(repository, passphrase)
        if passphrase:
            print(
                'Remember your passphrase. Your data will be inaccessible without it.'
            )
        return key
    @classmethod
    def detect(cls, repository, manifest_data):
        prompt = 'Enter passphrase for %s: ' % repository._location.orig
        key = cls()
        passphrase = os.environ.get('ATTIC_PASSPHRASE')
        if passphrase is None:
            passphrase = getpass(prompt)
        while True:
            key.init(repository, passphrase)
            try:
                # A wrong passphrase fails the HMAC check in decrypt().
                key.decrypt(None, manifest_data)
                num_blocks = num_aes_blocks(len(manifest_data) - 41)
                key.init_ciphers(
                    PREFIX +
                    long_to_bytes(key.extract_nonce(manifest_data) + num_blocks))
                return key
            except IntegrityError:
                passphrase = getpass(prompt)
    def init(self, repository, passphrase):
        # Key material is derived from passphrase + repository id (salt).
        self.init_from_random_data(
            pbkdf2_sha256(
                passphrase.encode('utf-8'), repository.id, self.iterations, 100))
        self.init_ciphers()
class KeyfileKey(AESKeyBase):
    """Key material stored in a passphrase-protected file under the keys dir.

    The file starts with a 'ATTIC KEY <repo-id-hex>' header line followed
    by base64-encoded, AES-encrypted, msgpack-serialized key data.
    """
    FILE_ID = 'ATTIC KEY'
    TYPE = 0x00
    @classmethod
    def detect(cls, repository, manifest_data):
        key = cls()
        path = cls.find_key_file(repository)
        prompt = 'Enter passphrase for key file %s: ' % path
        passphrase = os.environ.get('ATTIC_PASSPHRASE', '')
        # load() returns falsy on a wrong passphrase, so keep prompting.
        while not key.load(path, passphrase):
            passphrase = getpass(prompt)
        num_blocks = num_aes_blocks(len(manifest_data) - 41)
        key.init_ciphers(
            PREFIX + long_to_bytes(key.extract_nonce(manifest_data) + num_blocks))
        return key
    @classmethod
    def find_key_file(cls, repository):
        # Match on the header line: FILE_ID (9 chars) + space + hex repo id.
        id = hexlify(repository.id).decode('ascii')
        keys_dir = get_keys_dir()
        for name in os.listdir(keys_dir):
            filename = os.path.join(keys_dir, name)
            with open(filename, 'r') as fd:
                line = fd.readline().strip()
                if line and line.startswith(cls.FILE_ID) and line[10:] == id:
                    return filename
        raise KeyfileNotFoundError(repository._location.canonical_path(),
                                   get_keys_dir())
    def load(self, filename, passphrase):
        """Load key material from ``filename``; returns True on success,
        None (falsy) when the passphrase was wrong."""
        with open(filename, 'r') as fd:
            cdata = a2b_base64(''.join(fd.readlines()[1:]).encode(
                'ascii'))  # .encode needed for Python 3.[0-2]
            data = self.decrypt_key_file(cdata, passphrase)
            if data:
                key = msgpack.unpackb(data)
                if key[b'version'] != 1:
                    raise IntegrityError('Invalid key file header')
                self.repository_id = key[b'repository_id']
                self.enc_key = key[b'enc_key']
                self.enc_hmac_key = key[b'enc_hmac_key']
                self.id_key = key[b'id_key']
                self.chunk_seed = key[b'chunk_seed']
                self.path = filename
                return True
    def decrypt_key_file(self, data, passphrase):
        # Returns None when the HMAC check fails (wrong passphrase).
        d = msgpack.unpackb(data)
        assert d[b'version'] == 1
        assert d[b'algorithm'] == b'sha256'
        key = pbkdf2_sha256(
            passphrase.encode('utf-8'), d[b'salt'], d[b'iterations'], 32)
        data = AES(key).decrypt(d[b'data'])
        if HMAC(key, data, sha256).digest() != d[b'hash']:
            return None
        return data
    def encrypt_key_file(self, data, passphrase):
        # Derive a file key with a fresh random salt, MAC the plaintext,
        # then encrypt; everything is packed together with msgpack.
        salt = get_random_bytes(32)
        iterations = 100000
        key = pbkdf2_sha256(passphrase.encode('utf-8'), salt, iterations, 32)
        hash = HMAC(key, data, sha256).digest()
        cdata = AES(key).encrypt(data)
        d = {
            'version': 1,
            'salt': salt,
            'iterations': iterations,
            'algorithm': 'sha256',
            'hash': hash,
            'data': cdata,
        }
        return msgpack.packb(d)
    def save(self, path, passphrase):
        key = {
            'version': 1,
            'repository_id': self.repository_id,
            'enc_key': self.enc_key,
            'enc_hmac_key': self.enc_hmac_key,
            'id_key': self.id_key,
            'chunk_seed': self.chunk_seed,
        }
        data = self.encrypt_key_file(msgpack.packb(key), passphrase)
        with open(path, 'w') as fd:
            fd.write('%s %s\n' %
                     (self.FILE_ID, hexlify(self.repository_id).decode('ascii')))
            fd.write('\n'.join(textwrap.wrap(b2a_base64(data).decode('ascii'))))
            fd.write('\n')
        self.path = path
    def change_passphrase(self):
        # 1 != 2 forces at least one pass through the prompt loop.
        passphrase, passphrase2 = 1, 2
        while passphrase != passphrase2:
            passphrase = getpass('New passphrase: ')
            passphrase2 = getpass('Enter same passphrase again: ')
            if passphrase != passphrase2:
                print('Passphrases do not match')
        self.save(self.path, passphrase)
        print('Key file "%s" updated' % self.path)
    @classmethod
    def create(cls, repository, args):
        # Pick a non-clashing key file name (.2, .3, ... suffixes).
        filename = args.repository.to_key_filename()
        path = filename
        i = 1
        while os.path.exists(path):
            i += 1
            path = filename + '.%d' % i
        passphrase = os.environ.get('ATTIC_PASSPHRASE')
        if passphrase is not None:
            passphrase2 = passphrase
        else:
            passphrase, passphrase2 = 1, 2
        while passphrase != passphrase2:
            passphrase = getpass('Enter passphrase (empty for no passphrase):')
            passphrase2 = getpass('Enter same passphrase again: ')
            if passphrase != passphrase2:
                print('Passphrases do not match')
        key = cls()
        key.repository_id = repository.id
        key.init_from_random_data(get_random_bytes(100))
        key.init_ciphers()
        key.save(path, passphrase)
        print('Key file "%s" created.' % key.path)
        print('Keep this file safe. Your data will be inaccessible without it.')
        return key
| 31.187683 | 121 | 0.66441 |
5ed419bce8701b9e287011c69d99ba6bf59558a7 | 2,849 | py | Python | raidcal/common_settings.py | katajakasa/Raidcal | 61d17d8ca0c58c09debb31e868afdd377a87f38d | [
"MIT"
] | null | null | null | raidcal/common_settings.py | katajakasa/Raidcal | 61d17d8ca0c58c09debb31e868afdd377a87f38d | [
"MIT"
] | null | null | null | raidcal/common_settings.py | katajakasa/Raidcal | 61d17d8ca0c58c09debb31e868afdd377a87f38d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Application definition
INSTALLED_APPS = (
    'raidcal.maincal',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'crispy_forms',
    'compressor',
    'django_summernote',
    'eadred',
)
# Request/response middleware, applied in order.
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
# Static file discovery; CompressorFinder is required by django-compressor.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'compressor.finders.CompressorFinder',
)
ROOT_URLCONF = 'raidcal.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                # NOTE(review): 'django.core.context_processors.request' is
                # the pre-1.8 path of the request processor listed above —
                # likely redundant; confirm before removing.
                'django.core.context_processors.request',
                'custom.context_processors.language_code',
            ],
        },
    },
]
LOGIN_URL = '/login'
CRISPY_TEMPLATE_PACK = 'bootstrap3'
WSGI_APPLICATION = 'raidcal.wsgi.application'
# django-summernote (rich text editor) toolbar and sizing.
SUMMERNOTE_CONFIG = {
    'iframe': False,
    'airMode': False,
    'styleWithTags': False,
    'height': '400',
    'width': '100%',
    'toolbar': [
        ["style", ["style"]],
        ["font", ["bold", "italic", "underline", "superscript", "subscript", "strikethrough", "clear"]],
        ["color", ["color"]],
        ["para", ["ul", "ol", "paragraph"]],
        ["insert", ["link", "picture"]],
        ["view", ["fullscreen", "codeview"]]
    ],
}
# To sanitize tinymce stuff
SANITIZER_ALLOWED_TAGS = ['a', 'strong', 'img', 'li', 'ol', 'ul', 'em', 'u', 'span', 'p', 'strike',
                          'address', 'sup', 'h1', 'h2', 'n3', 'h4', 'h5', 'h6', 'pre', 'blockquote', 'br']
SANITIZER_ALLOWED_ATTRIBUTES = ['href', 'target', 'style', 'class', 'title', 'width', 'height', 'src', 'alt']
SANITIZER_ALLOWED_STYLES = ['color', 'background-color', 'text-align', 'margin-left']
# django-compressor: compress on the fly (no offline manifest).
COMPRESS_ENABLED = True
COMPRESS_OFFLINE = False
COMPRESS_PRECOMPILERS = (
    ('text/x-scss', 'scss -m -C {infile} {outfile}'),
)
# Internationalization / localization / timezone-aware datetimes.
USE_I18N = True
USE_L10N = True
USE_TZ = True
| 30.967391 | 109 | 0.631099 |
5199354acd6db260a9ba46e6bd11dc0a27cf19cc | 515 | py | Python | openpeerpower/helpers/typing.py | pcaston/Open-Peer-Power | 81805d455c548e0f86b0f7fedc793b588b2afdfd | [
"Apache-2.0"
] | null | null | null | openpeerpower/helpers/typing.py | pcaston/Open-Peer-Power | 81805d455c548e0f86b0f7fedc793b588b2afdfd | [
"Apache-2.0"
] | null | null | null | openpeerpower/helpers/typing.py | pcaston/Open-Peer-Power | 81805d455c548e0f86b0f7fedc793b588b2afdfd | [
"Apache-2.0"
] | 1 | 2019-04-24T14:10:08.000Z | 2019-04-24T14:10:08.000Z | """Typing Helpers for Open Peer Power."""
from typing import Any, Dict, Optional, Tuple
import openpeerpower.core
# pylint: disable=invalid-name
# Type aliases used across Open Peer Power purely for annotation purposes.
GPSType = Tuple[float, float]
ConfigType = Dict[str, Any]
ContextType = openpeerpower.core.Context
EventType = openpeerpower.core.Event
OpenPeerPowerType = openpeerpower.core.OpenPeerPower
ServiceCallType = openpeerpower.core.ServiceCall
ServiceDataType = Dict[str, Any]
TemplateVarsType = Optional[Dict[str, Any]]
# Custom type for recorder Queries
QueryType = Any
| 27.105263 | 52 | 0.794175 |
663f64767793a753e6f3ebd528650cc7904ae056 | 1,519 | py | Python | sir.py | retr0-13/skype_ip_resolver | 4f9ec2f05ddff28cc90cb631f8eae4882ee2d293 | [
"MIT"
] | 11 | 2015-09-03T12:07:59.000Z | 2022-03-06T06:59:10.000Z | sir.py | retr0-13/skype_ip_resolver | 4f9ec2f05ddff28cc90cb631f8eae4882ee2d293 | [
"MIT"
] | null | null | null | sir.py | retr0-13/skype_ip_resolver | 4f9ec2f05ddff28cc90cb631f8eae4882ee2d293 | [
"MIT"
] | 4 | 2018-04-01T09:03:30.000Z | 2021-04-08T19:13:47.000Z | #!/usr/bin/python
"""
Copyright (c) 2014 tilt (https://github.com/AeonDave/sir)
See the file 'LICENSE' for copying permission
"""
import sys, getopt, logging
from lib import update
from lib import actions
from lib.logger import logger
# Tilt Setup
# Parse command-line options.
# BUG FIX: '-o' takes an argument, so the short spec needs 'o:' and the
# long spec needs 'output='. Previously '-o' was not recognized at all and
# '--output' never received its value, so the -o/--output branch below
# could never set `output` correctly.
try:
    options, args = getopt.getopt(sys.argv[1:], 'n:o:vhu', ['name=', 'version', 'help', 'update', 'output='])
except getopt.GetoptError:
    actions.showhelp()
    sys.exit(1)
name = None
output = None
for opt, arg in options:
    if opt in ('-h', '--help'):
        actions.showhelp()
        sys.exit(0)
    elif opt in ('-v', '--version'):
        actions.header()
        sys.exit(0)
    elif opt in ('-u', '--update'):
        actions.header()
        update.update()
        sys.exit(0)
    elif opt in ('-n', '--name'):
        name = arg
    elif opt in ('-o', '--output'):
        output = arg
    else:
        actions.header()
        actions.showhelp()
        sys.exit(1)
# A Skype name is mandatory; exit with an error otherwise.
if not name:
    actions.header()
    actions.showhelp()
    msg = "[-] ERROR: You must provide a Skype Name."
    logger.error(msg)
    sys.exit(1)
def main():
    """Resolve the given Skype name, optionally mirroring logs to a file."""
    # Mirror log output to the file given via -o/--output, if any.
    if output:
        handler = logging.FileHandler(output)
        handler.setLevel(logging.INFO)
        logger.addHandler(handler)
    # `name` is validated before main() is called; keep the guard anyway.
    if name:
        logger.info('[*] Trying to resolve name: '+ name)
        actions.resolve(name)
    if output:
        logger.info('[+] File log written: ' + output)
# Program
# Entry point: print the banner, run the resolver, then exit cleanly.
if __name__ == '__main__':
    actions.header()
    main()
    sys.exit(0)
| 21.7 | 106 | 0.57472 |
6ea6bb20e3040fb50407ffeb855a15a9b13a1663 | 156 | py | Python | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/commands/__init__.py | yankeexe/cookiecutter-python-cli | 992bc1bf9af825b6ad144e5e4cbf969a58003347 | [
"MIT"
] | 4 | 2021-02-12T05:32:25.000Z | 2022-01-19T10:30:46.000Z | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/commands/__init__.py | yankeexe/cookiecutter-python-cli | 992bc1bf9af825b6ad144e5e4cbf969a58003347 | [
"MIT"
] | 1 | 2022-01-19T13:56:45.000Z | 2022-01-20T12:13:06.000Z | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/commands/__init__.py | yankeexe/cookiecutter-python-cli | 992bc1bf9af825b6ad144e5e4cbf969a58003347 | [
"MIT"
] | 1 | 2022-01-19T10:30:49.000Z | 2022-01-19T10:30:49.000Z | """
Exports for CLI commands.
"""
from {{cookiecutter.project_slug}}.commands.init import init
from {{cookiecutter.project_slug}}.commands.show import show
| 26 | 60 | 0.775641 |
c6d48e481202750b624658513892e4d88444ba2b | 2,631 | py | Python | libp2p/security/noise/io.py | g-r-a-n-t/py-libp2p | 36a4a9150dcc53b42315b5c6868fccde5083963b | [
"Apache-2.0",
"MIT"
] | 315 | 2019-02-13T01:29:09.000Z | 2022-03-28T13:44:07.000Z | libp2p/security/noise/io.py | pipermerriam/py-libp2p | 379a157d6b67e86a616b2458af519bbe5fb26a51 | [
"Apache-2.0",
"MIT"
] | 249 | 2019-02-22T05:00:07.000Z | 2022-03-29T16:30:46.000Z | libp2p/security/noise/io.py | ralexstokes/py-libp2p | 5144ab82894623969cb17baf0d4c64bd0a274068 | [
"Apache-2.0",
"MIT"
] | 77 | 2019-02-24T19:45:17.000Z | 2022-03-30T03:20:09.000Z | from typing import cast
from noise.connection import NoiseConnection as NoiseState
from libp2p.io.abc import EncryptedMsgReadWriter, MsgReadWriteCloser, ReadWriteCloser
from libp2p.io.msgio import FixedSizeLenMsgReadWriter
from libp2p.network.connection.raw_connection_interface import IRawConnection
# Framing constants: every Noise packet is prefixed with a 2-byte length,
# so a packet may carry at most 65535 bytes.
SIZE_NOISE_MESSAGE_LEN = 2
MAX_NOISE_MESSAGE_LEN = 2 ** (8 * SIZE_NOISE_MESSAGE_LEN) - 1  # 65535
SIZE_NOISE_MESSAGE_BODY_LEN = 2
MAX_NOISE_MESSAGE_BODY_LEN = MAX_NOISE_MESSAGE_LEN - SIZE_NOISE_MESSAGE_BODY_LEN
BYTE_ORDER = "big"  # byte order for encoded length fields — presumably consumed by msgio; confirm
# Wire layout of one Noise packet:
# |                Noise packet                |
#  <-- 2 bytes --><------ up to 65535 -------->
# | noise msg len |         noise msg          |
class NoisePacketReadWriter(FixedSizeLenMsgReadWriter):
    """Reader/writer for raw Noise packets framed by a 2-byte length prefix."""
    size_len_bytes = SIZE_NOISE_MESSAGE_LEN  # prefix width consumed by FixedSizeLenMsgReadWriter
class BaseNoiseMsgReadWriter(EncryptedMsgReadWriter):
    """
    Shared plumbing for encrypted Noise message readers/writers.

    Handles packet framing and the optional 32-byte zero prefix only;
    subclasses supply the cryptography by implementing ``encrypt`` and
    ``decrypt``.
    """
    read_writer: MsgReadWriteCloser
    noise_state: NoiseState
    # FIXME: This prefix is added in msg#3 in Go. Check whether it's a desired behavior.
    prefix: bytes = b"\x00" * 32

    def __init__(self, conn: IRawConnection, noise_state: NoiseState) -> None:
        raw = cast(ReadWriteCloser, conn)
        self.read_writer = NoisePacketReadWriter(raw)
        self.noise_state = noise_state

    async def write_msg(self, data: bytes, prefix_encoded: bool = False) -> None:
        ciphertext = self.encrypt(data)
        payload = self.prefix + ciphertext if prefix_encoded else ciphertext
        await self.read_writer.write_msg(payload)

    async def read_msg(self, prefix_encoded: bool = False) -> bytes:
        packet = await self.read_writer.read_msg()
        if prefix_encoded:
            # Strip the leading prefix bytes before decrypting.
            packet = packet[len(self.prefix) :]
        return self.decrypt(packet)

    async def close(self) -> None:
        await self.read_writer.close()
class NoiseHandshakeReadWriter(BaseNoiseMsgReadWriter):
    """Reader/writer used during the Noise handshake phase."""
    def encrypt(self, data: bytes) -> bytes:
        # Handshake messages go through write_message, which presumably also
        # advances the Noise handshake state — see the `noise` library docs.
        return self.noise_state.write_message(data)
    def decrypt(self, data: bytes) -> bytes:
        return self.noise_state.read_message(data)
class NoiseTransportReadWriter(BaseNoiseMsgReadWriter):
    """Reader/writer used after the handshake, in the transport phase."""
    def encrypt(self, data: bytes) -> bytes:
        # Plain transport encryption/decryption on the established session.
        return self.noise_state.encrypt(data)
    def decrypt(self, data: bytes) -> bytes:
        return self.noise_state.decrypt(data)
| 35.554054 | 88 | 0.694033 |
460c4a27fc5a84f7040ab8eea7813e22da721ad7 | 1,350 | py | Python | orc8r/gateway/python/magma/magmad/events.py | omnicate/magma | e1e6c244f9e8bd000587a3dad3c54f4e64ada222 | [
"BSD-3-Clause"
] | null | null | null | orc8r/gateway/python/magma/magmad/events.py | omnicate/magma | e1e6c244f9e8bd000587a3dad3c54f4e64ada222 | [
"BSD-3-Clause"
] | null | null | null | orc8r/gateway/python/magma/magmad/events.py | omnicate/magma | e1e6c244f9e8bd000587a3dad3c54f4e64ada222 | [
"BSD-3-Clause"
] | null | null | null | """
Copyright (c) 2016-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
import json
import snowflake
from google.protobuf.json_format import MessageToDict
from magma.eventd.eventd_client import log_event
from orc8r.protos.eventd_pb2 import Event
from orc8r.swagger.models.processed_updates import ProcessedUpdates
from orc8r.swagger.models.restarted_services import RestartedServices
def processed_updates(updates):
    """Emit a magmad 'processed_updates' event for the given protobuf updates."""
    # Protobuf messages are not JSON-serializable; convert each to a dict first.
    serializable = [MessageToDict(update) for update in updates]
    payload = json.dumps(ProcessedUpdates(updates=serializable).to_dict())
    event = Event(
        stream_name="magmad",
        event_type="processed_updates",
        tag=snowflake.snowflake(),
        value=payload,
    )
    log_event(event)
def restarted_services(services):
    """Emit a magmad 'restarted_services' event naming the restarted services."""
    # Materialize as a list so arbitrary iterables serialize cleanly.
    payload = json.dumps(RestartedServices(services=list(services)).to_dict())
    log_event(
        Event(
            stream_name="magmad",
            event_type="restarted_services",
            tag=snowflake.snowflake(),
            value=payload,
        )
    )
| 31.395349 | 79 | 0.714815 |
12517c9b5218f24acb1494c3bd9b5c4d3e1deab9 | 25,461 | py | Python | qa/rpc-tests/fundrawtransaction-hd.py | AtomicLemon/bitthatchain | 60231977d446a3f0ba4c112a1405eebed9acc17a | [
"MIT"
] | null | null | null | qa/rpc-tests/fundrawtransaction-hd.py | AtomicLemon/bitthatchain | 60231977d446a3f0ba4c112a1405eebed9acc17a | [
"MIT"
] | null | null | null | qa/rpc-tests/fundrawtransaction-hd.py | AtomicLemon/bitthatchain | 60231977d446a3f0ba4c112a1405eebed9acc17a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BitcoinTestFramework):
    """Functional tests for the fundrawtransaction RPC against HD wallets."""
    def __init__(self):
        # Fresh regtest chain, four nodes.
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 4
    def setup_network(self, split=False):
        """Start four HD-enabled nodes and connect them into one mesh."""
        self.nodes = start_nodes(4, self.options.tmpdir, [['-usehd=1']] * self.num_nodes, redirect_stderr=True)
        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        connect_nodes_bi(self.nodes,0,3)
        self.is_network_split=False
        self.sync_all()
    def run_test(self):
        """Exercise fundrawtransaction across many input/output scenarios:

        plain funding, oversized/undersized VINs, multiple VINs/VOUTs,
        invalid inputs, fee parity with send* RPCs, multisig, encrypted
        (locked) wallets, many-input transactions, OP_RETURN-only
        transactions, and watch-only funding.
        """
        print("Mining blocks...")
        min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
        # This test is not meant to test fee estimation and we'd like
        # to be sure all txs are sent at a consistent desired feerate
        for node in self.nodes:
            node.settxfee(min_relay_tx_fee)
        # if the fee's positive delta is higher than this value tests will fail,
        # neg. delta always fail the tests.
        # The size of the signature of every input may be at most 2 bytes larger
        # than a minimum sized signature.
        # = 2 bytes * minRelayTxFeePerByte
        feeTolerance = 2 * min_relay_tx_fee/1000
        # Mine past coinbase maturity (100 blocks) so early rewards are spendable.
        self.nodes[2].generate(1)
        self.sync_all()
        self.nodes[0].generate(121)
        self.sync_all()
        watchonly_address = self.nodes[0].getnewaddress()
        watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
        watchonly_amount = Decimal(2000)
        self.nodes[3].importpubkey(watchonly_pubkey, "", True)
        watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
        self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 15)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 50)
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        ###############
        # simple test #
        ###############
        inputs = [ ]
        outputs = { self.nodes[0].getnewaddress() : 10 }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
        ##############################
        # simple test with two coins #
        ##############################
        inputs = [ ]
        outputs = { self.nodes[0].getnewaddress() : 22 }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
        ##############################
        # simple test with two coins #
        ##############################
        inputs = [ ]
        outputs = { self.nodes[0].getnewaddress() : 26 }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        assert(len(dec_tx['vin']) > 0)
        assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
        ################################
        # simple test with two outputs #
        ################################
        inputs = [ ]
        outputs = { self.nodes[0].getnewaddress() : 26, self.nodes[1].getnewaddress() : 25 }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        for out in dec_tx['vout']:
            totalOut += out['value']
        assert(len(dec_tx['vin']) > 0)
        assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
        #########################################################################
        # test a fundrawtransaction with a VIN greater than the required amount #
        #########################################################################
        utx = False
        listunspent = self.nodes[2].listunspent()
        for aUtx in listunspent:
            if aUtx['amount'] == 50:
                utx = aUtx
                break
        assert(utx!=False)
        inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
        outputs = { self.nodes[0].getnewaddress() : 10 }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        for out in dec_tx['vout']:
            totalOut += out['value']
        assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
        #####################################################################
        # test a fundrawtransaction with which will not get a change output #
        #####################################################################
        utx = False
        listunspent = self.nodes[2].listunspent()
        for aUtx in listunspent:
            if aUtx['amount'] == 50:
                utx = aUtx
                break
        assert(utx!=False)
        inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
        outputs = { self.nodes[0].getnewaddress() : Decimal(50) - fee - feeTolerance }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        for out in dec_tx['vout']:
            totalOut += out['value']
        assert_equal(rawtxfund['changepos'], -1)
        assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
        #########################################################################
        # test a fundrawtransaction with a VIN smaller than the required amount #
        #########################################################################
        utx = False
        listunspent = self.nodes[2].listunspent()
        for aUtx in listunspent:
            if aUtx['amount'] == 10:
                utx = aUtx
                break
        assert(utx!=False)
        inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
        outputs = { self.nodes[0].getnewaddress() : 10 }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        # 4-byte version + 1-byte vin count + 36-byte prevout then script_len
        rawtx = rawtx[:82] + "0100" + rawtx[84:]
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        matchingOuts = 0
        for i, out in enumerate(dec_tx['vout']):
            totalOut += out['value']
            if out['scriptPubKey']['addresses'][0] in outputs:
                matchingOuts+=1
            else:
                assert_equal(i, rawtxfund['changepos'])
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
        assert_equal(matchingOuts, 1)
        assert_equal(len(dec_tx['vout']), 2)
        ###########################################
        # test a fundrawtransaction with two VINs #
        ###########################################
        utx = False
        utx2 = False
        listunspent = self.nodes[2].listunspent()
        for aUtx in listunspent:
            if aUtx['amount'] == 10:
                utx = aUtx
            if aUtx['amount'] == 50:
                utx2 = aUtx
        assert(utx!=False)
        inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
        outputs = { self.nodes[0].getnewaddress() : 60 }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        matchingOuts = 0
        for out in dec_tx['vout']:
            totalOut += out['value']
            if out['scriptPubKey']['addresses'][0] in outputs:
                matchingOuts+=1
        assert_equal(matchingOuts, 1)
        assert_equal(len(dec_tx['vout']), 2)
        matchingIns = 0
        for vinOut in dec_tx['vin']:
            for vinIn in inputs:
                if vinIn['txid'] == vinOut['txid']:
                    matchingIns+=1
        assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
        #########################################################
        # test a fundrawtransaction with two VINs and two vOUTs #
        #########################################################
        utx = False
        utx2 = False
        listunspent = self.nodes[2].listunspent()
        for aUtx in listunspent:
            if aUtx['amount'] == 10:
                utx = aUtx
            if aUtx['amount'] == 50:
                utx2 = aUtx
        assert(utx!=False)
        inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
        outputs = { self.nodes[0].getnewaddress() : 60, self.nodes[0].getnewaddress() : 10 }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        matchingOuts = 0
        for out in dec_tx['vout']:
            totalOut += out['value']
            if out['scriptPubKey']['addresses'][0] in outputs:
                matchingOuts+=1
        assert_equal(matchingOuts, 2)
        assert_equal(len(dec_tx['vout']), 3)
        ##############################################
        # test a fundrawtransaction with invalid vin #
        ##############################################
        listunspent = self.nodes[2].listunspent()
        inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
        outputs = { self.nodes[0].getnewaddress() : 10}
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        try:
            rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
            raise AssertionError("Spent more than available")
        except JSONRPCException as e:
            assert("Insufficient" in e.error['message'])
        ############################################################
        #compare fee of a standard pubkeyhash transaction
        inputs = []
        outputs = {self.nodes[1].getnewaddress():11}
        rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[0].fundrawtransaction(rawTx)
        #create same transaction over sendtoaddress
        txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 11)
        signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
        #compare fee
        feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
        assert(feeDelta >= 0 and feeDelta <= feeTolerance)
        ############################################################
        ############################################################
        #compare fee of a standard pubkeyhash transaction with multiple outputs
        inputs = []
        outputs = {self.nodes[1].getnewaddress():11,self.nodes[1].getnewaddress():12,self.nodes[1].getnewaddress():1,self.nodes[1].getnewaddress():13,self.nodes[1].getnewaddress():2,self.nodes[1].getnewaddress():3}
        rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[0].fundrawtransaction(rawTx)
        #create same transaction over sendtoaddress
        txId = self.nodes[0].sendmany("", outputs)
        signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
        #compare fee
        feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
        assert(feeDelta >= 0 and feeDelta <= feeTolerance)
        ############################################################
        ############################################################
        #compare fee of a 2of2 multisig p2sh transaction
        # create 2of2 addr
        addr1 = self.nodes[1].getnewaddress()
        addr2 = self.nodes[1].getnewaddress()
        addr1Obj = self.nodes[1].validateaddress(addr1)
        addr2Obj = self.nodes[1].validateaddress(addr2)
        mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
        inputs = []
        outputs = {mSigObj:11}
        rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[0].fundrawtransaction(rawTx)
        #create same transaction over sendtoaddress
        txId = self.nodes[0].sendtoaddress(mSigObj, 11)
        signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
        #compare fee
        feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
        assert(feeDelta >= 0 and feeDelta <= feeTolerance)
        ############################################################
        ############################################################
        #compare fee of a standard pubkeyhash transaction
        # create 4of5 addr
        addr1 = self.nodes[1].getnewaddress()
        addr2 = self.nodes[1].getnewaddress()
        addr3 = self.nodes[1].getnewaddress()
        addr4 = self.nodes[1].getnewaddress()
        addr5 = self.nodes[1].getnewaddress()
        addr1Obj = self.nodes[1].validateaddress(addr1)
        addr2Obj = self.nodes[1].validateaddress(addr2)
        addr3Obj = self.nodes[1].validateaddress(addr3)
        addr4Obj = self.nodes[1].validateaddress(addr4)
        addr5Obj = self.nodes[1].validateaddress(addr5)
        mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
        inputs = []
        outputs = {mSigObj:11}
        rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[0].fundrawtransaction(rawTx)
        #create same transaction over sendtoaddress
        txId = self.nodes[0].sendtoaddress(mSigObj, 11)
        signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
        #compare fee
        feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
        assert(feeDelta >= 0 and feeDelta <= feeTolerance)
        ############################################################
        ############################################################
        # spend a 2of2 multisig transaction over fundraw
        # create 2of2 addr
        addr1 = self.nodes[2].getnewaddress()
        addr2 = self.nodes[2].getnewaddress()
        addr1Obj = self.nodes[2].validateaddress(addr1)
        addr2Obj = self.nodes[2].validateaddress(addr2)
        mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
        # send 12 XBCH to msig addr
        txId = self.nodes[0].sendtoaddress(mSigObj, 12)
        self.sync_all()
        self.nodes[1].generate(1)
        self.sync_all()
        oldBalance = self.nodes[1].getbalance()
        inputs = []
        outputs = {self.nodes[1].getnewaddress():11}
        rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[2].fundrawtransaction(rawTx)
        signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
        txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
        self.sync_all()
        self.nodes[1].generate(1)
        self.sync_all()
        # make sure funds are received at node1
        assert_equal(oldBalance+Decimal('11.0000000'), self.nodes[1].getbalance())
        ############################################################
        # locked wallet test
        self.nodes[1].encryptwallet("test")
        # encryptwallet shuts node1 down; drop it from the list and stop the
        # remaining nodes (indexes shift after pop, hence the 2 and 3 args).
        self.nodes.pop(1)
        stop_node(self.nodes[0], 0)
        stop_node(self.nodes[1], 2)
        stop_node(self.nodes[2], 3)
        self.nodes = start_nodes(4, self.options.tmpdir, [['-usehd=1']] * self.num_nodes, redirect_stderr=True)
        # This test is not meant to test fee estimation and we'd like
        # to be sure all txs are sent at a consistent desired feerate
        for node in self.nodes:
            node.settxfee(min_relay_tx_fee)
        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        connect_nodes_bi(self.nodes,0,3)
        self.is_network_split=False
        self.sync_all()
        # drain the keypool
        self.nodes[1].getnewaddress()
        self.nodes[1].getrawchangeaddress()
        inputs = []
        outputs = {self.nodes[0].getnewaddress():1.1}
        rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
        # fund a transaction that requires a new key for the change output
        # creating the key must be impossible because the wallet is locked
        try:
            fundedTx = self.nodes[1].fundrawtransaction(rawTx)
            raise AssertionError("Wallet unlocked without passphrase")
        except JSONRPCException as e:
            assert('Keypool ran out' in e.error['message'])
        #refill the keypool
        self.nodes[1].walletpassphrase("test", 100)
        self.nodes[1].keypoolrefill(2) #need to refill the keypool to get an internal change address
        self.nodes[1].walletlock()
        try:
            self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 12)
            raise AssertionError("Wallet unlocked without passphrase")
        except JSONRPCException as e:
            assert('walletpassphrase' in e.error['message'])
        oldBalance = self.nodes[0].getbalance()
        inputs = []
        outputs = {self.nodes[0].getnewaddress():11}
        rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[1].fundrawtransaction(rawTx)
        #now we need to unlock
        self.nodes[1].walletpassphrase("test", 100)
        signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
        txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
        self.sync_all()
        self.nodes[1].generate(1)
        self.sync_all()
        # make sure funds are received at node0 (11 sent + 500 mining reward)
        assert_equal(oldBalance+Decimal('511.0000000'), self.nodes[0].getbalance())
        ###############################################
        # multiple (~19) inputs tx test | Compare fee #
        ###############################################
        #empty node1, send some small coins from node0 to node1
        self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        for i in range(0,20):
            self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        #fund a tx with ~20 small inputs
        inputs = []
        outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
        rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[1].fundrawtransaction(rawTx)
        #create same transaction over sendtoaddress
        txId = self.nodes[1].sendmany("", outputs)
        signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
        #compare fee
        feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
        assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
        #############################################
        # multiple (~19) inputs tx test | sign/send #
        #############################################
        #again, empty node1, send some small coins from node0 to node1
        self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        for i in range(0,20):
            self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        #fund a tx with ~20 small inputs
        oldBalance = self.nodes[0].getbalance()
        inputs = []
        outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
        rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[1].fundrawtransaction(rawTx)
        fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
        txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        assert_equal(oldBalance+Decimal('500.19000000'), self.nodes[0].getbalance()) #0.19+block reward
        #####################################################
        # test fundrawtransaction with OP_RETURN and no vin #
        #####################################################
        rawtx = "0100000000010000000000000000066a047465737400000000"
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(len(dec_tx['vin']), 0)
        assert_equal(len(dec_tx['vout']), 1)
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
        assert_equal(len(dec_tx['vout']), 2) # one change output added
        ##################################################
        # test a fundrawtransaction using only watchonly #
        ##################################################
        inputs = []
        outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
        rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
        result = self.nodes[3].fundrawtransaction(rawtx, True)
        res_dec = self.nodes[0].decoderawtransaction(result["hex"])
        assert_equal(len(res_dec["vin"]), 1)
        assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
        assert("fee" in result.keys())
        assert_greater_than(result["changepos"], -1)
        ###############################################################
        # test fundrawtransaction using the entirety of watched funds #
        ###############################################################
        inputs = []
        outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
        rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
        result = self.nodes[3].fundrawtransaction(rawtx, True)
        res_dec = self.nodes[0].decoderawtransaction(result["hex"])
        assert_equal(len(res_dec["vin"]), 2)
        assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
        assert_greater_than(result["fee"], 0)
        assert_greater_than(result["changepos"], -1)
        assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
        # node3 only watches the key, so its signature alone is incomplete;
        # node0 (which owns the key) completes the signing.
        signedtx = self.nodes[3].signrawtransaction(result["hex"])
        assert(not signedtx["complete"])
        signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
        assert(signedtx["complete"])
        self.nodes[0].sendrawtransaction(signedtx["hex"])
# Run the functional test directly when invoked as a script.
if __name__ == '__main__':
    RawTransactionsTest().main()
| 40.672524 | 214 | 0.558658 |
4c8aa2ccdf738480dc8ce7bf8e085875879a1e5c | 1,350 | py | Python | tests/records/test_plr.py | pvalsecc/pyramid_oereb | 3d9a8a68952641e923c34e086768630e21a559ed | [
"BSD-2-Clause"
] | null | null | null | tests/records/test_plr.py | pvalsecc/pyramid_oereb | 3d9a8a68952641e923c34e086768630e21a559ed | [
"BSD-2-Clause"
] | 3 | 2019-12-26T17:00:44.000Z | 2022-03-21T22:16:54.000Z | tests/records/test_plr.py | pvalsecc/pyramid_oereb | 3d9a8a68952641e923c34e086768630e21a559ed | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime
import pytest
from shapely.geometry import Point
from pyramid_oereb.lib.records.geometry import GeometryRecord
from pyramid_oereb.lib.records.image import ImageRecord
from pyramid_oereb.lib.records.law_status import LawStatusRecord
from pyramid_oereb.lib.records.office import OfficeRecord
from pyramid_oereb.lib.records.plr import PlrRecord
from pyramid_oereb.lib.records.theme import ThemeRecord
from pyramid_oereb.lib.records.view_service import ViewServiceRecord
def test_mandatory_fields():
    """Constructing a PlrRecord without its required arguments must fail."""
    with pytest.raises(TypeError):
        PlrRecord()
def test_init():
    """A fully populated PlrRecord exposes its parts with the expected types."""
    status = LawStatusRecord.from_config(u'inForce')
    responsible_office = OfficeRecord({'en': 'Office'})
    wms = ViewServiceRecord('http://my.wms.com', 1, 1.0)
    geom = GeometryRecord(status, datetime.date.today(), Point(1, 1))
    plr = PlrRecord(
        ThemeRecord('code', dict()),
        {'en': 'Content'},
        status,
        datetime.date(1985, 8, 29),
        responsible_office,
        ImageRecord('1'.encode('utf-8')),
        wms,
        [geom],
    )
    assert plr.information == {'en': 'Content'}
    assert plr.sub_theme is None
    assert isinstance(plr.geometries, list)
    assert isinstance(plr.responsible_office, OfficeRecord)
    assert isinstance(plr.theme, ThemeRecord)
    assert isinstance(plr.symbol, ImageRecord)
6f9282a3891ba4b2e58c2ba4d146e2191bba93a4 | 3,576 | py | Python | pyns/protocols/factory.py | Kuree/pyns | c627d11bbc79b86f868371282165b41652040e57 | [
"MIT"
] | null | null | null | pyns/protocols/factory.py | Kuree/pyns | c627d11bbc79b86f868371282165b41652040e57 | [
"MIT"
] | null | null | null | pyns/protocols/factory.py | Kuree/pyns | c627d11bbc79b86f868371282165b41652040e57 | [
"MIT"
] | null | null | null | from . import TDMANode, TDMABaseStation
from . import CSMANode, CSMABaseStation
from . import LPDQNode, LPDQBaseStation
from . import DQNNode, DQNBaseStation
from . import ALOHANode, ALOHABaseStation
from . import LORANode, LORABaseStation
import json
import inspect
from enum import Enum
class ProtocolType(Enum):
    """MAC protocols supported by the factory functions in this module."""
    TDMA = 0
    CSMA = 1
    LPDQ = 2
    DQN = 3
    ALOHA = 4
    LORA = 5
# Optional device attributes copied verbatim from the config onto the
# created node/base station (see __set_attributes).
SPECIAL_ATTRIBUTES = ["lat", "lng", "path_loss", "packet_loss"]
def create_basestation(protocol_type, id, env, config, special_arg = None):
    """Instantiate the base station implementing the requested protocol.

    Args:
        protocol_type: a ProtocolType member or its integer value.
        id: identifier forwarded to the base station constructor.
        env: simulation environment forwarded to the constructor.
        config: a dict of settings or a path to a JSON file with them.
        special_arg: optional dict of keyword overrides merged on top of
            the config-derived arguments.

    Returns:
        The constructed base station object.

    Raises:
        ValueError: if protocol_type is not a valid ProtocolType.
    """
    # Dispatch table replaces the previous six-way if/elif chain; the table
    # is local so this function stays self-contained.
    basestation_classes = {
        ProtocolType.TDMA: TDMABaseStation,
        ProtocolType.CSMA: CSMABaseStation,
        ProtocolType.LPDQ: LPDQBaseStation,
        ProtocolType.DQN: DQNBaseStation,
        ProtocolType.ALOHA: ALOHABaseStation,
        ProtocolType.LORA: LORABaseStation,
    }
    args = __load_config(config)
    args["id"] = id
    args["env"] = env
    # ProtocolType(...) raises ValueError for unknown values, as before.
    cls = basestation_classes[ProtocolType(protocol_type)]
    args = __process_args(cls.__init__, args, special_arg)
    bs = cls(**args)
    for name in SPECIAL_ATTRIBUTES:
        # NOTE(review): when `config` is a path string, `name in config` inside
        # __set_attributes checks substring membership in the path, not the
        # loaded settings — preserved as-is; confirm intent.
        __set_attributes(bs, config, name)
    return bs
def create_node(protocol_type, id, env, config, special_arg = None):
    """Instantiate the node implementing the requested protocol.

    Args:
        protocol_type: a ProtocolType member or its integer value.
        id: identifier forwarded to the node constructor.
        env: simulation environment forwarded to the constructor.
        config: a dict of settings or a path to a JSON file with them.
        special_arg: optional dict of keyword overrides merged on top of
            the config-derived arguments.

    Returns:
        The constructed node object.

    Raises:
        ValueError: if protocol_type is not a valid ProtocolType.
    """
    # Dispatch table replaces the previous six-way if/elif chain; the table
    # is local so this function stays self-contained.
    node_classes = {
        ProtocolType.TDMA: TDMANode,
        ProtocolType.CSMA: CSMANode,
        ProtocolType.LPDQ: LPDQNode,
        ProtocolType.DQN: DQNNode,
        ProtocolType.ALOHA: ALOHANode,
        ProtocolType.LORA: LORANode,
    }
    args = __load_config(config)
    args["id"] = id
    args["env"] = env
    # ProtocolType(...) raises ValueError for unknown values, as before.
    cls = node_classes[ProtocolType(protocol_type)]
    args = __process_args(cls.__init__, args, special_arg)
    node = cls(**args)
    for name in SPECIAL_ATTRIBUTES:
        # NOTE(review): when `config` is a path string, `name in config` inside
        # __set_attributes checks substring membership in the path, not the
        # loaded settings — preserved as-is; confirm intent.
        __set_attributes(node, config, name)
    return node
def __load_config(config):
    """Normalize `config` into a settings dict.

    A str is treated as a path to a JSON file and parsed; a dict is
    returned unchanged; anything else yields an empty dict.
    """
    # isinstance instead of `type(x) == str` — idiomatic and also accepts
    # str/dict subclasses, which is backward compatible.
    if isinstance(config, str):
        with open(config) as f:
            return json.load(f)
    if isinstance(config, dict):
        return config
    return {}
def __set_attributes(device, config, name):
    """Copy config[name] onto `device` as an attribute, when both exist."""
    # Guard-clause form: bail out unless there is a device and the key is
    # present in `config` (membership test, same semantics as the original).
    if device is None or name not in config:
        return
    setattr(device, name, config[name])
def __process_args(func, args, special_arg):
    """Filter keyword arguments down to the parameters accepted by `func`.

    Entries from `special_arg` (if any) override entries in `args`; the
    input dicts are left unmodified.

    Args:
        func: the callable whose positional parameter names are accepted.
        args: candidate keyword arguments.
        special_arg: optional dict of overriding keyword arguments.

    Returns:
        A new dict containing only keys matching `func`'s parameters.
    """
    # inspect.getargspec() was deprecated and removed in Python 3.11;
    # getfullargspec() is the drop-in replacement.
    func_args = inspect.getfullargspec(func).args
    # Merge into a copy instead of mutating the caller's dict.
    merged = dict(args)
    if special_arg is not None:
        merged.update(special_arg)
    return {k: v for k, v in merged.items() if k in func_args}
9254a149d28961f519e2f1c82f6df29190013a04 | 370 | py | Python | Adafruit_QT_Py_ESP32-S2/Capacitive_Touch_One_Pin/code.py | gamblor21/Adafruit_Learning_System_Guides | f5dab4a758bc82d0bfc3c299683fe89dc093912a | [
"MIT"
] | null | null | null | Adafruit_QT_Py_ESP32-S2/Capacitive_Touch_One_Pin/code.py | gamblor21/Adafruit_Learning_System_Guides | f5dab4a758bc82d0bfc3c299683fe89dc093912a | [
"MIT"
] | null | null | null | Adafruit_QT_Py_ESP32-S2/Capacitive_Touch_One_Pin/code.py | gamblor21/Adafruit_Learning_System_Guides | f5dab4a758bc82d0bfc3c299683fe89dc093912a | [
"MIT"
] | null | null | null | # SPDX-FileCopyrightText: 2021 Kattni Rembor for Adafruit Industries
# SPDX-License-Identifier: Unlicense
"""
CircuitPython Capacitive Touch Pin Example - Print to the serial console when one pin is touched.
"""
import time
import board
import touchio
touch = touchio.TouchIn(board.A2)
while True:
if touch.value:
print("Pin touched!")
time.sleep(0.1)
| 23.125 | 97 | 0.743243 |
5c8b9c9ad3ef87f3b37baf34b6ed3f4a880fe634 | 3,934 | py | Python | preproc.py | SingularityF/VO-1s-Ov-A | 0b9f053253eb958e8268f6ba8a8b828e31778ee2 | [
"MIT"
] | null | null | null | preproc.py | SingularityF/VO-1s-Ov-A | 0b9f053253eb958e8268f6ba8a8b828e31778ee2 | [
"MIT"
] | null | null | null | preproc.py | SingularityF/VO-1s-Ov-A | 0b9f053253eb958e8268f6ba8a8b828e31778ee2 | [
"MIT"
] | null | null | null | import cv2
import tensorflow as tf
import numpy as np
def tf_int64_feature(value):
    """Returns an int64_list from a bool / enum / int / uint."""
    int64_list = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=int64_list)
def tf_bytes_feature(value):
    """Returns a bytes_list from a string / byte."""
    if isinstance(value, type(tf.constant(0))):
        # BytesList won't unpack a string from an EagerTensor.
        value = value.numpy()
    bytes_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=bytes_list)
def debug_contour(img, contours):
    """Show *img* (grayscale) with *contours* drawn in green; wait for a key."""
    canvas = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.drawContours(image=canvas, contours=contours, contourIdx=-1,
                     color=(0, 255, 0), thickness=1, lineType=cv2.LINE_AA)
    cv2.imshow("", canvas)
    cv2.waitKey(0)
def line_detector(img, debug_flag=False):
    """Detect text lines by smearing the image horizontally.

    A very wide, short kernel merges glyphs of one line into a single
    blob; each blob's bounding box is one line.

    Returns:
        list of (x, y, w, h) boxes sorted top-to-bottom.
    """
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2000, 2))
    dilated = cv2.dilate(img, kernel, iterations=1)
    contours, hierarchy = cv2.findContours(
        dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    if debug_flag:
        debug_contour(img, contours)
    boxes = [cv2.boundingRect(cnt) for cnt in contours]
    return sorted(boxes, key=lambda box: box[1])
def word_detector(img, word_spacing, debug_flag=False):
    """Detect words within a line image.

    The kernel width *word_spacing* controls how far apart glyphs may be
    while still counting as the same word.

    Returns:
        list of (x, y, w, h) boxes sorted left-to-right.
    """
    kernel = cv2.getStructuringElement(
        cv2.MORPH_RECT, (word_spacing, 100))
    dilated = cv2.dilate(img, kernel, iterations=1)
    contours, hierarchy = cv2.findContours(
        dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    if debug_flag:
        debug_contour(img, contours)
    boxes = [cv2.boundingRect(cnt) for cnt in contours]
    return sorted(boxes, key=lambda box: box[0])
def character_detector(img, debug_flag=False):
    """Detect single characters within a word image.

    A 1-pixel-wide, tall kernel smears vertically only, so horizontally
    separated glyphs stay distinct.

    Returns:
        list of (x, y, w, h) boxes sorted left-to-right.
    """
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 100))
    dilated = cv2.dilate(img, kernel, iterations=1)
    contours, hierarchy = cv2.findContours(
        dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    if debug_flag:
        debug_contour(img, contours)
    boxes = [cv2.boundingRect(cnt) for cnt in contours]
    return sorted(boxes, key=lambda box: box[0])
def preproc_img_color(img, resize_factor):
    """Scale a color image by *resize_factor* using area interpolation."""
    height, width, _ = img.shape
    new_size = (int(width * resize_factor), int(height * resize_factor))
    return cv2.resize(img, new_size, interpolation=cv2.INTER_AREA)
def preproc_img_bw(img, threshold, prompt_roi=None):
    """Binarize *img* and optionally black out a prompt region.

    Args:
        img: BGR image.
        threshold (dict): adaptive-threshold params, see thresholding().
        prompt_roi: optional (x, y, w, h) as fractions of the image to
            mask with black (e.g. to hide an on-screen prompt), or None.

    Returns:
        Black-and-white image.
    """
    bw_img = thresholding(img, threshold)
    # NOTE(review): this kernel has a single active pixel, so the erosion
    # below is effectively a no-op — confirm whether a 3x3 ones kernel
    # was intended.
    kernel = np.array([[0, 0, 0],
                       [0, 1, 0],
                       [0, 0, 0]], dtype=np.uint8)
    bw_img = cv2.erode(bw_img, kernel, iterations=1)
    # Fixed: `prompt_roi != None` replaced with the correct identity
    # check `is not None` (PEP 8).
    if prompt_roi is not None:
        height, width = bw_img.shape
        bw_img = cv2.rectangle(bw_img, (int(width*prompt_roi[0]), int(height*prompt_roi[1])), (int(
            width*prompt_roi[0]+width*prompt_roi[2]), int(height*prompt_roi[1] + height*prompt_roi[3])), (0, 0, 0), -1)
    return bw_img
def thresholding(img, threshold):
    """Adaptive-Gaussian binarization of a BGR image.

    *threshold* is a dict with keys "block_size" (odd window size) and
    "const" (subtracted constant; note the sign flip below).
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return cv2.adaptiveThreshold(
        gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,
        threshold["block_size"], -threshold["const"])
def char_img2arr(char):
    """Resize a glyph image to 28x28 and append a trailing channel axis."""
    resized = cv2.resize(char, (28, 28), interpolation=cv2.INTER_AREA)
    return tf.expand_dims(resized, -1)
def img_reconstruct(example):
    """Rebuild a 2-D uint8 image from a parsed TFRecord example."""
    raw = example["char_img"].numpy()
    shape = (example['height'], example['width'])
    return np.frombuffer(raw, dtype=np.uint8).reshape(shape)
def filter_noise(char):
    """Return True when *char* looks like blank noise rather than a glyph.

    An image with fewer than 5 pixels brighter than 128 is treated as
    noise.

    Args:
        char: 2-D numpy array of pixel intensities.

    Returns:
        bool: True for noise, False for a plausible glyph.
    """
    # np.count_nonzero is one vectorized pass; the previous
    # sum(sum(char > 128)) iterated rows at Python level.
    return bool(np.count_nonzero(char > 128) < 5)
| 33.623932 | 119 | 0.649975 |
678484c782528c67b0377388a024bc4205be1556 | 882 | py | Python | LeetCode/InterviewQuestions/ques2.py | shrey199325/LeetCodeSolution | fede0d98d2aeeedc3cd501ac5042f2217494f4c6 | [
"Unlicense"
] | null | null | null | LeetCode/InterviewQuestions/ques2.py | shrey199325/LeetCodeSolution | fede0d98d2aeeedc3cd501ac5042f2217494f4c6 | [
"Unlicense"
] | null | null | null | LeetCode/InterviewQuestions/ques2.py | shrey199325/LeetCodeSolution | fede0d98d2aeeedc3cd501ac5042f2217494f4c6 | [
"Unlicense"
] | 1 | 2020-09-18T09:38:26.000Z | 2020-09-18T09:38:26.000Z | """
class inter():
pass
class B:
def alpha(self):
print("B")
class C:
def alpha(self):
print("C")
class A(metaclass=inter, B, C):
def alpha(self):
super(B, self).alpha()
@staticmethod
def stat_method(val):
return val
@classmethod
def class_method(cls):
pass
A.class_method()
A.stat_method(10)"""
"""
DS:
insert(), find(), del(), get_random()
O(1)
2,6,8,9,9,11,4
arr = [2,6,8,9,11]
i = 5
map_node = {
2: (0),
6: (1),
8: (2),
9: (3),
11: 4
}
def del_(val):
pass
# add_ = map_node[val]
# map_node.remove(val)
# set = {hash(id(2)): id(2)}
# add_set = {id(2), id(6), ...}
"""
"""
m*n
rows and columns are sorted
2 3 4 5 6
7 8 9 10 11
12 13 14 15 16
17 18 19 20 21
m+n
# I1(2,14) I2(5, 6) I3 (17, 19), I4 (20,21) I5(2,8)
I
I1 I2 I3
""" | 10.888889 | 53 | 0.495465 |
c0d8ba8108e546a207fa6fd42baada6cdcc267d0 | 1,494 | py | Python | uplink/api/ranks.py | Subzidion/uplink-python | 2fc4fc65e15b157a7d6c76dc72ba1b2bd5fb4428 | [
"Apache-2.0"
] | null | null | null | uplink/api/ranks.py | Subzidion/uplink-python | 2fc4fc65e15b157a7d6c76dc72ba1b2bd5fb4428 | [
"Apache-2.0"
] | null | null | null | uplink/api/ranks.py | Subzidion/uplink-python | 2fc4fc65e15b157a7d6c76dc72ba1b2bd5fb4428 | [
"Apache-2.0"
] | null | null | null | from flask import jsonify, request, abort
from .. import db
from ..models import Rank
from . import api
@api.route('/rank', methods=['GET'])
def getRanks():
    """List every rank as JSON under the 'ranks' key."""
    all_ranks = Rank.query.all()
    return jsonify({'ranks': [rank.to_dict() for rank in all_ranks]})
@api.route('/rank', methods=['POST'])
def postRank():
    """Create a rank from a JSON body; 400 on missing body or fields."""
    if not request.json:
        abort(400)
    data = request.json
    try:
        rank = Rank(name=data['name'],
                    description=data['description'],
                    textureUUID=data['textureUUID'])
    except KeyError:
        abort(400)
    db.session.add(rank)
    db.session.commit()
    response = jsonify(rank.to_dict())
    response.status_code = 201
    return response
@api.route('/rank/<int:id>', methods=['GET'])
def getRankByID(id):
    """Fetch a single rank by primary key (404 when missing)."""
    rank = Rank.query.get_or_404(id)
    return jsonify({'rank': rank.to_dict()})
@api.route('/rank/<int:id>/<attr>', methods=['PUT'])
def putRankAttrByID(id, attr):
    """Update a single attribute of a rank.

    404 when the attribute or the rank does not exist; 400 when the body
    is missing, tries to change ``id``, or does not carry *attr*.
    """
    if not hasattr(Rank, attr):
        abort(404)
    if not request.json or attr == 'id' or attr not in request.json:
        abort(400)
    data = request.json
    rank = Rank.query.get_or_404(id)
    # *attr* is guaranteed to be a key of *data* by the check above, so
    # the former try/except KeyError around this assignment was
    # unreachable (setattr itself cannot raise KeyError).
    setattr(rank, attr, data[attr])
    db.session.commit()
    return jsonify({attr: data[attr]})
@api.route('/rank/<int:id>/<attr>', methods=['GET'])
def getRankAttrByID(id, attr):
    """Read a single attribute of a rank (404 on unknown attribute)."""
    rank = Rank.query.get_or_404(id)
    try:
        value = getattr(rank, attr)
    except AttributeError:
        abort(404)
    return jsonify({attr: value})
| 26.678571 | 104 | 0.621151 |
dec446fe2c2f3312a871b6c017871788bab83a41 | 3,994 | py | Python | examples/adwords/v201506/account_management/get_account_changes.py | fosterwei/adwords-keyword-planner-API-googleads-python-lib | b80b8b3741a55f1d00c5974bc58f92540663c6f6 | [
"Apache-2.0"
] | 1 | 2020-05-23T11:32:32.000Z | 2020-05-23T11:32:32.000Z | examples/adwords/v201506/account_management/get_account_changes.py | fosterwei/adwords-keyword-planner-API-googleads-python-lib | b80b8b3741a55f1d00c5974bc58f92540663c6f6 | [
"Apache-2.0"
] | null | null | null | examples/adwords/v201506/account_management/get_account_changes.py | fosterwei/adwords-keyword-planner-API-googleads-python-lib | b80b8b3741a55f1d00c5974bc58f92540663c6f6 | [
"Apache-2.0"
] | 2 | 2018-04-20T02:16:33.000Z | 2020-11-12T20:58:54.000Z | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets the changes in the account during the last 24 hours.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
import datetime
from googleads import adwords
def main(client):
  """Print campaigns/ad groups changed in the last 24 hours.

  NOTE: this file uses Python 2 print statements (AdWords v201506 era).

  Args:
    client: an initialized adwords.AdWordsClient instance.
  """
  # Initialize appropriate service.
  customer_sync_service = client.GetService(
      'CustomerSyncService', version='v201506')
  campaign_service = client.GetService('CampaignService', version='v201506')
  # Construct selector and get all campaigns.
  selector = {
      'fields': ['Id', 'Name', 'Status']
  }
  campaigns = campaign_service.get(selector)
  campaign_ids = []
  if 'entries' in campaigns:
    for campaign in campaigns['entries']:
      campaign_ids.append(campaign['id'])
  else:
    print 'No campaigns were found.'
    return
  # Construct selector and get all changes (last 24 hours).
  today = datetime.datetime.today()
  yesterday = today - datetime.timedelta(1)
  selector = {
      'dateTimeRange': {
          'min': yesterday.strftime('%Y%m%d %H%M%S'),
          'max': today.strftime('%Y%m%d %H%M%S')
      },
      'campaignIds': campaign_ids
  }
  account_changes = customer_sync_service.get(selector)
  # Display results.
  if account_changes:
    if 'lastChangeTimestamp' in account_changes:
      print 'Most recent changes: %s' % account_changes['lastChangeTimestamp']
    if account_changes['changedCampaigns']:
      for data in account_changes['changedCampaigns']:
        print ('Campaign with id \'%s\' has change status \'%s\'.'
               % (data['campaignId'], data['campaignChangeStatus']))
        # Detail fields are only populated for campaigns whose contents
        # actually changed.
        if (data['campaignChangeStatus'] != 'NEW' and
            data['campaignChangeStatus'] != 'FIELDS_UNCHANGED'):
          if 'addedAdExtensions' in data:
            print '  Added ad extensions: %s' % data['addedAdExtensions']
          if 'removedAdExtensions' in data:
            print '  Removed ad extensions: %s' % data['removedAdExtensions']
          if 'addedCampaignCriteria' in data:
            print ('  Added campaign criteria: %s' %
                   data['addedCampaignCriteria'])
          if 'removedCampaignCriteria' in data:
            print ('  Removed campaign criteria: %s' %
                   data['removedCampaignCriteria'])
          if 'changedAdGroups' in data:
            for ad_group_data in data['changedAdGroups']:
              print ('  Ad group with id \'%s\' has change status \'%s\'.'
                     % (ad_group_data['adGroupId'],
                        ad_group_data['adGroupChangeStatus']))
              # New ad groups carry no per-ad / per-criterion deltas.
              if ad_group_data['adGroupChangeStatus'] != 'NEW':
                if 'changedAds' in ad_group_data:
                  print '    Changed ads: %s' % ad_group_data['changedAds']
                if 'changedCriteria' in ad_group_data:
                  print ('    Changed criteria: %s' %
                         ad_group_data['changedCriteria'])
                if 'removedCriteria' in ad_group_data:
                  print ('    Removed criteria: %s' %
                         ad_group_data['removedCriteria'])
  else:
    print 'No changes were found.'
# Script entry point: load cached OAuth credentials and run the report.
if __name__ == '__main__':
  # Initialize client object.
  adwords_client = adwords.AdWordsClient.LoadFromStorage()
  main(adwords_client)
| 38.403846 | 78 | 0.648723 |
00f1f72be2817228d7f8fbbd72fc340e84db1a97 | 5,945 | py | Python | src/c02_introducing_essential_functional_concepts.py | jashburn8020/functional-python-programming | 34dab481e48f5e95f2d4f5400156f13b232aef25 | [
"Apache-2.0"
] | null | null | null | src/c02_introducing_essential_functional_concepts.py | jashburn8020/functional-python-programming | 34dab481e48f5e95f2d4f5400156f13b232aef25 | [
"Apache-2.0"
] | null | null | null | src/c02_introducing_essential_functional_concepts.py | jashburn8020/functional-python-programming | 34dab481e48f5e95f2d4f5400156f13b232aef25 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env pytest-3
"""
Introducing Essential Functional Concepts
We'll look at each of the following functional programming topics:
- First-class and higher-order functions, which are sometimes known as pure functions.
- Immutable data.
- Strict and non-strict evaluation. We can also call this eager versus lazy evaluation.
- Recursion instead of an explicit loop state.
- Functional type systems
"""
# pylint:disable=missing-docstring
# Pure functions
def test_pure_function():
"""
Pure function:
- conceptually simpler and much easier to test
- local-only code; avoid global statements, look closely at any use of nonlocal
- its return value is the same for the same arguments (no variation with local static
variables, non-local variables, mutable reference arguments or input streams from I/O devices)
- its evaluation has no side effects (no mutation of local static variables, non-local
variables, mutable reference arguments or I/O streams).
- allows some optimizations by changing evaluation order
- conceptually simpler and much easier to test
A Python lambda is a pure function - can't have assignment statements
"""
def mersenne(integer):
return 2**integer - 1
assert mersenne(17) == 131071
# Higher-order functions
def test_higher_order_function():
"""
Higher-order function:
Functions that accept a function as an argument or return a function as a value. We can use
higher-order functions as a way to create composite functions from simpler functions.
Example higher-order function: max(iterable[, default=obj, key=func])
"""
year_cheese = [(2006, 32.73), (2007, 33.5), (2008, 32.84), (2009, 33.02), (2010, 32.92)]
# default: return tuple with largest value on position 0
assert max(year_cheese) == (2010, 32.92)
# return tuple with largest value in position 1
assert max(year_cheese, key=lambda yc: yc[1]) == (2007, 33.5)
# Immutable data
def test_wrap_process_unwrap():
"""
Immutable data:
Since we're not using variables to track the state of a computation, our focus needs to stay
on immutable objects. We can make extensive use of tuples and namedtuples to provide more
complex data structures that are immutable.
A list of tuples is a fairly common data structure. We will often process this list of tuples
in one of the two following ways:
1. Using higher-order functions, e.g., provide lambda as an argument to the max() function
2. Using the wrap-process-unwrap pattern: In a functional context, we should call this and the
unwrap(process(wrap(structure))) pattern
"""
year_cheese = [(2006, 32.73), (2007, 33.5), (2008, 32.84)]
# wrap: map(lambda yc: (yc[1], yc), year_cheese) - transform each item into a two-tuple with a
# key followed by the original item; comparison key is yc[1]
# (map object converted into a list for readability)
assert list(map(lambda yc: (yc[1], yc), year_cheese)) == [
(32.73, (2006, 32.73)), (33.5, (2007, 33.5)), (32.84, (2008, 32.84))]
# process: max() - position zero used for comparison
assert max(map(lambda yc: (yc[1], yc), year_cheese)) == (33.5, (2007, 33.5))
# unwrap: subscript [1] - second element of the two-tuple selected by max()
assert max(map(lambda yc: (yc[1], yc), year_cheese))[1] == (2007, 33.5)
# Strict and non-strict evaluation
def test_non_strict():
"""
Functional programming's efficiency stems, in part, from being able to defer a computation
until it's required. Logical expression operators and, or, and if-then-else are all lazy/non
strict. We sometimes call them short-circuit operators because they don't need to evaluate all
arguments to determine the resulting value.
Generator expressions and generator functions are lazy. They don't create all possible results
immediately.
(If the body of a def contains yield, the function automatically becomes a generator function.
yield statement suspends function's execution and sends a value back to the caller, but retains
enough state to enable the function to resume where it left off.)
"""
def numbers():
for i in range(1024):
yield i
def sum_to(num: int) -> int:
total: int = 0
for i in numbers():
if i == num:
break # does not evaluate the entire result of numbers()
total += i
return total
assert sum_to(5) == 10
# Recursion instead of an explicit loop state
def test_linear_search_imperative():
def linear_search_imperative(alist, element):
for elem in alist:
if elem == element:
return True
return False
assert not linear_search_imperative([1, 2, 3, 5, 8], 4)
assert linear_search_imperative([1, 2, 3, 5, 8], 5)
def test_linear_search_recursive():
"""
Functional programs don't rely on loops and the associated overhead of tracking the state of
loops. Instead, functional programs try to rely on recursive functions.
While recursion is often succinct and expressive, we have to be cautious about using it in
Python. There are two problems that can arise:
1. Python imposes a recursion limit (default is 1000) to detect recursive functions with
improperly defined base cases
2. Python does not have a compiler that does TCO (Tail-Call Optimization (TCO) in the compiler
changes them to loops)
We'll often optimize a purely recursive function to use an explicit for loop in a generator
expression.
"""
def linear_search_recursive(alist, element):
if not alist:
return False
if alist[0] == element:
return True
return linear_search_recursive(alist[1:], element)
assert not linear_search_recursive([1, 2, 3, 5, 8], 4)
assert linear_search_recursive([1, 2, 3, 5, 8], 5)
| 38.108974 | 99 | 0.693692 |
b46c6e9f185ee5feec7e97567c280b1ffce0e210 | 22,123 | py | Python | keras/callbacks_v1_test.py | itsraina/keras | 5e9376b5b94b6fb445dd52dbfafbc4e95bff5e35 | [
"Apache-2.0"
] | null | null | null | keras/callbacks_v1_test.py | itsraina/keras | 5e9376b5b94b6fb445dd52dbfafbc4e95bff5e35 | [
"Apache-2.0"
] | null | null | null | keras/callbacks_v1_test.py | itsraina/keras | 5e9376b5b94b6fb445dd52dbfafbc4e95bff5e35 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
import os
import shutil
import tempfile
import numpy as np
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
from keras import callbacks
from keras import callbacks_v1
from keras import layers
from keras.engine import input_layer
from keras.engine import sequential
from keras.engine import training
from keras.testing_infra import test_combinations
from keras.testing_infra import test_utils
from keras.utils import np_utils
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class TestTensorBoardV1(tf.test.TestCase, parameterized.TestCase):
    """Unit tests for the legacy (v1) Keras TensorBoard callback."""
    def test_TensorBoard(self):
        # End-to-end: the callback must write event files for fit() and
        # fit_generator() in all histogram_freq configurations.
        np.random.seed(1337)
        temp_dir = self.get_temp_dir()
        self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
        (x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
            train_samples=TRAIN_SAMPLES,
            test_samples=TEST_SAMPLES,
            input_shape=(INPUT_DIM,),
            num_classes=NUM_CLASSES,
        )
        y_test = np_utils.to_categorical(y_test)
        y_train = np_utils.to_categorical(y_train)
        def data_generator(train):
            # Infinite generator cycling over train or test batches.
            if train:
                max_batch_index = len(x_train) // BATCH_SIZE
            else:
                max_batch_index = len(x_test) // BATCH_SIZE
            i = 0
            while 1:
                if train:
                    yield (
                        x_train[i * BATCH_SIZE : (i + 1) * BATCH_SIZE],
                        y_train[i * BATCH_SIZE : (i + 1) * BATCH_SIZE],
                    )
                else:
                    yield (
                        x_test[i * BATCH_SIZE : (i + 1) * BATCH_SIZE],
                        y_test[i * BATCH_SIZE : (i + 1) * BATCH_SIZE],
                    )
                i += 1
                i %= max_batch_index
        # case: Sequential
        with tf.Graph().as_default(), self.cached_session():
            model = sequential.Sequential()
            model.add(
                layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM, activation="relu")
            )
            # non_trainable_weights: moving_variance, moving_mean
            model.add(layers.BatchNormalization())
            model.add(layers.Dense(NUM_CLASSES, activation="softmax"))
            model.compile(
                loss="categorical_crossentropy",
                optimizer="sgd",
                metrics=["accuracy"],
            )
            tsb = callbacks_v1.TensorBoard(
                log_dir=temp_dir,
                histogram_freq=1,
                write_images=True,
                write_grads=True,
                batch_size=5,
            )
            cbks = [tsb]
            # fit with validation data
            model.fit(
                x_train,
                y_train,
                batch_size=BATCH_SIZE,
                validation_data=(x_test, y_test),
                callbacks=cbks,
                epochs=3,
                verbose=0,
            )
            # fit with validation data and accuracy
            model.fit(
                x_train,
                y_train,
                batch_size=BATCH_SIZE,
                validation_data=(x_test, y_test),
                callbacks=cbks,
                epochs=2,
                verbose=0,
            )
            # fit generator with validation data
            model.fit_generator(
                data_generator(True),
                len(x_train),
                epochs=2,
                validation_data=(x_test, y_test),
                callbacks=cbks,
                verbose=0,
            )
            # fit generator without validation data
            # histogram_freq must be zero
            tsb.histogram_freq = 0
            model.fit_generator(
                data_generator(True),
                len(x_train),
                epochs=2,
                callbacks=cbks,
                verbose=0,
            )
            # fit generator with validation data and accuracy
            tsb.histogram_freq = 1
            model.fit_generator(
                data_generator(True),
                len(x_train),
                epochs=2,
                validation_data=(x_test, y_test),
                callbacks=cbks,
                verbose=0,
            )
            # fit generator without validation data and accuracy
            tsb.histogram_freq = 0
            model.fit_generator(
                data_generator(True), len(x_train), epochs=2, callbacks=cbks
            )
            assert os.path.exists(temp_dir)
    def test_TensorBoard_multi_input_output(self):
        # Same coverage as above, but for a functional model with two
        # inputs and two outputs.
        np.random.seed(1337)
        tmpdir = self.get_temp_dir()
        self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
        with tf.Graph().as_default(), self.cached_session():
            filepath = os.path.join(tmpdir, "logs")
            (x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
                train_samples=TRAIN_SAMPLES,
                test_samples=TEST_SAMPLES,
                input_shape=(INPUT_DIM,),
                num_classes=NUM_CLASSES,
            )
            y_test = np_utils.to_categorical(y_test)
            y_train = np_utils.to_categorical(y_train)
            def data_generator(train):
                if train:
                    max_batch_index = len(x_train) // BATCH_SIZE
                else:
                    max_batch_index = len(x_test) // BATCH_SIZE
                i = 0
                while 1:
                    if train:
                        # simulate multi-input/output models
                        yield (
                            [x_train[i * BATCH_SIZE : (i + 1) * BATCH_SIZE]]
                            * 2,
                            [y_train[i * BATCH_SIZE : (i + 1) * BATCH_SIZE]]
                            * 2,
                        )
                    else:
                        yield (
                            [x_test[i * BATCH_SIZE : (i + 1) * BATCH_SIZE]] * 2,
                            [y_test[i * BATCH_SIZE : (i + 1) * BATCH_SIZE]] * 2,
                        )
                    i += 1
                    i %= max_batch_index
            inp1 = input_layer.Input((INPUT_DIM,))
            inp2 = input_layer.Input((INPUT_DIM,))
            inp = layers.add([inp1, inp2])
            hidden = layers.Dense(2, activation="relu")(inp)
            hidden = layers.Dropout(0.1)(hidden)
            output1 = layers.Dense(NUM_CLASSES, activation="softmax")(hidden)
            output2 = layers.Dense(NUM_CLASSES, activation="softmax")(hidden)
            model = training.Model([inp1, inp2], [output1, output2])
            model.compile(
                loss="categorical_crossentropy",
                optimizer="sgd",
                metrics=["accuracy"],
            )
            # we must generate new callbacks for each test, as they aren't
            # stateless
            def callbacks_factory(histogram_freq):
                return [
                    callbacks_v1.TensorBoard(
                        log_dir=filepath,
                        histogram_freq=histogram_freq,
                        write_images=True,
                        write_grads=True,
                        batch_size=5,
                    )
                ]
            # fit without validation data
            model.fit(
                [x_train] * 2,
                [y_train] * 2,
                batch_size=BATCH_SIZE,
                callbacks=callbacks_factory(histogram_freq=0),
                epochs=3,
            )
            # fit with validation data and accuracy
            model.fit(
                [x_train] * 2,
                [y_train] * 2,
                batch_size=BATCH_SIZE,
                validation_data=([x_test] * 2, [y_test] * 2),
                callbacks=callbacks_factory(histogram_freq=1),
                epochs=2,
            )
            # fit generator without validation data
            model.fit_generator(
                data_generator(True),
                len(x_train),
                epochs=2,
                callbacks=callbacks_factory(histogram_freq=0),
            )
            # fit generator with validation data and accuracy
            model.fit_generator(
                data_generator(True),
                len(x_train),
                epochs=2,
                validation_data=([x_test] * 2, [y_test] * 2),
                callbacks=callbacks_factory(histogram_freq=1),
            )
            assert os.path.isdir(filepath)
    def test_Tensorboard_histogram_summaries_in_test_function(self):
        # Verifies histogram summaries are emitted once per epoch by
        # stubbing out the event-file writer and recording step numbers.
        class FileWriterStub:
            def __init__(self, logdir, graph=None):
                self.logdir = logdir
                self.graph = graph
                self.steps_seen = []
            def add_summary(self, summary, global_step):
                summary_obj = tf.compat.v1.Summary()
                # ensure a valid Summary proto is being sent
                if isinstance(summary, bytes):
                    summary_obj.ParseFromString(summary)
                else:
                    assert isinstance(summary, tf.compat.v1.Summary)
                    summary_obj = summary
                # keep track of steps seen for the merged_summary op,
                # which contains the histogram summaries
                if len(summary_obj.value) > 1:
                    self.steps_seen.append(global_step)
            def flush(self):
                pass
            def close(self):
                pass
        def _init_writer(obj, _):
            obj.writer = FileWriterStub(obj.log_dir)
        np.random.seed(1337)
        tmpdir = self.get_temp_dir()
        self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
        (x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
            train_samples=TRAIN_SAMPLES,
            test_samples=TEST_SAMPLES,
            input_shape=(INPUT_DIM,),
            num_classes=NUM_CLASSES,
        )
        y_test = np_utils.to_categorical(y_test)
        y_train = np_utils.to_categorical(y_train)
        with tf.Graph().as_default(), self.cached_session():
            model = sequential.Sequential()
            model.add(
                layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM, activation="relu")
            )
            # non_trainable_weights: moving_variance, moving_mean
            model.add(layers.BatchNormalization())
            model.add(layers.Dense(NUM_CLASSES, activation="softmax"))
            model.compile(
                loss="categorical_crossentropy",
                optimizer="sgd",
                metrics=["accuracy"],
            )
            callbacks_v1.TensorBoard._init_writer = _init_writer
            tsb = callbacks_v1.TensorBoard(
                log_dir=tmpdir,
                histogram_freq=1,
                write_images=True,
                write_grads=True,
                batch_size=5,
            )
            cbks = [tsb]
            # fit with validation data
            model.fit(
                x_train,
                y_train,
                batch_size=BATCH_SIZE,
                validation_data=(x_test, y_test),
                callbacks=cbks,
                epochs=3,
                verbose=0,
            )
            self.assertAllEqual(tsb.writer.steps_seen, [0, 1, 2, 3, 4, 5])
    def test_Tensorboard_histogram_summaries_with_generator(self):
        # histogram_freq > 0 requires validation data with explicit
        # validation_steps when fitting from a generator.
        np.random.seed(1337)
        tmpdir = self.get_temp_dir()
        self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
        def generator():
            x = np.random.randn(10, 100).astype(np.float32)
            y = np.random.randn(10, 10).astype(np.float32)
            while True:
                yield x, y
        with tf.Graph().as_default(), self.cached_session():
            model = test_utils.get_small_sequential_mlp(
                num_hidden=10, num_classes=10, input_dim=100
            )
            model.compile(
                loss="categorical_crossentropy",
                optimizer="sgd",
                metrics=["accuracy"],
            )
            tsb = callbacks_v1.TensorBoard(
                log_dir=tmpdir,
                histogram_freq=1,
                write_images=True,
                write_grads=True,
                batch_size=5,
            )
            cbks = [tsb]
            # fit with validation generator
            model.fit_generator(
                generator(),
                steps_per_epoch=2,
                epochs=2,
                validation_data=generator(),
                validation_steps=2,
                callbacks=cbks,
                verbose=0,
            )
            with self.assertRaises(ValueError):
                # fit with validation generator but no
                # validation_steps
                model.fit_generator(
                    generator(),
                    steps_per_epoch=2,
                    epochs=2,
                    validation_data=generator(),
                    callbacks=cbks,
                    verbose=0,
                )
            self.assertTrue(os.path.exists(tmpdir))
    def test_TensorBoard_with_ReduceLROnPlateau(self):
        # The two callbacks must coexist: ReduceLROnPlateau mutates the
        # optimizer while TensorBoard writes events.
        with self.cached_session():
            temp_dir = self.get_temp_dir()
            self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
            (x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
                train_samples=TRAIN_SAMPLES,
                test_samples=TEST_SAMPLES,
                input_shape=(INPUT_DIM,),
                num_classes=NUM_CLASSES,
            )
            y_test = np_utils.to_categorical(y_test)
            y_train = np_utils.to_categorical(y_train)
            model = test_utils.get_small_sequential_mlp(
                num_hidden=NUM_HIDDEN,
                num_classes=NUM_CLASSES,
                input_dim=INPUT_DIM,
            )
            model.compile(
                loss="binary_crossentropy",
                optimizer="sgd",
                metrics=["accuracy"],
            )
            cbks = [
                callbacks.ReduceLROnPlateau(
                    monitor="val_loss", factor=0.5, patience=4, verbose=1
                ),
                callbacks_v1.TensorBoard(log_dir=temp_dir),
            ]
            model.fit(
                x_train,
                y_train,
                batch_size=BATCH_SIZE,
                validation_data=(x_test, y_test),
                callbacks=cbks,
                epochs=2,
                verbose=0,
            )
            assert os.path.exists(temp_dir)
    def test_Tensorboard_batch_logging(self):
        # update_freq="batch": every batch metric is written with its
        # batch index as the step.
        class FileWriterStub:
            def __init__(self, logdir, graph=None):
                self.logdir = logdir
                self.graph = graph
                self.batches_logged = []
                self.summary_values = []
                self.summary_tags = []
            def add_summary(self, summary, step):
                self.summary_values.append(summary.value[0].simple_value)
                self.summary_tags.append(summary.value[0].tag)
                self.batches_logged.append(step)
            def flush(self):
                pass
            def close(self):
                pass
        with tf.Graph().as_default():
            temp_dir = self.get_temp_dir()
            self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
            tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq="batch")
            tb_cbk.writer = FileWriterStub(temp_dir)
            for batch in range(5):
                tb_cbk.on_batch_end(batch, {"acc": batch})
            self.assertEqual(tb_cbk.writer.batches_logged, [0, 1, 2, 3, 4])
            self.assertEqual(
                tb_cbk.writer.summary_values, [0.0, 1.0, 2.0, 3.0, 4.0]
            )
            self.assertEqual(tb_cbk.writer.summary_tags, ["batch_acc"] * 5)
    def test_Tensorboard_epoch_and_batch_logging(self):
        # Summary tags are prefixed batch_/epoch_ according to
        # update_freq.
        class FileWriterStub:
            def __init__(self, logdir, graph=None):
                self.logdir = logdir
                self.graph = graph
            def add_summary(self, summary, step):
                if "batch_" in summary.value[0].tag:
                    self.batch_summary = (step, summary)
                elif "epoch_" in summary.value[0].tag:
                    self.epoch_summary = (step, summary)
            def flush(self):
                pass
            def close(self):
                pass
        with tf.Graph().as_default():
            temp_dir = self.get_temp_dir()
            self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
            tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq="batch")
            tb_cbk.writer = FileWriterStub(temp_dir)
            tb_cbk.on_batch_end(0, {"acc": 5.0})
            tb_cbk.on_train_end()
            batch_step, batch_summary = tb_cbk.writer.batch_summary
            self.assertEqual(batch_step, 0)
            self.assertEqual(batch_summary.value[0].simple_value, 5.0)
            tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq="epoch")
            tb_cbk.writer = FileWriterStub(temp_dir)
            tb_cbk.on_epoch_end(0, {"acc": 10.0})
            tb_cbk.on_train_end()
            epoch_step, epoch_summary = tb_cbk.writer.epoch_summary
            self.assertEqual(epoch_step, 0)
            self.assertEqual(epoch_summary.value[0].simple_value, 10.0)
    @test_combinations.generate(
        test_combinations.combine(mode=["graph", "eager"])
    )
    def test_Tensorboard_eager(self):
        # Smoke test that the v1 callback also works under eager
        # execution.
        temp_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
        self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
        (x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
            train_samples=TRAIN_SAMPLES,
            test_samples=TEST_SAMPLES,
            input_shape=(INPUT_DIM,),
            num_classes=NUM_CLASSES,
        )
        y_test = np_utils.to_categorical(y_test)
        y_train = np_utils.to_categorical(y_train)
        model = test_utils.get_small_sequential_mlp(
            num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM
        )
        model.compile(
            loss="binary_crossentropy",
            optimizer=tf.compat.v1.train.AdamOptimizer(0.01),
            metrics=["accuracy"],
        )
        cbks = [callbacks_v1.TensorBoard(log_dir=temp_dir)]
        model.fit(
            x_train,
            y_train,
            batch_size=BATCH_SIZE,
            validation_data=(x_test, y_test),
            callbacks=cbks,
            epochs=2,
            verbose=0,
        )
        self.assertTrue(os.path.exists(temp_dir))
    def test_TensorBoard_update_freq(self):
        # update_freq semantics: "epoch" (per-epoch only), "batch"
        # (every batch), or an integer N (every N samples).
        class FileWriterStub:
            def __init__(self, logdir, graph=None):
                self.logdir = logdir
                self.graph = graph
                self.batch_summaries = []
                self.epoch_summaries = []
            def add_summary(self, summary, step):
                if "batch_" in summary.value[0].tag:
                    self.batch_summaries.append((step, summary))
                elif "epoch_" in summary.value[0].tag:
                    self.epoch_summaries.append((step, summary))
            def flush(self):
                pass
            def close(self):
                pass
        with tf.Graph().as_default():
            temp_dir = self.get_temp_dir()
            self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
            # Epoch mode
            tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq="epoch")
            tb_cbk.writer = FileWriterStub(temp_dir)
            tb_cbk.on_batch_end(0, {"acc": 5.0, "size": 1})
            self.assertEqual(tb_cbk.writer.batch_summaries, [])
            tb_cbk.on_epoch_end(0, {"acc": 10.0, "size": 1})
            self.assertLen(tb_cbk.writer.epoch_summaries, 1)
            tb_cbk.on_train_end()
            # Batch mode
            tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq="batch")
            tb_cbk.writer = FileWriterStub(temp_dir)
            tb_cbk.on_batch_end(0, {"acc": 5.0, "size": 1})
            self.assertLen(tb_cbk.writer.batch_summaries, 1)
            tb_cbk.on_batch_end(0, {"acc": 5.0, "size": 1})
            self.assertLen(tb_cbk.writer.batch_summaries, 2)
            self.assertFalse(tb_cbk.writer.epoch_summaries)
            tb_cbk.on_train_end()
            # Integer mode
            tb_cbk = callbacks_v1.TensorBoard(temp_dir, update_freq=20)
            tb_cbk.writer = FileWriterStub(temp_dir)
            tb_cbk.on_batch_end(0, {"acc": 5.0, "size": 10})
            self.assertFalse(tb_cbk.writer.batch_summaries)
            tb_cbk.on_batch_end(0, {"acc": 5.0, "size": 10})
            self.assertLen(tb_cbk.writer.batch_summaries, 1)
            tb_cbk.on_batch_end(0, {"acc": 5.0, "size": 10})
            self.assertLen(tb_cbk.writer.batch_summaries, 1)
            tb_cbk.on_batch_end(0, {"acc": 5.0, "size": 10})
            self.assertLen(tb_cbk.writer.batch_summaries, 2)
            tb_cbk.on_batch_end(0, {"acc": 10.0, "size": 10})
            self.assertLen(tb_cbk.writer.batch_summaries, 2)
            self.assertFalse(tb_cbk.writer.epoch_summaries)
            tb_cbk.on_train_end()
# Standard TensorFlow test entry point.
if __name__ == "__main__":
    tf.test.main()
| 35.567524 | 80 | 0.531076 |
64b3abfc411dcbb56011f91ca095bdfdcf613afb | 21,710 | py | Python | CryptoAttacks/Block/gcm.py | akbarszcz/CryptoAttacks | ae675d016b314414a3dc9b23c7d8a32da4c62457 | [
"MIT"
] | 54 | 2017-03-28T23:46:58.000Z | 2022-02-23T01:53:38.000Z | CryptoAttacks/Block/gcm.py | maximmasiutin/CryptoAttacks | d1d47d3cb2ce38738a60b728bc35ce80bfe64374 | [
"MIT"
] | null | null | null | CryptoAttacks/Block/gcm.py | maximmasiutin/CryptoAttacks | d1d47d3cb2ce38738a60b728bc35ce80bfe64374 | [
"MIT"
] | 13 | 2017-03-31T06:07:23.000Z | 2021-11-20T19:01:30.000Z | from __future__ import absolute_import, division, print_function
from builtins import bytes, range, int
import math
from itertools import combinations
try:
from itertools import zip_longest
except:
from itertools import izip_longest as zip_longest
from random import randint
from copy import deepcopy
from Crypto.Cipher import AES
from CryptoAttacks.Utils import random_bytes, xor, i2b, b2i, b2h, log
from CryptoAttacks.Math import egcd, invmod
def deg(n):
"""Find degree of polynomial
Args:
n(Polynomial_128/list/int)
Returns:
int
"""
if isinstance(n, Polynomial_128):
n = n.coefficients
if type(n) == list:
for d in reversed(range(len(n))):
if n[d].to_int() != 0:
return d
return -1
else:
if n == 0:
return -1
return int(math.floor(math.log(n, 2) + 1))
class Polynomial_2():
"""Polynomial with coefficients in GF(2)"""
def __init__(self, coefficients):
"""x^3 + x + 1 == 0b1101 == [3, 1, 0]"""
self.coefficients = Polynomial_2.convert_coefficients(coefficients)
@staticmethod
def convert_coefficients(coefficients):
if type(coefficients) == list:
coefficients = Polynomial_2.list_to_int(coefficients)
elif type(coefficients) == bytes:
# reverse bit order
coefficients = int(''.join(map(lambda x: '{:08b}'.format(x), coefficients))[::-1], 2)
elif isinstance(coefficients, int):
pass
else:
raise ValueError("Bad coefficients: {} ({})".format(coefficients, type(coefficients)))
return coefficients
@staticmethod
def egcd(a, b):
"""Extended Euclidean algorithm"""
a, b = map(Polynomial_2, [a, b])
s0, t0, s1, t1 = map(Polynomial_2, [1, 0, 0, 1])
while b.coefficients:
q, a, b = a/b, b, a%b
s0, s1 = s1, s0 - q*s1
t0, t1 = t1, t0 - q*t1
return a, s0, t0
@staticmethod
def list_to_int(coefficients):
result = 0
for coef in coefficients:
result |= 1<<coef
return result
def __str__(self):
return self.to_poly()
def __getitem__(self, no):
if not isinstance(no, int):
return 'No must be a number'
if no < 0 or no > self.to_bits():
return 'Bad no'
return int(self.to_bits()[no])
def to_bits(self):
return '{:b}'.format(self.coefficients)[::-1]
def to_int(self):
return self.coefficients
def to_poly(self):
if self.coefficients == 0:
return '0'
result = ''
for i, coef in enumerate(self.to_bits()):
if coef == '1':
result = 'x^{} + '.format(i) + result
return result[:-3]
def to_list(self):
return list(map(int, list(self.to_bits())))
def __add__(self, other):
return Polynomial_2(self.coefficients ^ other.coefficients)
def __sub__(self, other):
return self + other
def __mul__(self, other):
if isinstance(other, int):
other = Polynomial_2(other)
p = 0
a = self.coefficients
b = other.coefficients
while a > 0:
if a & 1:
p = p ^ b
a = a >> 1
b = b << 1
return Polynomial_2(p)
def __rmul__(self, other):
return self.__mul__(other)
def __divmod__(self, other):
a = self.coefficients
b = other.coefficients
q, r = 0, a
while deg(r) >= deg(b):
d = deg(r) - deg(b)
q = q ^ (1 << d)
r = r ^ (b << d)
return Polynomial_2(q), Polynomial_2(r)
def __mod__(self, other):
return self.__divmod__(other)[1]
def __div__(self, other):
return self.__divmod__(other)[0]
def __floordiv__(self, other):
return self.__div__(other)
def __truediv__(self, other):
return self.__div__(other)
def __pow__(self, y):
p = Polynomial_2(1)
b = Polynomial_2(self.coefficients)
while y > 0:
if y & 1:
p *= b
y >>= 1
b *= b
return p
def __eq__(self, other):
return self.coefficients == other.coefficients
def __hash__(self):
return hash(self.to_int())
class GF_2k():
"""GF(2^k) with elements represented as polynomials with coefficients in GF(2)"""
def __init__(self, coefficients, k, modulus):
"""x^3 + x + 1 == 0b1101"""
self.coefficients = Polynomial_2.convert_coefficients(coefficients)
self.k = k
if isinstance(modulus, Polynomial_2):
self.modulus = modulus
else:
self.modulus = Polynomial_2(modulus)
tmp = Polynomial_2(self.coefficients) % self.modulus
self.coefficients = tmp.coefficients
def __str__(self):
return self.to_poly()
def __repr__(self):
return self.__str__()
def __getitem__(self, no):
if not isinstance(no, int):
return 'No must be a number'
if no > self.to_bits():
return 'Bad no'
return int(self.to_bits()[no])
def to_bytes(self):
return bytes(i2b(int(self.to_bits(), 2)).rjust(self.k//8, bytes(b'\x00')))
def to_bits(self):
return '{:b}'.format(self.coefficients).zfill(self.k)[::-1]
def to_int(self):
return self.coefficients
def to_poly(self):
if self.coefficients == 0:
return '0'
result = ''
for i, coef in enumerate(self.to_bits()):
if coef == '1':
result = 'x^{} + '.format(i) + result
return result[:-3]
def to_list(self):
return list(map(int, list(self.to_bits())))
def __add__(self, other):
return GF_2k(self.coefficients ^ other.coefficients, self.k, self.modulus)
def __sub__(self, other):
return self + other
def __mul__(self, other):
if isinstance(other, int):
other = GF_2k(other, self.k, self.modulus)
p = 0
a = self.coefficients
b = other.coefficients
m = self.modulus.coefficients
while a > 0:
if a & 1:
p = p ^ b
a = a >> 1
b = b << 1
if deg(b) == deg(m):
b = b ^ m
return GF_2k(p, self.k, self.modulus)
def invmod(self):
"""Modular inverse. a*invmod(a) == 1 (mod n)"""
d, s, t = Polynomial_2.egcd(self.coefficients, self.modulus.coefficients)
if d.coefficients != 1:
raise ValueError("Modular inverse doesn't exists ({}**(-1) % {})".format(self, self.modulus))
return GF_2k(s.coefficients, self.k, self.modulus)
def __mod__(self, other):
log.error('Modulo not allowed')
return None
# result = Polynomial_2(self.coefficients) % Polynomial_2(other.coefficients)
# return GF_2k(result.coefficients, self.k, self.modulus)
def __div__(self, other):
return self * other.invmod()
def __floordiv__(self, other):
return self.__div__(other)
def __truediv__(self, other):
return self.__div__(other)
def __pow__(self, y):
p = GF_2k(1, self.k, self.modulus)
b = GF_2k(self.coefficients, self.k, self.modulus)
while y > 0:
if y & 1:
p *= b
y >>= 1
b *= b
return p
def __eq__(self, other):
return self.k == other.k and self.coefficients == other.coefficients
def __hash__(self):
return hash(self.to_bytes() + bytes(b'-') + bytes(self.k))
class GF_2k_generator():
"""Helper for generating GF(2^k) with given k and modulus"""
def __init__(self, k, modulus):
self.k = k
self.modulus = modulus
def __call__(self, coefficients):
return GF_2k(coefficients, self.k, self.modulus)
class Polynomial_128():
"""Polynomial with coefficients in GF(2^128)"""
def __init__(self, coefficients):
"""12*x^2 + x + 43 == [GF_2k(43,128,m), GF_2k(1,128,m), GF_2k(12,128,m)]"""
self.coefficients = coefficients
self.k = self.coefficients[0].k
self.modulus = self.coefficients[0].modulus
for no in range(len(self.coefficients)):
if self.coefficients[no].k != self.k:
raise ValueError("Coefficients not consistient: k=={}, coef[{}].k=={}".format(self.k, no, self.coefficients[no].k))
if self.coefficients[no].modulus.coefficients != self.modulus.coefficients:
raise ValueError("Coefficients not consistient: modulus=={}, coef[{}].modulus=={}".format(self.modulus, no, self.coefficients[no].modulus))
def __add__(self, other):
return Polynomial_128(
[a+b for a,b in zip_longest(self.coefficients, other.coefficients,
fillvalue=GF_2k(0,self.k,self.modulus))])
def __sub__(self, other):
return self + other
def __str__(self):
if len(self.coefficients) == 0 or deg(self) < 0:
return '0'
result = ''
for i, coef in enumerate(self.coefficients):
if coef.to_int() != 0:
if coef.to_int() == 1:
result = 'x^{} + '.format(i) + result
else:
result = '{}*x^{} + '.format(coef.to_int(), i) + result
return result[:-3]
def __repr__(self):
return self.__str__()
def __mul__(self, other):
if isinstance(other, GF_2k):
other = Polynomial_128([other])
if self.is_zero() or other.is_zero():
return self.zero_element()
k = deg(self) + 1
l = deg(other) + 1
c = [GF_2k(0,self.k,self.modulus)]*(k+l-1)
for i in range(k):
for j in range(l):
c[i+j] += self.coefficients[i]*other.coefficients[j]
return Polynomial_128(c)
def __divmod__(self, other):
k = deg(self) + 1
l = deg(other) + 1
if k < l:
return Polynomial_128([GF_2k(0,self.k,self.modulus)]), self
t = other.coefficients[l-1].invmod()
r = [a for a in self.coefficients]
q = [GF_2k(0,self.k,self.modulus)]*(k-l+1)
for i in reversed(range(k-l+1)):
q[i] = t*r[i+l-1]
for j in range(l):
r[i+j] -= q[i]*other.coefficients[j]
return Polynomial_128(q), Polynomial_128(r)
def __mod__(self, other):
return self.__divmod__(other)[1]
def __div__(self, other):
if isinstance(other, GF_2k):
return self.__divmod__(Polynomial_128([other]))[0]
else:
return self.__divmod__(other)[0]
def __floordiv__(self, other):
return self.__div__(other)
def __truediv__(self, other):
return self.__div__(other)
def __pow__(self, y):
p = self.one_element()
b = Polynomial_128(self.coefficients)
while y > 0:
if y & 1:
p *= b
y >>= 1
b *= b
return p
def powmod(self, y, m):
p = self.one_element()
b = Polynomial_128(self.coefficients) % m
while y > 0:
if y & 1:
p = (p*b) % m
y >>= 1
b = (b*b) % m
return p
def __eq__(self, other):
return self.k == other.k and self.modulus == other.modulus and\
all([a == b for a,b in zip(self.coefficients, other.coefficients)])
def __hash__(self):
return hash(''.join(map(str,map(hash, self.coefficients))))
def is_zero(self):
return len(self.coefficients) == 0 or deg(self) < 0
def is_one(self):
return deg(self) == 0 and self.coefficients[0].to_int() == 1
def zero_element(self):
"""0*x^0"""
return Polynomial_128([GF_2k(0,self.k,self.modulus)])
def one_element(self):
"""1*x^0"""
return Polynomial_128([GF_2k(1,self.k,self.modulus)])
def element(self):
"""x^1"""
return Polynomial_128([GF_2k(0,self.k,self.modulus), GF_2k(1,self.k,self.modulus)])
def monic(self):
f = deepcopy(self)
if self.coefficients[deg(f)].to_int() != 1:
f /= self.coefficients[deg(f)]
return f
aes_polynomial = GF_2k_generator(128, [128, 7, 2, 1, 0])
def encrypt_ctr(plaintext, key, nonce, block_size=16, initial_value=0):
aes = AES.new(key, AES.MODE_ECB)
key_stream = bytes(b'')
for counter in range(int(math.ceil(len(plaintext)/16.))):
key_stream += bytes(aes.encrypt(nonce + i2b(counter+initial_value, size=8*(block_size-len(nonce)), endian='big')))
return xor(plaintext, key_stream)
def aes_bytes_to_poly_blocks(ciphertext, additional, block_size=16):
"""Convert ciphertext to list of GF(2^128)"""
size_additional = len(additional)*8
size_ciphertext = len(ciphertext)*8
if len(ciphertext) % block_size != 0:
ciphertext += bytes(b'\x00' * (block_size - len(ciphertext)%block_size))
if len(additional) % block_size != 0:
additional += bytes(b'\x00' * (block_size - len(additional)%block_size))
blocks = []
blocks.extend([additional[block_size*i:(block_size*i)+block_size] for i in range(len(additional)//block_size)])
blocks.extend([ciphertext[block_size*i:(block_size*i)+block_size] for i in range(len(ciphertext)//block_size)])
blocks.append(i2b(size_additional, size=(block_size//2)*8, endian='big') + i2b(size_ciphertext, size=(block_size//2)*8, endian='big'))
blocks = list(map(aes_polynomial, blocks))
return blocks
def poly_blocks_to_aes_bytes(blocks, block_size=16):
"""Convert list of GF(2^128) to ciphertext"""
blocks = list(map(lambda x: x.to_bytes(), blocks))
sizes = blocks[-1]
size_additional = b2i(sizes[:block_size//2], endian='big')
size_ciphertext = b2i(sizes[block_size//2:], endian='big')
size_additional_padded = size_additional//8
if size_additional_padded % block_size != 0:
size_additional_padded += 16 - size_additional_padded % block_size
blocks = bytes(b''.join(blocks[:-1]))
additional = blocks[:size_additional//8]
ciphertext = blocks[size_additional_padded:size_additional_padded + size_ciphertext//8]
return ciphertext, additional
def gcm_compute_parts(additional='', key=None, nonce=None, auth_key=None, s=None, plaintext='', ciphertext='', block_size=16):
if nonce is not None and len(nonce) != 12:
log.error('nonce length must be 12')
return None, None, None
if nonce is None or key is None:
if None in (ciphertext, s):
log.error('nonce can\'t be None if ciphertext, auth_key or s is None')
return None, None, None
blocks = []
if auth_key is None:
auth_key = bytes(AES.new(key, AES.MODE_ECB).encrypt(bytes(b'\x00'*block_size)))
h = aes_polynomial(auth_key)
if ciphertext == '':
ciphertext = encrypt_ctr(plaintext, key, nonce, block_size, 2)
size_additional = len(additional)*8
size_ciphertext = len(ciphertext)*8
if len(additional) % block_size != 0:
additional += bytes(b'\x00'*(block_size - len(additional)%block_size))
if len(ciphertext) % block_size != 0:
ciphertext += bytes(b'\x00'*(block_size - len(ciphertext)%block_size))
blocks.extend([additional[block_size*i:(block_size*i)+block_size] for i in range(len(additional)//block_size)])
blocks.extend([ciphertext[block_size*i:(block_size*i)+block_size] for i in range(len(ciphertext)//block_size)])
blocks.append(i2b(size_additional, size=(block_size//2)*8, endian='big') + i2b(size_ciphertext, size=(block_size//2)*8, endian='big'))
blocks = map(aes_polynomial, blocks)
g = aes_polynomial(0)
for b in blocks:
g = g + b
g = g * h
if s is None:
s = bytes(AES.new(key, AES.MODE_ECB).encrypt(nonce+i2b(1, size=4*8, endian='big')))
s = aes_polynomial(s)
t = g + s
return list(blocks), t, s
def gcm_encrypt(plaintext, additional, key, nonce, tag_size=128, block_size=16):
if len(nonce) != 12:
log.error('nonce length must be 12')
return None, None
ciphertext = encrypt_ctr(plaintext, key, nonce, block_size, 2)
_, t, _ = gcm_compute_parts(ciphertext=ciphertext, additional=additional, key=key, nonce=nonce, block_size=16)
return ciphertext, t.to_bytes()[:tag_size//8]
def gcm_verify(tag, ciphertext, additional, key, nonce, tag_size=16, block_size=16):
_, t, _ = gcm_compute_parts(ciphertext=ciphertext, additional=additional, key=key, nonce=nonce, block_size=16)
return t.to_bytes()[:tag_size//8], t.to_bytes()[:tag_size//8] == tag
def compute_s(tag, ciphertext, additional, auth_key):
blocks = aes_bytes_to_poly_blocks(ciphertext, additional)
t = aes_polynomial(tag)
h = aes_polynomial(auth_key)
g = aes_polynomial(0)
for b in blocks:
g = g + b
g = g * h
s = t - g
return s.to_bytes()
def gcm_forge_tag(ciphertext, additional, auth_key, valid_ciphertext, valid_additional, valid_tag, tag_size=128):
s = compute_s(valid_tag, valid_ciphertext, valid_additional, auth_key)
blocks, t, s = gcm_compute_parts(ciphertext=ciphertext, additional=additional, auth_key=auth_key, s=s)
return t.to_bytes()[:tag_size//8]
def derivate(f):
if deg(f) == 0:
return f.zero_element()
return Polynomial_128([c*(i+1) for i,c in enumerate(f.coefficients[1:])])
def pth_root(f, p, w):
return Polynomial_128([pow(a, p**(w-1)) for a in f.coefficients])
def polynomial_gcd(f, g):
if f.is_one():
return f
if g.is_one():
return g
while deg(g) >= 0:
t = g
g = f % g
f = t
return f
def factor_square_free(f):
# make it monic
f = f.monic()
# square free
L = []
s = 1
p = 2
w = f.k
q = p**w
while True:
j = 1
f_derivative = derivate(f)
d = polynomial_gcd(f, f_derivative)
g = f / d
while not g.is_one():
f /= g
h = polynomial_gcd(f, g)
m = g / h
if not m.is_one():
L.append((m, j*s))
g = h
j += 1
if not f.is_one():
f = pth_root(f, p, w)
s = p*s
if f.is_one():
break
return L
def factor_distinct_degree(f):
L_dd = []
h = f.element() % f
k = 0
q = 2**f.k
while not f.is_one():
h = h.powmod(q, f)
k += 1
g = polynomial_gcd(h-h.element(), f)
if not g.is_one():
L_dd.append((g, k))
f /= g
h %= f
return L_dd
def random_polynomial(f):
return Polynomial_128([GF_2k(randint(0, f.k), f.k, f.modulus) for _ in range(deg(f))]) % f
def factor_equal_degree(f, f_degree):
n = deg(f)
r = n / f_degree
S = set([f])
q = 2**f.k
while len(S) < r:
h = random_polynomial(f)
g = polynomial_gcd(h, f)
if g.is_one():
g = h.modpow(((q**f_degree - 1)/3) - 1, f)
S_tmp = set()
for u in S:
if deg(u) == f_degree:
continue
d = polynomial_gcd(g, u)
if d.is_one() or d == u:
S_tmp.add(u)
else:
S_tmp.update(set([d, u / d]))
S = S_tmp
return S
def factor_polynomial(f):
log.debug('factoring {}'.format(f))
factorization = []
L_sf = factor_square_free(f)
for f_sf, power_sf in L_sf:
L_dd = factor_distinct_degree(f_sf)
for f_dd, f_dd_degree in L_dd:
L_ed = factor_equal_degree(f_dd, f_dd_degree)
for f_ed in L_ed:
factorization.append((f_ed, power_sf))
return factorization
def recover_key_repated_nonce(ciphertexts_additionals_tags):
"""Recover authentication key for GCM given ciphertext encrypted with repeated nonce
Sometimes fail (maybe bug in factorization)
Args:
ciphertexts_additionals_tags(list(tuple(bytes))): [(ciphertext, additional_data, auth_tag), ...]
Returns:
set(bytes): candidates for GCM auth key
"""
auth_key_candidates = set()
pair_count = 0
for (ciphertext1, additional1, tag1), (ciphertext2, additional2, tag2) in combinations(ciphertexts_additionals_tags, 2):
pair_count += 1
log.debug('Trying pair no {}'.format(pair_count))
p1 = aes_bytes_to_poly_blocks(ciphertext1, additional1)
t1 = aes_polynomial(tag1)
p1 = Polynomial_128([t1]+p1[::-1]) # first element is x0
p2 = aes_bytes_to_poly_blocks(ciphertext2, additional2)
t2 = aes_polynomial(tag2)
p2 = Polynomial_128([t2]+p2[::-1])
auth_key_candidates_tmp = set()
factorization = factor_polynomial(p1+p2)
for f, f_degree in factorization:
if deg(f) == 1:
log.debug('auth key candidate: {}'.format(f.monic()))
key_candidate = f.monic().coefficients[0].to_bytes()
auth_key_candidates_tmp.add(key_candidate)
if len(auth_key_candidates) > 0:
auth_key_candidates.intersection_update(auth_key_candidates_tmp)
else:
auth_key_candidates = auth_key_candidates_tmp
if len(auth_key_candidates) == 1:
break
log.success('Found {} auth key candidates'.format(len(auth_key_candidates)))
return auth_key_candidates
| 28.305085 | 155 | 0.580147 |
dcfa26e596224bd7534bef1a245e941f9c799733 | 11,341 | py | Python | src/evaluate.py | the-gamecoders/TheGameHub | b60b237a2626defda2240951f14074d13075c3c9 | [
"MIT"
] | null | null | null | src/evaluate.py | the-gamecoders/TheGameHub | b60b237a2626defda2240951f14074d13075c3c9 | [
"MIT"
] | null | null | null | src/evaluate.py | the-gamecoders/TheGameHub | b60b237a2626defda2240951f14074d13075c3c9 | [
"MIT"
] | 1 | 2020-11-15T14:24:43.000Z | 2020-11-15T14:24:43.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'evaluate.ui'
#
# Created by: PyQt5 UI code generator 5.15.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMessageBox
import sqlite3
conn=sqlite3.connect('TheGameHub.db')
curs=conn.cursor()
class Ui_MainWindow2(object):
def setupUi(self, MainWindow2):
MainWindow2.setObjectName("MainWindow2")
MainWindow2.resize(668, 483)
font = QtGui.QFont()
font.setPointSize(10)
MainWindow2.setFont(font)
MainWindow2.setStyleSheet("background-color: rgb(178, 222, 255);")
self.centralwidget = QtWidgets.QWidget(MainWindow2)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setContentsMargins(20, 20, 20, 6)
self.gridLayout.setHorizontalSpacing(9)
self.gridLayout.setVerticalSpacing(14)
self.gridLayout.setObjectName("gridLayout")
spacerItem = QtWidgets.QSpacerItem(77, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem, 1, 3, 1, 1)
self.select_match = QtWidgets.QComboBox(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Comic Sans MS")
font.setPointSize(8)
font.setBold(True)
font.setWeight(75)
self.select_match.setFont(font)
self.select_match.setStyleSheet("background-color: rgb(255, 194, 255);")
self.select_match.setObjectName("select_match")
self.select_match.addItem("")
self.select_match.addItem("")
self.gridLayout.addWidget(self.select_match, 1, 4, 1, 3)
self.line = QtWidgets.QFrame(self.centralwidget)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.gridLayout.addWidget(self.line, 2, 0, 1, 7)
self.label_3 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Comic Sans MS")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.gridLayout.addWidget(self.label_3, 3, 0, 1, 1)
self.select_team = QtWidgets.QComboBox(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Comic Sans MS")
font.setBold(True)
font.setWeight(75)
self.select_team.setFont(font)
self.select_team.setStyleSheet("background-color: rgb(255, 194, 255);")
self.select_team.setObjectName("select_team")
self.select_team.addItem("")
self.gridLayout.addWidget(self.select_team, 1, 0, 1, 3)
self.label_2 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Comic Sans MS")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.gridLayout.addWidget(self.label_2, 3, 6, 1, 1)
self.players_list = QtWidgets.QListWidget(self.centralwidget)
self.players_list.setStyleSheet("background-color: rgb(183, 255, 183);")
self.players_list.setObjectName("players_list")
self.gridLayout.addWidget(self.players_list, 4, 0, 1, 3)
self.evaluate_score = QtWidgets.QPushButton(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Comic Sans MS")
font.setPointSize(9)
font.setBold(True)
font.setWeight(75)
self.evaluate_score.setFont(font)
self.evaluate_score.setStyleSheet("background-color:rgb(99, 200, 81);\n"
"")
self.evaluate_score.setObjectName("evaluate_score")
self.gridLayout.addWidget(self.evaluate_score, 4, 3, 1, 3)
self.total_pts = QtWidgets.QLineEdit(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Comic Sans MS")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.total_pts.setFont(font)
self.total_pts.setStyleSheet("background-color: rgb(132, 223, 146);")
self.total_pts.setObjectName("total_pts")
self.gridLayout.addWidget(self.total_pts, 5, 6, 1, 1)
self.pts_list = QtWidgets.QListWidget(self.centralwidget)
self.pts_list.setStyleSheet("background-color: rgb(183, 255, 183);")
self.pts_list.setObjectName("pts_list")
self.gridLayout.addWidget(self.pts_list, 4, 6, 1, 1)
spacerItem1 = QtWidgets.QSpacerItem(165, 22, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem1, 5, 0, 1, 2)
self.label_4 = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Comic Sans MS")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.label_4.setFont(font)
self.label_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_4.setObjectName("label_4")
self.gridLayout.addWidget(self.label_4, 5, 2, 1, 3)
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem2, 0, 0, 1, 1)
self.label = QtWidgets.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily("Comic Sans MS")
font.setPointSize(12)
font.setBold(True)
font.setUnderline(False)
font.setWeight(75)
self.label.setFont(font)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 1, 1, 6)
self.gridLayout.setColumnStretch(0, 5)
self.gridLayout.setColumnStretch(1, 5)
self.gridLayout.setColumnStretch(2, 5)
self.gridLayout.setColumnStretch(3, 2)
MainWindow2.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow2)
self.statusbar.setObjectName("statusbar")
MainWindow2.setStatusBar(self.statusbar)
self.get_players()
self.select_team.activated.connect(self.getTeam)
self.evaluate_score.clicked.connect(self.score)
self.retranslateUi(MainWindow2)
QtCore.QMetaObject.connectSlotsByName(MainWindow2)
def retranslateUi(self, MainWindow2):
_translate = QtCore.QCoreApplication.translate
MainWindow2.setWindowTitle(_translate("MainWindow2", "Evaluate Team"))
self.select_match.setItemText(0, _translate("MainWindow2", "Select Match"))
self.select_match.setItemText(1, _translate("MainWindow2", "Match 1"))
self.label_3.setText(_translate("MainWindow2", "Players"))
self.select_team.setItemText(0, _translate("MainWindow2", "Select Team"))
self.label_2.setText(_translate("MainWindow2", "Score"))
self.evaluate_score.setText(_translate("MainWindow2", "Calculate Score"))
self.label_4.setText(_translate("MainWindow2", "Total Score:"))
self.label.setText(_translate("MainWindow2", "Evaluate the performance of your team here:"))
def string_to_list(self,string):
items = []
item = ""
itemExpected = True
for char in string[1:]:
if itemExpected and char not in ["]", ",", "[", "'"]:
item += char
elif char in [",", "[", "]"]:
itemExpected = True
items.append(item)
item = ""
i=1
for txt in items[1:]:
items[i]=txt[1:]
i+=1
if i==12:break
return items
def get_players(self):
curs.execute("SELECT * from Teams")
rec=curs.fetchall()
l=len(rec)
for i in range(0,l):
row=rec[i]
team=row[0]
self.select_team.addItem(team)
def getTeam(self):
self.total_pts.setText("")
self.players_list.clear()
self.pts_list.clear()
txt=self.select_team.currentText()
if txt != "Select Team":
curs.execute("SELECT Players from Teams WHERE Name='"+txt+"'")
row=curs.fetchone()
selected = self.string_to_list(row[0])
self.players_list.addItems(selected)
else:
msg = QMessageBox()
msg.setWindowTitle("The Game Hub")
msg.setText("Invalid Selection.")
msg.setIcon(QMessageBox.Warning)
x=msg.exec()
def score(self):
txt=self.select_team.currentText()
self.pts_list.clear()
total=int()
for i in range(self.players_list.count()):
player=self.players_list.item(i).text()
cur = conn.execute('SELECT * FROM Match WHERE Player="{}";'.format(player))
row = cur.fetchone()
runs_score=row[1]
balls_faced=row[2]
fours=row[3]
sixes=row[4]
balls_delivered=row[5]
overs=int(balls_delivered/6)
runs_given=row[7]
wkts=row[8]
catches=row[9]
stumping=row[10]
run_out=row[11]
batscore=int(runs_score/2)
if runs_score>=50 and runs_score<100: batscore+=5
if runs_score>=100:batscore+=10
if runs_score>0:
sr=float(runs_score*100/balls_faced)
if sr>=80 and sr<100:batscore+=2
if sr>=100:batscore+=4
batscore+=fours
batscore+=2*sixes
bowlscore=int(wkts*10)
if wkts>=3:bowlscore+=5
if wkts>=5:bowlscore+=10
if balls_delivered>0:
er=float(runs_given/overs)
if er<=2.0:bowlscore+=10
elif er>2.0 and er<=3.5:bowlscore+=7
elif er > 3.5 and er <= 4.5:bowlscore+=4
points=batscore+bowlscore
points+=10*catches
points+=10*stumping
points+=10*run_out
self.pts_list.addItem(str(points))
total+=points
self.total_pts.setText(str(total))
curs.execute('UPDATE Teams SET Score = "{}" WHERE Name = "{}";'.format(total, txt))
conn.commit()
msg2 = QMessageBox()
msg2.setWindowTitle("The Game Hub")
msg2.setText("Your Team has scored '{}' points.".format(total))
msg2.setIcon(QMessageBox.Information)
z=msg2.exec()
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow2 = QtWidgets.QMainWindow()
ui = Ui_MainWindow2()
ui.setupUi(MainWindow2)
MainWindow2.show()
sys.exit(app.exec_())
| 42.796226 | 117 | 0.608853 |
fc82bb27a5dbb2bd65ede07302d5f8f551af58ae | 93,329 | py | Python | crabageprediction/venv/Lib/site-packages/fontTools/cffLib/__init__.py | 13rianlucero/CrabAgePrediction | 92bc7fbe1040f49e820473e33cc3902a5a7177c7 | [
"MIT"
] | null | null | null | crabageprediction/venv/Lib/site-packages/fontTools/cffLib/__init__.py | 13rianlucero/CrabAgePrediction | 92bc7fbe1040f49e820473e33cc3902a5a7177c7 | [
"MIT"
] | null | null | null | crabageprediction/venv/Lib/site-packages/fontTools/cffLib/__init__.py | 13rianlucero/CrabAgePrediction | 92bc7fbe1040f49e820473e33cc3902a5a7177c7 | [
"MIT"
] | null | null | null | """cffLib: read/write Adobe CFF fonts
OpenType fonts with PostScript outlines contain a completely independent
font file, Adobe's *Compact Font Format*. So dealing with OpenType fonts
requires also dealing with CFF. This module allows you to read and write
fonts written in the CFF format.
In 2016, OpenType 1.8 introduced the `CFF2 <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2>`_
format which, along with other changes, extended the CFF format to deal with
the demands of variable fonts. This module parses both original CFF and CFF2.
"""
from fontTools.misc import sstruct
from fontTools.misc import psCharStrings
from fontTools.misc.arrayTools import unionRect, intRect
from fontTools.misc.textTools import bytechr, byteord, bytesjoin, tobytes, tostr, safeEval
from fontTools.ttLib import TTFont
from fontTools.ttLib.tables.otBase import OTTableWriter
from fontTools.ttLib.tables.otBase import OTTableReader
from fontTools.ttLib.tables import otTables as ot
from io import BytesIO
import struct
import logging
import re
# mute cffLib debug messages when running ttx in verbose mode
DEBUG = logging.DEBUG - 1
log = logging.getLogger(__name__)
cffHeaderFormat = """
major: B
minor: B
hdrSize: B
"""
maxStackLimit = 513
# maxstack operator has been deprecated. max stack is now always 513.
class StopHintCountEvent(Exception):
pass
class _DesubroutinizingT2Decompiler(psCharStrings.SimpleT2Decompiler):
stop_hintcount_ops = ("op_hintmask", "op_cntrmask", "op_rmoveto", "op_hmoveto",
"op_vmoveto")
def __init__(self, localSubrs, globalSubrs, private=None):
psCharStrings.SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs,
private)
def execute(self, charString):
self.need_hintcount = True # until proven otherwise
for op_name in self.stop_hintcount_ops:
setattr(self, op_name, self.stop_hint_count)
if hasattr(charString, '_desubroutinized'):
# If a charstring has already been desubroutinized, we will still
# need to execute it if we need to count hints in order to
# compute the byte length for mask arguments, and haven't finished
# counting hints pairs.
if self.need_hintcount and self.callingStack:
try:
psCharStrings.SimpleT2Decompiler.execute(self, charString)
except StopHintCountEvent:
del self.callingStack[-1]
return
charString._patches = []
psCharStrings.SimpleT2Decompiler.execute(self, charString)
desubroutinized = charString.program[:]
for idx, expansion in reversed(charString._patches):
assert idx >= 2
assert desubroutinized[idx - 1] in ['callsubr', 'callgsubr'], desubroutinized[idx - 1]
assert type(desubroutinized[idx - 2]) == int
if expansion[-1] == 'return':
expansion = expansion[:-1]
desubroutinized[idx-2:idx] = expansion
if not self.private.in_cff2:
if 'endchar' in desubroutinized:
# Cut off after first endchar
desubroutinized = desubroutinized[:desubroutinized.index('endchar') + 1]
else:
if not len(desubroutinized) or desubroutinized[-1] != 'return':
desubroutinized.append('return')
charString._desubroutinized = desubroutinized
del charString._patches
def op_callsubr(self, index):
subr = self.localSubrs[self.operandStack[-1]+self.localBias]
psCharStrings.SimpleT2Decompiler.op_callsubr(self, index)
self.processSubr(index, subr)
def op_callgsubr(self, index):
subr = self.globalSubrs[self.operandStack[-1]+self.globalBias]
psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index)
self.processSubr(index, subr)
def stop_hint_count(self, *args):
self.need_hintcount = False
for op_name in self.stop_hintcount_ops:
setattr(self, op_name, None)
cs = self.callingStack[-1]
if hasattr(cs, '_desubroutinized'):
raise StopHintCountEvent()
def op_hintmask(self, index):
psCharStrings.SimpleT2Decompiler.op_hintmask(self, index)
if self.need_hintcount:
self.stop_hint_count()
def processSubr(self, index, subr):
cs = self.callingStack[-1]
if not hasattr(cs, '_desubroutinized'):
cs._patches.append((index, subr._desubroutinized))
class CFFFontSet(object):
"""A CFF font "file" can contain more than one font, although this is
extremely rare (and not allowed within OpenType fonts).
This class is the entry point for parsing a CFF table. To actually
manipulate the data inside the CFF font, you will want to access the
``CFFFontSet``'s :class:`TopDict` object. To do this, a ``CFFFontSet``
object can either be treated as a dictionary (with appropriate
``keys()`` and ``values()`` methods) mapping font names to :class:`TopDict`
objects, or as a list.
.. code:: python
from fontTools import ttLib
tt = ttLib.TTFont("Tests/cffLib/data/LinLibertine_RBI.otf")
tt["CFF "].cff
# <fontTools.cffLib.CFFFontSet object at 0x101e24c90>
tt["CFF "].cff[0] # Here's your actual font data
# <fontTools.cffLib.TopDict object at 0x1020f1fd0>
"""
def decompile(self, file, otFont, isCFF2=None):
"""Parse a binary CFF file into an internal representation. ``file``
should be a file handle object. ``otFont`` is the top-level
:py:class:`fontTools.ttLib.ttFont.TTFont` object containing this CFF file.
If ``isCFF2`` is passed and set to ``True`` or ``False``, then the
library makes an assertion that the CFF header is of the appropriate
version.
"""
self.otFont = otFont
sstruct.unpack(cffHeaderFormat, file.read(3), self)
if isCFF2 is not None:
# called from ttLib: assert 'major' as read from file matches the
# expected version
expected_major = (2 if isCFF2 else 1)
if self.major != expected_major:
raise ValueError(
"Invalid CFF 'major' version: expected %d, found %d" %
(expected_major, self.major))
else:
# use 'major' version from file to determine if isCFF2
assert self.major in (1, 2), "Unknown CFF format"
isCFF2 = self.major == 2
if not isCFF2:
self.offSize = struct.unpack("B", file.read(1))[0]
file.seek(self.hdrSize)
self.fontNames = list(tostr(s) for s in Index(file, isCFF2=isCFF2))
self.topDictIndex = TopDictIndex(file, isCFF2=isCFF2)
self.strings = IndexedStrings(file)
else: # isCFF2
self.topDictSize = struct.unpack(">H", file.read(2))[0]
file.seek(self.hdrSize)
self.fontNames = ["CFF2Font"]
cff2GetGlyphOrder = otFont.getGlyphOrder
# in CFF2, offsetSize is the size of the TopDict data.
self.topDictIndex = TopDictIndex(
file, cff2GetGlyphOrder, self.topDictSize, isCFF2=isCFF2)
self.strings = None
self.GlobalSubrs = GlobalSubrsIndex(file, isCFF2=isCFF2)
self.topDictIndex.strings = self.strings
self.topDictIndex.GlobalSubrs = self.GlobalSubrs
def __len__(self):
return len(self.fontNames)
def keys(self):
return list(self.fontNames)
def values(self):
return self.topDictIndex
def __getitem__(self, nameOrIndex):
""" Return TopDict instance identified by name (str) or index (int
or any object that implements `__index__`).
"""
if hasattr(nameOrIndex, "__index__"):
index = nameOrIndex.__index__()
elif isinstance(nameOrIndex, str):
name = nameOrIndex
try:
index = self.fontNames.index(name)
except ValueError:
raise KeyError(nameOrIndex)
else:
raise TypeError(nameOrIndex)
return self.topDictIndex[index]
def compile(self, file, otFont, isCFF2=None):
"""Write the object back into binary representation onto the given file.
``file`` should be a file handle object. ``otFont`` is the top-level
:py:class:`fontTools.ttLib.ttFont.TTFont` object containing this CFF file.
If ``isCFF2`` is passed and set to ``True`` or ``False``, then the
library makes an assertion that the CFF header is of the appropriate
version.
"""
self.otFont = otFont
if isCFF2 is not None:
# called from ttLib: assert 'major' value matches expected version
expected_major = (2 if isCFF2 else 1)
if self.major != expected_major:
raise ValueError(
"Invalid CFF 'major' version: expected %d, found %d" %
(expected_major, self.major))
else:
# use current 'major' value to determine output format
assert self.major in (1, 2), "Unknown CFF format"
isCFF2 = self.major == 2
if otFont.recalcBBoxes and not isCFF2:
for topDict in self.topDictIndex:
topDict.recalcFontBBox()
if not isCFF2:
strings = IndexedStrings()
else:
strings = None
writer = CFFWriter(isCFF2)
topCompiler = self.topDictIndex.getCompiler(strings, self, isCFF2=isCFF2)
if isCFF2:
self.hdrSize = 5
writer.add(sstruct.pack(cffHeaderFormat, self))
# Note: topDictSize will most likely change in CFFWriter.toFile().
self.topDictSize = topCompiler.getDataLength()
writer.add(struct.pack(">H", self.topDictSize))
else:
self.hdrSize = 4
self.offSize = 4 # will most likely change in CFFWriter.toFile().
writer.add(sstruct.pack(cffHeaderFormat, self))
writer.add(struct.pack("B", self.offSize))
if not isCFF2:
fontNames = Index()
for name in self.fontNames:
fontNames.append(name)
writer.add(fontNames.getCompiler(strings, self, isCFF2=isCFF2))
writer.add(topCompiler)
if not isCFF2:
writer.add(strings.getCompiler())
writer.add(self.GlobalSubrs.getCompiler(strings, self, isCFF2=isCFF2))
for topDict in self.topDictIndex:
if not hasattr(topDict, "charset") or topDict.charset is None:
charset = otFont.getGlyphOrder()
topDict.charset = charset
children = topCompiler.getChildren(strings)
for child in children:
writer.add(child)
writer.toFile(file)
def toXML(self, xmlWriter):
"""Write the object into XML representation onto the given
:class:`fontTools.misc.xmlWriter.XMLWriter`.
.. code:: python
writer = xmlWriter.XMLWriter(sys.stdout)
tt["CFF "].cff.toXML(writer)
"""
xmlWriter.simpletag("major", value=self.major)
xmlWriter.newline()
xmlWriter.simpletag("minor", value=self.minor)
xmlWriter.newline()
for fontName in self.fontNames:
xmlWriter.begintag("CFFFont", name=tostr(fontName))
xmlWriter.newline()
font = self[fontName]
font.toXML(xmlWriter)
xmlWriter.endtag("CFFFont")
xmlWriter.newline()
xmlWriter.newline()
xmlWriter.begintag("GlobalSubrs")
xmlWriter.newline()
self.GlobalSubrs.toXML(xmlWriter)
xmlWriter.endtag("GlobalSubrs")
xmlWriter.newline()
def fromXML(self, name, attrs, content, otFont=None):
"""Reads data from the XML element into the ``CFFFontSet`` object."""
self.otFont = otFont
# set defaults. These will be replaced if there are entries for them
# in the XML file.
if not hasattr(self, "major"):
self.major = 1
if not hasattr(self, "minor"):
self.minor = 0
if name == "CFFFont":
if self.major == 1:
if not hasattr(self, "offSize"):
# this will be recalculated when the cff is compiled.
self.offSize = 4
if not hasattr(self, "hdrSize"):
self.hdrSize = 4
if not hasattr(self, "GlobalSubrs"):
self.GlobalSubrs = GlobalSubrsIndex()
if not hasattr(self, "fontNames"):
self.fontNames = []
self.topDictIndex = TopDictIndex()
fontName = attrs["name"]
self.fontNames.append(fontName)
topDict = TopDict(GlobalSubrs=self.GlobalSubrs)
topDict.charset = None # gets filled in later
elif self.major == 2:
if not hasattr(self, "hdrSize"):
self.hdrSize = 5
if not hasattr(self, "GlobalSubrs"):
self.GlobalSubrs = GlobalSubrsIndex()
if not hasattr(self, "fontNames"):
self.fontNames = ["CFF2Font"]
cff2GetGlyphOrder = self.otFont.getGlyphOrder
topDict = TopDict(
GlobalSubrs=self.GlobalSubrs,
cff2GetGlyphOrder=cff2GetGlyphOrder)
self.topDictIndex = TopDictIndex(None, cff2GetGlyphOrder)
self.topDictIndex.append(topDict)
for element in content:
if isinstance(element, str):
continue
name, attrs, content = element
topDict.fromXML(name, attrs, content)
if hasattr(topDict, "VarStore") and topDict.FDArray[0].vstore is None:
fdArray = topDict.FDArray
for fontDict in fdArray:
if hasattr(fontDict, "Private"):
fontDict.Private.vstore = topDict.VarStore
elif name == "GlobalSubrs":
subrCharStringClass = psCharStrings.T2CharString
if not hasattr(self, "GlobalSubrs"):
self.GlobalSubrs = GlobalSubrsIndex()
for element in content:
if isinstance(element, str):
continue
name, attrs, content = element
subr = subrCharStringClass()
subr.fromXML(name, attrs, content)
self.GlobalSubrs.append(subr)
elif name == "major":
self.major = int(attrs['value'])
elif name == "minor":
self.minor = int(attrs['value'])
def convertCFFToCFF2(self, otFont):
"""Converts this object from CFF format to CFF2 format. This conversion
is done 'in-place'. The conversion cannot be reversed.
This assumes a decompiled CFF table. (i.e. that the object has been
filled via :meth:`decompile`.)"""
self.major = 2
cff2GetGlyphOrder = self.otFont.getGlyphOrder
topDictData = TopDictIndex(None, cff2GetGlyphOrder)
topDictData.items = self.topDictIndex.items
self.topDictIndex = topDictData
topDict = topDictData[0]
if hasattr(topDict, 'Private'):
privateDict = topDict.Private
else:
privateDict = None
opOrder = buildOrder(topDictOperators2)
topDict.order = opOrder
topDict.cff2GetGlyphOrder = cff2GetGlyphOrder
for entry in topDictOperators:
key = entry[1]
if key not in opOrder:
if key in topDict.rawDict:
del topDict.rawDict[key]
if hasattr(topDict, key):
delattr(topDict, key)
if not hasattr(topDict, "FDArray"):
fdArray = topDict.FDArray = FDArrayIndex()
fdArray.strings = None
fdArray.GlobalSubrs = topDict.GlobalSubrs
topDict.GlobalSubrs.fdArray = fdArray
charStrings = topDict.CharStrings
if charStrings.charStringsAreIndexed:
charStrings.charStringsIndex.fdArray = fdArray
else:
charStrings.fdArray = fdArray
fontDict = FontDict()
fontDict.setCFF2(True)
fdArray.append(fontDict)
fontDict.Private = privateDict
privateOpOrder = buildOrder(privateDictOperators2)
for entry in privateDictOperators:
key = entry[1]
if key not in privateOpOrder:
if key in privateDict.rawDict:
# print "Removing private dict", key
del privateDict.rawDict[key]
if hasattr(privateDict, key):
delattr(privateDict, key)
# print "Removing privateDict attr", key
else:
# clean up the PrivateDicts in the fdArray
fdArray = topDict.FDArray
privateOpOrder = buildOrder(privateDictOperators2)
for fontDict in fdArray:
fontDict.setCFF2(True)
for key in fontDict.rawDict.keys():
if key not in fontDict.order:
del fontDict.rawDict[key]
if hasattr(fontDict, key):
delattr(fontDict, key)
privateDict = fontDict.Private
for entry in privateDictOperators:
key = entry[1]
if key not in privateOpOrder:
if key in privateDict.rawDict:
# print "Removing private dict", key
del privateDict.rawDict[key]
if hasattr(privateDict, key):
delattr(privateDict, key)
# print "Removing privateDict attr", key
# At this point, the Subrs and Charstrings are all still T2Charstring class
# easiest to fix this by compiling, then decompiling again
file = BytesIO()
self.compile(file, otFont, isCFF2=True)
file.seek(0)
self.decompile(file, otFont, isCFF2=True)
def desubroutinize(self):
for fontName in self.fontNames:
font = self[fontName]
cs = font.CharStrings
for g in font.charset:
c, _ = cs.getItemAndSelector(g)
c.decompile()
subrs = getattr(c.private, "Subrs", [])
decompiler = _DesubroutinizingT2Decompiler(subrs, c.globalSubrs, c.private)
decompiler.execute(c)
c.program = c._desubroutinized
del c._desubroutinized
# Delete all the local subrs
if hasattr(font, 'FDArray'):
for fd in font.FDArray:
pd = fd.Private
if hasattr(pd, 'Subrs'):
del pd.Subrs
if 'Subrs' in pd.rawDict:
del pd.rawDict['Subrs']
else:
pd = font.Private
if hasattr(pd, 'Subrs'):
del pd.Subrs
if 'Subrs' in pd.rawDict:
del pd.rawDict['Subrs']
# as well as the global subrs
self.GlobalSubrs.clear()
class CFFWriter(object):
    """Helper class for serializing CFF data to binary. Used by
    :meth:`CFFFontSet.compile`."""
    def __init__(self, isCFF2):
        # self.data holds the top-level items in file order; each item is
        # either a raw bytes object or a compiler object exposing
        # getDataLength()/setPos()/toFile().
        self.data = []
        self.isCFF2 = isCFF2
    def add(self, table):
        # Append one top-level item (bytes or compiler object).
        self.data.append(table)
    def toFile(self, file):
        # Lay out all items, then write them to `file`.
        #
        # Offsets stored inside CFF data depend on the size of the data
        # itself (offsets are written with a variable-width offSize), so the
        # layout is recomputed iteratively: each pass assigns positions via
        # setPos(), which may change dict contents and therefore item sizes
        # on the next pass. Iterate until positions reach a fixed point.
        lastPosList = None
        count = 1
        while True:
            log.log(DEBUG, "CFFWriter.toFile() iteration: %d", count)
            count = count + 1
            pos = 0
            posList = [pos]
            for item in self.data:
                if hasattr(item, "getDataLength"):
                    endPos = pos + item.getDataLength()
                    if isinstance(item, TopDictIndexCompiler) and item.isCFF2:
                        # CFF2 stores the TopDict length in the header.
                        self.topDictSize = item.getDataLength()
                else:
                    endPos = pos + len(item)
                if hasattr(item, "setPos"):
                    # Inform the compiler of its position; this may update
                    # offset operands in parent dicts.
                    item.setPos(pos, endPos)
                pos = endPos
                posList.append(pos)
            if posList == lastPosList:
                break
            lastPosList = posList
        log.log(DEBUG, "CFFWriter.toFile() writing to file.")
        begin = file.tell()
        if self.isCFF2:
            # data[1] is the header field holding the TopDict size (CFF2)
            self.data[1] = struct.pack(">H", self.topDictSize)
        else:
            # data[1] is the header's offSize byte (CFF)
            self.offSize = calcOffSize(lastPosList[-1])
            self.data[1] = struct.pack("B", self.offSize)
        posList = [0]
        for item in self.data:
            if hasattr(item, "toFile"):
                item.toFile(file)
            else:
                file.write(item)
            posList.append(file.tell() - begin)
        # sanity check: actual written positions must match the final layout
        assert posList == lastPosList
def calcOffSize(largestOffset):
    """Return the smallest offset width (1-4 bytes) able to represent
    *largestOffset*."""
    for offSize, limit in ((1, 0x100), (2, 0x10000), (3, 0x1000000)):
        if largestOffset < limit:
            return offSize
    return 4
class IndexCompiler(object):
    """Base class for writing CFF `INDEX data <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#5-index-data>`_
    to binary."""

    def __init__(self, items, strings, parent, isCFF2=None):
        if isCFF2 is None and hasattr(parent, "isCFF2"):
            # inherit the format flag from the parent compiler
            isCFF2 = parent.isCFF2
        assert isCFF2 is not None
        self.isCFF2 = isCFF2
        self.items = self.getItems(items, strings)
        self.parent = parent

    def getItems(self, items, strings):
        # subclasses convert items to compilers/bytes; base passes through
        return items

    def getOffsets(self):
        """Return each item's 1-based start offset plus the final end
        offset; empty for an empty INDEX (which has only a count field)."""
        if not self.items:
            return []
        pos = 1
        offsets = [pos]
        for item in self.items:
            if hasattr(item, "getDataLength"):
                pos += item.getDataLength()
            else:
                pos += len(item)
            offsets.append(pos)
        return offsets

    def getDataLength(self):
        countSize = 4 if self.isCFF2 else 2
        if not self.items:
            # count. For empty INDEX tables, this is the only entry.
            return countSize
        lastOffset = self.getOffsets()[-1]
        offSize = calcOffSize(lastOffset)
        # count + offSize byte + offset array + object data
        return (countSize + 1 +
                (len(self.items) + 1) * offSize +
                lastOffset - 1)

    def toFile(self, file):
        offsets = self.getOffsets()
        if self.isCFF2:
            writeCard32(file, len(self.items))
        else:
            writeCard16(file, len(self.items))
        # An empty INDEX contains only the count field.
        if not self.items:
            return
        offSize = calcOffSize(offsets[-1])
        writeCard8(file, offSize)
        for offset in offsets:
            # take the low offSize bytes of the big-endian representation
            binOffset = struct.pack(">l", offset)[4 - offSize:]
            assert len(binOffset) == offSize
            file.write(binOffset)
        for item in self.items:
            if hasattr(item, "toFile"):
                item.toFile(file)
            else:
                file.write(tobytes(item, encoding="latin1"))
class IndexedStringsCompiler(IndexCompiler):
    """Compiler for the String INDEX; the incoming items object is an
    ``IndexedStrings`` wrapper, so serialize its raw string list."""

    def getItems(self, items, strings):
        # the actual string list lives on the wrapper's `strings` attribute
        return items.strings
class TopDictIndexCompiler(IndexCompiler):
    """Helper class for writing the TopDict to binary."""

    def getItems(self, items, strings):
        return [item.getCompiler(strings, self) for item in items]

    def getChildren(self, strings):
        children = []
        for topDict in self.items:
            children.extend(topDict.getChildren(strings))
        return children

    def getOffsets(self):
        # CFF2 has no Top DICT INDEX: there is exactly one TopDict stored
        # as plain data, so the "offsets" are just its start and end.
        if self.isCFF2:
            return [0, self.items[0].getDataLength()]
        return super(TopDictIndexCompiler, self).getOffsets()

    def getDataLength(self):
        if self.isCFF2:
            # no INDEX wrapper in CFF2; the data is the single TopDict
            return self.items[0].getDataLength()
        return super(TopDictIndexCompiler, self).getDataLength()

    def toFile(self, file):
        if self.isCFF2:
            self.items[0].toFile(file)
        else:
            super(TopDictIndexCompiler, self).toFile(file)
class FDArrayIndexCompiler(IndexCompiler):
    """Helper class for writing the
    `Font DICT INDEX <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#10-font-dict-index-font-dicts-and-fdselect>`_
    to binary."""

    def getItems(self, items, strings):
        return [fontDict.getCompiler(strings, self) for fontDict in items]

    def getChildren(self, strings):
        children = []
        for fontDict in self.items:
            children.extend(fontDict.getChildren(strings))
        return children

    def toFile(self, file):
        offsets = self.getOffsets()
        if self.isCFF2:
            writeCard32(file, len(self.items))
        else:
            writeCard16(file, len(self.items))
        offSize = calcOffSize(offsets[-1])
        writeCard8(file, offSize)
        for offset in offsets:
            # take the low offSize bytes of the big-endian representation
            binOffset = struct.pack(">l", offset)[4 - offSize:]
            assert len(binOffset) == offSize
            file.write(binOffset)
        for item in self.items:
            if hasattr(item, "toFile"):
                item.toFile(file)
            else:
                file.write(item)

    def setPos(self, pos, endPos):
        # the FDArray offset is recorded in the parent (Top) DICT
        self.parent.rawDict["FDArray"] = pos
class GlobalSubrsCompiler(IndexCompiler):
    """Helper class for writing the `global subroutine INDEX <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#9-local-and-global-subr-indexes>`_
    to binary."""

    def getItems(self, items, strings):
        out = []
        for charString in items:
            # compiling fills in the charstring's `bytecode` attribute
            charString.compile(self.isCFF2)
            out.append(charString.bytecode)
        return out
class SubrsCompiler(GlobalSubrsCompiler):
    """Helper class for writing the `local subroutine INDEX <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#9-local-and-global-subr-indexes>`_
    to binary."""

    def setPos(self, pos, endPos):
        # the Subrs offset is stored relative to its Private DICT
        relativeOffset = pos - self.parent.pos
        self.parent.rawDict["Subrs"] = relativeOffset
class CharStringsCompiler(GlobalSubrsCompiler):
    """Helper class for writing the `CharStrings INDEX <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#9-local-and-global-subr-indexes>`_
    to binary.

    Charstring serialization (``getItems``) is inherited unchanged from
    :class:`GlobalSubrsCompiler`; the previous verbatim re-definition of
    ``getItems`` here was redundant and has been removed. This subclass
    only records where the INDEX lands in the file.
    """

    def setPos(self, pos, endPos):
        # the CharStrings offset is stored (absolute) in the Top DICT
        self.parent.rawDict["CharStrings"] = pos
class Index(object):
    """This class represents what the CFF spec calls an INDEX (an array of
    variable-sized objects). `Index` items can be addressed and set using
    Python list indexing; items are lazily extracted from the source file
    on first access."""

    compilerClass = IndexCompiler

    def __init__(self, file=None, isCFF2=None):
        assert (isCFF2 is None) == (file is None)
        self.items = []
        name = self.__class__.__name__
        if file is None:
            return
        self._isCFF2 = isCFF2
        log.log(DEBUG, "loading %s at %s", name, file.tell())
        self.file = file
        # CFF2 widened the count field from Card16 to Card32
        count = readCard32(file) if isCFF2 else readCard16(file)
        if count == 0:
            return
        self.items = [None] * count
        offSize = readCard8(file)
        log.log(DEBUG, " index count: %s offSize: %s", count, offSize)
        assert offSize <= 4, "offSize too large: %s" % offSize
        self.offsets = offsets = []
        pad = b'\0' * (4 - offSize)
        for index in range(count + 1):
            # left-pad each offSize-wide offset to 4 bytes and decode
            raw = pad + file.read(offSize)
            value, = struct.unpack(">L", raw)
            offsets.append(int(value))
        # CFF offsets are 1-based: relative to the byte before the data
        self.offsetBase = file.tell() - 1
        file.seek(self.offsetBase + offsets[-1])  # pretend we've read the whole lot
        log.log(DEBUG, " end of %s at %s", name, file.tell())

    def __len__(self):
        return len(self.items)

    def __getitem__(self, index):
        item = self.items[index]
        if item is not None:
            return item
        # lazily load the item's bytes from the file and decode them
        start = self.offsets[index] + self.offsetBase
        size = self.offsets[index + 1] - self.offsets[index]
        file = self.file
        file.seek(start)
        data = file.read(size)
        assert len(data) == size
        item = self.produceItem(index, data, file, start)
        self.items[index] = item
        return item

    def __setitem__(self, index, item):
        self.items[index] = item

    def produceItem(self, index, data, file, offset):
        # base class keeps raw bytes; subclasses decode them
        return data

    def append(self, item):
        """Add an item to an INDEX."""
        self.items.append(item)

    def getCompiler(self, strings, parent, isCFF2=None):
        return self.compilerClass(self, strings, parent, isCFF2=isCFF2)

    def clear(self):
        """Empty the INDEX."""
        del self.items[:]
class GlobalSubrsIndex(Index):
    """The INDEX of the font's global subroutines: shared ``CharString``
    fragments (repeated outlines, components, hinting idioms) that any
    glyph in the font may call.

    Note that ``callsubr``/``callgsubr`` operands are biased: add the
    subroutine number bias to the operand to get the index into this
    INDEX. Per the spec, the bias is 107 if there are fewer than 1240
    subrs, 1131 if fewer than 33900, and 32768 otherwise — e.g. operand
    ``-64`` with 107 bias refers to ``GlobalSubrs[-64 + 107]``.
    """

    compilerClass = GlobalSubrsCompiler
    subrClass = psCharStrings.T2CharString
    charStringClass = psCharStrings.T2CharString

    def __init__(self, file=None, globalSubrs=None, private=None,
                 fdSelect=None, fdArray=None, isCFF2=None):
        super(GlobalSubrsIndex, self).__init__(file, isCFF2=isCFF2)
        self.globalSubrs = globalSubrs
        self.private = private
        if fdSelect:
            self.fdSelect = fdSelect
        if fdArray:
            self.fdArray = fdArray

    def produceItem(self, index, data, file, offset):
        # pick the Private dict for this subr: explicit > via FDArray > none
        if self.private is not None:
            private = self.private
        elif hasattr(self, 'fdArray') and self.fdArray is not None:
            if hasattr(self, 'fdSelect') and self.fdSelect is not None:
                fdIndex = self.fdSelect[index]
            else:
                fdIndex = 0
            private = self.fdArray[fdIndex].Private
        else:
            private = None
        return self.subrClass(data, private=private, globalSubrs=self.globalSubrs)

    def toXML(self, xmlWriter):
        """Write the subroutines index into XML representation onto the given
        :class:`fontTools.misc.xmlWriter.XMLWriter`.

        .. code:: python

            writer = xmlWriter.XMLWriter(sys.stdout)
            tt["CFF "].cff[0].GlobalSubrs.toXML(writer)
        """
        xmlWriter.comment(
            "The 'index' attribute is only for humans; "
            "it is ignored when parsed.")
        xmlWriter.newline()
        for i in range(len(self)):
            subr = self[i]
            # undecompiled subrs are dumped as raw hex
            if subr.needsDecompilation():
                xmlWriter.begintag("CharString", index=i, raw=1)
            else:
                xmlWriter.begintag("CharString", index=i)
            xmlWriter.newline()
            subr.toXML(xmlWriter)
            xmlWriter.endtag("CharString")
            xmlWriter.newline()

    def fromXML(self, name, attrs, content):
        if name != "CharString":
            return
        subr = self.subrClass()
        subr.fromXML(name, attrs, content)
        self.append(subr)

    def getItemAndSelector(self, index):
        sel = self.fdSelect[index] if hasattr(self, 'fdSelect') else None
        return self[index], sel
class SubrsIndex(GlobalSubrsIndex):
    """An INDEX of local subroutines: ``CharString`` data private to the
    glyphs attached to this index, unlike the font-wide global subrs."""

    compilerClass = SubrsCompiler
class TopDictIndex(Index):
    """The array of ``TopDict`` structures in the font (usually only one
    entry is present), so ``tt["CFF "].cff[0]`` and
    ``tt["CFF "].cff.topDictIndex[0]`` refer to the same object."""

    compilerClass = TopDictIndexCompiler

    def __init__(self, file=None, cff2GetGlyphOrder=None, topSize=0,
                 isCFF2=None):
        assert (isCFF2 is None) == (file is None)
        self.cff2GetGlyphOrder = cff2GetGlyphOrder
        if file is not None and isCFF2:
            # CFF2 stores a single TopDict as plain data, not as an INDEX,
            # so set up the lazy-loading bookkeeping by hand.
            self._isCFF2 = isCFF2
            name = self.__class__.__name__
            log.log(DEBUG, "loading %s at %s", name, file.tell())
            self.file = file
            self.items = [None]
            self.offsets = [0, topSize]
            self.offsetBase = file.tell()
            # pretend we've read the whole lot
            file.seek(self.offsetBase + topSize)
            log.log(DEBUG, " end of %s at %s", name, file.tell())
        else:
            super(TopDictIndex, self).__init__(file, isCFF2=isCFF2)

    def produceItem(self, index, data, file, offset):
        top = TopDict(
            self.strings, file, offset, self.GlobalSubrs,
            self.cff2GetGlyphOrder, isCFF2=self._isCFF2)
        top.decompile(data)
        return top

    def toXML(self, xmlWriter):
        for i in range(len(self)):
            xmlWriter.begintag("FontDict", index=i)
            xmlWriter.newline()
            self[i].toXML(xmlWriter)
            xmlWriter.endtag("FontDict")
            xmlWriter.newline()
class FDArrayIndex(Index):
    """INDEX of Font DICTs, used by CID-keyed CFF and CFF2 fonts."""

    compilerClass = FDArrayIndexCompiler

    def toXML(self, xmlWriter):
        for i in range(len(self)):
            xmlWriter.begintag("FontDict", index=i)
            xmlWriter.newline()
            self[i].toXML(xmlWriter)
            xmlWriter.endtag("FontDict")
            xmlWriter.newline()

    def produceItem(self, index, data, file, offset):
        fontDict = FontDict(
            self.strings, file, offset, self.GlobalSubrs, isCFF2=self._isCFF2,
            vstore=self.vstore)
        fontDict.decompile(data)
        return fontDict

    def fromXML(self, name, attrs, content):
        if name != "FontDict":
            return
        fontDict = FontDict()
        for element in content:
            if isinstance(element, str):
                continue
            name, attrs, content = element
            fontDict.fromXML(name, attrs, content)
        self.append(fontDict)
class VarStoreData(object):
    """Wrapper around the CFF2 VariationStore data, bridging to the
    otTables ``VarStore`` object for (de)compilation."""

    def __init__(self, file=None, otVarStore=None):
        self.file = file
        self.data = None
        self.otVarStore = otVarStore
        self.font = TTFont()  # dummy font for the decompile function.

    def decompile(self):
        if self.file:
            # read data in from file. Assume position is correct.
            length = readCard16(self.file)
            self.data = self.file.read(length)
            reader = OTTableReader(self.data, {})
            self.otVarStore = ot.VarStore()
            self.otVarStore.decompile(reader, self.font)
        return self

    def compile(self):
        writer = OTTableWriter()
        self.otVarStore.compile(writer, self.font)
        # Note that this omits the initial Card16 length from the CFF2
        # VarStore data block
        self.data = writer.getAllData()

    def writeXML(self, xmlWriter, name):
        self.otVarStore.toXML(xmlWriter, self.font)

    def xmlRead(self, name, attrs, content, parent):
        self.otVarStore = ot.VarStore()
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            self.otVarStore.fromXML(name, attrs, content, self.font)
        return None

    def __len__(self):
        return len(self.data)

    def getNumRegions(self, vsIndex):
        # number of regions referenced by the requested VarData element
        return self.otVarStore.VarData[vsIndex].VarRegionCount
class FDSelect(object):
    """Maps glyph IDs to Font DICT indices (FDSelect structure).

    When constructed with a *file*, the structure is read from it (formats
    0, 3 and 4 are supported). Otherwise an empty ``gidArray`` is created
    (the XML-loading path); *format* ``None`` means the smallest
    representation will be chosen at compile time.
    """

    @staticmethod
    def _decompileRanges(file, numGlyphs, readFirst, readFD):
        """Decode the range-based FDSelect body shared by formats 3 and 4.

        *readFirst* reads the range-count/first-glyph/sentinel fields and
        *readFD* reads the FD index field; formats 3 and 4 differ only in
        these integer widths.
        """
        gidArray = [None] * numGlyphs
        nRanges = readFirst(file)
        fd = None
        prev = None
        for _ in range(nRanges):
            first = readFirst(file)
            if prev is not None:
                # fill the previous range now that we know where it ends
                for glyphID in range(prev, first):
                    gidArray[glyphID] = fd
            prev = first
            fd = readFD(file)
        if prev is not None:
            # the sentinel entry closes the last range
            first = readFirst(file)
            for glyphID in range(prev, first):
                gidArray[glyphID] = fd
        return gidArray

    def __init__(self, file=None, numGlyphs=None, format=None):
        if file:
            # read data in from file
            self.format = readCard8(file)
            if self.format == 0:
                from array import array
                self.gidArray = array("B", file.read(numGlyphs)).tolist()
            elif self.format == 3:
                # ranges with Card16 first-glyph/sentinel and Card8 FD
                self.gidArray = self._decompileRanges(
                    file, numGlyphs, readCard16, readCard8)
            elif self.format == 4:
                # CFF2 addition: Card32 first-glyph/sentinel, Card16 FD
                self.gidArray = self._decompileRanges(
                    file, numGlyphs, readCard32, readCard16)
            else:
                assert False, "unsupported FDSelect format: %s" % format
        else:
            # reading from XML. Make empty gidArray, and leave format as passed in.
            # format is None will result in the smallest representation being used.
            self.format = format
            self.gidArray = []

    def __len__(self):
        return len(self.gidArray)

    def __getitem__(self, index):
        return self.gidArray[index]

    def __setitem__(self, index, fdSelectValue):
        self.gidArray[index] = fdSelectValue

    def append(self, fdSelectValue):
        self.gidArray.append(fdSelectValue)
class CharStrings(object):
    """The ``CharStrings`` in the font represent the instructions for drawing
    each glyph. This object presents a dictionary interface to the font's
    CharStrings, indexed by glyph name:

    .. code:: python

        tt["CFF "].cff[0].CharStrings["a"]
        # <T2CharString (bytecode) at 103451e90>

    See :class:`fontTools.misc.psCharStrings.T1CharString` and
    :class:`fontTools.misc.psCharStrings.T2CharString` for how to decompile,
    compile and interpret the glyph drawing instructions in the returned objects.
    """

    def __init__(self, file, charset, globalSubrs, private, fdSelect, fdArray,
                 isCFF2=None):
        self.globalSubrs = globalSubrs
        if file is None:
            self.charStrings = {}
            # read from ttx file: charStrings.values() are actual charstrings
            self.charStringsAreIndexed = 0
        else:
            self.charStringsIndex = SubrsIndex(
                file, globalSubrs, private, fdSelect, fdArray, isCFF2=isCFF2)
            # read from OTF file: charStrings.values() are indices into
            # charStringsIndex.
            self.charStrings = {name: i for i, name in enumerate(charset)}
            self.charStringsAreIndexed = 1
        self.private = private
        if fdSelect is not None:
            self.fdSelect = fdSelect
        if fdArray is not None:
            self.fdArray = fdArray

    def keys(self):
        return list(self.charStrings.keys())

    def values(self):
        if self.charStringsAreIndexed:
            return self.charStringsIndex
        return list(self.charStrings.values())

    def has_key(self, name):
        return name in self.charStrings

    __contains__ = has_key

    def __len__(self):
        return len(self.charStrings)

    def __getitem__(self, name):
        entry = self.charStrings[name]
        if self.charStringsAreIndexed:
            # indirection: the dict stores an index into charStringsIndex
            entry = self.charStringsIndex[entry]
        return entry

    def __setitem__(self, name, charString):
        if self.charStringsAreIndexed:
            index = self.charStrings[name]
            self.charStringsIndex[index] = charString
        else:
            self.charStrings[name] = charString

    def getItemAndSelector(self, name):
        """Return ``(charString, fdSelectIndex)``; the selector is None for
        non-CID fonts."""
        if self.charStringsAreIndexed:
            index = self.charStrings[name]
            return self.charStringsIndex.getItemAndSelector(index)
        if not hasattr(self, 'fdArray'):
            sel = None
        elif hasattr(self, 'fdSelect'):
            sel = self.charStrings[name].fdSelectIndex
        else:
            sel = 0
        return self.charStrings[name], sel

    def toXML(self, xmlWriter):
        for name in sorted(self.keys()):
            charStr, fdSelectIndex = self.getItemAndSelector(name)
            tagAttrs = [('name', name)]
            if fdSelectIndex is not None:
                tagAttrs.append(('fdSelectIndex', fdSelectIndex))
            if charStr.needsDecompilation():
                tagAttrs.append(("raw", 1))
            xmlWriter.begintag("CharString", tagAttrs)
            xmlWriter.newline()
            charStr.toXML(xmlWriter)
            xmlWriter.endtag("CharString")
            xmlWriter.newline()

    def fromXML(self, name, attrs, content):
        for element in content:
            if isinstance(element, str):
                continue
            name, attrs, content = element
            if name != "CharString":
                continue
            fdID = -1
            if hasattr(self, "fdArray"):
                try:
                    fdID = safeEval(attrs["fdSelectIndex"])
                except KeyError:
                    fdID = 0
                private = self.fdArray[fdID].Private
            else:
                private = self.private
            glyphName = attrs["name"]
            charString = psCharStrings.T2CharString(
                private=private,
                globalSubrs=self.globalSubrs)
            charString.fromXML(name, attrs, content)
            if fdID >= 0:
                charString.fdSelectIndex = fdID
            self[glyphName] = charString
def readCard8(file):
    """Read one unsigned byte from *file*."""
    data = file.read(1)
    return byteord(data)
def readCard16(file):
    """Read a big-endian unsigned 16-bit integer from *file*."""
    data = file.read(2)
    return struct.unpack(">H", data)[0]
def readCard32(file):
    """Read a big-endian unsigned 32-bit integer from *file*."""
    data = file.read(4)
    return struct.unpack(">L", data)[0]
def writeCard8(file, value):
    """Write *value* to *file* as one unsigned byte."""
    data = bytechr(value)
    file.write(data)
def writeCard16(file, value):
    """Write *value* to *file* as a big-endian unsigned 16-bit integer."""
    data = struct.pack(">H", value)
    file.write(data)
def writeCard32(file, value):
    """Write *value* to *file* as a big-endian unsigned 32-bit integer."""
    data = struct.pack(">L", value)
    file.write(data)
def packCard8(value):
    """Return *value* packed as one unsigned byte."""
    return bytechr(value)
def packCard16(value):
    """Return *value* packed as a big-endian unsigned 16-bit integer."""
    return struct.pack(">H", value)
def packCard32(value):
    """Return *value* packed as a big-endian unsigned 32-bit integer."""
    return struct.pack(">L", value)
def buildOperatorDict(table):
    """Map operator value -> (name, argument spec) from an operator table."""
    return {op: (name, arg) for op, name, arg, default, conv in table}
def buildOpcodeDict(table):
    """Map operator name -> (encoded opcode bytes, argument spec).

    Two-byte (escaped) operators are given as an (escape, op) tuple in the
    table and are encoded as two bytes."""
    d = {}
    for op, name, arg, default, conv in table:
        if isinstance(op, tuple):
            code = bytechr(op[0]) + bytechr(op[1])
        else:
            code = bytechr(op)
        d[name] = (code, arg)
    return d
def buildOrder(table):
    """Return the operator names of *table*, in table order."""
    return [name for op, name, arg, default, conv in table]
def buildDefaults(table):
    """Map operator name -> default value, skipping entries without one."""
    return {
        name: default
        for op, name, arg, default, conv in table
        if default is not None
    }
def buildConverters(table):
    """Map operator name -> converter instance from an operator table."""
    return {name: conv for op, name, arg, default, conv in table}
class SimpleConverter(object):
    """Base converter: passes values through unchanged in both directions.

    Subclasses override ``_read``/``write``/``xmlWrite``/``xmlRead`` for
    type-specific conversion between binary, Python and XML forms."""

    def read(self, parent, value):
        if hasattr(parent, "file"):
            # _read may move the shared file pointer; restore it afterwards
            file = parent.file
            pos = file.tell()
            try:
                return self._read(parent, value)
            finally:
                file.seek(pos)
        return self._read(parent, value)

    def _read(self, parent, value):
        return value

    def write(self, parent, value):
        return value

    def xmlWrite(self, xmlWriter, name, value):
        xmlWriter.simpletag(name, value=value)
        xmlWriter.newline()

    def xmlRead(self, name, attrs, content, parent):
        return attrs["value"]
class ASCIIConverter(SimpleConverter):
    """Converter for values stored as ASCII bytes."""

    def _read(self, parent, value):
        return tostr(value, encoding='ascii')

    def write(self, parent, value):
        return tobytes(value, encoding='ascii')

    def xmlWrite(self, xmlWriter, name, value):
        asString = tostr(value, encoding="ascii")
        xmlWriter.simpletag(name, value=asString)
        xmlWriter.newline()

    def xmlRead(self, name, attrs, content, parent):
        return tobytes(attrs["value"], encoding="ascii")
class Latin1Converter(SimpleConverter):
    """Converter for values stored as Latin-1 bytes."""

    def _read(self, parent, value):
        return tostr(value, encoding='latin1')

    def write(self, parent, value):
        return tobytes(value, encoding='latin1')

    def xmlWrite(self, xmlWriter, name, value):
        value = tostr(value, encoding="latin1")
        if name in ['Notice', 'Copyright']:
            # collapse hard line breaks in legal text to single spaces
            value = re.sub(r"[\r\n]\s+", " ", value)
        xmlWriter.simpletag(name, value=value)
        xmlWriter.newline()

    def xmlRead(self, name, attrs, content, parent):
        return tobytes(attrs["value"], encoding="latin1")
def parseNum(s):
    """Parse *s* as an int, falling back to float.

    Raises ValueError if *s* is neither a valid int nor float literal.
    (The previous bare ``except:`` also swallowed unrelated exceptions
    such as KeyboardInterrupt; only conversion failures are caught now.)
    """
    try:
        value = int(s)
    except ValueError:
        value = float(s)
    return value
def parseBlendList(s):
    """Parse XML content elements holding blend lists.

    Each non-string element is a ``(name, attrs, content)`` triple whose
    ``value`` attribute is a space-separated list of numbers.  Returns a
    list of number lists, or a single number list when there is exactly
    one element.

    Uses parseNum instead of eval: the values come from an (untrusted)
    XML file, and eval would execute arbitrary expressions.
    """
    valueList = []
    for element in s:
        if isinstance(element, str):
            continue
        name, attrs, content = element
        blendList = [parseNum(val) for val in attrs["value"].split()]
        valueList.append(blendList)
    if len(valueList) == 1:
        valueList = valueList[0]
    return valueList
class NumberConverter(SimpleConverter):

    """Converter for single numeric operands, including CFF2 blend lists
    (a list value is serialized as a nested ``blend`` element)."""

    def xmlWrite(self, xmlWriter, name, value):
        if not isinstance(value, list):
            xmlWriter.simpletag(name, value=value)
            xmlWriter.newline()
            return
        xmlWriter.begintag(name)
        xmlWriter.newline()
        xmlWriter.indent()
        xmlWriter.simpletag(
            kBlendDictOpName, value=" ".join(str(v) for v in value))
        xmlWriter.newline()
        xmlWriter.dedent()
        xmlWriter.endtag(name)
        xmlWriter.newline()

    def xmlRead(self, name, attrs, content, parent):
        if attrs.get("value", None) is None:
            # No plain value attribute: nested blend-list content.
            return parseBlendList(content)
        return parseNum(attrs["value"])
class ArrayConverter(SimpleConverter):

    """Converter for array/delta operands, including CFF2 blend lists
    (a list-of-lists value becomes one ``blend`` element per sub-list)."""

    def xmlWrite(self, xmlWriter, name, value):
        if value and isinstance(value[0], list):
            xmlWriter.begintag(name)
            xmlWriter.newline()
            xmlWriter.indent()
            for valueList in value:
                xmlWriter.simpletag(
                    kBlendDictOpName,
                    value=" ".join(str(v) for v in valueList))
                xmlWriter.newline()
            xmlWriter.dedent()
            xmlWriter.endtag(name)
            xmlWriter.newline()
        else:
            xmlWriter.simpletag(name, value=" ".join(str(v) for v in value))
            xmlWriter.newline()

    def xmlRead(self, name, attrs, content, parent):
        valueString = attrs.get("value", None)
        if valueString is None:
            # No plain value attribute: nested blend-list content.
            return parseBlendList(content)
        return [parseNum(value) for value in valueString.split()]
class TableConverter(SimpleConverter):

    """Converter whose value is itself an object exposing toXML/fromXML.

    Subclasses supply getClass() naming the concrete value class.
    """

    def xmlWrite(self, xmlWriter, name, value):
        xmlWriter.begintag(name)
        xmlWriter.newline()
        value.toXML(xmlWriter)
        xmlWriter.endtag(name)
        xmlWriter.newline()

    def xmlRead(self, name, attrs, content, parent):
        obj = self.getClass()()
        for element in content:
            if isinstance(element, str):
                continue
            # element is a (name, attrs, content) triple.
            obj.fromXML(*element)
        return obj
class PrivateDictConverter(TableConverter):

    """Converter for the Private operator: (size, offset) -> PrivateDict."""

    def getClass(self):
        return PrivateDict

    def _read(self, parent, value):
        # The operand is a pair: byte length of the dict data and its
        # absolute offset within the file.
        size, offset = value
        file = parent.file
        isCFF2 = parent._isCFF2
        try:
            vstore = parent.vstore
        except AttributeError:
            # Plain CFF (no variation store present on the parent).
            vstore = None
        priv = PrivateDict(
            parent.strings, file, offset, isCFF2=isCFF2, vstore=vstore)
        file.seek(offset)
        data = file.read(size)
        assert len(data) == size
        priv.decompile(data)
        return priv

    def write(self, parent, value):
        # Real (size, offset) is patched in later by the compiler's setPos.
        return (0, 0)  # dummy value
class SubrsConverter(TableConverter):

    """Converter for the Subrs operator: offset -> SubrsIndex.

    The offset is relative to the start of the Private dict (the parent).
    """

    def getClass(self):
        return SubrsIndex

    def _read(self, parent, value):
        file = parent.file
        isCFF2 = parent._isCFF2
        file.seek(parent.offset + value)  # Offset(self)
        return SubrsIndex(file, isCFF2=isCFF2)

    def write(self, parent, value):
        # Real offset is patched in later by the compiler.
        return 0  # dummy value
class CharStringsConverter(TableConverter):

    """Converter for the CharStrings operator: offset -> CharStrings.

    CID-keyed fonts (parent has an FDArray) supply per-glyph private
    dicts via FDArray/FDSelect; name-keyed fonts use the single
    parent.Private.
    """

    def _read(self, parent, value):
        file = parent.file
        isCFF2 = parent._isCFF2
        charset = parent.charset
        globalSubrs = parent.GlobalSubrs
        if hasattr(parent, "FDArray"):
            # CID-keyed: private dicts come from the FDArray.
            fdArray = parent.FDArray
            if hasattr(parent, "FDSelect"):
                fdSelect = parent.FDSelect
            else:
                fdSelect = None
            private = None
        else:
            # Name-keyed: one shared private dict on the top dict.
            fdSelect, fdArray = None, None
            private = parent.Private
        file.seek(value)  # Offset(0)
        charStrings = CharStrings(
            file, charset, globalSubrs, private, fdSelect, fdArray, isCFF2=isCFF2)
        return charStrings

    def write(self, parent, value):
        # Real offset is patched in later by the compiler.
        return 0  # dummy value

    def xmlRead(self, name, attrs, content, parent):
        if hasattr(parent, "FDArray"):
            # if it is a CID-keyed font, then the private Dict is extracted from the
            # parent.FDArray
            fdArray = parent.FDArray
            if hasattr(parent, "FDSelect"):
                fdSelect = parent.FDSelect
            else:
                fdSelect = None
            private = None
        else:
            # if it is a name-keyed font, then the private dict is in the top dict,
            # and
            # there is no fdArray.
            private, fdSelect, fdArray = parent.Private, None, None
        charStrings = CharStrings(
            None, None, parent.GlobalSubrs, private, fdSelect, fdArray)
        charStrings.fromXML(name, attrs, content)
        return charStrings
class CharsetConverter(SimpleConverter):

    """Converter for the charset operator.

    Values 0-2 select predefined charsets; larger values are a file
    offset to charset data (format 0, 1 or 2).  The decoded value is a
    list of glyph names, or None for FDArray font dicts.
    """

    def _read(self, parent, value):
        isCID = hasattr(parent, "ROS")
        if value > 2:
            # Custom charset stored in the file at offset `value`.
            numGlyphs = parent.numGlyphs
            file = parent.file
            file.seek(value)
            log.log(DEBUG, "loading charset at %s", value)
            format = readCard8(file)
            if format == 0:
                charset = parseCharset0(numGlyphs, file, parent.strings, isCID)
            elif format == 1 or format == 2:
                charset = parseCharset(numGlyphs, file, parent.strings, isCID, format)
            else:
                raise NotImplementedError
            assert len(charset) == numGlyphs
            log.log(DEBUG, " charset end at %s", file.tell())
            # make sure glyph names are unique
            allNames = {}
            newCharset = []
            for glyphName in charset:
                if glyphName in allNames:
                    # make up a new glyphName that's unique
                    n = allNames[glyphName]
                    while (glyphName + "#" + str(n)) in allNames:
                        n += 1
                    allNames[glyphName] = n + 1
                    glyphName = glyphName + "#" + str(n)
                allNames[glyphName] = 1
                newCharset.append(glyphName)
            charset = newCharset
        else:  # offset == 0 -> no charset data.
            if isCID or "CharStrings" not in parent.rawDict:
                # We get here only when processing fontDicts from the FDArray of
                # CFF-CID fonts. Only the real topDict references the chrset.
                assert value == 0
                charset = None
            elif value == 0:
                charset = cffISOAdobeStrings
            elif value == 1:
                charset = cffIExpertStrings
            elif value == 2:
                charset = cffExpertSubsetStrings
            # Predefined charsets may be longer than the font; truncate.
            if charset and (len(charset) != parent.numGlyphs):
                charset = charset[:parent.numGlyphs]
        return charset

    def write(self, parent, value):
        # Real offset (or predefined id) is patched in by the compiler.
        return 0  # dummy value

    def xmlWrite(self, xmlWriter, name, value):
        # XXX only write charset when not in OT/TTX context, where we
        # dump charset as a separate "GlyphOrder" table.
        # # xmlWriter.simpletag("charset")
        xmlWriter.comment("charset is dumped separately as the 'GlyphOrder' element")
        xmlWriter.newline()

    def xmlRead(self, name, attrs, content, parent):
        # Charset is reconstructed from the GlyphOrder table instead.
        pass
class CharsetCompiler(object):

    """Compiles charset data, picking the smaller of format 0 and the
    range formats (1/2)."""

    def __init__(self, strings, charset, parent):
        assert charset[0] == '.notdef'
        isCID = hasattr(parent.dictObj, "ROS")
        fmt0 = packCharset0(charset, isCID, strings)
        fmtRange = packCharset(charset, isCID, strings)
        # Keep whichever encoding is shorter; ties go to format 0.
        self.data = fmtRange if len(fmtRange) < len(fmt0) else fmt0
        self.parent = parent

    def setPos(self, pos, endPos):
        self.parent.rawDict["charset"] = pos

    def getDataLength(self):
        return len(self.data)

    def toFile(self, file):
        file.write(self.data)
def getStdCharSet(charset):
    """Return the predefined charset id (0, 1 or 2) whose glyph names
    match *charset* as a prefix, or None if none does."""
    candidates = [
        (cffISOAdobeStringCount, cffISOAdobeStrings, 0),
        (cffExpertStringCount, cffIExpertStrings, 1),
        (cffExpertSubsetStringCount, cffExpertSubsetStrings, 2),
    ]
    n = len(charset)
    for count, names, code in candidates:
        if n > count:
            # Font has more glyphs than this predefined charset.
            continue
        if all(charset[i] == names[i] for i in range(n)):
            return code
    return None
def getCIDfromName(name, strings):
    """Extract the numeric CID from a glyph name like ``cid01234``.

    The *strings* argument is unused; it exists so this function is
    interchangeable with getSIDfromName.
    """
    cid = name[3:]
    return int(cid)
def getSIDfromName(name, strings):
    """Look up (or register) *name* in the string index, returning its SID."""
    return strings.getSID(name)
def packCharset0(charset, isCID, strings):
    """Pack *charset* as a format-0 charset: one Card16 id per glyph."""
    getNameID = getCIDfromName if isCID else getSIDfromName
    chunks = [packCard8(0)]
    # Glyph 0 (.notdef) is implicit and never written.
    for glyphName in charset[1:]:
        chunks.append(packCard16(getNameID(glyphName, strings)))
    return bytesjoin(chunks)
def packCharset(charset, isCID, strings):
    """Pack *charset* as ranges of consecutive ids (format 1, upgraded
    to format 2 when any range's nLeft exceeds a Card8)."""
    fmt = 1
    ranges = []
    first = None
    end = 0
    if isCID:
        getNameID = getCIDfromName
    else:
        getNameID = getSIDfromName
    for name in charset[1:]:
        SID = getNameID(name, strings)
        if first is None:
            first = SID
        elif end + 1 != SID:
            # The consecutive run broke; flush it and start a new one.
            nLeft = end - first
            if nLeft > 255:
                # nLeft no longer fits a Card8: whole table uses format 2.
                fmt = 2
            ranges.append((first, nLeft))
            first = SID
        end = SID
    if end:
        # Flush the final run.
        nLeft = end - first
        if nLeft > 255:
            fmt = 2
        ranges.append((first, nLeft))
    data = [packCard8(fmt)]
    if fmt == 1:
        nLeftFunc = packCard8
    else:
        nLeftFunc = packCard16
    for first, nLeft in ranges:
        data.append(packCard16(first) + nLeftFunc(nLeft))
    return bytesjoin(data)
def parseCharset0(numGlyphs, file, strings, isCID):
    """Read a format-0 charset: one Card16 per glyph after .notdef.

    CID fonts get synthetic ``cidNNNNN`` names; otherwise the SID is
    resolved through the string index.
    """
    charset = [".notdef"]
    if isCID:
        for _ in range(numGlyphs - 1):
            charset.append("cid" + str(readCard16(file)).zfill(5))
    else:
        for _ in range(numGlyphs - 1):
            charset.append(strings[readCard16(file)])
    return charset
def parseCharset(numGlyphs, file, strings, isCID, fmt):
    """Read a format-1/2 charset: ranges of consecutive ids.

    Format 1 stores nLeft as a Card8, format 2 as a Card16.
    """
    charset = ['.notdef']
    count = 1
    if fmt == 1:
        nLeftFunc = readCard8
    else:
        nLeftFunc = readCard16
    while count < numGlyphs:
        first = readCard16(file)
        nLeft = nLeftFunc(file)
        # Each range covers nLeft + 1 consecutive ids starting at `first`.
        if isCID:
            for CID in range(first, first + nLeft + 1):
                charset.append("cid" + str(CID).zfill(5))
        else:
            for SID in range(first, first + nLeft + 1):
                charset.append(strings[SID])
        count = count + nLeft + 1
    return charset
class EncodingCompiler(object):

    """Compiles a custom Encoding, picking the smaller of format 0/1."""

    def __init__(self, strings, encoding, parent):
        # Predefined encodings are written as a plain number, not here.
        assert not isinstance(encoding, str)
        fmt0 = packEncoding0(parent.dictObj.charset, encoding, parent.strings)
        fmt1 = packEncoding1(parent.dictObj.charset, encoding, parent.strings)
        self.data = fmt0 if len(fmt0) < len(fmt1) else fmt1
        self.parent = parent

    def setPos(self, pos, endPos):
        self.parent.rawDict["Encoding"] = pos

    def getDataLength(self):
        return len(self.data)

    def toFile(self, file):
        file.write(self.data)
class EncodingConverter(SimpleConverter):

    """Converter for the Encoding operator.

    Values 0/1 select the Standard/Expert predefined encodings; larger
    values are a file offset to a custom format 0 or 1 encoding table.
    The decoded value is either one of the predefined-name strings or a
    256-entry list of glyph names.
    """

    def _read(self, parent, value):
        if value == 0:
            return "StandardEncoding"
        elif value == 1:
            return "ExpertEncoding"
        else:
            assert value > 1
            file = parent.file
            file.seek(value)
            log.log(DEBUG, "loading Encoding at %s", value)
            fmt = readCard8(file)
            haveSupplement = fmt & 0x80
            if haveSupplement:
                raise NotImplementedError("Encoding supplements are not yet supported")
            fmt = fmt & 0x7f
            if fmt == 0:
                encoding = parseEncoding0(parent.charset, file, haveSupplement,
                                          parent.strings)
            elif fmt == 1:
                encoding = parseEncoding1(parent.charset, file, haveSupplement,
                                          parent.strings)
            else:
                # Formats other than 0 and 1 are undefined in CFF; the
                # previous code fell through and raised a confusing
                # NameError on the unbound `encoding` variable.
                raise NotImplementedError(
                    "Unsupported Encoding format: %d" % fmt)
            return encoding

    def write(self, parent, value):
        if value == "StandardEncoding":
            return 0
        elif value == "ExpertEncoding":
            return 1
        # Real offset for a custom encoding is patched in by the compiler.
        return 0  # dummy value

    def xmlWrite(self, xmlWriter, name, value):
        if value in ("StandardEncoding", "ExpertEncoding"):
            xmlWriter.simpletag(name, name=value)
            xmlWriter.newline()
            return
        xmlWriter.begintag(name)
        xmlWriter.newline()
        for code in range(len(value)):
            glyphName = value[code]
            if glyphName != ".notdef":
                xmlWriter.simpletag("map", code=hex(code), name=glyphName)
                xmlWriter.newline()
        xmlWriter.endtag(name)
        xmlWriter.newline()

    def xmlRead(self, name, attrs, content, parent):
        if "name" in attrs:
            # Predefined encoding referenced by name.
            return attrs["name"]
        encoding = [".notdef"] * 256
        for element in content:
            if isinstance(element, str):
                continue
            name, attrs, content = element
            code = safeEval(attrs["code"])
            glyphName = attrs["name"]
            encoding[code] = glyphName
        return encoding
def parseEncoding0(charset, file, haveSupplement, strings):
    """Read a format-0 Encoding: one code byte per encoded glyph."""
    nCodes = readCard8(file)
    encoding = [".notdef"] * 256
    for glyphID in range(1, nCodes + 1):
        code = readCard8(file)
        if code:
            encoding[code] = charset[glyphID]
    return encoding
def parseEncoding1(charset, file, haveSupplement, strings):
    """Read a format-1 Encoding: ranges of consecutive codes."""
    nRanges = readCard8(file)
    encoding = [".notdef"] * 256
    glyphID = 1
    for i in range(nRanges):
        code = readCard8(file)
        nLeft = readCard8(file)
        # Assign consecutive codes to the next nLeft + 1 glyphs; note the
        # loop deliberately advances the outer `glyphID` counter.
        for glyphID in range(glyphID, glyphID + nLeft + 1):
            encoding[code] = charset[glyphID]
            code = code + 1
        glyphID = glyphID + 1
    return encoding
def packEncoding0(charset, encoding, strings):
    """Pack *encoding* in format 0: one code byte per glyph in glyph order."""
    nameToCode = {}
    for code, glyphName in enumerate(encoding):
        if glyphName != ".notdef":
            nameToCode[glyphName] = code
    codes = [nameToCode.get(glyphName) for glyphName in charset[1:]]
    # Trailing unencoded glyphs need not be written at all.
    while codes and codes[-1] is None:
        codes.pop()
    data = [packCard8(0), packCard8(len(codes))]
    for code in codes:
        # Interior unencoded glyphs are written as code 0.
        data.append(packCard8(code if code is not None else 0))
    return bytesjoin(data)
def packEncoding1(charset, encoding, strings):
    """Pack *encoding* in format 1: ranges of consecutive codes.

    Unencoded glyphs are represented by code -1 while building ranges and
    written out as code 0.
    """
    fmt = 1
    m = {}
    for code in range(len(encoding)):
        name = encoding[code]
        if name != ".notdef":
            m[name] = code
    ranges = []
    first = None
    end = 0
    for name in charset[1:]:
        code = m.get(name, -1)
        if first is None:
            first = code
        elif end + 1 != code:
            # Consecutive run broke; flush it and start a new one.
            nLeft = end - first
            ranges.append((first, nLeft))
            first = code
        end = code
    # Flush the final run.
    nLeft = end - first
    ranges.append((first, nLeft))
    # remove unencoded glyphs at the end.
    while ranges and ranges[-1][0] == -1:
        ranges.pop()
    data = [packCard8(fmt), packCard8(len(ranges))]
    for first, nLeft in ranges:
        if first == -1:  # unencoded
            first = 0
        data.append(packCard8(first) + packCard8(nLeft))
    return bytesjoin(data)
class FDArrayConverter(TableConverter):

    """Converter for the FDArray operator: offset -> FDArrayIndex."""

    def _read(self, parent, value):
        try:
            vstore = parent.VarStore
        except AttributeError:
            # Plain CFF (no variation store on the top dict).
            vstore = None
        file = parent.file
        isCFF2 = parent._isCFF2
        file.seek(value)
        fdArray = FDArrayIndex(file, isCFF2=isCFF2)
        # Propagate context the font dicts need when decompiling.
        fdArray.vstore = vstore
        fdArray.strings = parent.strings
        fdArray.GlobalSubrs = parent.GlobalSubrs
        return fdArray

    def write(self, parent, value):
        # Real offset is patched in later by the compiler.
        return 0  # dummy value

    def xmlRead(self, name, attrs, content, parent):
        fdArray = FDArrayIndex()
        for element in content:
            if isinstance(element, str):
                continue
            name, attrs, content = element
            fdArray.fromXML(name, attrs, content)
        return fdArray
class FDSelectConverter(SimpleConverter):

    """Converter for the FDSelect operator: offset -> FDSelect."""

    def _read(self, parent, value):
        file = parent.file
        file.seek(value)
        fdSelect = FDSelect(file, parent.numGlyphs)
        return fdSelect

    def write(self, parent, value):
        # Real offset is patched in later by the compiler.
        return 0  # dummy value

    # The FDSelect glyph data is written out to XML in the charstring keys,
    # so we write out only the format selector
    def xmlWrite(self, xmlWriter, name, value):
        xmlWriter.simpletag(name, [('format', value.format)])
        xmlWriter.newline()

    def xmlRead(self, name, attrs, content, parent):
        fmt = safeEval(attrs["format"])
        # Per-glyph indices are filled in later from the charstrings.
        file = None
        numGlyphs = None
        fdSelect = FDSelect(file, numGlyphs, fmt)
        return fdSelect
class VarStoreConverter(SimpleConverter):

    """Converter for the CFF2 VarStore operator: offset -> VarStoreData."""

    def _read(self, parent, value):
        file = parent.file
        file.seek(value)
        varStore = VarStoreData(file)
        varStore.decompile()
        return varStore

    def write(self, parent, value):
        # Real offset is patched in later by the compiler.
        return 0  # dummy value

    def xmlWrite(self, xmlWriter, name, value):
        value.writeXML(xmlWriter, name)

    def xmlRead(self, name, attrs, content, parent):
        varStore = VarStoreData()
        varStore.xmlRead(name, attrs, content, parent)
        return varStore
def packFDSelect0(fdSelectArray):
    """Pack FDSelect in format 0: one Card8 FD index per glyph."""
    chunks = [packCard8(0)]
    chunks.extend(packCard8(fdIndex) for fdIndex in fdSelectArray)
    return bytesjoin(chunks)
def packFDSelect3(fdSelectArray):
    """Pack FDSelect in format 3: (first GID, fd index) ranges followed
    by a sentinel GID.  Assumes fdSelectArray is non-empty."""
    fmt = 3
    fdRanges = []
    lenArray = len(fdSelectArray)
    lastFDIndex = -1
    for i in range(lenArray):
        fdIndex = fdSelectArray[i]
        if lastFDIndex != fdIndex:
            # A new range starts wherever the fd index changes.
            fdRanges.append([i, fdIndex])
            lastFDIndex = fdIndex
    # One past the last glyph index terminates the final range.
    sentinelGID = i + 1
    data = [packCard8(fmt)]
    data.append(packCard16(len(fdRanges)))
    for fdRange in fdRanges:
        data.append(packCard16(fdRange[0]))
        data.append(packCard8(fdRange[1]))
    data.append(packCard16(sentinelGID))
    return bytesjoin(data)
def packFDSelect4(fdSelectArray):
    """Pack FDSelect in format 4 (CFF2): like format 3 but with Card32
    GIDs and Card16 fd indices.  Assumes fdSelectArray is non-empty."""
    fmt = 4
    fdRanges = []
    lenArray = len(fdSelectArray)
    lastFDIndex = -1
    for i in range(lenArray):
        fdIndex = fdSelectArray[i]
        if lastFDIndex != fdIndex:
            # A new range starts wherever the fd index changes.
            fdRanges.append([i, fdIndex])
            lastFDIndex = fdIndex
    # One past the last glyph index terminates the final range.
    sentinelGID = i + 1
    data = [packCard8(fmt)]
    data.append(packCard32(len(fdRanges)))
    for fdRange in fdRanges:
        data.append(packCard32(fdRange[0]))
        data.append(packCard16(fdRange[1]))
    data.append(packCard32(sentinelGID))
    return bytesjoin(data)
class FDSelectCompiler(object):

    """Compiles FDSelect, honoring an explicit format (0, 3 or 4) or
    otherwise choosing the smaller of formats 0 and 3."""

    def __init__(self, fdSelect, parent):
        fmt = fdSelect.format
        gidArray = fdSelect.gidArray
        if fmt == 0:
            self.data = packFDSelect0(gidArray)
        elif fmt == 3:
            self.data = packFDSelect3(gidArray)
        elif fmt == 4:
            self.data = packFDSelect4(gidArray)
        else:
            # No format requested: pick the smaller of 0 and 3, and
            # record the winner back on the FDSelect object.
            data0 = packFDSelect0(gidArray)
            data3 = packFDSelect3(gidArray)
            if len(data0) < len(data3):
                self.data = data0
                fdSelect.format = 0
            else:
                self.data = data3
                fdSelect.format = 3
        self.parent = parent

    def setPos(self, pos, endPos):
        self.parent.rawDict["FDSelect"] = pos

    def getDataLength(self):
        return len(self.data)

    def toFile(self, file):
        file.write(self.data)
class VarStoreCompiler(object):

    """Compiles the CFF2 VarStore: a Card16 length prefix plus the
    compiled variation-store data."""

    def __init__(self, varStoreData, parent):
        self.parent = parent
        if not varStoreData.data:
            varStoreData.compile()
        self.data = bytesjoin(
            [packCard16(len(varStoreData.data)), varStoreData.data])

    def setPos(self, pos, endPos):
        self.parent.rawDict["VarStore"] = pos

    def getDataLength(self):
        return len(self.data)

    def toFile(self, file):
        file.write(self.data)
class ROSConverter(SimpleConverter):

    """Converter for the ROS (Registry, Ordering, Supplement) operator."""

    def xmlWrite(self, xmlWriter, name, value):
        registry, order, supplement = value
        attrList = [
            ('Registry', tostr(registry)),
            ('Order', tostr(order)),
            ('Supplement', supplement),
        ]
        xmlWriter.simpletag(name, attrList)
        xmlWriter.newline()

    def xmlRead(self, name, attrs, content, parent):
        return (attrs['Registry'], attrs['Order'],
                safeEval(attrs['Supplement']))
# Top DICT operator table for CFF 1.0.  Each entry is
# (opcode, name, argument type, default, converter); a tuple opcode is a
# two-byte (escape, op) pair.  Entries with converter None get one
# assigned by addConverters() below, based on the argument type.
topDictOperators = [
    # opcode name argument type default converter
    (25, 'maxstack', 'number', None, None),
    ((12, 30), 'ROS', ('SID', 'SID', 'number'), None, ROSConverter()),
    ((12, 20), 'SyntheticBase', 'number', None, None),
    (0, 'version', 'SID', None, None),
    (1, 'Notice', 'SID', None, Latin1Converter()),
    ((12, 0), 'Copyright', 'SID', None, Latin1Converter()),
    (2, 'FullName', 'SID', None, None),
    ((12, 38), 'FontName', 'SID', None, None),
    (3, 'FamilyName', 'SID', None, None),
    (4, 'Weight', 'SID', None, None),
    ((12, 1), 'isFixedPitch', 'number', 0, None),
    ((12, 2), 'ItalicAngle', 'number', 0, None),
    ((12, 3), 'UnderlinePosition', 'number', -100, None),
    ((12, 4), 'UnderlineThickness', 'number', 50, None),
    ((12, 5), 'PaintType', 'number', 0, None),
    ((12, 6), 'CharstringType', 'number', 2, None),
    ((12, 7), 'FontMatrix', 'array', [0.001, 0, 0, 0.001, 0, 0], None),
    (13, 'UniqueID', 'number', None, None),
    (5, 'FontBBox', 'array', [0, 0, 0, 0], None),
    ((12, 8), 'StrokeWidth', 'number', 0, None),
    (14, 'XUID', 'array', None, None),
    ((12, 21), 'PostScript', 'SID', None, None),
    ((12, 22), 'BaseFontName', 'SID', None, None),
    ((12, 23), 'BaseFontBlend', 'delta', None, None),
    ((12, 31), 'CIDFontVersion', 'number', 0, None),
    ((12, 32), 'CIDFontRevision', 'number', 0, None),
    ((12, 33), 'CIDFontType', 'number', 0, None),
    ((12, 34), 'CIDCount', 'number', 8720, None),
    (15, 'charset', 'number', None, CharsetConverter()),
    ((12, 35), 'UIDBase', 'number', None, None),
    (16, 'Encoding', 'number', 0, EncodingConverter()),
    (18, 'Private', ('number', 'number'), None, PrivateDictConverter()),
    ((12, 37), 'FDSelect', 'number', None, FDSelectConverter()),
    ((12, 36), 'FDArray', 'number', None, FDArrayConverter()),
    (17, 'CharStrings', 'number', None, CharStringsConverter()),
    (24, 'VarStore', 'number', None, VarStoreConverter()),
]
# Top DICT operator table for CFF2, which permits only a restricted set
# of top-level entries.
topDictOperators2 = [
    # opcode name argument type default converter
    (25, 'maxstack', 'number', None, None),
    ((12, 7), 'FontMatrix', 'array', [0.001, 0, 0, 0.001, 0, 0], None),
    ((12, 37), 'FDSelect', 'number', None, FDSelectConverter()),
    ((12, 36), 'FDArray', 'number', None, FDArrayConverter()),
    (17, 'CharStrings', 'number', None, CharStringsConverter()),
    (24, 'VarStore', 'number', None, VarStoreConverter()),
]
# Note! FDSelect and FDArray must both preceed CharStrings in the output XML build order,
# in order for the font to compile back from xml.
# XML element name used to serialize CFF2 blend lists.
kBlendDictOpName = "blend"
# Opcode of the CFF2 'blend' operator.
blendOp = 23
# Private DICT operator table for CFF 1.0:
# (opcode, name, argument type, default, converter).
privateDictOperators = [
    # opcode name argument type default converter
    (22, "vsindex", 'number', None, None),
    (blendOp, kBlendDictOpName, 'blendList', None, None),  # This is for reading to/from XML: it not written to CFF.
    (6, 'BlueValues', 'delta', None, None),
    (7, 'OtherBlues', 'delta', None, None),
    (8, 'FamilyBlues', 'delta', None, None),
    (9, 'FamilyOtherBlues', 'delta', None, None),
    ((12, 9), 'BlueScale', 'number', 0.039625, None),
    ((12, 10), 'BlueShift', 'number', 7, None),
    ((12, 11), 'BlueFuzz', 'number', 1, None),
    (10, 'StdHW', 'number', None, None),
    (11, 'StdVW', 'number', None, None),
    ((12, 12), 'StemSnapH', 'delta', None, None),
    ((12, 13), 'StemSnapV', 'delta', None, None),
    ((12, 14), 'ForceBold', 'number', 0, None),
    ((12, 15), 'ForceBoldThreshold', 'number', None, None),  # deprecated
    ((12, 16), 'lenIV', 'number', None, None),  # deprecated
    ((12, 17), 'LanguageGroup', 'number', 0, None),
    ((12, 18), 'ExpansionFactor', 'number', 0.06, None),
    ((12, 19), 'initialRandomSeed', 'number', 0, None),
    (20, 'defaultWidthX', 'number', 0, None),
    (21, 'nominalWidthX', 'number', 0, None),
    (19, 'Subrs', 'number', None, SubrsConverter()),
]
# Private DICT operator table for CFF2 (drops the deprecated and
# CFF-1-only entries).
privateDictOperators2 = [
    # opcode name argument type default converter
    (22, "vsindex", 'number', None, None),
    (blendOp, kBlendDictOpName, 'blendList', None, None),  # This is for reading to/from XML: it not written to CFF.
    (6, 'BlueValues', 'delta', None, None),
    (7, 'OtherBlues', 'delta', None, None),
    (8, 'FamilyBlues', 'delta', None, None),
    (9, 'FamilyOtherBlues', 'delta', None, None),
    ((12, 9), 'BlueScale', 'number', 0.039625, None),
    ((12, 10), 'BlueShift', 'number', 7, None),
    ((12, 11), 'BlueFuzz', 'number', 1, None),
    (10, 'StdHW', 'number', None, None),
    (11, 'StdVW', 'number', None, None),
    ((12, 12), 'StemSnapH', 'delta', None, None),
    ((12, 13), 'StemSnapV', 'delta', None, None),
    ((12, 17), 'LanguageGroup', 'number', 0, None),
    ((12, 18), 'ExpansionFactor', 'number', 0.06, None),
    (19, 'Subrs', 'number', None, SubrsConverter()),
]
def addConverters(table):
    """Fill in default converters for table entries that lack one,
    chosen by argument type (blendList entries stay converter-less)."""
    for i, (op, name, arg, default, conv) in enumerate(table):
        if conv is not None:
            continue
        if arg in ("delta", "array"):
            conv = ArrayConverter()
        elif arg == "number":
            conv = NumberConverter()
        elif arg == "SID":
            conv = ASCIIConverter()
        elif arg == 'blendList':
            conv = None
        else:
            assert False
        table[i] = op, name, arg, default, conv

addConverters(privateDictOperators)
addConverters(topDictOperators)
class TopDictDecompiler(psCharStrings.DictDecompiler):
    # Decompiles binary Top DICT data, driven by the top dict operator table.
    operators = buildOperatorDict(topDictOperators)
class PrivateDictDecompiler(psCharStrings.DictDecompiler):
    # Decompiles binary Private DICT data, driven by the private dict operator table.
    operators = buildOperatorDict(privateDictOperators)
class DictCompiler(object):

    """Base compiler for Top/Font/Private dicts.

    Snapshots the dict object's non-default values into ``rawDict`` and
    encodes them as CFF operands + operators in the dict's canonical
    operator order.
    """

    # Maximum operand stack available to a CFF2 blend; 0 means "never
    # fits", so blends are always chunked.  PrivateDictCompiler overrides.
    maxBlendStack = 0

    def __init__(self, dictObj, strings, parent, isCFF2=None):
        if strings:
            assert isinstance(strings, IndexedStrings)
        if isCFF2 is None and hasattr(parent, "isCFF2"):
            # Inherit the CFF2-ness from the parent compiler.
            isCFF2 = parent.isCFF2
            assert isCFF2 is not None
        self.isCFF2 = isCFF2
        self.dictObj = dictObj
        self.strings = strings
        self.parent = parent
        rawDict = {}
        for name in dictObj.order:
            value = getattr(dictObj, name, None)
            if value is None:
                continue
            conv = dictObj.converters[name]
            value = conv.write(dictObj, value)
            if value == dictObj.defaults.get(name):
                # Default values need not be written.
                continue
            rawDict[name] = value
        self.rawDict = rawDict

    def setPos(self, pos, endPos):
        # Subclasses record their position in the parent's rawDict.
        pass

    def getDataLength(self):
        return len(self.compile("getDataLength"))

    def compile(self, reason):
        """Encode rawDict as binary DICT data (operands then operator)."""
        log.log(DEBUG, "-- compiling %s for %s", self.__class__.__name__, reason)
        rawDict = self.rawDict
        data = []
        for name in self.dictObj.order:
            value = rawDict.get(name)
            if value is None:
                continue
            op, argType = self.opcodes[name]
            if isinstance(argType, tuple):
                # Multi-operand operator (e.g. ROS, Private).
                l = len(argType)
                assert len(value) == l, "value doesn't match arg type"
                for i in range(l):
                    arg = argType[i]
                    v = value[i]
                    arghandler = getattr(self, "arg_" + arg)
                    data.append(arghandler(v))
            else:
                arghandler = getattr(self, "arg_" + argType)
                data.append(arghandler(value))
            data.append(op)
        data = bytesjoin(data)
        return data

    def toFile(self, file):
        data = self.compile("toFile")
        file.write(data)

    def arg_number(self, num):
        # A list value means a CFF2 blend: encode all deltas, the blend
        # count (1) and the blend operator.
        if isinstance(num, list):
            data = [encodeNumber(val) for val in num]
            data.append(encodeNumber(1))
            data.append(bytechr(blendOp))
            datum = bytesjoin(data)
        else:
            datum = encodeNumber(num)
        return datum

    def arg_SID(self, s):
        return psCharStrings.encodeIntCFF(self.strings.getSID(s))

    def arg_array(self, value):
        data = []
        for num in value:
            data.append(self.arg_number(num))
        return bytesjoin(data)

    def arg_delta(self, value):
        if not value:
            return b""
        val0 = value[0]
        if isinstance(val0, list):
            # Blend lists: handled by the chunking blend encoder.
            data = self.arg_delta_blend(value)
        else:
            # Plain delta encoding: each value relative to the previous.
            out = []
            last = 0
            for v in value:
                out.append(v - last)
                last = v
            data = []
            for num in out:
                data.append(encodeNumber(num))
        return bytesjoin(data)

    def arg_delta_blend(self, value):
        """A delta list with blend lists has to be *all* blend lists.
        The value is a list is arranged as follows::
            [
                [V0, d0..dn]
                [V1, d0..dn]
                ...
                [Vm, d0..dn]
            ]
        ``V`` is the absolute coordinate value from the default font, and ``d0-dn``
        are the delta values from the *n* regions. Each ``V`` is an absolute
        coordinate from the default font.
        We want to return a list::
            [
                [v0, v1..vm]
                [d0..dn]
                ...
                [d0..dn]
                numBlends
                blendOp
            ]
        where each ``v`` is relative to the previous default font value.
        """
        numMasters = len(value[0])
        numBlends = len(value)
        numStack = (numBlends * numMasters) + 1
        if numStack > self.maxBlendStack:
            # Figure out the max number of value we can blend
            # and divide this list up into chunks of that size.
            numBlendValues = int((self.maxBlendStack - 1) / numMasters)
            out = []
            while True:
                numVal = min(len(value), numBlendValues)
                if numVal == 0:
                    break
                valList = value[0:numVal]
                out1 = self.arg_delta_blend(valList)
                out.extend(out1)
                value = value[numVal:]
        else:
            firstList = [0] * numBlends
            deltaList = [None] * numBlends
            i = 0
            prevVal = 0
            while i < numBlends:
                # For PrivateDict BlueValues, the default font
                # values are absolute, not relative.
                # Must convert these back to relative coordinates
                # before writing to CFF2.
                defaultValue = value[i][0]
                firstList[i] = defaultValue - prevVal
                prevVal = defaultValue
                deltaList[i] = value[i][1:]
                i += 1
            relValueList = firstList
            for blendList in deltaList:
                relValueList.extend(blendList)
            out = [encodeNumber(val) for val in relValueList]
            out.append(encodeNumber(numBlends))
            out.append(bytechr(blendOp))
        return out
def encodeNumber(num):
    """Encode *num* as a CFF operand: real encoding for floats,
    integer encoding otherwise."""
    if isinstance(num, float):
        return psCharStrings.encodeFloat(num)
    return psCharStrings.encodeIntCFF(num)
class TopDictCompiler(DictCompiler):

    """Compiler for the Top DICT; produces child compilers for the
    tables the dict references (charset, Encoding, FDSelect, FDArray,
    CharStrings, Private, VarStore)."""

    opcodes = buildOpcodeDict(topDictOperators)

    def getChildren(self, strings):
        isCFF2 = self.isCFF2
        children = []
        if self.dictObj.cff2GetGlyphOrder is None:
            # CFF 1.0: charset and Encoding are written to the file.
            if hasattr(self.dictObj, "charset") and self.dictObj.charset:
                if hasattr(self.dictObj, "ROS"):  # aka isCID
                    charsetCode = None
                else:
                    charsetCode = getStdCharSet(self.dictObj.charset)
                if charsetCode is None:
                    children.append(CharsetCompiler(strings, self.dictObj.charset, self))
                else:
                    # Predefined charset: store its id directly.
                    self.rawDict["charset"] = charsetCode
            if hasattr(self.dictObj, "Encoding") and self.dictObj.Encoding:
                encoding = self.dictObj.Encoding
                # A str value names a predefined encoding; only custom
                # (list) encodings get their own table.
                if not isinstance(encoding, str):
                    children.append(EncodingCompiler(strings, encoding, self))
        else:
            # CFF2: glyph order comes from the font; only VarStore here.
            if hasattr(self.dictObj, "VarStore"):
                varStoreData = self.dictObj.VarStore
                varStoreComp = VarStoreCompiler(varStoreData, self)
                children.append(varStoreComp)
        if hasattr(self.dictObj, "FDSelect"):
            # I have not yet supported merging a ttx CFF-CID font, as there are
            # interesting issues about merging the FDArrays. Here I assume that
            # either the font was read from XML, and the FDSelect indices are all
            # in the charstring data, or the FDSelect array is already fully defined.
            fdSelect = self.dictObj.FDSelect
            # probably read in from XML; assume fdIndex in CharString data
            if len(fdSelect) == 0:
                charStrings = self.dictObj.CharStrings
                for name in self.dictObj.charset:
                    fdSelect.append(charStrings[name].fdSelectIndex)
            fdSelectComp = FDSelectCompiler(fdSelect, self)
            children.append(fdSelectComp)
        if hasattr(self.dictObj, "CharStrings"):
            items = []
            charStrings = self.dictObj.CharStrings
            for name in self.dictObj.charset:
                items.append(charStrings[name])
            charStringsComp = CharStringsCompiler(
                items, strings, self, isCFF2=isCFF2)
            children.append(charStringsComp)
        if hasattr(self.dictObj, "FDArray"):
            # I have not yet supported merging a ttx CFF-CID font, as there are
            # interesting issues about merging the FDArrays. Here I assume that the
            # FDArray info is correct and complete.
            fdArrayIndexComp = self.dictObj.FDArray.getCompiler(strings, self)
            children.append(fdArrayIndexComp)
            children.extend(fdArrayIndexComp.getChildren(strings))
        if hasattr(self.dictObj, "Private"):
            privComp = self.dictObj.Private.getCompiler(strings, self)
            children.append(privComp)
            children.extend(privComp.getChildren(strings))
        return children
class FontDictCompiler(DictCompiler):

    """Compiler for an FDArray font dict.

    Uses the top dict opcode table but only writes the subset of keys a
    font dict supports; warns about any other keys that would be lost.
    """

    opcodes = buildOpcodeDict(topDictOperators)

    def __init__(self, dictObj, strings, parent, isCFF2=None):
        super(FontDictCompiler, self).__init__(dictObj, strings, parent, isCFF2=isCFF2)
        #
        # We now take some effort to detect if there were any key/value pairs
        # supplied that were ignored in the FontDict context, and issue a warning
        # for those cases.
        #
        ignoredNames = []
        dictObj = self.dictObj
        for name in sorted(set(dictObj.converters) - set(dictObj.order)):
            if name in dictObj.rawDict:
                # The font was directly read from binary. In this
                # case, we want to report *all* "useless" key/value
                # pairs that are in the font, not just the ones that
                # are different from the default.
                ignoredNames.append(name)
            else:
                # The font was probably read from a TTX file. We only
                # warn about keys whos value is not the default. The
                # ones that have the default value will not be written
                # to binary anyway.
                default = dictObj.defaults.get(name)
                if default is not None:
                    conv = dictObj.converters[name]
                    default = conv.read(dictObj, default)
                if getattr(dictObj, name, None) != default:
                    ignoredNames.append(name)
        if ignoredNames:
            log.warning(
                "Some CFF FDArray/FontDict keys were ignored upon compile: " +
                " ".join(sorted(ignoredNames)))

    def getChildren(self, strings):
        children = []
        if hasattr(self.dictObj, "Private"):
            privComp = self.dictObj.Private.getCompiler(strings, self)
            children.append(privComp)
            children.extend(privComp.getChildren(strings))
        return children
class PrivateDictCompiler(DictCompiler):

    """Compiler for a Private DICT; its position is recorded on the
    parent as a (size, offset) pair."""

    # CFF2 private dicts may blend up to the full operand stack.
    maxBlendStack = maxStackLimit
    opcodes = buildOpcodeDict(privateDictOperators)

    def setPos(self, pos, endPos):
        size = endPos - pos
        self.parent.rawDict["Private"] = size, pos
        self.pos = pos

    def getChildren(self, strings):
        children = []
        if hasattr(self.dictObj, "Subrs"):
            children.append(self.dictObj.Subrs.getCompiler(strings, self))
        return children
class BaseDict(object):

    """Common behavior for Top/Font/Private dicts.

    Raw operator values live in ``rawDict``; attribute access lazily
    converts them through the per-operator converters (see __getattr__).
    """

    def __init__(self, strings=None, file=None, offset=None, isCFF2=None):
        # isCFF2 must be supplied exactly when reading from a file.
        assert (isCFF2 is None) == (file is None)
        self.rawDict = {}
        self.skipNames = []
        self.strings = strings
        if file is None:
            return
        self._isCFF2 = isCFF2
        self.file = file
        if offset is not None:
            log.log(DEBUG, "loading %s at %s", self.__class__.__name__, offset)
            self.offset = offset

    def decompile(self, data):
        """Parse binary DICT *data* into rawDict."""
        log.log(DEBUG, " length %s is %d", self.__class__.__name__, len(data))
        dec = self.decompilerClass(self.strings, self)
        dec.decompile(data)
        self.rawDict = dec.getDict()
        self.postDecompile()

    def postDecompile(self):
        # Hook for subclasses; no-op by default.
        pass

    def getCompiler(self, strings, parent, isCFF2=None):
        return self.compilerClass(self, strings, parent, isCFF2=isCFF2)

    def __getattr__(self, name):
        """Lazily convert a raw operator value on first attribute access,
        then cache it via setattr so later accesses are plain lookups."""
        if name[:2] == name[-2:] == "__":
            # to make deepcopy() and pickle.load() work, we need to signal with
            # AttributeError that dunder methods like '__deepcopy__' or '__getstate__'
            # aren't implemented. For more details, see:
            # https://github.com/fonttools/fonttools/pull/1488
            raise AttributeError(name)
        value = self.rawDict.get(name, None)
        if value is None:
            value = self.defaults.get(name)
        if value is None:
            raise AttributeError(name)
        conv = self.converters[name]
        value = conv.read(self, value)
        setattr(self, name, value)
        return value

    def toXML(self, xmlWriter):
        for name in self.order:
            if name in self.skipNames:
                continue
            value = getattr(self, name, None)
            # XXX For "charset" we never skip calling xmlWrite even if the
            # value is None, so we always write the following XML comment:
            #
            # <!-- charset is dumped separately as the 'GlyphOrder' element -->
            #
            # Charset is None when 'CFF ' table is imported from XML into an
            # empty TTFont(). By writing this comment all the time, we obtain
            # the same XML output whether roundtripping XML-to-XML or
            # dumping binary-to-XML
            if value is None and name != "charset":
                continue
            conv = self.converters[name]
            conv.xmlWrite(xmlWriter, name, value)
        ignoredNames = set(self.rawDict) - set(self.order)
        if ignoredNames:
            xmlWriter.comment(
                "some keys were ignored: %s" % " ".join(sorted(ignoredNames)))
            xmlWriter.newline()

    def fromXML(self, name, attrs, content):
        conv = self.converters[name]
        value = conv.xmlRead(name, attrs, content, self)
        setattr(self, name, value)
class TopDict(BaseDict):
    """The ``TopDict`` represents the top-level dictionary holding font
    information. CFF2 tables contain a restricted set of top-level entries
    as described `here <https://docs.microsoft.com/en-us/typography/opentype/spec/cff2#7-top-dict-data>`_,
    but CFF tables may contain a wider range of information. This information
    can be accessed through attributes or through the dictionary returned
    through the ``rawDict`` property:

    .. code:: python

        font = tt["CFF "].cff[0]
        font.FamilyName
        # 'Linux Libertine O'
        font.rawDict["FamilyName"]
        # 'Linux Libertine O'

    More information is available in the CFF file's private dictionary, accessed
    via the ``Private`` property:

    .. code:: python

        tt["CFF "].cff[0].Private.BlueValues
        # [-15, 0, 515, 515, 666, 666]
    """

    # CFF(1) operator tables by default; __init__ swaps in the CFF2 tables
    # when isCFF2 is true.
    defaults = buildDefaults(topDictOperators)
    converters = buildConverters(topDictOperators)
    compilerClass = TopDictCompiler
    order = buildOrder(topDictOperators)
    decompilerClass = TopDictDecompiler

    def __init__(self, strings=None, file=None, offset=None,
            GlobalSubrs=None, cff2GetGlyphOrder=None, isCFF2=None):
        """Set up operator tables; for CFF2 the charset comes from the
        cff2GetGlyphOrder callback instead of a charset operator."""
        super(TopDict, self).__init__(strings, file, offset, isCFF2=isCFF2)
        self.cff2GetGlyphOrder = cff2GetGlyphOrder
        self.GlobalSubrs = GlobalSubrs
        if isCFF2:
            self.defaults = buildDefaults(topDictOperators2)
            # CFF2 has no stored charset; the glyph order is supplied by
            # the callback passed in by the caller.
            self.charset = cff2GetGlyphOrder()
            self.order = buildOrder(topDictOperators2)
        else:
            self.defaults = buildDefaults(topDictOperators)
            self.order = buildOrder(topDictOperators)

    def getGlyphOrder(self):
        """Returns a list of glyph names in the CFF font."""
        return self.charset

    def postDecompile(self):
        # Peek at the CharStrings INDEX (if present) so the glyph count is
        # known before the charstrings themselves are decompiled.
        offset = self.rawDict.get("CharStrings")
        if offset is None:
            return
        # get the number of glyphs beforehand.
        self.file.seek(offset)
        if self._isCFF2:
            self.numGlyphs = readCard32(self.file)
        else:
            self.numGlyphs = readCard16(self.file)

    def toXML(self, xmlWriter):
        """Dump to XML, adjusting ``skipNames`` for CID vs non-CID fonts."""
        if hasattr(self, "CharStrings"):
            self.decompileAllCharStrings()
        if hasattr(self, "ROS"):
            # CID-keyed font (has a ROS operator): suppress Encoding.
            self.skipNames = ['Encoding']
        if not hasattr(self, "ROS") or not hasattr(self, "CharStrings"):
            # these values have default values, but I only want them to show up
            # in CID fonts.
            self.skipNames = [
                'CIDFontVersion', 'CIDFontRevision', 'CIDFontType', 'CIDCount']
        BaseDict.toXML(self, xmlWriter)

    def decompileAllCharStrings(self):
        # Make sure that all the Private Dicts have been instantiated.
        for i, charString in enumerate(self.CharStrings.values()):
            try:
                charString.decompile()
            except:  # NOTE(review): bare except, but it logs and re-raises
                log.error("Error in charstring %s", i)
                raise

    def recalcFontBBox(self):
        """Recompute FontBBox as the union of all charstring bounds; fall
        back to the default FontBBox when no glyph has any contours."""
        fontBBox = None
        for charString in self.CharStrings.values():
            bounds = charString.calcBounds(self.CharStrings)
            if bounds is not None:
                if fontBBox is not None:
                    fontBBox = unionRect(fontBBox, bounds)
                else:
                    fontBBox = bounds
        if fontBBox is None:
            self.FontBBox = self.defaults['FontBBox'][:]
        else:
            self.FontBBox = list(intRect(fontBBox))
class FontDict(BaseDict):
    """A single FDArray FontDict; accepts any TopDict key on input but only
    exports/compiles the small useful subset listed in ``orderCFF(2)``."""
    #
    # Since fonttools used to pass a lot of fields that are not relevant in the FDArray
    # FontDict, there are 'ttx' files in the wild that contain all these. These got in
    # the ttx files because fonttools writes explicit values for all the TopDict default
    # values. These are not actually illegal in the context of an FDArray FontDict - you
    # can legally, per spec, put any arbitrary key/value pair in a FontDict - but are
    # useless since current major company CFF interpreters ignore anything but the set
    # listed in this file. So, we just silently skip them. An exception is Weight: this
    # is not used by any interpreter, but some foundries have asked that this be
    # supported in FDArray FontDicts just to preserve information about the design when
    # the font is being inspected.
    #
    # On top of that, there are fonts out there that contain such useless FontDict values.
    #
    # By subclassing TopDict, we *allow* all key/values from TopDict, both when reading
    # from binary or when reading from XML, but by overriding `order` with a limited
    # list of names, we ensure that only the useful names ever get exported to XML and
    # ever get compiled into the binary font.
    # (NOTE(review): the class actually subclasses BaseDict while borrowing
    # TopDict's converters/decompiler below; the comment above looks stale -
    # confirm against upstream history.)
    #
    # We override compilerClass so we can warn about "useless" key/value pairs, either
    # from the original binary font or from TTX input.
    #
    # See:
    # - https://github.com/fonttools/fonttools/issues/740
    # - https://github.com/fonttools/fonttools/issues/601
    # - https://github.com/adobe-type-tools/afdko/issues/137
    #
    defaults = {}
    converters = buildConverters(topDictOperators)  # accept any TopDict key
    compilerClass = FontDictCompiler
    orderCFF = ['FontName', 'FontMatrix', 'Weight', 'Private']  # CFF export set
    orderCFF2 = ['Private']  # CFF2 export set
    decompilerClass = TopDictDecompiler

    def __init__(self, strings=None, file=None, offset=None,
            GlobalSubrs=None, isCFF2=None, vstore=None):
        # GlobalSubrs is accepted for API symmetry with TopDict but unused here.
        super(FontDict, self).__init__(strings, file, offset, isCFF2=isCFF2)
        self.vstore = vstore
        self.setCFF2(isCFF2)

    def setCFF2(self, isCFF2):
        """Select the CFF or CFF2 export order. isCFF2 may be None
        (treated the same as False)."""
        if isCFF2:
            self.order = self.orderCFF2
            self._isCFF2 = True
        else:
            self.order = self.orderCFF
            self._isCFF2 = False
class PrivateDict(BaseDict):
    """Per-font private dictionary; selects CFF vs CFF2 operator tables at
    construction time."""
    defaults = buildDefaults(privateDictOperators)
    converters = buildConverters(privateDictOperators)
    order = buildOrder(privateDictOperators)
    decompilerClass = PrivateDictDecompiler
    compilerClass = PrivateDictCompiler

    def __init__(self, strings=None, file=None, offset=None, isCFF2=None,
            vstore=None):
        super(PrivateDict, self).__init__(strings, file, offset, isCFF2=isCFF2)
        self.vstore = vstore  # variation store; may be None for plain CFF
        if isCFF2:
            self.defaults = buildDefaults(privateDictOperators2)
            self.order = buildOrder(privateDictOperators2)
            # Provide dummy values. This avoids needing to provide
            # an isCFF2 state in a lot of places.
            self.nominalWidthX = self.defaultWidthX = None
        else:
            self.defaults = buildDefaults(privateDictOperators)
            self.order = buildOrder(privateDictOperators)

    @property
    def in_cff2(self):
        # True when this dict was built for a CFF2 table.
        return self._isCFF2

    def getNumRegions(self, vi=None):  # called from misc/psCharStrings.py
        """Return the number of variation regions for vsindex *vi* (defaults
        to self.vsindex, or 0 when absent)."""
        # if getNumRegions is being called, we can assume that VarStore exists.
        if vi is None:
            if hasattr(self, 'vsindex'):
                vi = self.vsindex
            else:
                vi = 0
        numRegions = self.vstore.getNumRegions(vi)
        return numRegions
class IndexedStrings(object):
    """SID -> string mapping.

    SIDs below ``cffStandardStringCount`` resolve to the CFF standard
    strings; everything above indexes into this object's own string list.
    """

    def __init__(self, file=None):
        if file is None:
            self.strings = []
        else:
            self.strings = [
                tostr(s, encoding="latin1") for s in Index(file, isCFF2=False)
            ]

    def getCompiler(self):
        return IndexedStringsCompiler(self, None, self, isCFF2=False)

    def __len__(self):
        return len(self.strings)

    def __getitem__(self, SID):
        if SID < cffStandardStringCount:
            return cffStandardStrings[SID]
        return self.strings[SID - cffStandardStringCount]

    def getSID(self, s):
        """Return the SID for *s*, appending it as a new string if needed."""
        if not hasattr(self, "stringMapping"):
            self.buildStringMapping()
        s = tostr(s, encoding="latin1")
        if s in cffStandardStringMapping:
            return cffStandardStringMapping[s]
        if s in self.stringMapping:
            return self.stringMapping[s]
        SID = len(self.strings) + cffStandardStringCount
        self.strings.append(s)
        self.stringMapping[s] = SID
        return SID

    def getStrings(self):
        return self.strings

    def buildStringMapping(self):
        # Reverse index: string -> SID (offset past the standard strings).
        self.stringMapping = {
            s: index + cffStandardStringCount
            for index, s in enumerate(self.strings)
        }
# The 391 Standard Strings as used in the CFF format.
# from Adobe Technical Note #5176, version 1.0, 18 March 1998
cffStandardStrings = ['.notdef', 'space', 'exclam', 'quotedbl', 'numbersign',
'dollar', 'percent', 'ampersand', 'quoteright', 'parenleft', 'parenright',
'asterisk', 'plus', 'comma', 'hyphen', 'period', 'slash', 'zero', 'one',
'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'colon',
'semicolon', 'less', 'equal', 'greater', 'question', 'at', 'A', 'B', 'C',
'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R',
'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash',
'bracketright', 'asciicircum', 'underscore', 'quoteleft', 'a', 'b', 'c',
'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r',
's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', 'braceright',
'asciitilde', 'exclamdown', 'cent', 'sterling', 'fraction', 'yen', 'florin',
'section', 'currency', 'quotesingle', 'quotedblleft', 'guillemotleft',
'guilsinglleft', 'guilsinglright', 'fi', 'fl', 'endash', 'dagger',
'daggerdbl', 'periodcentered', 'paragraph', 'bullet', 'quotesinglbase',
'quotedblbase', 'quotedblright', 'guillemotright', 'ellipsis', 'perthousand',
'questiondown', 'grave', 'acute', 'circumflex', 'tilde', 'macron', 'breve',
'dotaccent', 'dieresis', 'ring', 'cedilla', 'hungarumlaut', 'ogonek', 'caron',
'emdash', 'AE', 'ordfeminine', 'Lslash', 'Oslash', 'OE', 'ordmasculine', 'ae',
'dotlessi', 'lslash', 'oslash', 'oe', 'germandbls', 'onesuperior',
'logicalnot', 'mu', 'trademark', 'Eth', 'onehalf', 'plusminus', 'Thorn',
'onequarter', 'divide', 'brokenbar', 'degree', 'thorn', 'threequarters',
'twosuperior', 'registered', 'minus', 'eth', 'multiply', 'threesuperior',
'copyright', 'Aacute', 'Acircumflex', 'Adieresis', 'Agrave', 'Aring',
'Atilde', 'Ccedilla', 'Eacute', 'Ecircumflex', 'Edieresis', 'Egrave',
'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Ntilde', 'Oacute',
'Ocircumflex', 'Odieresis', 'Ograve', 'Otilde', 'Scaron', 'Uacute',
'Ucircumflex', 'Udieresis', 'Ugrave', 'Yacute', 'Ydieresis', 'Zcaron',
'aacute', 'acircumflex', 'adieresis', 'agrave', 'aring', 'atilde', 'ccedilla',
'eacute', 'ecircumflex', 'edieresis', 'egrave', 'iacute', 'icircumflex',
'idieresis', 'igrave', 'ntilde', 'oacute', 'ocircumflex', 'odieresis',
'ograve', 'otilde', 'scaron', 'uacute', 'ucircumflex', 'udieresis', 'ugrave',
'yacute', 'ydieresis', 'zcaron', 'exclamsmall', 'Hungarumlautsmall',
'dollaroldstyle', 'dollarsuperior', 'ampersandsmall', 'Acutesmall',
'parenleftsuperior', 'parenrightsuperior', 'twodotenleader', 'onedotenleader',
'zerooldstyle', 'oneoldstyle', 'twooldstyle', 'threeoldstyle', 'fouroldstyle',
'fiveoldstyle', 'sixoldstyle', 'sevenoldstyle', 'eightoldstyle',
'nineoldstyle', 'commasuperior', 'threequartersemdash', 'periodsuperior',
'questionsmall', 'asuperior', 'bsuperior', 'centsuperior', 'dsuperior',
'esuperior', 'isuperior', 'lsuperior', 'msuperior', 'nsuperior', 'osuperior',
'rsuperior', 'ssuperior', 'tsuperior', 'ff', 'ffi', 'ffl', 'parenleftinferior',
'parenrightinferior', 'Circumflexsmall', 'hyphensuperior', 'Gravesmall',
'Asmall', 'Bsmall', 'Csmall', 'Dsmall', 'Esmall', 'Fsmall', 'Gsmall', 'Hsmall',
'Ismall', 'Jsmall', 'Ksmall', 'Lsmall', 'Msmall', 'Nsmall', 'Osmall', 'Psmall',
'Qsmall', 'Rsmall', 'Ssmall', 'Tsmall', 'Usmall', 'Vsmall', 'Wsmall', 'Xsmall',
'Ysmall', 'Zsmall', 'colonmonetary', 'onefitted', 'rupiah', 'Tildesmall',
'exclamdownsmall', 'centoldstyle', 'Lslashsmall', 'Scaronsmall', 'Zcaronsmall',
'Dieresissmall', 'Brevesmall', 'Caronsmall', 'Dotaccentsmall', 'Macronsmall',
'figuredash', 'hypheninferior', 'Ogoneksmall', 'Ringsmall', 'Cedillasmall',
'questiondownsmall', 'oneeighth', 'threeeighths', 'fiveeighths', 'seveneighths',
'onethird', 'twothirds', 'zerosuperior', 'foursuperior', 'fivesuperior',
'sixsuperior', 'sevensuperior', 'eightsuperior', 'ninesuperior', 'zeroinferior',
'oneinferior', 'twoinferior', 'threeinferior', 'fourinferior', 'fiveinferior',
'sixinferior', 'seveninferior', 'eightinferior', 'nineinferior', 'centinferior',
'dollarinferior', 'periodinferior', 'commainferior', 'Agravesmall',
'Aacutesmall', 'Acircumflexsmall', 'Atildesmall', 'Adieresissmall', 'Aringsmall',
'AEsmall', 'Ccedillasmall', 'Egravesmall', 'Eacutesmall', 'Ecircumflexsmall',
'Edieresissmall', 'Igravesmall', 'Iacutesmall', 'Icircumflexsmall',
'Idieresissmall', 'Ethsmall', 'Ntildesmall', 'Ogravesmall', 'Oacutesmall',
'Ocircumflexsmall', 'Otildesmall', 'Odieresissmall', 'OEsmall', 'Oslashsmall',
'Ugravesmall', 'Uacutesmall', 'Ucircumflexsmall', 'Udieresissmall',
'Yacutesmall', 'Thornsmall', 'Ydieresissmall', '001.000', '001.001', '001.002',
'001.003', 'Black', 'Bold', 'Book', 'Light', 'Medium', 'Regular', 'Roman',
'Semibold'
]
# Number of standard strings defined by the CFF format.
cffStandardStringCount = 391
assert len(cffStandardStrings) == cffStandardStringCount

# build reverse mapping: string name -> SID
cffStandardStringMapping = {
    name: sid for sid, name in enumerate(cffStandardStrings)
}
cffISOAdobeStrings = [".notdef", "space", "exclam", "quotedbl", "numbersign",
"dollar", "percent", "ampersand", "quoteright", "parenleft", "parenright",
"asterisk", "plus", "comma", "hyphen", "period", "slash", "zero", "one", "two",
"three", "four", "five", "six", "seven", "eight", "nine", "colon", "semicolon",
"less", "equal", "greater", "question", "at", "A", "B", "C", "D", "E", "F", "G",
"H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W",
"X", "Y", "Z", "bracketleft", "backslash", "bracketright", "asciicircum",
"underscore", "quoteleft", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
"k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z",
"braceleft", "bar", "braceright", "asciitilde", "exclamdown", "cent",
"sterling", "fraction", "yen", "florin", "section", "currency", "quotesingle",
"quotedblleft", "guillemotleft", "guilsinglleft", "guilsinglright", "fi", "fl",
"endash", "dagger", "daggerdbl", "periodcentered", "paragraph", "bullet",
"quotesinglbase", "quotedblbase", "quotedblright", "guillemotright", "ellipsis",
"perthousand", "questiondown", "grave", "acute", "circumflex", "tilde",
"macron", "breve", "dotaccent", "dieresis", "ring", "cedilla", "hungarumlaut",
"ogonek", "caron", "emdash", "AE", "ordfeminine", "Lslash", "Oslash", "OE",
"ordmasculine", "ae", "dotlessi", "lslash", "oslash", "oe", "germandbls",
"onesuperior", "logicalnot", "mu", "trademark", "Eth", "onehalf", "plusminus",
"Thorn", "onequarter", "divide", "brokenbar", "degree", "thorn",
"threequarters", "twosuperior", "registered", "minus", "eth", "multiply",
"threesuperior", "copyright", "Aacute", "Acircumflex", "Adieresis", "Agrave",
"Aring", "Atilde", "Ccedilla", "Eacute", "Ecircumflex", "Edieresis", "Egrave",
"Iacute", "Icircumflex", "Idieresis", "Igrave", "Ntilde", "Oacute",
"Ocircumflex", "Odieresis", "Ograve", "Otilde", "Scaron", "Uacute",
"Ucircumflex", "Udieresis", "Ugrave", "Yacute", "Ydieresis", "Zcaron", "aacute",
"acircumflex", "adieresis", "agrave", "aring", "atilde", "ccedilla", "eacute",
"ecircumflex", "edieresis", "egrave", "iacute", "icircumflex", "idieresis",
"igrave", "ntilde", "oacute", "ocircumflex", "odieresis", "ograve", "otilde",
"scaron", "uacute", "ucircumflex", "udieresis", "ugrave", "yacute", "ydieresis",
"zcaron"]
cffISOAdobeStringCount = 229  # size of the ISOAdobe charset table above
assert len(cffISOAdobeStrings) == cffISOAdobeStringCount
cffIExpertStrings = [".notdef", "space", "exclamsmall", "Hungarumlautsmall",
"dollaroldstyle", "dollarsuperior", "ampersandsmall", "Acutesmall",
"parenleftsuperior", "parenrightsuperior", "twodotenleader", "onedotenleader",
"comma", "hyphen", "period", "fraction", "zerooldstyle", "oneoldstyle",
"twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle", "sixoldstyle",
"sevenoldstyle", "eightoldstyle", "nineoldstyle", "colon", "semicolon",
"commasuperior", "threequartersemdash", "periodsuperior", "questionsmall",
"asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior",
"lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior",
"tsuperior", "ff", "fi", "fl", "ffi", "ffl", "parenleftinferior",
"parenrightinferior", "Circumflexsmall", "hyphensuperior", "Gravesmall",
"Asmall", "Bsmall", "Csmall", "Dsmall", "Esmall", "Fsmall", "Gsmall", "Hsmall",
"Ismall", "Jsmall", "Ksmall", "Lsmall", "Msmall", "Nsmall", "Osmall", "Psmall",
"Qsmall", "Rsmall", "Ssmall", "Tsmall", "Usmall", "Vsmall", "Wsmall", "Xsmall",
"Ysmall", "Zsmall", "colonmonetary", "onefitted", "rupiah", "Tildesmall",
"exclamdownsmall", "centoldstyle", "Lslashsmall", "Scaronsmall", "Zcaronsmall",
"Dieresissmall", "Brevesmall", "Caronsmall", "Dotaccentsmall", "Macronsmall",
"figuredash", "hypheninferior", "Ogoneksmall", "Ringsmall", "Cedillasmall",
"onequarter", "onehalf", "threequarters", "questiondownsmall", "oneeighth",
"threeeighths", "fiveeighths", "seveneighths", "onethird", "twothirds",
"zerosuperior", "onesuperior", "twosuperior", "threesuperior", "foursuperior",
"fivesuperior", "sixsuperior", "sevensuperior", "eightsuperior", "ninesuperior",
"zeroinferior", "oneinferior", "twoinferior", "threeinferior", "fourinferior",
"fiveinferior", "sixinferior", "seveninferior", "eightinferior", "nineinferior",
"centinferior", "dollarinferior", "periodinferior", "commainferior",
"Agravesmall", "Aacutesmall", "Acircumflexsmall", "Atildesmall",
"Adieresissmall", "Aringsmall", "AEsmall", "Ccedillasmall", "Egravesmall",
"Eacutesmall", "Ecircumflexsmall", "Edieresissmall", "Igravesmall",
"Iacutesmall", "Icircumflexsmall", "Idieresissmall", "Ethsmall", "Ntildesmall",
"Ogravesmall", "Oacutesmall", "Ocircumflexsmall", "Otildesmall",
"Odieresissmall", "OEsmall", "Oslashsmall", "Ugravesmall", "Uacutesmall",
"Ucircumflexsmall", "Udieresissmall", "Yacutesmall", "Thornsmall",
"Ydieresissmall"]
cffExpertStringCount = 166  # size of the Expert charset table above
assert len(cffIExpertStrings) == cffExpertStringCount
cffExpertSubsetStrings = [".notdef", "space", "dollaroldstyle",
"dollarsuperior", "parenleftsuperior", "parenrightsuperior", "twodotenleader",
"onedotenleader", "comma", "hyphen", "period", "fraction", "zerooldstyle",
"oneoldstyle", "twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle",
"sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle", "colon",
"semicolon", "commasuperior", "threequartersemdash", "periodsuperior",
"asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior",
"lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior",
"tsuperior", "ff", "fi", "fl", "ffi", "ffl", "parenleftinferior",
"parenrightinferior", "hyphensuperior", "colonmonetary", "onefitted", "rupiah",
"centoldstyle", "figuredash", "hypheninferior", "onequarter", "onehalf",
"threequarters", "oneeighth", "threeeighths", "fiveeighths", "seveneighths",
"onethird", "twothirds", "zerosuperior", "onesuperior", "twosuperior",
"threesuperior", "foursuperior", "fivesuperior", "sixsuperior", "sevensuperior",
"eightsuperior", "ninesuperior", "zeroinferior", "oneinferior", "twoinferior",
"threeinferior", "fourinferior", "fiveinferior", "sixinferior", "seveninferior",
"eightinferior", "nineinferior", "centinferior", "dollarinferior",
"periodinferior", "commainferior"]
cffExpertSubsetStringCount = 87  # size of the ExpertSubset charset table above
assert len(cffExpertSubsetStrings) == cffExpertSubsetStringCount
| 30.913879 | 156 | 0.694007 |
21658900ead32dd2caada22f663e19f75f7bd6b4 | 5,711 | py | Python | sdk/python/pulumi_azure_native/cache/v20210301/get_private_endpoint_connection.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/cache/v20210301/get_private_endpoint_connection.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/cache/v20210301/get_private_endpoint_connection.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
# Public API of this generated module.
__all__ = [
    'GetPrivateEndpointConnectionResult',
    'AwaitableGetPrivateEndpointConnectionResult',
    'get_private_endpoint_connection',
]
@pulumi.output_type
class GetPrivateEndpointConnectionResult:
    """
    The Private Endpoint Connection resource.
    """
    # Auto-generated output type: __init__ validates the raw values handed
    # back by the Pulumi engine, and the properties below expose them
    # read-only via pulumi.get.
    def __init__(__self__, id=None, name=None, private_endpoint=None, private_link_service_connection_state=None, provisioning_state=None, type=None):
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if private_endpoint and not isinstance(private_endpoint, dict):
            raise TypeError("Expected argument 'private_endpoint' to be a dict")
        pulumi.set(__self__, "private_endpoint", private_endpoint)
        if private_link_service_connection_state and not isinstance(private_link_service_connection_state, dict):
            raise TypeError("Expected argument 'private_link_service_connection_state' to be a dict")
        pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="privateEndpoint")
    def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:
        """
        The resource of private end point.
        """
        return pulumi.get(self, "private_endpoint")

    @property
    @pulumi.getter(name="privateLinkServiceConnectionState")
    def private_link_service_connection_state(self) -> 'outputs.PrivateLinkServiceConnectionStateResponse':
        """
        A collection of information about the state of the connection between service consumer and provider.
        """
        return pulumi.get(self, "private_link_service_connection_state")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the private endpoint connection resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetPrivateEndpointConnectionResult(GetPrivateEndpointConnectionResult):
    # Awaitable wrapper: the value is already resolved, so __await__ yields
    # nothing and immediately returns a plain result object.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self  # never runs; only marks this as a generator
        return GetPrivateEndpointConnectionResult(
            id=self.id,
            name=self.name,
            private_endpoint=self.private_endpoint,
            private_link_service_connection_state=self.private_link_service_connection_state,
            provisioning_state=self.provisioning_state,
            type=self.type)
def get_private_endpoint_connection(cluster_name: Optional[str] = None,
                                    private_endpoint_connection_name: Optional[str] = None,
                                    resource_group_name: Optional[str] = None,
                                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateEndpointConnectionResult:
    """
    The Private Endpoint Connection resource.

    :param str cluster_name: The name of the RedisEnterprise cluster.
    :param str private_endpoint_connection_name: The name of the private endpoint connection associated with the Azure resource
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    """
    # Marshal arguments into the camelCase keys the provider expects.
    __args__ = dict()
    __args__['clusterName'] = cluster_name
    __args__['privateEndpointConnectionName'] = private_endpoint_connection_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Invoke the provider function pinned to this API version.
    __ret__ = pulumi.runtime.invoke('azure-native:cache/v20210301:getPrivateEndpointConnection', __args__, opts=opts, typ=GetPrivateEndpointConnectionResult).value

    return AwaitableGetPrivateEndpointConnectionResult(
        id=__ret__.id,
        name=__ret__.name,
        private_endpoint=__ret__.private_endpoint,
        private_link_service_connection_state=__ret__.private_link_service_connection_state,
        provisioning_state=__ret__.provisioning_state,
        type=__ret__.type)
| 42.303704 | 193 | 0.698477 |
7dadadef1dd0943294d6619155cf7140f3536b6a | 599 | py | Python | tencent/lengthOfLIS.py | summer-vacation/AlgoExec | 55c6c3e7890b596b709b50cafa415b9594c03edd | [
"MIT"
] | null | null | null | tencent/lengthOfLIS.py | summer-vacation/AlgoExec | 55c6c3e7890b596b709b50cafa415b9594c03edd | [
"MIT"
] | 2 | 2019-12-09T06:12:51.000Z | 2019-12-16T14:38:34.000Z | tencent/lengthOfLIS.py | summer-vacation/AlgoExec | 55c6c3e7890b596b709b50cafa415b9594c03edd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
File Name: lengthOfLIS
Author : jing
Date: 2020/3/24
https://leetcode-cn.com/problems/longest-increasing-subsequence/
最长上升子序列
"""
class Solution:
    def lengthOfLIS(self, nums) -> int:
        """Return the length of the longest strictly increasing subsequence.

        Uses patience sorting: ``tails[k]`` is the smallest possible tail of
        an increasing subsequence of length ``k + 1``, so the answer is
        ``len(tails)``. Runs in O(n log n) instead of the original O(n^2) DP;
        results are identical.
        """
        # local import: this module has no import section of its own
        from bisect import bisect_left

        if not nums:  # covers both None and empty input, as before
            return 0
        tails = []
        for num in nums:
            # First tail >= num; equal values replace (strictly increasing).
            pos = bisect_left(tails, num)
            if pos == len(tails):
                tails.append(num)  # num extends the longest subsequence
            else:
                tails[pos] = num  # tighten the best tail for length pos + 1
        return len(tails)
| 23.038462 | 67 | 0.475793 |
5b1165f2e66336ce08da4aaa4d3c62ee5105ee6e | 736 | py | Python | examples/alembic/migrations/versions/3bf110ac566b_initial_create_table.py | DanCardin/sqlalchemy-postgresql-audit | e4f9d9497cf77dc96a243c48cd07c7658d4265dc | [
"MIT"
] | 3 | 2019-06-24T07:28:11.000Z | 2020-07-14T08:08:27.000Z | examples/alembic/migrations/versions/3bf110ac566b_initial_create_table.py | DanCardin/sqlalchemy-postgresql-audit | e4f9d9497cf77dc96a243c48cd07c7658d4265dc | [
"MIT"
] | 7 | 2019-06-20T20:56:44.000Z | 2019-06-28T07:00:04.000Z | examples/alembic/migrations/versions/3bf110ac566b_initial_create_table.py | DanCardin/sqlalchemy-postgresql-audit | e4f9d9497cf77dc96a243c48cd07c7658d4265dc | [
"MIT"
] | 2 | 2021-03-04T03:36:15.000Z | 2021-11-09T15:28:57.000Z | """Initial create table
Revision ID: 3bf110ac566b
Revises:
Create Date: 2019-06-23 12:24:26.501538
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3bf110ac566b'  # this migration's unique id
down_revision = None  # None: first migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``test`` table (id PK + non-null name)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('test',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('name', sa.String(), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert upgrade() by dropping the ``test`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('test')
    # ### end Alembic commands ###
| 23 | 70 | 0.67663 |
6385f1a3ab345564a2a3ac7560f5b9d252838e37 | 15,751 | py | Python | topik/_version.py | ContinuumIO/topik | 3f943dce48fe8ca805868151c33f155a2d200a7c | [
"BSD-3-Clause"
] | 104 | 2015-02-06T10:37:32.000Z | 2019-10-11T09:53:10.000Z | topik/_version.py | sleitner/topik | 3f943dce48fe8ca805868151c33f155a2d200a7c | [
"BSD-3-Clause"
] | 61 | 2015-05-21T21:16:14.000Z | 2018-06-13T18:37:36.000Z | topik/_version.py | sleitner/topik | 3f943dce48fe8ca805868151c33f155a2d200a7c | [
"BSD-3-Clause"
] | 27 | 2015-03-09T07:46:08.000Z | 2021-03-22T13:27:40.000Z |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15 (https://github.com/warner/python-versioneer)
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Return the git keywords that `git archive` substitutes (if any)."""
    # these strings will be replaced by git during git-archive.
    # setup.py/versioneer.py will grep for the variable names, so they must
    # each be defined on a line of their own. _version.py will just call
    # get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    return {"refnames": git_refnames, "full": git_full}
class VersioneerConfig:
    """Plain attribute container for versioneer settings (see get_config)."""
def get_config():
    """Return the static configuration baked into this _version.py."""
    # these values are filled in when 'setup.py versioneer' creates
    # _version.py
    cfg = VersioneerConfig()
    for attr, value in (
        ("VCS", "git"),
        ("style", ""),
        ("tag_prefix", "v"),
        ("parentdir_prefix", "."),
        ("versionfile_source", "topik/_version.py"),
        ("verbose", False),
    ):
        setattr(cfg, attr, value)
    return cfg
class NotThisMethod(Exception):
    """Raised by a version-discovery strategy that does not apply here."""
# Template registry; left empty in the installed _version.py (presumably
# populated by versioneer.py itself - confirm against versioneer 0.15).
LONG_VERSION_PY = {}
# vcs name -> {method name -> handler}, filled by @register_vcs_handler.
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Decorator factory: file the decorated function under
    HANDLERS[vcs][method] and return it unchanged."""
    def decorate(f):
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
    """Try each executable name in *commands* until one launches.

    Returns the stripped stdout of the first command that runs, or None when
    no executable could be started or the process exited non-zero.
    """
    assert isinstance(commands, list)
    process = None
    for command in commands:
        dispcmd = str([command] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen(
                [command] + args,
                cwd=cwd,
                stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None))
        except EnvironmentError:
            err = sys.exc_info()[1]
            if err.errno == errno.ENOENT:
                # this name isn't installed; fall through to the next one
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(err)
            return None
        break
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None
    stdout = process.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
        return None
    return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Derive a version dict from the source tree's directory name.

    Source tarballs conventionally unpack into ``<prefix><version>/``;
    raises NotThisMethod when the directory doesn't match the prefix.
    """
    dirname = os.path.basename(root)
    if not dirname.startswith(parentdir_prefix):
        if verbose:
            print("guessing rootdir is '%s', but '%s' doesn't start with "
                  "prefix '%s'" % (root, dirname, parentdir_prefix))
        raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
    return {
        "version": dirname[len(parentdir_prefix):],
        "full-revisionid": None,
        "dirty": False,
        "error": None,
    }
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Grep the expanded keyword values out of *versionfile_abs*.

    The code embedded in _version.py can just fetch the values directly;
    when used from setup.py we don't want to import _version.py, so the
    file is scanned with a regexp instead. Missing/unreadable files yield
    an empty dict.
    """
    keywords = {}
    try:
        with open(versionfile_abs, "r") as fobj:
            for line in fobj:
                stripped = line.strip()
                if stripped.startswith("git_refnames ="):
                    match = re.search(r'=\s*"(.*)"', line)
                    if match:
                        keywords["refnames"] = match.group(1)
                elif stripped.startswith("git_full ="):
                    match = re.search(r'=\s*"(.*)"', line)
                    if match:
                        keywords["full"] = match.group(1)
    except EnvironmentError:
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Compute a version dict from expanded git-archive keywords.

    Raises NotThisMethod when the keywords are absent or were never
    substituted by `git archive`.
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set(r.strip() for r in refnames.strip("()").split(","))
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set(r[len(TAG):] for r in refs if r.startswith(TAG))
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. Fall
        # back to a heuristic: assume all version tags contain a digit. The
        # old git %d expansion behaves like git log --decorate=short and
        # strips the refs/heads/ and refs/tags/ prefixes that would let us
        # distinguish branches from tags, so ignoring digit-less refnames
        # filters out common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set(r for r in refs if re.search(r'\d', r))
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            version = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % version)
            return {"version": version,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False,
                    "error": None}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False,
            "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Collect version "pieces" by running git inside the checkout at *root*.

    Returns a dict with keys: "long" (full hex id), "short" (7-char id),
    "closest-tag" (tag without *tag_prefix*, or None when untagged),
    "distance" (commits since that tag, or total commit count when
    untagged), "dirty" (bool) and "error" (message or None).

    Raises NotThisMethod when *root* is not a git checkout or a git
    command fails.
    """
    # this runs 'git' from the root of the source tree. This only gets called
    # if the git-archive 'subst' keywords were *not* expanded, and
    # _version.py hasn't already been rewritten with a short version string,
    # meaning we're inside a checked out source tree.
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        raise NotThisMethod("no .git directory")
    GITS = ["git"]
    if sys.platform == "win32":
        # Windows git may be installed as a .cmd shim or a plain .exe.
        GITS = ["git.cmd", "git.exe"]
    # if there is a tag, this yields TAG-NUM-gHEX[-dirty]
    # if there are no tags, this yields HEX[-dirty] (no NUM)
    describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
                                      "--always", "--long"],
                               cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later (from describe)
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX; the greedy (.+) lets the TAG itself contain hyphens.
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags anywhere; distance becomes the total commit count.
        pieces["closest-tag"] = None
        count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    return pieces
def plus_or_dot(pieces):
    """Return "." when the closest tag already contains a PEP 440 local
    version separator ("+"), otherwise "+".

    Treats a missing or None "closest-tag" as the empty string: for
    untagged repositories pieces["closest-tag"] is None, and the old
    ``pieces.get("closest-tag", "")`` still returned that None, making
    ``"+" in None`` raise TypeError.
    """
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Render TAG[+DISTANCE.gHEX[.dirty]] (PEP 440 local-version style).

    A tagged-but-dirty tree yields TAG+0.gHEX.dirty.  With no tags at all
    (git describe was a bare hash) the result is
    0+untagged.DISTANCE.gHEX[.dirty].
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tags anywhere
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        # "." if the tag already carries a "+" separator (inlined plus_or_dot)
        sep = "." if "+" in tag else "+"
        version += "%s%d.g%s" % (sep, pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """Render TAG[.post.devDISTANCE]; never marks -dirty.

    With no tags at all the result is 0.post.devDISTANCE.
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tags anywhere
        return "0.post.dev%d" % pieces["distance"]
    suffix = ".post.dev%d" % pieces["distance"] if pieces["distance"] else ""
    return tag + suffix
def render_pep440_post(pieces):
    """Render TAG[.postDISTANCE[.dev0]+gHEX]; ".dev0" means dirty.

    Note that .dev0 sorts backwards (a dirty tree appears "older" than the
    corresponding clean one), but you shouldn't be releasing software with
    -dirty anyway.  With no tags: 0.postDISTANCE[.dev0]+gHEX.
    """
    tag = pieces["closest-tag"]
    dev_marker = ".dev0" if pieces["dirty"] else ""
    if not tag:
        # exception #1: no tags anywhere
        return "0.post%d%s+g%s" % (pieces["distance"], dev_marker,
                                   pieces["short"])
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        # "." when the tag already holds a "+" (inlined plus_or_dot)
        sep = "." if "+" in tag else "+"
        version += ".post%d%s%sg%s" % (pieces["distance"], dev_marker,
                                       sep, pieces["short"])
    return version
def render_pep440_old(pieces):
    """Render TAG[.postDISTANCE[.dev0]]; ".dev0" means dirty.

    With no tags the result is always 0.postDISTANCE[.dev0].
    """
    base = pieces["closest-tag"]
    needs_post = bool(pieces["distance"] or pieces["dirty"])
    if not base:
        # exception #1: no tags -- ".post" is appended unconditionally
        base, needs_post = "0", True
    version = base
    if needs_post:
        version += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
    return version
def render_git_describe(pieces):
    """Render TAG[-DISTANCE-gHEX][-dirty], mimicking
    'git describe --tags --dirty --always'.

    With no tags: HEX[-dirty] (note: no 'g' prefix on the bare hash).
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"]:
            version = "%s-%d-g%s" % (version, pieces["distance"],
                                     pieces["short"])
    else:
        # exception #1: no tags, bare short hash
        version = pieces["short"]
    return version + ("-dirty" if pieces["dirty"] else "")
def render_git_describe_long(pieces):
    """Render TAG-DISTANCE-gHEX[-dirty], mimicking
    'git describe --tags --dirty --always --long'.

    The distance/hash part is unconditional.  With no tags: HEX[-dirty]
    (note: no 'g' prefix on the bare hash).
    """
    tag = pieces["closest-tag"]
    if tag:
        version = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1: no tags, bare short hash
        version = pieces["short"]
    return version + ("-dirty" if pieces["dirty"] else "")
def render(pieces, style):
    """Render *pieces* into the final version dict using *style*.

    An error recorded in pieces short-circuits to {"version": "unknown"}.
    An empty or "default" style means "pep440"; an unrecognized style
    raises ValueError.
    """
    if pieces["error"]:
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"]}
    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    return {"version": renderers[style](pieces),
            "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None}
def get_versions():
    """Return the version dict, trying each discovery strategy in turn.

    Order: expanded git-archive keywords, then running git in the source
    tree, then the parent-directory prefix; falls back to "0+unknown"
    with an explanatory "error" entry.
    """
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        # Frozen interpreters (py2exe/bbfreeze) may not define __file__.
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree"}
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    # Every strategy failed; report an unknown version rather than raising.
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version"}
| 34.167028 | 79 | 0.587582 |
6397a60bb633dd9772f20e5bcd71e9f966a80259 | 18,957 | py | Python | bakery_cli/scripts/genmetadata.py | jessamynsmith/fontbakery | fd75d7651c133ea27f8f98e81d1571e02ef5cf46 | [
"Apache-2.0"
] | null | null | null | bakery_cli/scripts/genmetadata.py | jessamynsmith/fontbakery | fd75d7651c133ea27f8f98e81d1571e02ef5cf46 | [
"Apache-2.0"
] | null | null | null | bakery_cli/scripts/genmetadata.py | jessamynsmith/fontbakery | fd75d7651c133ea27f8f98e81d1571e02ef5cf46 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# taken from https://code.google.com/p/googlefontdirectory/source/browse/tools/genmetadata/genmetadata.py
#
# Copyright 2012, Google Inc.
# Author: Jeremie Lenfant-Engelmann (jeremiele a google com)
# Author: Dave Crossland (dcrossland a google com )
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Portions Copyright (c) 2003, Michael C. Fletcher, TTFQuery Project
#
# A script for generating METADATA.json files, using fontTools.
#
# Ported to Python 3.x by Mikhail Kashkin
from __future__ import print_function
from __future__ import unicode_literals
from datetime import date
from fontTools import ttLib
import gzip
import io
import json
import os
import sys
import tempfile
if sys.version < '3':
    # NOTE(review): comparing sys.version as a *string* is fragile;
    # sys.version_info would be the safe test -- behavior kept as-is.
    import codecs
    # Py2: decode escape sequences in byte strings; '' for falsy input.
    def u(x):
        if not x:
            return ''
        return codecs.unicode_escape_decode(x)[0]
else:
    # Py3: strings are already text, so u() is the identity.
    def u(x):
        return x
# This is only here to have the JSON file data written in a predictable way
# We only care about the the json object being able to iterate over the keys, so
# other stuff might be broken...
# Output filenames: METADATA_JSON is the canonical metadata file;
# METADATA_JSON_NEW is written instead when one already exists, so a
# human can diff before replacing (see writeFile()).
METADATA_JSON = 'METADATA.json'
METADATA_JSON_NEW = 'METADATA.json.new'
def check_regular(filename):
    """Return True when *filename* looks like the family's Regular face.

    Regular means weight class 400 with none of the bits in mask 0b10001
    set in OS/2.fsSelection and none of the bits in mask 0b11 set in
    head.macStyle (presumably the italic/bold-style flags -- confirm
    against the OS/2 and head table specs).
    """
    font = fontToolsOpenFont(filename)
    selection_bits = font['OS/2'].fsSelection & 0b10001
    mac_style_bits = font['head'].macStyle & 0b11
    is_plain = not (selection_bits or mac_style_bits)
    return font['OS/2'].usWeightClass == 400 and is_plain
import sys

if sys.version_info[0] < 3:
    # Py2: decode raw byte strings to text.  This deliberately shadows
    # the builtin `unicode` with a UTF-8 decoding helper.
    def unicode(s):
        return s.decode('utf-8')
else:
    # Py3 bug fix: there is no `unicode` builtin, yet this module still
    # calls unicode(...) in several helpers (inferSubsets,
    # check_monospace, createFonts, ...), which raised NameError.
    # Paths from os.walk are already text on py3, so str is the correct
    # equivalent.
    unicode = str
def listdir(familydir):
    """Recursively collect the paths of all *.ttf files below *familydir*.

    Extension matching is case-insensitive; returned paths include the
    directory part (i.e. they are joined with *familydir*'s subdirs).
    """
    ttf_files = []
    for dirpath, dirnames, filenames in os.walk(familydir):
        # Bug fix: the old code wrapped fn in unicode(), which raises
        # NameError on Python 3 (no such builtin); os.walk already
        # yields text strings there.
        ttf_files += [os.path.join(dirpath, fn)
                      for fn in filenames if fn.lower().endswith('.ttf')]
    return ttf_files
class InsertOrderedDict(dict):
    """Dict preserving key insertion order so METADATA.json fields come
    out in a predictable order (predates this code relying on
    collections.OrderedDict).  See the module comment above: only the
    operations actually used for JSON dumping are guaranteed sound.
    """
    def __init__(self):
        dict.__init__(self)
        # Parallel list of keys in first-insertion order; every mutating
        # override below keeps it in sync.
        self.orderedKeys = []
    def __setitem__(self, key, item):
        dict.__setitem__(self, key, item)
        if key not in self.orderedKeys:
            self.orderedKeys.append(key)
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self.orderedKeys.remove(key)
    def clear(self):
        dict.clear(self)
        self.orderedKeys = []
    def copy(self):
        # Shallow copy that preserves the recorded key order.
        dictCopy = InsertOrderedDict()
        for key in self.orderedKeys:
            dictCopy[key] = dict.get(self, key)
        return dictCopy
    def keys(self):
        # Returns the ordered *list* (not a py3 view object).
        return self.orderedKeys
    def items(self):
        return [(key, dict.get(self, key)) for key in self.orderedKeys]
    def iteritems(self):
        return iter(list(self.items()))
    def iterkeys(self):
        return iter(self.orderedKeys)
    # That's definitely a mess, but doing our best
    def update(self, dictionary=None, **kwargs):
        # NOTE(review): calling update() with no positional mapping hits
        # None.keys() and raises AttributeError -- confirm all callers
        # pass a mapping.
        for key in dictionary.keys():
            if key not in self.orderedKeys:
                self.orderedKeys.append(key)
        if len(kwargs):
            for key in kwargs:
                if key not in self.orderedKeys:
                    self.orderedKeys.append(key)
        dict.update(self, dictionary, **kwargs)
    def pop(self, key, *args):
        # NOTE(review): popping a missing key raises ValueError from the
        # list removal before dict.pop can apply a default -- confirm.
        self.orderedKeys.remove(key)
        return dict.pop(self, key, *args)
    def __getattr__(self, key):
        # Exposes values as attributes (this is what lets
        # attrgetter('weight') work in createFonts).  Missing keys yield
        # None instead of raising AttributeError.
        return dict.get(self, key)
    def popitem(self):
        # FIFO: removes the *oldest* key and returns its value only
        # (not a (key, value) pair as dict.popitem would).
        if self.orderedKeys:
            return self.pop(self.orderedKeys[0])
        return dict.popitem(self)  # should raise KeyError
# Subset identifiers accepted as subset-file extensions (plus "menu");
# inferSubsets() ignores any extension not in this set.  frozenset for
# cheap membership tests and immutability.
SUPPORTED_SUBSETS = frozenset([
    "menu",
    "arabic",
    "armenian",
    "balinese",
    "bengali",
    "burmese",
    "cherokee",
    "cyrillic",
    "cyrillic-ext",
    "ethiopic",
    "georgian",
    "greek",
    "greek-ext",
    "gujarati",
    "hebrew",
    "hindi",
    "japanese",
    "javanese",
    "kannada",
    "khmer",
    "korean",
    "lao",
    "latin",
    "latin-ext",
    "malayalam",
    "oriya",
    "osmanya",
    "sinhala",
    "tamil",
    "telugu",
    "thai",
    "tibetan",
    "vietnamese",
    "devanagari"
])
# DC This should check the NAME table for correct values of the license
# and licenseurl keys
def inferLicense(familydir):
    """Guess the license short name from the first license file that
    UpstreamDirectory finds under *familydir*.

    Returns 'Apache2', 'OFL', 'UFL', or '' when nothing matches.
    """
    from bakery_cli.utils import UpstreamDirectory
    upstream = UpstreamDirectory(familydir)
    if not upstream.LICENSE:
        return ""
    with io.open(upstream.LICENSE[0]) as fp:
        text = fp.read()
    # Match the most distinctive phrase of each known license, in the
    # same precedence order as before.
    markers = (
        ('Apache License', 'Apache2'),
        ('SIL Open Font License, Version 1.1', 'OFL'),
        ('UBUNTU FONT LICENCE Version 1.0', 'UFL'),
    )
    for needle, short_name in markers:
        if needle in text:
            return short_name
    return ""
# DC This should check the italicangle matches the other ways italic can
# be seen - filename, full name, psname, macstyle, others?
def inferStyle(ftfont):
    """Return "italic" when the post table has a nonzero italicAngle,
    otherwise "normal"."""
    return "normal" if ftfont['post'].italicAngle == 0.0 else "italic"
# DC This should check both names match, and match across the family
def inferFamilyName(familydir):
    """Read the family name from the Regular face's name table.

    Uses nameID 1 (family name) and strips off any nameID 2 (style)
    text, because some authors embed the style in the family name.
    Returns "UNKNOWN" (and prints a red error) when no Regular face is
    found under *familydir*.
    """
    NAMEID_FAMILYNAME = 1
    NAMEID_STYLE = 2
    files = listdir(familydir)
    familyName = ""
    styleName = ""
    for f in files:
        if check_regular(f):
            ftfont = fontToolsOpenFont(f)
            for record in ftfont['name'].names:
                if record.nameID == NAMEID_FAMILYNAME:
                    # Embedded NULs mean a UTF-16-BE (Windows) record.
                    # NOTE(review): on py3 the .encode('utf-8') leaves
                    # *bytes* here; mixing bytes familyName with a str
                    # styleName in .replace() below would raise -- confirm
                    # the record types actually encountered.
                    if b'\000' in record.string:
                        familyName = record.string.decode('utf-16-be').encode('utf-8')
                    else:
                        familyName = record.string
                # Some authors creates TTF with wrong family name including styles
                if record.nameID == NAMEID_STYLE:
                    if b'\000' in record.string:
                        styleName = record.string.decode('utf-16-be').encode('utf-8')
                    else:
                        styleName = record.string
    # Strip the style text out of the family name (no-op when styleName
    # stayed the empty string).
    familyName = familyName.replace(styleName, '').strip()
    if familyName == "":
        string = "FATAL: No *-Regular.ttf found to set family name!"
        color = "red"
        ansiprint(string, color)
        return "UNKNOWN"
    else:
        return familyName
def fontToolsOpenFont(filepath):
    """Open *filepath* as a fontTools TTFont.

    On failure the offending path is printed (to make batch runs easy to
    debug), the file handle is closed, and the original exception is
    re-raised.
    """
    f = io.open(filepath, 'rb')
    try:
        return ttLib.TTFont(f)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # propagate untouched; also close the handle that was previously
        # leaked on failure.
        print(filepath)
        f.close()
        raise
# DC This should check both copyright strings match
def fontToolsGetCopyright(ftfont):
    """Return the copyright notice of *ftfont* (name table nameID 0).

    Takes the first non-empty nameID-0 record; records containing NUL
    bytes are treated as UTF-16-BE, with a literal 'COPYRIGHT' fallback
    when decoding fails.  Returns None implicitly when the font carries
    no copyright record at all (callers only rely on the truthy path).
    """
    # Bug fix (naming): nameID 0 is the copyright notice per the
    # OpenType name table; the old constant was mislabeled NAMEID_PSNAME
    # (the PostScript name is nameID 6).
    NAMEID_COPYRIGHT = 0
    notice = ""
    for record in ftfont['name'].names:
        if record.nameID == NAMEID_COPYRIGHT and not notice:
            if b'\000' in record.string:
                # Embedded NULs indicate a UTF-16-BE (Windows) record.
                try:
                    notice = u(record.string.decode('utf-16-be'))
                except Exception:  # narrowed from a bare except
                    notice = 'COPYRIGHT'
            else:
                # NOTE(review): on py3, str() of a bytes record yields
                # "b'...'" -- presumably these records are already text
                # here; confirm against real fonts before changing.
                notice = str(record.string)
    if notice:
        return notice
# DC What happens if there is no copyright set?
# DC This should check both names match, and stems match across the family
def fontToolsGetPSName(ftfont):
    """Return the PostScript name (name table nameID 6) of *ftfont*,
    or None when the font carries no such record."""
    NAMEID_PSNAME = 6
    psName = ""
    for record in ftfont['name'].names:
        if record.nameID != NAMEID_PSNAME or psName:
            continue
        raw = record.string
        # Embedded NULs indicate a UTF-16-BE encoded (Windows) record.
        psName = raw.decode('utf-16-be').encode('utf-8') if b'\000' in raw else raw
    if psName:
        return psName
# DC What happens if there is no PSName set?
# DC This should check both names match, and stems match across the
# family, and italic/bold match other metadata (weight, macstyle,
# italicangle)
def fontToolsGetFullName(ftfont):
    """Return the full font name (name table nameID 4) of *ftfont*,
    or None when the font carries no such record."""
    NAMEID_FULLNAME = 4
    fullName = ""
    for record in ftfont['name'].names:
        if record.nameID != NAMEID_FULLNAME or fullName:
            continue
        raw = record.string
        # Embedded NULs indicate a UTF-16-BE encoded (Windows) record.
        fullName = raw.decode('utf-16-be').encode('utf-8') if b'\000' in raw else raw
    if fullName:
        return fullName
# DC This should check both names match, and is found in designers.json
def fontToolsGetDesignerName(ftfont):
    """Return the designer name (name table nameID 9) of *ftfont*,
    or None when the font carries no such record."""
    # return 'DESIGNER'
    NAMEID_DESIGNERNAME = 9
    desName = ""
    for record in ftfont['name'].names:
        if record.nameID != NAMEID_DESIGNERNAME or desName:
            continue
        raw = record.string
        # Embedded NULs indicate a UTF-16-BE encoded (Windows) record.
        desName = raw.decode('utf-16-be').encode('utf-8') if b'\000' in raw else raw
    if desName:
        return desName
# DC This should check both names match
def fontToolsGetDesc(ftfont):
    """Return the description text (name table nameID 10) of *ftfont*,
    or "TODO" when the font has none (or only an empty record)."""
    NAMEID_DESC = 10
    for record in ftfont['name'].names:
        if record.nameID != NAMEID_DESC:
            continue
        raw = record.string
        # Embedded NULs indicate a UTF-16-BE encoded (Windows) record.
        desc = raw.decode('utf-16-be').encode('utf-8') if b'\000' in raw else raw
        # Only the first matching record is considered (original broke here).
        return desc if desc else "TODO"
    return "TODO"
# DC NameIDs are as follows:
# required marked *
# 0 Copyright notice.
# * 1 Family name
# * 2 Font Subfamily name (should match the OS/2.fsSelection bit - eg, fsSelection bit 6 set = Regular)
# * 4 Full name
# 5 Version string (Should be 'Version <number>.<number>' Caps with a space between “Version” and the number; one or more digits (0-9) of value less than 65535 followed by period followed by one or more digits of value less than 65535; Any character other than a digit will terminate the minor number and act as comment string “;” is sometimes used)
# * 6 Postscript name (Must have Platform: 1 [Macintosh]; Platform-specific encoding: 0 [Roman]; Language: 0 [English] and Platform: 3 [Windows]; Platform-specific encoding: 1 [Unicode]; Language: 0x409 [English (American)] and any nameID=6s other than those are out of spec; both must be identical; no longer than 63 characters; and restricted to the printable ASCII subset, codes 33 through 126; identical to the font name as stored in the CFF's Name INDEX;
# 7 Trademark
# 8 Manufacturer Name.
# 9 Designer Name
# 10 Description
# 11 URL Vendor (should have http://)
# 12 URL Designer (should have http://)
# 13 License Description
# 14 License URL
# 16 Preferred Family; must be different to ID 1 but make sense
# 17 Preferred Subfamily; must be different to ID 2, and unique in the Preferred Family
# 18 Compatible Full (Macintosh only); matches the Full Name
# 19 Sample text (best sample to display the font in)
# DC This should use fontTools not FontForge for everything
def createFonts(familydir, familyname):
    """Build the METADATA "fonts" list: one InsertOrderedDict per .ttf
    under *familydir*, sorted by weight class.

    *familyname* is used verbatim as every entry's "name".
    """
    from operator import attrgetter
    fonts = []
    files = listdir(familydir)
    for f in files:
        fontmetadata = InsertOrderedDict()
        ftfont = fontToolsOpenFont(f)
        fontmetadata["name"] = u(familyname)
        fontmetadata["postScriptName"] = u(fontToolsGetPSName(ftfont))
        fontmetadata["fullName"] = u(fontToolsGetFullName(ftfont))
        fontmetadata["style"] = u(inferStyle(ftfont))
        fontmetadata["weight"] = ftfont['OS/2'].usWeightClass
        # Bug fix: the old code wrapped f in unicode(), which raises
        # NameError on Python 3; listdir already yields text paths there.
        fontmetadata["filename"] = os.path.basename(f.lstrip('./'))
        fontmetadata["copyright"] = u(fontToolsGetCopyright(ftfont))
        fonts.append(fontmetadata)
    # attrgetter works because InsertOrderedDict.__getattr__ exposes
    # keys as attributes.
    return sorted(fonts, key=attrgetter('weight'))
# DC This should also print the subset filesizes and check they are
# smaller than the original ttf
def inferSubsets(familydir):
    """Collect subset names from file extensions found under *familydir*.

    A file named "Family-Style.<subset>" contributes <subset> when it is
    listed in SUPPORTED_SUBSETS; with no matches the default ["latin"]
    is returned.  The result is sorted for stable output.
    """
    subsets = set()
    files = listdir(familydir)
    for f in files:
        # Bug fix: the old code wrapped f in unicode(), which raises
        # NameError on Python 3.  rpartition mirrors the old rfind logic:
        # no dot at all means no extension.
        _, dot, extension = f.rpartition(".")
        if dot and extension in SUPPORTED_SUBSETS:
            subsets.add(extension)
    if len(subsets) == 0:
        return ["latin"]
    return sorted(subsets)
def getDesigner(familydir):
    """Read the designer name from the first Regular face's name table.

    Falls through to "Multiple Designers" for non-Regular files.
    NOTE(review): on py3 fontToolsGetDesignerName usually returns bytes,
    so the isinstance(desName, str) check fails and the Regular branch
    never returns -- confirm against real fonts.
    """
    # import fontforge
    files = listdir(familydir)
    for f in files:
        if check_regular(f): # DC should ansiprint red if no Reg exemplar
            ftfont = fontToolsOpenFont(f)
            desName = fontToolsGetDesignerName(ftfont)
            if isinstance(desName, str):
                string = u"Designer's name from font is: " + u(desName)
                color = "green"
                ansiprint(string, color)
                return u(desName)
        else:
            # Non-Regular file seen: fall back to the generic label.
            desName = "Multiple Designers"
            ansiprint(
                "No Designer Name known, using Multiple Designers for now...", "red")
    # NOTE(review): desName is unbound here when *files* is empty -- confirm.
    return desName
def check_monospace(familydir):
    """Return True when every glyph width collected from every .ttf under
    *familydir* is identical (i.e. the family looks monospaced).

    Widths come from the hmtx table for every entry of each Windows
    (platformID 3, encoding 1 or 10) cmap subtable; unreadable metrics
    count as width 0.
    """
    files = listdir(familydir)
    glyphwidths = []
    for f in files:
        # Bug fix: the old code wrapped f in unicode(), a NameError on
        # Python 3.  Matching stays case-sensitive as before.
        if not f.endswith('.ttf'):
            continue
        font = fontToolsOpenFont(f)
        for table in font['cmap'].tables:
            if not (table.platformID == 3 and table.platEncID in [1, 10]):
                continue
            # NOTE(review): iterating table.cmap yields *codepoints*,
            # not glyph names, so the hmtx lookup below may always fail
            # and append 0 -- confirm whether .values() was intended.
            for glyphname in table.cmap:
                try:
                    glyphwidths.append(font['hmtx'][glyphname][0])
                except (IndexError, KeyError):
                    # can't read hmtx for glyphname, append value of zero
                    glyphwidths.append(0)
    # Monospaced means exactly one distinct advance width overall.
    return len(set(glyphwidths)) == 1
def getSize(familydir):
    """Return the gzipped size (bytes) of the family's Regular face.

    Returns -1 and prints a red warning when no Regular face exists;
    otherwise prints the original and gzipped sizes in green.
    """
    regulars = [f for f in listdir(familydir) if check_regular(f)]
    if not regulars:
        ansiprint("WARNING: No *-Regular.ttf to calculate gzipped filesize!",
                  "red")
        return -1
    filepath = regulars[0]
    string = "Original size: "
    string += str(os.path.getsize(filepath))
    # Bug fix: compress into a private temp file instead of the old
    # hard-coded "/tmp/tempfont.gz", which raced between concurrent runs,
    # broke on platforms without /tmp, and was never cleaned up.
    handle, tmpgzip = tempfile.mkstemp(suffix=".gz")
    os.close(handle)
    try:
        with io.open(filepath, 'rb') as f_in:
            with gzip.open(tmpgzip, 'wb') as f_out:
                f_out.writelines(f_in)
        gzipSize = os.path.getsize(tmpgzip)
    finally:
        os.remove(tmpgzip)
    string += "\nGzip size: "
    string += str(gzipSize)
    ansiprint(string, "green")
    return int(gzipSize)
def setIfNotPresent(metadata, key, value):
    """Assign metadata[key] = value only when *key* is absent.

    Deliberately not dict.setdefault: that C implementation would bypass
    InsertOrderedDict.__setitem__ and lose the key ordering.
    """
    if key in metadata:
        return
    metadata[key] = value
def genmetadata(familydir):
    """Build (or refresh) the METADATA.json contents for *familydir*.

    Existing metadata is loaded first and only missing or placeholder
    fields ("UNKNOWN" name, size -1, absent keys) are filled in, so
    manual edits survive regeneration.  "fonts" and "subsets" are always
    recomputed from the font files.
    """
    metadata = InsertOrderedDict()
    if hasMetadata(familydir):
        metadata = loadMetadata(familydir)
        print(metadata)
    familyname = inferFamilyName(familydir)
    if not metadata.get('name') or metadata.get('name', "UNKNOWN") == "UNKNOWN":
        metadata["name"] = familyname
    desName = getDesigner(familydir)
    if not metadata.get('designer'):
        metadata["designer"] = desName
    # DC Should check it against profiles.json
    if not metadata.get('license'):
        metadata["license"] = inferLicense(familydir)
    setIfNotPresent(metadata, "visibility", "Sandbox")
    category = ''
    if check_monospace(familydir):
        category = 'monospace'
    setIfNotPresent(metadata, "category", category)
    # DC Should get this from the font or prompt?
    if not metadata.get('size') or metadata.get('size', -1) == -1:
        metadata["size"] = getSize(familydir)
        # DC: this should check the filesize got smaller than last
        # time
    metadata["fonts"] = createFonts(familydir, familyname)
    metadata["subsets"] = inferSubsets(familydir)
    setIfNotPresent(metadata, "dateAdded", getToday())
    # DC This is used for the Date Added sort in the GWF
    # Directory - DC to check all existing values in hg repo
    # are correct
    return metadata
def getToday():
    """Today's date as an ISO "YYYY-MM-DD" string."""
    return date.today().isoformat()
def hasMetadata(familydir):
    """True when *familydir* already contains a non-empty METADATA.json."""
    path = os.path.join(familydir, METADATA_JSON)
    return os.path.exists(path) and os.path.getsize(path) > 0
def loadMetadata(familydir):
    """Parse familydir's METADATA.json, preserving its key order."""
    import collections
    path = os.path.join(familydir, METADATA_JSON)
    with io.open(path, 'r', encoding="utf-8") as fp:
        return json.load(fp, object_pairs_hook=collections.OrderedDict)
def sortFont(fonts):
    """Rebuild each font dict so its keys appear in the canonical order
    used for METADATA.json output."""
    key_order = ("name", "style", "weight", "filename",
                 "postScriptName", "fullName", "copyright")
    ordered = []
    for font in fonts:
        entry = InsertOrderedDict()
        for key in key_order:
            entry[key] = font[key]
        ordered.append(entry)
    return ordered
def striplines(jsontext):
    """Strip trailing whitespace from every line of *jsontext*,
    re-terminating each line (including the last) with "\\n"."""
    return u"".join(u"%s\n" % line.rstrip() for line in jsontext.split("\n"))
def writeFile(familydir, metadata):
    """Serialize *metadata* into familydir's METADATA.json (or
    METADATA.json.new when one already exists) and echo it to stdout."""
    target = METADATA_JSON_NEW if hasMetadata(familydir) else METADATA_JSON
    contents = json.dumps(metadata, indent=2, ensure_ascii=False)
    with io.open(os.path.join(familydir, target), 'w', encoding='utf-8') as f:
        f.write(striplines(contents))
    print(json.dumps(metadata, indent=2, ensure_ascii=False))
def ansiprint(string, color):
    """Print *string*, bold-colored with ANSI escapes when stdout is a
    tty: "green" selects green, any other value red."""
    if not sys.stdout.isatty():
        print(string)
        return
    code = '32' if color == "green" else '31'  # green / red
    # ";1" adds the bold attribute, matching the old ['3x', '1'] join.
    print('\x1b[%s;1m%s\x1b[0m' % (code, string))
def writeDescHtml(familydir):
    """Create DESCRIPTION.en_us.html from the Regular face's name-table
    description, unless the file already exists.

    Falls back to "TODO" (with a red warning) when no Regular face is
    found under *familydir*.
    """
    filename = "DESCRIPTION.en_us.html"
    if os.path.exists(os.path.join(familydir, filename)):
        # Bug fix: the placeholder was never filled in; the old call
        # printed the literal text "{} exists".
        ansiprint('{} exists'.format(filename), 'green')
        return
    foundRegular = False
    fontDesc = "TODO"
    files = listdir(familydir)
    for f in files:
        if check_regular(f):
            foundRegular = True
            # Bug fix: listdir() already returns paths joined with their
            # directory; the old os.path.join(familydir, f) doubled the
            # familydir prefix.
            ftfont = fontToolsOpenFont(f)
            fontDesc = fontToolsGetDesc(ftfont)
            break
    if not foundRegular:
        string = "No Regular found! REMEMBER! Create a " + filename
        color = "red"
        ansiprint(string, color)
    descHtml = u"<p>" + u(fontDesc) + u"</p>"
    with io.open(os.path.join(familydir, filename), 'w', encoding="utf-8") as f:
        f.write(descHtml)
def run(familydir):
    """Generate DESCRIPTION.en_us.html and METADATA.json for *familydir*."""
    writeDescHtml(familydir)
    metadata = genmetadata(familydir)
    writeFile(familydir, metadata)
| 31.385762 | 463 | 0.618927 |
d6792a73798158e71a5f4c65659ba7e052a6bf48 | 1,033 | py | Python | char-rnn-generation/model.py | rdcsung/practical-pytorch | 6c57013c16eb928232af5e9bbe886a41c4ac9f9e | [
"MIT"
] | null | null | null | char-rnn-generation/model.py | rdcsung/practical-pytorch | 6c57013c16eb928232af5e9bbe886a41c4ac9f9e | [
"MIT"
] | null | null | null | char-rnn-generation/model.py | rdcsung/practical-pytorch | 6c57013c16eb928232af5e9bbe886a41c4ac9f9e | [
"MIT"
] | null | null | null | # https://github.com/spro/practical-pytorch
import torch
import torch.nn as nn
from torch.autograd import Variable
import config
class RNN(nn.Module):
    """Character-level GRU language model: Embedding -> GRU -> Linear.

    Processes one token per forward() call with batch size 1, carrying
    the recurrent hidden state explicitly between calls.
    """
    def __init__(self, input_size, hidden_size, output_size, n_layers=1):
        """input_size/output_size: vocabulary sizes for the embedding and
        the output projection; hidden_size: width of both the embedding
        and the GRU; n_layers: number of stacked GRU layers."""
        super(RNN, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        self.encoder = nn.Embedding(input_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size, n_layers)
        self.decoder = nn.Linear(hidden_size, output_size)
        # Move parameters to the configured device only after all
        # submodules exist, so everything is relocated together.
        self.to(config.HOST_DEVICE)
    def forward(self, input, hidden):
        """One step: *input* holds a single token index; returns
        (logits of shape (1, output_size), next hidden state)."""
        input = input.to(config.HOST_DEVICE)
        input = self.encoder(input.view(1, -1))
        # GRU expects (seq_len, batch, features) = (1, 1, hidden_size).
        output, hidden = self.gru(input.view(1, 1, -1), hidden)
        output = self.decoder(output.view(1, -1))
        return output, hidden
    def init_hidden(self):
        """Zero hidden state of shape (n_layers, 1, hidden_size) on the
        configured device.  Variable() is a no-op on modern torch."""
        return Variable(torch.zeros(self.n_layers, 1, self.hidden_size)).to(config.HOST_DEVICE)
e419e8c427020085f1744b6b91ec250edf882070 | 2,041 | py | Python | pageobjects/contact_us.py | sammy-nyubs/sovtech | 95d13f5830b259925bac32151dfe0d3309cfee0d | [
"Apache-2.0"
] | null | null | null | pageobjects/contact_us.py | sammy-nyubs/sovtech | 95d13f5830b259925bac32151dfe0d3309cfee0d | [
"Apache-2.0"
] | null | null | null | pageobjects/contact_us.py | sammy-nyubs/sovtech | 95d13f5830b259925bac32151dfe0d3309cfee0d | [
"Apache-2.0"
] | null | null | null | from selenium.webdriver.common.by import By
from toolium.pageobjects.page_object import PageObject
import time
class contact_usPagePageObject(PageObject):
    """Toolium page object for the SovTech contact-us page."""
    def init_page_elements(self):
        # Locator for the "Get in touch" heading, used as the
        # page-loaded marker.  NOTE(review): this is a plain locator
        # tuple, while wait_until_loaded() calls wait_until_visible() on
        # it, which expects a toolium PageElement -- confirm.
        self.get_in_touch = (By.XPATH, "//h2[contains(text(),'Get in touch')]")
    def open(self):
        """ Open contact-us page url in browser
        :returns: this page object instance
        """
        # URL comes from the [Test] section of the toolium config.
        self.driver.get('{}'.format(self.config.get('Test', 'url')))
        return self
    def wait_until_loaded(self):
        """ Wait until contact-us page is loaded
        :returns: this page object instance
        """
        self.get_in_touch.wait_until_visible()
        return self
    def fill_get_in_touch_form(self):
        """ Fill the get in touch form and submit
        :returns: this page object instance
        """
        # The form lives inside a HubSpot iframe; all locators below are
        # relative to that frame after switching into it.
        self.driver.switch_to.frame(self.driver.find_element_by_xpath("//iframe[@id='hs-form-iframe-0']"))
        self.driver.find_element_by_xpath("//input[@id='your_name-c2e387f9-4bd8-496f-ab2a-81fbbc31712a']").send_keys('Sam')
        self.driver.find_element_by_xpath("//input[@id='email-c2e387f9-4bd8-496f-ab2a-81fbbc31712a']").send_keys('Sam@sovtech.com')
        self.driver.find_element_by_xpath("//input[@id='mobilephone-c2e387f9-4bd8-496f-ab2a-81fbbc31712a']").send_keys('0812131311')
        self.driver.find_element_by_xpath("//select[@id='numemployees-c2e387f9-4bd8-496f-ab2a-81fbbc31712a']").click()
        self.driver.find_element_by_xpath("//option[contains(text(),'25-50')]").click()
        self.driver.find_element_by_xpath("//textarea[@id='message-c2e387f9-4bd8-496f-ab2a-81fbbc31712a']").send_keys('Please help me automate the boring stuff!!')
        self.driver.find_element_by_xpath("//input[@id='LEGAL_CONSENT.subscription_type_10841063-c2e387f9-4bd8-496f-ab2a-81fbbc31712a']").click()
        self.driver.find_element_by_xpath("/html/body/div/form/div[7]/div[2]/input").click()
        # NOTE(review): fixed sleep after submit -- an explicit wait on a
        # success element would be more reliable; confirm before changing.
        time.sleep(10)
        return self
| 42.520833 | 164 | 0.67222 |
bdc8402291b6b69c943ffe54cfcc55e5801d497d | 4,997 | py | Python | mikefm_skill/observation.py | mohm-dhi/mikefm-skill | 7f8125983b4ec2a84c3ec5f7f683d389d2163ff4 | [
"MIT"
] | null | null | null | mikefm_skill/observation.py | mohm-dhi/mikefm-skill | 7f8125983b4ec2a84c3ec5f7f683d389d2163ff4 | [
"MIT"
] | null | null | null | mikefm_skill/observation.py | mohm-dhi/mikefm-skill | 7f8125983b4ec2a84c3ec5f7f683d389d2163ff4 | [
"MIT"
] | null | null | null | import os
from shapely.geometry import Point, MultiPoint
import pandas as pd
from mikeio import Dfs0, eum
class Observation:
    """Base class for measured data used to compare against model output.

    Subclasses populate `df` (a pandas DataFrame indexed by time, with
    the observed values in the last column) and `itemInfo` (the mikeio
    EUM item describing the measured quantity).
    """
    # Display name of the observation (defaults to filename stem)
    name = None
    # pandas DataFrame: DatetimeIndex, observed values
    df = None
    # mikeio eum ItemInfo describing quantity/unit (None when unknown)
    itemInfo = None
    # Default plot color (matplotlib's red)
    color = "#d62728"
    # DHI: darkblue: #004165,
    # midblue: #0098DB,
    # gray: #8B8D8E,
    # lightblue:#63CEFF,
    # DHI secondary
    # yellow: #FADC41,
    # orange: #FF8849
    # lightblue2:#C1E2E5
    # green: #61C250
    # purple: #93509E
    # darkgray: #51626F
    # matplotlib: red=#d62728
    @property
    def time(self):
        """pandas DatetimeIndex of the observations."""
        return self.df.index
    @property
    def start_time(self):
        """First time instance (as datetime)"""
        return self.time[0].to_pydatetime()
    @property
    def end_time(self):
        """Last time instance (as datetime)"""
        return self.time[-1].to_pydatetime()
    @property
    def values(self):
        """Observed values as a numpy array (DataFrame.values)."""
        return self.df.values
    @property
    def n(self):
        """Number of observations"""
        return len(self.df)
    def __init__(self, name: str = None):
        self.name = name
    def _unit_text(self):
        """Axis-label text like "Water Level [m]"; "" without itemInfo,
        and no unit bracket for the Undefined EUM type."""
        if self.itemInfo is None:
            return ""
        txt = f"{self.itemInfo.type.display_name}"
        if self.itemInfo.type != eum.EUMType.Undefined:
            unit = self.itemInfo.unit.display_name
            txt = f"{txt} [{unit_display_name(unit)}]"
        return txt
    def hist(self, bins=100, **kwargs):
        """Plot a histogram of the observed values (last df column);
        returns the matplotlib Axes."""
        ax = self.df.iloc[:, -1].hist(bins=bins, color=self.color, **kwargs)
        ax.set_title(self.name)
        ax.set_xlabel(self._unit_text())
        return ax
class PointObservation(Observation):
    """Time series measured at a fixed point (read from a dfs0 file)."""
    # Point coordinates; z is optional (None for a 2D point)
    x = None
    y = None
    z = None
    @property
    def geometry(self) -> Point:
        """Coordinates of observation as a shapely Point (2D or 3D)."""
        if self.z is None:
            return Point(self.x, self.y)
        else:
            return Point(self.x, self.y, self.z)
    def __init__(
        self,
        filename,
        item: int = 0,
        x: float = None,
        y: float = None,
        z: float = None,
        name=None,
    ):
        """Read *item* from the dfs0 file *filename* at position (x, y[, z]).

        name defaults to the filename stem.  DataFrame/Series input and
        non-dfs0 extensions are not implemented yet.
        """
        self.x = x
        self.y = y
        self.z = z
        if isinstance(filename, pd.DataFrame) or isinstance(filename, pd.Series):
            raise NotImplementedError()
        else:
            if name is None:
                name = os.path.basename(filename).split(".")[0]
            ext = os.path.splitext(filename)[-1]
            if ext == ".dfs0":
                df, itemInfo = self._read_dfs0(Dfs0(filename), item)
                self.df, self.itemInfo = df, itemInfo
            else:
                raise NotImplementedError()
        super().__init__(name)
    def __repr__(self):
        out = f"PointObservation: {self.name}, x={self.x}, y={self.y}"
        return out
    @staticmethod
    def from_dataframe(df):
        # TODO: unimplemented stub -- currently returns None
        pass
    @staticmethod
    def from_dfs0(dfs, item_number):
        # TODO: unimplemented stub -- currently returns None
        pass
    @staticmethod
    def _read_dfs0(dfs, item):
        """Read data from dfs0 file; drops NaN rows.
        Returns (DataFrame, ItemInfo for *item*)."""
        df = dfs.read(items=item).to_dataframe()
        df.dropna(inplace=True)
        return df, dfs.items[item]
    def plot(self, **kwargs):
        """Plot the timeseries as points; returns the matplotlib Axes."""
        ax = self.df.plot(marker=".", color=self.color, linestyle="None", **kwargs)
        ax.set_title(self.name)
        ax.set_ylabel(self._unit_text())
        return ax
class TrackObservation(Observation):
    """Observations measured along a moving track (e.g. a ship or
    satellite): columns 0 and 1 of `df` are x and y positions, the third
    selected column holds the observed values.
    """
    @property
    def geometry(self) -> MultiPoint:
        """Coordinates of observation as a shapely MultiPoint."""
        return MultiPoint(self.df.iloc[:, 0:2].values)
    @property
    def x(self):
        """x positions (first column) as a numpy array."""
        return self.df.iloc[:, 0].values
    @property
    def y(self):
        """y positions (second column) as a numpy array."""
        return self.df.iloc[:, 1].values
    @property
    def values(self):
        """Observed values (third column) as a numpy array."""
        return self.df.iloc[:, 2].values
    def __init__(self, filename, item: int = 2, name=None):
        """Read items [0, 1, item] (x, y, values) from the dfs0 file
        *filename*.  name defaults to the filename stem.  DataFrame /
        Series input and non-dfs0 extensions are not implemented yet.
        """
        if isinstance(filename, pd.DataFrame) or isinstance(filename, pd.Series):
            raise NotImplementedError()
        else:
            if name is None:
                name = os.path.basename(filename).split(".")[0]
            ext = os.path.splitext(filename)[-1]
            if ext == ".dfs0":
                items = [0, 1, item]
                df, itemInfo = self._read_dfs0(Dfs0(filename), items)
                self.df, self.itemInfo = df, itemInfo
            else:
                raise NotImplementedError()
        super().__init__(name)
    def __repr__(self):
        out = f"TrackObservation: {self.name}, n={self.n}"
        return out
    @staticmethod
    def _read_dfs0(dfs, items):
        """Read track data from dfs0 file; drops NaN rows.
        Returns (DataFrame, ItemInfo of the *values* item)."""
        df = dfs.read(items=items).to_dataframe()
        df.dropna(inplace=True)
        return df, dfs.items[items[-1]]
def unit_display_name(name: str) -> str:
    """Return a short display form of a unit name.

    Examples
    --------
    >>> unit_display_name("meter")
    'm'
    >>> unit_display_name("meter_per_sec")
    'm/s'
    """
    short = name.replace("meter", "m")
    short = short.replace("_per_", "/")
    short = short.replace("sec", "s")
    return short
| 25.237374 | 83 | 0.550931 |
0b262dc4e46e3805a80996d5c5edea60d04f0487 | 1,305 | py | Python | vut/lib/python3.8/site-packages/pipenv/vendor/pip_shims/__init__.py | dan-mutua/djangowk1 | 1e5dcb6443ef21451e21845ec639198719e11b10 | [
"MIT"
] | 18,636 | 2017-12-06T14:53:18.000Z | 2022-03-31T13:12:34.000Z | vut/lib/python3.8/site-packages/pipenv/vendor/pip_shims/__init__.py | dan-mutua/djangowk1 | 1e5dcb6443ef21451e21845ec639198719e11b10 | [
"MIT"
] | 3,640 | 2017-12-06T16:58:35.000Z | 2022-03-31T22:20:57.000Z | vut/lib/python3.8/site-packages/pipenv/vendor/pip_shims/__init__.py | dan-mutua/djangowk1 | 1e5dcb6443ef21451e21845ec639198719e11b10 | [
"MIT"
] | 1,987 | 2017-12-06T15:04:51.000Z | 2022-03-26T10:05:15.000Z | # -*- coding=utf-8 -*-
"""
This library is a set of compatibility access shims to the ``pip`` internal API.
It provides compatibility with pip versions 8.0 through the current release. The
shims are provided using a lazy import strategy by hacking a module by overloading
a class instance's ``getattr`` method. This library exists due to my constant
writing of the same set of import shims.
Submodules
==========
.. autosummary::
:toctree: _autosummary
pip_shims.models
pip_shims.compat
pip_shims.utils
pip_shims.shims
pip_shims.environment
"""
from __future__ import absolute_import
import sys
from . import shims
__version__ = "0.5.3"
# Module self-replacement trick: this module substitutes itself in
# ``sys.modules`` with the lazy shim object returned by ``shims._new()`` so
# attribute access on ``pip_shims`` is resolved on demand.
if "pip_shims" in sys.modules:
    # mainly to keep a reference to the old module on hand so it doesn't get
    # weakref'd away
    if __name__ != "pip_shims":
        del sys.modules["pip_shims"]
if __name__ in sys.modules:
    # Keep a strong reference to the module object being replaced.
    old_module = sys.modules[__name__]
module = sys.modules[__name__] = sys.modules["pip_shims"] = shims._new()
module.shims = shims
# Copy essential module metadata onto the replacement so it keeps behaving
# like a regular module (file/path/docs/version and the public API list).
module.__dict__.update(
    {
        "__file__": __file__,
        "__package__": "pip_shims",
        "__path__": __path__,
        "__doc__": __doc__,
        "__all__": module.__all__ + ["shims"],
        "__version__": __version__,
        "__name__": __name__,
    }
)
| 24.622642 | 82 | 0.68046 |
038bfedb961c372ce61faf55350be365b32797f5 | 997 | py | Python | solutions/0763.Partition_Labels/python_solution.py | garyzccisme/leetcode | 56be6aeb07253c9da2d354eb239bd016b7574b22 | [
"MIT"
] | 2 | 2020-06-16T17:15:17.000Z | 2021-07-26T12:17:54.000Z | solutions/0763.Partition_Labels/python_solution.py | garyzccisme/leetcode | 56be6aeb07253c9da2d354eb239bd016b7574b22 | [
"MIT"
] | null | null | null | solutions/0763.Partition_Labels/python_solution.py | garyzccisme/leetcode | 56be6aeb07253c9da2d354eb239bd016b7574b22 | [
"MIT"
] | 1 | 2020-10-03T18:34:56.000Z | 2020-10-03T18:34:56.000Z | # My Solution
class Solution:
    def partitionLabels(self, S: str) -> List[int]:
        """Split S into as many parts as possible so each letter appears
        in at most one part; return the part sizes.

        Approach: record each letter's [first, last] occurrence interval,
        then merge overlapping intervals (intervals come out sorted by
        first occurrence because dicts preserve insertion order).
        """
        spans = {}
        for idx, ch in enumerate(S):
            if ch in spans:
                spans[ch][1] = idx
            else:
                spans[ch] = [idx, idx]
        merged = []
        for start, end in spans.values():
            if merged and merged[-1][1] > start:
                # Overlaps the previous partition: extend it.
                merged[-1][1] = max(merged[-1][1], end)
            else:
                merged.append([start, end])
        return [end - start + 1 for start, end in merged]
# Leetcode Solution
class Solution(object):
    def partitionLabels(self, S):
        """Greedy single pass: extend the current partition to the last
        occurrence of every letter seen; cut when the scan reaches it.
        """
        final_index = {ch: idx for idx, ch in enumerate(S)}
        sizes = []
        start = end = 0
        for idx, ch in enumerate(S):
            end = max(end, final_index[ch])
            if idx == end:
                sizes.append(end - start + 1)
                start = idx + 1
        return sizes
aa6c31bc5bda966d39c7c4b0c6adfd6a821fe832 | 393 | py | Python | eventup/users/models/role_admin.py | Z-Devs-platzi/backend | 66dc436311c1e5e307c5f32d6a151fb9e5d6e0b8 | [
"MIT"
] | null | null | null | eventup/users/models/role_admin.py | Z-Devs-platzi/backend | 66dc436311c1e5e307c5f32d6a151fb9e5d6e0b8 | [
"MIT"
] | 15 | 2020-08-23T18:40:49.000Z | 2022-03-12T00:46:49.000Z | eventup/users/models/role_admin.py | Z-Devs-platzi/event_up-backend | 2b03a87e220cf4d68c4c2c2067096926d6f19b37 | [
"MIT"
] | null | null | null | """Profile model."""
# Django
from django.db import models
# Utilities
from eventup.utils.models import GeneralModel
class RoleAdmin(GeneralModel):
    """Role Admin model.
    The base role assigned to an admin user.
    """
    # Display name of the admin role.
    # NOTE(review): on a TextField, ``max_length`` is only enforced by form
    # validation, not at the database level -- confirm a CharField was not
    # intended here.
    name = models.TextField(max_length=500, blank=False, null=False)
    def __str__(self):
        """Return the role's str representation (its name)."""
        return str(self.name)
| 19.65 | 68 | 0.6743 |
c703b7cf0d2bcd2a2b045643206b20482eaa2b4b | 2,481 | py | Python | bin/update_results.py | bmustiata/jenny | 1fd5b873684a747d9924f3f07d4e114c2193ac3c | [
"BSD-3-Clause"
] | 84 | 2018-04-14T08:13:02.000Z | 2022-03-25T13:41:17.000Z | bin/update_results.py | bmustiata/jenny | 1fd5b873684a747d9924f3f07d4e114c2193ac3c | [
"BSD-3-Clause"
] | 61 | 2018-04-05T22:21:46.000Z | 2019-12-01T14:30:14.000Z | bin/update_results.py | bmustiata/jenny | 1fd5b873684a747d9924f3f07d4e114c2193ac3c | [
"BSD-3-Clause"
] | 10 | 2018-08-20T16:44:07.000Z | 2021-02-15T19:50:28.000Z | #!/usr/bin/env python3
import os
import os.path
import subprocess
import sys
from typing import List, Optional
# Absolute path to the repository root: two levels above this script's real location.
PROJECT_FOLDER = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def update_single_test(folder: str) -> None:
    """Regenerate the expected jenny output files for a single test folder.

    :param folder: test folder, relative to the project root.
    """
    log_folder = folder
    # BUG FIX: ``os.curdir`` is just the literal string "." -- the final
    # chdir was therefore a no-op and the caller's working directory was
    # never restored. ``os.getcwd()`` captures the actual directory.
    current_folder = os.getcwd()  # type: str
    os.chdir("{0}/{1}".format(PROJECT_FOLDER, folder))
    try:
        update_jenny_execution(log_folder,
                               "jenny-expected.txt")
        update_jenny_execution(log_folder,
                               "jenny-expected-info.txt",
                               ["--info"])
    finally:
        # Restore the working directory even if regeneration fails.
        os.chdir(current_folder)
def update_jenny_execution(log_folder: str,
                           log_file_name: str,
                           extra_arguments: Optional[List[str]] = None) -> None:
    """Run jenny in the current directory and record its output as the
    expected log for this test.

    :param log_folder: test folder, used in log messages and in the
        expected-file path.
    :param log_file_name: name of the expected-output file to (re)write.
    :param extra_arguments: extra command line arguments passed to jenny.
    :raises Exception: if jenny exits non-zero for a test whose folder is
        not marked as failing (i.e. 'fail' not in its name).
    """
    if not extra_arguments:
        extra_arguments = []
    print("Updating %s (%s)" % (log_folder, " ".join(extra_arguments)))
    expected_file = "{0}/{1}".format(log_folder, log_file_name)
    command = ["%s/jenny" % PROJECT_FOLDER]
    command.extend(extra_arguments)
    # BUG FIX: run the command *before* opening the output file. Opening
    # with mode="w" truncates it immediately, so the previously recorded
    # expected log was destroyed whenever jenny failed.
    status, output = subprocess.getstatusoutput(" ".join(command))
    print(output)
    if status != 0 and 'fail' not in log_folder:
        raise Exception("Wrong return code: %d" % status)
    with open("{0}/{1}".format(PROJECT_FOLDER, expected_file),
              mode="w") as output_file:
        output_file.write(output + "\n")
# Test folders (relative to the project root) whose expected outputs are
# regenerated by default.
tests_to_run = [
    "features/ansiColor-support",
    "features/archiveArtifacts",
    "features/build-environment-variables",
    "features/checkout-preserves-permissions",
    "features/child-section-skip/parent",
    "features/credentials-in-container",
    "features/current-build",
    "features/different-work-folder/parent",
    "features/dir-step",
    "features/external-classes",
    "features/failing-project",
    "features/failing-sh-inside-docker",
    "features/library-annotation-mock",
    "features/mock-missing-functions",
    "features/multiple-nodes",
    "features/overwrite-environment-variables",
    "features/parameters-boolean-value",
    "features/stash-unstash",
    "features/testset/parent",
    "features/upstream-pipeline-trigger",

    # these tests need docker
    "features/write-file",
    "features/sh-support",
    "features/docker-user-id-inside",
    "features/docker-support",
    "features/junit-support"
]
# An explicit list of folders on the command line overrides the default set.
if len(sys.argv) > 1:
    tests_to_run = sys.argv[1:]
for test in tests_to_run:
    update_single_test(test)
| 28.517241 | 77 | 0.651753 |
c797b48912518b804b816199ababd3cc8d2fd3e1 | 10,662 | py | Python | aiida/backends/tests/cmdline/commands/test_code.py | joepvd/aiida_core | 6e9711046753332933f982971db1d7ac7e7ade58 | [
"BSD-2-Clause"
] | null | null | null | aiida/backends/tests/cmdline/commands/test_code.py | joepvd/aiida_core | 6e9711046753332933f982971db1d7ac7e7ade58 | [
"BSD-2-Clause"
] | null | null | null | aiida/backends/tests/cmdline/commands/test_code.py | joepvd/aiida_core | 6e9711046753332933f982971db1d7ac7e7ade58 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Tests for the 'verdi code' command."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import subprocess as sp
import traceback
from click.testing import CliRunner
from aiida.backends.testbase import AiidaTestCase
from aiida.cmdline.commands.cmd_code import (setup_code, delete, hide, reveal, relabel, code_list, show, code_duplicate)
from aiida.common.exceptions import NotExistent
from aiida import orm
# pylint: disable=missing-docstring
class TestVerdiCodeSetup(AiidaTestCase):
    """Tests for the 'verdi code setup' command."""
    @classmethod
    def setUpClass(cls, *args, **kwargs):
        # Create the single test computer shared by every test in this class.
        super(TestVerdiCodeSetup, cls).setUpClass(*args, **kwargs)
        orm.Computer(
            name='comp',
            hostname='localhost',
            transport_type='local',
            scheduler_type='direct',
            workdir='/tmp/aiida',
            backend=cls.backend).store()
    def setUp(self):
        """Look up the shared computer and prepare a fresh CLI runner per test."""
        self.comp = orm.Computer.objects(self.backend).get(name='comp')
        self.cli_runner = CliRunner()
        self.this_folder = os.path.dirname(__file__)
        self.this_file = os.path.basename(__file__)
    def test_help(self):
        """The `--help` option should not raise."""
        self.cli_runner.invoke(setup_code, ['--help'], catch_exceptions=False)
    def test_reachable(self):
        """The command should be reachable through the `verdi` entry point."""
        output = sp.check_output(['verdi', 'code', 'setup', '--help'])
        self.assertIn(b'Usage:', output)
    def test_interactive_remote(self):
        """Interactively set up a code installed on a (remote) computer."""
        from aiida.orm import Code
        # Use a self-quitting editor so prompts for text fields return quickly.
        os.environ['VISUAL'] = 'sleep 1; vim -cwq'
        os.environ['EDITOR'] = 'sleep 1; vim -cwq'
        label = 'interactive_remote'
        user_input = '\n'.join(
            [label, 'description', 'simpleplugins.arithmetic.add', 'yes', self.comp.name, '/remote/abs/path'])
        result = self.cli_runner.invoke(setup_code, input=user_input)
        self.assertIsNone(result.exception, "".join(traceback.format_exception(*result.exc_info)))
        self.assertIsInstance(Code.get_from_string('{}@{}'.format(label, self.comp.name)), Code)
    def test_interactive_upload(self):
        """Interactively set up a code whose files are uploaded (stored in the db)."""
        from aiida.orm import Code
        os.environ['VISUAL'] = 'sleep 1; vim -cwq'
        os.environ['EDITOR'] = 'sleep 1; vim -cwq'
        label = 'interactive_upload'
        user_input = '\n'.join(
            [label, 'description', 'simpleplugins.arithmetic.add', 'no', self.this_folder, self.this_file])
        result = self.cli_runner.invoke(setup_code, input=user_input)
        self.assertIsNone(result.exception, result.output)
        self.assertIsInstance(Code.get_from_string('{}'.format(label)), Code)
    def test_noninteractive_remote(self):
        """Set up a remote code passing all options on the command line."""
        from aiida.orm import Code
        label = 'noninteractive_remote'
        options = [
            '--non-interactive', '--label={}'.format(label), '--description=description',
            '--input-plugin=simpleplugins.arithmetic.add', '--on-computer', '--computer={}'.format(self.comp.name),
            '--remote-abs-path=/remote/abs/path'
        ]
        result = self.cli_runner.invoke(setup_code, options)
        self.assertIsNone(result.exception, "".join(traceback.format_exception(*result.exc_info)))
        self.assertIsInstance(Code.get_from_string('{}@{}'.format(label, self.comp.name)), Code)
    def test_noninteractive_upload(self):
        """Set up an uploaded (stored-in-db) code passing all options on the command line."""
        from aiida.orm import Code
        label = 'noninteractive_upload'
        options = [
            '--non-interactive', '--label={}'.format(label), '--description=description',
            '--input-plugin=simpleplugins.arithmetic.add', '--store-in-db', '--code-folder={}'.format(self.this_folder),
            '--code-rel-path={}'.format(self.this_file)
        ]
        result = self.cli_runner.invoke(setup_code, options)
        self.assertIsNone(result.exception, result.output[-1000:])
        self.assertIsInstance(Code.get_from_string('{}'.format(label)), Code)
    def test_mixed(self):
        """Set up a code mixing command line options with interactive prompts."""
        from aiida.orm import Code
        label = 'mixed_remote'
        options = ['--description=description', '--on-computer', '--remote-abs-path=/remote/abs/path']
        user_input = '\n'.join([label, 'simpleplugins.arithmetic.add', self.comp.name])
        result = self.cli_runner.invoke(setup_code, options, input=user_input)
        self.assertIsNone(result.exception, "".join(traceback.format_exception(*result.exc_info)))
        self.assertIsInstance(Code.get_from_string('{}@{}'.format(label, self.comp.name)), Code)
class TestVerdiCodeCommands(AiidaTestCase):
    """Testing verdi code commands.
    Testing everything besides `code setup`.
    """
    @classmethod
    def setUpClass(cls, *args, **kwargs):
        # Create the single test computer shared by every test in this class.
        from aiida import orm
        super(TestVerdiCodeCommands, cls).setUpClass(*args, **kwargs)
        orm.Computer(
            name='comp',
            hostname='localhost',
            transport_type='local',
            scheduler_type='direct',
            workdir='/tmp/aiida',
            backend=cls.backend).store()
    def setUp(self):
        """Get-or-create the test code 'code' and prepare a fresh CLI runner."""
        from aiida import orm
        self.comp = orm.Computer.objects(self.backend).get(name='comp')
        try:
            code = orm.Code.get_from_string('code')
        except NotExistent:
            code = orm.Code(
                input_plugin_name='simpleplugins.arithmetic.add',
                remote_computer_exec=[self.comp, '/remote/abs/path'],
            )
            code.label = 'code'
            code.description = 'desc'
            code.store()
        self.code = code
        self.cli_runner = CliRunner()
    def test_hide_one(self):
        """`verdi code hide` marks the code as hidden."""
        result = self.cli_runner.invoke(hide, [str(self.code.pk)])
        self.assertIsNone(result.exception)
        self.assertTrue(self.code.is_hidden())
    def test_reveal_one(self):
        """`verdi code reveal` unhides the code."""
        result = self.cli_runner.invoke(reveal, [str(self.code.pk)])
        self.assertIsNone(result.exception)
        self.assertFalse(self.code.is_hidden())
    def test_relabel_code(self):
        """Relabel by bare label."""
        result = self.cli_runner.invoke(relabel, [str(self.code.pk), 'new_code'])
        self.assertIsNone(result.exception)
        from aiida.orm import load_node
        new_code = load_node(self.code.pk)
        # NOTE(review): ``assertEquals`` is a deprecated alias of ``assertEqual``.
        self.assertEquals(new_code.label, 'new_code')
    def test_relabel_code_full(self):
        """Relabel by full label `label@computer` with a matching computer."""
        self.cli_runner.invoke(relabel, [str(self.code.pk), 'new_code@comp'])
        from aiida.orm import load_node
        new_code = load_node(self.code.pk)
        self.assertEquals(new_code.label, 'new_code')
    def test_relabel_code_full_bad(self):
        """Relabeling with a full label for the wrong computer should fail."""
        result = self.cli_runner.invoke(relabel, [str(self.code.pk), 'new_code@otherstuff'])
        self.assertIsNotNone(result.exception)
    def test_delete_one(self):
        """`verdi code delete` removes the code from the database."""
        result = self.cli_runner.invoke(delete, [str(self.code.pk)])
        self.assertIsNone(result.exception)
        with self.assertRaises(NotExistent):
            from aiida.orm import Code
            Code.get_from_string('code')
    def test_code_list(self):
        """Listing with filters shows the matching code and hides the other."""
        # set up second code 'code2'
        from aiida.orm import Code
        try:
            code = Code.get_from_string('code2')
        except NotExistent:
            code = Code(
                input_plugin_name='simpleplugins.templatereplacer',
                remote_computer_exec=[self.comp, '/remote/abs/path'],
            )
            code.label = 'code2'
            code.store()
        options = [
            '-A', '-a', '-o', '--input-plugin=simpleplugins.arithmetic.add', '--computer={}'.format(self.comp.name)
        ]
        result = self.cli_runner.invoke(code_list, options)
        self.assertIsNone(result.exception)
        self.assertTrue(str(self.code.pk) in result.output, 'PK of first code should be included')
        self.assertTrue('code2' not in result.output, 'label of second code should not be included')
        self.assertTrue('comp' in result.output, 'computer name should be included')
    def test_code_list_hide(self):
        """Hidden codes are omitted by default (-A) and shown with -a."""
        self.code.hide()
        options = ['-A']
        result = self.cli_runner.invoke(code_list, options)
        self.assertIsNone(result.exception)
        self.assertTrue(self.code.full_label not in result.output, 'code should be hidden')
        options = ['-a']
        result = self.cli_runner.invoke(code_list, options)
        self.assertIsNone(result.exception)
        self.assertTrue(self.code.full_label in result.output, 'code should be shown')
    def test_code_show(self):
        """`verdi code show` prints the code's pk."""
        result = self.cli_runner.invoke(show, [str(self.code.pk)])
        self.assertIsNone(result.exception)
        self.assertTrue(str(self.code.pk) in result.output)
    def test_code_duplicate_interactive(self):
        """Interactive duplication copies description and pre/append texts."""
        # Use a self-quitting editor so prompts for text fields return quickly.
        os.environ['VISUAL'] = 'sleep 1; vim -cwq'
        os.environ['EDITOR'] = 'sleep 1; vim -cwq'
        label = 'code_duplicate_interactive'
        user_input = label + '\n\n\n\n\n\n'
        result = self.cli_runner.invoke(code_duplicate, [str(self.code.pk)], input=user_input, catch_exceptions=False)
        self.assertIsNone(result.exception, result.output)
        from aiida.orm import Code
        new_code = Code.get_from_string(label)
        self.assertEquals(self.code.description, new_code.description)
        self.assertEquals(self.code.get_prepend_text(), new_code.get_prepend_text())
        self.assertEquals(self.code.get_append_text(), new_code.get_append_text())
    def test_code_duplicate_non_interactive(self):
        """Non-interactive duplication copies description, texts and plugin."""
        label = 'code_duplicate_noninteractive'
        result = self.cli_runner.invoke(code_duplicate, ['--non-interactive', '--label=' + label, str(self.code.pk)])
        self.assertIsNone(result.exception)
        from aiida.orm import Code
        new_code = Code.get_from_string(label)
        self.assertEquals(self.code.description, new_code.description)
        self.assertEquals(self.code.get_prepend_text(), new_code.get_prepend_text())
        self.assertEquals(self.code.get_append_text(), new_code.get_append_text())
        self.assertEquals(self.code.get_input_plugin_name(), new_code.get_input_plugin_name())
| 42.991935 | 120 | 0.636091 |
c40e84478b80fe3ebff8ada60a45ac082c6fa81c | 1,318 | py | Python | test/test_add_contact.py | Marat-github/new_proj_1 | a35d49f260ec08218a92782b83e974785c3a3581 | [
"Apache-2.0"
] | null | null | null | test/test_add_contact.py | Marat-github/new_proj_1 | a35d49f260ec08218a92782b83e974785c3a3581 | [
"Apache-2.0"
] | null | null | null | test/test_add_contact.py | Marat-github/new_proj_1 | a35d49f260ec08218a92782b83e974785c3a3581 | [
"Apache-2.0"
] | null | null | null |
from model.contact_class import Contact
def test_add_contact(app):
    """Log in, create a fully populated contact, then log out."""
    app.session.login(username="admin", password="secret")
    new_contact = Contact(
        firstname="1234",
        middlename="1243",
        lastname="234",
        nickname="324",
        title="324234",
        company="fdge",
        address="ebebebt",
        home_number="4532532",
        mobile_number="5435435435",
        work_number="234324",
        fax="12333",
        email="bteby56@mail.ru",
        email2="bteby56@mail.ru",
        email3="bteby56@mail.ru",
        homepage="bteby56@mail.ru",
        bday="10",
        bmonth="November",
        byear="1999",
        aday="12",
        amonth="December",
        ayear="2018",
        address2="greg3g43",
        phone2="32g4",
        notes="vbfdbr",
    )
    app.contact.create_contact(new_contact)
    app.session.logout()
def test_add_empty_contact(app):
    """Log in, create a contact with all fields empty (dates unset), log out."""
    app.session.login(username="admin", password="secret")
    empty_contact = Contact(
        firstname="",
        middlename="",
        lastname="",
        nickname="",
        title="",
        company="",
        address="",
        home_number="",
        mobile_number="",
        work_number="",
        fax="",
        email="",
        email2="",
        email3="",
        homepage="",
        bday="-",
        bmonth="-",
        byear="",
        aday="-",
        amonth="-",
        ayear="",
        address2="",
        phone2="",
        notes="",
    )
    app.contact.create_contact(empty_contact)
    app.session.logout()
| 57.304348 | 123 | 0.570561 |
470d6d544634af5f6d93cc19bd20c9e1176e8fd5 | 214 | py | Python | cryptosploit.py | nullpsifer/cryptosploit | e33cfca07397c05dffa734274c202acc7ff597b4 | [
"MIT"
] | null | null | null | cryptosploit.py | nullpsifer/cryptosploit | e33cfca07397c05dffa734274c202acc7ff597b4 | [
"MIT"
] | null | null | null | cryptosploit.py | nullpsifer/cryptosploit | e33cfca07397c05dffa734274c202acc7ff597b4 | [
"MIT"
] | null | null | null | from interfaces import *
from states import AwaitingCommandState
def main():
    """Create the terminal interface in its initial state and run the command loop."""
    terminal = TerminalInterface(AwaitingCommandState())
    terminal.handleCommands()
if __name__ == "__main__":
    main()
| 21.4 | 57 | 0.719626 |
bc4d51d3bd84b09763d10f222217339b64eb88b2 | 14,799 | py | Python | appGUI/preferences/tools/ToolsISOPrefGroupUI.py | DannyPol/flatcam | 25a8634d0658e98b7fae31a095f8bef40c1b3067 | [
"MIT"
] | 1 | 2022-02-11T06:19:34.000Z | 2022-02-11T06:19:34.000Z | appGUI/preferences/tools/ToolsISOPrefGroupUI.py | MRemy2/FlatCam | d4f941335ca8a8d5351aab23b396f99da06a9029 | [
"MIT"
] | null | null | null | appGUI/preferences/tools/ToolsISOPrefGroupUI.py | MRemy2/FlatCam | d4f941335ca8a8d5351aab23b396f99da06a9029 | [
"MIT"
] | null | null | null | from PyQt5 import QtWidgets
from PyQt5.QtCore import QSettings
from appGUI.GUIElements import RadioSet, FCDoubleSpinner, FCComboBox2, FCCheckBox, FCSpinner, NumericalEvalTupleEntry
from appGUI.preferences.OptionsGroupUI import OptionsGroupUI
import gettext
import appTranslation as fcTranslate
import builtins
fcTranslate.apply_language('strings')
# Install gettext's ``_`` as a builtin translation helper if the translation
# machinery has not provided one already.
if '_' not in builtins.__dict__:
    _ = gettext.gettext
# Persistent application settings; ``machinist_setting`` enables extended
# (machinist) mode when a value was previously stored, defaulting to 0 (off).
settings = QSettings("Open Source", "FlatCAM")
if settings.contains("machinist"):
    machinist_setting = settings.value('machinist', type=int)
else:
    machinist_setting = 0
class ToolsISOPrefGroupUI(OptionsGroupUI):
    """Preferences page collecting the default parameters for the Isolation tool.

    Builds a single grid of labeled widgets; widget ``objectName``s (e.g.
    "i_passes") are used elsewhere to map widgets to option keys.
    """
    def __init__(self, decimals=4, parent=None):
        # NOTE(review): ``self`` is passed as a positional argument to the
        # base __init__ here -- confirm this matches OptionsGroupUI's signature
        # (the same pattern appears to be used for these preference pages).
        super(ToolsISOPrefGroupUI, self).__init__(self, parent=parent)
        self.setTitle(str(_("Isolation Tool Options")))
        # Number of decimals used by all spinner widgets on this page.
        self.decimals = decimals
        # ## Clear non-copper regions
        self.iso_label = QtWidgets.QLabel("<b>%s:</b>" % _("Parameters"))
        self.iso_label.setToolTip(
            _("Create a Geometry object with\n"
              "toolpaths to cut around polygons.")
        )
        self.layout.addWidget(self.iso_label)
        grid0 = QtWidgets.QGridLayout()
        self.layout.addLayout(grid0)
        # Tool Dias
        isotdlabel = QtWidgets.QLabel('<b><font color="green">%s:</font></b>' % _('Tools Dia'))
        isotdlabel.setToolTip(
            _("Diameters of the tools, separated by comma.\n"
              "The value of the diameter has to use the dot decimals separator.\n"
              "Valid values: 0.3, 1.0")
        )
        self.tool_dia_entry = NumericalEvalTupleEntry(border_color='#0069A9')
        self.tool_dia_entry.setPlaceholderText(_("Comma separated values"))
        grid0.addWidget(isotdlabel, 0, 0)
        grid0.addWidget(self.tool_dia_entry, 0, 1, 1, 2)
        # Tool order Radio Button
        self.order_label = QtWidgets.QLabel('%s:' % _('Tool order'))
        self.order_label.setToolTip(_("This set the way that the tools in the tools table are used.\n"
                                      "'No' --> means that the used order is the one in the tool table\n"
                                      "'Forward' --> means that the tools will be ordered from small to big\n"
                                      "'Reverse' --> means that the tools will ordered from big to small\n\n"
                                      "WARNING: using rest machining will automatically set the order\n"
                                      "in reverse and disable this control."))
        self.order_radio = RadioSet([{'label': _('No'), 'value': 'no'},
                                     {'label': _('Forward'), 'value': 'fwd'},
                                     {'label': _('Reverse'), 'value': 'rev'}])
        grid0.addWidget(self.order_label, 1, 0)
        grid0.addWidget(self.order_radio, 1, 1, 1, 2)
        # Tool Type Radio Button
        self.tool_type_label = QtWidgets.QLabel('%s:' % _('Tool Type'))
        self.tool_type_label.setToolTip(
            _("Default tool type:\n"
              "- 'V-shape'\n"
              "- Circular")
        )
        self.tool_type_radio = RadioSet([{'label': _('V-shape'), 'value': 'V'},
                                         {'label': _('Circular'), 'value': 'C1'}])
        self.tool_type_radio.setToolTip(
            _("Default tool type:\n"
              "- 'V-shape'\n"
              "- Circular")
        )
        grid0.addWidget(self.tool_type_label, 2, 0)
        grid0.addWidget(self.tool_type_radio, 2, 1, 1, 2)
        # Tip Dia
        self.tipdialabel = QtWidgets.QLabel('%s:' % _('V-Tip Dia'))
        self.tipdialabel.setToolTip(
            _("The tip diameter for V-Shape Tool"))
        self.tipdia_entry = FCDoubleSpinner()
        self.tipdia_entry.set_precision(self.decimals)
        self.tipdia_entry.set_range(0, 1000)
        self.tipdia_entry.setSingleStep(0.1)
        grid0.addWidget(self.tipdialabel, 3, 0)
        grid0.addWidget(self.tipdia_entry, 3, 1, 1, 2)
        # Tip Angle
        self.tipanglelabel = QtWidgets.QLabel('%s:' % _('V-Tip Angle'))
        self.tipanglelabel.setToolTip(
            _("The tip angle for V-Shape Tool.\n"
              "In degrees."))
        self.tipangle_entry = FCDoubleSpinner()
        self.tipangle_entry.set_precision(self.decimals)
        self.tipangle_entry.set_range(1, 180)
        self.tipangle_entry.setSingleStep(5)
        self.tipangle_entry.setWrapping(True)
        grid0.addWidget(self.tipanglelabel, 4, 0)
        grid0.addWidget(self.tipangle_entry, 4, 1, 1, 2)
        # Cut Z entry
        cutzlabel = QtWidgets.QLabel('%s:' % _('Cut Z'))
        cutzlabel.setToolTip(
            _("Depth of cut into material. Negative value.\n"
              "In application units.")
        )
        self.cutz_entry = FCDoubleSpinner()
        self.cutz_entry.set_precision(self.decimals)
        self.cutz_entry.set_range(-10000.0000, 0.0000)
        self.cutz_entry.setSingleStep(0.1)
        self.cutz_entry.setToolTip(
            _("Depth of cut into material. Negative value.\n"
              "In application units.")
        )
        grid0.addWidget(cutzlabel, 5, 0)
        grid0.addWidget(self.cutz_entry, 5, 1, 1, 2)
        # New Diameter
        self.newdialabel = QtWidgets.QLabel('%s:' % _('New Dia'))
        self.newdialabel.setToolTip(
            _("Diameter for the new tool to add in the Tool Table.\n"
              "If the tool is V-shape type then this value is automatically\n"
              "calculated from the other parameters.")
        )
        self.newdia_entry = FCDoubleSpinner()
        self.newdia_entry.set_precision(self.decimals)
        self.newdia_entry.set_range(0.0001, 10000.0000)
        self.newdia_entry.setSingleStep(0.1)
        grid0.addWidget(self.newdialabel, 6, 0)
        grid0.addWidget(self.newdia_entry, 6, 1, 1, 2)
        separator_line = QtWidgets.QFrame()
        separator_line.setFrameShape(QtWidgets.QFrame.HLine)
        separator_line.setFrameShadow(QtWidgets.QFrame.Sunken)
        grid0.addWidget(separator_line, 7, 0, 1, 3)
        # Passes
        passlabel = QtWidgets.QLabel('%s:' % _('Passes'))
        passlabel.setToolTip(
            _("Width of the isolation gap in\n"
              "number (integer) of tool widths.")
        )
        self.passes_entry = FCSpinner()
        self.passes_entry.set_range(1, 999)
        self.passes_entry.setObjectName("i_passes")
        grid0.addWidget(passlabel, 8, 0)
        grid0.addWidget(self.passes_entry, 8, 1, 1, 2)
        # Overlap Entry
        overlabel = QtWidgets.QLabel('%s:' % _('Overlap'))
        overlabel.setToolTip(
            _("How much (percentage) of the tool width to overlap each tool pass.")
        )
        self.overlap_entry = FCDoubleSpinner(suffix='%')
        self.overlap_entry.set_precision(self.decimals)
        self.overlap_entry.setWrapping(True)
        self.overlap_entry.set_range(0.0000, 99.9999)
        self.overlap_entry.setSingleStep(0.1)
        self.overlap_entry.setObjectName("i_overlap")
        grid0.addWidget(overlabel, 9, 0)
        grid0.addWidget(self.overlap_entry, 9, 1, 1, 2)
        # Milling Type Radio Button
        self.milling_type_label = QtWidgets.QLabel('%s:' % _('Milling Type'))
        self.milling_type_label.setToolTip(
            _("Milling type:\n"
              "- climb / best for precision milling and to reduce tool usage\n"
              "- conventional / useful when there is no backlash compensation")
        )
        self.milling_type_radio = RadioSet([{'label': _('Climb'), 'value': 'cl'},
                                            {'label': _('Conventional'), 'value': 'cv'}])
        self.milling_type_radio.setToolTip(
            _("Milling type:\n"
              "- climb / best for precision milling and to reduce tool usage\n"
              "- conventional / useful when there is no backlash compensation")
        )
        grid0.addWidget(self.milling_type_label, 10, 0)
        grid0.addWidget(self.milling_type_radio, 10, 1, 1, 2)
        # Follow
        self.follow_label = QtWidgets.QLabel('%s:' % _('Follow'))
        self.follow_label.setToolTip(
            _("Generate a 'Follow' geometry.\n"
              "This means that it will cut through\n"
              "the middle of the trace.")
        )
        self.follow_cb = FCCheckBox()
        self.follow_cb.setToolTip(_("Generate a 'Follow' geometry.\n"
                                    "This means that it will cut through\n"
                                    "the middle of the trace."))
        self.follow_cb.setObjectName("i_follow")
        grid0.addWidget(self.follow_label, 11, 0)
        grid0.addWidget(self.follow_cb, 11, 1, 1, 2)
        # Isolation Type
        self.iso_type_label = QtWidgets.QLabel('%s:' % _('Isolation Type'))
        self.iso_type_label.setToolTip(
            _("Choose how the isolation will be executed:\n"
              "- 'Full' -> complete isolation of polygons\n"
              "- 'Ext' -> will isolate only on the outside\n"
              "- 'Int' -> will isolate only on the inside\n"
              "'Exterior' isolation is almost always possible\n"
              "(with the right tool) but 'Interior'\n"
              "isolation can be done only when there is an opening\n"
              "inside of the polygon (e.g polygon is a 'doughnut' shape).")
        )
        self.iso_type_radio = RadioSet([{'label': _('Full'), 'value': 'full'},
                                        {'label': _('Ext'), 'value': 'ext'},
                                        {'label': _('Int'), 'value': 'int'}])
        self.iso_type_radio.setObjectName("i_type")
        grid0.addWidget(self.iso_type_label, 12, 0)
        grid0.addWidget(self.iso_type_radio, 12, 1, 1, 2)
        separator_line = QtWidgets.QFrame()
        separator_line.setFrameShape(QtWidgets.QFrame.HLine)
        separator_line.setFrameShadow(QtWidgets.QFrame.Sunken)
        grid0.addWidget(separator_line, 13, 0, 1, 3)
        # Rest machining CheckBox
        self.rest_cb = FCCheckBox('%s' % _("Rest"))
        self.rest_cb.setObjectName("i_rest_machining")
        self.rest_cb.setToolTip(
            _("If checked, use 'rest machining'.\n"
              "Basically it will process copper outside PCB features,\n"
              "using the biggest tool and continue with the next tools,\n"
              "from bigger to smaller, to process the copper features that\n"
              "could not be processed by previous tool, until there is\n"
              "nothing left to process or there are no more tools.\n\n"
              "If not checked, use the standard algorithm.")
        )
        grid0.addWidget(self.rest_cb, 17, 0)
        # Combine All Passes
        self.combine_passes_cb = FCCheckBox(label=_('Combine'))
        self.combine_passes_cb.setToolTip(
            _("Combine all passes into one object")
        )
        self.combine_passes_cb.setObjectName("i_combine")
        grid0.addWidget(self.combine_passes_cb, 17, 1)
        # Exception Areas
        self.except_cb = FCCheckBox(label=_('Except'))
        self.except_cb.setToolTip(_("When the isolation geometry is generated,\n"
                                    "by checking this, the area of the object below\n"
                                    "will be subtracted from the isolation geometry."))
        self.except_cb.setObjectName("i_except")
        grid0.addWidget(self.except_cb, 17, 2)
        # Check Tool validity
        self.valid_cb = FCCheckBox(label=_('Check validity'))
        self.valid_cb.setToolTip(
            _("If checked then the tools diameters are verified\n"
              "if they will provide a complete isolation.")
        )
        self.valid_cb.setObjectName("i_check")
        grid0.addWidget(self.valid_cb, 18, 0, 1, 3)
        # Isolation Scope
        self.select_label = QtWidgets.QLabel('%s:' % _("Selection"))
        self.select_label.setToolTip(
            _("Isolation scope. Choose what to isolate:\n"
              "- 'All' -> Isolate all the polygons in the object\n"
              "- 'Area Selection' -> Isolate polygons within a selection area.\n"
              "- 'Polygon Selection' -> Isolate a selection of polygons.\n"
              "- 'Reference Object' - will process the area specified by another object.")
        )
        self.select_combo = FCComboBox2()
        self.select_combo.addItems(
            [_("All"), _("Area Selection"), _("Polygon Selection"), _("Reference Object")]
        )
        self.select_combo.setObjectName("i_selection")
        grid0.addWidget(self.select_label, 20, 0)
        grid0.addWidget(self.select_combo, 20, 1, 1, 2)
        # Area Shape
        self.area_shape_label = QtWidgets.QLabel('%s:' % _("Shape"))
        self.area_shape_label.setToolTip(
            _("The kind of selection shape used for area selection.")
        )
        self.area_shape_radio = RadioSet([{'label': _("Square"), 'value': 'square'},
                                          {'label': _("Polygon"), 'value': 'polygon'}])
        grid0.addWidget(self.area_shape_label, 21, 0)
        grid0.addWidget(self.area_shape_radio, 21, 1, 1, 2)
        # Polygon interiors selection
        self.poly_int_cb = FCCheckBox(_("Interiors"))
        self.poly_int_cb.setToolTip(
            _("When checked the user can select interiors of a polygon.\n"
              "(holes in the polygon).")
        )
        # Force isolation even if the interiors are not isolated
        self.force_iso_cb = FCCheckBox(_("Forced Rest"))
        self.force_iso_cb.setToolTip(
            _("When checked the isolation will be done with the current tool even if\n"
              "interiors of a polygon (holes in the polygon) could not be isolated.\n"
              "Works when 'rest machining' is used.")
        )
        grid0.addWidget(self.poly_int_cb, 22, 0)
        grid0.addWidget(self.force_iso_cb, 22, 1)
        separator_line = QtWidgets.QFrame()
        separator_line.setFrameShape(QtWidgets.QFrame.HLine)
        separator_line.setFrameShadow(QtWidgets.QFrame.Sunken)
        grid0.addWidget(separator_line, 24, 0, 1, 3)
        # ## Plotting type
        self.plotting_radio = RadioSet([{'label': _('Normal'), 'value': 'normal'},
                                        {"label": _("Progressive"), "value": "progressive"}])
        plotting_label = QtWidgets.QLabel('%s:' % _("Plotting"))
        plotting_label.setToolTip(
            _("- 'Normal' - normal plotting, done at the end of the job\n"
              "- 'Progressive' - each shape is plotted after it is generated")
        )
        grid0.addWidget(plotting_label, 25, 0)
        grid0.addWidget(self.plotting_radio, 25, 1, 1, 2)
        self.layout.addStretch()
| 42.525862 | 117 | 0.593081 |
3883c2a3b6513acfd31aa53f189c2c2ef634ded8 | 2,047 | py | Python | distopia/district/__init__.py | kevinguo344/distopia | 077dd3501bd43565d1a9647a151fb20b90b71a54 | [
"MIT"
] | null | null | null | distopia/district/__init__.py | kevinguo344/distopia | 077dd3501bd43565d1a9647a151fb20b90b71a54 | [
"MIT"
] | null | null | null | distopia/district/__init__.py | kevinguo344/distopia | 077dd3501bd43565d1a9647a151fb20b90b71a54 | [
"MIT"
] | null | null | null | """
District
=========
:class:`District` defines a district and the precincts it contains.
"""
__all__ = ('District', )
class District(object):
    """
    Describes a district, its precincts and its metrics.
    """

    name = ''
    """A globally unique name (or number) describing the district.
    """
    identity = 0
    """The id of the district. """
    boundary = []
    """A list of the ``x``, ``y`` coordinates of the polygon that
    describes the district's boundary.
    """
    neighbours = []
    """List of other :class:`District`'s that are on the boundary of this
    district.
    """
    precincts = []
    """List of :class:`~distopia.precinct.Precinct` instances that are
    currently contained within this district.
    """
    metrics = {}
    """A mapping from :attr:`~distopia.district.metrics.DistrictMetric.name`
    to the :class:`~distopia.district.metrics.DistrictMetric` instance that
    contains the summary metric data for this district.
    """
    collider = None

    def __init__(self, **kwargs):
        super(District, self).__init__(**kwargs)
        # BUG FIX: rebind ``boundary`` on the instance. The class attribute
        # above is a mutable list shared by *all* District objects, so
        # mutating one district's boundary in place would silently mutate
        # every district's boundary. ``clear()`` below rebinds the other
        # mutable containers (neighbours/precincts/metrics) per instance.
        self.boundary = []
        self.clear()

    def clear(self):
        """Clears all the existing precincts from the district.

        Each previously assigned precinct gets its ``district`` attribute
        reset to ``None``. The boundary is intentionally left untouched.
        """
        for precinct in self.precincts:
            precinct.district = None
        self.neighbours = []
        self.precincts = []
        self.metrics = {}

    def assign_precincts(self, precincts):
        """Adds the precincts to the district.

        :param precincts: Iterable of :class:`~distopia.precinct.Precinct`
            instances.
        """
        self.precincts = list(precincts)
        for precinct in precincts:
            precinct.district = self

    def add_precinct(self, precinct):
        """Adds a precinct to the district.

        :param precinct: :class:`~distopia.precinct.Precinct` instance.
        """
        precinct.district = self
        self.precincts.append(precinct)

    def compute_metrics(self):
        """Recomputes every metric currently attached to the district."""
        for metric in self.metrics.values():
            metric.compute()
| 25.271605 | 77 | 0.608207 |
7fcc4151fc635130ed97cbcc693c2d2a28801b9f | 2,323 | py | Python | var/spack/repos/builtin/packages/py-bluepysnap/package.py | DriesVerachtert/spack | 7b55a4cc4cddb183138e4af2d140567a98297543 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/py-bluepysnap/package.py | DriesVerachtert/spack | 7b55a4cc4cddb183138e4af2d140567a98297543 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/py-bluepysnap/package.py | DriesVerachtert/spack | 7b55a4cc4cddb183138e4af2d140567a98297543 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyBluepysnap(PythonPackage):
    """Pythonic Sonata circuits access API"""

    homepage = "https://github.com/BlueBrain/snap"
    git = "https://github.com/BlueBrain/snap.git"
    url = "https://pypi.io/packages/source/b/bluepysnap/bluepysnap-0.9.0.tar.gz"

    # Known releases, newest first, pinned by the sha256 of the sdist.
    version('0.9.0', sha256='140e06b9a7cc90719ffaf4c71ffaa9320c13bdd8ef25ffb598fd348b850b6695')
    version('0.8.0', sha256='10337610cfb83121f2869cec53830de04eed8e90b2b27aba36b8799468fa9c0f')
    version('0.7.0', sha256='fa4d54539fdb98c5febdabf7f13786567fc8fbd5e86f95864b4de89f18fd97bd')
    version('0.6.1', sha256='f17cdd43a9f444e4825ab9578b3184fb17df76b1598852d3b74161647a096285')
    version('0.5.2', sha256='d97c3fcd05261ba68e855e828b88d37fa1a7302abd79163d2d8ee806b1aad9b3')
    version('0.5.1', sha256='81cbeab26b219b52f496ea55145e60f6b826c697265f4fe2d1effe5df249cabf')
    version('0.5.0', sha256='97a31644d1e1daccb708e92734dfb873bc0b7aa7f98939527b91cde895cdef74')
    version('0.4.1', sha256='cbb16a0cbd43ae4ad9e35b2fad0c986ebf9029e386087c0a934565da428ad558')
    version('0.2.0', sha256='95273bedee0ad0b9957aed40dadc94c4a0973ba014bbc215172a9c2e7144d64a')
    version('0.1.2', sha256='aa29c1344258c1ca0cf7f5f53f3080025b9d098e3366872369586ad7ccf334a2')

    # Build- and run-time dependencies shared by all versions.
    depends_on('py-setuptools', type=('build', 'run'))
    depends_on('py-pandas@0.24.0:', type='run')
    depends_on('py-libsonata@0.1.4:', type='run')
    depends_on('py-numpy@1.8:', type='run')
    depends_on('py-click@7.0:', type='run')
    depends_on('py-cached-property@1.0:', type='run')
    depends_on('py-more-itertools@7.2.0:', type='run')
    # Version 0.8.0 is the switch between the python2+python3
    # and python3 only + the h5py > 3.0.0
    depends_on('py-h5py@2.2:2.99', type='run', when='@:0.7.9')
    depends_on('py-h5py@3.0.1:', type='run', when='@0.8.0:')
    depends_on('py-neurom@1.3:1.5.99', type='run', when='@:0.7.9')
    depends_on('py-neurom@1.6:', type='run', when='@0.8.0:')
    # python2 only so only < 0.8.0
    depends_on('py-pathlib2@2.3:', type='run', when='@:0.7.9')
    depends_on('py-six@1.0:', type='run', when='@:0.7.9')
| 48.395833 | 95 | 0.717607 |
cb748998520081d003f6e3d859d246ad5c427f21 | 31,540 | py | Python | pygbm/gradient_boosting.py | aniajj9/pygbm | 08206c88d703f3085b4a18b8ceef83b3450cfc91 | [
"MIT"
] | null | null | null | pygbm/gradient_boosting.py | aniajj9/pygbm | 08206c88d703f3085b4a18b8ceef83b3450cfc91 | [
"MIT"
] | null | null | null | pygbm/gradient_boosting.py | aniajj9/pygbm | 08206c88d703f3085b4a18b8ceef83b3450cfc91 | [
"MIT"
] | null | null | null | """
Gradient Boosting decision trees for classification and regression.
"""
from abc import ABC, abstractmethod
import numpy as np
from numba import njit, prange
from time import time
from sklearn.base import BaseEstimator, RegressorMixin, ClassifierMixin
from sklearn.utils import check_X_y, check_random_state, check_array
from sklearn.utils.validation import check_is_fitted
from sklearn.utils.multiclass import check_classification_targets
from sklearn.metrics import check_scoring
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from pygbm.binning import BinMapper
from pygbm.grower import TreeGrower
from pygbm.loss import _LOSSES
class BaseGradientBoostingMachine(BaseEstimator, ABC):
"""Base class for gradient boosting estimators."""
@abstractmethod
def __init__(self, loss, learning_rate, max_iter, max_leaf_nodes,
             max_depth, min_samples_leaf, l2_regularization, max_bins,
             scoring, validation_split, n_iter_no_change, tol, verbose,
             random_state):
    """Store the hyper-parameters verbatim.

    Following the scikit-learn estimator convention, no validation or
    transformation happens here; parameters are checked in
    ``_validate_parameters``, called from ``fit_model``.
    """
    self.loss = loss
    self.learning_rate = learning_rate
    self.max_iter = max_iter
    self.max_leaf_nodes = max_leaf_nodes
    self.max_depth = max_depth
    self.min_samples_leaf = min_samples_leaf
    self.l2_regularization = l2_regularization
    self.max_bins = max_bins
    self.n_iter_no_change = n_iter_no_change
    self.validation_split = validation_split
    self.scoring = scoring
    self.tol = tol
    self.verbose = verbose
    self.random_state = random_state
def _validate_parameters(self, X):
    """Validate parameters passed to __init__.

    The parameters that are directly passed to the grower are checked in
    TreeGrower.

    Parameters
    ----------
    X : array-like
        Training data; only its dtype and (for pre-binned uint8 data)
        its maximum bin index are inspected here.

    Raises
    ------
    ValueError
        If any hyper-parameter is outside its valid range, or if
        pre-binned data uses more bins than ``max_bins``.
    """
    if self.loss not in self._VALID_LOSSES:
        # Bug fix: the two literals used to concatenate into
        # "Accepted lossesare" (missing space).
        raise ValueError(
            "Loss {} is not supported for {}. Accepted losses "
            "are {}.".format(self.loss, self.__class__.__name__,
                             ', '.join(self._VALID_LOSSES)))
    if self.learning_rate <= 0:
        raise ValueError(f'learning_rate={self.learning_rate} must '
                         f'be strictly positive')
    if self.max_iter < 1:
        raise ValueError(f'max_iter={self.max_iter} must '
                         f'not be smaller than 1.')
    if self.n_iter_no_change is not None and self.n_iter_no_change < 0:
        raise ValueError(f'n_iter_no_change={self.n_iter_no_change} '
                         f'must be positive.')
    if self.validation_split is not None and self.validation_split <= 0:
        raise ValueError(f'validation_split={self.validation_split} '
                         f'must be strictly positive, or None.')
    if self.tol is not None and self.tol < 0:
        raise ValueError(f'tol={self.tol} '
                         f'must not be smaller than 0.')
    # Bins are stored as uint8, so more than 256 bins cannot be
    # represented; the class docstrings document "no larger than 256"
    # but the check was missing here.
    if not 2 <= self.max_bins <= 256:
        raise ValueError(f'max_bins={self.max_bins} should be no '
                         f'smaller than 2 and no larger than 256.')
    if X.dtype == np.uint8:  # pre-binned data
        max_bin_index = X.max()
        if self.max_bins < max_bin_index + 1:
            raise ValueError(
                f'max_bins is set to {self.max_bins} but the data is '
                f'pre-binned with {max_bin_index + 1} bins.'
            )
def fit_model(self, X, y):
    """Fit the gradient boosting model.

    Parameters
    ----------
    X : array-like, shape=(n_samples, n_features)
        The input samples. If ``X.dtype == np.uint8``, the data is
        assumed to be pre-binned and the prediction methods
        (``predict``, ``predict_proba``) will only accept pre-binned
        data as well.
    y : array-like, shape=(n_samples,)
        Target values.

    Returns
    -------
    self : object
    """
    fit_start_time = time()
    acc_find_split_time = 0.  # time spent finding the best splits
    acc_apply_split_time = 0.  # time spent splitting nodes
    # time spent predicting X for gradient and hessians update
    acc_prediction_time = 0.
    # TODO: add support for mixed-typed (numerical + categorical) data
    # TODO: add support for missing data
    X, y = check_X_y(X, y, dtype=[np.float32, np.float64, np.uint8])
    y = self._encode_y(y)
    if X.shape[0] == 1 or X.shape[1] == 1:
        raise ValueError(
            'Passing only one sample or one feature is not supported yet. '
            'See numba issue #3569.'
        )
    rng = check_random_state(self.random_state)
    self._validate_parameters(X)
    self.n_features_ = X.shape[1]  # used for validation in predict()
    if X.dtype == np.uint8:  # data is pre-binned: skip the BinMapper
        if self.verbose:
            print("X is pre-binned.")
        X_binned = X
        self.bin_mapper_ = None
        numerical_thresholds = None
        n_bins_per_feature = X.max(axis=0).astype(np.uint32)
    else:
        # Bin the raw data into at most max_bins integer-valued bins.
        if self.verbose:
            print(f"Binning {X.nbytes / 1e9:.3f} GB of data: ", end="",
                  flush=True)
        tic = time()
        self.bin_mapper_ = BinMapper(max_bins=self.max_bins,
                                     random_state=rng)
        X_binned = self.bin_mapper_.fit_transform(X)
        numerical_thresholds = self.bin_mapper_.numerical_thresholds_
        n_bins_per_feature = self.bin_mapper_.n_bins_per_feature_
        toc = time()
        if self.verbose:
            duration = toc - tic
            throughput = X.nbytes / duration
            print(f"{duration:.3f} s ({throughput / 1e6:.3f} MB/s)")
    self.loss_ = self._get_loss()
    # Early stopping is active iff n_iter_no_change is a positive int.
    do_early_stopping = (self.n_iter_no_change is not None and
                         self.n_iter_no_change > 0)
    if do_early_stopping and self.validation_split is not None:
        # stratify for classification
        stratify = y if hasattr(self.loss_, 'predict_proba') else None
        X_binned_train, X_binned_val, y_train, y_val = train_test_split(
            X_binned, y, test_size=self.validation_split,
            stratify=stratify, random_state=rng)
        if X_binned_train.size == 0 or X_binned_val.size == 0:
            raise ValueError(
                f'Not enough data (n_samples={X_binned.shape[0]}) to '
                f'perform early stopping with validation_split='
                f'{self.validation_split}. Use more training data or '
                f'adjust validation_split.'
            )
        # Predicting is faster on C-contiguous arrays, training is faster
        # on Fortran arrays.
        X_binned_val = np.ascontiguousarray(X_binned_val)
        X_binned_train = np.asfortranarray(X_binned_train)
    else:
        X_binned_train, y_train = X_binned, y
        X_binned_val, y_val = None, None
    # Subsample the training set for score-based monitoring.
    if do_early_stopping:
        subsample_size = 10000
        n_samples_train = X_binned_train.shape[0]
        if n_samples_train > subsample_size:
            indices = rng.choice(X_binned_train.shape[0], subsample_size)
            X_binned_small_train = X_binned_train[indices]
            y_small_train = y_train[indices]
        else:
            X_binned_small_train = X_binned_train
            y_small_train = y_train
        # Predicting is faster on C-contiguous arrays.
        X_binned_small_train = np.ascontiguousarray(X_binned_small_train)
    if self.verbose:
        print("Fitting gradient boosted rounds:")
    n_samples = X_binned_train.shape[0]
    self.baseline_prediction_ = self.loss_.get_baseline_prediction(
        y_train, self.n_trees_per_iteration_)
    # raw_predictions are the accumulated values predicted by the trees
    # for the training data.
    raw_predictions = np.zeros(
        shape=(n_samples, self.n_trees_per_iteration_),
        dtype=self.baseline_prediction_.dtype
    )
    raw_predictions += self.baseline_prediction_
    # gradients and hessians are 1D arrays of size
    # n_samples * n_trees_per_iteration
    gradients, hessians = self.loss_.init_gradients_and_hessians(
        n_samples=n_samples,
        prediction_dim=self.n_trees_per_iteration_
    )
    # predictors_ is a matrix of TreePredictor objects with shape
    # (n_iter_, n_trees_per_iteration)
    self.predictors_ = predictors = []
    # scorer_ is a callable with signature (est, X, y) and calls
    # est.predict() or est.predict_proba() depending on its nature.
    self.scorer_ = check_scoring(self, self.scoring)
    self.train_scores_ = []
    self.validation_scores_ = []
    if do_early_stopping:
        # Add predictions of the initial model (before the first tree)
        self.train_scores_.append(
            self._get_scores(X_binned_train, y_train))
        if self.validation_split is not None:
            self.validation_scores_.append(
                self._get_scores(X_binned_val, y_val))
    # Main boosting loop: each iteration adds n_trees_per_iteration_
    # new trees fit on the current gradients/hessians.
    for iteration in range(self.max_iter):
        if self.verbose:
            iteration_start_time = time()
            print(f"[{iteration + 1}/{self.max_iter}] ", end='',
                  flush=True)
        # Update gradients and hessians, inplace
        self.loss_.update_gradients_and_hessians(gradients, hessians,
                                                 y_train, raw_predictions)
        predictors.append([])
        # Build `n_trees_per_iteration` trees.
        for k, (gradients_at_k, hessians_at_k) in enumerate(zip(
                np.array_split(gradients, self.n_trees_per_iteration_),
                np.array_split(hessians, self.n_trees_per_iteration_))):
            # the xxxx_at_k arrays are **views** on the original arrays.
            # Note that for binary classif and regressions,
            # n_trees_per_iteration is 1 and xxxx_at_k is equivalent to the
            # whole array.
            grower = TreeGrower(
                X_binned_train, gradients_at_k, hessians_at_k,
                max_bins=self.max_bins,
                n_bins_per_feature=n_bins_per_feature,
                max_leaf_nodes=self.max_leaf_nodes,
                max_depth=self.max_depth,
                min_samples_leaf=self.min_samples_leaf,
                l2_regularization=self.l2_regularization,
                shrinkage=self.learning_rate)
            grower.grow()
            acc_apply_split_time += grower.total_apply_split_time
            acc_find_split_time += grower.total_find_split_time
            predictor = grower.make_predictor(numerical_thresholds)
            predictors[-1].append(predictor)
            tic_pred = time()
            # prepare leaves_data so that _update_raw_predictions can be
            # @njitted
            leaves_data = [(l.value, l.sample_indices)
                           for l in grower.finalized_leaves]
            _update_raw_predictions(leaves_data, raw_predictions[:, k])
            toc_pred = time()
            acc_prediction_time += toc_pred - tic_pred
        should_early_stop = False
        if do_early_stopping:
            should_early_stop = self._check_early_stopping(
                X_binned_small_train, y_small_train,
                X_binned_val, y_val)
        if self.verbose:
            self._print_iteration_stats(iteration_start_time,
                                        do_early_stopping)
        if should_early_stop:
            break
    if self.verbose:
        duration = time() - fit_start_time
        n_total_leaves = sum(
            predictor.get_n_leaf_nodes()
            for predictors_at_ith_iteration in self.predictors_
            for predictor in predictors_at_ith_iteration)
        n_predictors = sum(
            len(predictors_at_ith_iteration)
            for predictors_at_ith_iteration in self.predictors_)
        print(f"Fit {n_predictors} trees in {duration:.3f} s, "
              f"({n_total_leaves} total leaves)")
        print(f"{'Time spent finding best splits:':<32} "
              f"{acc_find_split_time:.3f}s")
        print(f"{'Time spent applying splits:':<32} "
              f"{acc_apply_split_time:.3f}s")
        print(f"{'Time spent predicting:':<32} "
              f"{acc_prediction_time:.3f}s")
    self.train_scores_ = np.asarray(self.train_scores_)
    self.validation_scores_ = np.asarray(self.validation_scores_)
    return self
def _check_early_stopping(self, X_binned_train, y_train,
                          X_binned_val, y_val):
    """Decide whether fitting should stop early.

    The training score is always recorded; when a validation split is
    configured the validation score is recorded too and drives the
    decision, otherwise the training scores are used.
    """
    self.train_scores_.append(self._get_scores(X_binned_train, y_train))
    use_validation = self.validation_split is not None
    if use_validation:
        self.validation_scores_.append(
            self._get_scores(X_binned_val, y_val))
    monitored = (self.validation_scores_ if use_validation
                 else self.train_scores_)
    return self._should_stop(monitored)
def _should_stop(self, scores):
    """Return True (early stop) when none of the last
    ``n_iter_no_change`` scores improved on the reference score.

    A higher score is always better. The tolerance raises the bar a
    recent score must clear to count as an improvement, so a larger
    ``tol`` makes early stopping more likely.
    """
    lookback = self.n_iter_no_change + 1
    if len(scores) < lookback:
        # Not enough history yet to make a decision.
        return False
    tolerance = 0 if self.tol is None else self.tol
    threshold = scores[-lookback] + tolerance
    # Stop only if every recent score failed to beat the threshold.
    return all(score <= threshold for score in scores[-lookback + 1:])
def _get_scores(self, X, y):
    """Score data X against target y; higher is always better.

    When ``scoring`` is configured, the scikit-learn scorer is used;
    otherwise the negative loss is returned so that the "higher is
    better" convention still holds.
    """
    if self.scoring is None:
        raw_predictions = self._raw_predict(X)
        return -self.loss_(y, raw_predictions)
    return self.scorer_(self, X, y)
def _print_iteration_stats(self, iteration_start_time, do_early_stopping):
    """Print info about the current fitting iteration."""
    log_msg = ''
    # Keep only non-empty per-class predictor lists for this iteration.
    predictors_of_ith_iteration = [
        predictors_list for predictors_list in self.predictors_[-1]
        if predictors_list
    ]
    n_trees = len(predictors_of_ith_iteration)
    max_depth = max(predictor.get_max_depth()
                    for predictor in predictors_of_ith_iteration)
    n_leaves = sum(predictor.get_n_leaf_nodes()
                   for predictor in predictors_of_ith_iteration)
    # Singular/plural wording; the average is only meaningful with
    # more than one tree (multiclass case).
    if n_trees == 1:
        log_msg += (f"{n_trees} tree, {n_leaves} leaves, ")
    else:
        log_msg += (f"{n_trees} trees, {n_leaves} leaves ")
        log_msg += (f"({int(n_leaves / n_trees)} on avg), ")
    log_msg += f"max depth = {max_depth}, "
    if do_early_stopping:
        log_msg += f"{self.scoring} train: {self.train_scores_[-1]:.5f}, "
        if self.validation_split is not None:
            log_msg += (f"{self.scoring} val: "
                        f"{self.validation_scores_[-1]:.5f}, ")
    iteration_time = time() - iteration_start_time
    log_msg += f"in {iteration_time:0.3f}s"
    print(log_msg)
def _raw_predict(self, X):
    """Sum the leaf values of all fitted predictors for ``X``.

    Parameters
    ----------
    X : array-like, shape=(n_samples, n_features)
        The input samples. If ``X.dtype == np.uint8``, the data is assumed
        to be pre-binned and the estimator must have been fitted with
        pre-binned data.

    Returns
    -------
    raw_predictions : array, shape (n_samples, n_trees_per_iteration)
        The raw predicted values (baseline plus every tree's leaves).
    """
    X = check_array(X)
    check_is_fitted(self, 'predictors_')
    if X.shape[1] != self.n_features_:
        raise ValueError(
            f'X has {X.shape[1]} features but this estimator was '
            f'trained with {self.n_features_} features.'
        )
    binned_input = X.dtype == np.uint8
    if not binned_input and self.bin_mapper_ is None:
        raise ValueError(
            'This estimator was fitted with pre-binned data and '
            'can only predict pre-binned data as well. If your data *is* '
            'already pre-binnned, convert it to uint8 using e.g. '
            'X.astype(np.uint8). If the data passed to fit() was *not* '
            'pre-binned, convert it to float32 and call fit() again.'
        )
    # Start from the baseline, then accumulate every tree's output.
    out = np.zeros(
        shape=(X.shape[0], self.n_trees_per_iteration_),
        dtype=self.baseline_prediction_.dtype
    )
    out += self.baseline_prediction_
    for stage_predictors in self.predictors_:
        for k, predictor in enumerate(stage_predictors):
            if binned_input:
                out[:, k] += predictor.predict_binned(X)
            else:
                out[:, k] += predictor.predict(X)
    return out
@abstractmethod
def _get_loss(self):
    # Subclasses return the loss instance used during fitting
    # (see the regressor/classifier implementations below).
    pass
@abstractmethod
def _encode_y(self, y=None):
    # Subclasses convert/encode the targets to float32 and set
    # n_trees_per_iteration_.
    pass
@property
def n_iter_(self):
    """Number of boosting iterations actually performed.

    Only available after fitting; may be smaller than ``max_iter``
    when early stopping triggered.
    """
    check_is_fitted(self, 'predictors_')
    return len(self.predictors_)
class GradientBoostingRegressor(BaseGradientBoostingMachine, RegressorMixin):
"""Scikit-learn compatible Gradient Boosting Tree for regression.
Parameters
----------
loss : {'least_squares'}, optional(default='least_squares')
The loss function to use in the boosting process.
learning_rate : float, optional(default=0.1)
The learning rate, also known as *shrinkage*. This is used as a
multiplicative factor for the leaves values. Use ``1`` for no
shrinkage.
max_iter : int, optional(default=100)
The maximum number of iterations of the boosting process, i.e. the
maximum number of trees.
max_leaf_nodes : int or None, optional(default=None)
The maximum number of leaves for each tree. If None, there is no
maximum limit.
max_depth : int or None, optional(default=None)
The maximum depth of each tree. The depth of a tree is the number of
nodes to go from the root to the deepest leaf.
min_samples_leaf : int, optional(default=20)
The minimum number of samples per leaf.
l2_regularization : float, optional(default=0)
The L2 regularization parameter. Use 0 for no regularization.
max_bins : int, optional(default=256)
The maximum number of bins to use. Before training, each feature of
the input array ``X`` is binned into at most ``max_bins`` bins, which
allows for a much faster training stage. Features with a small
number of unique values may use less than ``max_bins`` bins. Must be no
larger than 256.
scoring : str or callable or None, \
optional (default=None)
Scoring parameter to use for early stopping (see sklearn.metrics for
        available options). If None, early stopping is checked w.r.t the loss
value.
validation_split : int or float or None, optional(default=0.1)
Proportion (or absolute size) of training data to set aside as
validation data for early stopping. If None, early stopping is done on
the training data.
n_iter_no_change : int or None, optional (default=5)
Used to determine when to "early stop". The fitting process is
stopped when none of the last ``n_iter_no_change`` scores are better
than the ``n_iter_no_change - 1``th-to-last one, up to some
tolerance. If None or 0, no early-stopping is done.
tol : float or None optional (default=1e-7)
The absolute tolerance to use when comparing scores. The higher the
tolerance, the more likely we are to early stop: higher tolerance
means that it will be harder for subsequent iterations to be
considered an improvement upon the reference score.
verbose: int, optional (default=0)
The verbosity level. If not zero, print some information about the
fitting process.
random_state : int, np.random.RandomStateInstance or None, \
optional (default=None)
Pseudo-random number generator to control the subsampling in the
binning process, and the train/validation data split if early stopping
is enabled. See
`scikit-learn glossary
<https://scikit-learn.org/stable/glossary.html#term-random-state>`_.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from pygbm import GradientBoostingRegressor
>>> X, y = load_boston(return_X_y=True)
>>> est = GradientBoostingRegressor().fit(X, y)
>>> est.score(X, y)
0.92...
"""
_VALID_LOSSES = ('least_squares',)

def __init__(self, loss='least_squares', learning_rate=0.1,
             max_iter=100, max_leaf_nodes=31, max_depth=None,
             min_samples_leaf=20, l2_regularization=0., max_bins=256,
             scoring=None, validation_split=0.1, n_iter_no_change=5,
             tol=1e-7, verbose=0, random_state=None):
    # Pure pass-through to the base class; parameter semantics are
    # documented in the class docstring.
    super(GradientBoostingRegressor, self).__init__(
        loss=loss, learning_rate=learning_rate, max_iter=max_iter,
        max_leaf_nodes=max_leaf_nodes, max_depth=max_depth,
        min_samples_leaf=min_samples_leaf,
        l2_regularization=l2_regularization, max_bins=max_bins,
        scoring=scoring, validation_split=validation_split,
        n_iter_no_change=n_iter_no_change, tol=tol, verbose=verbose,
        random_state=random_state)
def predict(self, X):
    """Predict target values for X.

    Parameters
    ----------
    X : array-like, shape=(n_samples, n_features)
        The input samples. If ``X.dtype == np.uint8``, the data is assumed
        to be pre-binned and the estimator must have been fitted with
        pre-binned data.

    Returns
    -------
    y : array, shape (n_samples,)
        The predicted values.
    """
    # Raw predictions have shape (n_samples, 1) for regression;
    # flatten to the conventional 1-D output.
    raw_predictions = self._raw_predict(X)
    return raw_predictions.ravel()
def _encode_y(self, y):
    """Cast the targets to float32.

    Regression always grows exactly one tree per boosting iteration.
    """
    self.n_trees_per_iteration_ = 1
    return y.astype(np.float32, copy=False)
def _get_loss(self):
    """Instantiate the configured loss object."""
    loss_class = _LOSSES[self.loss]
    return loss_class()
class GradientBoostingClassifier(BaseGradientBoostingMachine, ClassifierMixin):
"""Scikit-learn compatible Gradient Boosting Tree for classification.
Parameters
----------
loss : {'auto', 'binary_crossentropy', 'categorical_crossentropy'}, \
optional(default='auto')
The loss function to use in the boosting process. 'binary_crossentropy'
(also known as logistic loss) is used for binary classification and
generalizes to 'categorical_crossentropy' for multiclass
classification. 'auto' will automatically choose either loss depending
on the nature of the problem.
    learning_rate : float, optional(default=0.1)
The learning rate, also known as *shrinkage*. This is used as a
multiplicative factor for the leaves values. Use ``1`` for no
shrinkage.
max_iter : int, optional(default=100)
The maximum number of iterations of the boosting process, i.e. the
maximum number of trees for binary classification. For multiclass
classification, `n_classes` trees per iteration are built.
max_leaf_nodes : int or None, optional(default=None)
The maximum number of leaves for each tree. If None, there is no
maximum limit.
max_depth : int or None, optional(default=None)
The maximum depth of each tree. The depth of a tree is the number of
nodes to go from the root to the deepest leaf.
min_samples_leaf : int, optional(default=20)
The minimum number of samples per leaf.
l2_regularization : float, optional(default=0)
The L2 regularization parameter. Use 0 for no regularization.
max_bins : int, optional(default=256)
The maximum number of bins to use. Before training, each feature of
the input array ``X`` is binned into at most ``max_bins`` bins, which
allows for a much faster training stage. Features with a small
number of unique values may use less than ``max_bins`` bins. Must be no
larger than 256.
scoring : str or callable or None, optional (default=None)
Scoring parameter to use for early stopping (see sklearn.metrics for
        available options). If None, early stopping is checked w.r.t the loss
value.
validation_split : int or float or None, optional(default=0.1)
Proportion (or absolute size) of training data to set aside as
validation data for early stopping. If None, early stopping is done on
the training data.
n_iter_no_change : int or None, optional (default=5)
Used to determine when to "early stop". The fitting process is
stopped when none of the last ``n_iter_no_change`` scores are better
than the ``n_iter_no_change - 1``th-to-last one, up to some
tolerance. If None or 0, no early-stopping is done.
tol : float or None optional (default=1e-7)
The absolute tolerance to use when comparing scores. The higher the
tolerance, the more likely we are to early stop: higher tolerance
means that it will be harder for subsequent iterations to be
considered an improvement upon the reference score.
verbose: int, optional(default=0)
The verbosity level. If not zero, print some information about the
fitting process.
random_state : int, np.random.RandomStateInstance or None, \
optional(default=None)
Pseudo-random number generator to control the subsampling in the
binning process, and the train/validation data split if early stopping
is enabled. See `scikit-learn glossary
<https://scikit-learn.org/stable/glossary.html#term-random-state>`_.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from pygbm import GradientBoostingClassifier
>>> X, y = load_iris(return_X_y=True)
>>> clf = GradientBoostingClassifier().fit(X, y)
>>> clf.score(X, y)
0.97...
"""
_VALID_LOSSES = ('binary_crossentropy', 'categorical_crossentropy',
                 'auto')

def __init__(self, loss='auto', learning_rate=0.1, max_iter=100,
             max_leaf_nodes=31, max_depth=None, min_samples_leaf=20,
             l2_regularization=0., max_bins=256, scoring=None,
             validation_split=0.1, n_iter_no_change=5, tol=1e-7,
             verbose=0, random_state=None):
    # Pure pass-through to the base class; see the class docstring for
    # parameter semantics. 'auto' is resolved to binary or categorical
    # cross-entropy in _get_loss once the number of classes is known.
    super(GradientBoostingClassifier, self).__init__(
        loss=loss, learning_rate=learning_rate, max_iter=max_iter,
        max_leaf_nodes=max_leaf_nodes, max_depth=max_depth,
        min_samples_leaf=min_samples_leaf,
        l2_regularization=l2_regularization, max_bins=max_bins,
        scoring=scoring, validation_split=validation_split,
        n_iter_no_change=n_iter_no_change, tol=tol, verbose=verbose,
        random_state=random_state)
def predict(self, X):
    """Predict class labels for X.

    Parameters
    ----------
    X : array-like, shape=(n_samples, n_features)
        The input samples. If ``X.dtype == np.uint8``, the data is assumed
        to be pre-binned and the estimator must have been fitted with
        pre-binned data.

    Returns
    -------
    y : array, shape (n_samples,)
        The predicted classes.
    """
    # Pick the most probable class per sample and map back to the
    # original labels.
    proba = self.predict_proba(X)
    best_class_idx = np.argmax(proba, axis=1)
    return self.classes_[best_class_idx]
def predict_proba(self, X):
    """Predict class probabilities for X.

    Parameters
    ----------
    X : array-like, shape=(n_samples, n_features)
        The input samples. If ``X.dtype == np.uint8``, the data is assumed
        to be pre-binned and the estimator must have been fitted with
        pre-binned data.

    Returns
    -------
    p : array, shape (n_samples, n_classes)
        The class probabilities of the input samples.
    """
    # The loss object turns raw leaf sums into probabilities
    # (sigmoid or softmax depending on the loss).
    return self.loss_.predict_proba(self._raw_predict(X))
def _encode_y(self, y):
    """Encode class labels as 0 ... n_classes - 1 float32 values.

    Also sets ``classes_`` and ``n_trees_per_iteration_``: one tree per
    iteration for binary problems, one tree per class otherwise.
    """
    check_classification_targets(y)
    encoder = LabelEncoder()
    encoded = encoder.fit_transform(y)
    self.classes_ = encoder.classes_
    n_classes = len(self.classes_)
    self.n_trees_per_iteration_ = n_classes if n_classes > 2 else 1
    return encoded.astype(np.float32, copy=False)
def _get_loss(self):
    """Instantiate the loss, resolving 'auto' from the class count."""
    loss_name = self.loss
    if loss_name == 'auto':
        loss_name = ('binary_crossentropy'
                     if self.n_trees_per_iteration_ == 1
                     else 'categorical_crossentropy')
    return _LOSSES[loss_name]()
@njit(parallel=True)
def _update_raw_predictions(leaves_data, raw_predictions):
    """Update raw_predictions by reading the predictions of the ith tree
    directly from the leaves.

    Can only be used for predicting the training data. raw_predictions
    contains the sum of the tree values from iteration 0 to i - 1. This adds
    the predictions of the ith tree to raw_predictions.

    Parameters
    ----------
    leaves_data: list of tuples (leaf.value, leaf.sample_indices)
        The leaves data used to update raw_predictions.
    raw_predictions : array-like, shape=(n_samples,)
        The raw predictions for the training data.
    """
    # A tree's leaves partition the training samples, so the per-leaf
    # updates touch disjoint indices and the outer loop can safely run
    # in parallel via numba's prange.
    for leaf_idx in prange(len(leaves_data)):
        leaf_value, sample_indices = leaves_data[leaf_idx]
        for sample_idx in sample_indices:
            raw_predictions[sample_idx] += leaf_value
| 42.679296 | 79 | 0.624604 |
f6ff48deb9fb9ebd76e48ee147a3d063ebc775c0 | 2,249 | py | Python | udacity-program_self_driving_car_engineer_v1.0/part01-computer vision and deep learning/module04-computer vision/lesson01-camera calibration/undistort_and_transform/distort_transform.py | linksdl/futuretec-project-self_driving_cars_projects | 38e8f14543132ec86a8bada8d708eefaef23fee8 | [
"MIT"
] | null | null | null | udacity-program_self_driving_car_engineer_v1.0/part01-computer vision and deep learning/module04-computer vision/lesson01-camera calibration/undistort_and_transform/distort_transform.py | linksdl/futuretec-project-self_driving_cars_projects | 38e8f14543132ec86a8bada8d708eefaef23fee8 | [
"MIT"
] | null | null | null | udacity-program_self_driving_car_engineer_v1.0/part01-computer vision and deep learning/module04-computer vision/lesson01-camera calibration/undistort_and_transform/distort_transform.py | linksdl/futuretec-project-self_driving_cars_projects | 38e8f14543132ec86a8bada8d708eefaef23fee8 | [
"MIT"
] | null | null | null | """
# !/usr/bin/env python
# -*- coding: utf-8 -*-
@Time : 2022/2/24 23:00
@Author : shengdl999links@gmail.com
@ProjectName : udacity-program_self_driving_car_engineer_v1.0_source.0
@File : distort_transform.py
"""
import pickle
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Read in the saved camera matrix and distortion coefficients
# These are the arrays you calculated using cv2.calibrateCamera()
# Use a context manager so the pickle file handle is closed promptly
# (the original bare open() call leaked the file object).
with open("wide_dist_pickle.p", "rb") as pickle_file:
    dist_pickle = pickle.load(pickle_file)
mtx = dist_pickle["mtx"]
dist = dist_pickle["dist"]

# Read in an image
img = cv2.imread('test_image2.png')
nx = 8  # the number of inside corners in x
ny = 6  # the number of inside corners in y
def corners_unwarp(img, nx, ny, mtx, dist):
    """Undistort ``img`` and warp the detected chessboard to a top-down view.

    Implements the steps the original stub only listed as TODOs.

    Parameters
    ----------
    img : ndarray
        BGR image (as returned by ``cv2.imread``) containing a chessboard.
    nx, ny : int
        Number of inside corners along x and y.
    mtx, dist : ndarray
        Camera matrix and distortion coefficients from
        ``cv2.calibrateCamera``.

    Returns
    -------
    warped : ndarray
        Top-down (bird's-eye) view of the chessboard, or a copy of the
        undistorted image when no corners were found.
    M : ndarray or None
        The 3x3 perspective transform matrix, or None when corner
        detection failed (matching the stub's original fallback).
    """
    # 1) Undistort using the calibration results.
    undistorted = cv2.undistort(img, mtx, dist, None, mtx)
    # 2) Convert to grayscale for corner detection.
    gray = cv2.cvtColor(undistorted, cv2.COLOR_BGR2GRAY)
    # 3) Find the chessboard corners (automatically detected, as the
    #    original comments recommended).
    found, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
    if not found:
        return np.copy(undistorted), None
    # 4a) Draw the detected corners as a visual sanity check.
    cv2.drawChessboardCorners(undistorted, (nx, ny), corners, found)
    # 4b) Source points: the four outermost detected corners
    #     (top-left, top-right, bottom-right, bottom-left).
    src = np.float32([corners[0], corners[nx - 1],
                      corners[-1], corners[-nx]])
    # 4c) Destination points: a rectangle inset by a fixed pixel margin.
    height, width = gray.shape
    offset = 100
    dst = np.float32([[offset, offset],
                      [width - offset, offset],
                      [width - offset, height - offset],
                      [offset, height - offset]])
    # 4d) Perspective transform matrix ...
    M = cv2.getPerspectiveTransform(src, dst)
    # 4e) ... and warp the undistorted image to a top-down view.
    warped = cv2.warpPerspective(undistorted, M, (width, height))
    return warped, M
top_down, perspective_M = corners_unwarp(img, nx, ny, mtx, dist)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(img)
ax1.set_title('Original Image', fontsize=50)
ax2.imshow(top_down)
ax2.set_title('Undistorted and Warped Image', fontsize=50)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
| 37.483333 | 89 | 0.669186 |
9c59c74a7b41a45097f0f004065bfb7e3236acea | 1,333 | py | Python | csv_handler.py | Lauenburg/simple_webcrawler | 27cb1ec3625033ebb232bf7ac40546e89658abdd | [
"MIT"
] | null | null | null | csv_handler.py | Lauenburg/simple_webcrawler | 27cb1ec3625033ebb232bf7ac40546e89658abdd | [
"MIT"
] | null | null | null | csv_handler.py | Lauenburg/simple_webcrawler | 27cb1ec3625033ebb232bf7ac40546e89658abdd | [
"MIT"
] | null | null | null | import csv
import os
def create_csv_file(path, csv_file_name, csv_header=None):
    """Create a csv file with a header row unless it already exists.

    Args:
        path: Directory prefix for the csv file (including trailing slash).
        csv_file_name: Base name of the csv file, without extension.
        csv_header: Optional list of column names; defaults to
            ['name', 'webaddress', 'address'].

    Returns:
        A (path, csv_file_name, csv_header) tuple describing the file.
    """
    header = ['name', 'webaddress', 'address'] if csv_header is None else csv_header
    target = path + csv_file_name + '.csv'
    # Only write the header when the file is new; never truncate existing data.
    if not os.path.isfile(target):
        with open(target, 'w', newline='') as handle:
            csv.DictWriter(handle, fieldnames=header).writeheader()
    return path, csv_file_name, header
def write_line_to_csv(path, csv_file_name, header, csv_dictionary):
    """Append one dictionary as a row to the csv file.

    Args:
        path: Directory prefix for the csv file (including trailing slash).
        csv_file_name: Base name of the csv file, without extension.
        header: Column names used to map dictionary keys onto columns.
        csv_dictionary: Mapping of column name to value for the new row.
    """
    target = path + csv_file_name + '.csv'
    with open(target, 'a', newline='') as handle:
        csv.DictWriter(handle, fieldnames=header).writerow(csv_dictionary)
| 27.204082 | 71 | 0.631658 |
80ce750e47e69d1ea755ad8a857ebfac2ec20ea3 | 3,894 | py | Python | manifolds/main.py | rrkarim/MoNet | 0913173735ce2f369a16165b50c47438b7249406 | [
"MIT"
] | 35 | 2019-06-28T11:00:19.000Z | 2022-03-26T19:03:21.000Z | manifolds/main.py | rrkarim/MoNet | 0913173735ce2f369a16165b50c47438b7249406 | [
"MIT"
] | 3 | 2020-08-13T08:16:00.000Z | 2022-02-28T13:20:43.000Z | manifolds/main.py | rrkarim/MoNet | 0913173735ce2f369a16165b50c47438b7249406 | [
"MIT"
] | 3 | 2020-05-31T13:48:43.000Z | 2022-01-24T13:11:30.000Z | import argparse
import os.path as osp
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torch_geometric.datasets import FAUST
from torch_geometric.data import DataLoader
import torch_geometric.transforms as T
from conv import GMMConv
from manifolds import run
parser = argparse.ArgumentParser(description='shape correspondence')
parser.add_argument('--dataset', type=str, default='FAUST')
parser.add_argument('--device_idx', type=int, default=4)
parser.add_argument('--n_threads', type=int, default=4)
parser.add_argument('--kernel_size', type=int, default=10)
parser.add_argument('--lr', type=float, default=3e-3)
parser.add_argument('--lr_decay', type=float, default=0.99)
parser.add_argument('--decay_step', type=int, default=1)
parser.add_argument('--weight_decay', type=float, default=5e-5)
parser.add_argument('--epochs', type=int, default=500)
parser.add_argument('--seed', type=int, default=1)
args = parser.parse_args()
args.data_fp = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data',
args.dataset)
device = torch.device('cuda', args.device_idx)
torch.set_num_threads(args.n_threads)
# deterministic
torch.manual_seed(args.seed)
cudnn.benchmark = False
cudnn.deterministic = True
class Pre_Transform(object):
    """Pre-transform for FAUST meshes: use vertex positions as node
    features and derive graph edges from the mesh faces."""

    def __call__(self, data):
        # Vertex coordinates become the node feature matrix.
        data.x = data.pos
        # Convert face connectivity into an edge_index graph.
        return T.FaceToEdge()(data)
train_dataset = FAUST(args.data_fp,
True,
transform=T.Cartesian(),
pre_transform=Pre_Transform())
test_dataset = FAUST(args.data_fp,
False,
transform=T.Cartesian(),
pre_transform=Pre_Transform())
train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=1)
d = train_dataset[0]
target = torch.arange(d.num_nodes, dtype=torch.long, device=device)
print(d)
class MoNet(nn.Module):
    """MoNet-style network for dense shape correspondence.

    Three stacked GMMConv layers using ``kernel_size`` Gaussian kernels
    over 3-D pseudo-coordinates (the edge attributes), sandwiched between
    linear layers.  Outputs per-node log-probabilities over
    ``num_classes`` classes (one class per template vertex).
    """

    def __init__(self, in_channels, num_classes, kernel_size):
        """Build the layers.

        Args:
            in_channels: Size of each input node feature vector.
            num_classes: Number of output classes per node.
            kernel_size: Number of Gaussian kernels in each GMMConv.
        """
        super(MoNet, self).__init__()
        self.fc0 = nn.Linear(in_channels, 16)
        # dim=3 matches the 3-D edge attributes produced by T.Cartesian().
        self.conv1 = GMMConv(16, 32, dim=3, kernel_size=kernel_size)
        self.conv2 = GMMConv(32, 64, dim=3, kernel_size=kernel_size)
        self.conv3 = GMMConv(64, 128, dim=3, kernel_size=kernel_size)
        self.fc1 = nn.Linear(128, 256)
        self.fc2 = nn.Linear(256, num_classes)
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize all weights (Xavier uniform) and biases (zero)."""
        self.conv1.reset_parameters()
        self.conv2.reset_parameters()
        self.conv3.reset_parameters()
        nn.init.xavier_uniform_(self.fc0.weight, gain=1)
        nn.init.xavier_uniform_(self.fc1.weight, gain=1)
        nn.init.xavier_uniform_(self.fc2.weight, gain=1)
        nn.init.constant_(self.fc0.bias, 0)
        nn.init.constant_(self.fc1.bias, 0)
        nn.init.constant_(self.fc2.bias, 0)

    def forward(self, data):
        """Run the network on a torch_geometric ``data`` object.

        Reads ``data.x`` (node features), ``data.edge_index`` and
        ``data.edge_attr`` (3-D pseudo-coordinates) and returns per-node
        log-softmax scores of shape (num_nodes, num_classes).
        """
        x, edge_index, edge_attr = data.x, data.edge_index, data.edge_attr
        x = F.elu(self.fc0(x))
        x = F.elu(self.conv1(x, edge_index, edge_attr))
        x = F.elu(self.conv2(x, edge_index, edge_attr))
        x = F.elu(self.conv3(x, edge_index, edge_attr))
        x = F.elu(self.fc1(x))
        # Dropout is only active in training mode.
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
model = MoNet(d.num_features, d.num_nodes, args.kernel_size).to(device)
print(model)
optimizer = optim.Adam(model.parameters(),
lr=args.lr,
weight_decay=args.weight_decay)
scheduler = optim.lr_scheduler.StepLR(optimizer,
args.decay_step,
gamma=args.lr_decay)
run(model, train_loader, test_loader, target, d.num_nodes, args.epochs,
optimizer, scheduler, device)
| 35.4 | 74 | 0.665383 |
140fafa64c75356310fa8071504252b81973574a | 24,357 | py | Python | tensorflow/python/kernel_tests/check_ops_test.py | sachinpro/sachinpro.github.io | c3bbd8d89818f5d8bb7296c851ed5e52c19728e3 | [
"Apache-2.0"
] | 1 | 2018-01-24T12:57:47.000Z | 2018-01-24T12:57:47.000Z | tensorflow/python/kernel_tests/check_ops_test.py | sachinpro/sachinpro.github.io | c3bbd8d89818f5d8bb7296c851ed5e52c19728e3 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/kernel_tests/check_ops_test.py | sachinpro/sachinpro.github.io | c3bbd8d89818f5d8bb7296c851ed5e52c19728e3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.check_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class AssertProperIterableTest(tf.test.TestCase):
  """Tests for tf.assert_proper_iterable.

  Tensors, SparseTensors, ndarrays and strings are technically iterable,
  but passing one where a collection of tensors is expected is almost
  always a caller error, so assert_proper_iterable must reject them.
  """

  def test_single_tensor_raises(self):
    tensor = tf.constant(1)
    with self.assertRaisesRegexp(TypeError, "proper"):
      tf.assert_proper_iterable(tensor)

  def test_single_sparse_tensor_raises(self):
    ten = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], shape=[3, 4])
    with self.assertRaisesRegexp(TypeError, "proper"):
      tf.assert_proper_iterable(ten)

  def test_single_ndarray_raises(self):
    array = np.array([1, 2, 3])
    with self.assertRaisesRegexp(TypeError, "proper"):
      tf.assert_proper_iterable(array)

  def test_single_string_raises(self):
    # Strings iterate over characters, which is never what callers want here.
    mystr = "hello"
    with self.assertRaisesRegexp(TypeError, "proper"):
      tf.assert_proper_iterable(mystr)

  def test_non_iterable_object_raises(self):
    non_iterable = 1234
    with self.assertRaisesRegexp(TypeError, "to be iterable"):
      tf.assert_proper_iterable(non_iterable)

  def test_list_does_not_raise(self):
    list_of_stuff = [tf.constant([11, 22]), tf.constant([1, 2])]
    tf.assert_proper_iterable(list_of_stuff)

  def test_generator_does_not_raise(self):
    # NOTE(review): despite the name this is a tuple literal, not a
    # generator; either should be accepted as a proper iterable.
    generator_of_stuff = (tf.constant([11, 22]), tf.constant([1, 2]))
    tf.assert_proper_iterable(generator_of_stuff)
class AssertEqualTest(tf.test.TestCase):
def test_doesnt_raise_when_equal(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
with tf.control_dependencies([tf.assert_equal(small, small)]):
out = tf.identity(small)
out.eval()
def test_raises_when_greater(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
big = tf.constant([3, 4], name="big")
with tf.control_dependencies([tf.assert_equal(big, small)]):
out = tf.identity(small)
with self.assertRaisesOpError("big.*small"):
out.eval()
def test_raises_when_less(self):
with self.test_session():
small = tf.constant([3, 1], name="small")
big = tf.constant([4, 2], name="big")
with tf.control_dependencies([tf.assert_equal(small, big)]):
out = tf.identity(small)
with self.assertRaisesOpError("small.*big"):
out.eval()
def test_doesnt_raise_when_equal_and_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
small_2 = tf.constant([1, 2], name="small_2")
with tf.control_dependencies([tf.assert_equal(small, small_2)]):
out = tf.identity(small)
out.eval()
def test_raises_when_equal_but_non_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1, 1, 1], name="small")
small_2 = tf.constant([1, 1], name="small_2")
with self.assertRaisesRegexp(ValueError, "broadcast"):
with tf.control_dependencies([tf.assert_equal(small, small_2)]):
out = tf.identity(small)
out.eval()
def test_doesnt_raise_when_both_empty(self):
with self.test_session():
larry = tf.constant([])
curly = tf.constant([])
with tf.control_dependencies([tf.assert_equal(larry, curly)]):
out = tf.identity(larry)
out.eval()
class AssertLessTest(tf.test.TestCase):
def test_raises_when_equal(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
with tf.control_dependencies([tf.assert_less(small, small)]):
out = tf.identity(small)
with self.assertRaisesOpError("small.*small"):
out.eval()
def test_raises_when_greater(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
big = tf.constant([3, 4], name="big")
with tf.control_dependencies([tf.assert_less(big, small)]):
out = tf.identity(small)
with self.assertRaisesOpError("big.*small"):
out.eval()
def test_doesnt_raise_when_less(self):
with self.test_session():
small = tf.constant([3, 1], name="small")
big = tf.constant([4, 2], name="big")
with tf.control_dependencies([tf.assert_less(small, big)]):
out = tf.identity(small)
out.eval()
def test_doesnt_raise_when_less_and_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1], name="small")
big = tf.constant([3, 2], name="big")
with tf.control_dependencies([tf.assert_less(small, big)]):
out = tf.identity(small)
out.eval()
def test_raises_when_less_but_non_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1, 1, 1], name="small")
big = tf.constant([3, 2], name="big")
with self.assertRaisesRegexp(ValueError, "broadcast"):
with tf.control_dependencies([tf.assert_less(small, big)]):
out = tf.identity(small)
out.eval()
def test_doesnt_raise_when_both_empty(self):
with self.test_session():
larry = tf.constant([])
curly = tf.constant([])
with tf.control_dependencies([tf.assert_less(larry, curly)]):
out = tf.identity(larry)
out.eval()
class AssertLessEqualTest(tf.test.TestCase):
def test_doesnt_raise_when_equal(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
with tf.control_dependencies([tf.assert_less_equal(small, small)]):
out = tf.identity(small)
out.eval()
def test_raises_when_greater(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
big = tf.constant([3, 4], name="big")
with tf.control_dependencies([tf.assert_less_equal(big, small)]):
out = tf.identity(small)
with self.assertRaisesOpError("big.*small"):
out.eval()
def test_doesnt_raise_when_less_equal(self):
with self.test_session():
small = tf.constant([1, 2], name="small")
big = tf.constant([3, 2], name="big")
with tf.control_dependencies([tf.assert_less_equal(small, big)]):
out = tf.identity(small)
out.eval()
def test_doesnt_raise_when_less_equal_and_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1], name="small")
big = tf.constant([3, 1], name="big")
with tf.control_dependencies([tf.assert_less_equal(small, big)]):
out = tf.identity(small)
out.eval()
def test_raises_when_less_equal_but_non_broadcastable_shapes(self):
with self.test_session():
small = tf.constant([1, 1, 1], name="small")
big = tf.constant([3, 1], name="big")
with self.assertRaisesRegexp(ValueError, "broadcast"):
with tf.control_dependencies([tf.assert_less_equal(small, big)]):
out = tf.identity(small)
out.eval()
def test_doesnt_raise_when_both_empty(self):
with self.test_session():
larry = tf.constant([])
curly = tf.constant([])
with tf.control_dependencies([tf.assert_less_equal(larry, curly)]):
out = tf.identity(larry)
out.eval()
class AssertNegativeTest(tf.test.TestCase):
def test_doesnt_raise_when_negative(self):
with self.test_session():
frank = tf.constant([-1, -2], name="frank")
with tf.control_dependencies([tf.assert_negative(frank)]):
out = tf.identity(frank)
out.eval()
def test_raises_when_positive(self):
with self.test_session():
doug = tf.constant([1, 2], name="doug")
with tf.control_dependencies([tf.assert_negative(doug)]):
out = tf.identity(doug)
with self.assertRaisesOpError("doug"):
out.eval()
def test_raises_when_zero(self):
with self.test_session():
claire = tf.constant([0], name="claire")
with tf.control_dependencies([tf.assert_negative(claire)]):
out = tf.identity(claire)
with self.assertRaisesOpError("claire"):
out.eval()
def test_empty_tensor_doesnt_raise(self):
# A tensor is negative when it satisfies:
# For every element x_i in x, x_i < 0
# and an empty tensor has no elements, so this is trivially satisfied.
# This is standard set theory.
with self.test_session():
empty = tf.constant([], name="empty")
with tf.control_dependencies([tf.assert_negative(empty)]):
out = tf.identity(empty)
out.eval()
class AssertPositiveTest(tf.test.TestCase):
def test_raises_when_negative(self):
with self.test_session():
freddie = tf.constant([-1, -2], name="freddie")
with tf.control_dependencies([tf.assert_positive(freddie)]):
out = tf.identity(freddie)
with self.assertRaisesOpError("freddie"):
out.eval()
def test_doesnt_raise_when_positive(self):
with self.test_session():
remmy = tf.constant([1, 2], name="remmy")
with tf.control_dependencies([tf.assert_positive(remmy)]):
out = tf.identity(remmy)
out.eval()
def test_raises_when_zero(self):
with self.test_session():
meechum = tf.constant([0], name="meechum")
with tf.control_dependencies([tf.assert_positive(meechum)]):
out = tf.identity(meechum)
with self.assertRaisesOpError("meechum"):
out.eval()
def test_empty_tensor_doesnt_raise(self):
# A tensor is positive when it satisfies:
# For every element x_i in x, x_i > 0
# and an empty tensor has no elements, so this is trivially satisfied.
# This is standard set theory.
with self.test_session():
empty = tf.constant([], name="empty")
with tf.control_dependencies([tf.assert_positive(empty)]):
out = tf.identity(empty)
out.eval()
class AssertRankTest(tf.test.TestCase):
def test_rank_zero_tensor_raises_if_rank_too_small_static_rank(self):
with self.test_session():
tensor = tf.constant(1, name="my_tensor")
desired_rank = 1
with self.assertRaisesRegexp(ValueError, "my_tensor.*rank"):
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
tf.identity(tensor).eval()
def test_rank_zero_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 1
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
tf.identity(tensor).eval(feed_dict={tensor: 0})
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
with self.test_session():
tensor = tf.constant(1, name="my_tensor")
desired_rank = 0
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
tf.identity(tensor).eval()
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 0
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
tf.identity(tensor).eval(feed_dict={tensor: 0})
def test_rank_one_tensor_raises_if_rank_too_large_static_rank(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
desired_rank = 0
with self.assertRaisesRegexp(ValueError, "my_tensor.*rank"):
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
tf.identity(tensor).eval()
def test_rank_one_tensor_raises_if_rank_too_large_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 0
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
desired_rank = 1
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
tf.identity(tensor).eval()
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 1
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
def test_rank_one_tensor_raises_if_rank_too_small_static_rank(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
desired_rank = 2
with self.assertRaisesRegexp(ValueError, "my_tensor.*rank"):
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
tf.identity(tensor).eval()
def test_rank_one_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 2
with tf.control_dependencies([tf.assert_rank(tensor, desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
def test_raises_if_rank_is_not_scalar_static(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
with self.assertRaisesRegexp(ValueError, "Rank must be a scalar"):
tf.assert_rank(tensor, np.array([], dtype=np.int32))
def test_raises_if_rank_is_not_scalar_dynamic(self):
with self.test_session():
tensor = tf.constant([1, 2], dtype=tf.float32, name="my_tensor")
rank_tensor = tf.placeholder(tf.int32, name="rank_tensor")
with self.assertRaisesOpError("Rank must be a scalar"):
with tf.control_dependencies([tf.assert_rank(tensor, rank_tensor)]):
tf.identity(tensor).eval(feed_dict={rank_tensor: [1, 2]})
def test_raises_if_rank_is_not_integer_static(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
with self.assertRaisesRegexp(ValueError,
"must be of type <dtype: 'int32'>"):
tf.assert_rank(tensor, .5)
def test_raises_if_rank_is_not_integer_dynamic(self):
with self.test_session():
tensor = tf.constant([1, 2], dtype=tf.float32, name="my_tensor")
rank_tensor = tf.placeholder(tf.float32, name="rank_tensor")
with self.assertRaisesRegexp(ValueError,
"must be of type <dtype: 'int32'>"):
with tf.control_dependencies([tf.assert_rank(tensor, rank_tensor)]):
tf.identity(tensor).eval(feed_dict={rank_tensor: .5})
class AssertRankAtLeastTest(tf.test.TestCase):
def test_rank_zero_tensor_raises_if_rank_too_small_static_rank(self):
with self.test_session():
tensor = tf.constant(1, name="my_tensor")
desired_rank = 1
with self.assertRaisesRegexp(ValueError, "my_tensor.*rank"):
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval()
def test_rank_zero_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 1
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
tf.identity(tensor).eval(feed_dict={tensor: 0})
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
with self.test_session():
tensor = tf.constant(1, name="my_tensor")
desired_rank = 0
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval()
def test_rank_zero_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 0
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval(feed_dict={tensor: 0})
def test_rank_one_ten_doesnt_raise_raise_if_rank_too_large_static_rank(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
desired_rank = 0
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval()
def test_rank_one_ten_doesnt_raise_if_rank_too_large_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 0
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_static_rank(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
desired_rank = 1
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval()
def test_rank_one_tensor_doesnt_raise_if_rank_just_right_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 1
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
def test_rank_one_tensor_raises_if_rank_too_small_static_rank(self):
with self.test_session():
tensor = tf.constant([1, 2], name="my_tensor")
desired_rank = 2
with self.assertRaisesRegexp(ValueError, "my_tensor.*rank"):
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
tf.identity(tensor).eval()
def test_rank_one_tensor_raises_if_rank_too_small_dynamic_rank(self):
with self.test_session():
tensor = tf.placeholder(tf.float32, name="my_tensor")
desired_rank = 2
with tf.control_dependencies([tf.assert_rank_at_least(tensor,
desired_rank)]):
with self.assertRaisesOpError("my_tensor.*rank"):
tf.identity(tensor).eval(feed_dict={tensor: [1, 2]})
class AssertNonNegativeTest(tf.test.TestCase):
def test_raises_when_negative(self):
with self.test_session():
zoe = tf.constant([-1, -2], name="zoe")
with tf.control_dependencies([tf.assert_non_negative(zoe)]):
out = tf.identity(zoe)
with self.assertRaisesOpError("zoe"):
out.eval()
def test_doesnt_raise_when_zero_and_positive(self):
with self.test_session():
lucas = tf.constant([0, 2], name="lucas")
with tf.control_dependencies([tf.assert_non_negative(lucas)]):
out = tf.identity(lucas)
out.eval()
def test_empty_tensor_doesnt_raise(self):
# A tensor is non-negative when it satisfies:
# For every element x_i in x, x_i >= 0
# and an empty tensor has no elements, so this is trivially satisfied.
# This is standard set theory.
with self.test_session():
empty = tf.constant([], name="empty")
with tf.control_dependencies([tf.assert_non_negative(empty)]):
out = tf.identity(empty)
out.eval()
class AssertNonPositiveTest(tf.test.TestCase):
def test_doesnt_raise_when_zero_and_negative(self):
with self.test_session():
tom = tf.constant([0, -2], name="tom")
with tf.control_dependencies([tf.assert_non_positive(tom)]):
out = tf.identity(tom)
out.eval()
def test_raises_when_positive(self):
with self.test_session():
rachel = tf.constant([0, 2], name="rachel")
with tf.control_dependencies([tf.assert_non_positive(rachel)]):
out = tf.identity(rachel)
with self.assertRaisesOpError("rachel"):
out.eval()
def test_empty_tensor_doesnt_raise(self):
# A tensor is non-positive when it satisfies:
# For every element x_i in x, x_i <= 0
# and an empty tensor has no elements, so this is trivially satisfied.
# This is standard set theory.
with self.test_session():
empty = tf.constant([], name="empty")
with tf.control_dependencies([tf.assert_non_positive(empty)]):
out = tf.identity(empty)
out.eval()
class AssertIntegerTest(tf.test.TestCase):
def test_doesnt_raise_when_integer(self):
with self.test_session():
integers = tf.constant([1, 2], name="integers")
with tf.control_dependencies([tf.assert_integer(integers)]):
out = tf.identity(integers)
out.eval()
def test_raises_when_float(self):
with self.test_session():
floats = tf.constant([1.0, 2.0], name="floats")
with tf.control_dependencies([tf.assert_integer(floats)]):
out = tf.identity(floats)
with self.assertRaisesOpError("x is not of integer dtype.*"):
out.eval()
class IsStrictlyIncreasingTest(tf.test.TestCase):
def test_constant_tensor_is_not_strictly_increasing(self):
with self.test_session():
self.assertFalse(tf.is_strictly_increasing([1, 1, 1]).eval())
def test_decreasing_tensor_is_not_strictly_increasing(self):
with self.test_session():
self.assertFalse(tf.is_strictly_increasing([1, 0, -1]).eval())
def test_2d_decreasing_tensor_is_not_strictly_increasing(self):
with self.test_session():
self.assertFalse(tf.is_strictly_increasing([[1, 3], [2, 4]]).eval())
def test_increasing_tensor_is_increasing(self):
with self.test_session():
self.assertTrue(tf.is_strictly_increasing([1, 2, 3]).eval())
def test_increasing_rank_two_tensor(self):
with self.test_session():
self.assertTrue(tf.is_strictly_increasing([[-1, 2], [3, 4]]).eval())
def test_tensor_with_one_element_is_strictly_increasing(self):
with self.test_session():
self.assertTrue(tf.is_strictly_increasing([1]).eval())
def test_empty_tensor_is_strictly_increasing(self):
with self.test_session():
self.assertTrue(tf.is_strictly_increasing([]).eval())
class IsNonDecreasingTest(tf.test.TestCase):
  """Tests for tf.is_non_decreasing.

  A tensor is non-decreasing when every element is >= the previous one in
  flattened (row-major) order, so rank-2 tensors are compared across row
  boundaries as well.
  """

  def test_constant_tensor_is_non_decreasing(self):
    # Equal neighbors are allowed (unlike is_strictly_increasing).
    with self.test_session():
      self.assertTrue(tf.is_non_decreasing([1, 1, 1]).eval())

  def test_decreasing_tensor_is_not_non_decreasing(self):
    with self.test_session():
      self.assertFalse(tf.is_non_decreasing([3, 2, 1]).eval())

  def test_2d_decreasing_tensor_is_not_non_decreasing(self):
    # Flattened order is [1, 3, 2, 4]; 3 -> 2 decreases across the row break.
    with self.test_session():
      self.assertFalse(tf.is_non_decreasing([[1, 3], [2, 4]]).eval())

  def test_increasing_rank_one_tensor_is_non_decreasing(self):
    with self.test_session():
      self.assertTrue(tf.is_non_decreasing([1, 2, 3]).eval())

  def test_increasing_rank_two_tensor(self):
    with self.test_session():
      self.assertTrue(tf.is_non_decreasing([[-1, 2], [3, 3]]).eval())

  def test_tensor_with_one_element_is_non_decreasing(self):
    # Trivially true: there is no adjacent pair to violate the property.
    with self.test_session():
      self.assertTrue(tf.is_non_decreasing([1]).eval())

  def test_empty_tensor_is_non_decreasing(self):
    # Vacuously true for an empty tensor.
    with self.test_session():
      self.assertTrue(tf.is_non_decreasing([]).eval())
if __name__ == "__main__":
tf.test.main()
| 38.785032 | 80 | 0.673277 |
785b937a837d5802b9fa00a8074ecfe9c68f6c81 | 1,079 | py | Python | Web-Development/Learning-Django/Polling-Site/polls_app/migrations/0001_initial.py | oliverkeen/Sandbox | 40e2a9239a81ebaeff6e7b34ed8329c6796d71f5 | [
"MIT"
] | null | null | null | Web-Development/Learning-Django/Polling-Site/polls_app/migrations/0001_initial.py | oliverkeen/Sandbox | 40e2a9239a81ebaeff6e7b34ed8329c6796d71f5 | [
"MIT"
] | null | null | null | Web-Development/Learning-Django/Polling-Site/polls_app/migrations/0001_initial.py | oliverkeen/Sandbox | 40e2a9239a81ebaeff6e7b34ed8329c6796d71f5 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-03-25 17:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the polls app: creates Question and Choice."""

    # First migration for this app.
    initial = True

    # No prior migrations (in this or other apps) are required.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question_text', models.CharField(max_length=200)),
                ('pub_date', models.DateTimeField(verbose_name='date published')),
            ],
        ),
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('choice_text', models.CharField(max_length=200)),
                ('votes', models.IntegerField(default=0)),
                # Each Choice belongs to one Question; deleting the
                # Question cascades to its Choices.
                ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls_app.question')),
            ],
        ),
    ]
| 32.69697 | 118 | 0.587581 |
9191fe9394efb568cec8993f6d50b4af875985e9 | 3,039 | py | Python | SimilarSentences/TrainSentences.py | shangan23/similar-sentences | 49a512b4010fb1a4a4def2685b04f9c27551b543 | [
"Apache-2.0"
] | 9 | 2020-04-15T13:31:01.000Z | 2021-12-13T06:37:08.000Z | SimilarSentences/TrainSentences.py | shangan23/similar-sentences | 49a512b4010fb1a4a4def2685b04f9c27551b543 | [
"Apache-2.0"
] | null | null | null | SimilarSentences/TrainSentences.py | shangan23/similar-sentences | 49a512b4010fb1a4a4def2685b04f9c27551b543 | [
"Apache-2.0"
] | 3 | 2020-06-23T12:45:47.000Z | 2020-10-09T05:41:53.000Z | import numpy as np
import logging
import zipfile
import os
import shutil
from sentence_transformers import SentenceTransformer, LoggingHandler
from sys import exit
class TrainSentences:
    """Fine-tune-free sentence encoder: embeds a training corpus with a
    pretrained SentenceTransformer and packages model + vectors as a zip.
    """

    def __init__(self, txt_file, pretrain_model: str = None):
        """Validate the training file and prepare output directories.

        Args:
            txt_file: Name of a .txt file (one sentence per line) located
                in the current working directory.
            pretrain_model: Optional SentenceTransformer model name;
                defaults to 'bert-base-nli-mean-tokens'.

        Exits the interpreter if the training file is missing or not .txt.
        """
        dir_path = os.getcwd() + '/'
        file_path = dir_path + txt_file
        print('\n')
        print('Scanning the path '+file_path + ' ...')
        pretrain = pretrain_model if pretrain_model else 'bert-base-nli-mean-tokens'
        self.pretrain_model = pretrain
        print('Pretrained model is set to '+self.pretrain_model + ' ...')
        if(os.path.isfile(file_path) and self.get_file_extension(file_path) == ".txt"):
            print('Training file validation OK...')
            self.train_file_path = file_path
            if not os.path.exists(dir_path+'trained_model'):
                os.makedirs(dir_path+'trained_model')
            self.model_save_path = dir_path+'trained_model/'
            self.zip_save_path = dir_path+'/'
        else:
            # sys.exit via `from sys import exit` at module level.
            exit('Training file is not valid... exiting...')

    def get_file_extension(self, src):
        """Return the lower-cased file extension of *src* (e.g. '.txt')."""
        return os.path.splitext(src)[-1].lower()

    def get_path(self):
        """Return a dict of all file-system paths used by the pipeline."""
        _vector_file = 'vector.npy'
        _train_file = 'train.txt'
        _files = {
            'model': self.model_save_path,
            'vector': self.model_save_path + _vector_file,
            'training_set': self.train_file_path,
            'zip_path': self.zip_save_path+'model.zip',
            'train_file': self.model_save_path + _train_file,
        }
        return _files

    def train(self):
        """Embed the corpus, save model + vectors, and zip the result.

        Side effects: writes the model directory and vector.npy, renames
        the training file into the model directory, and produces
        model.zip next to the working directory.
        """
        path = self.get_path()
        np.set_printoptions(threshold=100)
        logging.basicConfig(format='%(asctime)s - %(message)s',
                            datefmt='%Y-%m-%d %H:%M:%S',
                            level=logging.ERROR,
                            handlers=[LoggingHandler()])
        model = SentenceTransformer(self.pretrain_model)
        # NOTE(review): file handle is never closed; prefer `with open(...)`.
        sentences = open(path.get('training_set')).read().splitlines()
        sentence_embeddings = model.encode(sentences)
        # NOTE(review): `vecs` is computed but never used afterwards.
        vecs = np.stack(sentence_embeddings)
        model.save(path.get('model'))
        print('Saving the model to '+path.get('model')+'...')
        np.save(path.get('vector'), sentence_embeddings)
        print('Saving the vector to '+path.get('vector')+'...')
        print('Initiating model compression(.zip) ...')
        # Move the training set inside the model dir so it ships in the zip.
        os.rename(path.get('training_set'), path.get('train_file'))
        self.compress_file(path.get('model'), path.get('zip_path'))
        print('→ Download "model.zip" and use it for prediction ...')

    def compress_file(self, dirpath, zippath):
        """Zip *dirpath* recursively into *zippath*, then delete the
        uncompressed model directory."""
        fzip = zipfile.ZipFile(zippath, 'w', zipfile.ZIP_DEFLATED)
        basedir = os.path.dirname(dirpath) + '/'
        for root, dirs, files in os.walk(dirpath):
            # Store paths relative to the parent of dirpath inside the zip.
            dirname = root.replace(basedir, '')
            for f in files:
                fzip.write(root + '/' + f, dirname + '/' + f)
        fzip.close()
        # Remove the now-redundant uncompressed model directory.
        shutil.rmtree(self.model_save_path)
| 41.067568 | 87 | 0.602501 |
13ba6cfcbf7a417e45a97fa1c6e508cb2411260d | 113 | py | Python | agbot/models/states.py | Toffooo/aituio | a4382f2d857cf8a5dd3b44bbc5fa93203c2eec28 | [
"MIT"
] | 1 | 2022-01-18T14:17:11.000Z | 2022-01-18T14:17:11.000Z | agbot/models/states.py | Toffooo/aituio | a4382f2d857cf8a5dd3b44bbc5fa93203c2eec28 | [
"MIT"
] | null | null | null | agbot/models/states.py | Toffooo/aituio | a4382f2d857cf8a5dd3b44bbc5fa93203c2eec28 | [
"MIT"
] | null | null | null | from aiogram.dispatcher.filters.state import State, StatesGroup
class UseState(StatesGroup):
    """Aiogram finite-state-machine state group for this bot's dialog."""
    # Single conversation state named "who"; its exact meaning is defined by
    # the handlers that set/filter on it — NOTE(review): confirm in handlers.
    who = State()
| 18.833333 | 63 | 0.769912 |
e317927b3ad6f6c82f03e391a88299190d06dab0 | 2,620 | py | Python | app/modules/labeled_group.py | embian-inc/Appium-Python-Console | bb15ac441d713a0be4a8e6fee61ca54cf6fc7614 | [
"MIT"
] | 8 | 2017-09-25T05:46:53.000Z | 2022-02-04T14:42:57.000Z | app/modules/labeled_group.py | embian-inc/Appium-Python-Console | bb15ac441d713a0be4a8e6fee61ca54cf6fc7614 | [
"MIT"
] | null | null | null | app/modules/labeled_group.py | embian-inc/Appium-Python-Console | bb15ac441d713a0be4a8e6fee61ca54cf6fc7614 | [
"MIT"
] | 2 | 2018-11-28T19:16:20.000Z | 2022-02-04T14:39:15.000Z | # -*- coding: utf-8 -*-
import os,sys
from bs4 import BeautifulSoup
from app.modules.rect import Bounds, Point
from labeled_element import LabeledElement
reload(sys)
sys.setdefaultencoding('utf-8')
class LabeledGroup(LabeledElement):
    """Label selection for a group of recorded UI actions.

    A group bundles preliminary actions ('pre') with final actions ('final')
    and picks the best nearby UI element to act as the group's label.
    NOTE: this module targets Python 2 (see reload/setdefaultencoding at the
    top of the file), so ``filter`` below returns a list that is later
    sorted in place.
    """
    # group: {'gtype':'selection|sequence', 'pre':[LabeledAction, ...],
    #         'final':[(LabeledAction, {'a': angle, 'p': pos, 'd': distance}), ...]}
    def __init__(self, group):
        super(LabeledGroup, self).__init__()
        self.group = group
        self._init_label_elements()
    def _init_label_elements(self):
        # Candidate labels come from the first preliminary action; keep only
        # candidates whose relative position is LEFT or TOP (plus EQUAL, i.e.
        # overlapping, when the group has a single action).
        actions = self.pre_actions
        _filter = [Bounds.RELATIVE_POS_LEFT, Bounds.RELATIVE_POS_TOP]
        if len(actions) == 1:
            _filter.append(self.RELATIVE_POS_EQUAL)
        self.label_elements = filter(lambda k: k['pos'] in _filter , actions[0].label_elements)
        self.select_label_element()
    #override
    def select_label_element(self):
        # Preference order: (1) an exactly-overlapping element carrying any of
        # text/desc/rid, (2) best element by text or description,
        # (3) best element by resource-id. Returns None when nothing matches.
        if self.label_elements is None or len(self.label_elements) == 0:
            return None
        # self
        self.label_elements.sort(key = lambda k: (k['pos']), reverse=True)
        self.label_element = self.label_elements[0]
        if self.label_element['pos'] == self.RELATIVE_POS_EQUAL:
            if len(self.label_element['text']) > 0 or len(self.label_element['desc']) > 0 or len(self.label_element['rid']) > 0:
                return self.label_element
        # text or description
        # Sort by (position, selected-flag, nearest first) before picking.
        self.label_elements.sort(key = lambda k: (k['pos'], k['selected'], -k['dist']), reverse=True)
        self.label_element = self._select_first_by(['text', 'desc'], self.label_elements)
        if self.label_element is not None:
            return self.label_element
        # resource-id
        self.label_elements.sort(key = lambda k: (k['pos'], k['selected'], -k['dist']), reverse=True)
        self.label_element = self._select_first_by(['rid'], self.label_elements)
        if self.label_element is not None:
            return self.label_element
        return None
    @property
    def bounds(self):
        # The group's bounds are those of its first preliminary action.
        return self.pre_actions[0].bounds
    @property
    def gtype(self):
        # 'selection' or 'sequence' (see the group dict layout above).
        return self.group['gtype']
    @property
    def desc(self):
        return "/alias:%s/pos:%s" % (self.alias, self.label_pos)
    @property
    def pre_actions(self):
        return self.group['pre']
    @property
    def pre_desc(self):
        # Action numbers of the preliminary actions.
        return [ action.no for action in self.pre_actions ]
    @property
    def final_actions(self):
        return self.group['final']
    @property
    def final_desc(self):
        # (action number, geometry dict) pairs for the final actions.
        return [ (f[0].no, f[1]) for f in self.final_actions ]
cdb283559563caf2cc79f74e7f501f1eda6a2498 | 1,348 | py | Python | load_tests/QueryGuest_Case/add_guests.py | Ezetowers/AppEngine_EventsManagement | 23e496dee161fbe62596f466d3e83e9a88c2f2b4 | [
"MIT"
] | null | null | null | load_tests/QueryGuest_Case/add_guests.py | Ezetowers/AppEngine_EventsManagement | 23e496dee161fbe62596f466d3e83e9a88c2f2b4 | [
"MIT"
] | null | null | null | load_tests/QueryGuest_Case/add_guests.py | Ezetowers/AppEngine_EventsManagement | 23e496dee161fbe62596f466d3e83e9a88c2f2b4 | [
"MIT"
] | null | null | null | from lxml import etree
import sys
# Fixed fragments of the urlencoded POST body sent to /add_guest; the event
# name and a unique per-case guest e-mail are spliced in between them by main().
REQUEST_BODY_PART_1 = '<![CDATA[actualEvent='
REQUEST_BODY_PART_2 = '&guestName=Phil&guestSurname=Collins&guestCompany=GenesisSRL&guestEmail='
REQUEST_BODY_PART_3 = '&checkDuplicates=false]]>'
# Header attached to every generated test case.
CONTENT_TYPE = 'Content-type: application/x-www-form-urlencoded'
def usage():
    """Print the expected command-line invocation of this script."""
    message = ("python create_test_case [URL]"
               " [EVENT_NAME] [AMOUNT_CASES] [TEST_CASE_FILENAME]")
    print(message)
def main():
    """Generate an XML file of load-test cases for the `/add_guest` endpoint.

    Expects argv: URL EVENT_NAME AMOUNT_CASES TEST_CASE_FILENAME. Each case is
    a POST to URL/add_guest with a unique guest e-mail ("Email<case>").
    """
    if len(sys.argv) != 5:
        usage()
        # Bug fix: previously execution fell through after printing usage and
        # crashed with an IndexError while reading sys.argv below.
        sys.exit(1)
    root = etree.Element('testcases')
    url = sys.argv[1]
    event = sys.argv[2]
    amount_cases = int(sys.argv[3])
    test_case_filename = sys.argv[4]
    # NOTE(review): range(1, amount_cases) emits amount_cases - 1 cases;
    # confirm whether this off-by-one is intentional before changing it.
    for case in range(1, amount_cases):
        case_node = etree.Element('case')
        etree.SubElement(case_node, 'url').text = url + "/add_guest"
        etree.SubElement(case_node, 'method').text = 'POST'
        body = REQUEST_BODY_PART_1 + event + REQUEST_BODY_PART_2 + "Email" + str(case) + REQUEST_BODY_PART_3
        etree.SubElement(case_node, 'body').text = body
        etree.SubElement(case_node, 'add_header').text = CONTENT_TYPE
        root.append(case_node)
    etree.ElementTree(root).write(test_case_filename,
                                  pretty_print=True,
                                  encoding='iso-8859-1')
# Line to indicate that this is the main
if __name__ == "__main__":
main()
| 32.878049 | 108 | 0.654303 |
9d2ac6bbd7d9ef2580e7d60000631f555a05c17a | 15,463 | py | Python | sdk/python/pulumi_azure_native/web/v20210101/web_app_relay_service_connection.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/web/v20210101/web_app_relay_service_connection.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/web/v20210101/web_app_relay_service_connection.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['WebAppRelayServiceConnectionArgs', 'WebAppRelayServiceConnection']
@pulumi.input_type
class WebAppRelayServiceConnectionArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
biztalk_uri: Optional[pulumi.Input[str]] = None,
entity_connection_string: Optional[pulumi.Input[str]] = None,
entity_name: Optional[pulumi.Input[str]] = None,
hostname: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
resource_connection_string: Optional[pulumi.Input[str]] = None,
resource_type: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a WebAppRelayServiceConnection resource.
:param pulumi.Input[str] name: Name of the app.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
:param pulumi.Input[str] kind: Kind of resource.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if biztalk_uri is not None:
pulumi.set(__self__, "biztalk_uri", biztalk_uri)
if entity_connection_string is not None:
pulumi.set(__self__, "entity_connection_string", entity_connection_string)
if entity_name is not None:
pulumi.set(__self__, "entity_name", entity_name)
if hostname is not None:
pulumi.set(__self__, "hostname", hostname)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if port is not None:
pulumi.set(__self__, "port", port)
if resource_connection_string is not None:
pulumi.set(__self__, "resource_connection_string", resource_connection_string)
if resource_type is not None:
pulumi.set(__self__, "resource_type", resource_type)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name of the app.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
Name of the resource group to which the resource belongs.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="biztalkUri")
def biztalk_uri(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "biztalk_uri")
@biztalk_uri.setter
def biztalk_uri(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "biztalk_uri", value)
@property
@pulumi.getter(name="entityConnectionString")
def entity_connection_string(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "entity_connection_string")
@entity_connection_string.setter
def entity_connection_string(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "entity_connection_string", value)
@property
@pulumi.getter(name="entityName")
def entity_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "entity_name")
@entity_name.setter
def entity_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "entity_name", value)
@property
@pulumi.getter
def hostname(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "hostname")
@hostname.setter
def hostname(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "hostname", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def port(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "port")
@port.setter
def port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter(name="resourceConnectionString")
def resource_connection_string(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "resource_connection_string")
@resource_connection_string.setter
def resource_connection_string(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_connection_string", value)
@property
@pulumi.getter(name="resourceType")
def resource_type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "resource_type")
@resource_type.setter
def resource_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_type", value)
class WebAppRelayServiceConnection(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
biztalk_uri: Optional[pulumi.Input[str]] = None,
entity_connection_string: Optional[pulumi.Input[str]] = None,
entity_name: Optional[pulumi.Input[str]] = None,
hostname: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
resource_connection_string: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_type: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Hybrid Connection for an App Service app.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] name: Name of the app.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: WebAppRelayServiceConnectionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Hybrid Connection for an App Service app.
:param str resource_name: The name of the resource.
:param WebAppRelayServiceConnectionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(WebAppRelayServiceConnectionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
biztalk_uri: Optional[pulumi.Input[str]] = None,
entity_connection_string: Optional[pulumi.Input[str]] = None,
entity_name: Optional[pulumi.Input[str]] = None,
hostname: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
resource_connection_string: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_type: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = WebAppRelayServiceConnectionArgs.__new__(WebAppRelayServiceConnectionArgs)
__props__.__dict__["biztalk_uri"] = biztalk_uri
__props__.__dict__["entity_connection_string"] = entity_connection_string
__props__.__dict__["entity_name"] = entity_name
__props__.__dict__["hostname"] = hostname
__props__.__dict__["kind"] = kind
if name is None and not opts.urn:
raise TypeError("Missing required property 'name'")
__props__.__dict__["name"] = name
__props__.__dict__["port"] = port
__props__.__dict__["resource_connection_string"] = resource_connection_string
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["resource_type"] = resource_type
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:web/v20210101:WebAppRelayServiceConnection"), pulumi.Alias(type_="azure-native:web:WebAppRelayServiceConnection"), pulumi.Alias(type_="azure-nextgen:web:WebAppRelayServiceConnection"), pulumi.Alias(type_="azure-native:web/v20150801:WebAppRelayServiceConnection"), pulumi.Alias(type_="azure-nextgen:web/v20150801:WebAppRelayServiceConnection"), pulumi.Alias(type_="azure-native:web/v20160801:WebAppRelayServiceConnection"), pulumi.Alias(type_="azure-nextgen:web/v20160801:WebAppRelayServiceConnection"), pulumi.Alias(type_="azure-native:web/v20180201:WebAppRelayServiceConnection"), pulumi.Alias(type_="azure-nextgen:web/v20180201:WebAppRelayServiceConnection"), pulumi.Alias(type_="azure-native:web/v20181101:WebAppRelayServiceConnection"), pulumi.Alias(type_="azure-nextgen:web/v20181101:WebAppRelayServiceConnection"), pulumi.Alias(type_="azure-native:web/v20190801:WebAppRelayServiceConnection"), pulumi.Alias(type_="azure-nextgen:web/v20190801:WebAppRelayServiceConnection"), pulumi.Alias(type_="azure-native:web/v20200601:WebAppRelayServiceConnection"), pulumi.Alias(type_="azure-nextgen:web/v20200601:WebAppRelayServiceConnection"), pulumi.Alias(type_="azure-native:web/v20200901:WebAppRelayServiceConnection"), pulumi.Alias(type_="azure-nextgen:web/v20200901:WebAppRelayServiceConnection"), pulumi.Alias(type_="azure-native:web/v20201001:WebAppRelayServiceConnection"), pulumi.Alias(type_="azure-nextgen:web/v20201001:WebAppRelayServiceConnection"), pulumi.Alias(type_="azure-native:web/v20201201:WebAppRelayServiceConnection"), pulumi.Alias(type_="azure-nextgen:web/v20201201:WebAppRelayServiceConnection"), pulumi.Alias(type_="azure-native:web/v20210115:WebAppRelayServiceConnection"), pulumi.Alias(type_="azure-nextgen:web/v20210115:WebAppRelayServiceConnection"), pulumi.Alias(type_="azure-native:web/v20210201:WebAppRelayServiceConnection"), 
pulumi.Alias(type_="azure-nextgen:web/v20210201:WebAppRelayServiceConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(WebAppRelayServiceConnection, __self__).__init__(
'azure-native:web/v20210101:WebAppRelayServiceConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'WebAppRelayServiceConnection':
"""
Get an existing WebAppRelayServiceConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = WebAppRelayServiceConnectionArgs.__new__(WebAppRelayServiceConnectionArgs)
__props__.__dict__["biztalk_uri"] = None
__props__.__dict__["entity_connection_string"] = None
__props__.__dict__["entity_name"] = None
__props__.__dict__["hostname"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["name"] = None
__props__.__dict__["port"] = None
__props__.__dict__["resource_connection_string"] = None
__props__.__dict__["resource_type"] = None
__props__.__dict__["type"] = None
return WebAppRelayServiceConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="biztalkUri")
def biztalk_uri(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "biztalk_uri")
@property
@pulumi.getter(name="entityConnectionString")
def entity_connection_string(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "entity_connection_string")
@property
@pulumi.getter(name="entityName")
def entity_name(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "entity_name")
@property
@pulumi.getter
def hostname(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "hostname")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def port(self) -> pulumi.Output[Optional[int]]:
return pulumi.get(self, "port")
@property
@pulumi.getter(name="resourceConnectionString")
def resource_connection_string(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "resource_connection_string")
@property
@pulumi.getter(name="resourceType")
def resource_type(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "resource_type")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
| 46.575301 | 2,021 | 0.670439 |
abde5948bfb9448ac231ad1734c1110428078ef9 | 196 | py | Python | mm_tools/web/models/__init__.py | ceko/mischief-tools | 10618982a08aa765e5e71bb6ac7d4ee6886cb65f | [
"MIT"
] | null | null | null | mm_tools/web/models/__init__.py | ceko/mischief-tools | 10618982a08aa765e5e71bb6ac7d4ee6886cb65f | [
"MIT"
] | 5 | 2021-03-19T02:48:51.000Z | 2021-09-22T18:58:55.000Z | mm_tools/web/models/__init__.py | ceko/mischief-tools | 10618982a08aa765e5e71bb6ac7d4ee6886cb65f | [
"MIT"
] | null | null | null | from django.db import models
from .items import Item, Priority, ItemTier
from .tokens import Token
from .raids import *
from .base import *
from .characters import Character
from .users import *
| 21.777778 | 43 | 0.780612 |
70de64d2c6fad42a1deff1d730cb527188b852a5 | 383 | py | Python | examples/keras/iris_server.py | jacquelinegarrahan/lume-epics | f647107d87c80f3b69eaa5d49de08f212e95428d | [
"BSD-3-Clause-LBNL"
] | 1 | 2020-06-24T02:54:28.000Z | 2020-06-24T02:54:28.000Z | examples/keras/iris_server.py | jacquelinegarrahan/lume-epics | f647107d87c80f3b69eaa5d49de08f212e95428d | [
"BSD-3-Clause-LBNL"
] | 1 | 2021-07-29T03:59:01.000Z | 2021-07-29T03:59:01.000Z | examples/keras/iris_server.py | jacquelinegarrahan/lume-epics | f647107d87c80f3b69eaa5d49de08f212e95428d | [
"BSD-3-Clause-LBNL"
] | 5 | 2020-06-12T21:29:47.000Z | 2021-10-16T22:48:15.000Z |
from lume_epics.epics_server import Server
from lume_model.utils import model_from_yaml
# Load the model class and its constructor kwargs from the YAML config.
# load_model=False: only the class and kwargs are read here — NOTE(review):
# confirm against the lume_model API that the model is built later by Server.
with open("examples/files/iris_config.yml", "r") as f:
    model_class, model_kwargs = model_from_yaml(f, load_model=False)

# EPICS process-variable prefix under which the model is served.
prefix = "test"
server = Server(
    model_class,
    prefix,
    model_kwargs=model_kwargs
)
# monitor = False does not loop in main thread
server.start(monitor=True)
f8d1a7c2ec9a5d79795c58aa579b89c5d0213001 | 794 | py | Python | bts/utilities/octoBTS/dataBaseClass.py | ellisgeek/Burn-the-Subs | c30c41b6bdba573f906c36d367547c7305017b4f | [
"MIT"
] | 10 | 2018-09-25T18:05:41.000Z | 2020-05-05T21:12:22.000Z | bts/utilities/octoBTS/dataBaseClass.py | ellisgeek/Burn-the-Subs | c30c41b6bdba573f906c36d367547c7305017b4f | [
"MIT"
] | 1 | 2018-12-29T19:36:21.000Z | 2018-12-29T19:36:21.000Z | bts/utilities/octoBTS/dataBaseClass.py | ellisgeek/Burn-the-Subs | c30c41b6bdba573f906c36d367547c7305017b4f | [
"MIT"
] | 6 | 2018-12-29T00:59:22.000Z | 2021-09-15T17:12:01.000Z | # db shit goes here
# so if we want to change anything on the fly
# we don't have to edit like 7 programs.
# this is from: with help from: cordain99 etc
# status = 0: name entered 1: name placed 2:gcode ready 3: burnt to board
from peewee import *
from playhouse.sqliteq import SqliteQueueDatabase
from . import settings
dbFile = settings.dataBaseFile
db = SqliteQueueDatabase(dbFile)
entered = settings.nameEntered
#db.atomic('IMMEDIATE')
class Sub(Model):
    """Database record for one guest-name entry to be burned onto a board."""
    # Lifecycle status — per the module header comment:
    # 0 = name entered, 1 = name placed, 2 = gcode ready, 3 = burnt to board.
    status = IntegerField(default = entered)
    userName = CharField()
    # Presumably the font size and placement coordinates of the rendered
    # name — NOTE(review): confirm units/meaning against the placement code.
    fontSize = IntegerField(default = 0)
    positionX = IntegerField(default = 0)
    positionY = IntegerField(default = 0)
    # Generated G-code for this entry (empty string until generated).
    gCode = CharField(default = "")
    entryTime = DateTimeField()
    class Meta:
        # All rows live in the shared queued SQLite database defined above.
        database = db

# Create the table on import if it does not already exist.
db.create_tables([Sub])
31e4514253566355c82c69780f0a4369f03a460b | 73 | py | Python | combine.py | kw0nta/pyledjr | b3f4c5f727defa025b78cd5cdd9f2cb134559e4c | [
"MIT"
] | 1 | 2019-08-11T03:48:48.000Z | 2019-08-11T03:48:48.000Z | combine.py | kw0nta/pyledjr | b3f4c5f727defa025b78cd5cdd9f2cb134559e4c | [
"MIT"
] | null | null | null | combine.py | kw0nta/pyledjr | b3f4c5f727defa025b78cd5cdd9f2cb134559e4c | [
"MIT"
] | null | null | null | """
Combine individual cleaned bank account data into a single file.
"""
| 18.25 | 64 | 0.739726 |
3dab07d52ad2a6d28440ce56d7ee97244aca53fd | 1,300 | py | Python | test.py | rapsealk/yolov3-tf2 | 4e5381a5c8ceda28668e0f144e58638ee64d9187 | [
"MIT"
] | null | null | null | test.py | rapsealk/yolov3-tf2 | 4e5381a5c8ceda28668e0f144e58638ee64d9187 | [
"MIT"
] | null | null | null | test.py | rapsealk/yolov3-tf2 | 4e5381a5c8ceda28668e0f144e58638ee64d9187 | [
"MIT"
] | null | null | null | #!/usr/local/bin/python3
# -*- coding: utf-8 -*-
import unittest
import os
import io
import time
import tensorflow as tf
import numpy as np
from PIL import ImageGrab
shape = (1080, 1920, 3)
class TensorFlowImageTestCase(unittest.TestCase):
    """Checks that screen captures and PNG files decode to the expected
    (1080, 1920, 3) uint8 tensor (see module-level ``shape``) fast enough."""
    def test_pillow_image(self):
        # Capture the screen 10 times, round-trip each grab through PNG bytes
        # and tf.image.decode_image, and require the mean latency < 0.2 s.
        # NOTE(review): wall-clock assertions are machine-dependent (flaky on
        # slow/headless CI hosts), and ImageGrab requires a 1080p display.
        ticks = []
        for _ in range(10):
            perf_time = time.perf_counter()
            bytesio = io.BytesIO()
            ImageGrab.grab().save(bytesio, format='PNG')
            image_raw = tf.image.decode_image(bytesio.getvalue(), channels=3)
            self.assertTrue(image_raw.shape == shape)
            self.assertTrue(image_raw.dtype == tf.uint8)
            tick = time.perf_counter() - perf_time
            ticks.append(tick)
        print('tick.mean:', np.mean(ticks))
        self.assertTrue(np.mean(ticks) < 0.2)
    def test_bytes_image(self):
        # Decode a PNG fixture from ../workspace/data and require the whole
        # read+decode round trip to finish in under one second.
        perf_time = time.perf_counter()
        path = os.path.join(os.path.dirname(__file__), '..', 'workspace', 'data', '000.png')
        image_raw = tf.image.decode_image(open(path, 'rb').read(), channels=3)
        self.assertTrue(image_raw.shape == shape)
        self.assertTrue(image_raw.dtype == tf.uint8)
        tick = time.perf_counter() - perf_time
        self.assertTrue(tick < 1)
if __name__ == "__main__":
unittest.main()
| 30.232558 | 92 | 0.625385 |
5f89e6c0ee1155767632d3e5269e846be578789a | 6,104 | py | Python | fairseq/distributed/legacy_distributed_data_parallel.py | MANGA-UOFA/NAUS | 8c0c0815a280d0661adf588302848c7f1ecc56da | [
"MIT"
] | 8 | 2022-03-19T15:20:10.000Z | 2022-03-30T20:29:25.000Z | fairseq/distributed/legacy_distributed_data_parallel.py | MANGA-UOFA/NAUS | 8c0c0815a280d0661adf588302848c7f1ecc56da | [
"MIT"
] | null | null | null | fairseq/distributed/legacy_distributed_data_parallel.py | MANGA-UOFA/NAUS | 8c0c0815a280d0661adf588302848c7f1ecc56da | [
"MIT"
] | 2 | 2022-03-18T14:42:41.000Z | 2022-03-19T15:23:20.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
A modified version of the legacy DistributedDataParallel module that uses c10d
communication primitives. This version is simpler than the latest PyTorch
version and is useful for debugging. Notably it does not overlap gradient
communication with the backward pass, which makes it slower but more robust
than the PyTorch version.
This version also supports the *no_sync* context manager, which allows faster
training with `--update-freq`.
"""
from collections import OrderedDict
from contextlib import contextmanager
import torch
from torch import nn
from fairseq.distributed import utils
class LegacyDistributedDataParallel(nn.Module):
    """Implements distributed data parallelism at the module level.

    A simplified version of :class:`torch.nn.parallel.DistributedDataParallel`.
    This version uses a c10d process group for communication and does not
    broadcast buffers.

    Args:
        module (~torch.nn.Module): module to be parallelized
        process_group: the c10d process group to be used for distributed data
            parallel all-reduction.
        buffer_size (int, optional): number of elements to buffer before
            performing all-reduce (default: 256M).
    """

    def __init__(self, module, process_group, buffer_size=2 ** 28):
        super().__init__()

        self.module = module
        self.process_group = process_group
        self.world_size = utils.get_world_size(self.process_group)

        # Never use a bigger buffer than the number of model params
        self.buffer_size = min(buffer_size, sum(p.numel() for p in module.parameters()))
        # Flat communication buffer; allocated lazily on first all-reduce so
        # that it lands on the same device/dtype as the model parameters.
        self.buffer = None

        # We can also forcibly accumulate grads locally and only do the
        # all-reduce at some later time
        self.accumulate_grads = False

        # make per-device lists of parameters
        paramlists = OrderedDict()
        for param in self.module.parameters():
            device = param.device
            if paramlists.get(device) is None:
                paramlists[device] = []
            paramlists[device] += [param]
        self.per_device_params = list(paramlists.values())

    @contextmanager
    def no_sync(self):
        """A context manager to disable gradient synchronization."""
        old_accumulate_grads = self.accumulate_grads
        self.accumulate_grads = True
        try:
            yield
        finally:
            # Bug fix: restore the flag even if the context body raises;
            # previously an exception left gradient syncing permanently
            # disabled for all subsequent updates.
            self.accumulate_grads = old_accumulate_grads

    def forward(self, *inputs, **kwargs):
        """Run the wrapped module unchanged; no gradient hooks are installed."""
        return self.module(*inputs, **kwargs)

    def all_reduce_grads(self):
        """
        This function must be called explicitly after backward to reduce
        gradients. There is no automatic hook like c10d.
        """

        def all_reduce_params(params):
            # All-reduce the grads of `params` (all on one device) through one
            # flat buffer, then scatter the reduced values back in place.
            buffer = self.buffer
            nonzero_buffer = False
            if len(params) > 1:
                offset = 0
                for p in params:
                    sz = p.numel()
                    if p.grad is not None:
                        buffer[offset : offset + sz].copy_(p.grad.data.view(-1))
                        nonzero_buffer = True
                    else:
                        buffer[offset : offset + sz].zero_()
                    offset += sz
            else:
                # we only have a single grad to all-reduce
                p = params[0]
                if p.grad is not None:
                    buffer = p.grad.data
                    nonzero_buffer = True
                elif p.numel() <= self.buffer.numel():
                    buffer = buffer[: p.numel()]
                    buffer.zero_()
                else:
                    buffer = torch.zeros_like(p)

            if nonzero_buffer:
                # Pre-divide by world size so the reduction yields the mean
                # gradient (assumes utils.all_reduce sums — confirm in utils).
                buffer.div_(self.world_size)

            utils.all_reduce(buffer, self.process_group)

            # copy all-reduced grads back into their original place
            offset = 0
            for p in params:
                sz = p.numel()
                if p.grad is not None:
                    p.grad.data.copy_(buffer[offset : offset + sz].view_as(p))
                else:
                    p.grad = buffer[offset : offset + sz].view_as(p).clone()
                offset += sz

        def reduction_fn():
            # This function only needs to be called once
            if self.accumulate_grads:
                # Inside a no_sync() block: skip communication entirely.
                return

            if self.buffer is None:
                self.buffer = next(self.module.parameters()).new(self.buffer_size)

            for params in self.per_device_params:
                # All-reduce the gradients in buckets
                offset = 0
                buffered_params = []
                for param in params:
                    if not param.requires_grad:
                        continue

                    if param.grad is None:
                        param.grad = torch.zeros_like(param)

                    if hasattr(param, "expert"):
                        # Skip gradient sync for unshared parameters
                        continue

                    if param.grad.requires_grad:
                        raise RuntimeError(
                            "DistributedDataParallel only works "
                            "with gradients that don't require "
                            "grad"
                        )

                    sz = param.numel()
                    if sz > self.buffer.numel():
                        # all-reduce big params directly
                        all_reduce_params([param])
                    else:
                        # Flush the current bucket first if this param would
                        # overflow the shared buffer.
                        if offset + sz > self.buffer.numel():
                            all_reduce_params(buffered_params)
                            offset = 0
                            buffered_params.clear()
                        buffered_params.append(param)
                        offset += sz

                if len(buffered_params) > 0:
                    all_reduce_params(buffered_params)

        reduction_fn()
fc642e7cae179e800da97f7f9df4799650624163 | 2,535 | py | Python | Moon_Data/source/predict.py | ACasey13/udacity-ML-case-studies | ebdbe44786403d3373779d576a00f552754cf15d | [
"MIT"
] | null | null | null | Moon_Data/source/predict.py | ACasey13/udacity-ML-case-studies | ebdbe44786403d3373779d576a00f552754cf15d | [
"MIT"
] | null | null | null | Moon_Data/source/predict.py | ACasey13/udacity-ML-case-studies | ebdbe44786403d3373779d576a00f552754cf15d | [
"MIT"
] | null | null | null | import argparse
import os
import sys
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
from io import StringIO
from six import BytesIO
# import model
from model import SimpleNet
# accepts and returns numpy data
CONTENT_TYPE = 'application/x-npy'
def model_fn(model_dir):
    """Load the trained SimpleNet from the `model_dir` directory.

    Reads `model_info.pth` (constructor hyperparameters saved at training
    time) and `model.pth` (state dict), rebuilds the network, and returns it
    on the best available device in eval mode.
    """
    print("Loading model.")

    # Determine the device first so every torch.load can map tensors onto it.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # First, load the parameters used to create the model.
    model_info = {}
    model_info_path = os.path.join(model_dir, 'model_info.pth')
    with open(model_info_path, 'rb') as f:
        # Bug fix: map_location lets GPU-saved checkpoints load on CPU-only
        # inference hosts; without it torch.load raises on deserialization.
        model_info = torch.load(f, map_location=device)
    print("model_info: {}".format(model_info))

    model = SimpleNet(model_info['input_dim'], model_info['hidden_dim'],
                      model_info['num_hidden'], model_info['output_dim'])

    # Load the stored model parameters.
    model_path = os.path.join(model_dir, 'model.pth')
    with open(model_path, 'rb') as f:
        model.load_state_dict(torch.load(f, map_location=device))

    # prep for testing
    model.to(device).eval()

    print("Done loading model.")
    return model
def input_fn(serialized_input_data, content_type):
    """Deserialize an application/x-npy request body into a numpy array."""
    print('Deserializing the input data.')
    # Guard clause: reject anything that is not the supported content type.
    if content_type != CONTENT_TYPE:
        raise Exception('Requested unsupported ContentType in content_type: ' + content_type)
    return np.load(BytesIO(serialized_input_data))
def output_fn(prediction_output, accept):
    """Serialize a numpy prediction as application/x-npy response bytes."""
    print('Serializing the generated output.')
    # Guard clause: only the npy content type is supported.
    if accept != CONTENT_TYPE:
        raise Exception('Requested unsupported ContentType in Accept: ' + accept)
    out = BytesIO()
    np.save(out, prediction_output)
    return out.getvalue(), accept
def predict_fn(input_data, model):
    """Score a numpy batch with `model` and return predictions as numpy.

    The input is cast to float32, wrapped in a tensor on the model's
    device, passed through the model, and the raw outputs (single values
    in [0, 1]) are returned as a numpy array.
    """
    print('Predicting class labels for the input data...')
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # numpy -> float32 tensor on the right device.
    batch = torch.from_numpy(input_data.astype('float32')).to(device)
    # Make sure train-mode layers (dropout, batchnorm) are disabled.
    model.eval()
    output = model(batch)
    # Detach from the autograd graph and hand back plain numpy.
    return output.cpu().detach().numpy()
0305ca99d4af997bf57b71067468ac537b615655 | 10,927 | py | Python | explain.py | IPL-UV/LatentGranger | 78d25621f94f338c3c1957a216679c94b2d9b764 | [
"BSD-3-Clause"
] | null | null | null | explain.py | IPL-UV/LatentGranger | 78d25621f94f338c3c1957a216679c94b2d9b764 | [
"BSD-3-Clause"
] | null | null | null | explain.py | IPL-UV/LatentGranger | 78d25621f94f338c3c1957a216679c94b2d9b764 | [
"BSD-3-Clause"
] | 1 | 2021-08-09T09:58:19.000Z | 2021-08-09T09:58:19.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# BSD 3-Clause License (see LICENSE file)
# Copyright (c) Image and Signaling Process Group (ISP) IPL-UV 2021
# All rights reserved.
"""
Explain latent space of LatentGranger
"""
import os
import git
import numpy as np
import argparse, yaml
from datetime import datetime
import netCDF4 as nc
from natsort import natsorted
import torch
import pytorch_lightning as pl
from PIL import Image
import loaders
# Model
import archs
from losses import lag_cor
from losses import granger_loss, granger_simple_loss
from utils import *
# PyTorch Captum for XAI
from captum.attr import IntegratedGradients
from captum.attr import LayerConductance, LayerIntegratedGradients
from captum.attr import NeuronConductance, NeuronIntegratedGradients
# ArgParse
parser = argparse.ArgumentParser(description="ArgParse")
parser.add_argument('--arch', default='vae', type=str,
help='arch name (default: vae)')
parser.add_argument('-d', '--data', default='toy', type=str,
help='database name (default: toy)')
parser.add_argument('--loader', default='base', type=str,
help='loaders name (default: base) associated to a config file in configs/loaders/')
parser.add_argument('--dir', default='experiment', type=str,
help='path to experiment folder')
parser.add_argument('-c','--checkpoint', default='last.ckpt', type=str,
help='checkpoint (default: last)')
parser.add_argument('-t','--timestamp', default='', type=str,
help='timestampt (default:)')
parser.add_argument('--train', action = 'store_true',
help='use trainig data')
parser.add_argument('--val', action = 'store_true',
help='use val data')
parser.add_argument('--save', action = 'store_true',
help='save images')
parser.add_argument('--grad', action = 'store_true',
help='compute average gradient')
parser.add_argument('--extract', action = 'store_true',
help='extract latent series')
parser.add_argument('--nig', action = 'store_true',
help='run NIG')
parser.add_argument('--latint', action = 'store_true',
help='run LATENT INTERVENTIONS')
parser.add_argument('--idx', type=int, default = 0,
help='index of reconstruction to plot')
args = parser.parse_args()
log_root = os.path.join(args.dir, 'logs', args.data, args.arch)
check_root = os.path.join(args.dir, 'checkpoints', args.data, args.arch)
print(check_root)
# Checkpoint subfolders are named by ISO timestamps (see the
# datetime.fromisoformat parsing below); natural sort puts the newest last.
allchckpts = natsorted(
    [
        fname
        for fname in os.listdir(check_root)
    ],
)
if args.timestamp == '':
    # No timestamp requested: take the most recent run.
    chosen = allchckpts[-1]
else:
    ### search the closer one
    ### .... to do ...
    #for timestamp in timestamps:
    # Pick the run whose timestamp is closest to the requested one.
    dt = datetime.fromisoformat(args.timestamp)
    chosen = min(allchckpts,key=lambda x : abs(datetime.fromisoformat(x) - dt))
checkpoint = os.path.join(check_root, chosen, args.checkpoint)
print('chosen checkpoint: ' + checkpoint)
with open(f'configs/archs/{args.arch}.yaml') as file:
    # The FullLoader parameter handles the conversion from YAML
    # scalar values to Python dictionary format
    arch_config = yaml.load(file, Loader=yaml.FullLoader)
## define the model and load the checkpoint
model = getattr(archs, arch_config['class']).load_from_checkpoint(checkpoint)
print("model loaded with chosen checkpoint")
################### print model info #####################
print(model)
print(f'gamma: {model.gamma}, maxlag: {model.lag}')
#################### load data #########################
with open(f'configs/loaders/{args.loader}.yaml') as file:
    # The FullLoader parameter handles the conversion from YAML
    # scalar values to Python dictionary format
    loader_config = yaml.load(file, Loader=yaml.FullLoader)
with open(f'configs/data/{args.data}.yaml') as file:
    # The FullLoader parameter handles the conversion from YAML
    # scalar values to Python dictionary format
    data_config = yaml.load(file, Loader=yaml.FullLoader)
# Build data module
datamodule_class = getattr(loaders, loader_config['class'])
datamodule = datamodule_class(loader_config, data_config, arch_config['processing_mode'])
##################### here we can do inference, plot ##############
# Select the data split to explain (default: test split).
if args.train:
    data = datamodule.data_train
elif args.val:
    data = datamodule.data_val
else:
    data = datamodule.data_test
savedir = os.path.join('viz', chosen)
os.makedirs(savedir, exist_ok = True)
# Add a leading batch dimension and enable input gradients so that
# attribution methods below can backprop into the input.
x, target = data.getAll()
x = torch.reshape(x, (1,) + x.shape)
x.requires_grad_()
target = torch.reshape(target, (1,) + target.shape)
model.eval()
x_out, latent, mu, sigma = model(x)
# Report how strongly each latent dimension relates to the target series.
for j in range(latent.shape[-1]):
    corr = lag_cor(latent[:,:,j], target, lag = model.lag)
    print(f'lagged correlation with target of latent {j}: {corr}')
    gloss = granger_loss(latent, target, maxlag = model.lag, idx=j)
    print(f'granger losss of latent {j}: {gloss}')
if args.save:
    svpth = os.path.join(savedir, f'{chosen}_latents.tiff')
    plot_latent(mu[:,:], target, svpth)
else:
    plot_latent(mu[:,0:1], target)
### prepare arrays
# `mask` selects valid spatial locations when the dataset provides one;
# otherwise it is all-True (a fresh zeros array compared against 0).
if hasattr(data, 'mask'):
    mask = data.mask
    avg = np.zeros(data.mask.shape + (latent.shape[-1],), dtype = float)
    imout = np.zeros(data.mask.shape + (3,), dtype = float)
else:
    avg = np.zeros(data.input_size + (latent.shape[-1],), dtype = float)
    imout = np.zeros(data.input_size + (3,), dtype = float)
    mask = avg[:,:,0] == 0
tpb = model.tpb
# imout channels: [0] input frame at args.idx, [1] reconstruction,
# [2] per-pixel squared error.
if arch_config['processing_mode']== 'flat':
    imout[mask, 0] = x.detach().numpy()[0, args.idx, :]
    imout[mask, 1] = x_out.detach().numpy()[0, args.idx, :]
    imout[mask, 2] = (imout[mask, 0] - imout[mask, 1])**2
else:
    imout[:, :, 0] = x.detach().numpy()[0, args.idx, :, :, 0]
    imout[:, :, 1] = x_out.detach().numpy()[0, args.idx, :, :, 0]
    imout[:, :, 2] = (imout[:, :, 0] - imout[:, :, 1])**2
if args.grad:
    # Saliency: for each latent dim j, backprop each timestep's mu through
    # the encoder, accumulate |d mu / d x|, then average over time.
    for j in range(latent.shape[-1]):
        grad = np.zeros(x.shape[1:])
        for i in range(latent.shape[1]):
            mu[i,j].backward(retain_graph = True)
            grad[i,:] += np.abs(x.grad.numpy()[0,i,:])
            #grad[i,:] += x.grad.numpy()[0,i,:]
            # Zero the input gradient before the next backward pass.
            x.grad.fill_(0.0)
        avg[:,:,j][mask] = np.mean(grad, 0)
        #avg[:,:,j][mask] = np.amax(grad, 0)
        #avg[:,:,j] = grad.mean(0)[:,:,0]
if args.save:
    img = Image.fromarray(imout[:,:,1])
    svpth = os.path.join(savedir, f'{chosen}_reconstruction_lag={model.lag}.tiff')
    img.save(svpth)
    for j in range(latent.shape[-1]):
        img = Image.fromarray(avg[:,:,j])
        svpth = os.path.join(savedir, f'{chosen}_grad_avg_latent{j}_lag={model.lag}.tiff')
        img.save(svpth)
else:
    plot_output(imout)
    if args.grad:
        plot_output(avg)
if args.extract:
    ## save latent
    np.savetxt(os.path.join(savedir, f'{chosen}_latents.csv'), mu.detach().numpy())
    ## save target
    np.savetxt(os.path.join(savedir, f'{chosen}_target.csv'), target.detach().numpy()[0,:])
if args.nig:
    #baseline = np.zeros(x.shape, dtype="float32")
    #baseline = torch.Tensor(baseline)
    # Baseline for integrated gradients: the temporal mean of the input,
    # broadcast back to the full input shape.
    baseline = torch.mean(x[0,:,:], 0).expand(x.shape)
    nig = NeuronIntegratedGradients(model, model.mu_layer, multiply_by_inputs = True)
    for j in range(latent.shape[-1]):
        avg_nig = np.zeros(x.shape[-1], dtype="float32")
        # Baseline for Integrated Gradients
        # Zeros (default)
        # 1) NeuronIntegratedGradients, to see, for each latent representation,
        # the attribution to each of the input spatial locations in features (e.g. NDVI)
        attr_maps = nig.attribute(x,(j,), baselines=baseline, internal_batch_size=1)
        attr_maps = attr_maps
        os.makedirs(os.path.join(savedir, f'nig{j}'), exist_ok = True)
        # Save one attribution map per timestep, and accumulate their
        # absolute values for a time-averaged map.
        for i in range(attr_maps.shape[1]):
            imgarray = np.zeros(mask.shape)
            avg_nig += np.abs(attr_maps.detach().numpy()[0,i,:])
            imgarray[mask] = np.abs(attr_maps.detach().numpy()[0,i,:])
            img = Image.fromarray(imgarray)
            img.save(os.path.join(savedir, f'nig{j}', f'{i}_.tiff'))
        avg_nig = avg_nig / attr_maps.shape[1]
        imgarray = np.zeros(mask.shape)
        imgarray[mask] = avg_nig
        img = Image.fromarray(imgarray)
        img.save(os.path.join(savedir, f'avg_nig_{j}_.tiff'))
if args.latint:
    # Latent interventions: for each latent dim j, clamp the latent series
    # to its max/min, or shift it by +/- one sigma, decode, and record the
    # time-averaged change of the reconstruction per spatial location.
    avg.fill(0) ##clean avg array
    avg_max = avg.copy()
    avg_min = avg.copy()
    avg_plus = avg.copy()
    avg_minus = avg.copy()
    latent_max = torch.amax(mu, 0)
    latent_min = torch.amin(mu, 0)
    std, m = torch.std_mean(mu, 0)
    base = model.decoder(torch.zeros(mu.shape))
    for j in range(mu.shape[1]):
        latent_int_max = mu.clone()
        latent_int_max[:,j] = latent_max[j]
        latent_int_min = mu.clone()
        latent_int_min[:,j] = latent_min[j]
        latent_int_plus = mu.clone()
        latent_int_plus[:,j] += 1 * sigma[:,j]
        latent_int_minus = mu.clone()
        latent_int_minus[:,j] -= 1 * sigma[:,j]
        out_int_max = model.decoder(latent_int_max)
        out_int_min = model.decoder(latent_int_min)
        out_int_plus = model.decoder(latent_int_plus)
        out_int_minus = model.decoder(latent_int_minus)
        # Difference w.r.t. the unperturbed reconstruction.
        diff_int_max = (out_int_max - x_out[0,:,:]) #/ (x_out[0,:,:] + 0.0001)
        diff_int_min = (out_int_min - x_out[0,:,:]) #/ (x_out[0,:,:] + 0.0001)
        diff_int_plus = (out_int_plus - x_out[0,:,:]) #/ (x_out[0,:,:] + 0.0001)
        diff_int_minus = (out_int_minus - x_out[0,:,:]) #/ (x_out[0,:,:] + 0.0001)
        avg_max[:,:,j][mask] = np.mean(diff_int_max.detach().numpy(), 0)
        avg_min[:,:,j][mask] = np.mean(diff_int_min.detach().numpy(), 0)
        avg_plus[:,:,j][mask] = np.mean(diff_int_plus.detach().numpy(), 0)
        avg_minus[:,:,j][mask] = np.mean(diff_int_minus.detach().numpy(), 0)
    if args.save:
        for j in range(latent.shape[-1]):
            img_max = Image.fromarray(avg_max[:,:,j])
            img_min = Image.fromarray(avg_min[:,:,j])
            img_plus = Image.fromarray(avg_plus[:,:,j])
            img_minus = Image.fromarray(avg_minus[:,:,j])
            svpth = os.path.join(savedir, f'{chosen}_latint_max_avg_latent{j}_lag={model.lag}.tiff')
            img_max.save(svpth)
            svpth = os.path.join(savedir, f'{chosen}_latint_min_avg_latent{j}_lag={model.lag}.tiff')
            img_min.save(svpth)
            svpth = os.path.join(savedir, f'{chosen}_latint_plus_avg_latent{j}_lag={model.lag}.tiff')
            img_plus.save(svpth)
            svpth = os.path.join(savedir, f'{chosen}_latint_minus_avg_latent{j}_lag={model.lag}.tiff')
            img_minus.save(svpth)
    else:
        plot_output(avg_max)
        plot_output(avg_min)
        plot_output(avg_plus)
        plot_output(avg_minus)
| 35.477273 | 102 | 0.623867 |
a440664956723eab08fdbc275f38fa9b0ebae042 | 10,816 | py | Python | airbyte-integrations/bases/base-normalization/integration_tests/dbt_integration_test.py | ashis-Nayak-13/airbyte | e699f397401c509286fd29f2678e9ee860f001a5 | [
"MIT"
] | 2 | 2021-03-02T09:17:41.000Z | 2021-03-02T11:02:23.000Z | airbyte-integrations/bases/base-normalization/integration_tests/dbt_integration_test.py | ashis-Nayak-13/airbyte | e699f397401c509286fd29f2678e9ee860f001a5 | [
"MIT"
] | 4 | 2021-06-10T13:30:31.000Z | 2021-06-10T14:03:22.000Z | airbyte-integrations/bases/base-normalization/integration_tests/dbt_integration_test.py | keu/airbyte | 0bfd83052887e1ca72ddfdb339c62d2e39b050bc | [
"MIT"
] | 1 | 2021-07-02T15:08:53.000Z | 2021-07-02T15:08:53.000Z | #
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import json
import os
import random
import re
import shutil
import socket
import string
import subprocess
import sys
import threading
from typing import Any, Dict, List
from normalization.destination_type import DestinationType
from normalization.transform_config.transform import TransformConfig
class DbtIntegrationTest(object):
    """Harness for normalization integration tests.

    Spins up a throwaway local Postgres container, generates dbt profiles
    for a destination, and drives `dbt` inside the normalization docker
    image, scanning its output for errors.
    """
    def __init__(self):
        # Schema that all test tables are written into.
        self.target_schema = "test_normalization"
        # Random suffix so concurrent runs don't collide on container names.
        self.container_name = "test_normalization_db_" + self.random_string(3)
    @staticmethod
    def random_string(length: int) -> str:
        """Return `length` random lowercase ascii characters."""
        return "".join(random.choice(string.ascii_lowercase) for i in range(length))
    def setup_postgres_db(self):
        """Start a disposable localhost Postgres container and write its
        connection settings to ../secrets/postgres.json."""
        print("Starting localhost postgres container for tests")
        port = self.find_free_port()
        config = {
            "host": "localhost",
            "username": "integration-tests",
            "password": "integration-tests",
            "port": port,
            "database": "postgres",
            "schema": self.target_schema,
        }
        commands = [
            "docker",
            "run",
            "--rm",
            "--name",
            f"{self.container_name}",
            "-e",
            f"POSTGRES_USER={config['username']}",
            "-e",
            f"POSTGRES_PASSWORD={config['password']}",
            "-p",
            f"{config['port']}:5432",
            "-d",
            "postgres",
        ]
        print("Executing: ", " ".join(commands))
        subprocess.call(commands)
        if not os.path.exists("../secrets"):
            os.makedirs("../secrets")
        with open("../secrets/postgres.json", "w") as fh:
            fh.write(json.dumps(config))
    @staticmethod
    def find_free_port():
        """
        Find an unused port to create a database listening on localhost to run destination-postgres
        """
        # Binding to port 0 lets the OS choose a free ephemeral port.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(("", 0))
        addr = s.getsockname()
        s.close()
        return addr[1]
    def tear_down_postgres_db(self):
        """Kill the Postgres container started by setup_postgres_db (best effort)."""
        print("Stopping localhost postgres container for tests")
        try:
            subprocess.call(["docker", "kill", f"{self.container_name}"])
        except Exception as e:
            # Best-effort cleanup: failure here should not fail the test run.
            print(f"WARN: Exception while shutting down postgres db: {e}")
    @staticmethod
    def change_current_test_dir(request):
        # This makes the test run whether it is executed from the tests folder (with pytest/gradle)
        # or from the base-normalization folder (through pycharm)
        integration_tests_dir = os.path.join(request.fspath.dirname, "integration_tests")
        if os.path.exists(integration_tests_dir):
            os.chdir(integration_tests_dir)
        else:
            os.chdir(request.fspath.dirname)
    def generate_profile_yaml_file(self, destination_type: DestinationType, test_root_dir: str) -> Dict[str, Any]:
        """
        Each destination requires different settings to connect to. This step generates the adequate profiles.yml
        as described here: https://docs.getdbt.com/reference/profiles.yml
        """
        config_generator = TransformConfig()
        profiles_config = config_generator.read_json_config(f"../secrets/{destination_type.value.lower()}.json")
        # Adapt credential file to look like destination config.json
        if destination_type.value == DestinationType.BIGQUERY.value:
            profiles_config["credentials_json"] = json.dumps(profiles_config)
            profiles_config["dataset_id"] = self.target_schema
        else:
            profiles_config["schema"] = self.target_schema
        profiles_yaml = config_generator.transform(destination_type, profiles_config)
        config_generator.write_yaml_config(test_root_dir, profiles_yaml)
        return profiles_config
    @staticmethod
    def run_destination_process(message_file: str, test_root_dir: str, commands: List[str]):
        """Run a destination connector process, feeding `message_file` to its
        stdin and appending its output to destination_output.log.

        Returns True when the process exits with code 0.
        """
        print("Executing: ", " ".join(commands))
        with open(os.path.join(test_root_dir, "destination_output.log"), "ab") as f:
            process = subprocess.Popen(commands, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            def writer():
                # Stream the recorded messages into the destination's stdin,
                # then close stdin so the connector knows input is finished.
                if os.path.exists(message_file):
                    with open(message_file, "rb") as input_data:
                        while True:
                            line = input_data.readline()
                            if not line:
                                break
                            process.stdin.write(line)
                process.stdin.close()
            # Feed stdin on a separate thread while the main thread drains
            # stdout, to avoid deadlocking on full OS pipe buffers.
            thread = threading.Thread(target=writer)
            thread.start()
            for line in iter(process.stdout.readline, b""):
                f.write(line)
                sys.stdout.write(line.decode("utf-8"))
            thread.join()
            process.wait()
            return process.returncode == 0
    def dbt_run(self, test_root_dir: str):
        """
        Run the dbt CLI to perform transformations on the test raw data in the destination
        """
        # Perform sanity check on dbt project settings
        assert self.run_check_dbt_command("debug", test_root_dir)
        assert self.run_check_dbt_command("deps", test_root_dir)
        final_sql_files = os.path.join(test_root_dir, "final")
        shutil.rmtree(final_sql_files, ignore_errors=True)
        # Compile dbt models files into destination sql dialect, then run the transformation queries
        assert self.run_check_dbt_command("run", test_root_dir)
    @staticmethod
    def run_check_dbt_command(command: str, cwd: str) -> bool:
        """
        Run dbt subprocess while checking and counting for "ERROR", "FAIL" or "WARNING" printed in its outputs
        """
        error_count = 0
        # dbt runs inside the normalization docker image; the test dir is
        # mounted as /workspace and compiled models are exported to ./final.
        commands = [
            "docker",
            "run",
            "--rm",
            "--init",
            "-v",
            f"{cwd}:/workspace",
            "-v",
            f"{cwd}/build:/build",
            "-v",
            f"{cwd}/final:/build/run/airbyte_utils/models/generated",
            "-v",
            "/tmp:/tmp",
            "--network",
            "host",
            "--entrypoint",
            "/usr/local/bin/dbt",
            "-i",
            "airbyte/normalization:dev",
            command,
            "--profiles-dir=/workspace",
            "--project-dir=/workspace",
        ]
        print("Executing: ", " ".join(commands))
        print(f"Equivalent to: dbt {command} --profiles-dir={cwd} --project-dir={cwd}")
        with open(os.path.join(cwd, "dbt_output.log"), "ab") as f:
            process = subprocess.Popen(commands, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=os.environ)
            for line in iter(lambda: process.stdout.readline(), b""):
                f.write(line)
                str_line = line.decode("utf-8")
                sys.stdout.write(str_line)
                # keywords to match lines as signaling errors
                if "ERROR" in str_line or "FAIL" in str_line or "WARNING" in str_line:
                    # exception keywords in lines to ignore as errors (such as summary or expected warnings)
                    is_exception = False
                    for except_clause in [
                        "Done.",  # DBT Summary
                        "PASS=",  # DBT Summary
                        "Nothing to do.",  # When no schema/data tests are setup
                        "Configuration paths exist in your dbt_project.yml",  # When no cte / view are generated
                    ]:
                        if except_clause in str_line:
                            is_exception = True
                            break
                    if not is_exception:
                        # count lines signaling an error/failure/warning
                        error_count += 1
        process.wait()
        message = (
            f"{' '.join(commands)}\n\tterminated with return code {process.returncode} "
            f"with {error_count} 'Error/Warning/Fail' mention(s)."
        )
        print(message)
        assert error_count == 0, message
        assert process.returncode == 0, message
        if error_count > 0:
            return False
        return process.returncode == 0
    @staticmethod
    def copy_replace(src, dst, pattern=None, replace_value=None):
        """
        Copies a file from src to dst replacing pattern by replace_value
        Parameters
        ----------
        src : string
            Path to the source filename to copy from
        dst : string
            Path to the output filename to copy to
        pattern
            list of Patterns to replace inside the src file
        replace_value
            list of Values to replace by in the dst file
        """
        # src/dst may be paths or already-open file objects; only paths are
        # opened (and closed) here.
        file1 = open(src, "r") if isinstance(src, str) else src
        file2 = open(dst, "w") if isinstance(dst, str) else dst
        pattern = [pattern] if isinstance(pattern, str) else pattern
        replace_value = [replace_value] if isinstance(replace_value, str) else replace_value
        if replace_value and pattern:
            if len(replace_value) != len(pattern):
                raise Exception("Invalid parameters: pattern and replace_value" " have different sizes.")
            rules = [(re.compile(regex, re.IGNORECASE), value) for regex, value in zip(pattern, replace_value)]
        else:
            rules = []
        for line in file1:
            if rules:
                for rule in rules:
                    line = re.sub(rule[0], rule[1], line)
            file2.write(line)
        if isinstance(src, str):
            file1.close()
        if isinstance(dst, str):
            file2.close()
| 40.815094 | 123 | 0.596801 |
e4ca5a98263def49e8fe87b37ea0cf66a3890236 | 6,038 | py | Python | test/part_manager_oop.py | yassineazrak/app_desktop_sraping_web | 5bc61a452deaeedf7526e6811de82f76311ed85a | [
"MIT"
] | 136 | 2019-11-07T14:08:45.000Z | 2022-03-31T23:56:12.000Z | test/part_manager_oop.py | yassineazrak/app_desktop_sraping_web | 5bc61a452deaeedf7526e6811de82f76311ed85a | [
"MIT"
] | 4 | 2019-11-08T12:39:07.000Z | 2021-07-04T16:12:57.000Z | test/part_manager_oop.py | yassineazrak/app_desktop_sraping_web | 5bc61a452deaeedf7526e6811de82f76311ed85a | [
"MIT"
] | 98 | 2019-11-07T18:09:52.000Z | 2022-03-28T16:39:06.000Z | import tkinter as tk
from tkinter import messagebox
from db import Database
# Instantiate the database access object backed by the local store.db file.
db = Database('store.db')
# Main Application/GUI class
# Main Application/GUI class
class Application(tk.Frame):
    """Tkinter GUI for managing part records through the module-level `db`."""
    def __init__(self, master):
        super().__init__(master)
        self.master = master
        master.title('Part Manager')
        # Width height
        master.geometry("700x350")
        # Create widgets/grid
        self.create_widgets()
        # Init selected item var
        self.selected_item = 0
        # Populate initial list
        self.populate_list()
    def create_widgets(self):
        """Build all labels, entries, the listbox + scrollbar, and buttons."""
        # Part
        self.part_text = tk.StringVar()
        self.part_label = tk.Label(
            self.master, text='Part Name', font=('bold', 14), pady=20)
        self.part_label.grid(row=0, column=0, sticky=tk.W)
        self.part_entry = tk.Entry(self.master, textvariable=self.part_text)
        self.part_entry.grid(row=0, column=1)
        # Customer
        self.customer_text = tk.StringVar()
        self.customer_label = tk.Label(
            self.master, text='Customer', font=('bold', 14))
        self.customer_label.grid(row=0, column=2, sticky=tk.W)
        self.customer_entry = tk.Entry(
            self.master, textvariable=self.customer_text)
        self.customer_entry.grid(row=0, column=3)
        # Retailer
        self.retailer_text = tk.StringVar()
        self.retailer_label = tk.Label(
            self.master, text='Retailer', font=('bold', 14))
        self.retailer_label.grid(row=1, column=0, sticky=tk.W)
        self.retailer_entry = tk.Entry(
            self.master, textvariable=self.retailer_text)
        self.retailer_entry.grid(row=1, column=1)
        # Price
        self.price_text = tk.StringVar()
        self.price_label = tk.Label(
            self.master, text='Price', font=('bold', 14))
        self.price_label.grid(row=1, column=2, sticky=tk.W)
        self.price_entry = tk.Entry(self.master, textvariable=self.price_text)
        self.price_entry.grid(row=1, column=3)
        # Parts list (listbox)
        self.parts_list = tk.Listbox(self.master, height=8, width=50, border=0)
        self.parts_list.grid(row=3, column=0, columnspan=3,
                             rowspan=6, pady=20, padx=20)
        # Create scrollbar
        self.scrollbar = tk.Scrollbar(self.master)
        self.scrollbar.grid(row=3, column=3)
        # Set scrollbar to parts
        self.parts_list.configure(yscrollcommand=self.scrollbar.set)
        self.scrollbar.configure(command=self.parts_list.yview)
        # Bind select
        self.parts_list.bind('<<ListboxSelect>>', self.select_item)
        # Buttons
        self.add_btn = tk.Button(
            self.master, text="Add Part", width=12, command=self.add_item)
        self.add_btn.grid(row=2, column=0, pady=20)
        self.remove_btn = tk.Button(
            self.master, text="Remove Part", width=12, command=self.remove_item)
        self.remove_btn.grid(row=2, column=1)
        self.update_btn = tk.Button(
            self.master, text="Update Part", width=12, command=self.update_item)
        self.update_btn.grid(row=2, column=2)
        self.exit_btn = tk.Button(
            self.master, text="Clear Input", width=12, command=self.clear_text)
        self.exit_btn.grid(row=2, column=3)
    def populate_list(self):
        """Refresh the listbox from the database."""
        # Clear existing rows first so repeated refreshes don't duplicate entries.
        self.parts_list.delete(0, tk.END)
        # Loop through records
        for row in db.fetch():
            # Insert into list
            self.parts_list.insert(tk.END, row)
    # Add new item
    def add_item(self):
        """Validate the entry fields and insert a new part into the DB."""
        if self.part_text.get() == '' or self.customer_text.get() == '' or self.retailer_text.get() == '' or self.price_text.get() == '':
            messagebox.showerror(
                "Required Fields", "Please include all fields")
            return
        print(self.part_text.get())
        # Insert into DB
        db.insert(self.part_text.get(), self.customer_text.get(),
                  self.retailer_text.get(), self.price_text.get())
        # Clear list
        self.parts_list.delete(0, tk.END)
        # Insert into list
        self.parts_list.insert(tk.END, (self.part_text.get(), self.customer_text.get(
        ), self.retailer_text.get(), self.price_text.get()))
        self.clear_text()
        self.populate_list()
    # Runs when item is selected
    def select_item(self, event):
        """Remember the clicked row and load its values into the entry fields."""
        # # Create global selected item to use in other functions
        # global self.selected_item
        try:
            # Get index
            index = self.parts_list.curselection()[0]
            # Get selected item
            self.selected_item = self.parts_list.get(index)
            # print(selected_item) # Print tuple
            # Add text to entries
            self.part_entry.delete(0, tk.END)
            self.part_entry.insert(tk.END, self.selected_item[1])
            self.customer_entry.delete(0, tk.END)
            self.customer_entry.insert(tk.END, self.selected_item[2])
            self.retailer_entry.delete(0, tk.END)
            self.retailer_entry.insert(tk.END, self.selected_item[3])
            self.price_entry.delete(0, tk.END)
            self.price_entry.insert(tk.END, self.selected_item[4])
        except IndexError:
            # Nothing selected (e.g. click on empty area): ignore.
            pass
    # Remove item
    def remove_item(self):
        """Delete the currently selected part from the DB and refresh."""
        db.remove(self.selected_item[0])
        self.clear_text()
        self.populate_list()
    # Update item
    def update_item(self):
        """Write the entry-field values back to the selected DB row."""
        db.update(self.selected_item[0], self.part_text.get(
        ), self.customer_text.get(), self.retailer_text.get(), self.price_text.get())
        self.populate_list()
    # Clear all text fields
    def clear_text(self):
        self.part_entry.delete(0, tk.END)
        self.customer_entry.delete(0, tk.END)
        self.retailer_entry.delete(0, tk.END)
        self.price_entry.delete(0, tk.END)
# Build the root window, attach the application frame, and enter the
# tkinter event loop (blocks until the window is closed).
root = tk.Tk()
app = Application(master=root)
app.mainloop()
| 37.7375 | 137 | 0.617257 |
03b68a85e90b9b6f1bf6bc0ce79172eaa4f10d9b | 54,732 | py | Python | research/object_detection/inputs.py | duncanriach-nvidia/tensorflow-models | f95f014e6192434f405b7d6209c885072a3f6b6d | [
"Apache-2.0"
] | 1 | 2020-05-20T11:40:56.000Z | 2020-05-20T11:40:56.000Z | research/object_detection/inputs.py | duncanriach-nvidia/tensorflow-models | f95f014e6192434f405b7d6209c885072a3f6b6d | [
"Apache-2.0"
] | null | null | null | research/object_detection/inputs.py | duncanriach-nvidia/tensorflow-models | f95f014e6192434f405b7d6209c885072a3f6b6d | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model input function for tf-learn object detection model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from object_detection.builders import dataset_builder
from object_detection.builders import image_resizer_builder
from object_detection.builders import model_builder
from object_detection.builders import preprocessor_builder
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import densepose_ops
from object_detection.core import keypoint_ops
from object_detection.core import preprocessor
from object_detection.core import standard_fields as fields
from object_detection.data_decoders import tf_example_decoder
from object_detection.protos import eval_pb2
from object_detection.protos import image_resizer_pb2
from object_detection.protos import input_reader_pb2
from object_detection.protos import model_pb2
from object_detection.protos import train_pb2
from object_detection.utils import config_util
from object_detection.utils import ops as util_ops
from object_detection.utils import shape_utils
HASH_KEY = 'hash'
# NOTE(review): presumably the bucket count for hashing example keys into
# the HASH_KEY feature -- confirm at use sites (not visible in this chunk).
HASH_BINS = 1 << 31
SERVING_FED_EXAMPLE_KEY = 'serialized_example'
# Class labels in the input protos are 1-indexed; subtracting this offset
# yields the 0-indexed labels used internally (see
# convert_labeled_classes_to_k_hot below).
_LABEL_OFFSET = 1
# A map of names to methods that help build the input pipeline.
INPUT_BUILDER_UTIL_MAP = {
    'dataset_build': dataset_builder.build,
    'model_build': model_builder.build,
}
def _multiclass_scores_or_one_hot_labels(multiclass_scores, groundtruth_boxes,
                                         groundtruth_classes, num_classes):
  """Returns one-hot encoding of classes when multiclass_scores is empty."""
  # Prefer the provided per-box class score distribution; when none was
  # supplied (empty tensor) fall back to a one-hot encoding of the integer
  # class labels. Both branches must be zero-arg callables for tf.cond.
  def _use_scores():
    return tf.reshape(multiclass_scores,
                      [tf.shape(groundtruth_boxes)[0], num_classes])

  def _use_one_hot():
    return tf.one_hot(groundtruth_classes, num_classes)

  return tf.cond(tf.size(multiclass_scores) > 0, _use_scores, _use_one_hot)
def convert_labeled_classes_to_k_hot(groundtruth_labeled_classes,
                                     num_classes,
                                     map_empty_to_ones=False):
  """Returns k-hot encoding of the labeled classes.
  If map_empty_to_ones is enabled and the input labeled_classes is empty,
  this function assumes all classes are exhaustively labeled, thus returning
  an all-one encoding.
  Args:
    groundtruth_labeled_classes: a Tensor holding a sparse representation of
      labeled classes.
    num_classes: an integer representing the number of classes
    map_empty_to_ones: boolean (default: False). Set this to be True to default
      to an all-ones result if given an empty `groundtruth_labeled_classes`.
  Returns:
    A k-hot (and 0-indexed) tensor representation of
    `groundtruth_labeled_classes`.
  """
  # If the input labeled_classes is empty, it assumes all classes are
  # exhaustively labeled, thus returning an all-one encoding.
  def true_fn():
    # Scatter the (1-indexed) labeled class ids into a dense 0/1 vector.
    # NOTE(review): tf.sparse_to_dense is deprecated in TF2; kept here for
    # tf.compat.v1 graph-mode compatibility.
    return tf.sparse_to_dense(
        groundtruth_labeled_classes - _LABEL_OFFSET, [num_classes],
        tf.constant(1, dtype=tf.float32),
        validate_indices=False)
  def false_fn():
    return tf.ones(num_classes, dtype=tf.float32)
  if map_empty_to_ones:
    return tf.cond(tf.size(groundtruth_labeled_classes) > 0, true_fn, false_fn)
  return true_fn()
def _remove_unrecognized_classes(class_ids, unrecognized_label):
  """Returns class ids with unrecognized classes filtered out.

  Keeps only entries strictly greater than `unrecognized_label`.
  """
  keep = tf.greater(class_ids, unrecognized_label)
  keep_indices = tf.squeeze(tf.where(keep), -1)
  return tf.gather(class_ids, keep_indices)
def assert_or_prune_invalid_boxes(boxes):
  """Makes sure boxes have valid sizes (ymax >= ymin, xmax >= xmin).
  When the hardware supports assertions, the function raises an error when
  boxes have an invalid size. If assertions are not supported (e.g. on TPU),
  boxes with invalid sizes are filtered out.
  Args:
    boxes: float tensor of shape [num_boxes, 4]
  Returns:
    boxes: float tensor of shape [num_valid_boxes, 4] with invalid boxes
    filtered out.
  Raises:
    tf.errors.InvalidArgumentError: When we detect boxes with invalid size.
      This is not supported on TPUs.
  """
  ymin, xmin, ymax, xmax = tf.split(boxes, num_or_size_splits=4, axis=1)
  height_check = tf.Assert(tf.reduce_all(ymax >= ymin), [ymin, ymax])
  width_check = tf.Assert(tf.reduce_all(xmax >= xmin), [xmin, xmax])
  # The control dependency forces the assertions to execute (where the
  # hardware supports them) before the coordinates are reassembled.
  with tf.control_dependencies([height_check, width_check]):
    boxes_tensor = tf.concat([ymin, xmin, ymax, xmax], axis=1)
  boxlist = box_list.BoxList(boxes_tensor)
  # TODO(b/149221748) Remove pruning when XLA supports assertions.
  # Pruning drops degenerate boxes on backends where the asserts are no-ops.
  boxlist = box_list_ops.prune_small_boxes(boxlist, 0)
  return boxlist.get()
def transform_input_data(tensor_dict,
                         model_preprocess_fn,
                         image_resizer_fn,
                         num_classes,
                         data_augmentation_fn=None,
                         merge_multiple_boxes=False,
                         retain_original_image=False,
                         use_multiclass_scores=False,
                         use_bfloat16=False,
                         retain_original_image_additional_channels=False,
                         keypoint_type_weight=None,
                         image_classes_field_map_empty_to_ones=True):
  """A single function that is responsible for all input data transformations.

  Data transformation functions are applied in the following order.
  1. If key fields.InputDataFields.image_additional_channels is present in
     tensor_dict, the additional channels will be merged into
     fields.InputDataFields.image.
  2. data_augmentation_fn (optional): applied on tensor_dict.
  3. model_preprocess_fn: applied only on image tensor in tensor_dict.
  4. keypoint_type_weight (optional): If groundtruth keypoints are in
     the tensor dictionary, per-keypoint weights are produced. These weights are
     initialized by `keypoint_type_weight` (or ones if left None).
     Then, for all keypoints that are not visible, the weights are set to 0 (to
     avoid penalizing the model in a loss function).
  5. image_resizer_fn: applied on original image and instance mask tensor in
     tensor_dict.
  6. one_hot_encoding: applied to classes tensor in tensor_dict.
  7. merge_multiple_boxes (optional): when groundtruth boxes are exactly the
     same they can be merged into a single box with an associated k-hot class
     label.

  Args:
    tensor_dict: dictionary containing input tensors keyed by
      fields.InputDataFields.
    model_preprocess_fn: model's preprocess function to apply on image tensor.
      This function must take in a 4-D float tensor and return a 4-D preprocess
      float tensor and a tensor containing the true image shape.
    image_resizer_fn: image resizer function to apply on groundtruth instance
      `masks. This function must take a 3-D float tensor of an image and a 3-D
      tensor of instance masks and return a resized version of these along with
      the true shapes.
    num_classes: number of max classes to one-hot (or k-hot) encode the class
      labels.
    data_augmentation_fn: (optional) data augmentation function to apply on
      input `tensor_dict`.
    merge_multiple_boxes: (optional) whether to merge multiple groundtruth boxes
      and classes for a given image if the boxes are exactly the same.
    retain_original_image: (optional) whether to retain original image in the
      output dictionary.
    use_multiclass_scores: whether to use multiclass scores as class targets
      instead of one-hot encoding of `groundtruth_classes`. When
      this is True and multiclass_scores is empty, one-hot encoding of
      `groundtruth_classes` is used as a fallback.
    use_bfloat16: (optional) a bool, whether to use bfloat16 in training.
    retain_original_image_additional_channels: (optional) Whether to retain
      original image additional channels in the output dictionary.
    keypoint_type_weight: A list (of length num_keypoints) containing
      groundtruth loss weights to use for each keypoint. If None, will use a
      weight of 1.
    image_classes_field_map_empty_to_ones: A boolean flag indicating if empty
      image classes field indicates that all classes have been labeled on this
      image [true] or none [false].

  Returns:
    A dictionary keyed by fields.InputDataFields containing the tensors obtained
    after applying all the transformations.

  Raises:
    KeyError: If both groundtruth_labeled_classes and groundtruth_image_classes
      are provided by the decoder in tensor_dict since both fields are
      considered to contain the same information.
  """
  out_tensor_dict = tensor_dict.copy()

  input_fields = fields.InputDataFields
  labeled_classes_field = input_fields.groundtruth_labeled_classes
  image_classes_field = input_fields.groundtruth_image_classes
  verified_neg_classes_field = input_fields.groundtruth_verified_neg_classes
  not_exhaustive_field = input_fields.groundtruth_not_exhaustive_classes

  # The two fields carry the same information; providing both is ambiguous.
  if (labeled_classes_field in out_tensor_dict and
      image_classes_field in out_tensor_dict):
    # Fixed: the original message concatenation was missing a separating
    # space ("...image_classesare provided...").
    raise KeyError('groundtruth_labeled_classes and groundtruth_image_classes '
                   'are provided by the decoder, but only one should be set.')

  # Drop unrecognized (-1) entries, then convert each image-level class list
  # to a k-hot vector of length num_classes.
  for field, map_empty_to_ones in [
      (labeled_classes_field, True),
      (image_classes_field, image_classes_field_map_empty_to_ones),
      (verified_neg_classes_field, False),
      (not_exhaustive_field, False)]:
    if field in out_tensor_dict:
      out_tensor_dict[field] = _remove_unrecognized_classes(
          out_tensor_dict[field], unrecognized_label=-1)
      out_tensor_dict[field] = convert_labeled_classes_to_k_hot(
          out_tensor_dict[field], num_classes, map_empty_to_ones)

  if input_fields.multiclass_scores in out_tensor_dict:
    out_tensor_dict[
        input_fields
        .multiclass_scores] = _multiclass_scores_or_one_hot_labels(
            out_tensor_dict[input_fields.multiclass_scores],
            out_tensor_dict[input_fields.groundtruth_boxes],
            out_tensor_dict[input_fields.groundtruth_classes],
            num_classes)

  if input_fields.groundtruth_boxes in out_tensor_dict:
    out_tensor_dict = util_ops.filter_groundtruth_with_nan_box_coordinates(
        out_tensor_dict)
    out_tensor_dict = util_ops.filter_unrecognized_classes(out_tensor_dict)

  if retain_original_image:
    out_tensor_dict[input_fields.original_image] = tf.cast(
        image_resizer_fn(out_tensor_dict[input_fields.image],
                         None)[0], tf.uint8)

  if input_fields.image_additional_channels in out_tensor_dict:
    channels = out_tensor_dict[input_fields.image_additional_channels]
    out_tensor_dict[input_fields.image] = tf.concat(
        [out_tensor_dict[input_fields.image], channels], axis=2)
    if retain_original_image_additional_channels:
      out_tensor_dict[
          input_fields.image_additional_channels] = tf.cast(
              image_resizer_fn(channels, None)[0], tf.uint8)

  # Apply data augmentation ops.
  if data_augmentation_fn is not None:
    out_tensor_dict = data_augmentation_fn(out_tensor_dict)

  # Apply model preprocessing ops and resize instance masks.
  image = out_tensor_dict[input_fields.image]
  preprocessed_resized_image, true_image_shape = model_preprocess_fn(
      tf.expand_dims(tf.cast(image, dtype=tf.float32), axis=0))

  preprocessed_shape = tf.shape(preprocessed_resized_image)
  new_height, new_width = preprocessed_shape[1], preprocessed_shape[2]

  # Window (in normalized coordinates) covering the valid (unpadded) part of
  # the preprocessed image; used to realign boxes/keypoints/surface coords.
  im_box = tf.stack([
      0.0, 0.0,
      tf.to_float(new_height) / tf.to_float(true_image_shape[0, 0]),
      tf.to_float(new_width) / tf.to_float(true_image_shape[0, 1])
  ])

  if input_fields.groundtruth_boxes in tensor_dict:
    bboxes = out_tensor_dict[input_fields.groundtruth_boxes]
    boxlist = box_list.BoxList(bboxes)
    realigned_bboxes = box_list_ops.change_coordinate_frame(boxlist, im_box)

    realigned_boxes_tensor = realigned_bboxes.get()
    valid_boxes_tensor = assert_or_prune_invalid_boxes(realigned_boxes_tensor)
    out_tensor_dict[
        input_fields.groundtruth_boxes] = valid_boxes_tensor

  if input_fields.groundtruth_keypoints in tensor_dict:
    keypoints = out_tensor_dict[input_fields.groundtruth_keypoints]
    realigned_keypoints = keypoint_ops.change_coordinate_frame(keypoints,
                                                               im_box)
    out_tensor_dict[
        input_fields.groundtruth_keypoints] = realigned_keypoints
    flds_gt_kpt = input_fields.groundtruth_keypoints
    flds_gt_kpt_vis = input_fields.groundtruth_keypoint_visibilities
    flds_gt_kpt_weights = input_fields.groundtruth_keypoint_weights
    # Default: every keypoint visible when no visibilities are provided.
    if flds_gt_kpt_vis not in out_tensor_dict:
      out_tensor_dict[flds_gt_kpt_vis] = tf.ones_like(
          out_tensor_dict[flds_gt_kpt][:, :, 0],
          dtype=tf.bool)
    flds_gt_kpt_depth = fields.InputDataFields.groundtruth_keypoint_depths
    flds_gt_kpt_depth_weight = (
        fields.InputDataFields.groundtruth_keypoint_depth_weights)
    if flds_gt_kpt_depth in out_tensor_dict:
      out_tensor_dict[flds_gt_kpt_depth] = out_tensor_dict[flds_gt_kpt_depth]
      out_tensor_dict[flds_gt_kpt_depth_weight] = out_tensor_dict[
          flds_gt_kpt_depth_weight]

    out_tensor_dict[flds_gt_kpt_weights] = (
        keypoint_ops.keypoint_weights_from_visibilities(
            out_tensor_dict[flds_gt_kpt_vis],
            keypoint_type_weight))

  dp_surface_coords_fld = input_fields.groundtruth_dp_surface_coords
  if dp_surface_coords_fld in tensor_dict:
    dp_surface_coords = out_tensor_dict[dp_surface_coords_fld]
    realigned_dp_surface_coords = densepose_ops.change_coordinate_frame(
        dp_surface_coords, im_box)
    out_tensor_dict[dp_surface_coords_fld] = realigned_dp_surface_coords

  if use_bfloat16:
    preprocessed_resized_image = tf.cast(
        preprocessed_resized_image, tf.bfloat16)
    if input_fields.context_features in out_tensor_dict:
      out_tensor_dict[input_fields.context_features] = tf.cast(
          out_tensor_dict[input_fields.context_features], tf.bfloat16)
  out_tensor_dict[input_fields.image] = tf.squeeze(
      preprocessed_resized_image, axis=0)
  out_tensor_dict[input_fields.true_image_shape] = tf.squeeze(
      true_image_shape, axis=0)
  if input_fields.groundtruth_instance_masks in out_tensor_dict:
    masks = out_tensor_dict[input_fields.groundtruth_instance_masks]
    _, resized_masks, _ = image_resizer_fn(image, masks)
    if use_bfloat16:
      resized_masks = tf.cast(resized_masks, tf.bfloat16)
    out_tensor_dict[
        input_fields.groundtruth_instance_masks] = resized_masks

  zero_indexed_groundtruth_classes = out_tensor_dict[
      input_fields.groundtruth_classes] - _LABEL_OFFSET
  if use_multiclass_scores:
    out_tensor_dict[
        input_fields.groundtruth_classes] = out_tensor_dict[
            input_fields.multiclass_scores]
  else:
    out_tensor_dict[input_fields.groundtruth_classes] = tf.one_hot(
        zero_indexed_groundtruth_classes, num_classes)
  out_tensor_dict.pop(input_fields.multiclass_scores, None)

  if input_fields.groundtruth_confidences in out_tensor_dict:
    groundtruth_confidences = out_tensor_dict[
        input_fields.groundtruth_confidences]
    # Map the confidences to the one-hot encoding of classes
    out_tensor_dict[input_fields.groundtruth_confidences] = (
        tf.reshape(groundtruth_confidences, [-1, 1]) *
        out_tensor_dict[input_fields.groundtruth_classes])
  else:
    groundtruth_confidences = tf.ones_like(
        zero_indexed_groundtruth_classes, dtype=tf.float32)
    out_tensor_dict[input_fields.groundtruth_confidences] = (
        out_tensor_dict[input_fields.groundtruth_classes])

  if merge_multiple_boxes:
    merged_boxes, merged_classes, merged_confidences, _ = (
        util_ops.merge_boxes_with_multiple_labels(
            out_tensor_dict[input_fields.groundtruth_boxes],
            zero_indexed_groundtruth_classes,
            groundtruth_confidences,
            num_classes))
    merged_classes = tf.cast(merged_classes, tf.float32)
    out_tensor_dict[input_fields.groundtruth_boxes] = merged_boxes
    out_tensor_dict[input_fields.groundtruth_classes] = merged_classes
    out_tensor_dict[input_fields.groundtruth_confidences] = (
        merged_confidences)
  if input_fields.groundtruth_boxes in out_tensor_dict:
    out_tensor_dict[input_fields.num_groundtruth_boxes] = tf.shape(
        out_tensor_dict[input_fields.groundtruth_boxes])[0]

  return out_tensor_dict
def pad_input_data_to_static_shapes(tensor_dict,
                                    max_num_boxes,
                                    num_classes,
                                    spatial_image_shape=None,
                                    max_num_context_features=None,
                                    context_feature_length=None,
                                    max_dp_points=336):
  """Pads input tensors to static shapes.

  In case num_additional_channels > 0, we assume that the additional channels
  have already been concatenated to the base image.

  Args:
    tensor_dict: Tensor dictionary of input data
    max_num_boxes: Max number of groundtruth boxes needed to compute shapes for
      padding.
    num_classes: Number of classes in the dataset needed to compute shapes for
      padding.
    spatial_image_shape: A list of two integers of the form [height, width]
      containing expected spatial shape of the image.
    max_num_context_features (optional): The maximum number of context
      features needed to compute shapes padding.
    context_feature_length (optional): The length of the context feature.
    max_dp_points (optional): The maximum number of DensePose sampled points per
      instance. The default (336) is selected since the original DensePose paper
      (https://arxiv.org/pdf/1802.00434.pdf) indicates that the maximum number
      of samples per part is 14, and therefore 24 * 14 = 336 is the maximum
      sampler per instance.

  Returns:
    A dictionary keyed by fields.InputDataFields containing padding shapes for
    tensors in the dataset.

  Raises:
    ValueError: If groundtruth classes is neither rank 1 nor rank 2, or if we
      detect that additional channels have not been concatenated yet, or if
      max_num_context_features is not specified and context_features is in the
      tensor dict.
  """
  # A None (or [-1, -1]) spatial shape means the height/width dimensions stay
  # dynamic: pad_or_clip_nd leaves None dimensions untouched.
  if not spatial_image_shape or spatial_image_shape == [-1, -1]:
    height, width = None, None
  else:
    height, width = spatial_image_shape  # pylint: disable=unpacking-non-sequence
  input_fields = fields.InputDataFields
  num_additional_channels = 0
  if input_fields.image_additional_channels in tensor_dict:
    num_additional_channels = shape_utils.get_dim_as_int(tensor_dict[
        input_fields.image_additional_channels].shape[2])

  # We assume that if num_additional_channels > 0, then it has already been
  # concatenated to the base image (but not the ground truth).
  num_channels = 3
  if input_fields.image in tensor_dict:
    num_channels = shape_utils.get_dim_as_int(
        tensor_dict[input_fields.image].shape[2])
  if num_additional_channels:
    # If all channels are "additional" the base image cannot have been merged
    # with them yet.
    if num_additional_channels >= num_channels:
      raise ValueError(
          'Image must be already concatenated with additional channels.')

    # original_image having the same channel count as the merged image also
    # indicates the concatenation never happened.
    if (input_fields.original_image in tensor_dict and
        shape_utils.get_dim_as_int(
            tensor_dict[input_fields.original_image].shape[2]) ==
        num_channels):
      raise ValueError(
          'Image must be already concatenated with additional channels.')

  if input_fields.context_features in tensor_dict and (
      max_num_context_features is None):
    raise ValueError('max_num_context_features must be specified in the model '
                     'config if include_context is specified in the input '
                     'config')

  # Static target shapes for the fields that are always paddable. Fields not
  # present in tensor_dict are simply never looked up.
  padding_shapes = {
      input_fields.image: [height, width, num_channels],
      input_fields.original_image_spatial_shape: [2],
      input_fields.image_additional_channels: [
          height, width, num_additional_channels
      ],
      input_fields.source_id: [],
      input_fields.filename: [],
      input_fields.key: [],
      input_fields.groundtruth_difficult: [max_num_boxes],
      input_fields.groundtruth_boxes: [max_num_boxes, 4],
      input_fields.groundtruth_classes: [max_num_boxes, num_classes],
      input_fields.groundtruth_instance_masks: [
          max_num_boxes, height, width
      ],
      input_fields.groundtruth_instance_mask_weights: [max_num_boxes],
      input_fields.groundtruth_is_crowd: [max_num_boxes],
      input_fields.groundtruth_group_of: [max_num_boxes],
      input_fields.groundtruth_area: [max_num_boxes],
      input_fields.groundtruth_weights: [max_num_boxes],
      input_fields.groundtruth_confidences: [
          max_num_boxes, num_classes
      ],
      input_fields.num_groundtruth_boxes: [],
      input_fields.groundtruth_label_types: [max_num_boxes],
      input_fields.groundtruth_label_weights: [max_num_boxes],
      input_fields.true_image_shape: [3],
      input_fields.groundtruth_image_classes: [num_classes],
      input_fields.groundtruth_image_confidences: [num_classes],
      input_fields.groundtruth_labeled_classes: [num_classes],
  }

  if input_fields.original_image in tensor_dict:
    padding_shapes[input_fields.original_image] = [
        height, width,
        shape_utils.get_dim_as_int(tensor_dict[input_fields.
                                               original_image].shape[2])
    ]
  # Keypoint-related shapes depend on the (static) number of keypoints in the
  # incoming tensors, so they are derived per-field below.
  if input_fields.groundtruth_keypoints in tensor_dict:
    tensor_shape = (
        tensor_dict[input_fields.groundtruth_keypoints].shape)
    padding_shape = [max_num_boxes,
                     shape_utils.get_dim_as_int(tensor_shape[1]),
                     shape_utils.get_dim_as_int(tensor_shape[2])]
    padding_shapes[input_fields.groundtruth_keypoints] = padding_shape
  if input_fields.groundtruth_keypoint_visibilities in tensor_dict:
    tensor_shape = tensor_dict[input_fields.
                               groundtruth_keypoint_visibilities].shape
    padding_shape = [max_num_boxes, shape_utils.get_dim_as_int(tensor_shape[1])]
    padding_shapes[input_fields.
                   groundtruth_keypoint_visibilities] = padding_shape

  if fields.InputDataFields.groundtruth_keypoint_depths in tensor_dict:
    tensor_shape = tensor_dict[fields.InputDataFields.
                               groundtruth_keypoint_depths].shape
    padding_shape = [max_num_boxes, shape_utils.get_dim_as_int(tensor_shape[1])]
    padding_shapes[fields.InputDataFields.
                   groundtruth_keypoint_depths] = padding_shape
    padding_shapes[fields.InputDataFields.
                   groundtruth_keypoint_depth_weights] = padding_shape

  if input_fields.groundtruth_keypoint_weights in tensor_dict:
    tensor_shape = (
        tensor_dict[input_fields.groundtruth_keypoint_weights].shape)
    padding_shape = [max_num_boxes, shape_utils.get_dim_as_int(tensor_shape[1])]
    padding_shapes[input_fields.
                   groundtruth_keypoint_weights] = padding_shape
  if input_fields.groundtruth_dp_num_points in tensor_dict:
    padding_shapes[
        input_fields.groundtruth_dp_num_points] = [max_num_boxes]
    padding_shapes[
        input_fields.groundtruth_dp_part_ids] = [
            max_num_boxes, max_dp_points]
    padding_shapes[
        input_fields.groundtruth_dp_surface_coords] = [
            max_num_boxes, max_dp_points, 4]
  if input_fields.groundtruth_track_ids in tensor_dict:
    padding_shapes[
        input_fields.groundtruth_track_ids] = [max_num_boxes]

  if input_fields.groundtruth_verified_neg_classes in tensor_dict:
    padding_shapes[
        input_fields.groundtruth_verified_neg_classes] = [num_classes]
  if input_fields.groundtruth_not_exhaustive_classes in tensor_dict:
    padding_shapes[
        input_fields.groundtruth_not_exhaustive_classes] = [num_classes]

  # Prepare for ContextRCNN related fields.
  if input_fields.context_features in tensor_dict:
    padding_shape = [max_num_context_features, context_feature_length]
    padding_shapes[input_fields.context_features] = padding_shape

    # Record how many context rows were valid before padding.
    tensor_shape = tf.shape(
        tensor_dict[fields.InputDataFields.context_features])
    tensor_dict[fields.InputDataFields.valid_context_size] = tensor_shape[0]
    padding_shapes[fields.InputDataFields.valid_context_size] = []
  if fields.InputDataFields.context_feature_length in tensor_dict:
    padding_shapes[fields.InputDataFields.context_feature_length] = []
  if fields.InputDataFields.context_features_image_id_list in tensor_dict:
    padding_shapes[fields.InputDataFields.context_features_image_id_list] = [
        max_num_context_features]

  if input_fields.is_annotated in tensor_dict:
    padding_shapes[input_fields.is_annotated] = []

  padded_tensor_dict = {}
  for tensor_name in tensor_dict:
    padded_tensor_dict[tensor_name] = shape_utils.pad_or_clip_nd(
        tensor_dict[tensor_name], padding_shapes[tensor_name])

  # Make sure that the number of groundtruth boxes now reflects the
  # padded/clipped tensors.
  if input_fields.num_groundtruth_boxes in padded_tensor_dict:
    padded_tensor_dict[input_fields.num_groundtruth_boxes] = (
        tf.minimum(
            padded_tensor_dict[input_fields.num_groundtruth_boxes],
            max_num_boxes))
  return padded_tensor_dict
def augment_input_data(tensor_dict, data_augmentation_options):
  """Applies data augmentation ops to input tensors.

  Args:
    tensor_dict: A dictionary of input tensors keyed by fields.InputDataFields.
    data_augmentation_options: A list of tuples, where each tuple contains a
      function and a dictionary that contains arguments and their values.
      Usually, this is the output of core/preprocessor.build.

  Returns:
    A dictionary of tensors obtained by applying data augmentation ops to the
    input tensor dictionary.
  """
  flds = fields.InputDataFields
  # Augmentation ops expect a batched float image; add the batch dim here and
  # strip it again after preprocessing.
  tensor_dict[flds.image] = tf.expand_dims(
      tf.cast(tensor_dict[flds.image], dtype=tf.float32), 0)

  def present(key):
    """True when the given groundtruth field was decoded for this example."""
    return key in tensor_dict

  dense_pose_fields = [flds.groundtruth_dp_num_points,
                       flds.groundtruth_dp_part_ids,
                       flds.groundtruth_dp_surface_coords]
  tensor_dict = preprocessor.preprocess(
      tensor_dict, data_augmentation_options,
      func_arg_map=preprocessor.get_default_func_arg_map(
          include_label_weights=present(flds.groundtruth_weights),
          include_label_confidences=present(flds.groundtruth_confidences),
          include_multiclass_scores=present(flds.multiclass_scores),
          include_instance_masks=present(flds.groundtruth_instance_masks),
          include_instance_mask_weights=present(
              flds.groundtruth_instance_mask_weights),
          include_keypoints=present(flds.groundtruth_keypoints),
          include_keypoint_visibilities=present(
              flds.groundtruth_keypoint_visibilities),
          include_dense_pose=all(present(f) for f in dense_pose_fields),
          include_keypoint_depths=present(flds.groundtruth_keypoint_depths)))
  tensor_dict[flds.image] = tf.squeeze(tensor_dict[flds.image], axis=0)
  return tensor_dict
def _get_labels_dict(input_dict):
  """Builds the labels dictionary from a decoded input dictionary."""
  fld = fields.InputDataFields
  # These keys must always be present; a KeyError here indicates a decoding
  # problem upstream.
  required_keys = (
      fld.num_groundtruth_boxes,
      fld.groundtruth_boxes,
      fld.groundtruth_classes,
      fld.groundtruth_weights,
  )
  labels_dict = {key: input_dict[key] for key in required_keys}

  # Optional fields are passed through only when present.
  optional_keys = (
      fld.groundtruth_confidences,
      fld.groundtruth_labeled_classes,
      fld.groundtruth_keypoints,
      fld.groundtruth_keypoint_depths,
      fld.groundtruth_keypoint_depth_weights,
      fld.groundtruth_instance_masks,
      fld.groundtruth_instance_mask_weights,
      fld.groundtruth_area,
      fld.groundtruth_is_crowd,
      fld.groundtruth_group_of,
      fld.groundtruth_difficult,
      fld.groundtruth_keypoint_visibilities,
      fld.groundtruth_keypoint_weights,
      fld.groundtruth_dp_num_points,
      fld.groundtruth_dp_part_ids,
      fld.groundtruth_dp_surface_coords,
      fld.groundtruth_track_ids,
      fld.groundtruth_verified_neg_classes,
      fld.groundtruth_not_exhaustive_classes,
      fld.groundtruth_image_classes,
  )
  labels_dict.update(
      {key: input_dict[key] for key in optional_keys if key in input_dict})

  if fld.groundtruth_difficult in labels_dict:
    labels_dict[fld.groundtruth_difficult] = tf.cast(
        labels_dict[fld.groundtruth_difficult], tf.int32)
  return labels_dict
def _replace_empty_string_with_random_number(string_tensor):
  """Returns string unchanged if non-empty, and random string tensor otherwise.

  The random string is an integer 0 and 2**63 - 1, casted as string.

  Args:
    string_tensor: A tf.tensor of dtype string.

  Returns:
    out_string: A tf.tensor of dtype string. If string_tensor contains the empty
      string, out_string will contain a random integer casted to a string.
      Otherwise string_tensor is returned unchanged.
  """
  random_source_id = tf.as_string(
      tf.random_uniform(shape=[], maxval=2**63 - 1, dtype=tf.int64))
  is_empty = tf.equal(
      string_tensor, tf.constant('', dtype=tf.string, name='EmptyString'))
  return tf.cond(
      is_empty,
      true_fn=lambda: random_source_id,
      false_fn=lambda: string_tensor)
def _get_features_dict(input_dict, include_source_id=False):
  """Builds the features dictionary from a decoded input dictionary."""
  fld = fields.InputDataFields
  source_id = _replace_empty_string_with_random_number(
      input_dict[fld.source_id])
  features = {
      fld.image: input_dict[fld.image],
      # Hash of the (possibly randomized) source id, used as a stable
      # per-example key.
      HASH_KEY: tf.cast(
          tf.string_to_hash_bucket_fast(source_id, HASH_BINS), tf.int32),
      fld.true_image_shape: input_dict[fld.true_image_shape],
      fld.original_image_spatial_shape:
          input_dict[fld.original_image_spatial_shape],
  }
  if include_source_id:
    features[fld.source_id] = source_id
  # Optional feature fields are forwarded only when present in the input.
  for key in (fld.original_image,
              fld.image_additional_channels,
              fld.context_features,
              fld.valid_context_size,
              fld.context_features_image_id_list):
    if key in input_dict:
      features[key] = input_dict[key]
  return features
def create_train_input_fn(train_config, train_input_config,
                          model_config):
  """Creates a train `input` function for `Estimator`.

  Args:
    train_config: A train_pb2.TrainConfig.
    train_input_config: An input_reader_pb2.InputReader.
    model_config: A model_pb2.DetectionModel.

  Returns:
    `input_fn` for `Estimator` in TRAIN mode.
  """
  def _input_fn(params=None):
    """Estimator `input_fn` that closes over the configs."""
    return train_input(train_config, train_input_config, model_config,
                       params=params)

  return _input_fn
def train_input(train_config, train_input_config,
                model_config, model=None, params=None, input_context=None):
  """Returns `features` and `labels` tensor dictionaries for training.

  Args:
    train_config: A train_pb2.TrainConfig.
    train_input_config: An input_reader_pb2.InputReader.
    model_config: A model_pb2.DetectionModel.
    model: A pre-constructed Detection Model.
      If None, one will be created from the config.
    params: Parameter dictionary passed from the estimator.
    input_context: optional, A tf.distribute.InputContext object used to
      shard filenames and compute per-replica batch_size when this function
      is being called per-replica.

  Returns:
    A tf.data.Dataset that holds (features, labels) tuple.

    features: Dictionary of feature tensors.
      features[fields.InputDataFields.image] is a [batch_size, H, W, C]
        float32 tensor with preprocessed images.
      features[HASH_KEY] is a [batch_size] int32 tensor representing unique
        identifiers for the images.
      features[fields.InputDataFields.true_image_shape] is a [batch_size, 3]
        int32 tensor representing the true image shapes, as preprocessed
        images could be padded.
      features[fields.InputDataFields.original_image] (optional) is a
        [batch_size, H, W, C] float32 tensor with original images.
    labels: Dictionary of groundtruth tensors.
      labels[fields.InputDataFields.num_groundtruth_boxes] is a [batch_size]
        int32 tensor indicating the number of groundtruth boxes.
      labels[fields.InputDataFields.groundtruth_boxes] is a
        [batch_size, num_boxes, 4] float32 tensor containing the corners of
        the groundtruth boxes.
      labels[fields.InputDataFields.groundtruth_classes] is a
        [batch_size, num_boxes, num_classes] float32 one-hot tensor of
        classes.
      labels[fields.InputDataFields.groundtruth_weights] is a
        [batch_size, num_boxes] float32 tensor containing groundtruth weights
        for the boxes.
      -- Optional --
      labels[fields.InputDataFields.groundtruth_instance_masks] is a
        [batch_size, num_boxes, H, W] float32 tensor containing only binary
        values, which represent instance masks for objects.
      labels[fields.InputDataFields.groundtruth_instance_mask_weights] is a
        [batch_size, num_boxes] float32 tensor containing groundtruth weights
        for each instance mask.
      labels[fields.InputDataFields.groundtruth_keypoints] is a
        [batch_size, num_boxes, num_keypoints, 2] float32 tensor containing
        keypoints for each box.
      labels[fields.InputDataFields.groundtruth_weights] is a
        [batch_size, num_boxes, num_keypoints] float32 tensor containing
        groundtruth weights for the keypoints.
      labels[fields.InputDataFields.groundtruth_visibilities] is a
        [batch_size, num_boxes, num_keypoints] bool tensor containing
        groundtruth visibilities for each keypoint.
      labels[fields.InputDataFields.groundtruth_labeled_classes] is a
        [batch_size, num_classes] float32 k-hot tensor of classes.
      labels[fields.InputDataFields.groundtruth_dp_num_points] is a
        [batch_size, num_boxes] int32 tensor with the number of sampled
        DensePose points per object.
      labels[fields.InputDataFields.groundtruth_dp_part_ids] is a
        [batch_size, num_boxes, max_sampled_points] int32 tensor with the
        DensePose part ids (0-indexed) per object.
      labels[fields.InputDataFields.groundtruth_dp_surface_coords] is a
        [batch_size, num_boxes, max_sampled_points, 4] float32 tensor with the
        DensePose surface coordinates. The format is (y, x, v, u), where (y, x)
        are normalized image coordinates and (v, u) are normalized surface part
        coordinates.
      labels[fields.InputDataFields.groundtruth_track_ids] is a
        [batch_size, num_boxes] int32 tensor with the track ID for each object.

  Raises:
    TypeError: if the `train_config`, `train_input_config` or `model_config`
      are not of the correct type.
  """
  # Validate config types up front so misconfiguration fails fast.
  if not isinstance(train_config, train_pb2.TrainConfig):
    raise TypeError('For training mode, the `train_config` must be a '
                    'train_pb2.TrainConfig.')
  if not isinstance(train_input_config, input_reader_pb2.InputReader):
    raise TypeError('The `train_input_config` must be a '
                    'input_reader_pb2.InputReader.')
  if not isinstance(model_config, model_pb2.DetectionModel):
    raise TypeError('The `model_config` must be a '
                    'model_pb2.DetectionModel.')

  # Only the model's preprocess function is needed here; building the full
  # model from config is avoided when one is already supplied.
  if model is None:
    model_preprocess_fn = INPUT_BUILDER_UTIL_MAP['model_build'](
        model_config, is_training=True).preprocess
  else:
    model_preprocess_fn = model.preprocess

  num_classes = config_util.get_number_of_classes(model_config)

  def transform_and_pad_input_data_fn(tensor_dict):
    """Combines transform and pad operation."""
    data_augmentation_options = [
        preprocessor_builder.build(step)
        for step in train_config.data_augmentation_options
    ]
    data_augmentation_fn = functools.partial(
        augment_input_data,
        data_augmentation_options=data_augmentation_options)

    image_resizer_config = config_util.get_image_resizer_config(model_config)
    image_resizer_fn = image_resizer_builder.build(image_resizer_config)
    keypoint_type_weight = train_input_config.keypoint_type_weight or None
    transform_data_fn = functools.partial(
        transform_input_data, model_preprocess_fn=model_preprocess_fn,
        image_resizer_fn=image_resizer_fn,
        num_classes=num_classes,
        data_augmentation_fn=data_augmentation_fn,
        merge_multiple_boxes=train_config.merge_multiple_label_boxes,
        retain_original_image=train_config.retain_original_images,
        use_multiclass_scores=train_config.use_multiclass_scores,
        use_bfloat16=train_config.use_bfloat16,
        keypoint_type_weight=keypoint_type_weight)

    # Transform first, then pad everything to static shapes for batching.
    tensor_dict = pad_input_data_to_static_shapes(
        tensor_dict=transform_data_fn(tensor_dict),
        max_num_boxes=train_input_config.max_number_of_boxes,
        num_classes=num_classes,
        spatial_image_shape=config_util.get_spatial_image_size(
            image_resizer_config),
        max_num_context_features=config_util.get_max_num_context_features(
            model_config),
        context_feature_length=config_util.get_context_feature_length(
            model_config))
    include_source_id = train_input_config.include_source_id
    return (_get_features_dict(tensor_dict, include_source_id),
            _get_labels_dict(tensor_dict))

  reduce_to_frame_fn = get_reduce_to_frame_fn(train_input_config, True)

  dataset = INPUT_BUILDER_UTIL_MAP['dataset_build'](
      train_input_config,
      transform_input_data_fn=transform_and_pad_input_data_fn,
      batch_size=params['batch_size'] if params else train_config.batch_size,
      input_context=input_context,
      reduce_to_frame_fn=reduce_to_frame_fn)
  return dataset
def create_eval_input_fn(eval_config, eval_input_config, model_config):
  """Creates an eval `input` function for `Estimator`.

  Args:
    eval_config: An eval_pb2.EvalConfig.
    eval_input_config: An input_reader_pb2.InputReader.
    model_config: A model_pb2.DetectionModel.

  Returns:
    `input_fn` for `Estimator` in EVAL mode.
  """
  def _input_fn(params=None):
    """Estimator `input_fn` that closes over the configs."""
    return eval_input(eval_config, eval_input_config, model_config,
                      params=params)

  return _input_fn
def eval_input(eval_config, eval_input_config, model_config,
               model=None, params=None, input_context=None):
  """Returns `features` and `labels` tensor dictionaries for evaluation.

  Args:
    eval_config: An eval_pb2.EvalConfig.
    eval_input_config: An input_reader_pb2.InputReader.
    model_config: A model_pb2.DetectionModel.
    model: A pre-constructed Detection Model.
      If None, one will be created from the config.
    params: Parameter dictionary passed from the estimator.
    input_context: optional, A tf.distribute.InputContext object used to
      shard filenames and compute per-replica batch_size when this function
      is being called per-replica.

  Returns:
    A tf.data.Dataset that holds (features, labels) tuple.

    features: Dictionary of feature tensors.
      features[fields.InputDataFields.image] is a [1, H, W, C] float32 tensor
        with preprocessed images.
      features[HASH_KEY] is a [1] int32 tensor representing unique
        identifiers for the images.
      features[fields.InputDataFields.true_image_shape] is a [1, 3]
        int32 tensor representing the true image shapes, as preprocessed
        images could be padded.
      features[fields.InputDataFields.original_image] is a [1, H', W', C]
        float32 tensor with the original image.
    labels: Dictionary of groundtruth tensors.
      labels[fields.InputDataFields.groundtruth_boxes] is a [1, num_boxes, 4]
        float32 tensor containing the corners of the groundtruth boxes.
      labels[fields.InputDataFields.groundtruth_classes] is a
        [num_boxes, num_classes] float32 one-hot tensor of classes.
      labels[fields.InputDataFields.groundtruth_area] is a [1, num_boxes]
        float32 tensor containing object areas.
      labels[fields.InputDataFields.groundtruth_is_crowd] is a [1, num_boxes]
        bool tensor indicating if the boxes enclose a crowd.
      labels[fields.InputDataFields.groundtruth_difficult] is a [1, num_boxes]
        int32 tensor indicating if the boxes represent difficult instances.
      -- Optional --
      labels[fields.InputDataFields.groundtruth_instance_masks] is a
        [1, num_boxes, H, W] float32 tensor containing only binary values,
        which represent instance masks for objects.
      labels[fields.InputDataFields.groundtruth_instance_mask_weights] is a
        [1, num_boxes] float32 tensor containing groundtruth weights for each
        instance mask.
      labels[fields.InputDataFields.groundtruth_weights] is a
        [batch_size, num_boxes, num_keypoints] float32 tensor containing
        groundtruth weights for the keypoints.
      labels[fields.InputDataFields.groundtruth_visibilities] is a
        [batch_size, num_boxes, num_keypoints] bool tensor containing
        groundtruth visibilities for each keypoint.
      labels[fields.InputDataFields.groundtruth_group_of] is a [1, num_boxes]
        bool tensor indicating if the box covers more than 5 instances of the
        same class which heavily occlude each other.
      labels[fields.InputDataFields.groundtruth_labeled_classes] is a
        [num_boxes, num_classes] float32 k-hot tensor of classes.
      labels[fields.InputDataFields.groundtruth_dp_num_points] is a
        [batch_size, num_boxes] int32 tensor with the number of sampled
        DensePose points per object.
      labels[fields.InputDataFields.groundtruth_dp_part_ids] is a
        [batch_size, num_boxes, max_sampled_points] int32 tensor with the
        DensePose part ids (0-indexed) per object.
      labels[fields.InputDataFields.groundtruth_dp_surface_coords] is a
        [batch_size, num_boxes, max_sampled_points, 4] float32 tensor with the
        DensePose surface coordinates. The format is (y, x, v, u), where (y, x)
        are normalized image coordinates and (v, u) are normalized surface part
        coordinates.
      labels[fields.InputDataFields.groundtruth_track_ids] is a
        [batch_size, num_boxes] int32 tensor with the track ID for each object.

  Raises:
    TypeError: if the `eval_config`, `eval_input_config` or `model_config`
      are not of the correct type.
  """
  params = params or {}
  if not isinstance(eval_config, eval_pb2.EvalConfig):
    # Bug fix: the original message claimed the required type was
    # `train_pb2.EvalConfig`; the isinstance check above uses eval_pb2.
    raise TypeError('For eval mode, the `eval_config` must be an '
                    'eval_pb2.EvalConfig.')
  if not isinstance(eval_input_config, input_reader_pb2.InputReader):
    raise TypeError('The `eval_input_config` must be a '
                    'input_reader_pb2.InputReader.')
  if not isinstance(model_config, model_pb2.DetectionModel):
    raise TypeError('The `model_config` must be a '
                    'model_pb2.DetectionModel.')
  if eval_config.force_no_resize:
    # Swap whatever resizer the architecture configured for an identity
    # resizer so evaluation runs at the original image resolution.
    arch = model_config.WhichOneof('model')
    arch_config = getattr(model_config, arch)
    image_resizer_proto = image_resizer_pb2.ImageResizer()
    image_resizer_proto.identity_resizer.CopyFrom(
        image_resizer_pb2.IdentityResizer())
    arch_config.image_resizer.CopyFrom(image_resizer_proto)
  if model is None:
    model_preprocess_fn = INPUT_BUILDER_UTIL_MAP['model_build'](
        model_config, is_training=False).preprocess
  else:
    model_preprocess_fn = model.preprocess

  def transform_and_pad_input_data_fn(tensor_dict):
    """Combines transform and pad operation."""
    num_classes = config_util.get_number_of_classes(model_config)
    image_resizer_config = config_util.get_image_resizer_config(model_config)
    image_resizer_fn = image_resizer_builder.build(image_resizer_config)
    keypoint_type_weight = eval_input_config.keypoint_type_weight or None
    # No data augmentation during evaluation (data_augmentation_fn=None).
    transform_data_fn = functools.partial(
        transform_input_data, model_preprocess_fn=model_preprocess_fn,
        image_resizer_fn=image_resizer_fn,
        num_classes=num_classes,
        data_augmentation_fn=None,
        retain_original_image=eval_config.retain_original_images,
        retain_original_image_additional_channels=
        eval_config.retain_original_image_additional_channels,
        keypoint_type_weight=keypoint_type_weight,
        image_classes_field_map_empty_to_ones=eval_config
        .image_classes_field_map_empty_to_ones)
    tensor_dict = pad_input_data_to_static_shapes(
        tensor_dict=transform_data_fn(tensor_dict),
        max_num_boxes=eval_input_config.max_number_of_boxes,
        num_classes=num_classes,
        spatial_image_shape=config_util.get_spatial_image_size(
            image_resizer_config),
        max_num_context_features=config_util.get_max_num_context_features(
            model_config),
        context_feature_length=config_util.get_context_feature_length(
            model_config))
    include_source_id = eval_input_config.include_source_id
    return (_get_features_dict(tensor_dict, include_source_id),
            _get_labels_dict(tensor_dict))

  reduce_to_frame_fn = get_reduce_to_frame_fn(eval_input_config, False)
  dataset = INPUT_BUILDER_UTIL_MAP['dataset_build'](
      eval_input_config,
      batch_size=params['batch_size'] if params else eval_config.batch_size,
      transform_input_data_fn=transform_and_pad_input_data_fn,
      input_context=input_context,
      reduce_to_frame_fn=reduce_to_frame_fn)
  return dataset
def create_predict_input_fn(model_config, predict_input_config):
  """Build a PREDICT-mode `input_fn` for use with `Estimator`.

  Args:
    model_config: A model_pb2.DetectionModel.
    predict_input_config: An input_reader_pb2.InputReader.

  Returns:
    `input_fn` for `Estimator` in PREDICT mode.
  """

  def _predict_input_fn(params=None):
    """Decodes serialized tf.Examples and returns `ServingInputReceiver`.

    Args:
      params: Parameter dictionary passed from the estimator (unused).

    Returns:
      `ServingInputReceiver`.
    """
    del params
    serialized_example = tf.placeholder(
        dtype=tf.string, shape=[], name='tf_example')

    class_count = config_util.get_number_of_classes(model_config)
    preprocess_fn = INPUT_BUILDER_UTIL_MAP['model_build'](
        model_config, is_training=False).preprocess
    resizer_config = config_util.get_image_resizer_config(model_config)
    resizer_fn = image_resizer_builder.build(resizer_config)
    # Serving performs no augmentation, only preprocessing and resizing.
    transform_fn = functools.partial(
        transform_input_data, model_preprocess_fn=preprocess_fn,
        image_resizer_fn=resizer_fn,
        num_classes=class_count,
        data_augmentation_fn=None)

    example_decoder = tf_example_decoder.TfExampleDecoder(
        load_instance_masks=False,
        num_additional_channels=predict_input_config.num_additional_channels)
    tensor_dict = transform_fn(example_decoder.decode(serialized_example))

    # Serving expects batched inputs, so prepend a batch dimension of 1.
    image_batch = tf.cast(
        tensor_dict[fields.InputDataFields.image], dtype=tf.float32)
    image_batch = tf.expand_dims(image_batch, axis=0)
    true_shape_batch = tf.expand_dims(
        tensor_dict[fields.InputDataFields.true_image_shape], axis=0)

    return tf_estimator.export.ServingInputReceiver(
        features={
            fields.InputDataFields.image: image_batch,
            fields.InputDataFields.true_image_shape: true_shape_batch},
        receiver_tensors={SERVING_FED_EXAMPLE_KEY: serialized_example})

  return _predict_input_fn
def get_reduce_to_frame_fn(input_reader_config, is_training):
  """Returns a function reducing sequence tensors to single frame tensors.

  If the input type is not TF_SEQUENCE_EXAMPLE, the tensors are passed through
  this function unchanged. Otherwise, when in training mode, a single frame is
  selected at random from the sequence example, and the tensors for that frame
  are converted to single frame tensors, with all associated context features.
  In evaluation mode all frames are converted to single frame tensors with
  copied context tensors. After the sequence example tensors are converted into
  one or many single frame tensors, the images from each frame are decoded.

  Args:
    input_reader_config: An input_reader_pb2.InputReader.
    is_training: Whether we are in training mode.

  Returns:
    `reduce_to_frame_fn` for the dataset builder
  """
  if input_reader_config.input_type != (
      input_reader_pb2.InputType.Value('TF_SEQUENCE_EXAMPLE')):
    # Non-sequence inputs need no frame reduction: identity pass-through.
    return lambda dataset, dataset_map_fn, batch_size, config: dataset
  else:
    def reduce_to_frame(dataset, dataset_map_fn, batch_size,
                        input_reader_config):
      """Returns a function reducing sequence tensors to single frame tensors.

      Args:
        dataset: A tf dataset containing sequence tensors.
        dataset_map_fn: A function that handles whether to
          map_with_legacy_function for this dataset
        batch_size: used if map_with_legacy_function is true to determine
          num_parallel_calls
        input_reader_config: used if map_with_legacy_function is true to
          determine num_parallel_calls

      Returns:
        A tf dataset containing single frame tensors.
      """
      if is_training:
        def get_single_frame(tensor_dict):
          """Returns a random frame from a sequence.

          Picks a random frame and returns slices of sequence tensors
          corresponding to the random frame. Returns non-sequence tensors
          unchanged.

          Args:
            tensor_dict: A dictionary containing sequence tensors.

          Returns:
            Tensors for a single random frame within the sequence.
          """
          # The number of frames is taken from the length of source_id,
          # which has one entry per frame in a sequence example.
          num_frames = tf.cast(
              tf.shape(tensor_dict[fields.InputDataFields.source_id])[0],
              dtype=tf.int32)
          if input_reader_config.frame_index == -1:
            # frame_index == -1 means "sample a frame uniformly at random".
            frame_index = tf.random.uniform((), minval=0, maxval=num_frames,
                                            dtype=tf.int32)
          else:
            # Otherwise always pick the configured fixed frame.
            frame_index = tf.constant(input_reader_config.frame_index,
                                      dtype=tf.int32)
          out_tensor_dict = {}
          for key in tensor_dict:
            if key in fields.SEQUENCE_FIELDS:
              # Slice random frame from sequence tensors
              out_tensor_dict[key] = tensor_dict[key][frame_index]
            else:
              # Copy all context tensors.
              out_tensor_dict[key] = tensor_dict[key]
          return out_tensor_dict
        dataset = dataset_map_fn(dataset, get_single_frame, batch_size,
                                 input_reader_config)
      else:
        # Eval: keep every frame; replicate context tensors across frames
        # and then flatten frames into individual elements.
        dataset = dataset_map_fn(dataset, util_ops.tile_context_tensors,
                                 batch_size, input_reader_config)
        dataset = dataset.unbatch()
      # Decode frame here as SequenceExample tensors contain encoded images.
      dataset = dataset_map_fn(dataset, util_ops.decode_image, batch_size,
                               input_reader_config)
      return dataset
    return reduce_to_frame
| 45.046914 | 81 | 0.740499 |
c271975c5b2509c3980649a11526b214cbce831b | 6,368 | py | Python | utils/generator/generators_isic_test.py | leezhp1994/TMHFS | 4711c38aab7657313eea3697da5cb1e4122ae8c8 | [
"Apache-2.0"
] | 7 | 2020-05-20T02:22:25.000Z | 2021-03-26T08:51:51.000Z | utils/generator/generators_isic_test.py | leezhp1994/TMHFS | 4711c38aab7657313eea3697da5cb1e4122ae8c8 | [
"Apache-2.0"
] | null | null | null | utils/generator/generators_isic_test.py | leezhp1994/TMHFS | 4711c38aab7657313eea3697da5cb1e4122ae8c8 | [
"Apache-2.0"
] | null | null | null | """
This code based on codes from https://github.com/tristandeleu/ntm-one-shot \
and https://github.com/kjunelee/MetaOptNet
"""
import numpy as np
import random, os
import pickle as pkl
from PIL import Image
import pandas as pd
import utils.generator.additional_transforms as add_transforms
import torchvision.transforms as transforms
from .auto_augment import AutoAugment, Cutout
class TransformLoader:
    """Factory for torchvision augmentation pipelines at a fixed image size.

    Attributes:
        image_size: Side length (pixels) used by Resize/Crop transforms.
        normalize_param: Keyword args for ``transforms.Normalize``.
        jitter_param: Keyword args for ``add_transforms.ImageJitter``.
    """

    def __init__(self, image_size, normalize_param=None, jitter_param=None):
        """Store transform settings.

        Args:
            image_size: Target image side length.
            normalize_param: Optional kwargs for Normalize; defaults to the
                standard ImageNet mean/std.
            jitter_param: Optional kwargs for ImageJitter; defaults kept
                identical to the original implementation.
        """
        # Bug fix: the defaults used to be mutable dict literals in the
        # signature (shared across all instances). Build fresh dicts instead.
        if normalize_param is None:
            normalize_param = dict(mean=[0.485, 0.456, 0.406],
                                   std=[0.229, 0.224, 0.225])
        if jitter_param is None:
            jitter_param = dict(Brightness=0.4, Contrast=0.4, Color=0.4)
        self.image_size = image_size
        self.normalize_param = normalize_param
        self.jitter_param = jitter_param

    def parse_transform(self, transform_type):
        """Instantiate a single transform from its string name.

        Args:
            transform_type: Either one of the custom names ('ImageJitter',
                'AutoAugment') or the name of a ``torchvision.transforms``
                class.

        Returns:
            A callable transform object.
        """
        if transform_type == 'ImageJitter':
            return add_transforms.ImageJitter(self.jitter_param)
        if transform_type == 'AutoAugment':
            return AutoAugment()
        method = getattr(transforms, transform_type)
        if transform_type == 'RandomResizedCrop':
            return method(self.image_size)
        elif transform_type == 'CenterCrop':
            return method(self.image_size)
        elif transform_type == 'Resize':
            # Resize to an exact image_size x image_size square.
            return method([int(self.image_size * 1), int(self.image_size * 1)])
        elif transform_type == 'Normalize':
            return method(**self.normalize_param)
        elif transform_type == 'RandomRotation':
            # Rotation range is fixed at +/-45 degrees.
            return method(45)
        else:
            return method()

    def get_composed_transform(self):
        """Build the ten augmentation pipelines used per episode.

        NOTE(review): several pipelines are repeated verbatim; this looks
        like a deliberate way of weighting augmentations across the ten
        slots -- confirm before deduplicating.

        Returns:
            A list of ``transforms.Compose`` objects, one per pipeline.
        """
        transform_lists = [['Resize','RandomResizedCrop', 'ImageJitter', 'ColorJitter', 'RandomRotation', 'RandomHorizontalFlip'],
                           ['Resize', 'RandomRotation','ImageJitter', 'RandomHorizontalFlip'],
                           ['Resize', 'RandomRotation'],
                           ['Resize', 'ImageJitter'],
                           ['Resize', 'RandomHorizontalFlip'],
                           ['Resize', 'RandomRotation','ImageJitter', 'RandomHorizontalFlip'],
                           ['Resize', 'RandomRotation'],
                           ['Resize', 'RandomRotation','ImageJitter'],
                           ['Resize', 'ImageJitter', 'RandomHorizontalFlip'],
                           ['Resize', 'RandomHorizontalFlip']
                           ]
        transforms_all = []
        for transform_list in transform_lists:
            transform_funcs = [self.parse_transform(x) for x in transform_list]
            transforms_all.append(transforms.Compose(transform_funcs))
        return transforms_all
class miniImageNetGenerator(object):
    """Episodic few-shot data generator over the ISIC-2018 Task 3 dataset.

    Despite the name (kept for compatibility with sibling generators), this
    class reads ISIC2018 Task3 ground truth / images and yields episodes of
    ``nb_classes`` classes with ``nb_samples_per_class`` images each,
    replicated under ``aug_num`` augmentation pipelines.
    """

    def __init__(self, data_file, nb_classes=5, nb_samples_per_class=15,
                 max_iter=None, xp=np, aug_num=10):
        super(miniImageNetGenerator, self).__init__()
        # Dataset layout is hard-coded relative to data_file.
        csv_path = data_file + "/ISIC2018_Task3_Training_GroundTruth/ISIC2018_Task3_Training_GroundTruth.csv"
        image_path = data_file + "/ISIC2018_Task3_Training_Input/"
        self.img_path = image_path
        self.csv_path = csv_path
        self.data_file = data_file
        self.nb_classes = nb_classes
        self.nb_samples_per_class = nb_samples_per_class
        self.max_iter = max_iter          # None means iterate forever
        self.xp = xp                      # array module (numpy or cupy-like)
        self.num_iter = 0
        self.data_info = pd.read_csv(csv_path, skiprows=[0], header=None)
        # First column contains the image paths
        self.image_name = np.asarray(self.data_info.iloc[:, 0])
        # Remaining columns are one-hot labels; argmax over nonzero entries
        # converts them to integer class ids.
        self.labels = np.asarray(self.data_info.iloc[:, 1:])
        self.labels = (self.labels != 0).argmax(axis=1)
        self.data = self._load_data()
        # Transforms are built for 84x84 inputs regardless of source size.
        self.trans_loader = TransformLoader(84)
        self.aug_num = aug_num

    def _load_data(self):
        """Group image paths by class id; rows without 'ISIC' are skipped."""
        data = {}
        for i in range(len(self.image_name)):
            label = self.labels[i]
            img_path = self.image_name[i]
            if 'ISIC' not in img_path:
                continue
            if label not in data.keys():
                data[label] = []
            data[label].append(img_path)
        return data

    def __iter__(self):
        return self

    def __next__(self):
        return self.next()

    def next(self):
        """Return ``(episode_index, (images, labels))`` or stop after max_iter."""
        if (self.max_iter is None) or (self.num_iter < self.max_iter):
            self.num_iter += 1
            images, labels = self.sample(self.nb_classes, self.nb_samples_per_class)
            return (self.num_iter - 1), (images, labels)
        else:
            raise StopIteration()

    def augment(self, path, aug_ind=0):
        """Load one image and apply the augmentation pipeline ``aug_ind``.

        aug_ind == 0: plain 84x84 bilinear resize; aug_ind == -1: raw image;
        otherwise the corresponding pipeline from TransformLoader is applied.
        """
        path = os.path.join(self.img_path, path + ".jpg")
        with open(path, 'rb') as f:
            with Image.open(f) as img:
                img = img.convert('RGB')
                if aug_ind==0:
                    img = img.resize((84, 84), Image.BILINEAR)
                    imgs = np.array(img)
                elif aug_ind==-1:
                    imgs = np.array(img)
                else:
                    transform_all = self.trans_loader.get_composed_transform()
                    imgs=np.array(transform_all[aug_ind](img))
        return imgs

    def sample(self, nb_classes, nb_samples_per_class):
        """Draw one episode: same images under each augmentation pipeline.

        NOTE(review): ``random.sample`` on ``dict.keys()`` requires a
        sequence on Python >= 3.11 -- verify the targeted Python version.
        """
        sampled_characters = random.sample(self.data.keys(), nb_classes)
        labels = []
        images = []
        _ind_all = []
        _imgss=[]
        for ii in range(self.aug_num):
            labels_and_images = []
            for (k, char) in enumerate(sampled_characters):
                _imgs = self.data[char]
                # Indices are drawn once (ii == 0) and reused for every
                # augmentation pass so all passes see the same images.
                if ii ==0:
                    _ind_all.append(random.sample(range(len(_imgs)), nb_samples_per_class))
                _ind = _ind_all[k]
                if k==0:
                    _imgss.append(_imgs[_ind[1]])
                # NOTE(review): ``.flatten()`` binds to ``np.float32(255)``,
                # not to the augmented image array -- likely intended
                # ``(self.augment(...) / np.float32(255)).flatten()``; confirm
                # before changing, as downstream code may rely on the shape.
                labels_and_images.extend([(k, self.xp.array(self.augment(_imgs[i],ii)/np.float32(255).flatten())) for i in _ind])
            # Interleave samples so consecutive elements cycle over classes.
            arg_labels_and_images = []
            for i in range(self.nb_samples_per_class):
                for j in range(self.nb_classes):
                    arg_labels_and_images.extend([labels_and_images[i+j*self.nb_samples_per_class]])
            labels_temp, images_temp = zip(*arg_labels_and_images)
            labels.append(labels_temp)
            images.append(images_temp)
        # NOTE(review): returns ``labels_temp`` (last augmentation pass only)
        # rather than the accumulated ``labels`` list -- the label ordering is
        # identical across passes, so this may be intentional; verify.
        return images, labels_temp
| 39.552795 | 157 | 0.585898 |
ec5b91a0224a0e390523216ef95d80ded5a79025 | 1,479 | py | Python | setup.py | sih4sing5hong5/hue7jip8 | 64ff872e11ce36c3721889f59de92b694735400c | [
"MIT"
] | 15 | 2018-10-15T06:45:50.000Z | 2021-08-10T03:43:00.000Z | setup.py | tku609630339/hue7jip8 | 64ff872e11ce36c3721889f59de92b694735400c | [
"MIT"
] | 58 | 2017-08-02T07:26:11.000Z | 2020-11-06T06:50:09.000Z | setup.py | sih4sing5hong5/hue7jip8 | 64ff872e11ce36c3721889f59de92b694735400c | [
"MIT"
] | 4 | 2018-07-10T20:10:35.000Z | 2021-05-22T08:49:01.000Z | from setuptools import setup
from os import walk
import os
import sys
from 版本 import 版本
# tar無法度下傷長的檔案名,所以愛用zip
# python setup.py sdist --format=zip upload
try:
# travis攏先`python setup.py sdist`才閣上傳
sys.argv.insert(sys.argv.index('sdist') + 1, '--format=zip')
except ValueError:
# 無upload
pass
def 讀(檔名):
return open(os.path.join(os.path.dirname(__file__), 檔名)).read()
def 揣工具包(頭):
'setup的find_packages無支援windows中文檔案'
工具包 = []
for 目錄, _, 檔案 in walk(頭):
if '__init__.py' in 檔案:
工具包.append(目錄.replace('/', '.'))
return 工具包
setup(
name='hue7jip8',
packages=揣工具包('匯入'),
version=版本,
description='匯入臺灣語料',
long_description=讀('README.md'),
long_description_content_type="text/markdown",
author='薛丞宏',
author_email='ihcaoe@gmail.com',
url='https://xn--v0qr21b.xn--kpry57d/',
download_url='https://github.com/sih4sing5hong5/hue7jip8',
keywords=[
'Corpus', '語料庫',
'Taiwan', 'Taiwanese',
],
classifiers=[
'Development Status :: 3 - Alpha',
'Operating System :: Unix',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
install_requires=[
'tai5-uan5_gian5-gi2_hok8-bu7',
'pyyaml',
],
package_data={
'匯入': [
'教育部閩南語常用詞辭典/下載臺語教典音檔-dropbox.sh',
'教育部閩南語常用詞辭典/下載臺語教典音檔-官網沓沓掠.sh',
'教育部閩南語常用詞辭典/詞目總檔.csv',
],
}
)
| 23.47619 | 67 | 0.597701 |
74c3998665f3c93c811fb672334ed7ceed0a2a49 | 4,933 | py | Python | armada/tests/unit/api/test_armada_controller.py | openstack/airship-armada | 416eff4e52f72169c3ebf49a8ffca36086692b7a | [
"Apache-2.0"
] | 29 | 2018-05-21T04:34:34.000Z | 2019-05-10T12:32:46.000Z | armada/tests/unit/api/test_armada_controller.py | openstack/airship-armada | 416eff4e52f72169c3ebf49a8ffca36086692b7a | [
"Apache-2.0"
] | 1 | 2019-11-11T23:31:27.000Z | 2019-11-11T23:31:27.000Z | armada/tests/unit/api/test_armada_controller.py | airshipit/armada | 790af88d61bb3d0f0368faae22e94b4e533df92d | [
"Apache-2.0"
] | 9 | 2019-05-23T10:14:00.000Z | 2020-03-03T03:08:02.000Z | # Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mock
from oslo_config import cfg
from armada import api
from armada.api.controller import armada as armada_api
from armada.common.policies import base as policy_base
from armada.tests import test_utils
from armada.tests.unit.api import base
CONF = cfg.CONF
@mock.patch.object(
armada_api.Apply, 'handle', armada_api.Apply.handle.__wrapped__)
class ArmadaControllerTest(base.BaseControllerTest):
@mock.patch.object(api, 'Helm')
@mock.patch.object(armada_api, 'Armada')
@mock.patch.object(armada_api, 'ReferenceResolver')
def test_armada_apply_resource(
self, mock_resolver, mock_armada, mock_helm):
"""Tests the POST /api/v1.0/apply endpoint."""
rules = {'armada:create_endpoints': '@'}
self.policy.set_rules(rules)
options = {
'debug': 'true',
'disable_update_pre': 'false',
'disable_update_post': 'false',
'enable_chart_cleanup': 'false',
'skip_pre_flight': 'false',
'wait': 'false',
'timeout': '100'
}
m_helm = mock_helm.return_value
m_helm.__enter__.return_value = m_helm
expected_armada_options = {
'disable_update_pre': False,
'disable_update_post': False,
'enable_chart_cleanup': False,
'force_wait': False,
'timeout': 100,
'helm': m_helm,
'target_manifest': None
}
payload_url = 'http://foo.com/test.yaml'
payload = {'hrefs': [payload_url]}
body = json.dumps(payload)
expected = {'message': {'diff': [], 'install': [], 'upgrade': []}}
mock_resolver.resolve_reference.return_value = \
[b"---\nfoo: bar"]
mock_armada.return_value.sync.return_value = \
{'diff': [], 'install': [], 'upgrade': []}
result = self.app.simulate_post(
path='/api/v1.0/apply',
body=body,
headers={'Content-Type': 'application/json'},
params=options)
self.assertEqual(result.json, expected)
self.assertEqual('application/json', result.headers['content-type'])
mock_resolver.resolve_reference.assert_called_with([payload_url])
mock_armada.assert_called_with(
[{
'foo': 'bar'
}], **expected_armada_options)
mock_armada.return_value.sync.assert_called()
mock_helm.assert_called()
m_helm.__exit__.assert_called()
def test_armada_apply_no_href(self):
"""Tests /api/v1.0/apply returns 400 when hrefs list is empty."""
rules = {'armada:create_endpoints': '@'}
self.policy.set_rules(rules)
options = {
'debug': 'true',
'disable_update_pre': 'false',
'disable_update_post': 'false',
'enable_chart_cleanup': 'false',
'skip_pre_flight': 'false',
'wait': 'false',
'timeout': '100'
}
payload = {'hrefs': []}
body = json.dumps(payload)
result = self.app.simulate_post(
path='/api/v1.0/apply',
body=body,
headers={'Content-Type': 'application/json'},
params=options)
self.assertEqual(result.status_code, 400)
class ArmadaControllerNegativeTest(base.BaseControllerTest):
@test_utils.attr(type=['negative'])
def test_armada_apply_raises_415_given_unsupported_media_type(self):
"""Tests the POST /api/v1.0/apply endpoint returns 415 given
unsupported media type.
"""
rules = {'armada:create_endpoints': '@'}
self.policy.set_rules(rules)
resp = self.app.simulate_post('/api/v1.0/apply', body=None)
self.assertEqual(415, resp.status_code)
class ArmadaControllerNegativeRbacTest(base.BaseControllerTest):
@test_utils.attr(type=['negative'])
def test_armada_apply_resource_insufficient_permissions(self):
"""Tests the POST /api/v1.0/apply endpoint returns 403 following failed
authorization.
"""
rules = {'armada:create_endpoints': policy_base.RULE_ADMIN_REQUIRED}
self.policy.set_rules(rules)
resp = self.app.simulate_post('/api/v1.0/apply')
self.assertEqual(403, resp.status_code)
| 34.985816 | 79 | 0.631664 |
43f5569a13dd26e3e044ea8726b6f4f22d8a8f59 | 2,018 | py | Python | benchmark.py | wjwOffline/brainfuck-jit-interpreter | c1e58feffe95050d86ddf17fecc2edb5f6ac29ce | [
"MIT"
] | 14 | 2021-05-19T09:57:58.000Z | 2022-03-23T16:32:25.000Z | benchmark.py | wjwOffline/brainfuck-jit-interpreter | c1e58feffe95050d86ddf17fecc2edb5f6ac29ce | [
"MIT"
] | 1 | 2022-01-10T05:06:24.000Z | 2022-01-10T05:32:28.000Z | benchmark.py | wjwOffline/brainfuck-jit-interpreter | c1e58feffe95050d86ddf17fecc2edb5f6ac29ce | [
"MIT"
] | 2 | 2021-06-04T13:59:26.000Z | 2021-12-29T12:14:15.000Z | import logging
import shlex
import subprocess
import sys
import threading
import os
import time
# Seconds each interpreter run gets in the default FIB benchmark.
default_benchmark_time = 10
# Named indices used for sys.argv / timing-list access below.
constant_idx_zer = 0
constant_idx_one = 1
def exec_cmd(command, file_name, timeout):
    """Run *command* with stdout redirected to *file_name*, killing it
    after *timeout* seconds.

    Args:
        command: Command string, split with shlex (no shell involved).
        file_name: Path that receives the command's stdout.
        timeout: Seconds to wait before the child process is killed.
    """
    try:
        with open(file_name, 'w+') as f:
            cmd = subprocess.Popen(shlex.split(command),
                                   shell=False,
                                   stdout=f,
                                   stderr=subprocess.PIPE,
                                   universal_newlines=True)
            _thread_command(cmd, timeout)
    except subprocess.SubprocessError as err:
        # Bug fix: the original passed `err` as a second positional argument
        # to print() (logging-style), so the %s placeholder was never
        # interpolated; format the message explicitly instead.
        print('Calledprocerr: %s' % err)
def _thread_command(task, timeout):
task_thread = threading.Thread(target=task.wait)
task_thread.start()
task_thread.join(timeout)
if task_thread.is_alive():
task.kill()
if __name__ == '__main__':
    if len(sys.argv) == 1:
        # No arguments: run the default FIB throughput benchmark. Both the
        # plain interpreter and the JIT run for the same wall-clock budget,
        # so more output lines means higher throughput.
        print('\nBenchmark for %s seconds: (higher score is better)' % default_benchmark_time)
        exec_cmd('./interpreter ./bfs/FIB.bf', '.out_interpreter', default_benchmark_time)
        exec_cmd('./interpreter ./bfs/FIB.bf --jit', '.out_jit', default_benchmark_time)
        # Compare the two runs by counting lines of produced output.
        completedprocess = subprocess.run('wc -l .out_interpreter .out_jit', shell=True, capture_output=True)
        print(str(completedprocess.stdout, 'utf-8'))
        os.remove('.out_interpreter')
        os.remove('.out_jit')
        sys.exit(0)
    else:
        # An argument was given: run the selected optional benchmark.
        if sys.argv[constant_idx_one] == "mandelbrot":
            time_dict = []
            def _exec_case(command):
                # Time a single full run of the given command.
                print('\nCommand ' + command + ': \n')
                start_time = time.time()
                subprocess.call(shlex.split(command))
                time_dict.append(time.time() - start_time)
            _exec_case('./interpreter ./bfs/MANDELBROT.bf')
            _exec_case('./interpreter ./bfs/MANDELBROT.bf --jit')
            # Mandelbrot is a fixed workload, so lower wall time is better.
            print('\nBenchmark Result: (lower time is better)\n' +
                  "{:>10.3f}".format(time_dict[constant_idx_zer]) + 's interpreter\n' +
                  "{:>10.3f}".format(time_dict[constant_idx_one]) + 's jit\n')
0fdbd30996d223aa52fcbb7f1ce15f24b7331d5b | 1,917 | py | Python | tests/test_01_main/test_env_vars_3.py | sondrelg/uvicorn-gunicorn-docker | ddd38797d6a9ca820bd8b3134a0398ef3df8877a | [
"MIT"
] | null | null | null | tests/test_01_main/test_env_vars_3.py | sondrelg/uvicorn-gunicorn-docker | ddd38797d6a9ca820bd8b3134a0398ef3df8877a | [
"MIT"
] | null | null | null | tests/test_01_main/test_env_vars_3.py | sondrelg/uvicorn-gunicorn-docker | ddd38797d6a9ca820bd8b3134a0398ef3df8877a | [
"MIT"
] | null | null | null | import os
import time
import docker
import requests
from docker.client import DockerClient
from ..utils import (
CONTAINER_NAME,
get_config,
get_logs,
get_process_names,
get_response_text1,
remove_previous_container,
)
client = docker.from_env()
def verify_container(container: DockerClient, response_text: str) -> None:
    """Assert the running container honours the BIND/HOST/PORT env vars.

    Args:
        container: The docker container under test.
        response_text: Body expected from GET http://127.0.0.1:8000.
    """
    # Fix: the original fetched the config twice with identical calls;
    # once is sufficient.
    config_data = get_config(container)
    process_names = get_process_names(container)
    assert config_data['workers'] == 1
    assert len(process_names) == 2  # Manager + worker
    # HOST/PORT are recorded but BIND takes precedence for the socket.
    assert config_data['host'] == '127.0.0.1'
    assert config_data['port'] == '9000'
    assert config_data['bind'] == '0.0.0.0:8080'
    assert config_data['use_max_workers'] == 1
    logs = get_logs(container)
    assert 'Checking for script in /app/prestart.sh' in logs
    assert 'Running script /app/prestart.sh' in logs
    assert 'Running inside /app/prestart.sh, you could add migrations to this file' in logs
    response = requests.get('http://127.0.0.1:8000')
    assert response.text == response_text
def test_env_bind() -> None:
    """End-to-end check that BIND overrides HOST/PORT inside the image."""
    tag = os.getenv('NAME')
    image_ref = f'sondrelg/uvicorn-gunicorn:{tag}'
    expected_body = get_response_text1()
    pause = int(os.getenv('SLEEP_TIME', 1))
    remove_previous_container(client)
    test_container = client.containers.run(
        image_ref,
        name=CONTAINER_NAME,
        environment={
            'BIND': '0.0.0.0:8080',
            'HOST': '127.0.0.1',
            'PORT': '9000',
            'MAX_WORKERS': '1',
        },
        ports={'8080': '8000'},
        detach=True,
    )
    # Give the server a moment to boot before probing it.
    time.sleep(pause)
    verify_container(test_container, expected_body)
    test_container.stop()
    # The container must keep working across a stop/start cycle as well.
    test_container.start()
    time.sleep(pause)
    verify_container(test_container, expected_body)
    test_container.stop()
    test_container.remove()
3d78e464a063dff4c3f266eebf88a4f54eb528a5 | 5,864 | py | Python | model/e2e_ctc.py | hajiejue/FP-Code | d144336cfd8e70b289a673567f727b9c9abbf9f5 | [
"MIT"
] | null | null | null | model/e2e_ctc.py | hajiejue/FP-Code | d144336cfd8e70b289a673567f727b9c9abbf9f5 | [
"MIT"
] | null | null | null | model/e2e_ctc.py | hajiejue/FP-Code | d144336cfd8e70b289a673567f727b9c9abbf9f5 | [
"MIT"
] | null | null | null | import logging
import math
import sys
import numpy as np
import six
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import warpctc_pytorch as warp_ctc
from model.e2e_common import linear_tensor, to_cuda
# ------------- CTC Network --------------------------------------------------------------------------------------------
class CTC(torch.nn.Module):
"""CTC module
:param int odim: dimension of outputs
:param int eprojs: number of encoder projection units
:param float dropout_rate: dropout rate (0.0 ~ 1.0)
"""
def __init__(self, odim, eprojs, dropout_rate):
super(CTC, self).__init__()
self.dropout_rate = dropout_rate
self.loss = None
self.ctc_lo = torch.nn.Linear(eprojs, odim)
self.loss_fn = warp_ctc.CTCLoss(size_average=True)
self.ignore_id = -1
def forward(self, hs_pad, hlens, ys_pad):
'''CTC forward
:param torch.Tensor hs_pad: batch of padded hidden state sequences (B, Tmax, D)
:param torch.Tensor hlens: batch of lengths of hidden state sequences (B)
:param torch.Tensor ys_pad: batch of padded character id sequence tensor (B, Lmax)
:return: ctc loss value
:rtype: torch.Tensor
'''
# TODO(kan-bayashi): need to make more smart way
ys = [y[y != self.ignore_id] for y in ys_pad] # parse padded ys
self.loss = None
hlens = torch.from_numpy(np.fromiter(hlens, dtype=np.int32))
olens = torch.from_numpy(np.fromiter(
(x.size(0) for x in ys), dtype=np.int32))
# zero padding for hs
ys_hat = self.ctc_lo(F.dropout(hs_pad, p=self.dropout_rate))
# zero padding for ys
ys_true = torch.cat(ys).cpu().int() # batch x olen
# get length info
##logging.info(self.__class__.__name__ + ' input lengths: ' + ''.join(str(hlens).split('\n')))
##logging.info(self.__class__.__name__ + ' output lengths: ' + ''.join(str(olens).split('\n')))
# get ctc loss
# expected shape of seqLength x batchSize x alphabet_size
ys_hat = ys_hat.transpose(0, 1)
self.loss = to_cuda(self, self.loss_fn(ys_hat, ys_true, hlens, olens))
##logging.info('ctc loss:' + str(float(self.loss)))
return self.loss
def log_softmax(self, hs_pad):
'''log_softmax of frame activations
:param torch.Tensor hs_pad: 3d tensor (B, Tmax, eprojs)
:return: log softmax applied 3d tensor (B, Tmax, odim)
:rtype: torch.Tensor
'''
return F.log_softmax(self.ctc_lo(hs_pad), dim=2)
class CTCPrefixScore(object):
    '''Compute CTC label sequence scores

    which is based on Algorithm 2 in WATANABE et al.
    "HYBRID CTC/ATTENTION ARCHITECTURE FOR END-TO-END SPEECH RECOGNITION,"
    but extended to efficiently compute the probabilities of multiple labels
    simultaneously
    '''

    def __init__(self, x, blank, eos, xp):
        # x: per-frame label log-probabilities for one utterance
        # (indexed as x[t, label] throughout this class).
        self.xp = xp                       # array module (numpy-compatible)
        self.logzero = -10000000000.0      # stand-in for log(0)
        self.blank = blank                 # blank label id
        self.eos = eos                     # end-of-sequence label id
        self.input_length = len(x)         # number of frames T
        self.x = x

    def initial_state(self):
        '''Obtain an initial CTC state

        :return: CTC state
        '''
        # initial CTC state is made of a frame x 2 tensor that corresponds to
        # r_t^n(<sos>) and r_t^b(<sos>), where 0 and 1 of axis=1 represent
        # superscripts n and b (non-blank and blank), respectively.
        r = self.xp.full((self.input_length, 2), self.logzero, dtype=np.float32)
        r[0, 1] = self.x[0, self.blank]
        # Blank-path probability accumulates blank log-probs over frames.
        for i in six.moves.range(1, self.input_length):
            r[i, 1] = r[i - 1, 1] + self.x[i, self.blank]
        return r

    def __call__(self, y, cs, r_prev):
        '''Compute CTC prefix scores for next labels

        :param y : prefix label sequence
        :param cs : array of next labels
        :param r_prev: previous CTC state
        :return ctc_scores, ctc_states
        '''
        # initialize CTC states
        output_length = len(y) - 1  # ignore sos
        # new CTC states are prepared as a frame x (n or b) x n_labels tensor
        # that corresponds to r_t^n(h) and r_t^b(h).
        r = self.xp.ndarray((self.input_length, 2, len(cs)), dtype=np.float32)
        cs = cs.cpu().numpy()
        # Gather the candidate labels' log-probs for all frames at once.
        xs = self.x[:, cs]
        if output_length == 0:
            r[0, 0] = xs[0]
            r[0, 1] = self.logzero
        else:
            # Prefixes longer than t frames cannot exist; mark impossible.
            r[output_length - 1] = self.logzero
        # prepare forward probabilities for the last label
        r_sum = self.xp.logaddexp(r_prev[:, 0], r_prev[:, 1])  # log(r_t^n(g) + r_t^b(g))
        last = y[-1]
        if output_length > 0 and last in cs:
            # Repeating the last label requires an intervening blank, so use
            # only the blank-path probability r_prev[:, 1] for that label.
            log_phi = self.xp.ndarray((self.input_length, len(cs)), dtype=np.float32)
            for i in six.moves.range(len(cs)):
                log_phi[:, i] = r_sum if cs[i] != last else r_prev[:, 1]
        else:
            log_phi = r_sum
        # compute forward probabilities log(r_t^n(h)), log(r_t^b(h)),
        # and log prefix probabilities log(psi)
        start = max(output_length, 1)
        log_psi = r[start - 1, 0]
        for t in six.moves.range(start, self.input_length):
            r[t, 0] = self.xp.logaddexp(r[t - 1, 0], log_phi[t - 1]) + xs[t]
            r[t, 1] = self.xp.logaddexp(r[t - 1, 0], r[t - 1, 1]) + self.x[t, self.blank]
            log_psi = self.xp.logaddexp(log_psi, log_phi[t - 1] + xs[t])
        # get P(...eos|X) that ends with the prefix itself
        eos_pos = self.xp.where(cs == self.eos)[0]
        if len(eos_pos) > 0:
            log_psi[eos_pos] = r_sum[-1]  # log(r_T^n(g) + r_T^b(g))
        # return the log prefix probability and CTC states, where the label axis
        # of the CTC states is moved to the first axis to slice it easily
        return log_psi, self.xp.rollaxis(r, 2)
b1e1ec7856eb200d4d8e65977bd452c0946e7d9b | 10,560 | py | Python | h2o-py/h2o/astfun.py | ahmedengu/h2o-3 | ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11 | [
"Apache-2.0"
] | 2 | 2019-09-02T15:49:45.000Z | 2019-09-02T16:01:58.000Z | h2o-py/h2o/astfun.py | ahmedengu/h2o-3 | ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11 | [
"Apache-2.0"
] | 2 | 2021-06-02T02:24:03.000Z | 2021-11-15T17:51:49.000Z | h2o-py/h2o/astfun.py | ahmedengu/h2o-3 | ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11 | [
"Apache-2.0"
] | 1 | 2019-12-09T03:07:04.000Z | 2019-12-09T03:07:04.000Z | # -*- encoding: utf-8 -*-
"""
Disassembly support.
:copyright: (c) 2016 H2O.ai
:license: Apache License Version 2.0 (see LICENSE for details)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from opcode import * # an undocumented builtin module
import inspect
from h2o.utils.compatibility import *
from .expr import ExprNode, ASTId
from . import h2o
#
# List of supported bytecode instructions.
#
BYTECODE_INSTRS = {
"BINARY_SUBSCR": "cols", # column slice; could be row slice?
"UNARY_POSITIVE": "+",
"UNARY_NEGATIVE": "-",
"UNARY_NOT": "!",
"BINARY_POWER": "**",
"BINARY_MULTIPLY": "*",
"BINARY_FLOOR_DIVIDE": "//",
"BINARY_TRUE_DIVIDE": "/",
"BINARY_DIVIDE": "/",
"BINARY_MODULO": "%",
"BINARY_ADD": "+",
"BINARY_SUBTRACT": "-",
"BINARY_AND": "&",
"BINARY_OR": "|",
"COMPARE_OP": "", # some cmp_op
# Calls a function. argc indicates the number of positional arguments. The positional arguments
# are on the stack, with the right-most argument on top. Below the arguments, the function o
# bject to call is on the stack. Pops all function arguments, and the function
# itself off the stack, and pushes the return value.
"CALL_FUNCTION": "",
# Calls a function. argc indicates the number of arguments (positional and keyword).
# The top element on the stack contains a tuple of keyword argument names.
# Below the tuple, keyword arguments are on the stack, in the order corresponding to the tuple.
# Below the keyword arguments, the positional arguments are on the stack, with the
# right-most parameter on top. Below the arguments, the function object to call is on the stack.
# Pops all function arguments, and the function itself off the stack,
# and pushes the return value.
"CALL_FUNCTION_KW" : "",
}
def is_bytecode_instruction(instr):
    """True if *instr* is one of the opcode names this module can translate."""
    return instr in BYTECODE_INSTRS
def is_comp(instr):
    """True for comparison opcodes (COMPARE_OP)."""
    return instr.find("COMPARE") != -1
def is_binary(instr):
    """True for BINARY_* opcodes (two operands popped from the stack)."""
    return instr.find("BINARY") != -1
def is_unary(instr):
    """True for UNARY_* opcodes (one operand popped from the stack)."""
    return instr.find("UNARY") != -1
def is_func(instr):
    """True for the plain positional-call opcode."""
    return instr == "CALL_FUNCTION"
def is_func_kw(instr):
    """True for the keyword-call opcode."""
    return instr == "CALL_FUNCTION_KW"
def is_load_fast(instr):
    """True when the opcode loads a local variable."""
    return instr == "LOAD_FAST"
def is_attr(instr):
    """True when the opcode loads an attribute."""
    return instr == "LOAD_ATTR"
def is_load_global(instr):
    """True when the opcode loads a global name."""
    return instr == "LOAD_GLOBAL"
def is_return(instr):
    """True for the function-return opcode."""
    return instr == "RETURN_VALUE"
try:
    # Python 3: reuse the private helper from the stdlib dis module.
    from dis import _unpack_opargs
except ImportError:
    # Reimplement from Python3 in Python2 syntax.
    # Yields (offset, opcode, oparg) for every instruction in the raw
    # bytecode, folding EXTENDED_ARG prefixes into the next instruction's
    # argument; oparg is None for argument-less instructions.
    def _unpack_opargs(code):
        extended_arg = 0  # carries the value of a preceding EXTENDED_ARG
        i = 0
        n = len(code)
        while i < n:
            # Python 2 bytecode is a str, so each byte must be ord()'d.
            op = ord(code[i]) if PY2 else code[i]
            pos = i
            i += 1
            if op >= HAVE_ARGUMENT:
                if PY2:
                    arg = ord(code[i]) + ord(code[i+1])*256 + extended_arg
                else:  # 2-byte little-endian oparg (Python 3 through 3.5 wordcode predecessor)
                    arg = code[i] + code[i+1]*256 + extended_arg
                extended_arg = 0
                i += 2
                if op == EXTENDED_ARG:
                    # Shift into the high bytes of the following oparg.
                    extended_arg = arg*65536
            else:
                arg = None
            yield (pos, op, arg)
def _disassemble_lambda(co):
    """Disassemble a code object into a list of [opname, resolved_args] pairs.
    Each instruction's numeric argument is resolved against the code
    object's co_consts / co_names / co_varnames tables (or cmp_op for
    comparisons); argument-less instructions get an empty args list.
    :param co: code object of the lambda being translated
    :return: list of [opname, args] in bytecode order
    """
    code = co.co_code
    ops = []
    for offset, op, arg in _unpack_opargs(code):
        args = []
        if arg is not None:
            if op in hasconst:
                args.append(co.co_consts[arg])  # constant operand (LOAD_CONST)
            elif op in hasname:
                args.append(co.co_names[arg])  # global/attribute name
            elif op in hasjrel:
                # Jumps (i.e. control flow inside the lambda) are unsupported.
                raise ValueError("unimpl: op in hasjrel")
            elif op in haslocal:
                args.append(co.co_varnames[arg])  # local name (LOAD_FAST)
            elif op in hascompare:
                args.append(cmp_op[arg])  # comparison operator string (COMPARE_OP)
            elif is_func(opname[op]) or is_func_kw(opname[op]):
                args.append(arg)  # oparg == nargs(fcn)
        ops.append([opname[op], args])
    return ops
def lambda_to_expr(fun):
    """Translate a user-supplied lambda into a Rapids AST expression list."""
    code_obj = fun.__code__
    return _lambda_bytecode_to_ast(code_obj, _disassemble_lambda(code_obj))
def _lambda_bytecode_to_ast(co, ops):
    """Convert disassembled lambda bytecode into a Rapids AST list.
    The result has the shape [{, param-ids..., ., body, }] -- a Rapids
    function literal whose parameters are the lambda's variable names.
    :param co: lambda's code object (supplies co_varnames)
    :param ops: [opname, args] pairs produced by _disassemble_lambda
    :raises ValueError: if the bytecode contains untranslatable instructions
    """
    # have a stack of ops, read from R->L to get correct oops
    s = len(ops) - 1
    keys = [o[0] for o in ops]
    result = [ASTId("{")] + [ASTId(arg) for arg in co.co_varnames] + [ASTId(".")]
    instr = keys[s]
    # The trailing RETURN_VALUE carries no information; step over it.
    if is_return(instr):
        s -= 1
        instr = keys[s]
    if is_bytecode_instruction(instr) or is_load_fast(instr) or is_load_global(instr):
        body, s = _opcode_read_arg(s, ops, keys)
    else:
        raise ValueError("unimpl bytecode instr: " + instr)
    # A fully consumed instruction stack ends at index 0; leftovers mean the
    # lambda used constructs this translator does not understand.
    if s > 0:
        print("Dumping disassembled code: ")
        for i in range(len(ops)):
            if i == s:
                print(i, " --> " + str(ops[i]))
            else:
                print(i, str(ops[i]).rjust(5))
        raise ValueError("Unexpected bytecode disassembly @ " + str(s))
    result += [body] + [ASTId("}")]
    return result
def _opcode_read_arg(start_index, ops, keys):
    """Recursively translate the instruction at *start_index* into an AST node.
    Dispatches on the opcode kind and returns [node, next_index], where
    next_index is the next stack position to consume (the disassembly is
    read right-to-left).
    """
    instr = keys[start_index]
    return_idx = start_index - 1
    if is_bytecode_instruction(instr):
        if is_binary(instr):
            return _binop_bc(BYTECODE_INSTRS[instr], return_idx, ops, keys)
        elif is_comp(instr):
            # For COMPARE_OP the operator string is the instruction argument.
            return _binop_bc(ops[start_index][1][0], return_idx, ops, keys)
        elif is_unary(instr):
            return _unop_bc(BYTECODE_INSTRS[instr], return_idx, ops, keys)
        elif is_func(instr):
            # The CALL_FUNCTION argument encodes the argument count.
            return _call_func_bc(ops[start_index][1][0], return_idx, ops, keys)
        elif is_func_kw(instr):
            return _call_func_kw_bc(ops[start_index][1][0], return_idx, ops, keys)
        else:
            raise ValueError("unimpl bytecode op: " + instr)
    elif is_load_fast(instr):
        return [_load_fast(ops[start_index][1][0]), return_idx]
    elif is_load_global(instr):
        return [_load_global(ops[start_index][1][0]), return_idx]
    # Anything else (e.g. LOAD_CONST) contributes its literal value directly.
    return [ops[start_index][1][0], return_idx]
def _binop_bc(op, idx, ops, keys):
    """Build a binary Rapids expression; operands are read right-hand side first."""
    rhs, idx = _opcode_read_arg(idx, ops, keys)
    lhs, idx = _opcode_read_arg(idx, ops, keys)
    return [ExprNode(op, lhs, rhs), idx]
def _unop_bc(op, idx, ops, keys):
    """Build a unary Rapids expression from the single operand on the stack."""
    operand, idx = _opcode_read_arg(idx, ops, keys)
    return [ExprNode(op, operand), idx]
def _call_func_bc(nargs, idx, ops, keys):
    """
    Implements transformation of CALL_FUNCTION bc inst to Rapids expression.
    The implementation follows definition of behavior defined in
    https://docs.python.org/3/library/dis.html
    :param nargs: number of arguments including keyword and positional arguments
        (legacy encoding: high byte counts keyword args, low byte positional)
    :param idx: index of current instruction on the stack
    :param ops: stack of instructions
    :param keys: names of instructions
    :return: ExprNode representing method call
    """
    named_args = {}
    unnamed_args = []
    args = []
    # Extract arguments based on calling convention for CALL_FUNCTION_KW
    while nargs > 0:
        if nargs >= 256:  # named args ( foo(50,True,x=10) ) read first ( right -> left )
            arg, idx = _opcode_read_arg(idx, ops, keys)
            named_args[ops[idx][1][0]] = arg
            idx -= 1  # skip the LOAD_CONST for the named args
            nargs -= 256  # drop 256
        else:
            arg, idx = _opcode_read_arg(idx, ops, keys)
            unnamed_args.insert(0, arg)
            nargs -= 1
    # LOAD_ATTR <method_name>: Map call arguments to a call of method on H2OFrame class
    op = ops[idx][1][0]
    args = _get_h2o_frame_method_args(op, named_args, unnamed_args) if is_attr(ops[idx][0]) else []
    # Map function name to proper rapids name
    op = _get_func_name(op, args)
    # Go to next instruction
    idx -= 1
    if is_bytecode_instruction(ops[idx][0]):
        # The receiver itself is a computed expression; prepend it.
        arg, idx = _opcode_read_arg(idx, ops, keys)
        args.insert(0, arg)
    elif is_load_fast(ops[idx][0]):
        # The receiver is the lambda's own parameter (e.g. the frame).
        args.insert(0, _load_fast(ops[idx][1][0]))
        idx -= 1
    return [ExprNode(op, *args), idx]
def _call_func_kw_bc(nargs, idx, ops, keys):
    """
    Implements transformation of CALL_FUNCTION_KW bc inst to Rapids expression.
    Per the CALL_FUNCTION_KW convention, the top of stack holds a tuple of
    keyword names, below it the keyword values, then the positional
    arguments with the right-most on top.
    :param nargs: total number of arguments (positional + keyword)
    :param idx: index of the instruction below the keyword-name tuple
    :param ops: stack of instructions
    :param keys: names of instructions
    :return: [ExprNode representing the call, next index]
    """
    named_args = {}
    unnamed_args = []
    # Read tuple of keyword argument names (pushed by a LOAD_CONST).
    keyword_args = ops[idx][1][0]
    # Skip the LOAD_CONST tuple
    idx -= 1
    # Load keyword arguments from stack
    # NOTE(review): values are read top-of-stack first (i.e. last keyword's
    # value first) while names iterate in tuple order -- verify the pairing
    # for calls with two or more keyword arguments.
    for keyword_arg in keyword_args:
        arg, idx = _opcode_read_arg(idx, ops, keys)
        named_args[keyword_arg] = arg
        nargs -= 1
    # Load positional arguments from stack
    while nargs > 0:
        arg, idx = _opcode_read_arg(idx, ops, keys)
        unnamed_args.insert(0, arg)
        nargs -= 1
    # LOAD_ATTR <method_name>: Map call arguments to a call of method on H2OFrame class
    op = ops[idx][1][0]
    args = _get_h2o_frame_method_args(op, named_args, unnamed_args) if is_attr(ops[idx][0]) else []
    # Map function name to proper rapids name
    op = _get_func_name(op, args)
    # Go to next instruction
    idx -= 1
    if is_bytecode_instruction(ops[idx][0]):
        arg, idx = _opcode_read_arg(idx, ops, keys)
        args.insert(0, arg)
    elif is_load_fast(ops[idx][0]):
        args.insert(0, _load_fast(ops[idx][1][0]))
        idx -= 1
    return [ExprNode(op, *args), idx]
def _get_h2o_frame_method_args(op, named_args, unnamed_args):
    """Build the full positional argument list for H2OFrame method *op*.
    Missing trailing arguments are filled from the method's declared
    defaults, then keyword arguments overwrite their slot by name.
    :raises ValueError: if H2OFrame has no method named *op*
    """
    fr_cls = h2o.H2OFrame
    if not hasattr(fr_cls, op):
        raise ValueError("Unimplemented: op <%s> not bound in H2OFrame" % op)
    if PY2:
        # NOTE(review): getargspec's defaults cover only defaulted params, so
        # the slice below assumes all non-defaulted params arrived
        # positionally -- verify for methods with required args.
        argspec = inspect.getargspec(getattr(fr_cls, op))
        argnames = argspec.args[1:]  # drop self
        argdefs = list(argspec.defaults or [])
    else:
        argnames = []
        argdefs = []
        for name, param in inspect.signature(getattr(fr_cls, op)).parameters.items():
            if name == "self": continue
            if param.kind == inspect._VAR_KEYWORD: continue
            argnames.append(name)
            argdefs.append(param.default)
    # Positional args first; remaining slots filled with declared defaults.
    args = unnamed_args + argdefs[len(unnamed_args):]
    for a in named_args: args[argnames.index(a)] = named_args[a]
    return args
def _get_func_name(op, args):
if op == "ceil": op = "ceiling"
if op == "sum" and len(args) > 0 and args[0]: op = "sumNA"
if op == "min" and len(args) > 0 and args[0]: op = "minNA"
if op == "max" and len(args) > 0 and args[0]: op = "maxNA"
if op == "nacnt": op = "naCnt"
return op
def _load_fast(x):
    # Wrap a local-variable name in ASTId so Rapids treats it as an identifier.
    return ASTId(x)
def _load_global(x):
if x == 'True':
return True
elif x == 'False':
return False
return x
| 33.846154 | 100 | 0.619223 |
e822a7bea72bed49e55f3e323ba3838496cd5039 | 3,940 | py | Python | tests/test_device.py | giltom/megastone | 0ef02c724d53acf7a06430f58bc5f777676d78a9 | [
"MIT"
] | 2 | 2021-04-03T01:55:09.000Z | 2021-04-12T15:12:10.000Z | tests/test_device.py | giltom/megastone | 0ef02c724d53acf7a06430f58bc5f777676d78a9 | [
"MIT"
] | null | null | null | tests/test_device.py | giltom/megastone | 0ef02c724d53acf7a06430f58bc5f777676d78a9 | [
"MIT"
] | null | null | null | from megastone.device import DeviceFaultError
import pytest
import megastone as ms
DEV_ADDR = 0x2400
DEV_SIZE = 0x100
REGDEV_ADDR = 0x2800
CODE_ADDR = 0x1000
SEG_SIZE = 0x1000
READ_CHAR = b'A'
WRITE_CHAR = b'B'
class MyDevice(ms.Device):
    """Test double for ms.Device: reads return a constant fill, writes are recorded."""
    def __init__(self, name: str, address: int, size: int):
        super().__init__(name, address, size)
        self.writes = []  # list of (offset, data) tuples, in write order
    def read(self, offset: int, size: int) -> bytes:
        # Constant fill so tests can assert on 0x41 ('A') bytes.
        return READ_CHAR * size
    def write(self, offset: int, data: bytes):
        # Record rather than store, so tests can inspect the exact accesses.
        self.writes.append((offset, data))
class MyRegDevice(ms.RegisterDevice):
    """Test double for ms.RegisterDevice with three word-aligned registers.
    reg0/reg1 have read/write handlers; reg2 deliberately has none so the
    framework's no-handler behavior can be exercised (see test_no_func).
    """
    # Offset -> register-name map; presumably ms.RegisterDevice dispatches
    # to read_<name>/write_<name> handlers -- see megastone docs to confirm.
    offsets = {
        0x0: 'reg0',
        0x4: 'reg1',
        0x8: 'reg2'
    }
    def __init__(self, name: str, address: int):
        super().__init__(name, address, 0x20)
        # Each list records the values written to the matching register.
        self.reg0 = []
        self.reg1 = []
        self.reg2 = []
    def read_reg0(self):
        return 0xAA
    def write_reg0(self, value):
        self.reg0.append(value)
    def read_reg1(self):
        return 0x12345678
    def write_reg1(self, value):
        self.reg1.append(value)
def init_code(emu, assembly):
    """Assemble *assembly* at CODE_ADDR; returns whatever emu.mem.write_code returns."""
    return emu.mem.write_code(CODE_ADDR, assembly)
@pytest.fixture
def dev():
    """Fresh recording device mapped at DEV_ADDR."""
    return MyDevice('MyDevice', DEV_ADDR, DEV_SIZE)
@pytest.fixture
def regdev():
    """Fresh register device mapped at REGDEV_ADDR."""
    return MyRegDevice('MyRegDevice', REGDEV_ADDR)
@pytest.fixture
def emu(dev, regdev):
    """ARM emulator with a code segment mapped and both devices attached."""
    emu = ms.Emulator(ms.ARCH_ARM)
    emu.mem.map(CODE_ADDR, SEG_SIZE, 'code')
    emu.jump(CODE_ADDR)
    dev.attach(emu)
    regdev.attach(emu)
    return emu
def test_read(emu):
    """A halfword load from the device region sees the device's 'A' fill."""
    init_code(emu, f'LDR R0, =0x{DEV_ADDR:X}; LDRH R0, [R0]')
    emu.run(2)
    assert emu.regs.r0 == 0x4141
def test_write(dev, emu):
    """Word/halfword/byte stores reach the device with correct offset and width."""
    init_code(emu, f"""
        LDR R0, =0x{DEV_ADDR:X}
        LDR R1, =0x42424242
        STR R1, [R0]
        STRH R1, [R0, #10]
        STRB R1, [R0, #4]
    """)
    emu.run(5)
    assert dev.writes == [(0, WRITE_CHAR*4), (10, WRITE_CHAR*2), (4, WRITE_CHAR)]
def test_detach(dev, emu):
    """After detaching, reads fall through to plain (zeroed) memory."""
    dev.detach()
    init_code(emu, f'LDR R0, =0x{DEV_ADDR:X}; LDRH R0, [R0]')
    emu.run(2)
    assert emu.regs.r0 == 0
def test_reg_read(emu):
    """Register reads dispatch to read_reg0/read_reg1; sub-word reads are truncated."""
    init_code(emu, f'LDR R0, =0x{REGDEV_ADDR:X}; LDR R1, [R0]; LDRH R2, [R0, #4]')
    emu.run(3)
    assert emu.regs.r1 == 0xAA
    assert emu.regs.r2 == 0x5678
def test_reg_write(regdev, emu):
    """Register writes dispatch by offset; narrow stores deliver truncated values."""
    init_code(emu, f"""
        LDR R0, =0x{REGDEV_ADDR:X}
        LDR R1, =0xAABBCCDD
        STR R1, [R0, #0]
        STRH R1, [R0, #4]
        STRB R1, [R0, #4]
    """)
    emu.run(5)
    assert regdev.reg0 == [0xAABBCCDD]
    assert regdev.reg1 == [0xCCDD, 0xDD]
def test_reg_name_read(regdev):
    """Registers can be read directly by name from host code."""
    assert regdev.reg_read('reg1') == 0x12345678
def test_reg_name_write(regdev):
    """Registers can be written directly by name from host code."""
    regdev.reg_write('reg0', 0x11)
    assert regdev.reg0 == [0x11]
def test_no_func(emu, regdev):
    """A register without handlers behaves like backing memory and触 no handler lists."""
    init_code(emu, f"""
        LDR R0, =0x{REGDEV_ADDR:X}
        LDR R1, =0xBABAFEFE
        STR R1, [R0, #8]
        LDR R2, [R0, #8]
    """)
    emu.run(4)
    assert emu.regs.r2 == 0xBABAFEFE
    assert regdev.reg0 == []
    assert regdev.reg1 == []
def test_bad_offset(emu, regdev):
    """An unaligned register access raises DeviceFaultError with full fault info."""
    init_code(emu, f"""
        LDR R0, =0x{REGDEV_ADDR:X}
        STR R1, [R0, #2]
    """)
    with pytest.raises(DeviceFaultError) as info:
        emu.run(2)
    # The faulting STR is the second instruction, i.e. CODE_ADDR + 4.
    assert info.value.address == CODE_ADDR + 4
    assert info.value.access == ms.Access(ms.AccessType.W, REGDEV_ADDR+2, 4, bytes(4))
    assert info.value.device == regdev
    assert repr(info.value.access) in repr(info.value)
def test_double_attach(emu, dev):
    """Attaching an already-attached device is an error."""
    with pytest.raises(RuntimeError):
        dev.attach(emu)
def test_double_detach(emu, dev):
    """Detaching twice is an error."""
    dev.detach()
    with pytest.raises(RuntimeError):
        dev.detach()
def test_existing_map(emu):
    """A device can be attached over an already-mapped region (here: the code segment)."""
    addr = CODE_ADDR+0x200
    dev = MyDevice('mydev2', addr, 0x100)
    dev.attach(emu)
    init_code(emu, f'LDR R0, =0x{addr:X}; STR R0, [R0]')
    emu.run(2)
    assert dev.writes == [(0, addr.to_bytes(4, 'little'))]
a9c4fccf1bdbc6c7ff5b627fc9362b875bbe90e9 | 875 | py | Python | tests/rules/test_missing_space_before_subcommand.py | eoinjordan/thefeck | e04f50409ba3069ec6a9f7c0aab39ca835a41b68 | [
"MIT"
] | null | null | null | tests/rules/test_missing_space_before_subcommand.py | eoinjordan/thefeck | e04f50409ba3069ec6a9f7c0aab39ca835a41b68 | [
"MIT"
] | null | null | null | tests/rules/test_missing_space_before_subcommand.py | eoinjordan/thefeck | e04f50409ba3069ec6a9f7c0aab39ca835a41b68 | [
"MIT"
] | null | null | null | import pytest
from thefeck.rules.missing_space_before_subcommand import (
match, get_new_command)
from thefeck.types import Command
@pytest.fixture(autouse=True)
def all_executables(mocker):
    """Stub the executable lookup so the rule sees a fixed, known command set."""
    return mocker.patch(
        'thefeck.rules.missing_space_before_subcommand.get_all_executables',
        return_value=['git', 'ls', 'npm'])
# Scripts whose prefix is a known executable should match the rule.
@pytest.mark.parametrize('script', [
    'gitbranch', 'ls-la', 'npminstall'])
def test_match(script):
    assert match(Command(script, ''))
# Already-spaced commands and unknown executables must not match.
@pytest.mark.parametrize('script', ['git branch', 'vimfile'])
def test_not_match(script):
    assert not match(Command(script, ''))
# The fix inserts a space after the recognized executable prefix.
@pytest.mark.parametrize('script, result', [
    ('gitbranch', 'git branch'),
    ('ls-la', 'ls -la'),
    ('npminstall webpack', 'npm install webpack')])
def test_get_new_command(script, result):
    assert get_new_command(Command(script, '')) == result
| 28.225806 | 76 | 0.702857 |
e413b60e64612be9fc90fa51f8d81f01e0ec45f8 | 8,516 | py | Python | src/m4_functions_vs_methods.py | lipkower/02-ObjectsFunctionsAndMethods | 426fcc59ee4337314621721e2f6a1f8fe094448c | [
"MIT"
] | null | null | null | src/m4_functions_vs_methods.py | lipkower/02-ObjectsFunctionsAndMethods | 426fcc59ee4337314621721e2f6a1f8fe094448c | [
"MIT"
] | null | null | null | src/m4_functions_vs_methods.py | lipkower/02-ObjectsFunctionsAndMethods | 426fcc59ee4337314621721e2f6a1f8fe094448c | [
"MIT"
] | null | null | null | """
Demonstrates using (calling) FUNCTIONS and using (calling) METHODS:
-- what is similar, and
-- how they differ.
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Aaron Wilkin, their colleagues, and Emma Lipkowski.
""" # done: 1. PUT YOUR NAME IN THE ABOVE LINE.
###############################################################################
# done: 2.
# With your instructor, READ the file methods_vs_functions.txt
# in this project, ASKING QUESTIONS as needed to understand its contents.
# After you have done so, mark this _TODO_ as DONE
# and continue to the next _TODO_.
#
###############################################################################
import rosegraphics as rg
def main():
    """
    Makes a TurtleWindow, calls the other functions in this module
    to test and/or demonstrate them, and waits for the user to click
    anywhere in the window to close it.
    """
    window = rg.TurtleWindow()
    # The demos share one window and draw in this order.
    run_example()
    try_methods()
    try_functions()
    try_methods_and_functions()
    # Blocks until the user clicks, then closes the window.
    window.close_on_mouse_click()
def run_example():
    """ An example of CALLING functions defined below. """
    # Jump (pen up) to (100, 50), then draw a line to (200, -100).
    jump_and_move_turtle(100, 50, 200, -100)
    turtle = rg.SimpleTurtle('square')
    turtle.speed = 30
    # 3 squares of size 75, each rotated 15 degrees from the previous.
    draw_many_squares(turtle, 3, 75, 15)
def jump_and_move_turtle(x1, y1, x2, y2):
    """
    Constructs a thick, slow, magenta SimpleTurtle.
    Jumps that SimpleTurtle (without drawing) to (x1, y1),
    then moves that Turtle (while drawing) to (x2, y2).
    :type x1: int
    :type y1: int
    :type x2: int
    :type y2: int
    """
    # -------------------------------------------------------------------------
    # Students:
    #   Do NOT touch this function - it has no TO-DO in it.
    #   Do NOT copy code from this function.
    #
    #   Instead, ** CALL ** this function as needed in the other problems.
    # -------------------------------------------------------------------------
    jumper = rg.SimpleTurtle()
    jumper.pen = rg.Pen('magenta', 20)
    jumper.speed = 3
    # Pen up while repositioning so the jump leaves no trace.
    jumper.pen_up()
    jumper.go_to(rg.Point(x1, y1))
    jumper.pen_down()
    jumper.go_to(rg.Point(x2, y2))
def draw_many_squares(my_turtle, number_of_squares, size, twist):
    """
    Makes the given SimpleTurtle object draw:
      -- many squares (how many?  answer: NUMBER_OF_SQUARES)
    where each square:
      -- has the same size (what size?  answer: SIZE)
    and each square is:
      -- "twisted" a bit from the previous one (how much?  TWIST degrees)
    NOTE: The 3 lines below that begin with  :type  are called
    "type hints".  They make the "dot" trick work more effectively.
    We will include them in function specifications routinely.
    :type my_turtle: rg.SimpleTurtle
    :type number_of_squares: int
    :type size: int
    :type twist: int
    """
    # -------------------------------------------------------------------------
    # Students:
    #   Do NOT touch this function - it has no TO-DO in it.
    #   Do NOT copy code from this function.
    #
    #   Instead, ** CALL ** this function as needed in the other problems.
    # -------------------------------------------------------------------------
    # "_" signals that the loop variable itself is unused.
    for _ in range(number_of_squares):
        my_turtle.draw_square(size)
        my_turtle.left(twist)
###############################################################################
# done: 3.
# There are four FUNCTIONS defined ABOVE this:
# main
# run_example
# jump_and_move_turtle
# draw_many_squares
#
# 1. RUN this program.
# 2. TRACE the code, identifying what code causes what to be drawn.
# 3. READ the specifications for the following functions:
# jump_and_move_turtle
# draw_many_squares
#
# IMPORTANT: Keep those two functions (and what they do) in mind,
# since you will use them in the:
# try_functions
# try_methods_and_functions
# exercises below.
#
# Once you believe that you completely understand what the functions:
# jump_and_move_turtle
# draw_many_squares
# do, mark this _TODO_ as DONE and continue to the next _TODO_.
#
###############################################################################
def try_methods():
    """
    Constructs a SimpleTurtle and sets its pen to a new rg.Pen
    that is 'brown' with thickness 5.
    Then makes the SimpleTurtle move as follows (in the order listed):
      -- forward 150 units
      -- left 90 degrees
      -- forward 50 units
      -- backward 100 units
    """
    ###########################################################################
    # done: 4. Implement and test this function, per its doc-string above.
    #   The testing code (in main) is already written for you.
    ###########################################################################
    william = rg.SimpleTurtle()
    william.pen = rg.Pen('brown', 5)
    # Moves mirror the doc-string, in order.
    william.forward(150)
    william.left(90)
    william.forward(50)
    william.backward(100)
###############################################################################
# IMPORTANT: Read the NOTE below before you try to implement the next function!
###############################################################################
def try_functions():
    """
    Causes several SimpleTurtles to do the following:
      -- One jumps to (200, 100), then moves (while drawing) to (300, 30)
      -- One jumps to (100, 200), then moves (while drawing) to (0, 0)
      -- One jumps to (-50, 50), then moves (while drawing) to (100, 100)
    """
    ###########################################################################
    # done: 5. Implement and test this function, per its doc-string above.
    #   The testing code (in main) is already written for you.
    #
    # NOTE: This function requires
    #          ** exactly 3 lines **
    #   If you think it needs more, ** ASK FOR HELP. **
    # HINT: see  jump_and_move_turtle  above.
    ###########################################################################
    # Each call constructs its own turtle: jump (pen up) then draw a line.
    jump_and_move_turtle(200, 100, 300, 30)
    jump_and_move_turtle(100, 200, 0, 0)
    jump_and_move_turtle(-50, 50, 100, 100)
###############################################################################
# IMPORTANT: Read the NOTE below before you try to implement the next function!
###############################################################################
def try_methods_and_functions():
    """
    Constructs a SimpleTurtle and sets its pen to a new rg.Pen
    that is 'blue' with thickness 5.
    Then makes the SimpleTurtle do the following (in the order listed):
      1. Go backward 150 units.
      2. Change its speed to 1 (slowest).
         Draw 2 squares whose size (width and height) are 100,
         each "twisted" from the previous by 30 degrees.
      3. Change its speed to 5 (faster).
         Change its Pen's color to 'red'.
         Draw 10 squares whose size (width and height) are 50,
         each "twisted" from the previous by 15 degrees.
      4. Change its speed to 100 (about the fastest possible).
         Change its Pen's thickness to 35.
         Draw 8 squares whose size (width and height) are 300,
         each "twisted" from the previous by 60 degrees.
      5. Change its Pen to be a NEW Pen whose color is 'black'
         and whose thickness is 3.
      6. Go backward 200 units.
      7. Draw a CIRCLE whose radius is 30.
      8. Draw a SQUARE whose sides are each of length 50.
    """
    ###########################################################################
    # done: 6. Implement and test this function, per its doc-string above.
    #   The testing code (in main) is already written for you.
    #
    # NOTE: This function should ** CALL ** the
    #          draw_many_squares
    #       function defined above. If you don't see why, ** ASK FOR HELP. **
    ###########################################################################
    billy = rg.SimpleTurtle()
    billy.pen = rg.Pen('blue', 5)
    billy.backward(150)                  # step 1
    billy.speed = 1                      # step 2
    draw_many_squares(billy, 2, 100, 30)
    billy.speed = 5                      # step 3
    billy.pen = rg.Pen('red', 5)
    # Bug fix: the doc-string (step 3) requires 10 squares; this drew 5.
    draw_many_squares(billy, 10, 50, 15)
    billy.speed = 100                    # step 4
    billy.pen = rg.Pen('red', 35)
    draw_many_squares(billy, 8, 300, 60)
    billy.pen = rg.Pen('black', 3)       # step 5
    billy.backward(200)                  # step 6
    billy.draw_circle(30)                # step 7
    billy.draw_square(50)                # step 8
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main()
| 34.33871 | 79 | 0.533701 |
90f078fa32a53e7510ef63d009dcb374b0201ab2 | 981 | py | Python | tests/client.py | julianss/flr | c91b3e94fa96b81d54d0d53ae685b8e9b458ec03 | [
"MIT"
] | 2 | 2020-09-09T23:28:25.000Z | 2021-01-22T18:20:56.000Z | tests/client.py | julianss/flr | c91b3e94fa96b81d54d0d53ae685b8e9b458ec03 | [
"MIT"
] | null | null | null | tests/client.py | julianss/flr | c91b3e94fa96b81d54d0d53ae685b8e9b458ec03 | [
"MIT"
] | 2 | 2020-10-08T20:02:38.000Z | 2020-10-27T01:48:43.000Z | import requests
class FlareClient():
    """Thin HTTP client for a Flare server's /auth and /call JSON endpoints."""
    def __init__(self, host="http://localhost:6800"):
        # Base URL of the server; no trailing slash expected.
        self.host = host
        # Populated with the Bearer token after a successful login.
        self.headers = {}
    def login(self, login, password, email=False):
        """Authenticate and store the bearer token for subsequent calls.
        :param login: login name, or an email address when email=True
        :param password: account password
        :param email: authenticate by 'email' field instead of 'login'
        """
        resp = requests.post(self.host + "/auth", json={
            ('login' if not email else 'email'): login,
            'password': password,
        }).json()
        if "result" in resp:
            # NOTE(review): "succesful" is misspelled in this user-visible
            # message; left unchanged here because it is runtime output.
            print("Login succesful")
            self.headers = {'Authorization': 'Bearer ' + resp["result"]}
        else:
            # On failure the raw error response is printed, not raised.
            print(resp)
    def call(self, model, method, *args, **kwargs):
        """Invoke *method* on *model* server-side and return its result.
        :raises Exception: with the server-reported message on error
        """
        resp = requests.post(self.host + "/call", headers=self.headers, json={
            'model': model,
            'method': method,
            'args': args,
            'kwargs': kwargs
        })
        resp = resp.json()
        if "result" in resp:
            return resp["result"]
        else:
            raise Exception(resp["error"]["message"])
| 31.645161 | 79 | 0.50051 |
e0b7721aa9d2ef8a2ab5aa01697884bc10d162f9 | 6,196 | py | Python | gbf_raid_bot/calibration/__init__.py | Frizz925/gbf-raid-bot | 2e063500c8c561d8d82851f73824b4c210989309 | [
"MIT"
] | 2 | 2018-03-15T19:22:13.000Z | 2020-04-07T15:12:47.000Z | gbf_raid_bot/calibration/__init__.py | Frizz925/gbf-raid-bot | 2e063500c8c561d8d82851f73824b4c210989309 | [
"MIT"
] | null | null | null | gbf_raid_bot/calibration/__init__.py | Frizz925/gbf-raid-bot | 2e063500c8c561d8d82851f73824b4c210989309 | [
"MIT"
] | null | null | null | from gbf_raid_bot.config import ConfigReader, ConfigGenerator
from gbf_raid_bot.utilities import Timer
from gbf_raid_bot import OFFSETS
from abc import ABC, abstractmethod
from mss import mss as MSS
from PIL import Image
from threading import Thread, Lock
from pymouse import PyMouseEvent
import time
import numpy as np
import pyautogui
import cv2
address_bar_offset = OFFSETS["game"][1]
class ImageCrop:
    """Interactive rectangle selection over an image in an OpenCV window.
    The user drags with the left button to pick a region; ENTER accepts,
    ESCAPE cancels. After stop(), the selection is in self.point and the
    outcome in self.accepted.
    """
    def __init__(self):
        self.original = None   # pristine image; redrawn over on every drag
        self.image = None      # image currently shown (with rubber-band overlay)
        self.crop = False      # True while a drag is in progress
        self.point = []        # [(x1, y1)] during drag, [(x1, y1), (x2, y2)] after
        self.threads = [
            # Thread(target=self.previewLoop),
            Thread(target=self.imageLoop)
        ]
        # NOTE(review): this lock is never acquired below -- presumably meant
        # to guard self.image between the mouse callback and imageLoop; verify.
        self.lock = Lock()
        cv2.namedWindow("Image")
        cv2.setMouseCallback("Image", self.mouseListener)
    def setImage(self, image):
        """Set the image to crop (numpy array as used by cv2)."""
        self.original = image
        self.image = image
    def mouseListener(self, event, x, y, flags, param):
        """cv2 mouse callback implementing the press/drag/release state machine."""
        if event == cv2.EVENT_LBUTTONDOWN and not self.crop:
            # Start a new selection at the press point.
            self.point = [(x, y)]
            self.image = self.original.copy()
            self.crop = True
        elif event == cv2.EVENT_MOUSEMOVE and self.crop:
            # Redraw the rubber-band rectangle (green) while dragging.
            self.image = self.original.copy()
            cv2.rectangle(
                self.image, self.point[0], (x, y),
                (0, 255, 0),
                thickness=1
            )
        elif event == cv2.EVENT_LBUTTONUP and self.crop:
            # Finalize the selection and draw it in magenta.
            self.point.append((x, y))
            self.image = self.original.copy()
            self.crop = False
            cv2.rectangle(
                self.image, self.point[0], self.point[1],
                (255, 0, 255),
                thickness=1
            )
    def previewLoop(self):
        """Optional live preview of the selected sub-image (disabled in threads)."""
        while self.running:
            # Busy-polls until a valid, non-degenerate selection exists.
            if len(self.point) < 2: continue
            left, top = self.point[0]
            right, bottom = self.point[1]
            width = right - left
            height = bottom - top
            if width <= 0: continue
            if height <= 0: continue
            cv2.imshow("Preview", self.original[top:bottom, left:right])
            cv2.waitKey(1)
    def imageLoop(self):
        """Main display loop; ~30 fps until ENTER (accept) or ESCAPE (cancel)."""
        cv2.moveWindow("Image", 0, 0)
        while self.running:
            cv2.imshow("Image", self.image)
            key = cv2.waitKey(int(round(1000 / 30))) & 0xFF
            # NOTE(review): 10 is LF; some platforms report ENTER as 13 (CR)
            # via waitKey -- confirm this works outside Linux/GTK builds.
            if key == 10:  # ENTER key
                self.accepted = True
                break
            elif key == 27:  # ESCAPE key
                break
        self.running = False
    def start(self):
        """Reset the accept flag and launch the display thread(s)."""
        self.running = True
        self.accepted = False
        for thread in self.threads:
            thread.start()
    def stop(self):
        """Block until the display loop exits, then tear down all cv2 windows."""
        for thread in self.threads:
            thread.join()
        cv2.destroyAllWindows()
class Calibration(ABC):
    """Base class for interactive calibration steps (screenshot + crop helpers)."""
    @abstractmethod
    def name(self):
        # Human-readable name of this calibration step.
        pass
    def calibrate(self):
        # Subclasses override to run their interactive flow; no-op by default.
        pass
    def regionFromMouse(self, region=None):
        """Let the user crop a region of a screenshot with the mouse.
        :param region: optional sub-region of the screen to capture first
        :return: ({'left','top','width','height'}, cropped image),
            or (None, None) if the user cancelled
        """
        print("Press ENTER to accept")
        print("Press ESCAPE to cancel")
        # Brief pause so the keypress that launched this step is not captured.
        time.sleep(0.5)
        img = self.screenshot(region)
        ic = ImageCrop()
        ic.setImage(img)
        ic.start()
        ic.stop()
        if not ic.accepted:
            return (None, None)
        left, top = ic.point[0]
        right, bottom = ic.point[1]
        width = right - left
        height = bottom - top
        return ({
            "left": left, "top": top,
            "width": width, "height": height
        }, img[top:bottom, left:right])
    def screenshot(self, region=None):
        """Capture the screen (or *region*) and return it as a numpy array.
        Defaults to the full virtual screen when region is None.
        """
        with MSS() as sct:
            if region is None:
                region = sct.enum_display_monitors()[0]
            sct.get_pixels(region)
            img = Image.frombytes("RGB", (sct.width, sct.height), sct.image)
            img = np.array(img)
            # Swap R<->B channels (the swap is symmetric) so the array matches
            # OpenCV's BGR channel convention for display and imwrite.
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        return img
    def showScreenshot(self, img):
        """Display *img* in a window and ask the user to confirm on the console.
        :return: True iff the user answered 'y'/'Y'
        """
        def task():
            cv2.imshow("Result", img)
            cv2.waitKey(0)
        # The window runs on its own thread so input() can block here.
        Thread(target=task).start()
        respond = input("Result (Y/N): ").lower() == "y"
        cv2.destroyAllWindows()
        return respond
    def saveTrainingImage(self, name, img):
        """Write *img* under training_sets/images/<name>.png and return the path."""
        filename = "training_sets/images/" + name + ".png"
        cv2.imwrite(filename, img)
        return filename
class ImageTraining(Calibration):
    """Calibration step: capture a cropped training image and register it in config."""
    def name(self):
        return "Image Training"
    def takeImage(self):
        """Crop a sub-image from the configured game-window region (None if cancelled)."""
        window_region = ConfigReader("advanced_settings").get("image_processing.window.region")
        region, img = self.regionFromMouse(window_region)
        return img
    def calibrate(self):
        """Prompt for a name, capture an image, save it, and record it in training_data."""
        name = input("Name: ")
        filename = input("Filename (leave empty if same as name): ")
        if filename == "":
            filename = name
        img = self.takeImage()
        if img is None:
            input("Cancelled.")
            return
        filename = self.saveTrainingImage(filename, img)
        config = ConfigReader("training_data")
        data = config.get(name)
        # The entry may hold a single path or a list of paths; normalize
        # while de-duplicating (set order is not preserved).
        if isinstance(data, list):
            data.append(filename)
            data = list(set(data))
        elif data is None:
            data = filename
        else:
            if data == filename:
                data = filename
            else:
                data = list(set([data, filename]))
        config.set(name, data).save()
class WindowRegion(Calibration):
    """Calibration step: let the user outline the game window on screen."""
    def name(self):
        return "Window Region"
    def takeRegion(self):
        """Return the user-selected screen region dict, or None if cancelled."""
        region, img = self.regionFromMouse()
        return region
    def calibrate(self):
        """Capture the window region and persist it in advanced_settings."""
        region = self.takeRegion()
        if region is None:
            input("Cancelled.")
            return
        # Expand upward to include the browser address bar (per OFFSETS["game"]).
        region["top"] -= address_bar_offset
        region["height"] += address_bar_offset
        (ConfigReader("advanced_settings")
            .set("image_processing.window.region", region)
            .save())
class ListenInterrupt(Exception):
    """Raised from a mouse callback to break out of a PyMouseEvent run loop."""
    pass
class BattlePattern(Calibration, PyMouseEvent):
    """Calibration step: record a sequence of clicks (a battle pattern) to config.
    NOTE(review): although this class inherits PyMouseEvent, its __init__
    never calls PyMouseEvent.__init__ and the actual listening is delegated
    to a separate MouseListener bound to self.click -- verify the
    inheritance is still needed.
    """
    def __init__(self):
        Calibration.__init__(self)
        self.settings = ConfigReader("advanced_settings")
        self.timer = Timer()
        self.region = None  # game-window region; loaded in calibrate()
    def name(self):
        return "Battle Pattern"
    def record(self, x, y):
        """Store one click as (x, y, elapsed_ms) relative to the window region.
        Clicks outside the region (to the right/bottom) are ignored.
        """
        x -= self.region["left"]
        y -= self.region["top"]
        if x > self.region["width"] or y > self.region["height"]:
            return
        print("Move %d: %d, %d" % (len(self.positions), x, y))
        elapsed = int(round(self.timer.elapsed(1000)))
        self.positions.append((x, y, elapsed))
    def calibrate(self):
        """Record left-clicks until a right-click, then save the pattern file."""
        pattern_name = input("Name: ")
        self.positions = []
        self.settings.load()
        self.timer.reset()
        self.region = self.settings.get("image_processing.window.region")
        listener = MouseListener(self.click)
        try:
            input("Press ENTER to start recording")
            # Blocks until self.click raises ListenInterrupt (right-click).
            listener.run()
        except ListenInterrupt:
            pass
        print("Recorded %d move(s)" % len(self.positions))
        print("Saving....")
        ConfigGenerator(pattern_name, self.positions, "patterns").save(indent=None)
        input("Saved.")
    def click(self, x, y, button, press):
        """Mouse handler: left press records a move; right press stops recording."""
        if button == 1 and press:
            self.record(x, y)
        elif button == 3 and press:
            raise ListenInterrupt
class MouseListener(PyMouseEvent):
    """Thin PyMouseEvent adapter that forwards click events to a callable."""
    def __init__(self, callback):
        PyMouseEvent.__init__(self)
        self.callback = callback  # called as callback(x, y, button, press)
    def click(self, x, y, button, press):
        # Forward the raw click event unchanged.
        self.callback(x, y, button, press)
| 23.119403 | 89 | 0.679148 |
adce71649c86c176b86ad4283af079e1ebb692c1 | 489 | py | Python | Primer/users/signals.py | RealBrandonChen/code_snippets | e7b83eeffc5c076a6585f2614fc6afed996d8381 | [
"MIT"
] | null | null | null | Primer/users/signals.py | RealBrandonChen/code_snippets | e7b83eeffc5c076a6585f2614fc6afed996d8381 | [
"MIT"
] | null | null | null | Primer/users/signals.py | RealBrandonChen/code_snippets | e7b83eeffc5c076a6585f2614fc6afed996d8381 | [
"MIT"
] | null | null | null | # Help add user's profile automatically everytime the users register
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.dispatch import receiver
from .models import Profile
@receiver(post_save, sender=User)
def created_profile(sender, instance, created, **kwargs):
    """Create a Profile for every newly registered User (post_save signal)."""
    # `created` is True only on the initial save, so existing users are untouched.
    if created:
        Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_profile(sender, instance, **kwargs):
    """Persist the related Profile whenever its User is saved (post_save signal)."""
    instance.profile.save()
03789845fcc45787256d6ac068a4aa3904415954 | 236 | py | Python | authentication/admin.py | maryamTha/zam20 | 136014bd8c2359244f9fce5adb15781914408500 | [
"MIT"
] | null | null | null | authentication/admin.py | maryamTha/zam20 | 136014bd8c2359244f9fce5adb15781914408500 | [
"MIT"
] | null | null | null | authentication/admin.py | maryamTha/zam20 | 136014bd8c2359244f9fce5adb15781914408500 | [
"MIT"
] | null | null | null | from django.contrib import admin
# Register your models here.
from .models import User
class UserAdmin(admin.ModelAdmin):
    """Admin configuration for the custom User model."""
    # Columns shown in the admin change-list view.
    list_display = ['username', 'email', 'auth_provider', 'created_at']
# Make the User model manageable through the Django admin site.
admin.site.register(User, UserAdmin)
| 19.666667 | 71 | 0.75 |
135cef57a660b9a30ba797ab906ff87ac8e0ef33 | 895 | py | Python | setup.py | iamcryptoki/crawlyn | aa6e311c9668f580a0d0d451b8ea7a52ffe59a37 | [
"MIT"
] | 2 | 2021-03-29T20:51:08.000Z | 2022-03-01T01:44:12.000Z | setup.py | iamcryptoki/crawlyn | aa6e311c9668f580a0d0d451b8ea7a52ffe59a37 | [
"MIT"
] | null | null | null | setup.py | iamcryptoki/crawlyn | aa6e311c9668f580a0d0d451b8ea7a52ffe59a37 | [
"MIT"
] | 4 | 2019-05-24T07:19:28.000Z | 2021-12-28T14:33:33.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import crawlyn
from setuptools import setup
# Read the long description up front; using a with-block closes the file
# handle instead of leaking it (the original `open(...).read()` relied on
# garbage collection and triggers ResourceWarning).
with open('README.txt') as _readme:
    _long_description = _readme.read()

setup(
    name='crawlyn',
    version=crawlyn.__version__,
    description="Experimental crawler to grab data from websites.",
    long_description=_long_description,
    author='Galou Gentil',
    author_email='hello@cryptoki.fr',
    url='https://github.com/iamcryptoki/crawlyn',
    license='MIT',
    keywords='crawler, email, links, security, website',
    packages=['crawlyn'],
    # Ship the bundled PhantomJS binaries for each supported platform.
    package_data={
        'crawlyn' : [
            'bin/phantomjs/linux/32/phantomjs',
            'bin/phantomjs/linux/64/phantomjs',
            'bin/phantomjs/macos/phantomjs',
            'bin/phantomjs/windows/phantomjs.exe'
        ]
    },
    install_requires=['docopt', 'lxml', 'selenium'],
    # Expose the `crawlyn` console command, backed by crawlyn.cli:main.
    entry_points={
        'console_scripts': [
            'crawlyn=crawlyn.cli:main',
        ]
    }
)
| 26.323529 | 67 | 0.612291 |
5a6b327337c625e8626a5f29636e057d6ccbad7a | 3,674 | py | Python | src/tests/sys/netinet6/frag6/frag6_01.py | lastweek/source-freebsd | 0821950b0c40cbc891a27964b342e0202a3859ec | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | src/tests/sys/netinet6/frag6/frag6_01.py | lastweek/source-freebsd | 0821950b0c40cbc891a27964b342e0202a3859ec | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | src/tests/sys/netinet6/frag6/frag6_01.py | lastweek/source-freebsd | 0821950b0c40cbc891a27964b342e0202a3859ec | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | #!/usr/bin/env python
#-
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2019 Netflix, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD$
#
import argparse
import scapy.all as sp
import socket
import sys
from sniffer import Sniffer
from time import sleep
def check_icmp6_error(args, packet):
    """Return True iff *packet* is the expected ICMPv6 Parameter Problem error.

    The packet must be IPv6, addressed back to our source address, and carry
    an ICMPv6 Param Problem layer with code 0 (ICMP6_PARAMPROB_HEADER).
    """
    ip6 = packet.getlayer(sp.IPv6)
    if ip6 is None:
        return False
    expected = sp.IPv6(src=args.src[0], dst=args.to[0])
    if ip6.dst != expected.src:
        return False
    param_prob = packet.getlayer(sp.ICMPv6ParamProblem)
    if param_prob is None:
        return False
    # ICMP6_PARAMPROB_HEADER 0
    if param_prob.code != 0:
        return False
    # The payload is deliberately not inspected: the test network is isolated,
    # so nothing else should trigger an ICMPv6 Param Prob.
    #param_prob.display()
    return True
def main():
    """Send a malformed IPv6 jumbo fragment and verify the kernel's ICMPv6 reply."""
    parser = argparse.ArgumentParser("frag6.py",
        description="IPv6 fragementation test tool")
    parser.add_argument('--sendif', nargs=1,
        required=True,
        help='The interface through which the packet will be sent')
    parser.add_argument('--recvif', nargs=1,
        required=True,
        help='The interface on which to check for the packet')
    parser.add_argument('--src', nargs=1,
        required=True,
        help='The source IP address')
    parser.add_argument('--to', nargs=1,
        required=True,
        help='The destination IP address')
    parser.add_argument('--debug',
        required=False, action='store_true',
        help='Enable test debugging')

    args = parser.parse_args()

    # Start sniffing on recvif; check_icmp6_error validates each captured packet.
    sniffer = Sniffer(args, check_icmp6_error)

    ########################################################################
    #
    # A single start fragment with zero length IPv6 header (jumbo).
    # Make sure we do hit the Fragment case, which is tricky as the
    # jumbogram needs to be > 64k.
    #
    # A: Jumbo-Fragment not allowed.
    # R: ICMPv6 param problem.
    #
    #data = "6" * (65536 - 2 - 6 - 8 - 8)
    data = "6" * 65512
    # plen=0 plus the Jumbo hop-by-hop option declares a jumbogram, which is
    # invalid combined with a Fragment header; the target must reject it.
    ip6f01 = sp.Ether() / \
        sp.IPv6(src=args.src[0], dst=args.to[0], plen=0) / \
        sp.IPv6ExtHdrHopByHop(options=sp.Jumbo(jumboplen=65536)) / \
        sp.IPv6ExtHdrFragment(offset=0, m=1, id=6) / \
        sp.UDP(dport=3456, sport=6543) / \
        data
    if args.debug :
        ip6f01.display()
    sp.sendp(ip6f01, iface=args.sendif[0], verbose=False)

    # We should only need to sleep 0.10 but it seems scapy
    # takes time for this one.
    sleep(75)
    sniffer.setEnd()
    sniffer.join()

    # Exit status communicates the test result to the ATF harness.
    if not sniffer.foundCorrectPacket:
        sys.exit(1)

    sys.exit(0)


if __name__ == '__main__':
    main()
| 31.672414 | 76 | 0.716658 |
17ff364f523163c0065ba2b10c6d3a40a475cb86 | 14,388 | py | Python | zun/tests/unit/db/test_compute_host.py | magician03/zun | 4db10041fa4db0dd81f2e110b113172db3dc8f80 | [
"Apache-2.0"
] | 3 | 2018-09-07T02:31:05.000Z | 2018-10-17T10:30:47.000Z | zun/tests/unit/db/test_compute_host.py | magician03/zun | 4db10041fa4db0dd81f2e110b113172db3dc8f80 | [
"Apache-2.0"
] | null | null | null | zun/tests/unit/db/test_compute_host.py | magician03/zun | 4db10041fa4db0dd81f2e110b113172db3dc8f80 | [
"Apache-2.0"
] | 1 | 2018-09-07T02:26:23.000Z | 2018-09-07T02:26:23.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for manipulating compute nodes via the DB API"""
import json
import mock
from oslo_config import cfg
from oslo_utils import uuidutils
import six
import etcd
from etcd import Client as etcd_client
from zun.common import exception
import zun.conf
from zun.db import api as dbapi
from zun.db.etcd import api as etcdapi
from zun.tests.unit.db import base
from zun.tests.unit.db import utils
from zun.tests.unit.db.utils import FakeEtcdMultipleResult
from zun.tests.unit.db.utils import FakeEtcdResult
CONF = zun.conf.CONF
class DbComputeNodeTestCase(base.DbTestCase):
    """Exercise the compute-node CRUD operations of the default (SQL) DB API."""

    def setUp(self):
        super(DbComputeNodeTestCase, self).setUp()

    def test_create_compute_node(self):
        utils.create_test_compute_node(context=self.context)

    def test_create_compute_node_already_exists(self):
        # Creating a second node with the same UUID must be rejected.
        utils.create_test_compute_node(
            context=self.context, uuid='123')
        with self.assertRaisesRegex(exception.ComputeNodeAlreadyExists,
                                    'A compute node with UUID 123.*'):
            utils.create_test_compute_node(
                context=self.context, uuid='123')

    def test_get_compute_node_by_uuid(self):
        node = utils.create_test_compute_node(context=self.context)
        res = dbapi.get_compute_node(
            self.context, node.uuid)
        self.assertEqual(node.uuid, res.uuid)
        self.assertEqual(node.hostname, res.hostname)

    def test_get_compute_node_by_hostname(self):
        node = utils.create_test_compute_node(context=self.context)
        res = dbapi.get_compute_node_by_hostname(
            self.context, node.hostname)
        self.assertEqual(node.uuid, res.uuid)
        self.assertEqual(node.hostname, res.hostname)

    def test_get_compute_node_that_does_not_exist(self):
        self.assertRaises(exception.ComputeNodeNotFound,
                          dbapi.get_compute_node,
                          self.context,
                          uuidutils.generate_uuid())

    def test_list_compute_nodes(self):
        # Create five nodes and verify the listing returns exactly those UUIDs.
        uuids = []
        for i in range(1, 6):
            node = utils.create_test_compute_node(
                uuid=uuidutils.generate_uuid(),
                context=self.context,
                hostname='node' + str(i))
            uuids.append(six.text_type(node['uuid']))
        res = dbapi.list_compute_nodes(self.context)
        res_uuids = [r.uuid for r in res]
        self.assertEqual(sorted(uuids), sorted(res_uuids))

    def test_list_compute_nodes_sorted(self):
        uuids = []
        for i in range(5):
            node = utils.create_test_compute_node(
                uuid=uuidutils.generate_uuid(),
                context=self.context,
                hostname='node' + str(i))
            uuids.append(six.text_type(node.uuid))
        res = dbapi.list_compute_nodes(self.context, sort_key='uuid')
        res_uuids = [r.uuid for r in res]
        # Result must come back already sorted by UUID.
        self.assertEqual(sorted(uuids), res_uuids)

        # An unknown sort key is an error.
        self.assertRaises(exception.InvalidParameterValue,
                          dbapi.list_compute_nodes,
                          self.context,
                          sort_key='foo')

    def test_list_compute_nodes_with_filters(self):
        node1 = utils.create_test_compute_node(
            hostname='node-one',
            uuid=uuidutils.generate_uuid(),
            context=self.context)
        node2 = utils.create_test_compute_node(
            hostname='node-two',
            uuid=uuidutils.generate_uuid(),
            context=self.context)

        res = dbapi.list_compute_nodes(
            self.context, filters={'hostname': 'node-one'})
        self.assertEqual([node1.uuid], [r.uuid for r in res])

        res = dbapi.list_compute_nodes(
            self.context, filters={'hostname': 'node-two'})
        self.assertEqual([node2.uuid], [r.uuid for r in res])

        # A filter matching nothing yields an empty result, not an error.
        res = dbapi.list_compute_nodes(
            self.context, filters={'hostname': 'bad-node'})
        self.assertEqual([], [r.uuid for r in res])

        res = dbapi.list_compute_nodes(
            self.context,
            filters={'hostname': node1.hostname})
        self.assertEqual([node1.uuid], [r.uuid for r in res])

    def test_destroy_compute_node(self):
        node = utils.create_test_compute_node(context=self.context)
        dbapi.destroy_compute_node(self.context, node.uuid)
        self.assertRaises(exception.ComputeNodeNotFound,
                          dbapi.get_compute_node,
                          self.context, node.uuid)

    def test_destroy_compute_node_by_uuid(self):
        node = utils.create_test_compute_node(context=self.context)
        dbapi.destroy_compute_node(self.context, node.uuid)
        self.assertRaises(exception.ComputeNodeNotFound,
                          dbapi.get_compute_node,
                          self.context, node.uuid)

    def test_destroy_compute_node_that_does_not_exist(self):
        self.assertRaises(exception.ComputeNodeNotFound,
                          dbapi.destroy_compute_node, self.context,
                          uuidutils.generate_uuid())

    def test_update_compute_node(self):
        node = utils.create_test_compute_node(context=self.context)
        old_hostname = node.hostname
        new_hostname = 'new-hostname'
        self.assertNotEqual(old_hostname, new_hostname)
        res = dbapi.update_compute_node(
            self.context, node.uuid, {'hostname': new_hostname})
        self.assertEqual(new_hostname, res.hostname)

    def test_update_compute_node_not_found(self):
        node_uuid = uuidutils.generate_uuid()
        new_hostname = 'new-hostname'
        self.assertRaises(exception.ComputeNodeNotFound,
                          dbapi.update_compute_node, self.context,
                          node_uuid, {'hostname': new_hostname})

    def test_update_compute_node_uuid(self):
        # The UUID field is immutable; attempting to change it must fail.
        node = utils.create_test_compute_node(context=self.context)
        self.assertRaises(exception.InvalidParameterValue,
                          dbapi.update_compute_node, self.context,
                          node.uuid, {'uuid': ''})
class EtcdDbComputeNodeTestCase(base.DbTestCase):
    """Exercise the same compute-node CRUD operations against the etcd backend.

    All etcd client calls are mocked; EtcdKeyNotFound side effects simulate
    missing keys, and Fake* results simulate stored documents.
    """

    def setUp(self):
        # Switch the DB API to the etcd backend for every test in this class.
        cfg.CONF.set_override('backend', 'etcd', 'database')
        super(EtcdDbComputeNodeTestCase, self).setUp()

    @mock.patch.object(etcd_client, 'read')
    @mock.patch.object(etcd_client, 'write')
    def test_create_compute_node(self, mock_write, mock_read):
        # No existing key -> create succeeds.
        mock_read.side_effect = etcd.EtcdKeyNotFound
        utils.create_test_compute_node(context=self.context)

    @mock.patch.object(etcd_client, 'read')
    @mock.patch.object(etcd_client, 'write')
    def test_create_compute_node_already_exists(self, mock_write,
                                                mock_read):
        mock_read.side_effect = etcd.EtcdKeyNotFound
        utils.create_test_compute_node(context=self.context, hostname='123')
        # Second read finds the key, so the create must fail.
        mock_read.side_effect = lambda *args: None
        self.assertRaises(exception.ResourceExists,
                          utils.create_test_compute_node,
                          context=self.context, hostname='123')

    @mock.patch.object(etcd_client, 'read')
    @mock.patch.object(etcd_client, 'write')
    @mock.patch.object(dbapi, "_get_dbdriver_instance")
    def test_get_compute_node_by_uuid(self, mock_db_inst,
                                      mock_write, mock_read):
        mock_db_inst.return_value = etcdapi.get_backend()
        mock_read.side_effect = etcd.EtcdKeyNotFound
        compute_node = utils.create_test_compute_node(
            context=self.context)
        # After creation, reads return the stored document.
        mock_read.side_effect = lambda *args: FakeEtcdResult(
            compute_node.as_dict())
        res = dbapi.get_compute_node(self.context, compute_node.uuid)
        self.assertEqual(compute_node.uuid, res.uuid)
        self.assertEqual(compute_node.hostname, res.hostname)

    @mock.patch.object(etcd_client, 'read')
    @mock.patch.object(etcd_client, 'write')
    @mock.patch.object(dbapi, "_get_dbdriver_instance")
    def test_get_compute_node_by_name(self, mock_db_inst,
                                      mock_write, mock_read):
        mock_db_inst.return_value = etcdapi.get_backend()
        mock_read.side_effect = etcd.EtcdKeyNotFound
        node = utils.create_test_compute_node(context=self.context)
        mock_read.side_effect = lambda *args: FakeEtcdResult(
            node.as_dict())
        res = dbapi.get_compute_node(self.context, node.hostname)
        self.assertEqual(node.uuid, res.uuid)

    @mock.patch.object(etcd_client, 'read')
    def test_get_compute_node_that_does_not_exist(self, mock_read):
        mock_read.side_effect = etcd.EtcdKeyNotFound
        self.assertRaises(exception.ComputeNodeNotFound,
                          dbapi.get_compute_node,
                          self.context, 'fake-ident')

    @mock.patch.object(etcd_client, 'read')
    @mock.patch.object(etcd_client, 'write')
    @mock.patch.object(dbapi, "_get_dbdriver_instance")
    def test_list_compute_nodes(self, mock_db_inst, mock_write, mock_read):
        hostnames = []
        compute_nodes = []
        mock_db_inst.return_value = etcdapi.get_backend()
        mock_read.side_effect = etcd.EtcdKeyNotFound
        for i in range(1, 6):
            res_class = utils.create_test_compute_node(
                context=self.context, hostname='class' + str(i))
            compute_nodes.append(res_class.as_dict())
            hostnames.append(six.text_type(res_class['hostname']))
        # Listing reads a directory of documents.
        mock_read.side_effect = lambda *args: FakeEtcdMultipleResult(
            compute_nodes)
        res = dbapi.list_compute_nodes(self.context)
        res_names = [r.hostname for r in res]
        self.assertEqual(sorted(hostnames), sorted(res_names))

    @mock.patch.object(etcd_client, 'read')
    @mock.patch.object(etcd_client, 'write')
    @mock.patch.object(dbapi, "_get_dbdriver_instance")
    def test_list_compute_nodes_sorted(self, mock_db_inst,
                                       mock_write, mock_read):
        hostnames = []
        compute_nodes = []
        mock_db_inst.return_value = etcdapi.get_backend()
        mock_read.side_effect = etcd.EtcdKeyNotFound
        for i in range(1, 6):
            res_class = utils.create_test_compute_node(
                context=self.context, hostname='class' + str(i))
            compute_nodes.append(res_class.as_dict())
            hostnames.append(six.text_type(res_class['hostname']))
        mock_read.side_effect = lambda *args: FakeEtcdMultipleResult(
            compute_nodes)
        res = dbapi.list_compute_nodes(self.context, sort_key='hostname')
        res_names = [r.hostname for r in res]
        # Result must come back already sorted by hostname.
        self.assertEqual(sorted(hostnames), res_names)

    @mock.patch.object(etcd_client, 'read')
    @mock.patch.object(etcd_client, 'write')
    @mock.patch.object(etcd_client, 'delete')
    @mock.patch.object(dbapi, "_get_dbdriver_instance")
    def test_destroy_compute_node(self, mock_db_inst, mock_delete,
                                  mock_write, mock_read):
        mock_db_inst.return_value = etcdapi.get_backend()
        mock_read.side_effect = etcd.EtcdKeyNotFound
        compute_node = utils.create_test_compute_node(
            context=self.context)
        mock_read.side_effect = lambda *args: FakeEtcdResult(
            compute_node.as_dict())
        dbapi.destroy_compute_node(self.context, compute_node.uuid)
        # Destroy must delete the node's key exactly once.
        mock_delete.assert_called_once_with(
            '/compute_nodes/%s' % compute_node.uuid)

    @mock.patch.object(etcd_client, 'read')
    def test_destroy_compute_node_that_does_not_exist(self, mock_read):
        mock_read.side_effect = etcd.EtcdKeyNotFound
        self.assertRaises(exception.ComputeNodeNotFound,
                          dbapi.destroy_compute_node,
                          self.context,
                          'ca3e2a25-2901-438d-8157-de7ffd68d535')

    @mock.patch.object(etcd_client, 'read')
    @mock.patch.object(etcd_client, 'write')
    @mock.patch.object(etcd_client, 'update')
    @mock.patch.object(dbapi, "_get_dbdriver_instance")
    def test_update_compute_node(self, mock_db_inst, mock_update,
                                 mock_write, mock_read):
        mock_db_inst.return_value = etcdapi.get_backend()
        mock_read.side_effect = etcd.EtcdKeyNotFound
        compute_node = utils.create_test_compute_node(
            context=self.context)
        old_name = compute_node.hostname
        new_name = 'new-name'
        self.assertNotEqual(old_name, new_name)
        mock_read.side_effect = lambda *args: FakeEtcdResult(
            compute_node.as_dict())
        dbapi.update_compute_node(
            self.context, compute_node.uuid, {'hostname': new_name})
        # The JSON document written back to etcd must contain the new hostname.
        self.assertEqual(new_name, json.loads(
            mock_update.call_args_list[0][0][0].value)['hostname'])

    @mock.patch.object(etcd_client, 'read')
    def test_update_compute_node_not_found(self, mock_read):
        mock_read.side_effect = etcd.EtcdKeyNotFound
        new_name = 'new-name'
        self.assertRaises(exception.ComputeNodeNotFound,
                          dbapi.update_compute_node,
                          self.context,
                          'ca3e2a25-2901-438d-8157-de7ffd68d535',
                          {'hostname': new_name})

    @mock.patch.object(etcd_client, 'read')
    @mock.patch.object(etcd_client, 'write')
    def test_update_compute_node_uuid(self, mock_write, mock_read):
        # The UUID field is immutable; attempting to change it must fail.
        mock_read.side_effect = etcd.EtcdKeyNotFound
        compute_node = utils.create_test_compute_node(
            context=self.context)
        self.assertRaises(exception.InvalidParameterValue,
                          dbapi.update_compute_node,
                          self.context, compute_node.uuid,
                          {'uuid': ''})
| 43.6 | 78 | 0.650125 |
c19fad3f87233200d63f4dfc25583d01b8517268 | 86,164 | py | Python | caffevis/app.py | HoLuLuLu/deep-visualization-toolbox | 1c7469b389b2bbaa3edde3d2b73fcdd35d157216 | [
"MIT"
] | null | null | null | caffevis/app.py | HoLuLuLu/deep-visualization-toolbox | 1c7469b389b2bbaa3edde3d2b73fcdd35d157216 | [
"MIT"
] | null | null | null | caffevis/app.py | HoLuLuLu/deep-visualization-toolbox | 1c7469b389b2bbaa3edde3d2b73fcdd35d157216 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8
# add parent folder to search path, to enable import of core modules like settings
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
import cv2
import numpy as np
import StringIO
from find_maxes.find_max_acts import load_max_tracker_from_file
import find_maxes.max_tracker
sys.modules['max_tracker'] = find_maxes.max_tracker
from misc import WithTimer, mkdir_p
from numpy_cache import FIFOLimitedArrayCache
from app_base import BaseApp
from image_misc import norm01, norm01c, tile_images_normalize, ensure_float01, tile_images_make_tiles, \
ensure_uint255_and_resize_to_fit, resize_without_fit, ensure_uint255, \
caffe_load_image, ensure_uint255_and_resize_without_fit, array_histogram, fig2data
from image_misc import FormattedString, cv2_typeset_text, to_255
from caffe_proc_thread import CaffeProcThread
from caffevis_app_state import CaffeVisAppState, SiameseViewMode, PatternMode, BackpropMode, BackpropViewOption, \
ColorMapOption, InputOverlayOption
from caffevis_helper import get_pretty_layer_name, read_label_file, load_sprite_image, load_square_sprite_image, \
set_mean, get_image_from_files
from caffe_misc import layer_name_to_top_name, save_caffe_image
from siamese_helper import SiameseHelper
from settings_misc import load_network, get_receptive_field
class CaffeVisApp(BaseApp):
'''App to visualize using caffe.'''
def __init__(self, settings, key_bindings):
super(CaffeVisApp, self).__init__(settings, key_bindings)
print 'Got settings', settings
self.settings = settings
self.bindings = key_bindings
self.net, self._data_mean = load_network(settings)
# set network batch size to 1
current_input_shape = self.net.blobs[self.net.inputs[0]].shape
current_input_shape[0] = 1
self.net.blobs[self.net.inputs[0]].reshape(*current_input_shape)
self.net.reshape()
self._net_channel_swap = settings._calculated_channel_swap
if self._net_channel_swap is None:
self._net_channel_swap_inv = None
else:
self._net_channel_swap_inv = tuple([self._net_channel_swap.index(ii) for ii in range(len(self._net_channel_swap))])
self.labels = None
if self.settings.caffevis_labels:
self.labels = read_label_file(self.settings.caffevis_labels)
self.proc_thread = None
self.jpgvis_thread = None
self.handled_frames = 0
if settings.caffevis_jpg_cache_size < 10*1024**2:
raise Exception('caffevis_jpg_cache_size must be at least 10MB for normal operation.')
self.img_cache = FIFOLimitedArrayCache(settings.caffevis_jpg_cache_size)
self.header_boxes = []
self.buttons_boxes = []
    def start(self, live_vis):
        '''Create the app state and (re)start the worker threads.

        live_vis: the owning LiveVis instance, kept for callbacks and shared state.
        Threads are only created when absent or dead, so start() is safe to call again.
        '''
        # Imported here rather than at module scope; presumably to avoid a
        # circular import — confirm before moving to the top of the file.
        from jpg_vis_loading_thread import JPGVisLoadingThread
        self.live_vis = live_vis
        self.state = CaffeVisAppState(self.net, self.settings, self.bindings, live_vis)
        self.state.drawing_stale = True
        self.header_print_names = [get_pretty_layer_name(self.settings, nn) for nn in self.state.get_headers()]
        if self.proc_thread is None or not self.proc_thread.is_alive():
            # Start thread if it's not already running
            self.proc_thread = CaffeProcThread(self.settings, self.net, self.state,
                                               self.settings.caffevis_frame_wait_sleep,
                                               self.settings.caffevis_pause_after_keys,
                                               self.settings.caffevis_heartbeat_required,
                                               self.settings.caffevis_mode_gpu)
            self.proc_thread.start()
        if self.jpgvis_thread is None or not self.jpgvis_thread.is_alive():
            # Start thread if it's not already running
            self.jpgvis_thread = JPGVisLoadingThread(self.settings, self.state, self.img_cache,
                                                     self.settings.caffevis_jpg_load_sleep,
                                                     self.settings.caffevis_heartbeat_required)
            self.jpgvis_thread.start()
def get_heartbeats(self):
return [self.proc_thread.heartbeat, self.jpgvis_thread.heartbeat]
    def quit(self):
        '''Signal the processing thread to quit and join it.

        Raises Exception if the processing thread cannot be joined within
        three 1-second attempts.
        '''
        print 'CaffeVisApp: trying to quit'

        with self.state.lock:
            self.state.quit = True

        if self.proc_thread != None:
            # Retry the join a few times; the worker may be mid-iteration.
            for ii in range(3):
                self.proc_thread.join(1)
                if not self.proc_thread.is_alive():
                    break

            if self.proc_thread.is_alive():
                raise Exception('CaffeVisApp: Could not join proc_thread; giving up.')

            self.proc_thread = None

        print 'CaffeVisApp: quitting.'
def _can_skip_all(self, panes):
return ('caffevis_layers' not in panes.keys())
    def handle_input(self, input_image, input_label, input_filename, panes):
        '''Queue one input frame (image + label + filename) for the processing thread.

        Skips all work when no caffevis_layers pane is present. State fields are
        written under the state lock; the processing thread consumes next_frame.
        '''
        if self.debug_level > 1:
            print 'handle_input: frame number', self.handled_frames, 'is', 'None' if input_image is None else 'Available'
        self.handled_frames += 1
        if self._can_skip_all(panes):
            return

        with self.state.lock:
            if self.debug_level > 1:
                print 'CaffeVisApp.handle_input: pushed frame'
            self.state.next_frame = input_image
            self.state.next_label = input_label
            self.state.next_filename = input_filename
            if self.debug_level > 1:
                print 'CaffeVisApp.handle_input: caffe_net_state is:', self.state.caffe_net_state
            self.state.last_frame = input_image
    def redraw_needed(self):
        '''Delegate to the app state: whether any pane needs redrawing.'''
        return self.state.redraw_needed()
    def draw(self, panes):
        '''Render all panes if the display is stale and the net is idle.

        Returns True if drawing happened, False otherwise. The state machine
        transitions caffe_net_state 'free' -> 'draw' -> 'free' under the lock
        so the processing thread does not run concurrently with drawing.
        '''
        if self._can_skip_all(panes):
            if self.debug_level > 1:
                print 'CaffeVisApp.draw: skipping'
            return False

        with self.state.lock:
            # Hold lock throughout drawing
            do_draw = self.state.drawing_stale and self.state.caffe_net_state == 'free'
            # print 'CaffeProcThread.draw: caffe_net_state is:', self.state.caffe_net_state
            if do_draw:
                self.state.caffe_net_state = 'draw'

        if do_draw:
            if self.debug_level > 1:
                print 'CaffeVisApp.draw: drawing'

            if 'caffevis_control' in panes:
                self._draw_control_pane(panes['caffevis_control'])
            if 'caffevis_status' in panes:
                self._draw_status_pane(panes['caffevis_status'])
            if 'caffevis_buttons' in panes:
                self._draw_buttons_pane(panes['caffevis_buttons'])
            layer_data_3D_highres = None
            # The layer pane's high-res data is reused by the aux pane below.
            if 'caffevis_layers' in panes:
                layer_data_3D_highres = self._draw_layer_pane(panes['caffevis_layers'])
            if 'caffevis_aux' in panes:
                self._draw_aux_pane(panes['caffevis_aux'], layer_data_3D_highres)
            if 'caffevis_back' in panes:
                # Draw back pane as normal
                self._draw_back_pane(panes['caffevis_back'])
                if self.state.layers_pane_zoom_mode == 2:
                    # ALSO draw back pane into layers pane
                    self._draw_back_pane(panes['caffevis_layers'])
            if 'caffevis_jpgvis' in panes:
                self._draw_jpgvis_pane(panes['caffevis_jpgvis'])

            with self.state.lock:
                self.state.drawing_stale = False
                self.state.caffe_net_state = 'free'
        return do_draw
    def _draw_prob_labels_pane(self, pane):
        '''Adds text label annotation atop the given pane.

        Renders the top-5 class predictions from the probability layer, each
        colored by interpolating between caffevis_class_clr_0 and _clr_1
        according to its probability. No-op without labels, the
        show_label_predictions flag, or a configured prob layer.
        '''
        if not self.labels or not self.state.show_label_predictions or not self.settings.caffevis_prob_layer:
            return

        #pane.data[:] = to_255(self.settings.window_background)

        defaults = {'face': getattr(cv2, self.settings.caffevis_class_face),
                    'fsize': self.settings.caffevis_class_fsize,
                    'clr': to_255(self.settings.caffevis_class_clr_0),
                    'thick': self.settings.caffevis_class_thick}
        loc = self.settings.caffevis_class_loc[::-1]   # Reverse to OpenCV c,r order
        clr_0 = to_255(self.settings.caffevis_class_clr_0)
        clr_1 = to_255(self.settings.caffevis_class_clr_1)

        probs_flat = self.net.blobs[layer_name_to_top_name(self.net, self.settings.caffevis_prob_layer)].data.flatten()
        # Indices of the 5 largest probabilities, highest first.
        top_5 = probs_flat.argsort()[-1:-6:-1]

        strings = []
        pmax = probs_flat[top_5[0]]
        for idx in top_5:
            prob = probs_flat[idx]
            text = '%.2f %s' % (prob, self.labels[idx])
            fs = FormattedString(text, defaults)
            #fs.clr = tuple([clr_1[ii]*prob/pmax + clr_0[ii]*(1-prob/pmax) for ii in range(3)])
            # Linear blend between the two class colors, clamped to [0, 255].
            fs.clr = tuple([max(0,min(255,clr_1[ii]*prob + clr_0[ii]*(1-prob))) for ii in range(3)])
            strings.append([fs])   # Line contains just fs
        cv2_typeset_text(pane.data, strings, loc,
                         line_spacing = self.settings.caffevis_class_line_spacing)
    def _draw_control_pane(self, pane):
        '''Draw the layer-selection header strip.

        Each layer name is styled to reflect selection state: the frozen
        backprop layer, the cursor position, and the currently selected layer
        each get their own color/thickness. Also records the text bounding
        boxes (for mouse hit-testing) and the computed pane height.
        '''
        pane.data[:] = to_255(self.settings.window_background)

        with self.state.lock:
            layer_idx = self.state.layer_idx

        loc = self.settings.caffevis_control_loc[::-1]   # Reverse to OpenCV c,r order

        strings = []
        defaults = {'face': getattr(cv2, self.settings.caffevis_control_face),
                    'fsize': self.settings.caffevis_control_fsize,
                    'clr': to_255(self.settings.caffevis_control_clr),
                    'thick': self.settings.caffevis_control_thick}

        for ii in range(len(self.header_print_names)):
            fs = FormattedString(self.header_print_names[ii], defaults)
            this_layer_def = self.settings.layers_list[ii]
            # Frozen-backprop styling may be overridden by cursor styling below.
            if self.state.backprop_selection_frozen and this_layer_def == self.state.get_current_backprop_layer_definition():
                fs.clr = to_255(self.settings.caffevis_control_clr_bp)
                fs.thick = self.settings.caffevis_control_thick_bp
            if this_layer_def == self.state.get_current_layer_definition():
                if self.state.cursor_area == 'top':
                    fs.clr = to_255(self.settings.caffevis_control_clr_cursor)
                    fs.thick = self.settings.caffevis_control_thick_cursor
                else:
                    if not (self.state.backprop_selection_frozen and this_layer_def == self.state.get_current_backprop_layer_definition()):
                        fs.clr = to_255(self.settings.caffevis_control_clr_selected)
                        fs.thick = self.settings.caffevis_control_thick_selected
            strings.append(fs)

        # header_boxes is kept for click hit-testing elsewhere in the app.
        locy, self.header_boxes = cv2_typeset_text(pane.data, strings, loc,
                                                   line_spacing = self.settings.caffevis_control_line_spacing,
                                                   wrap = True)

        # Pane height: explicit setting wins; otherwise derive from rendered text.
        if hasattr(self.settings, 'control_pane_height'):
            self.settings._calculated_control_pane_height = self.settings.control_pane_height
        else:
            self.settings._calculated_control_pane_height = locy - loc[1] + 4
    def _draw_status_pane(self, pane):
        '''Draw the two-line status bar: mode/unit/backprop/FPS, then sizes and input file.

        Status text is composed into StringIO buffers under the state lock
        (comma-terminated py2 prints keep fragments on one line), then typeset
        into the pane.
        '''
        pane.data[:] = to_255(self.settings.window_background)

        defaults = {'face': getattr(cv2, self.settings.caffevis_status_face),
                    'fsize': self.settings.caffevis_status_fsize,
                    'clr': to_255(self.settings.caffevis_status_clr),
                    'thick': self.settings.caffevis_status_thick}
        loc = self.settings.caffevis_status_loc[::-1]   # Reverse to OpenCV c,r order

        status = StringIO.StringIO()
        status2 = StringIO.StringIO()
        fps = self.proc_thread.approx_fps()

        with self.state.lock:
            # First fragment names the current visualization mode.
            pattern_first_mode = "first" if self.state.pattern_first_only else "all"
            if self.state.pattern_mode == PatternMode.MAXIMAL_OPTIMIZED_IMAGE:
                print >> status, 'pattern(' + pattern_first_mode + ' optimized max)'
            elif self.state.pattern_mode == PatternMode.MAXIMAL_INPUT_IMAGE:
                print >> status, 'pattern(' + pattern_first_mode + ' input max)'
            elif self.state.pattern_mode == PatternMode.WEIGHTS_HISTOGRAM:
                print >> status, 'histogram(weights)'
            elif self.state.pattern_mode == PatternMode.MAX_ACTIVATIONS_HISTOGRAM:
                print >> status, 'histogram(maximal activations)'
            elif self.state.pattern_mode == PatternMode.ACTIVATIONS_CORRELATION:
                print >> status, 'correlation(maximal activations)'
            elif self.state.pattern_mode == PatternMode.WEIGHTS_CORRELATION:
                print >> status, 'correlation(weights)'
            elif self.state.layers_show_back:
                print >> status, 'back'
            else:
                print >> status, 'fwd'
            default_layer_name = self.state.get_default_layer_name()
            print >>status, '%s:%d |' % (default_layer_name, self.state.selected_unit),
            if not self.state.back_enabled:
                print >>status, 'Back: off',
            else:
                print >>status, 'Back: %s (%s)' % (BackpropMode.to_string(self.state.back_mode), BackpropViewOption.to_string(self.state.back_view_option)),
                print >>status, '(from %s_%d)' % (self.state.get_default_layer_name(self.state.get_current_backprop_layer_definition()), self.state.backprop_unit),
            print >>status, '|',
            print >>status, 'Boost: %g/%g' % (self.state.layer_boost_indiv, self.state.layer_boost_gamma)

            if fps > 0:
                print >>status, '| FPS: %.01f' % fps

            if self.state.next_label:
                print >> status, '| GT Label: %s' % self.state.next_label

            if self.state.extra_msg:
                print >>status, '|', self.state.extra_msg

            # Second line: layer geometry and the current input file.
            print >> status2, 'Layer size: %s' % (self.state.get_layer_output_size_string())
            print >> status2, '| Receptive field:', '%s' % (str(get_receptive_field(self.settings, self.net, default_layer_name)))
            print >> status2, '| Input: %s' % (str(self.state.next_filename))

        strings_line1 = [FormattedString(line, defaults) for line in status.getvalue().split('\n')]
        strings_line2 = [FormattedString(line, defaults) for line in status2.getvalue().split('\n')]

        locy, boxes = cv2_typeset_text(pane.data, strings_line1, (loc[0], loc[1] + 5),
                                       line_spacing = self.settings.caffevis_status_line_spacing)
        locy, boxes = cv2_typeset_text(pane.data, strings_line2, (loc[0], locy),
                                       line_spacing=self.settings.caffevis_status_line_spacing)
def _draw_buttons_pane(self, pane):
    """Draw the clickable buttons menu into *pane*.

    Builds one row of FormattedString cells per menu line, highlighting the
    option that matches the current state, then typesets them and records the
    clickable regions in self.buttons_boxes (used elsewhere for hit-testing).

    Fixes vs. previous version: removed dead locals (an unused StringIO buffer
    and an unused fps value), removed commented-out code, and replaced the
    copy-pasted per-option style selection with a data-driven table.
    """
    pane.data[:] = to_255(self.settings.window_background)

    header_defaults = {'face': getattr(cv2, self.settings.caffevis_buttons_header_face),
                       'fsize': self.settings.caffevis_buttons_header_fsize,
                       'clr': to_255(self.settings.caffevis_buttons_header_clr),
                       'thick': self.settings.caffevis_buttons_header_thick}
    normal_defaults = {'face': getattr(cv2, self.settings.caffevis_buttons_normal_face),
                       'fsize': self.settings.caffevis_buttons_normal_fsize,
                       'clr': to_255(self.settings.caffevis_buttons_normal_clr),
                       'thick': self.settings.caffevis_buttons_normal_thick}
    selected_defaults = {'face': getattr(cv2, self.settings.caffevis_buttons_selected_face),
                         'fsize': self.settings.caffevis_buttons_selected_fsize,
                         'clr': to_255(self.settings.caffevis_buttons_selected_clr),
                         'thick': self.settings.caffevis_buttons_selected_thick}

    loc = self.settings.caffevis_buttons_loc[::-1]  # Reverse to OpenCV c,r order

    def pick(is_selected):
        # Style for an option button: highlighted when it is the active choice.
        return selected_defaults if is_selected else normal_defaults

    # Snapshot all state-dependent selections under the lock, then build rows.
    with self.state.lock:
        state = self.state
        static_file = self.live_vis.input_updater.static_file_mode
        pattern_off = state.pattern_mode == PatternMode.OFF
        lines = [
            [FormattedString('Input', header_defaults)],
            [FormattedString('File', pick(static_file)),
             FormattedString('Prev', normal_defaults),
             FormattedString('Next', normal_defaults)],
            [FormattedString('Camera', pick(not static_file))],
            [FormattedString('', normal_defaults)],
            [FormattedString('Modes', header_defaults)],
            # 'Activations'/'Gradients' are only active when no pattern mode is on.
            [FormattedString('Activations', pick(pattern_off and not state.layers_show_back))],
            [FormattedString('Gradients', pick(pattern_off and state.layers_show_back))],
            [FormattedString('Maximal Optimized', pick(state.pattern_mode == PatternMode.MAXIMAL_OPTIMIZED_IMAGE))],
            [FormattedString('Maximal Input', pick(state.pattern_mode == PatternMode.MAXIMAL_INPUT_IMAGE))],
            [FormattedString('Weights Histogram', pick(state.pattern_mode == PatternMode.WEIGHTS_HISTOGRAM))],
            [FormattedString('Activations Histogram', pick(state.pattern_mode == PatternMode.MAX_ACTIVATIONS_HISTOGRAM))],
            [FormattedString('Weights Correlation', pick(state.pattern_mode == PatternMode.WEIGHTS_CORRELATION))],
            [FormattedString('Activations Correlation', pick(state.pattern_mode == PatternMode.ACTIVATIONS_CORRELATION))],
            [FormattedString('', normal_defaults)],
            [FormattedString('Input Overlay', header_defaults)],
            [FormattedString('No Overlay', pick(state.input_overlay_option == InputOverlayOption.OFF))],
            [FormattedString('Over Active', pick(state.input_overlay_option == InputOverlayOption.OVER_ACTIVE))],
            [FormattedString('Over Inactive', pick(state.input_overlay_option == InputOverlayOption.OVER_INACTIVE))],
            [FormattedString('', normal_defaults)],
            [FormattedString('Backprop Modes', header_defaults)],
            [FormattedString('No Backprop', pick(state.back_mode == BackpropMode.OFF))],
            [FormattedString('Gradient', pick(state.back_mode == BackpropMode.GRAD))],
            [FormattedString('ZF Deconv', pick(state.back_mode == BackpropMode.DECONV_ZF))],
            [FormattedString('Guided Backprop', pick(state.back_mode == BackpropMode.DECONV_GB))],
            [FormattedString('Freeze Origin', pick(state.backprop_selection_frozen))],
            [FormattedString('', normal_defaults)],
            [FormattedString('Backprop Views', header_defaults)],
            [FormattedString('Raw', pick(state.back_view_option == BackpropViewOption.RAW))],
            [FormattedString('Gray', pick(state.back_view_option == BackpropViewOption.GRAY))],
            [FormattedString('Norm', pick(state.back_view_option == BackpropViewOption.NORM))],
            [FormattedString('Blurred Norm', pick(state.back_view_option == BackpropViewOption.NORM_BLUR))],
            [FormattedString('Sum > 0', pick(state.back_view_option == BackpropViewOption.POS_SUM))],
            [FormattedString('Gradient Histogram', pick(state.back_view_option == BackpropViewOption.HISTOGRAM))],
            [FormattedString('', normal_defaults)],
            [FormattedString('Help', normal_defaults)],
            [FormattedString('Quit', normal_defaults)],
        ]

    # Typeset outside the lock; only local data is used here.
    locy, self.buttons_boxes = cv2_typeset_text(pane.data, lines, (loc[0], loc[1] + 5),
                                                line_spacing=self.settings.caffevis_buttons_line_spacing)
    return
def prepare_tile_image(self, display_3D, highlight_selected, n_tiles, tile_rows, tile_cols):
    """Tile per-unit images (display_3D) into one 2D mosaic.

    Optionally draws highlight borders around the selected unit and, when the
    backprop origin is frozen on this layer, around the backprop unit.
    Returns the tiled 2D image.
    """
    # Gradient (back) view uses its own background color; all other views
    # use the normal window background as padding between tiles.
    showing_back = self.state.layers_show_back and self.state.pattern_mode == PatternMode.OFF
    padval = (self.settings.caffevis_layer_clr_back_background
              if showing_back
              else self.settings.window_background)

    highlights = [None] * n_tiles
    if highlight_selected:
        with self.state.lock:
            if self.state.cursor_area == 'bottom':
                # Cursor is in the layer pane: outline the selected unit. Color in [0,1] range.
                highlights[self.state.selected_unit] = self.settings.caffevis_layer_clr_cursor
            if self.state.backprop_selection_frozen and self.state.get_current_layer_definition() == self.state.get_current_backprop_layer_definition():
                # Backprop origin frozen on this layer: outline that unit too. Color in [0,1] range.
                highlights[self.state.backprop_unit] = self.settings.caffevis_layer_clr_back_sel

    _, tiled = tile_images_make_tiles(display_3D, hw=(tile_rows, tile_cols),
                                      padval=padval, highlights=highlights)
    return tiled
def _draw_layer_pane(self, pane):
    '''Returns the data shown in highres format, b01c order.

    Fetches the selected layer's data (activations or gradients, single or
    siamese), renders it according to the current pattern mode, tiles it into
    the layer pane, and overlays the label/score line.
    '''
    default_layer_name = self.state.get_default_layer_name()

    if self.state.siamese_view_mode_has_two_images():
        # Siamese view: fetch a blob per input image, then show them side by side.
        if self.state.layers_show_back:
            layer_dat_3D_0, layer_dat_3D_1 = self.state.get_siamese_selected_diff_blobs(self.net)
        else:
            layer_dat_3D_0, layer_dat_3D_1 = self.state.get_siamese_selected_data_blobs(self.net)

        # Promote FC layers with shape (n) to have shape (n,1,1)
        if len(layer_dat_3D_0.shape) == 1:
            layer_dat_3D_0 = layer_dat_3D_0[:, np.newaxis, np.newaxis]
            layer_dat_3D_1 = layer_dat_3D_1[:, np.newaxis, np.newaxis]
            # we don't resize the images to half the size since there is no point in doing that in FC layers
        elif layer_dat_3D_0.shape[2] == 1:
            # we don't resize the images to half the size since it will crash
            pass
        else:
            # resize images to half the size so the pair fits in one tile width
            half_pane_shape = (layer_dat_3D_0.shape[1], layer_dat_3D_0.shape[2] / 2)
            layer_dat_3D_0 = resize_without_fit(layer_dat_3D_0.transpose((1, 2, 0)), half_pane_shape).transpose((2, 0, 1))
            layer_dat_3D_1 = resize_without_fit(layer_dat_3D_1.transpose((1, 2, 0)), half_pane_shape).transpose((2, 0, 1))

        # concatenate images side-by-side (along width)
        layer_dat_3D = np.concatenate((layer_dat_3D_0, layer_dat_3D_1), axis=2)
    else:
        # Single-image view: one blob of activations or gradients.
        if self.state.layers_show_back:
            layer_dat_3D = self.state.get_single_selected_diff_blob(self.net)
        else:
            layer_dat_3D = self.state.get_single_selected_data_blob(self.net)

        # Promote FC layers with shape (n) to have shape (n,1,1)
        if len(layer_dat_3D.shape) == 1:
            layer_dat_3D = layer_dat_3D[:, np.newaxis, np.newaxis]

    n_tiles = layer_dat_3D.shape[0]
    top_name = layer_name_to_top_name(self.net, default_layer_name)
    tile_rows, tile_cols = self.state.net_blob_info[top_name]['tiles_rc']

    display_2D = None
    display_3D_highres = None
    is_layer_summary_loaded = False
    if self.state.pattern_mode != PatternMode.OFF:
        # Show desired patterns loaded from disk
        # NOTE(review): if caffevis_outputs_dir_folder_format matches neither
        # expected value, display_3D is never assigned below — verify settings
        # validation happens upstream.
        if self.state.pattern_mode == PatternMode.MAXIMAL_OPTIMIZED_IMAGE:
            if self.settings.caffevis_outputs_dir_folder_format == 'original_combined_single_image':
                display_2D, display_3D, display_3D_highres, is_layer_summary_loaded = self.load_pattern_images_original_format(
                    default_layer_name, layer_dat_3D, n_tiles, pane, tile_cols, tile_rows)
            elif self.settings.caffevis_outputs_dir_folder_format == 'max_tracker_output':
                display_2D, display_3D, display_3D_highres, is_layer_summary_loaded = self.load_pattern_images_optimizer_format(
                    default_layer_name, layer_dat_3D, n_tiles, pane, tile_cols, tile_rows,
                    self.state.pattern_first_only, file_search_pattern='opt*.jpg')
        elif self.state.pattern_mode == PatternMode.MAXIMAL_INPUT_IMAGE:
            if self.settings.caffevis_outputs_dir_folder_format == 'original_combined_single_image':
                # maximal input image patterns is not implemented in original format;
                # show an all-black placeholder and report the error.
                display_3D_highres = np.zeros((layer_dat_3D.shape[0], pane.data.shape[0],
                                               pane.data.shape[1],
                                               pane.data.shape[2]), dtype=np.uint8)
                display_3D = self.downsample_display_3d(display_3D_highres, layer_dat_3D, pane, tile_cols, tile_rows)
                print "ERROR: patterns view with maximal input images is not implemented when settings.caffevis_outputs_dir_folder_format == 'original_combined_single_image'"
            elif self.settings.caffevis_outputs_dir_folder_format == 'max_tracker_output':
                display_2D, display_3D, display_3D_highres, is_layer_summary_loaded = self.load_pattern_images_optimizer_format(
                    default_layer_name, layer_dat_3D, n_tiles, pane, tile_cols, tile_rows,
                    self.state.pattern_first_only, file_search_pattern='maxim*.png')
        elif self.state.pattern_mode == PatternMode.WEIGHTS_HISTOGRAM:
            # Layer-level summary when the cursor is on the top (layer) row.
            display_2D, display_3D, display_3D_highres, is_layer_summary_loaded = self.load_weights_histograms(
                self.net, default_layer_name, layer_dat_3D, n_tiles, pane, tile_cols, tile_rows,
                show_layer_summary=self.state.cursor_area == 'top')
        elif self.state.pattern_mode == PatternMode.MAX_ACTIVATIONS_HISTOGRAM:
            if self.settings.caffevis_histograms_format == 'load_from_file':
                display_2D, display_3D, display_3D_highres, is_layer_summary_loaded = self.load_pattern_images_optimizer_format(
                    default_layer_name, layer_dat_3D, n_tiles, pane, tile_cols, tile_rows, True,
                    file_search_pattern='max_histogram.png',
                    show_layer_summary=self.state.cursor_area == 'top',
                    file_summary_pattern='layer_inactivity.png')
            elif self.settings.caffevis_histograms_format == 'calculate_in_realtime':
                display_2D, display_3D, display_3D_highres, is_layer_summary_loaded = self.load_maximal_activations_histograms(
                    default_layer_name, layer_dat_3D, n_tiles, pane, tile_cols, tile_rows,
                    show_layer_summary=self.state.cursor_area == 'top')
        elif self.state.pattern_mode == PatternMode.ACTIVATIONS_CORRELATION:
            # Correlation views only exist as per-layer summary images.
            display_2D, display_3D, display_3D_highres, is_layer_summary_loaded = self.load_pattern_images_optimizer_format(
                default_layer_name, layer_dat_3D, n_tiles, pane, tile_cols, tile_rows, True,
                file_search_pattern=None,
                show_layer_summary=True,
                file_summary_pattern='channels_correlation.png')
        elif self.state.pattern_mode == PatternMode.WEIGHTS_CORRELATION:
            display_2D, display_3D, display_3D_highres, is_layer_summary_loaded = self.load_weights_correlation(
                self.net, default_layer_name, layer_dat_3D, n_tiles, pane, tile_cols, tile_rows,
                show_layer_summary=True)
        else:
            raise Exception("Invalid value of pattern mode: %d" % self.state.pattern_mode)
    else:
        # Show data from network (activations or diffs)
        if self.state.layers_show_back:
            back_what_to_disp = self.get_back_what_to_disp()
            if back_what_to_disp == 'disabled':
                # Backprop disabled: fill tiles with the plain window background.
                layer_dat_3D_normalized = np.tile(self.settings.window_background, layer_dat_3D.shape + (1,))
            elif back_what_to_disp == 'stale':
                # Backprop data out of date: fill tiles with the stale background.
                layer_dat_3D_normalized = np.tile(self.settings.stale_background, layer_dat_3D.shape + (1,))
            else:
                # Gradients: red for negative, green for positive values.
                layer_dat_3D_normalized = tile_images_normalize(layer_dat_3D,
                                                                boost_indiv = self.state.layer_boost_indiv,
                                                                boost_gamma = self.state.layer_boost_gamma,
                                                                neg_pos_colors = ((1,0,0), (0,1,0)))
        else:
            layer_dat_3D_normalized = tile_images_normalize(layer_dat_3D,
                                                            boost_indiv = self.state.layer_boost_indiv,
                                                            boost_gamma = self.state.layer_boost_gamma)
        display_3D = layer_dat_3D_normalized

    # Convert to float if necessary:
    display_3D = ensure_float01(display_3D)
    # Upsample gray -> color if necessary
    #   e.g. (1000,32,32) -> (1000,32,32,3)
    if len(display_3D.shape) == 3:
        display_3D = display_3D[:,:,:,np.newaxis]
    if display_3D.shape[3] == 1:
        display_3D = np.tile(display_3D, (1, 1, 1, 3))
    # Upsample unit length tiles to give a more sane tile / highlight ratio
    #   e.g. (1000,1,1,3) -> (1000,3,3,3)
    if (display_3D.shape[1] == 1) and (display_3D.shape[2] == 1):
        display_3D = np.tile(display_3D, (1, 3, 3, 1))
    # Upsample pair of unit length tiles to give a more sane tile / highlight ratio (occurs on siamese FC layers)
    #   e.g. (1000,1,2,3) -> (1000,2,2,3)
    if (display_3D.shape[1] == 1) and (display_3D.shape[2] == 2):
        display_3D = np.tile(display_3D, (1, 2, 1, 1))

    # If no pattern-mode highres image was loaded, the normalized data is the highres version.
    if display_3D_highres is None:
        display_3D_highres = display_3D

    # generate 2D display by tiling the 3D images and add highlights, unless already generated
    if display_2D is None:
        display_2D = self.prepare_tile_image(display_3D, True, n_tiles, tile_rows, tile_cols)

    self._display_pane_based_on_zoom_mode(display_2D, display_3D_highres, is_layer_summary_loaded, pane)
    self._add_label_or_score_overlay(default_layer_name, pane)

    return display_3D_highres
def _display_pane_based_on_zoom_mode(self, display_2D, display_3D_highres, is_layer_summary_loaded, pane):
    """Blit the layer display into *pane* according to layers_pane_zoom_mode.

    Mode 0: normal tiled display. Mode 1: zoomed view of the selected unit.
    Mode 2: blacked-out canvas (zoomed backprop pane). When a layer-summary
    image is loaded, zoom modes fall back to the normal display.
    """
    zoom = self.state.layers_pane_zoom_mode
    assert zoom in (0, 1, 2)

    if zoom == 1 and not is_layer_summary_loaded:
        # Mode 1: zoomed selection
        resized = self.get_processed_selected_unit(display_3D_highres, pane.data.shape, use_colored_data=False)
    elif zoom == 2 and not is_layer_summary_loaded:
        # Mode 2: zoomed backprop pane — same sizing as normal, multiplied to black.
        if self.settings.caffevis_keep_aspect_ratio:
            resized = ensure_uint255_and_resize_to_fit(display_2D, pane.data.shape) * 0
        else:
            resized = ensure_uint255_and_resize_without_fit(display_2D, pane.data.shape) * 0
    else:
        # Mode 0, or any zoom mode with a layer summary loaded: normal display
        # (activations or patterns).
        if self.settings.caffevis_keep_aspect_ratio:
            resized = ensure_uint255_and_resize_to_fit(display_2D, pane.data.shape)
        else:
            resized = ensure_uint255_and_resize_without_fit(display_2D, pane.data.shape)

    pane.data[:] = to_255(self.settings.window_background)
    pane.data[0:resized.shape[0], 0:resized.shape[1], :] = resized
def _add_label_or_score_overlay(self, default_layer_name, pane):
    """Overlay the selected unit's class label and/or value atop the layer pane.

    Only drawn when the cursor is in the bottom (unit selection) area. The
    label is shown for layers listed in settings.caffevis_label_layers (e.g.
    fc8/prob); the activation or gradient value is appended when
    show_maximal_score is on and the selected blob is 1-D.
    """
    if self.state.cursor_area != 'bottom':
        return

    font = {'face': getattr(cv2, self.settings.caffevis_label_face),
            'fsize': self.settings.caffevis_label_fsize,
            'clr': to_255(self.settings.caffevis_label_clr),
            'thick': self.settings.caffevis_label_thick}
    anchor = self.settings.caffevis_label_loc[::-1]  # Reverse to OpenCV c,r order

    text = ""
    if self.labels and (default_layer_name in self.settings.caffevis_label_layers):
        text = self.labels[self.state.selected_unit] + " "

    if self.state.show_maximal_score:
        if self.state.siamese_view_mode_has_two_images():
            # Two values, one per siamese input image.
            if self.state.layers_show_back:
                blob_a, blob_b = self.state.get_siamese_selected_diff_blobs(self.net)
                prefix = 'grad: '
            else:
                blob_a, blob_b = self.state.get_siamese_selected_data_blobs(self.net)
                prefix = 'act: '
            if len(blob_a.shape) == 1:
                text += prefix + str(blob_a[self.state.selected_unit]) + " " + str(blob_b[self.state.selected_unit])
        else:
            if self.state.layers_show_back:
                blob = self.state.get_single_selected_diff_blob(self.net)
                prefix = 'grad: '
            else:
                blob = self.state.get_single_selected_data_blob(self.net)
                prefix = 'act: '
            if len(blob.shape) == 1:
                text += prefix + str(blob[self.state.selected_unit])

    cv2_typeset_text(pane.data, [FormattedString(text, font)], anchor)
def load_pattern_images_original_format(self, default_layer_name, layer_dat_3D, n_tiles, pane,
                                        tile_cols, tile_rows):
    """Load precomputed pattern images stored as one combined sprite JPG per layer.

    Looks up <outputs_dir>/regularized_opt/<layer>/whole_layer.jpg (with
    optional layer-name remapping), caching the loaded sprite in img_cache.
    Returns (display_2D, display_3D, display_3D_highres, is_layer_summary_loaded);
    display_2D is always None and is_layer_summary_loaded always False here.
    """
    display_2D = None
    display_3D_highres = None
    is_layer_summary_loaded = False

    load_layer = default_layer_name
    # Some layers map to another layer's saved visualizations.
    if self.settings.caffevis_jpgvis_remap and load_layer in self.settings.caffevis_jpgvis_remap:
        load_layer = self.settings.caffevis_jpgvis_remap[load_layer]

    # Only attempt loading when the layer is allowed (or no allow-list is set)
    # and an outputs directory is configured.
    if ((self.settings.caffevis_jpgvis_layers and load_layer in self.settings.caffevis_jpgvis_layers) or (self.settings.caffevis_jpgvis_layers is None)) and self.settings.caffevis_outputs_dir:
        jpg_path = os.path.join(self.settings.caffevis_outputs_dir, 'regularized_opt', load_layer, 'whole_layer.jpg')

        # Get highres version
        display_3D_highres = self.img_cache.get((jpg_path, 'whole'), None)

        if display_3D_highres is None:
            try:
                with WithTimer('CaffeVisApp:load_sprite_image', quiet=self.debug_level < 1):
                    # Split the combined sprite into n_tiles per-unit images.
                    display_3D_highres = load_square_sprite_image(jpg_path, n_sprites=n_tiles)
            except IOError:
                # File does not exist, so just display disabled.
                pass
            else:
                # Cache only on successful load (else-clause skipped on IOError).
                if display_3D_highres is not None:
                    self.img_cache.set((jpg_path, 'whole'), display_3D_highres)

    # Downsample (or zero-fill when nothing was loaded) to fit the pane grid.
    display_3D = self.downsample_display_3d(display_3D_highres, layer_dat_3D, pane, tile_cols, tile_rows)
    return display_2D, display_3D, display_3D_highres, is_layer_summary_loaded
def load_pattern_images_optimizer_format(self, default_layer_name, layer_dat_3D, n_tiles, pane,
                                         tile_cols, tile_rows, first_only, file_search_pattern, show_layer_summary = False, file_summary_pattern = ""):
    """Load precomputed pattern images stored one folder per unit (max-tracker layout).

    Two modes: when show_layer_summary is True, loads a single per-layer
    summary image (file_summary_pattern); otherwise loads one image per unit
    matching file_search_pattern. Results are cached in img_cache keyed by
    all load parameters. Returns (display_2D, display_3D, display_3D_highres,
    is_layer_summary_loaded).
    """
    is_layer_summary_loaded = False
    display_2D = None
    display_3D_highres = None

    load_layer = default_layer_name
    # Some layers map to another layer's saved visualizations.
    if self.settings.caffevis_jpgvis_remap and load_layer in self.settings.caffevis_jpgvis_remap:
        load_layer = self.settings.caffevis_jpgvis_remap[load_layer]

    # Only attempt loading when the layer is allowed (or no allow-list is set).
    if (self.settings.caffevis_jpgvis_layers and load_layer in self.settings.caffevis_jpgvis_layers) or (self.settings.caffevis_jpgvis_layers is None):

        # get number of units
        units_num = layer_dat_3D.shape[0]

        # Cache key includes every parameter that changes the loaded content.
        pattern_image_key = (self.settings.caffevis_outputs_dir, load_layer, "unit_%04d", units_num, file_search_pattern, first_only, show_layer_summary, file_summary_pattern)

        # Get highres version
        display_3D_highres = self.img_cache.get(pattern_image_key, None)

        if display_3D_highres is None:
            try:
                if self.settings.caffevis_outputs_dir:
                    resize_shape = pane.data.shape
                    if show_layer_summary:
                        # load layer summary image, resized to fill the pane;
                        # stored with a leading singleton axis so it looks like
                        # a one-tile highres stack.
                        layer_summary_image_path = os.path.join(self.settings.caffevis_outputs_dir, load_layer, file_summary_pattern)
                        layer_summary_image = caffe_load_image(layer_summary_image_path, color=True, as_uint=True)
                        layer_summary_image = ensure_uint255_and_resize_without_fit(layer_summary_image, resize_shape)
                        display_3D_highres = layer_summary_image
                        display_3D_highres = np.expand_dims(display_3D_highres, 0)
                        display_2D = display_3D_highres[0]
                        is_layer_summary_loaded = True
                    else:
                        if file_search_pattern is None:
                            # No per-unit pattern requested (summary-only mode).
                            display_3D_highres = None
                        else:
                            with WithTimer('CaffeVisApp:load_image_per_unit', quiet=self.debug_level < 1):
                                # load all images
                                display_3D_highres = self.load_image_per_unit(display_3D_highres, load_layer, units_num, first_only, resize_shape, file_search_pattern)
            except IOError:
                # File does not exist, so just display disabled.
                pass
            else:
                # Cache only on successful load (else-clause skipped on IOError).
                if display_3D_highres is not None:
                    self.img_cache.set(pattern_image_key, display_3D_highres)
        else:
            # if layer found in cache, mark it as loaded
            if show_layer_summary:
                display_2D = display_3D_highres[0]
                is_layer_summary_loaded = True

    # Downsample (or zero-fill when nothing was loaded) to fit the pane grid.
    display_3D = self.downsample_display_3d(display_3D_highres, layer_dat_3D, pane, tile_cols, tile_rows)
    return display_2D, display_3D, display_3D_highres, is_layer_summary_loaded
def load_image_per_unit(self, display_3D_highres, load_layer, units_num, first_only, resize_shape, file_search_pattern):
    """Load one image per unit from per-unit folders into a (units, H, W, C) uint8 array.

    display_3D_highres may be passed in as None; the container is allocated
    lazily from the first successfully loaded image's shape. Units whose image
    fails to load are left as zeros (the bare except below is deliberate
    best-effort loading). Returns the filled array, or None when units_num is
    too large to load safely.
    """
    # limit loading
    if units_num > 1000:
        print "WARNING: load_image_per_unit was asked to load %d units, aborted to avoid hang" % (units_num)
        return None

    # for each neuron in layer
    for unit_id in range(0, units_num):

        unit_folder_path = os.path.join(self.settings.caffevis_outputs_dir, load_layer, "unit_%04d" % (unit_id), file_search_pattern)

        try:
            # Progress report every 10 units.
            if unit_id % 10 == 0:
                print "loading %s images for layer %s channel %d out of %d" % (file_search_pattern, load_layer, unit_id, units_num)

            unit_first_image = get_image_from_files(self.settings, unit_folder_path, False, resize_shape, first_only)

            # handle first generation of results container
            if display_3D_highres is None:
                unit_first_image_shape = unit_first_image.shape
                display_3D_highres = np.zeros((units_num, unit_first_image_shape[0],
                                               unit_first_image_shape[1],
                                               unit_first_image_shape[2]), dtype=np.uint8)

            # set in result
            display_3D_highres[unit_id, :, ::] = unit_first_image

        except:
            # Deliberately broad: any per-unit failure just leaves that tile black.
            print '\nAttempted to load file from %s but failed. To supress this warning, remove layer "%s" from settings.caffevis_jpgvis_layers' % \
                (unit_folder_path, load_layer)
            pass

    return display_3D_highres
def downsample_display_3d(self, display_3D_highres, layer_dat_3D, pane, tile_cols, tile_rows):
    """Downsample a (units, H, W, C) highres stack to fit the pane's tile grid.

    Returns a strided view of display_3D_highres when downsampling is needed,
    the array unchanged when it already fits, or an all-zero array shaped like
    layer_dat_3D when display_3D_highres is None (nothing to show).
    """
    if display_3D_highres is None:
        # Nothing loaded: return zeros shaped like the layer data.
        return layer_dat_3D * 0

    # Pixels available per tile; reserve 2 pixels for the selection border.
    rows_avail = pane.data.shape[0] / tile_rows - 2
    cols_avail = pane.data.shape[1] / tile_cols - 2
    stride = max(int(np.ceil(float(display_3D_highres.shape[1]) / rows_avail)),
                 int(np.ceil(float(display_3D_highres.shape[2]) / cols_avail)))

    if stride > 1:
        # Subsample spatially; unit and channel axes untouched.
        return display_3D_highres[:, ::stride, ::stride, :]
    return display_3D_highres
def load_weights_histograms(self, net, layer_name, layer_dat_3D, n_channels, pane, tile_cols, tile_rows, show_layer_summary):
    """Build (or load from cache) weight-histogram images for a layer.

    Two modes: show_layer_summary=True produces a single per-layer image
    (weights histogram beside bias histogram); otherwise one histogram per
    channel is produced and tiled. Results are cached both in img_cache and
    as PNG files under caffevis_outputs_dir. Returns (display_2D, display_3D,
    display_3D_highres, is_layer_summary_loaded).
    """
    is_layer_summary_loaded = False
    display_2D = None
    display_3D = None
    empty_display_3D = np.zeros(layer_dat_3D.shape + (3,))

    # In-memory cache keys; keyed by selected unit since the per-unit
    # histogram is regenerated for the selection.
    pattern_image_key_3d = (layer_name, "weights_histogram", show_layer_summary, self.state.selected_unit, "3D")
    pattern_image_key_2d = (layer_name, "weights_histogram", show_layer_summary, self.state.selected_unit, "2D")

    # Get highres version
    display_3D_highres = self.img_cache.get(pattern_image_key_3d, None)
    display_2D = self.img_cache.get(pattern_image_key_2d, None)

    if display_3D_highres is None or display_2D is None:
        pane_shape = pane.data.shape

        # On-disk cache locations (only when an outputs dir is configured).
        if not self.settings.caffevis_outputs_dir:
            folder_path = None
            cache_layer_weights_histogram_image_path = None
            cache_details_weights_histogram_image_path = None
        else:
            folder_path = os.path.join(self.settings.caffevis_outputs_dir, layer_name)
            cache_layer_weights_histogram_image_path = os.path.join(folder_path, 'layer_weights_histogram.png')
            cache_details_weights_histogram_image_path = os.path.join(folder_path, 'details_weights_histogram.png')

        # plotting objects needed for
        # 1. calculating size of results array
        # 2. generating weights histogram for selected unit
        # 3. generating weights histograms for all units
        import matplotlib.pyplot as plt
        fig = plt.figure(figsize=(10, 10), facecolor='white', tight_layout=False)
        ax = fig.add_subplot(111)

        def calculate_weights_histogram_for_specific_unit(channel_idx, fig, ax, do_print):
            # Render one channel's weight histogram into the shared figure and
            # write it into the enclosing display_3D_highres (closure) and to disk.
            if do_print and channel_idx % 10 == 0:
                print "calculating weights histogram for layer %s channel %d out of %d" % (layer_name, channel_idx, n_channels)

            # get vector of weights
            weights = net.params[layer_name][0].data[channel_idx].flatten()
            bias = net.params[layer_name][1].data[channel_idx]
            weights_num = len(weights)

            # create histogram
            hist, bin_edges = np.histogram(weights, bins=50)

            # generate histogram image file
            width = 0.7 * (bin_edges[1] - bin_edges[0])
            center = (bin_edges[:-1] + bin_edges[1:]) / 2
            ax.bar(center, hist, align='center', width=width, color='g')
            fig.suptitle('weights for unit %d, bias is %f\n %s weights used' % (channel_idx, bias, weights_num))
            ax.xaxis.label.set_text('weight value')
            ax.yaxis.label.set_text('count')
            figure_buffer = fig2data(fig)

            # save weight histogram as image file
            path = os.path.join(folder_path, 'unit_%04d' % channel_idx)
            mkdir_p(path)
            fig.savefig(os.path.join(path, 'weight_histogram.png'))

            display_3D_highres[channel_idx, :, ::] = figure_buffer
            # Clear axes for the next channel's plot.
            ax.cla()

        try:
            # handle generation of results container: render the empty figure
            # once just to learn the buffer shape.
            figure_buffer = fig2data(fig)
            first_shape = figure_buffer.shape
            display_3D_highres = np.zeros((n_channels, first_shape[0], first_shape[1], first_shape[2]), dtype=np.uint8)

            # try load from cache
            if show_layer_summary:
                # try load cache file for layer weight histogram
                if cache_layer_weights_histogram_image_path and os.path.exists(cache_layer_weights_histogram_image_path):
                    # load 2d image from cache file
                    display_2D = caffe_load_image(cache_layer_weights_histogram_image_path, color=True, as_uint=False)
                    display_3D_highres = np.zeros(pane_shape)
                    display_3D_highres = np.expand_dims(display_3D_highres, 0)
                    display_3D_highres[0] = display_2D
                    is_layer_summary_loaded = True
            else:
                # try load cache file for details weights histogram
                if cache_details_weights_histogram_image_path and os.path.exists(cache_details_weights_histogram_image_path):
                    # load 2d image from cache file
                    display_2D = caffe_load_image(cache_details_weights_histogram_image_path, color=True, as_uint=False)
                    # calculate weights histogram for selected unit
                    calculate_weights_histogram_for_specific_unit(self.state.selected_unit, fig, ax, do_print=False)
                    display_3D = self.downsample_display_3d(display_3D_highres, layer_dat_3D, pane, tile_cols, tile_rows)
                    # generate empty highlights
                    display_2D_highlights_only = self.prepare_tile_image(display_3D * 0, True, n_channels, tile_rows, tile_cols)
                    # if shapes are not equal, cache is invalid
                    if display_2D_highlights_only.shape == display_2D.shape:
                        # mix highlights with cached image
                        display_2D = (display_2D_highlights_only != 0) * display_2D_highlights_only + (display_2D_highlights_only == 0) * display_2D
                    else:
                        display_2D = None

            # if not loaded from cache, generate the data
            if display_2D is None:

                # calculate weights histogram image

                # check if layer has weights at all
                # NOTE(review): early return here skips the try's else-clause
                # and the fig.clf()/plt.close(fig) below — verify figures are
                # not leaked on this path.
                if not net.params.has_key(layer_name):
                    return display_2D, empty_display_3D, empty_display_3D, is_layer_summary_loaded

                # pattern_image_key_layer = (layer_name, "weights_histogram", True)
                # pattern_image_key_details = (layer_name, "weights_histogram", False)
                # self.img_cache.set(pattern_image_key_details, display_3D_highres)
                # self.img_cache.set(pattern_image_key_layer, display_3D_highres_summary)

                if show_layer_summary:
                    # Layer summary: weights histogram (left half) next to
                    # bias histogram (right half).
                    half_pane_shape = (pane_shape[0], pane_shape[1] / 2)

                    # generate weights histogram for layer
                    weights = net.params[layer_name][0].data.flatten()
                    hist, bin_edges = np.histogram(weights, bins=50)
                    weights_num = len(weights)
                    width = 0.7 * (bin_edges[1] - bin_edges[0])
                    center = (bin_edges[:-1] + bin_edges[1:]) / 2
                    ax.bar(center, hist, align='center', width=width, color='g')
                    fig.suptitle('weights for layer %s\n %s weights used' % (layer_name, weights_num))
                    ax.xaxis.label.set_text('weight value')
                    ax.yaxis.label.set_text('count')
                    figure_buffer = fig2data(fig)
                    display_3D_highres_summary_weights = ensure_uint255_and_resize_without_fit(figure_buffer, half_pane_shape)
                    ax.cla()

                    # generate bias histogram for layer
                    bias = net.params[layer_name][1].data.flatten()
                    hist, bin_edges = np.histogram(bias, bins=50)
                    bias_num = len(bias)
                    width = 0.7 * (bin_edges[1] - bin_edges[0])
                    center = (bin_edges[:-1] + bin_edges[1:]) / 2
                    ax.bar(center, hist, align='center', width=width, color='g')
                    fig.suptitle('bias for layer %s\n %s biases used' % (layer_name, bias_num))
                    ax.xaxis.label.set_text('bias value')
                    ax.yaxis.label.set_text('count')
                    figure_buffer = fig2data(fig)
                    display_3D_highres_summary_bias = ensure_uint255_and_resize_without_fit(figure_buffer, half_pane_shape)

                    display_3D_highres_summary = np.concatenate((display_3D_highres_summary_weights, display_3D_highres_summary_bias), axis=1)
                    display_3D_highres_summary = np.expand_dims(display_3D_highres_summary, 0)
                    display_3D_highres = display_3D_highres_summary
                    display_2D = display_3D_highres[0]
                    is_layer_summary_loaded = True

                    if folder_path:
                        mkdir_p(folder_path)
                        # Convert RGB [0,1] HWC to BGR float CHW for saving.
                        save_caffe_image(display_2D[:,:,::-1].astype(np.float32).transpose((2,0,1)), cache_layer_weights_histogram_image_path)
                    else:
                        print "WARNING: unable to save weight histogram to cache since caffevis_outputs_dir is not set"
                else:
                    # for each channel
                    for channel_idx in xrange(n_channels):
                        calculate_weights_histogram_for_specific_unit(channel_idx, fig, ax, do_print=True)

                    display_3D = self.downsample_display_3d(display_3D_highres, layer_dat_3D, pane, tile_cols, tile_rows)

                    # generate display of details weights histogram image
                    display_2D = self.prepare_tile_image(display_3D, False, n_channels, tile_rows, tile_cols)

                    if folder_path:
                        # save histogram image to cache
                        mkdir_p(folder_path)
                        save_caffe_image(display_2D[:,:,::-1].astype(np.float32).transpose((2,0,1)), cache_details_weights_histogram_image_path)
                    else:
                        print "WARNING: unable to save weight histogram to cache since caffevis_outputs_dir is not set"

                    # generate empty highlights
                    display_2D_highlights_only = self.prepare_tile_image(display_3D * 0, True, n_channels, tile_rows, tile_cols)
                    # mix highlights with cached image
                    display_2D = (display_2D_highlights_only != 0) * display_2D_highlights_only + (display_2D_highlights_only == 0) * display_2D

        except IOError:
            return display_2D, empty_display_3D, empty_display_3D, is_layer_summary_loaded
            # File does not exist, so just display disabled.
            pass
        else:
            # Cache results only when no exception occurred.
            self.img_cache.set(pattern_image_key_3d, display_3D_highres)
            self.img_cache.set(pattern_image_key_2d, display_2D)

        fig.clf()
        plt.close(fig)
    else:
        # here we can safely assume that display_2D is not None, so we only need to check if show_layer_summary was requested
        if show_layer_summary:
            is_layer_summary_loaded = True
        pass

    if display_3D is None:
        display_3D = self.downsample_display_3d(display_3D_highres, layer_dat_3D, pane, tile_cols, tile_rows)

    return display_2D, display_3D, display_3D_highres, is_layer_summary_loaded
def load_weights_correlation(self, net, layer_name, layer_dat_3D, n_channels, pane, tile_cols, tile_rows, show_layer_summary):
is_layer_summary_loaded = False
display_2D = None
display_3D = None
empty_display_3D = np.zeros(layer_dat_3D.shape + (3,))
pattern_image_key_3d = (layer_name, "weights_correlation", show_layer_summary, self.state.selected_unit, "3D")
pattern_image_key_2d = (layer_name, "weights_correlation", show_layer_summary, self.state.selected_unit, "2D")
# Get highres version
display_3D_highres = self.img_cache.get(pattern_image_key_3d, None)
display_2D = self.img_cache.get(pattern_image_key_2d, None)
if display_3D_highres is None or display_2D is None:
pane_shape = pane.data.shape
if not self.settings.caffevis_outputs_dir:
folder_path = None
cache_layer_weights_correlation_image_path = None
else:
folder_path = os.path.join(self.settings.caffevis_outputs_dir, layer_name)
cache_layer_weights_correlation_image_path = os.path.join(folder_path, 'layer_weights_correlation.png')
try:
# try load cache file for layer weight correlation
if cache_layer_weights_correlation_image_path and os.path.exists(cache_layer_weights_correlation_image_path):
# load 2d image from cache file
display_2D = caffe_load_image(cache_layer_weights_correlation_image_path, color=True, as_uint=False)
display_3D_highres = np.zeros(pane_shape)
display_3D_highres = np.expand_dims(display_3D_highres, 0)
display_3D_highres[0] = display_2D
is_layer_summary_loaded = True
# if not loaded from cache, generate the data
if display_2D is None:
# calculate weights correlation image
# check if layer has weights at all
if not net.params.has_key(layer_name):
return display_2D, empty_display_3D, empty_display_3D, is_layer_summary_loaded
# skip layers with only one channel
if n_channels == 1:
return display_2D, empty_display_3D, empty_display_3D, is_layer_summary_loaded
data_unroll = net.params[layer_name][0].data.reshape((n_channels, -1)) # Note: no copy eg (96,3025). Does nothing if not is_spatial
corr = np.corrcoef(data_unroll)
# fix possible NANs
corr = np.nan_to_num(corr)
np.fill_diagonal(corr, 1)
# sort correlation matrix
indexes = np.lexsort(corr)
sorted_corr = corr[indexes, :][:, indexes]
# plot correlation matrix
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(10, 10), facecolor='white', tight_layout=True)
plt.subplot(1, 1, 1)
plt.imshow(sorted_corr, interpolation='nearest', vmin=-1, vmax=1)
plt.colorbar()
plt.title('channels weights correlation matrix for layer %s' % (layer_name))
figure_buffer = fig2data(fig)
plt.close()
display_3D_highres_summary = ensure_uint255_and_resize_without_fit(figure_buffer, pane_shape)
display_3D_highres_summary = np.expand_dims(display_3D_highres_summary, 0)
display_3D_highres = display_3D_highres_summary
display_2D = display_3D_highres[0]
is_layer_summary_loaded = True
if folder_path:
mkdir_p(folder_path)
save_caffe_image(display_2D[:,:,::-1].astype(np.float32).transpose((2,0,1)), cache_layer_weights_correlation_image_path)
else:
print "WARNING: unable to save weight correlationto cache since caffevis_outputs_dir is not set"
self.img_cache.set(pattern_image_key_3d, display_3D_highres)
self.img_cache.set(pattern_image_key_2d, display_2D)
except IOError:
return display_2D, empty_display_3D, empty_display_3D, is_layer_summary_loaded
# File does not exist, so just display disabled.
pass
else:
# here we can safely assume that display_2D is not None, so we only need to check if show_layer_summary was requested
if show_layer_summary:
is_layer_summary_loaded = True
pass
if display_3D is None:
display_3D = self.downsample_display_3d(display_3D_highres, layer_dat_3D, pane, tile_cols, tile_rows)
return display_2D, display_3D, display_3D_highres, is_layer_summary_loaded
def load_maximal_activations_histograms(self, default_layer_name, layer_dat_3D, n_tiles, pane, tile_cols, tile_rows, show_layer_summary):
display_2D = None
empty_display_3D = np.zeros(layer_dat_3D.shape + (3,))
is_layer_summary_loaded = False
maximum_activation_histogram_data_file = os.path.join(settings.caffevis_outputs_dir, 'find_max_acts_output.pickled')
pattern_image_key = (maximum_activation_histogram_data_file, default_layer_name, "max histograms", show_layer_summary)
# Get highres version
display_3D_highres = self.img_cache.get(pattern_image_key, None)
pane_shape = pane.data.shape
if display_3D_highres is None:
try:
# load pickle file
net_max_tracker = load_max_tracker_from_file(maximum_activation_histogram_data_file)
if not net_max_tracker.max_trackers.has_key(default_layer_name):
return display_2D, empty_display_3D, empty_display_3D, is_layer_summary_loaded
# check if
if not hasattr(net_max_tracker.max_trackers[default_layer_name], 'channel_to_histogram'):
print "ERROR: file %s is missing the field channel_to_histogram, try rerun find_max_acts to generate it" % (maximum_activation_histogram_data_file)
return display_2D, empty_display_3D, empty_display_3D, is_layer_summary_loaded
channel_to_histogram = net_max_tracker.max_trackers[default_layer_name].channel_to_histogram
def channel_to_histogram_values(channel_idx):
# get channel data
hist, bin_edges = channel_to_histogram[channel_idx]
return hist, bin_edges
display_3D_highres_list = [display_3D_highres, display_3D_highres]
def process_channel_figure(channel_idx, fig):
figure_buffer = fig2data(fig)
# handle first generation of results container
if display_3D_highres_list[0] is None:
first_shape = figure_buffer.shape
display_3D_highres_list[0] = np.zeros((len(channel_to_histogram), first_shape[0],
first_shape[1],
first_shape[2]), dtype=np.uint8)
display_3D_highres_list[0][channel_idx, :, ::] = figure_buffer
pass
def process_layer_figure(fig):
figure_buffer = fig2data(fig)
display_3D_highres_list[1] = ensure_uint255_and_resize_without_fit(figure_buffer, pane_shape)
display_3D_highres_list[1] = np.expand_dims(display_3D_highres_list[1], 0)
pass
n_channels = len(channel_to_histogram)
find_maxes.max_tracker.prepare_max_histogram(default_layer_name, n_channels, channel_to_histogram_values, process_channel_figure, process_layer_figure)
pattern_image_key_layer = (maximum_activation_histogram_data_file, default_layer_name, "max histograms",True)
pattern_image_key_details = (maximum_activation_histogram_data_file, default_layer_name, "max histograms",False)
self.img_cache.set(pattern_image_key_details, display_3D_highres_list[0])
self.img_cache.set(pattern_image_key_layer, display_3D_highres_list[1])
if show_layer_summary:
display_3D_highres = display_3D_highres_list[1]
display_2D = display_3D_highres[0]
is_layer_summary_loaded = True
else:
display_3D_highres = display_3D_highres_list[0]
except IOError:
return display_2D, empty_display_3D, empty_display_3D, is_layer_summary_loaded
# File does not exist, so just display disabled.
pass
else:
# if layer found in cache, mark it as loaded
if show_layer_summary:
display_2D = display_3D_highres[0]
is_layer_summary_loaded = True
display_3D = self.downsample_display_3d(display_3D_highres, layer_dat_3D, pane, tile_cols, tile_rows)
return display_2D, display_3D, display_3D_highres, is_layer_summary_loaded
def get_processed_selected_unit(self, all_units, resize_shape, use_colored_data = False):
unit_data = all_units[self.state.selected_unit]
if self.settings.caffevis_keep_aspect_ratio:
unit_data_resize = resize_to_fit(unit_data, resize_shape)
else:
unit_data_resize = resize_without_fit(unit_data, resize_shape)
if self.state.pattern_mode == PatternMode.OFF:
if self.state.last_frame is None:
pass
input_image = SiameseHelper.get_image_from_frame(self.state.last_frame, self.state.settings.is_siamese,
resize_shape, self.state.siamese_view_mode)
normalized_mask = unit_data_resize
if use_colored_data:
unit_data_resize = self.state.gray_to_colormap(unit_data_resize)
normalized_mask = np.tile(normalized_mask[:, :, np.newaxis], 3)
if self.state.input_overlay_option == InputOverlayOption.OFF:
pass
elif self.state.input_overlay_option == InputOverlayOption.OVER_ACTIVE:
unit_data_resize = normalized_mask * input_image + (1 - normalized_mask) * unit_data_resize
elif self.state.input_overlay_option == InputOverlayOption.OVER_INACTIVE:
unit_data_resize = (normalized_mask < 0.1) * input_image + (normalized_mask >= 0.1) * unit_data_resize
pass
unit_data_resize = ensure_uint255(unit_data_resize)
return unit_data_resize
def _mix_input_overlay_with_colormap_old(self, unit_data, resize_shape, input_image):
if self.settings.caffevis_keep_aspect_ratio:
unit_data_resize = ensure_uint255_and_resize_to_fit(unit_data, resize_shape)
input_image_resize = ensure_uint255_and_resize_to_fit(input_image, resize_shape)
else:
unit_data_resize = ensure_uint255_and_resize_without_fit(unit_data, resize_shape)
input_image_resize = ensure_uint255_and_resize_without_fit(input_image, resize_shape)
normalized_mask = unit_data_resize / 255.0
normalized_mask = np.tile(normalized_mask[:, :, np.newaxis], 3)
colored_unit_data_resize = self.state.gray_to_colormap(unit_data_resize)
colored_unit_data_resize = ensure_uint255(colored_unit_data_resize)
if len(colored_unit_data_resize.shape) == 2:
colored_unit_data_resize = np.tile(colored_unit_data_resize[:, :, np.newaxis], 3)
if self.state.input_overlay_option == InputOverlayOption.OFF:
pass
elif self.state.input_overlay_option == InputOverlayOption.OVER_ACTIVE:
colored_unit_data_resize = np.array(normalized_mask * input_image_resize + (1 - normalized_mask) * colored_unit_data_resize, dtype = 'uint8')
elif self.state.input_overlay_option == InputOverlayOption.OVER_INACTIVE:
MAGIC_THRESHOLD_NUMBER = 0.3
colored_unit_data_resize = (normalized_mask < MAGIC_THRESHOLD_NUMBER) * input_image_resize + (normalized_mask >= MAGIC_THRESHOLD_NUMBER) * colored_unit_data_resize
pass
return colored_unit_data_resize
def _mix_input_overlay_with_colormap(self, unit_data, resize_shape, input_image):
# resize
if self.settings.caffevis_keep_aspect_ratio:
input_image_resize = resize_to_fit(input_image, resize_shape)
unit_data_resize = resize_to_fit(unit_data, resize_shape)
else:
input_image_resize = resize_without_fit(input_image, resize_shape)
unit_data_resize = resize_without_fit(unit_data, resize_shape)
sigma = 0.02 * max(unit_data_resize.shape[0:2])
blur_unit_data_resize = cv2.GaussianBlur(unit_data_resize, (0, 0), sigma)
normalized_blur_unit_data_resize = norm01(blur_unit_data_resize)
colored_normalized_blur_unit_data_resize = self.state.gray_to_colormap(normalized_blur_unit_data_resize)
if len(colored_normalized_blur_unit_data_resize.shape) == 2:
colored_normalized_blur_unit_data_resize = np.tile(colored_normalized_blur_unit_data_resize[:, :, np.newaxis], 3)
if self.state.input_overlay_option == InputOverlayOption.OFF:
attMap = colored_normalized_blur_unit_data_resize
pass
elif self.state.input_overlay_option == InputOverlayOption.OVER_ACTIVE:
MAGIC_NUMBER = 0.8
boost_normalized_blur_unit_data_resize = normalized_blur_unit_data_resize ** MAGIC_NUMBER
boost_normalized_blur_unit_data_resize = boost_normalized_blur_unit_data_resize.reshape(boost_normalized_blur_unit_data_resize.shape + (1,))
attMap = (boost_normalized_blur_unit_data_resize) * input_image_resize + (1 - boost_normalized_blur_unit_data_resize) * colored_normalized_blur_unit_data_resize
elif self.state.input_overlay_option == InputOverlayOption.OVER_INACTIVE:
MAGIC_NUMBER = 0.8
boost_normalized_blur_unit_data_resize = normalized_blur_unit_data_resize ** MAGIC_NUMBER
boost_normalized_blur_unit_data_resize = boost_normalized_blur_unit_data_resize.reshape(boost_normalized_blur_unit_data_resize.shape + (1,))
attMap = (1 - boost_normalized_blur_unit_data_resize) * input_image_resize + (boost_normalized_blur_unit_data_resize) * colored_normalized_blur_unit_data_resize
return attMap
def _draw_aux_pane(self, pane, layer_data_normalized):
pane.data[:] = to_255(self.settings.window_background)
mode = None
with self.state.lock:
if self.state.cursor_area == 'bottom':
mode = 'selected'
else:
mode = 'prob_labels'
if mode == 'selected':
unit_data_resize = self.get_processed_selected_unit(layer_data_normalized, pane.data.shape, use_colored_data=False)
pane.data[0:unit_data_resize.shape[0], 0:unit_data_resize.shape[1], :] = unit_data_resize
elif mode == 'prob_labels':
self._draw_prob_labels_pane(pane)
def _draw_back_pane(self, pane):
mode = None
with self.state.lock:
back_enabled = self.state.back_enabled
back_mode = self.state.back_mode
back_view_option = self.state.back_view_option
back_what_to_disp = self.get_back_what_to_disp()
if back_what_to_disp == 'disabled':
pane.data[:] = to_255(self.settings.window_background)
elif back_what_to_disp == 'stale':
pane.data[:] = to_255(self.settings.stale_background)
else: # One of the backprop modes is enabled and the back computation (gradient or deconv) is up to date
# define helper function to run processing once or twice, in case of siamese network
def run_processing_once_or_twice(resize_shape, process_image_fn):
has_pair_inputs = False
no_spatial_info = False;
# if selection is frozen we use the currently selected layer as target for visualization
if self.state.backprop_selection_frozen:
if self.state.siamese_view_mode_has_two_images():
grad_blob1, grad_blob2 = self.state.get_siamese_selected_diff_blobs(self.net)
if len(grad_blob1.shape) == 1:
no_spatial_info = True
if len(grad_blob1.shape) == 3:
grad_blob1 = grad_blob1.transpose((1, 2, 0)) # c01 -> 01c
grad_blob2 = grad_blob2.transpose((1, 2, 0)) # c01 -> 01c
has_pair_inputs = True
else:
grad_blob = self.state.get_single_selected_diff_blob(self.net)
if len(grad_blob.shape) == 1:
no_spatial_info = True
if len(grad_blob.shape) == 3:
grad_blob = grad_blob.transpose((1, 2, 0)) # c01 -> 01c
# if selection is not frozen we use the input layer as target for visualization
if (not self.state.backprop_selection_frozen) or no_spatial_info:
grad_blob = self.net.blobs['data'].diff
grad_blob = grad_blob[0] # bc01 -> c01
grad_blob = grad_blob.transpose((1, 2, 0)) # c01 -> 01c
if self._net_channel_swap_inv:
grad_blob = grad_blob[:, :, self._net_channel_swap_inv] # e.g. BGR -> RGB
# split image to image0 and image1
if self.settings.is_siamese:
# split image to image0 and image1
if self.settings.siamese_input_mode == 'concat_channelwise':
[grad_blob1, grad_blob2] = np.split(grad_blob, 2, axis=2)
elif self.settings.siamese_input_mode == 'concat_along_width':
half_width = grad_blob.shape[1] / 2
grad_blob1 = grad_blob[:, :half_width, :]
grad_blob2 = grad_blob[:, half_width:, :]
has_pair_inputs = True
# if siamese network, run processing twice
if self.settings.is_siamese:
# combine image0 and image1
if self.state.siamese_view_mode == SiameseViewMode.FIRST_IMAGE:
# run processing on image0
return process_image_fn(grad_blob, resize_shape, self.state.last_frame[0])
elif self.state.siamese_view_mode == SiameseViewMode.SECOND_IMAGE:
# run processing on image1
return process_image_fn(grad_blob, resize_shape, self.state.last_frame[1])
elif self.state.siamese_view_mode == SiameseViewMode.BOTH_IMAGES and has_pair_inputs:
# resize each gradient image to half the pane size
half_pane_shape = (resize_shape[0], resize_shape[1] / 2)
# run processing on both image0 and image1
image1 = process_image_fn(grad_blob1, half_pane_shape, self.state.last_frame[0])
image2 = process_image_fn(grad_blob2, half_pane_shape, self.state.last_frame[1])
image1 = resize_without_fit(image1[:], half_pane_shape)
image2 = resize_without_fit(image2[:], half_pane_shape)
# generate the pane image by concatenating both images
return np.concatenate((image1, image2), axis=1)
elif self.state.siamese_view_mode == SiameseViewMode.BOTH_IMAGES and not has_pair_inputs:
processed_input = self.state.convert_image_pair_to_network_input_format(self.settings, self.state.last_frame, resize_shape)
return process_image_fn(grad_blob, resize_shape, processed_input)
else:
return process_image_fn(grad_blob, resize_shape, self.state.last_frame)
# else, normal network, run processing once
else:
# run processing on image
return process_image_fn(grad_blob, resize_shape, self.state.last_frame)
raise Exception("flow should not arrive here")
if back_view_option == BackpropViewOption.RAW:
def do_raw(grad_blob, resize_shape, input_image):
if len(grad_blob.shape) == 3 and grad_blob.shape[2] != 3:
return np.zeros(resize_shape)
return norm01c(grad_blob, 0)
grad_img = run_processing_once_or_twice(pane.data.shape, do_raw)
elif back_view_option == BackpropViewOption.GRAY:
def do_gray(grad_blob, resize_shape, input_image):
return norm01c(grad_blob.mean(axis=2), 0)
grad_img = run_processing_once_or_twice(pane.data.shape, do_gray)
elif back_view_option == BackpropViewOption.NORM:
def do_norm(grad_blob, resize_shape, input_image):
norm_grad_blob = norm01(np.linalg.norm(grad_blob, axis=2))
return self._mix_input_overlay_with_colormap(norm_grad_blob, resize_shape, input_image)
grad_img = run_processing_once_or_twice(pane.data.shape, do_norm)
elif back_view_option == BackpropViewOption.NORM_BLUR:
def do_norm_blur(grad_blob, resize_shape, input_image):
grad_blob = np.linalg.norm(grad_blob, axis=2)
cv2.GaussianBlur(grad_blob, (0, 0), self.settings.caffevis_grad_norm_blur_radius, grad_blob)
norm_grad_blob = norm01(grad_blob)
return self._mix_input_overlay_with_colormap(norm_grad_blob, resize_shape, input_image)
grad_img = run_processing_once_or_twice(pane.data.shape, do_norm_blur)
elif back_view_option == BackpropViewOption.POS_SUM:
def do_pos_sum(grad_blob, resize_shape, input_image):
grad_blob = np.maximum(grad_blob.sum(-1), 0)
norm_grad_blob = norm01(grad_blob)
return self._mix_input_overlay_with_colormap(norm_grad_blob, resize_shape, input_image)
grad_img = run_processing_once_or_twice(pane.data.shape, do_pos_sum)
elif back_view_option == BackpropViewOption.HISTOGRAM:
def do_histogram(grad_blob, resize_shape, input_image):
return array_histogram(grad_blob, half_pane_shape, BackpropMode.to_string(back_mode)+' histogram', 'values', 'count')
half_pane_shape = (pane.data.shape[0],pane.data.shape[1]/2,3)
grad_img = run_processing_once_or_twice(pane.data.shape, do_histogram)
else:
raise Exception('Invalid option for back_view_option: %s' % (back_view_option))
# If necessary, re-promote from grayscale to color
if len(grad_img.shape) == 2:
grad_img = np.tile(grad_img[:,:,np.newaxis], 3)
if self.settings.caffevis_keep_aspect_ratio:
grad_img_resize = ensure_uint255_and_resize_to_fit(grad_img, pane.data.shape)
else:
grad_img_resize = ensure_uint255_and_resize_without_fit(grad_img, pane.data.shape)
pane.data[0:grad_img_resize.shape[0], 0:grad_img_resize.shape[1], :] = grad_img_resize
def _draw_jpgvis_pane(self, pane):
pane.data[:] = to_255(self.settings.window_background)
with self.state.lock:
state_layer_name, state_selected_unit, cursor_area, show_unit_jpgs = self.state.get_default_layer_name(), self.state.selected_unit, self.state.cursor_area, self.state.show_unit_jpgs
if self.settings.caffevis_jpgvis_remap and state_layer_name in self.settings.caffevis_jpgvis_remap:
img_key_layer = self.settings.caffevis_jpgvis_remap[state_layer_name]
else:
img_key_layer = state_layer_name
if ((self.settings.caffevis_jpgvis_layers and img_key_layer in self.settings.caffevis_jpgvis_layers) or (self.settings.caffevis_jpgvis_layers is None)) and \
cursor_area == 'bottom' and show_unit_jpgs:
img_key = (img_key_layer, state_selected_unit, pane.data.shape, self.state.show_maximal_score)
img_resize = self.img_cache.get(img_key, None)
if img_resize is None:
# If img_resize is None, loading has not yet been attempted, so show stale image and request load by JPGVisLoadingThread
with self.state.lock:
self.state.jpgvis_to_load_key = img_key
pane.data[:] = to_255(self.settings.stale_background)
elif img_resize.nbytes == 0:
# This is the sentinal value when the image is not
# found, i.e. loading was already attempted but no jpg
# assets were found. Just display disabled.
pane.data[:] = to_255(self.settings.window_background)
else:
# Show image
pane.data[:img_resize.shape[0], :img_resize.shape[1], :] = img_resize
else:
# Will never be available
pane.data[:] = to_255(self.settings.window_background)
def handle_key(self, key, panes):
return self.state.handle_key(key)
def handle_mouse_left_click(self, x, y, flags, param, panes):
self.state.handle_mouse_left_click(x, y, flags, param, panes, self.header_boxes, self.buttons_boxes)
def get_back_what_to_disp(self):
'''Whether to show back diff information or stale or disabled indicator'''
if (self.state.cursor_area == 'top' and not self.state.backprop_selection_frozen) or not self.state.back_enabled:
return 'disabled'
elif self.state.back_stale:
return 'stale'
else:
return 'normal'
def set_debug(self, level):
self.debug_level = level
self.proc_thread.debug_level = level
self.jpgvis_thread.debug_level = level
def draw_help(self, help_pane, locy):
defaults = {'face': getattr(cv2, self.settings.help_face),
'fsize': self.settings.help_fsize,
'clr': to_255(self.settings.help_clr),
'thick': self.settings.help_thick}
loc_base = self.settings.help_loc[::-1] # Reverse to OpenCV c,r order
locx = loc_base[0]
lines = []
lines.append([FormattedString('DeepVis keys', defaults)])
kl,_ = self.bindings.get_key_help('sel_left')
kr,_ = self.bindings.get_key_help('sel_right')
ku,_ = self.bindings.get_key_help('sel_up')
kd,_ = self.bindings.get_key_help('sel_down')
klf,_ = self.bindings.get_key_help('sel_left_fast')
krf,_ = self.bindings.get_key_help('sel_right_fast')
kuf,_ = self.bindings.get_key_help('sel_up_fast')
kdf,_ = self.bindings.get_key_help('sel_down_fast')
keys_nav_0 = ','.join([kk[0] for kk in (kl, kr, ku, kd)])
keys_nav_1 = ''
if len(kl)>1 and len(kr)>1 and len(ku)>1 and len(kd)>1:
keys_nav_1 += ' or '
keys_nav_1 += ','.join([kk[1] for kk in (kl, kr, ku, kd)])
keys_nav_f = ','.join([kk[0] for kk in (klf, krf, kuf, kdf)])
nav_string = 'Navigate with %s%s. Use %s to move faster.' % (keys_nav_0, keys_nav_1, keys_nav_f)
lines.append([FormattedString('', defaults, width=120, align='right'),
FormattedString(nav_string, defaults)])
for tag in ('help_mode', 'static_file_increment', 'static_file_decrement', 'sel_layer_left', 'sel_layer_right',
'', 'next_pattern_mode', 'pattern_first_only', '', 'next_input_overlay', 'next_ez_back_mode_loop',
'next_back_view_option', 'next_color_map', '', 'freeze_back_unit', 'show_back', 'zoom_mode',
'siamese_view_mode', 'toggle_maximal_score', 'boost_gamma', 'boost_individual', 'freeze_cam',
'toggle_input_mode', 'stretch_mode', '', 'reset_state', 'quit'):
if (tag == ''):
lines.append([FormattedString('', defaults)])
else:
key_strings, help_string = self.bindings.get_key_help(tag)
label = '%10s:' % (','.join(key_strings))
lines.append([FormattedString(label, defaults, width=120, align='right'),
FormattedString(help_string, defaults)])
locy, boxes = cv2_typeset_text(help_pane.data, lines, (locx, locy),
line_spacing = self.settings.help_line_spacing)
return locy
| 53.089341 | 196 | 0.63118 |
78ae7709adc0ab884678ce738677fe10d21a247c | 7,118 | py | Python | mycroft_voice_satellite/__init__.py | Joanguitar/HiveMind-voice-sat | f1ec8c3fa9c5ff3b87389993bdd081f7fc647620 | [
"Apache-2.0"
] | null | null | null | mycroft_voice_satellite/__init__.py | Joanguitar/HiveMind-voice-sat | f1ec8c3fa9c5ff3b87389993bdd081f7fc647620 | [
"Apache-2.0"
] | null | null | null | mycroft_voice_satellite/__init__.py | Joanguitar/HiveMind-voice-sat | f1ec8c3fa9c5ff3b87389993bdd081f7fc647620 | [
"Apache-2.0"
] | null | null | null | from mycroft_voice_satellite.speech.listener import RecognizerLoop
from mycroft_voice_satellite.configuration import CONFIGURATION
from jarbas_hive_mind.slave.terminal import HiveMindTerminalProtocol, \
HiveMindTerminal
from jarbas_hive_mind import HiveMindConnection
from ovos_utils.log import LOG
from ovos_utils import create_daemon
from ovos_utils.messagebus import Message
from tempfile import gettempdir
from os.path import join, isdir
from os import makedirs
from mycroft_voice_satellite.playback import play_audio, play_mp3, play_ogg, \
play_wav, resolve_resource_file
from ovos_plugin_manager.tts import OVOSTTSFactory
class JarbasVoiceTerminalProtocol(HiveMindTerminalProtocol):
def onOpen(self):
super().onOpen()
create_daemon(self.factory.start_listening)
def onClose(self, wasClean, code, reason):
super().onClose(wasClean, code, reason)
if "WebSocket connection upgrade failed" in reason:
utterance = "hive mind refused connection, invalid password"
self.factory.speak(utterance)
else:
self.factory.stop_listening()
class JarbasVoiceTerminal(HiveMindTerminal):
protocol = JarbasVoiceTerminalProtocol
platform = "JarbasVoiceTerminalV2.1"
def __init__(self, config=CONFIGURATION, *args, **kwargs):
super().__init__(*args, **kwargs)
self.config = config
self.loop = RecognizerLoop(self.config)
self.tts = OVOSTTSFactory.create(self.config)
LOG.debug("Using TTS engine: " + self.tts.__class__.__name__)
# Voice Output
def speak(self, utterance, lang=None):
lang = lang or self.config.get('lang', 'en-us')
LOG.info("SPEAK " + lang + ": " + utterance)
temppath = join(gettempdir(), self.tts.tts_name)
if not isdir(temppath):
makedirs(temppath)
audio_file = join(temppath, str(hash(utterance))[1:] +
"." + self.tts.audio_ext)
self.tts.get_tts(utterance, audio_file, lang=lang)
try:
if audio_file.endswith(".wav"):
play_wav(audio_file).wait()
elif audio_file.endswith(".mp3"):
play_mp3(audio_file).wait()
elif audio_file.endswith(".ogg"):
play_ogg(audio_file).wait()
else:
play_audio(audio_file).wait()
except Exception as e:
LOG.warning(e)
# Voice Input
def handle_record_begin(self):
LOG.info("Begin Recording...")
def handle_record_end(self):
LOG.info("End Recording...")
def handle_awoken(self):
""" Forward mycroft.awoken to the messagebus. """
LOG.info("Listener is now Awake: ")
def handle_wakeword(self, event):
LOG.info("Wakeword Detected: " + event['utterance'])
def handle_utterance(self, event):
context = {'platform': self.platform, "source": self.peer,
'destination': "hive_mind"}
LOG.debug(event)
msg = {"data": {"utterances": event['utterances'], "lang": event['lang']},
"type": "recognizer_loop:utterance",
"context": context}
self.send_to_hivemind_bus(msg)
def handle_ambient_noise(self):
self.recognizer.trigger_ambient_noise_adjustment()
def handle_unknown(self):
LOG.info("mycroft.speech.recognition.unknown")
def handle_hotword(self, event):
config = self.config.get("listener", {})
ww = config.get("wake_word", "hey mycroft")
suw = config.get("stand_up_word", "wake up")
if event["hotword"] != ww and event["hotword"] != suw:
LOG.info("Hotword Detected: " + event['hotword'])
def handle_sleep(self):
self.loop.sleep()
def handle_wake_up(self, event):
self.loop.awaken()
def handle_mic_mute(self, event):
self.loop.mute()
def handle_mic_unmute(self, event):
self.loop.unmute()
def handle_audio_start(self, event):
"""
Mute recognizer loop
"""
self.loop.mute()
def handle_audio_end(self, event):
"""
Request unmute, if more sources has requested the mic to be muted
it will remain muted.
"""
self.loop.unmute() # restore
def handle_stop(self, event):
"""
Handler for mycroft.stop, i.e. button press
"""
self.loop.force_unmute()
def start_listening(self):
self.loop.on('recognizer_loop:utterance',
self.handle_utterance)
self.loop.on('recognizer_loop:record_begin',
self.handle_record_begin)
self.loop.on('recognizer_loop:awoken', self.handle_awoken)
self.loop.on('recognizer_loop:wakeword', self.handle_wakeword)
self.loop.on('recognizer_loop:hotword', self.handle_hotword)
self.loop.on('recognizer_loop:record_end',
self.handle_record_end)
self.loop.on('recognizer_loop:ambient_noise',
self.handle_ambient_noise)
self.loop.run()
def stop_listening(self):
self.loop.remove_listener('recognizer_loop:utterance',
self.handle_utterance)
self.loop.remove_listener('recognizer_loop:record_begin',
self.handle_record_begin)
self.loop.remove_listener('recognizer_loop:awoken',
self.handle_awoken)
self.loop.remove_listener('recognizer_loop:wakeword',
self.handle_wakeword)
self.loop.remove_listener('recognizer_loop:hotword',
self.handle_hotword)
self.loop.remove_listener('recognizer_loop:record_end',
self.handle_record_end)
self.loop.remove_listener('recognizer_loop:ambient_noise',
self.handle_ambient_noise)
# parsed protocol messages
def handle_incoming_mycroft(self, message):
assert isinstance(message, Message)
if message.msg_type == "speak":
utterance = message.data["utterance"]
lang = message.data.get('lang', self.config.get('lang', 'en-us'))
self.speak(utterance, lang)
if message.data["expect_response"]:
self.loop.responsive_recognizer.trigger_listen(lang=lang)
elif message.msg_type == "hive.complete_intent_failure":
LOG.error("complete intent failure")
self.speak('I don\'t know how to answer that')
def connect_to_hivemind(config=CONFIGURATION, host="wss://127.0.0.1",
port=5678, name="JarbasVoiceTerminal",
access_key="RESISTENCEisFUTILE",
crypto_key="resistanceISfutile"):
con = HiveMindConnection(host, port)
terminal = JarbasVoiceTerminal(config=config,
crypto_key=crypto_key,
headers=con.get_headers(name, access_key))
con.connect(terminal)
| 37.661376 | 82 | 0.619837 |
ca777ec6572b0cd05006ef425459e9e1ee3c179b | 5,188 | py | Python | pwnlib/encoders/mips/xor.py | zaratec/pwntools | 8793decd1c9b8c822e3db6c27b9cbf6e8cddfeba | [
"MIT"
] | 1 | 2021-01-11T07:11:22.000Z | 2021-01-11T07:11:22.000Z | pwnlib/encoders/mips/xor.py | zerocool443/pwntools | 76fe04e3560419380108b398310cc35316b35dfb | [
"MIT"
] | null | null | null | pwnlib/encoders/mips/xor.py | zerocool443/pwntools | 76fe04e3560419380108b398310cc35316b35dfb | [
"MIT"
] | 1 | 2021-11-14T01:36:34.000Z | 2021-11-14T01:36:34.000Z | #!/usr/bin/env python2
# Source:
# https://github.com/zcutlip/bowcaster/blob/master/src/bowcaster/encoders/mips.py
#
# Copyright (c) 2013 Zachary Cutlip <uid000@gmail.com>,
# 2013 Tactical Network Solutions, LLC
#
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from ... import shellcraft
from ...asm import asm
from ...context import context
from ...util.fiddling import xor_key
from ..encoder import Encoder
decoders = {
'little': ''.join([
"SIZ2SIZ1\x0e\x24", # li t6,-5
"\x27\x70\xc0\x01", # nor t6,t6,zero
"\xa3\xff\x0b\x24", # li t3,-93
"\x26\x40\xce\x01", # xor t0,t6,t6
"\xff\xff\x08\x21", # addi t0,t0,-1
"\xff\xff\x10\x05", # bltzal t0,14 <next>
"\x82\x82\x08\x28", # slti t0,zero,-32126
"\xe2\xff\xfd\x23", # addi sp,ra,-30
"\x27\x58\x60\x01", # nor t3,t3,zero
"\x21\xc8\xeb\x03", # addu t9,ra,t3
"\x82\x82\x17\x28", # slti s7,zero,-32126
"\xfc\xff\x31\x8f", # lw s1,-4(t9)
"\xfb\xff\x0c\x24", # li t4,-5
"\x27\x60\x80\x01", # nor t4,t4,zero
"\xfd\xff\x8f\x21", # addi t7,t4,-3
"\xfc\xff\x28\x8f", # lw t0,-4(t9)
"\x21\xb8\xef\x02", # addu s7,s7,t7
"\x26\x18\x11\x01", # xor v1,t0,s1
"\x2b\xf0\xee\x02", # sltu s8,s7,t6
"\xfc\xff\x23\xaf", # sw v1,-4(t9)
"\xfa\xff\x1e\x14", # bne zero,s8,3c <loop>
"\x21\xc8\x2c\x03", # addu t9,t9,t4
"\xfd\xff\x86\x21", # addi a2,t4,-3
"\xf8\xff\xa6\xaf", # sw a2,-8(sp)
"\x26\x28\xce\x01", # xor a1,t6,t6
"\xfc\xff\xa5\xaf", # sw a1,-4(sp)
"\xf8\xff\xa4\x27", # addiu a0,sp,-8
"\x46\x10\x02\x24", # li v0,4166
"\x0c\x54\x4a\x01" # syscall 0x52950
]),
'big': ''.join([
"\x24\x0eSIZ1SIZ2", # li t6,-5
"\x01\xc0\x70\x27", # nor t6,t6,zero
"\x24\x0b\xff\xa3", # li t3,-93
"\x01\xce\x40\x26", # xor t0,t6,t6
"\x21\x08\xff\xff", # addi t0,t0,-1
"\x05\x10\xff\xff", # bltzal t0,14 <next>
"\x28\x08\x82\x82", # slti t0,zero,-32126
"\x23\xfd\xff\xe2", # addi sp,ra,-30
"\x01\x60\x58\x27", # nor t3,t3,zero
"\x03\xeb\xc8\x21", # addu t9,ra,t3
"\x28\x17\x82\x82", # slti s7,zero,-32126
"\x8f\x31\xff\xfc", # lw s1,-4(t9)
"\x24\x0c\xff\xfb", # li t4,-5
"\x01\x80\x60\x27", # nor t4,t4,zero
"\x21\x8f\xff\xfd", # addi t7,t4,-3
"\x8f\x28\xff\xfc", # lw t0,-4(t9)
"\x02\xef\xb8\x21", # addu s7,s7,t7
"\x01\x11\x18\x26", # xor v1,t0,s1
"\x02\xee\xf0\x2b", # sltu s8,s7,t6
"\xaf\x23\xff\xfc", # sw v1,-4(t9)
"\x14\x1e\xff\xfa", # bne zero,s8,3c <loop>
"\x03\x2c\xc8\x21", # addu t9,t9,t4
"\x21\x86\xff\xfd", # addi a2,t4,-3
"\xaf\xa6\xff\xf8", # sw a2,-8(sp)
"\x01\xce\x28\x26", # xor a1,t6,t6
"\xaf\xa5\xff\xfc", # sw a1,-4(sp)
"\x27\xa4\xff\xf8", # addiu a0,sp,-8
"\x24\x02\x10\x46", # li v0,4166
"\x01\x4a\x54\x0c" # syscall 0x52950
])
}
class MipsXorEncoder(Encoder):
    r"""Generates an XOR decoder for MIPS.

    >>> context.clear(arch='mips')
    >>> shellcode = asm(shellcraft.sh())
    >>> avoid = '/bin/sh\x00'
    >>> encoded = pwnlib.encoders.mips.xor.encode(shellcode, avoid)
    >>> assert not any(c in encoded for c in avoid)
    >>> p = run_shellcode(encoded)
    >>> p.sendline('echo hello; exit')
    >>> p.recvline()
    'hello\n'
    """

    # The decoder stub bytes can never be rewritten by the XOR pass, so they
    # can neither appear in the caller's blacklist nor be avoided.
    blacklist = cannot_avoid = set(''.join(v for v in decoders.values()))

    def __call__(self, raw_bytes, avoid, pcreg=''):
        """Return ``raw_bytes`` XOR-encoded, prefixed with a MIPS decoder stub.

        :param raw_bytes: Payload to encode; must be word-aligned.
        :param avoid: Bytes that must not appear in the output.
        :param pcreg: Unused; accepted for interface compatibility.
        """
        assert 0 == len(raw_bytes) % context.bytes, "Payload is not aligned"

        # BUG FIX: use floor division — on Python 3 `/` yields a float and
        # the subsequent `size ^ 0xffff` raises TypeError.
        # The +1 word presumably accounts for the key emitted by xor_key
        # below — TODO confirm.
        size = (len(raw_bytes) // 4) + 1
        assert size < 0x10000, "Payload is too long"

        # The stub expects the size one's-complemented, split into high and
        # low bytes that are patched over the SIZ1/SIZ2 placeholders.
        size = size ^ 0xffff
        sizelo = size & 0xff
        sizehi = size >> 8

        decoder = str(decoders[context.endian])
        decoder = decoder.replace('SIZ1', chr(sizehi))
        decoder = decoder.replace('SIZ2', chr(sizelo))

        key, data = xor_key(raw_bytes, avoid=avoid)

        return decoder + key + data
encode = MipsXorEncoder()
| 38.147059 | 81 | 0.583655 |
a4fa941fc6bc8c9ece6000838973529910ac4a14 | 17,208 | py | Python | google-cloud-sdk/lib/third_party/dulwich/web.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | 1 | 2017-11-29T18:52:27.000Z | 2017-11-29T18:52:27.000Z | google-cloud-sdk/lib/third_party/dulwich/web.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | null | null | null | google-cloud-sdk/lib/third_party/dulwich/web.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | [
"Apache-2.0"
] | 3 | 2017-07-27T18:44:13.000Z | 2020-07-25T17:48:53.000Z | # web.py -- WSGI smart-http server
# Copyright (C) 2010 Google, Inc.
# Copyright (C) 2012 Jelmer Vernooij <jelmer@samba.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# or (at your option) any later version of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""HTTP server for dulwich that implements the git smart HTTP protocol."""
from io import BytesIO
import shutil
import tempfile
import gzip
import os
import re
import sys
import time
from wsgiref.simple_server import (
WSGIRequestHandler,
ServerHandler,
WSGIServer,
make_server,
)
try:
from urlparse import parse_qs
except ImportError:
from urllib.parse import parse_qs
from dulwich import log_utils
from dulwich.protocol import (
ReceivableProtocol,
)
from dulwich.repo import (
Repo,
)
from dulwich.server import (
DictBackend,
DEFAULT_HANDLERS,
generate_info_refs,
generate_objects_info_packs,
)
# Module-level logger routed through dulwich's logging helpers.
logger = log_utils.getLogger(__name__)

# HTTP status lines in the "<code> <reason>" form expected by WSGI
# start_response().
HTTP_OK = '200 OK'
HTTP_NOT_FOUND = '404 Not Found'
HTTP_FORBIDDEN = '403 Forbidden'
HTTP_ERROR = '500 Internal Server Error'
def date_time_string(timestamp=None):
    """Format a timestamp as an RFC 1123 HTTP date string.

    :param timestamp: Seconds since the epoch; defaults to the current time.
    :return: A string like ``'Thu, 01 Jan 1970 00:00:00 GMT'``.
    """
    # From BaseHTTPRequestHandler.date_time_string in BaseHTTPServer.py in the
    # Python 2.6.5 standard library, following modifications:
    #  - Made a global rather than an instance method.
    #  - weekdayname and monthname are renamed and locals rather than class
    #    variables.
    #  Copyright (c) 2001-2010 Python Software Foundation; All Rights Reserved
    weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
    months = [None,
              'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    if timestamp is None:
        timestamp = time.time()
    year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp)
    # BUG FIX: the timezone suffix was misspelled 'GMD'; HTTP dates are
    # always expressed in GMT (RFC 1123 / RFC 7231).
    return '%s, %02d %3s %4d %02d:%02d:%02d GMT' % (
        weekdays[wd], day, months[month], year, hh, mm, ss)
def url_prefix(mat):
    """Extract the URL prefix from a regex match.

    :param mat: A regex match object.
    :returns: The URL prefix, defined as the text before the match in the
        original string, normalized to exactly one leading slash and no
        trailing slash.
    """
    prefix = mat.string[:mat.start()]
    return '/' + prefix.strip('/')
def get_repo(backend, mat):
    """Get a Repo instance for the given backend and URL regex match."""
    prefix = url_prefix(mat)
    return backend.open_repository(prefix)
def send_file(req, f, content_type):
    """Send a file-like object to the request output.

    :param req: The HTTPGitRequest object to send output to.
    :param f: An open file-like object to send; will be closed.
    :param content_type: The MIME type for the file.
    :return: Iterator over the contents of the file, as chunks.
    """
    if f is None:
        yield req.not_found('File not found')
        return
    try:
        req.respond(HTTP_OK, content_type)
        # Stream in fixed-size chunks so large files are never held in
        # memory all at once.
        while True:
            data = f.read(10240)
            if not data:
                break
            yield data
        f.close()
    except IOError:
        # The headers may already have been sent, so the error text is
        # simply appended to the response body.
        f.close()
        yield req.error('Error reading file')
    except:
        # Any other exception (including GeneratorExit if the client
        # disconnects mid-stream): close the file, then propagate.
        f.close()
        raise
def _url_to_path(url):
    """Rewrite a slash-separated URL path using the local OS separator."""
    return os.path.sep.join(url.split('/'))
def get_text_file(req, backend, mat):
    """Serve a plain-text repository file (HEAD, info/alternates, ...)."""
    req.nocache()
    path = _url_to_path(mat.group())
    logger.info('Sending plain text file %s', path)
    repo = get_repo(backend, mat)
    return send_file(req, repo.get_named_file(path), 'text/plain')
def get_loose_object(req, backend, mat):
    """Serve a single loose object addressed by its SHA in the URL.

    Loose objects are immutable, so the response is cached forever.
    """
    sha = (mat.group(1) + mat.group(2)).encode('ascii')
    logger.info('Sending loose object %s', sha)
    store = get_repo(backend, mat).object_store
    if not store.contains_loose(sha):
        yield req.not_found('Object not found')
        return
    try:
        blob = store[sha].as_legacy_object()
    except IOError:
        yield req.error('Error reading object')
    else:
        req.cache_forever()
        req.respond(HTTP_OK, 'application/x-git-loose-object')
        yield blob
def get_pack_file(req, backend, mat):
    """Serve a pack (.pack) file; packs are content-addressed, so the
    response is cached forever."""
    req.cache_forever()
    path = _url_to_path(mat.group())
    logger.info('Sending pack file %s', path)
    repo = get_repo(backend, mat)
    return send_file(req, repo.get_named_file(path),
                     'application/x-git-packed-objects')
def get_idx_file(req, backend, mat):
    """Serve a pack index (.idx) file; immutable, so cached forever.

    :param req: The HTTPGitRequest object to send output to.
    :param backend: The Backend to look up the repository in.
    :param mat: Regex match covering the .idx path within the URL.
    :return: Iterator over the contents of the file, as chunks.
    """
    req.cache_forever()
    path = _url_to_path(mat.group())
    # BUG FIX: the log line previously said 'Sending pack file', copy-pasted
    # from get_pack_file; this handler serves the pack *index*.
    logger.info('Sending pack index file %s', path)
    return send_file(req, get_repo(backend, mat).get_named_file(path),
                     'application/x-git-packed-objects-toc')
def get_info_refs(req, backend, mat):
    """Handle a /info/refs request for both smart and dumb clients.

    Smart HTTP clients pass ``?service=git-upload-pack`` (or
    git-receive-pack) and receive a pkt-line ref advertisement; any other
    request falls back to the dumb plain-text listing.
    """
    params = parse_qs(req.environ['QUERY_STRING'])
    service = params.get('service', [None])[0]
    if service and not req.dumb:
        handler_cls = req.handlers.get(service, None)
        if handler_cls is None:
            yield req.forbidden('Unsupported service')
            return
        req.nocache()
        write = req.respond(HTTP_OK, 'application/x-%s-advertisement' % service)
        # The advertisement phase has no request body, so the handler gets
        # an empty input stream.
        proto = ReceivableProtocol(BytesIO().read, write)
        handler = handler_cls(backend, [url_prefix(mat)], proto,
                              http_req=req, advertise_refs=True)
        # Smart-HTTP preamble: a '# service=...' pkt-line followed by a
        # flush-pkt (write_pkt_line(None)), then the ref advertisement
        # produced by handler.handle().
        handler.proto.write_pkt_line(b'# service=' + service.encode('ascii') + b'\n')
        handler.proto.write_pkt_line(None)
        handler.handle()
    else:
        # non-smart fallback
        # TODO: select_getanyfile() (see http-backend.c)
        req.nocache()
        req.respond(HTTP_OK, 'text/plain')
        logger.info('Emulating dumb info/refs')
        repo = get_repo(backend, mat)
        for text in generate_info_refs(repo):
            yield text
def get_info_packs(req, backend, mat):
    """Serve a dumb-protocol objects/info/packs listing."""
    req.nocache()
    req.respond(HTTP_OK, 'text/plain')
    logger.info('Emulating dumb info/packs')
    repo = get_repo(backend, mat)
    return generate_objects_info_packs(repo)
class _LengthLimitedFile(object):
    """Wrapper class to limit the length of reads from a file-like object.

    This is used to ensure EOF is read from the wsgi.input object once
    Content-Length bytes are read. This behavior is required by the WSGI spec
    but not implemented in wsgiref as of 2.5.
    """

    def __init__(self, input, max_bytes):
        self._input = input
        self._bytes_avail = max_bytes

    def read(self, size=-1):
        """Read up to ``size`` bytes, never past the configured limit."""
        remaining = self._bytes_avail
        if remaining <= 0:
            return b''
        if size == -1 or size > remaining:
            size = remaining
        self._bytes_avail = remaining - size
        return self._input.read(size)

    # TODO: support more methods as necessary
def handle_service_request(req, backend, mat):
    """Run a smart-protocol service (upload-pack / receive-pack) request."""
    service = mat.group().lstrip('/')
    logger.info('Handling service request for %s', service)
    handler_cls = req.handlers.get(service, None)
    if handler_cls is None:
        yield req.forbidden('Unsupported service')
        return
    req.nocache()
    write = req.respond(HTTP_OK, 'application/x-%s-result' % service)
    read = req.environ['wsgi.input'].read
    proto = ReceivableProtocol(read, write)
    handler_cls(backend, [url_prefix(mat)], proto, http_req=req).handle()
class HTTPGitRequest(object):
    """Class encapsulating the state of a single git HTTP request.

    :ivar environ: the WSGI environment for the request.
    """

    def __init__(self, environ, start_response, dumb=False, handlers=None):
        self.environ = environ
        self.dumb = dumb
        self.handlers = handlers
        self._start_response = start_response
        self._cache_headers = []
        self._headers = []

    def add_header(self, name, value):
        """Add a header to the response."""
        self._headers.append((name, value))

    def respond(self, status=HTTP_OK, content_type=None, headers=None):
        """Begin a response with the given status and other headers."""
        if headers:
            self._headers.extend(headers)
        if content_type:
            self._headers.append(('Content-Type', content_type))
        self._headers.extend(self._cache_headers)
        return self._start_response(status, self._headers)

    def not_found(self, message):
        """Begin a HTTP 404 response and return the text of a message."""
        logger.info('Not found: %s', message)
        return self._fail(HTTP_NOT_FOUND, message)

    def forbidden(self, message):
        """Begin a HTTP 403 response and return the text of a message."""
        logger.info('Forbidden: %s', message)
        return self._fail(HTTP_FORBIDDEN, message)

    def error(self, message):
        """Begin a HTTP 500 response and return the text of a message."""
        logger.error('Error: %s', message)
        return self._fail(HTTP_ERROR, message)

    def _fail(self, status, message):
        # Shared implementation of the failure responses: failures are
        # plain text and never cached.
        self._cache_headers = []
        self.respond(status, 'text/plain')
        return message

    def nocache(self):
        """Set the response to never be cached by the client."""
        self._cache_headers = [
            ('Expires', 'Fri, 01 Jan 1980 00:00:00 GMT'),
            ('Pragma', 'no-cache'),
            ('Cache-Control', 'no-cache, max-age=0, must-revalidate'),
        ]

    def cache_forever(self):
        """Set the response to be cached forever by the client."""
        now = time.time()
        self._cache_headers = [
            ('Date', date_time_string(now)),
            ('Expires', date_time_string(now + 31536000)),
            ('Cache-Control', 'public, max-age=31536000'),
        ]
class HTTPGitApplication(object):
    """Class encapsulating the state of a git WSGI application.

    :ivar backend: the Backend object backing this application
    """

    # (method, compiled pattern) -> handler callable. Patterns are searched
    # against PATH_INFO; the first match wins.
    services = {
        ('GET', re.compile('/HEAD$')): get_text_file,
        ('GET', re.compile('/info/refs$')): get_info_refs,
        ('GET', re.compile('/objects/info/alternates$')): get_text_file,
        ('GET', re.compile('/objects/info/http-alternates$')): get_text_file,
        ('GET', re.compile('/objects/info/packs$')): get_info_packs,
        ('GET', re.compile('/objects/([0-9a-f]{2})/([0-9a-f]{38})$')):
            get_loose_object,
        ('GET', re.compile('/objects/pack/pack-([0-9a-f]{40})\\.pack$')):
            get_pack_file,
        ('GET', re.compile('/objects/pack/pack-([0-9a-f]{40})\\.idx$')):
            get_idx_file,
        ('POST', re.compile('/git-upload-pack$')): handle_service_request,
        ('POST', re.compile('/git-receive-pack$')): handle_service_request,
    }

    def __init__(self, backend, dumb=False, handlers=None, fallback_app=None):
        self.backend = backend
        self.dumb = dumb
        self.handlers = dict(DEFAULT_HANDLERS)
        self.fallback_app = fallback_app
        if handlers is not None:
            self.handlers.update(handlers)

    def __call__(self, environ, start_response):
        path = environ['PATH_INFO']
        method = environ['REQUEST_METHOD']
        req = HTTPGitRequest(environ, start_response, dumb=self.dumb,
                             handlers=self.handlers)
        # environ['QUERY_STRING'] carries the query args; the individual
        # handlers parse it themselves where needed.
        for (smethod, spath), service_handler in self.services.items():
            if smethod != method:
                continue
            mat = spath.search(path)
            if mat:
                return service_handler(req, self.backend, mat)
        if self.fallback_app is not None:
            return self.fallback_app(environ, start_response)
        return req.not_found('Sorry, that method is not supported')
class GunzipFilter(object):
    """WSGI middleware that unzips gzip-encoded requests before
    passing on to the underlying application.
    """

    def __init__(self, application):
        self.app = application

    def __call__(self, environ, start_response):
        if environ.get('HTTP_CONTENT_ENCODING', '') == 'gzip':
            wsgi_input = environ['wsgi.input']
            if not hasattr(wsgi_input, 'seek'):
                # The gzip implementation in the standard library of Python
                # 2.x requires the '.seek()' and '.tell()' methods to be
                # available on the input stream. Read the data into a
                # temporary file to work around this limitation.
                spooled = tempfile.SpooledTemporaryFile(16 * 1024 * 1024)
                shutil.copyfileobj(wsgi_input, spooled)
                spooled.seek(0)
                wsgi_input = spooled
            environ['wsgi.input'] = gzip.GzipFile(
                filename=None, fileobj=wsgi_input, mode='r')
            # The body is now decompressed on the fly, so the original
            # encoding and length headers no longer describe it.
            del environ['HTTP_CONTENT_ENCODING']
            environ.pop('CONTENT_LENGTH', None)
        return self.app(environ, start_response)
class LimitedInputFilter(object):
    """WSGI middleware that limits the input length of a request to that
    specified in Content-Length.
    """

    def __init__(self, application):
        self.app = application

    def __call__(self, environ, start_response):
        # This is not necessary if this app is run from a conforming WSGI
        # server. Unfortunately, there's no way to tell that at this point.
        # TODO: git may used HTTP/1.1 chunked encoding instead of specifying
        # content-length
        content_length = environ.get('CONTENT_LENGTH', '')
        if content_length:
            limited = _LengthLimitedFile(environ['wsgi.input'],
                                         int(content_length))
            environ['wsgi.input'] = limited
        return self.app(environ, start_response)
def make_wsgi_chain(*args, **kwargs):
    """Factory function to create an instance of HTTPGitApplication,
    correctly wrapped with needed middleware.
    """
    inner = HTTPGitApplication(*args, **kwargs)
    return GunzipFilter(LimitedInputFilter(inner))
class ServerHandlerLogger(ServerHandler):
    """ServerHandler that uses dulwich's logger for logging exceptions."""

    def log_exception(self, exc_info):
        # BUG FIX: the original compared the *string* sys.version against a
        # tuple — always True on Python 2 (cross-type ordering) and a
        # TypeError on Python 3. sys.version_info is the comparable tuple.
        if sys.version_info < (2, 7):
            # logging's exception() did not accept exc_info= before 2.7.
            logger.exception('Exception happened during processing of request')
        else:
            logger.exception('Exception happened during processing of request',
                             exc_info=exc_info)

    def log_message(self, format, *args):
        logger.info(format, *args)

    def log_error(self, *args):
        logger.error(*args)
class WSGIRequestHandlerLogger(WSGIRequestHandler):
    """WSGIRequestHandler that uses dulwich's logger for logging exceptions."""

    def log_exception(self, exc_info):
        logger.exception('Exception happened during processing of request',
                         exc_info=exc_info)

    def log_message(self, format, *args):
        logger.info(format, *args)

    def log_error(self, *args):
        logger.error(*args)

    def handle(self):
        """Handle a single HTTP request"""
        self.raw_requestline = self.rfile.readline()
        if not self.parse_request():
            # parse_request() already sent an error code; nothing left to do.
            return
        srv_handler = ServerHandlerLogger(
            self.rfile, self.wfile, self.get_stderr(), self.get_environ()
        )
        srv_handler.request_handler = self  # backpointer for logging
        srv_handler.run(self.server.get_app())
class WSGIServerLogger(WSGIServer):
    """WSGIServer that routes unhandled per-request errors to dulwich's
    logger instead of printing a traceback to stderr."""

    def handle_error(self, request, client_address):
        """Handle an error. """
        # Use lazy %-style logging arguments instead of pre-formatting the
        # message string, so formatting only happens if the record is
        # actually emitted.
        logger.exception(
            'Exception happened during processing of request from %s',
            client_address)
def main(argv=sys.argv):
    """Entry point for starting an HTTP git server."""
    import optparse
    parser = optparse.OptionParser()
    parser.add_option("-l", "--listen_address", dest="listen_address",
                      default="localhost",
                      help="Binding IP address.")
    parser.add_option("-p", "--port", dest="port", type=int,
                      default=8000,
                      help="Port to listen on.")
    options, args = parser.parse_args(argv)

    # Serve the repository named on the command line, or the current
    # working directory when none is given.
    gitdir = args[1] if len(args) > 1 else os.getcwd()

    log_utils.default_logging_config()
    backend = DictBackend({'/': Repo(gitdir)})
    app = make_wsgi_chain(backend)
    server = make_server(options.listen_address, options.port, app,
                         handler_class=WSGIRequestHandlerLogger,
                         server_class=WSGIServerLogger)
    logger.info('Listening for HTTP connections on %s:%d',
                options.listen_address, options.port)
    server.serve_forever()


if __name__ == '__main__':
    main()
| 34.210736 | 105 | 0.637552 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.