code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import json
import re
import subprocess
import yaml
import typer
from pathlib import Path
from typing import Optional, Dict, List
from textwrap import dedent
from .console import console
def create_initial_env_specs(
    env_name: str, channel: Optional[str] = None, packages: Optional[List[str]] = None
) -> Dict:
    """Build the initial environment specification dict for a new 'yml' file.

    The spec always carries a 'name' and a 'channels' list ending with
    'defaults'; a 'dependencies' entry is present only when packages were given.
    """
    env_specs = {"name": env_name}
    env_specs["channels"] = [channel, "defaults"] if channel else ["defaults"]
    if packages:
        env_specs["dependencies"] = packages
    return env_specs
def get_validate_file_name(env_name: str, file: Optional[str] = None) -> Optional[str]:
    """
    Resolve the '.yml' specifications file name for *env_name*.

    If *file* is not given, looks for '<env_name>.yml' in the current
    directory and exits with a hint when it cannot be found. If *file* is
    given but does not exist, the program exits as well.

    Returns the file name as a ``str`` in both branches (the original
    returned a ``Path`` in one branch, contradicting the annotated
    ``Optional[str]`` return type).
    """
    if not file:
        # first look for an existing yml file named after the environment
        if not Path(f"{env_name}.yml").is_file():
            console.print(f"[yellow]Couldn't locate {env_name}.yml")
            console.print(
                dedent(
                    f"""
                    [yellow]If your environment name and specifications file name are not the same,
                    please provide the specifications file name to update using the '-f' or '--file' flag.
                    """
                )
            )
            raise typer.Exit()
        # keep the return type a plain string, matching the annotation
        file = f"{env_name}.yml"
    # validate the file that the user provided
    elif not Path(file).is_file():
        console.print(f"[magenta]Could not locate '{file}'")
        raise typer.Exit()
    return file
def read_env_file(file: str) -> Dict:
    """Read a '.yml' specifications file and return its contents as a dict.

    Uses ``yaml.safe_load`` instead of ``yaml.load(..., FullLoader)``:
    safe_load cannot construct arbitrary Python objects from a crafted
    file, and it mirrors ``write_env_file``, which already uses
    ``yaml.safe_dump``.
    """
    with open(file, "r") as f:
        return yaml.safe_load(f)
def write_env_file(env_specs: Dict, file: str) -> None:
    """Serialize *env_specs* to *file* as YAML, preserving the key order."""
    with open(file, "w") as handle:
        yaml.safe_dump(env_specs, handle, sort_keys=False)
def add_pkg_to_dependencies(env_specs: Dict, pkg_name: List[str]) -> Dict:
    """Add package(s) to the 'dependencies' section of the specifications.

    Exits via ``typer.Exit`` when any requested package is already listed
    (comparison ignores version constraints such as >, < or =); otherwise
    appends all requested packages and returns the updated spec.
    """
    current = env_specs.get("dependencies")
    if not current:
        env_specs["dependencies"] = list(pkg_name)
        return env_specs
    # bare names of what is already listed, version constraints stripped
    current_names = [re.findall(r"\w+", spec)[0] for spec in current]
    for requested in pkg_name:
        # strip any >,<,= from the package name that the user provided
        requested_name = re.findall(r"\w+", requested)[0]
        if requested_name in current_names:
            console.print(
                f"[yellow]'{requested_name}' already exists. Skipping installation.\n"
                f"[yellow]If you want to update {requested_name}, use `update` instead."
            )
            raise typer.Exit()
    env_specs["dependencies"] = current + list(pkg_name)
    return env_specs
def add_new_channel_to_env_specs(env_specs: Dict, channel: Optional[str]) -> Dict:
    """Add *channel* to the spec's 'channels' list if it is not already there.

    Robustness fix over the original: when the spec has no 'channels' key
    (or an empty list), the original raised ``TypeError`` on
    ``list(None)`` or silently dropped the channel; now the channel is
    added to a fresh list instead. No-op when *channel* is None/empty.
    """
    if channel:
        # tolerate a missing or empty 'channels' entry
        existing_channels = list(env_specs.get("channels") or [])
        if channel not in existing_channels:
            existing_channels.append(channel)
        env_specs["channels"] = existing_channels
    return env_specs
def remove_pkg_from_dependencies(env_specs: Dict, pkg_name: List[str]) -> Dict:
    """Remove package(s) from the 'dependencies' section of the specifications.

    Exits via ``typer.Exit`` when the spec has no dependencies, or when a
    requested package is not listed (comparison ignores version
    constraints such as >, < or =).

    Fix over the original: it called ``list.remove`` on the very lists it
    was zipping over, which makes the zip iterator skip the element after
    each removal; here the kept entries are rebuilt instead.
    """
    existing_packages = env_specs.get("dependencies")
    if not existing_packages:
        console.print(
            f"[bold red]There are no packages listed in '{env_specs['name']}.yml' file."
        )
        raise typer.Exit()
    # bare names (version constraints stripped), aligned with existing_packages
    existing_names = [re.findall(r"\w+", spec)[0] for spec in existing_packages]
    for pkg in pkg_name:
        # strip any >,<,= from the package name that the user provided
        pkg_bare = re.findall(r"\w+", pkg)[0]
        if pkg_bare not in existing_names:
            console.print(
                f"[bold red]'{pkg}' is not listed in '{env_specs['name']}.yml' file!"
            )
            raise typer.Exit()
        # drop every entry whose bare name matches, rebuilding both lists
        kept = [
            (name, spec)
            for name, spec in zip(existing_names, existing_packages)
            if name != pkg_bare
        ]
        existing_names = [name for name, _ in kept]
        existing_packages = [spec for _, spec in kept]
    env_specs["dependencies"] = existing_packages
    return env_specs
def update_channels_after_removal(env_specs: Dict, env_name: str) -> Dict:
    """Refresh the 'channels' list from the channels actually used by *env_name*.

    Queries ``conda list`` for the environment, keeps one entry per
    distinct channel, and re-appends 'defaults' at the end.
    """
    result = subprocess.run(
        ["conda", "list", "-n", env_name, "--json"], capture_output=True, text=True
    )
    installed: List[Dict] = json.loads(result.stdout)
    # deduplicate the channels reported for the installed packages
    channels = list({pkg["channel"] for pkg in installed})
    channels.append("defaults")  # 'defaults' needs to be added back?
    env_specs["channels"] = channels
    return env_specs
def recheck_dependencies(env_specs: Dict, env_name: str) -> Dict:
    """
    Check if while removing a package, any dependent packages are also removed from env
    but not from .yml file. If so, remove them from .yml file
    """
    # Ask conda for everything actually installed in the environment.
    p = subprocess.run(
        ["conda", "list", "-n", env_name, "--json"], capture_output=True, text=True
    )
    complete_dict = json.loads(p.stdout)
    all_pkgs = set([d["name"] for d in complete_dict])
    deps = env_specs["dependencies"]  # this may have dependencies with ">,<,=" symbols
    deps_re = [re.findall(r"\w+", d)[0] for d in deps]  # removing the symbols
    # Anything listed in the yml but no longer installed must be dropped.
    rem_pkgs_to_be_removed_from_yml = set(deps_re) - all_pkgs
    if rem_pkgs_to_be_removed_from_yml:
        # never drop 'python' from the spec even if conda does not list it
        if "python" in rem_pkgs_to_be_removed_from_yml:
            rem_pkgs_to_be_removed_from_yml.remove("python")
        # NOTE(review): remove_pkg_from_dependencies is annotated for a list
        # but receives a set here; it works because it only iterates the arg.
        env_specs = remove_pkg_from_dependencies(
            env_specs, rem_pkgs_to_be_removed_from_yml
        )
return env_specs | ezconda/_utils.py | import json
import re
import subprocess
import yaml
import typer
from pathlib import Path
from typing import Optional, Dict, List
from textwrap import dedent
from .console import console
def create_initial_env_specs(
env_name: str, channel: Optional[str] = None, packages: Optional[List[str]] = None
) -> Dict:
"""Create initial environment specifications that will be written to 'yml' file."""
env_specs = {}
env_specs.update({"name": env_name})
if channel:
env_specs.update({"channels": [channel, "defaults"]})
else:
env_specs.update({"channels": ["defaults"]})
if packages:
env_specs.update({"dependencies": packages})
return env_specs
def get_validate_file_name(env_name: str, file: Optional[str] = None) -> Optional[str]:
"""
Looks for a '.yml' file with the `env_name` specified. If file cannot
be located, prompt will ask for the file name. If the file provided does
not exist, the program will exit.
"""
if not file:
# first look for existing yml file
if not Path(f"{env_name}.yml").is_file():
console.print(f"[yellow]Couldn't locate {env_name}.yml")
console.print(
dedent(
f"""
[yellow]If your environment name and specifications file name are not the same,
please provide the specifications file name to update using the '-f' or '--file' flag.
"""
)
)
raise typer.Exit()
else:
file = Path(f"{env_name}.yml")
# validate the file that the user provides
else:
if not Path(file).is_file():
console.print(f"[magenta]Could not locate '{file}'")
raise typer.Exit()
return file
def read_env_file(file: str) -> Dict:
"Read '.yml' file and return a dict containing specifications in the file."
with open(file, "r") as f:
env_specs = yaml.load(f, Loader=yaml.FullLoader)
return env_specs
def write_env_file(env_specs: Dict, file: str) -> None:
"Writes '.yml' file based on the specifications provided."
with open(file, "w") as f:
yaml.safe_dump(env_specs, f, sort_keys=False)
def add_pkg_to_dependencies(env_specs: Dict, pkg_name: List[str]) -> Dict:
"""
Checks if the package/s specified already exist in 'dependencies' section in 'yml' file.
If package/s already exists, informs user and exits the program.
If package/s does not exist, adds it to 'dependencies' section in 'yml' file.
"""
existing_packages = env_specs.get("dependencies")
# check if packages already exists
if existing_packages:
# create a list of existing packages without >,<,=
existing_packages_re = [re.findall(r"\w+", d)[0] for d in existing_packages]
for pkg in pkg_name:
# strip any >,<,= from the package name that the user provided
pkg_re = re.findall(r"\w+", pkg)[0]
# exit if package already exists in the env.yml file
if pkg_re in existing_packages_re:
console.print(
f"[yellow]'{pkg_re}' already exists. Skipping installation.\n"
f"[yellow]If you want to update {pkg_re}, use `update` instead."
)
raise typer.Exit()
env_specs["dependencies"] = existing_packages + list(pkg_name)
else:
env_specs["dependencies"] = list(pkg_name)
return env_specs
def add_new_channel_to_env_specs(env_specs: Dict, channel: Optional[str]) -> Dict:
"""Add new channel to the environment specifications, if it does not exist."""
# this should always return ["defaults"] atleast!
if channel:
existing_channels = list(env_specs.get("channels"))
if existing_channels and channel not in existing_channels:
existing_channels.append(channel)
env_specs["channels"] = existing_channels
return env_specs
def remove_pkg_from_dependencies(env_specs: Dict, pkg_name: List[str]) -> Dict:
"""
Checks if the package/s specified already exist in 'dependencies' section in 'yml' file.
If package/s does not exist, informs user and exits the program.
If package/s exist, remove it from 'dependencies' section in 'yml' file.
"""
existing_packages = env_specs.get("dependencies")
# check if packages exists in specifications
if existing_packages:
# create a list of existing packages without >,<,=
existing_packages_re = [re.findall(r"\w+", d)[0] for d in existing_packages]
for pkg in pkg_name:
# strip any >,<,= from the package name that the user provided
pkg_re = re.findall(r"\w+", pkg)[0]
# check if the package exists in the existing packages list
if pkg_re not in existing_packages_re:
console.print(
f"[bold red]'{pkg}' is not listed in '{env_specs['name']}.yml' file!"
)
raise typer.Exit()
# this will only run if the package was found in the existing packages list
for ext_pkg_re, ext_pkg in zip(existing_packages_re, existing_packages):
if pkg_re == ext_pkg_re:
existing_packages.remove(ext_pkg)
existing_packages_re.remove(
ext_pkg_re
) # need to remove it from this list as well otherwise zip will create an issue
env_specs["dependencies"] = existing_packages
else:
console.print(
f"[bold red]There are no packages listed in '{env_specs['name']}.yml' file."
)
raise typer.Exit()
return env_specs
def update_channels_after_removal(env_specs: Dict, env_name: str) -> Dict:
"""
Updates channels in the environment specifications by looking at the exisiting channels in the environment.
"""
# get list of channels
p = subprocess.run(
["conda", "list", "-n", env_name, "--json"], capture_output=True, text=True
)
# identify unique ones and update channels in env_specs
complete_dict: List[Dict] = json.loads(p.stdout)
new_channels = list(set([d["channel"] for d in complete_dict]))
new_channels.append("defaults") # 'defaults' needs to be added back?
env_specs["channels"] = new_channels
return env_specs
def recheck_dependencies(env_specs: Dict, env_name: str) -> Dict:
"""
Check if while removing a package, any dependent packages are also removed from env
but not from .yml file. If so, remove them from .yml file
"""
p = subprocess.run(
["conda", "list", "-n", env_name, "--json"], capture_output=True, text=True
)
complete_dict = json.loads(p.stdout)
all_pkgs = set([d["name"] for d in complete_dict])
deps = env_specs["dependencies"] # this may have dependencies with ">,<,=" symbols
deps_re = [re.findall(r"\w+", d)[0] for d in deps] # removing the symbols
rem_pkgs_to_be_removed_from_yml = set(deps_re) - all_pkgs
if rem_pkgs_to_be_removed_from_yml:
if "python" in rem_pkgs_to_be_removed_from_yml:
rem_pkgs_to_be_removed_from_yml.remove("python")
env_specs = remove_pkg_from_dependencies(
env_specs, rem_pkgs_to_be_removed_from_yml
)
return env_specs | 0.712432 | 0.162945 |
import torch
from registry import registry
from models.model_base import Model, StandardTransform, StandardNormalization
from mldb.utils import load_model_state_dict
model_params = {
'resnet18_ssl': { 'arch': 'resnet18',
'eval_batch_size': 256,
'img_crop_size': 224,
'img_resize_size': 256,
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]},
'resnet18_swsl': { 'arch': 'resnet18',
'eval_batch_size': 256,
'img_crop_size': 224,
'img_resize_size': 256,
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]},
'resnet50_ssl': { 'arch': 'resnet50',
'eval_batch_size': 256,
'img_crop_size': 224,
'img_resize_size': 256,
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]},
'resnet50_swsl': { 'arch': 'resnet50',
'eval_batch_size': 256,
'img_crop_size': 224,
'img_resize_size': 256,
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]},
'resnext50_32x4d_ssl': { 'arch': 'resnext50_32x4d',
'eval_batch_size': 256,
'img_crop_size': 224,
'img_resize_size': 256,
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]},
'resnext50_32x4d_swsl': { 'arch': 'resnext50_32x4d',
'eval_batch_size': 256,
'img_crop_size': 224,
'img_resize_size': 256,
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]},
'resnext101_32x4d_ssl': { 'arch': 'resnext101_32x4d',
'eval_batch_size': 32,
'img_crop_size': 224,
'img_resize_size': 256,
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]},
'resnext101_32x4d_swsl': { 'arch': 'resnext101_32x4d',
'eval_batch_size': 32,
'img_crop_size': 224,
'img_resize_size': 256,
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]},
'resnext101_32x8d_ssl': { 'arch': 'resnext101_32x8d',
'eval_batch_size': 16,
'img_crop_size': 224,
'img_resize_size': 256,
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]},
'resnext101_32x8d_swsl': { 'arch': 'resnext101_32x8d',
'eval_batch_size': 16,
'img_crop_size': 224,
'img_resize_size': 256,
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]},
'resnext101_32x16d_ssl': { 'arch': 'resnext101_32x16d',
'eval_batch_size': 16,
'img_crop_size': 224,
'img_resize_size': 256,
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]},
# 'resnext101_32x16d_swsl': { 'arch': 'resnext101_32x16d',
# 'eval_batch_size': 16,
# 'img_crop_size': 224,
# 'img_resize_size': 256,
# 'mean': [0.485, 0.456, 0.406],
# 'std': [0.229, 0.224, 0.225]}
}
def gen_classifier_loader(name, d):
    """Return a zero-argument loader that builds and initializes model *name*.

    A factory function is used (rather than a closure created directly in
    the registration loop below) so that *name* is bound per model.
    """
    def classifier_loader():
        classifier = torch.hub.load(
            'facebookresearch/semi-supervised-ImageNet1K-models', name
        )
        load_model_state_dict(classifier, name)
        return classifier
    return classifier_loader
for name, d in model_params.items():
registry.add_model(
Model(
name = name,
arch = d['arch'],
transform = StandardTransform(d['img_resize_size'], d['img_crop_size']),
normalization = StandardNormalization(d['mean'], d['std']),
classifier_loader = gen_classifier_loader(name, d),
eval_batch_size = d['eval_batch_size'],
adversarial_batch_size = d['adversarial_batch_size'] if 'adversarial_batch_size' in d else None
)
) | src/models/semi_supervised_facebook.py | import torch
from registry import registry
from models.model_base import Model, StandardTransform, StandardNormalization
from mldb.utils import load_model_state_dict
model_params = {
'resnet18_ssl': { 'arch': 'resnet18',
'eval_batch_size': 256,
'img_crop_size': 224,
'img_resize_size': 256,
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]},
'resnet18_swsl': { 'arch': 'resnet18',
'eval_batch_size': 256,
'img_crop_size': 224,
'img_resize_size': 256,
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]},
'resnet50_ssl': { 'arch': 'resnet50',
'eval_batch_size': 256,
'img_crop_size': 224,
'img_resize_size': 256,
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]},
'resnet50_swsl': { 'arch': 'resnet50',
'eval_batch_size': 256,
'img_crop_size': 224,
'img_resize_size': 256,
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]},
'resnext50_32x4d_ssl': { 'arch': 'resnext50_32x4d',
'eval_batch_size': 256,
'img_crop_size': 224,
'img_resize_size': 256,
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]},
'resnext50_32x4d_swsl': { 'arch': 'resnext50_32x4d',
'eval_batch_size': 256,
'img_crop_size': 224,
'img_resize_size': 256,
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]},
'resnext101_32x4d_ssl': { 'arch': 'resnext101_32x4d',
'eval_batch_size': 32,
'img_crop_size': 224,
'img_resize_size': 256,
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]},
'resnext101_32x4d_swsl': { 'arch': 'resnext101_32x4d',
'eval_batch_size': 32,
'img_crop_size': 224,
'img_resize_size': 256,
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]},
'resnext101_32x8d_ssl': { 'arch': 'resnext101_32x8d',
'eval_batch_size': 16,
'img_crop_size': 224,
'img_resize_size': 256,
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]},
'resnext101_32x8d_swsl': { 'arch': 'resnext101_32x8d',
'eval_batch_size': 16,
'img_crop_size': 224,
'img_resize_size': 256,
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]},
'resnext101_32x16d_ssl': { 'arch': 'resnext101_32x16d',
'eval_batch_size': 16,
'img_crop_size': 224,
'img_resize_size': 256,
'mean': [0.485, 0.456, 0.406],
'std': [0.229, 0.224, 0.225]},
# 'resnext101_32x16d_swsl': { 'arch': 'resnext101_32x16d',
# 'eval_batch_size': 16,
# 'img_crop_size': 224,
# 'img_resize_size': 256,
# 'mean': [0.485, 0.456, 0.406],
# 'std': [0.229, 0.224, 0.225]}
}
def gen_classifier_loader(name, d):
def classifier_loader():
model = torch.hub.load('facebookresearch/semi-supervised-ImageNet1K-models', name)
load_model_state_dict(model, name)
return model
return classifier_loader
for name, d in model_params.items():
registry.add_model(
Model(
name = name,
arch = d['arch'],
transform = StandardTransform(d['img_resize_size'], d['img_crop_size']),
normalization = StandardNormalization(d['mean'], d['std']),
classifier_loader = gen_classifier_loader(name, d),
eval_batch_size = d['eval_batch_size'],
adversarial_batch_size = d['adversarial_batch_size'] if 'adversarial_batch_size' in d else None
)
) | 0.577376 | 0.349616 |
import b3
from b3Conditions import *
from b3Actions import *
#### HEALTH
class CheckPotionAndConsumeSEQ(b3.Sequence):
    """Sequence: succeed only if a potion is held and it gets consumed."""
    def __init__(self):
        children = [hasPotionCondition(), ConsumePotion()]
        super().__init__(children)
class CheckBuyPotionGoBuySEQ(b3.Sequence):
    """Sequence: if a potion is affordable, walk to the shop, buy and drink it."""
    def __init__(self):
        children = [
            canBuyPotionCondition(),
            SetGoalToShop(),
            WalkToGoalSEQ(),
            BuyPotion(),
            ConsumePotion(),
        ]
        super().__init__(children)
#### WALKING
class CheckStillnessAttackWallSEL(b3.Priority):
    """If the agent was still last tick, attack twice in its last direction;
    otherwise fall through to an always-succeeding branch."""
    def __init__(self):
        attack_twice = b3.Repeater(AttackLastDirection(), 2)
        still_then_attack = b3.Sequence([wasStillCondition(), attack_twice])
        super().__init__([still_then_attack, b3.Succeeder()])
class GetCheckConsumeNextMoveSEQ(b3.Priority):
    """Try to fetch, validate and consume the next move; always succeed."""
    def __init__(self):
        try_move = b3.Sequence(
            [GetNextMove(), isNextMoveValidCondition(), ConsumeNextMove()]
        )
        super().__init__([try_move, b3.Succeeder()])
class CheckMove(b3.Sequence):
    """Take one step toward the goal unless already next to it
    (home goals always keep moving)."""
    def __init__(self):
        not_next_to_goal = b3.Inverter(isNextToGoalCondition())
        keep_walking = b3.Priority([isGoalHomeCondition(), not_next_to_goal])
        super().__init__([keep_walking, GetCheckConsumeNextMoveSEQ()])
class CheckPathOrCreatePath(b3.Priority):
    """Create a path when none exists; otherwise succeed immediately."""
    def __init__(self):
        no_path = b3.Inverter(isThereAPathCondition())
        build_path = b3.Sequence([no_path, CreatePath()])
        super().__init__([build_path, b3.Succeeder()])
class WalkToGoalSEQ(b3.Sequence):
    """Walk toward the current goal: ensure a path exists, take a step,
    and clear the goal once reached."""
    def __init__(self):
        children = [
            isThereAGoalCondition(),
            CheckPathOrCreatePath(),
            CheckMove(),
            RemoveGoal(),
        ]
        # NOTE(review): id is assigned before super().__init__; presumably
        # b3 does not overwrite it -- confirm against the b3 library.
        self.id = 1
        super().__init__(children)
#### MINING
class MineTillFullOrNoLoot(b3.Priority):
    """Mine the goal while inventory has room and the goal is adjacent loot;
    always succeed otherwise."""
    def __init__(self):
        has_room = b3.Inverter(isFullCondition())
        mine = b3.Sequence(
            [has_room, isGoalLootCondition(), isNextToGoalCondition(), MineGoal()]
        )
        super().__init__([mine, b3.Succeeder()])
class CheckIfNearLootOrWalkThere(b3.Priority):
    """If not yet next to the goal, walk there and re-target a resource;
    always succeed."""
    def __init__(self):
        far_from_goal = b3.Inverter(isNextToGoalCondition())
        walk_then_retarget = b3.Sequence(
            [far_from_goal, WalkToGoalSEQ(), SetGoalToRessource()]
        )
        super().__init__([walk_then_retarget, b3.Succeeder()])
class CheckIfGoalOrSetRessource(b3.Priority):
    """Pick a resource as the goal when no goal exists; always succeed."""
    def __init__(self):
        no_goal = b3.Inverter(isThereAGoalCondition())
        pick_goal = b3.Sequence([no_goal, SetGoalToRessource()])
        super().__init__([pick_goal, b3.Succeeder()])
class GoMineSEQ(b3.Sequence):
    """Top-level mining behaviour: ensure a resource goal, walk to it, mine."""
    def __init__(self):
        childList = [CheckIfGoalOrSetRessource(),
                     CheckIfNearLootOrWalkThere(),
                     MineTillFullOrNoLoot()]
super().__init__(childList) | b3SubTrees.py | import b3
from b3Conditions import *
from b3Actions import *
#### HEALTH
class CheckPotionAndConsumeSEQ(b3.Sequence):
def __init__(self):
childList = [hasPotionCondition(),
ConsumePotion()]
super().__init__(childList)
class CheckBuyPotionGoBuySEQ(b3.Sequence):
def __init__(self):
childList = [canBuyPotionCondition(),
SetGoalToShop(),
WalkToGoalSEQ(),
BuyPotion(),
ConsumePotion()]
super().__init__(childList)
#### WALKING
class CheckStillnessAttackWallSEL(b3.Priority):
def __init__(self):
repeatTwiceAttack = b3.Repeater(AttackLastDirection(), 2)
childSeq = b3.Sequence([wasStillCondition(), repeatTwiceAttack])
childList = [childSeq, b3.Succeeder()]
super().__init__(childList)
class GetCheckConsumeNextMoveSEQ(b3.Priority):
def __init__(self):
childSeq = b3.Sequence([GetNextMove(),
isNextMoveValidCondition(),
ConsumeNextMove()])
childList = [childSeq, b3.Succeeder()]
super().__init__(childList)
class CheckMove(b3.Sequence):
def __init__(self):
isFarFromGoal = b3.Inverter(isNextToGoalCondition())
isGoalHomeOrNoPath = b3.Priority([isGoalHomeCondition(), isFarFromGoal])
childList = [isGoalHomeOrNoPath,
GetCheckConsumeNextMoveSEQ()]
super().__init__(childList)
class CheckPathOrCreatePath(b3.Priority):
def __init__(self):
isThereNoPath = b3.Inverter(isThereAPathCondition())
ifNoPathCreateIt = b3.Sequence([isThereNoPath,
CreatePath()])
childList = [ifNoPathCreateIt, b3.Succeeder()]
super().__init__(childList)
class WalkToGoalSEQ(b3.Sequence):
def __init__(self):
childList = [isThereAGoalCondition(),
CheckPathOrCreatePath(),
CheckMove(),
RemoveGoal()]
self.id = 1
super().__init__(childList)
#### MINING
class MineTillFullOrNoLoot(b3.Priority):
def __init__(self):
isNotFull = b3.Inverter(isFullCondition())
childSequence = b3.Sequence([isNotFull,
isGoalLootCondition(),
isNextToGoalCondition(),
MineGoal()])
childList = [childSequence, b3.Succeeder()]
super().__init__(childList)
class CheckIfNearLootOrWalkThere(b3.Priority):
def __init__(self):
isFarFromGoal = b3.Inverter(isNextToGoalCondition())
childSequence = b3.Sequence([isFarFromGoal,
WalkToGoalSEQ(),
SetGoalToRessource()])
childList = [childSequence, b3.Succeeder()]
super().__init__(childList)
class CheckIfGoalOrSetRessource(b3.Priority):
def __init__(self):
isThereNoGoal = b3.Inverter(isThereAGoalCondition())
childSequence = b3.Sequence([isThereNoGoal,
SetGoalToRessource()])
childList = [childSequence, b3.Succeeder()]
super().__init__(childList)
class GoMineSEQ(b3.Sequence):
def __init__(self):
childList = [CheckIfGoalOrSetRessource(),
CheckIfNearLootOrWalkThere(),
MineTillFullOrNoLoot()]
super().__init__(childList) | 0.423816 | 0.234615 |
from preprocess import *
import os
def test_find_non_unique_ids_0():
    """Null case: an already-unique array yields empty outputs."""
    ids = np.array(list(range(11)))
    repeated, positions = find_non_unique_ids(ids)
    assert repeated.shape == (0,)
    assert positions.shape == (0,)
def test_find_non_unique_ids_1():
    """Null reversed case: a unique descending array yields empty outputs."""
    ids = np.array(list(range(11)))[::-1]
    repeated, positions = find_non_unique_ids(ids)
    assert repeated.shape == (0,)
    assert positions.shape == (0,)
def test_find_non_unique_ids_2():
    """Perfect case: every element after the first repeats the same id."""
    ids = np.array([1234132] * 5)
    repeated, positions = find_non_unique_ids(ids)
    assert (repeated == np.array([1234132] * 4)).all()
    assert (positions == np.array([1, 2, 3, 4])).all()
def test_find_non_unique_ids_3():
    """Realistic case with a few scattered duplicate ids."""
    ids = np.array([7, 5, 3, 2, 4, 5, 6, 2, 1, 7])
    repeated, positions = find_non_unique_ids(ids)
    assert (repeated == np.array([5, 2, 7])).all()
    assert (positions == np.array([5, 7, 9])).all()
def test_generate_new_ids_0():
    """New ids continue directly after the existing maximum."""
    existing = np.arange(10)
    fresh = generate_new_ids(existing, 10)
    assert (fresh == np.arange(10, 20)).all()
def test_find_and_replace_0():
    """Realistic case: duplicates are replaced and both position maps reported."""
    ids = np.array([7, 5, 3, 2, 4, 5, 6, 2, 1, 7])
    new_ids, oldpos, newpos = find_and_replace_non_unique_ids(ids)
    assert (new_ids == np.array([7, 5, 3, 2, 4, 8, 6, 9, 1, 10])).all()
    assert oldpos == {5: 5, 7: 2, 9: 7}
    assert newpos == {5: 8, 7: 9, 9: 10}
def test_write_data_0():
    """
    Tests that write_data round-trips both position mappings through YAML.
    """
    newpos = {4: 7, 5: 8, 6: 9}
    oldpos = {4: 1, 5: 2, 6: 3}
    write_data("test.yml", oldpos, newpos)
    # Load it back in with safe_load: bare yaml.load without a Loader is
    # deprecated and unsafe; the data here is plain ints/dicts, so the
    # safe loader reads it identically.
    with open("test.yml", "r") as f:
        data = yaml.safe_load(f)
    # Delete our friendly neighbourhood test file
    os.remove("test.yml")
    expected_data = {"old_positions": oldpos, "new_positions": newpos}
    assert expected_data == data
def test_combine_and_split_0():
    """
    Tests the combine_array and split_array functions.
    """
    # Round-trip: splitting the combined arrays must reproduce every input.
    data_in = [np.arange(100), np.arange(10), np.zeros(1000)]
    data_out = split_arrays(*combine_arrays(data_in))
    for d_in, d_out in zip(data_in, data_out):
        assert (d_in == d_out).all()
return | tests/test_preprocess.py | from preprocess import *
import os
def test_find_non_unique_ids_0():
"""
Tests the null case, i.e. no repeated ids.
"""
data = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
repeated, positions = find_non_unique_ids(data)
assert repeated.shape == (0,)
assert positions.shape == (0,)
return
def test_find_non_unique_ids_1():
"""
Tests the null reversed case, i.e. no repeated ids.
"""
data = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])[::-1]
repeated, positions = find_non_unique_ids(data)
assert repeated.shape == (0,)
assert positions.shape == (0,)
return
def test_find_non_unique_ids_2():
"""
Tests the perfect case where everything is repeated.
"""
data = np.array([1234132, 1234132, 1234132, 1234132, 1234132])
repeated, positions = find_non_unique_ids(data)
assert (repeated == np.array([1234132, 1234132, 1234132, 1234132])).all()
assert (positions == np.array([1, 2, 3, 4])).all()
return
def test_find_non_unique_ids_3():
"""
Tests a more realistic case.
"""
data = np.array([7, 5, 3, 2, 4, 5, 6, 2, 1, 7])
repeated, positions = find_non_unique_ids(data)
assert (repeated == np.array([5, 2, 7])).all()
assert (positions == np.array([5, 7, 9])).all()
return
def test_generate_new_ids_0():
"""
Tests the generation of new IDs.
"""
data = np.arange(10)
n_required = 10
expected_output = np.arange(10, 20, 1)
assert (expected_output == generate_new_ids(data, n_required)).all()
return
def test_find_and_replace_0():
"""
Tests a more realistic case.
"""
data = np.array([7, 5, 3, 2, 4, 5, 6, 2, 1, 7])
new_ids, oldpos, newpos = find_and_replace_non_unique_ids(data)
expected_new_ids = np.array([7, 5, 3, 2, 4, 8, 6, 9, 1, 10])
expected_oldpos = {5: 5, 7: 2, 9: 7}
expected_newpos = {5: 8, 7: 9, 9: 10}
assert (new_ids == expected_new_ids).all()
assert expected_oldpos == oldpos
assert expected_newpos == newpos
return
def test_write_data_0():
"""
Tests the writing of the data.
"""
newpos = {4: 7, 5: 8, 6: 9}
oldpos = {4: 1, 5: 2, 6: 3}
write_data("test.yml", oldpos, newpos)
# Let's load it back in
with open("test.yml", "r") as f:
data = yaml.load(f)
# Delete our friendly neighbourhood test file
os.remove("test.yml")
expected_data = {"old_positions": oldpos, "new_positions": newpos}
assert expected_data == data
def test_combine_and_split_0():
"""
Tests the combine_array and split_array functions.
"""
data_in = [np.arange(100), np.arange(10), np.zeros(1000)]
data_out = split_arrays(*combine_arrays(data_in))
for d_in, d_out in zip(data_in, data_out):
assert (d_in == d_out).all()
return | 0.719975 | 0.762247 |
import pickle
import re

import numpy as np
from keras.layers import Input, LSTM, Embedding, Dense, Dropout, TimeDistributed
from keras.models import Model
from sklearn.model_selection import train_test_split

from model_file import build
class DumbModel:
def __init__(self,vocab_size=10000,num_of_encoder_tokens,num_of_decoder_tokens):
self.vocab_size = vocab_size
self.clf=None
self.num_of_encoder_tokens = num_of_encoder_tokens
self.num_of_decoder_tokens = num_of_decoder_tokens
def generate_batch(X = X_train, y = y_train, batch_size = 128):
while True:
for j in range(0, len(X), batch_size):
#encoder input
encoder_input_data = np.zeros((batch_size, max_source_length),dtype='float32')
#decoder input
decoder_input_data = np.zeros((batch_size, max_target_size),dtype='float32')
#target
decoder_target_data = np.zeros((batch_size, max_target_size, num_of_decoder_tokens),dtype='float32')
for i, (input_text, target_text) in enumerate(zip(X[j:j+batch_size], y[j:j+batch_size])):
for t, word in enumerate(input_text.split()):
encoder_input_data[i, t] = eng_char_to_index_dict[word] # encoder input seq
for t, word in enumerate(target_text.split()):
if t<len(target_text.split())-1:
decoder_input_data[i, t] = target_char_to_index_dict[word] # decoder input seq
if t>0:
# decoder target sequence (one hot encoded)
# does not include the START_ token
# Offset by one timestep since it is one time stamp ahead
decoder_target_data[i, t - 1, target_char_to_index_dict[word]] = 1
yield([encoder_input_data, decoder_input_data], decoder_target_data)
    def train(self, X_train, y_train):
        """Build the seq2seq model and fit it with the batch generator.

        NOTE(review): `num_of_encoder_tokens`/`num_of_decoder_tokens` are
        read as bare names (not self.*), `generate_batch` is called
        unqualified, and `es` (an early-stopping callback?) is never
        defined in this file -- confirm where these come from.
        """
        model = build(num_of_encoder_tokens, num_of_decoder_tokens)
        # hold out 20% of the data for validation
        X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=0.2)
        train_samples = len(X_train)
        val_samples = len(X_test)
        batch_size = 50
        epochs = 50
        model.fit_generator(generator=generate_batch(X_train, y_train, batch_size=batch_size),
                            steps_per_epoch=train_samples // batch_size,
                            epochs=epochs,
                            callbacks=[es],
                            validation_data=generate_batch(X_test, y_test, batch_size=batch_size),
                            validation_steps=val_samples // batch_size,)
        pass
    def inference(self):
        """Build and return the inference-time (encoder_model, decoder_model) pair.

        Rebuilds the architecture: an embedding+LSTM encoder whose final
        states seed an embedding+LSTM decoder with a softmax over the
        target vocabulary, then splits it into separate encoder/decoder
        models for step-by-step decoding.

        NOTE(review): `latent_dim`, `num_of_encoder_tokens` and
        `num_of_decoder_tokens` are read as bare names, not from self --
        confirm where they are defined.
        """
        # Inference model
        # Encoder: embed source tokens, keep only the final LSTM states
        encoder_inputs = Input(shape=(None,))
        enc_emb = Embedding(num_of_encoder_tokens, latent_dim, mask_zero=True)(encoder_inputs)
        encoder_lstm = LSTM(latent_dim, return_state=True)
        encoder_outputs, state_h, state_c = encoder_lstm(enc_emb)
        # We discard `encoder_outputs` and only keep the states.
        encoder_states = [state_h, state_c]
        # Set up the decoder, using `encoder_states` as initial state.
        decoder_inputs = Input(shape=(None,))
        dec_emb_layer = Embedding(num_of_decoder_tokens, latent_dim, mask_zero=True)
        dec_emb = dec_emb_layer(decoder_inputs)
        decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
        decoder_outputs, _, _ = decoder_lstm(dec_emb,
                                             initial_state=encoder_states)
        decoder_dense = TimeDistributed(Dense(num_of_decoder_tokens, activation='softmax'))
        decoder_outputs = decoder_dense(decoder_outputs)
        # NOTE(review): `model` is built but never used or returned here.
        model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
        # storing encoder input and internal states so as to give to decoder part
        encoder_model = Model(encoder_inputs, encoder_states)
        # specifying hidden and cell state for decoder part as vector process it will get output predicted and again we add to decoder states
        decoder_state_input_h = Input(shape=(latent_dim,))
        decoder_state_input_c = Input(shape=(latent_dim,))
        decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
        dec_emb2 = dec_emb_layer(decoder_inputs)  # Get the embeddings of the decoder sequence
        # To predict the next word in the sequence, set the initial states to the states from the previous time step
        decoder_outputs2, state_h2, state_c2 = decoder_lstm(dec_emb2, initial_state=decoder_states_inputs)
        decoder_states2 = [state_h2, state_c2]
        decoder_outputs2 = decoder_dense(decoder_outputs2)  # A dense softmax layer to generate prob dist. over the target vocabulary
        # Final decoder model
        decoder_model = Model(
            [decoder_inputs] + decoder_states_inputs,
            [decoder_outputs2] + decoder_states2)
        return encoder_model, decoder_model
def decode_sequence(self,input_seq):
# Encode the input as state vectors
encoder_model,decoder_model= inference()
states_value = encoder_model.predict(input_seq)
# Generate empty target sequence of length 1.
target_seq = np.zeros((1,1))
target_seq[0, 0] = mar_char_to_index_dict['START_']
stop_condition = False
decoded_sentence = ''
while not stop_condition:
output_tokens, h, c = decoder_model.predict([target_seq] + states_value)
# Sample a token
sampled_token_index = np.argmax(output_tokens[0, -1, :])
sampled_char = mar_index_to_char_dict[sampled_token_index]
if (sampled_char == '_END'):
break;
decoded_sentence += ' '+sampled_char
target_seq = np.zeros((1,1))
target_seq[0, 0] = sampled_token_index
# Update states
states_value = [h, c]
return decoded_sentence
def pre_process(self):
sentence = sentence.lower()
sentance = re.sub("'","",sentence).strip()
# sentence = re.sub(" +", " ", sentence)
# remove_digits = str.maketrans('','',digits)
# sentence=sentence.translate(remove_digits)
sentance = ' '.join(e.lower() for e in sentance.split() if e.lower() not in exclude)
encoder_input_data = np.zeros((1, 35),dtype='float32')
for t, word in enumerate(sentence.split()):
encoder_input_data[0, t] = eng_char_to_index_dict[word]
return encoder_input_data
def predict(self,x):
sent = pre_processing(x)
predicted_output = decode_sequence(sent)
return predicted_output
def serialize(self,fname):
with open(fname,'wb') as f:
pickle.dump(self.clf,f)
@staticmethod
def deserialize(fname):
model = DumbModel()
with open(fname,'rb') as f:
model.clf=pickle.load(f)
return model | run/model.py | import pickle
from keras.layers import Input, LSTM, Embedding, Dense,Dropout,TimeDistributed
from keras.models import Model
from sklearn.model_selection import train_test_split
from model_file import build
class DumbModel:
def __init__(self,vocab_size=10000,num_of_encoder_tokens,num_of_decoder_tokens):
self.vocab_size = vocab_size
self.clf=None
self.num_of_encoder_tokens = num_of_encoder_tokens
self.num_of_decoder_tokens = num_of_decoder_tokens
def generate_batch(X = X_train, y = y_train, batch_size = 128):
while True:
for j in range(0, len(X), batch_size):
#encoder input
encoder_input_data = np.zeros((batch_size, max_source_length),dtype='float32')
#decoder input
decoder_input_data = np.zeros((batch_size, max_target_size),dtype='float32')
#target
decoder_target_data = np.zeros((batch_size, max_target_size, num_of_decoder_tokens),dtype='float32')
for i, (input_text, target_text) in enumerate(zip(X[j:j+batch_size], y[j:j+batch_size])):
for t, word in enumerate(input_text.split()):
encoder_input_data[i, t] = eng_char_to_index_dict[word] # encoder input seq
for t, word in enumerate(target_text.split()):
if t<len(target_text.split())-1:
decoder_input_data[i, t] = target_char_to_index_dict[word] # decoder input seq
if t>0:
# decoder target sequence (one hot encoded)
# does not include the START_ token
# Offset by one timestep since it is one time stamp ahead
decoder_target_data[i, t - 1, target_char_to_index_dict[word]] = 1
yield([encoder_input_data, decoder_input_data], decoder_target_data)
def train(self,X_train,y_train):
model = build(num_of_encoder_tokens,num_of_decoder_tokens)
X_train, X_test, y_train, y_test = train_test_split(X_train,y_train, test_size = 0.2)
train_samples = len(X_train)
val_samples = len(X_test)
batch_size = 50
epochs = 50
model.fit_generator(generator = generate_batch(X_train, y_train, batch_size = batch_size ),
steps_per_epoch = train_samples//batch_size,
epochs=epochs,
callbacks=[es],
validation_data = generate_batch(X_test, y_test, batch_size = batch_size),
validation_steps = val_samples//batch_size,)
pass
def inference(self):
# Inference model
# Encoder
encoder_inputs = Input(shape=(None,))
enc_emb = Embedding(num_of_encoder_tokens, latent_dim, mask_zero = True)(encoder_inputs)
encoder_lstm = LSTM(latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder_lstm(enc_emb)
# We discard `encoder_outputs` and only keep the states.
encoder_states = [state_h, state_c]
# Set up the decoder, using `encoder_states` as initial state.
decoder_inputs = Input(shape=(None,))
dec_emb_layer = Embedding(num_of_decoder_tokens, latent_dim, mask_zero = True)
dec_emb = dec_emb_layer(decoder_inputs)
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(dec_emb,
initial_state=encoder_states)
decoder_dense = TimeDistributed(Dense(num_of_decoder_tokens, activation='softmax'))
decoder_outputs = decoder_dense(decoder_outputs)
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
#storing encoder input and internal states so as to give to decoder part
encoder_model = Model(encoder_inputs, encoder_states)
#specifying hidden and cell state for decoder part as vector process it will get output predicted and again we add to decoder states
decoder_state_input_h = Input(shape=(latent_dim,))
decoder_state_input_c = Input(shape=(latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
dec_emb2= dec_emb_layer(decoder_inputs) # Get the embeddings of the decoder sequence
# To predict the next word in the sequence, set the initial states to the states from the previous time step
decoder_outputs2, state_h2, state_c2 = decoder_lstm(dec_emb2, initial_state=decoder_states_inputs)
decoder_states2 = [state_h2, state_c2]
decoder_outputs2 = decoder_dense(decoder_outputs2) # A dense softmax layer to generate prob dist. over the target vocabulary
# Final decoder model
decoder_model = Model(
[decoder_inputs] + decoder_states_inputs,
[decoder_outputs2] + decoder_states2)
return encoder_model,decoder_model
def decode_sequence(self,input_seq):
# Encode the input as state vectors
encoder_model,decoder_model= inference()
states_value = encoder_model.predict(input_seq)
# Generate empty target sequence of length 1.
target_seq = np.zeros((1,1))
target_seq[0, 0] = mar_char_to_index_dict['START_']
stop_condition = False
decoded_sentence = ''
while not stop_condition:
output_tokens, h, c = decoder_model.predict([target_seq] + states_value)
# Sample a token
sampled_token_index = np.argmax(output_tokens[0, -1, :])
sampled_char = mar_index_to_char_dict[sampled_token_index]
if (sampled_char == '_END'):
break;
decoded_sentence += ' '+sampled_char
target_seq = np.zeros((1,1))
target_seq[0, 0] = sampled_token_index
# Update states
states_value = [h, c]
return decoded_sentence
def pre_process(self):
sentence = sentence.lower()
sentance = re.sub("'","",sentence).strip()
# sentence = re.sub(" +", " ", sentence)
# remove_digits = str.maketrans('','',digits)
# sentence=sentence.translate(remove_digits)
sentance = ' '.join(e.lower() for e in sentance.split() if e.lower() not in exclude)
encoder_input_data = np.zeros((1, 35),dtype='float32')
for t, word in enumerate(sentence.split()):
encoder_input_data[0, t] = eng_char_to_index_dict[word]
return encoder_input_data
def predict(self,x):
sent = pre_processing(x)
predicted_output = decode_sequence(sent)
return predicted_output
def serialize(self,fname):
with open(fname,'wb') as f:
pickle.dump(self.clf,f)
@staticmethod
def deserialize(fname):
model = DumbModel()
with open(fname,'rb') as f:
model.clf=pickle.load(f)
return model | 0.795062 | 0.450178 |
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from django.core.exceptions import MultipleObjectsReturned
from mighty import functions
from mighty.apps import MightyConfig as conf
from mighty.applications.logger import EnableLogger
import datetime, sys, logging, csv, os.path
logger = logging.getLogger(__name__)
class BaseCommand(BaseCommand, EnableLogger):
help = 'Command Base override by Mighty'
position = 0
prefix_bar = 'Percent'
current_info = ''
errors = []
in_test = False
def get_total(self):
return self.total if self.total else 0
def set_position(self, pos=1):
self.position+=pos
def get_current_info(self):
return self.current_info
def progress_bar(self, bar_length=20):
if self.verbosity > 0:
percent = self.position / self.get_total()
if self.progressbar:
arrow = '-' * int(round(percent * bar_length)-1) + '>'
spaces = ' ' * (bar_length - len(arrow))
sys.stdout.write("\r{0}: [{1}] {2}% ({3}/{4}) {5}".format(
self.prefix_bar,
arrow + spaces,
int(round(percent * 100)),
self.position,
self.get_total(),
self.get_current_info(),
)
)
sys.stdout.flush()
else:
sys.stdout.write("\r{0}: {1}% ({2}/{3}) {4}".format(
self.prefix_bar,
int(round(percent * 100)),
self.position,
self.get_total(),
self.get_current_info())
)
print()
if self.position == self.get_total(): print()
def create_parser(self, prog_name, subcommand, **kwargs):
self.subcommand = subcommand
return super().create_parser(prog_name, subcommand)
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument('--test', action="store_true")
parser.add_argument('--total', default=0)
parser.add_argument('--encoding', default='utf8')
parser.add_argument('--logfile', default="%s_%s.log" % (str(self.subcommand).lower(), f"{datetime.datetime.now():%Y%m%d_%H%M%S_%f}"))
parser.add_argument('--progressbar', action="store_true")
def handle(self, *args, **options):
self.in_test = options.get('test')
self.encoding = options.get('encoding')
self.logfile = options.get('logfile')
self.progressbar = options.get('progressbar')
self.verbosity = options.get('verbosity', 0)
logger.debug('start')
self.makeJob()
self.showErrors()
logger.debug('end')
def makeJob(self):
self.before_job()
self.do()
self.after_job()
def before_job(self): pass
def after_job(self): pass
def showErrors(self):
for error in self.errors:
print(error)
logger.info(error)
def do(self):
raise NotImplementedError("Command should implement method do(self)")
class ModelBaseCommand(BaseCommand):
help = 'Commande Model Base'
manager = 'objects'
label = None
model = None
filter = None
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument('--create', action="store_true")
parser.add_argument('--label', default=None)
parser.add_argument('--model', default=None)
parser.add_argument('--filter', default=None)
parser.add_argument('--manager', default='objects')
parser.add_argument('--search', action="store_true")
def handle(self, *args, **options):
self.create = options.get('create')
self.label = options.get('label', self.label)
self.model = options.get('model', self.model)
self.manager = options.get('manager', self.manager)
self.filter = options.get('filter')
self.search = options.get('search')
super().handle(*args, **options)
@property
def model_use(self, *args, **kwargs):
label = kwargs.get('label', self.label)
model = kwargs.get('model', self.model)
return functions.get_model(label, model)
def get_queryset(self, *args, **kwargs):
manager = kwargs.get('manager', self.manager)
model = self.model_use
return getattr(model, manager).filter(**dict(x.split(',') for x in self.filter.split(';')) if self.filter else {})
def do(self):
self.each_objects()
def each_objects(self):
qs = self.get_queryset()
self.total = len(qs)
for obj in qs:
self.current_object = obj
self.set_position()
self.progress_bar()
self.on_object(obj)
def on_object(self, object):
raise NotImplementedError("Command should implement method on_object(self, obj)")
class CSVModelCommand(ModelBaseCommand):
column_for_current = None
def add_arguments(self, parser):
parser.add_argument('--csv')
parser.add_argument('--delimiter', default=',')
parser.add_argument('--quotechar', default='"')
parser.add_argument('--quoting', default=csv.QUOTE_ALL)
super().add_arguments(parser)
def handle(self, *args, **options):
self.csvfile = options.get('csv')
self.delimiter = options.get('delimiter')
self.quotechar = options.get('quotechar')
self.quoting = options.get('quoting')
if not os.path.isfile(self.csvfile):
raise CommandError('CSV "%s" does not exist' % self.csv)
super().handle(*args, **options)
def prepare_fields(self, fields):
if hasattr(self, 'fields'):
ofields = self.fields
rfields = {value: key for key, value in self.fields.items()}
self.fields = {}
self.reverse = {}
for field in fields:
self.fields[field] = ofields[field] if field in ofields else field
if field in rfields:
self.reverse[rfields[field]] = field
else:
self.reverse[field] = field
self.fields = {field: field for field in fields}
else:
self.fields = self.reverse = {field: field for field in fields}
def do(self):
self.total = len(open(self.csvfile).readlines())-1
with open(self.csvfile, encoding=self.encoding) as csvfile:
reader = csv.DictReader(csvfile, delimiter=self.delimiter)
self.prepare_fields(reader.fieldnames)
for row in reader:
self.set_position()
if self.column_for_current:
self.current_info = row[self.reverse['extension']]
self.progress_bar()
self.on_row(row)
def on_row(self, row):
raise NotImplementedError("Command should implement method on_object(self, obj)") | management/__init__.py | from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from django.core.exceptions import MultipleObjectsReturned
from mighty import functions
from mighty.apps import MightyConfig as conf
from mighty.applications.logger import EnableLogger
import datetime, sys, logging, csv, os.path
logger = logging.getLogger(__name__)
class BaseCommand(BaseCommand, EnableLogger):
help = 'Command Base override by Mighty'
position = 0
prefix_bar = 'Percent'
current_info = ''
errors = []
in_test = False
def get_total(self):
return self.total if self.total else 0
def set_position(self, pos=1):
self.position+=pos
def get_current_info(self):
return self.current_info
def progress_bar(self, bar_length=20):
if self.verbosity > 0:
percent = self.position / self.get_total()
if self.progressbar:
arrow = '-' * int(round(percent * bar_length)-1) + '>'
spaces = ' ' * (bar_length - len(arrow))
sys.stdout.write("\r{0}: [{1}] {2}% ({3}/{4}) {5}".format(
self.prefix_bar,
arrow + spaces,
int(round(percent * 100)),
self.position,
self.get_total(),
self.get_current_info(),
)
)
sys.stdout.flush()
else:
sys.stdout.write("\r{0}: {1}% ({2}/{3}) {4}".format(
self.prefix_bar,
int(round(percent * 100)),
self.position,
self.get_total(),
self.get_current_info())
)
print()
if self.position == self.get_total(): print()
def create_parser(self, prog_name, subcommand, **kwargs):
self.subcommand = subcommand
return super().create_parser(prog_name, subcommand)
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument('--test', action="store_true")
parser.add_argument('--total', default=0)
parser.add_argument('--encoding', default='utf8')
parser.add_argument('--logfile', default="%s_%s.log" % (str(self.subcommand).lower(), f"{datetime.datetime.now():%Y%m%d_%H%M%S_%f}"))
parser.add_argument('--progressbar', action="store_true")
def handle(self, *args, **options):
self.in_test = options.get('test')
self.encoding = options.get('encoding')
self.logfile = options.get('logfile')
self.progressbar = options.get('progressbar')
self.verbosity = options.get('verbosity', 0)
logger.debug('start')
self.makeJob()
self.showErrors()
logger.debug('end')
def makeJob(self):
self.before_job()
self.do()
self.after_job()
def before_job(self): pass
def after_job(self): pass
def showErrors(self):
for error in self.errors:
print(error)
logger.info(error)
def do(self):
raise NotImplementedError("Command should implement method do(self)")
class ModelBaseCommand(BaseCommand):
help = 'Commande Model Base'
manager = 'objects'
label = None
model = None
filter = None
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument('--create', action="store_true")
parser.add_argument('--label', default=None)
parser.add_argument('--model', default=None)
parser.add_argument('--filter', default=None)
parser.add_argument('--manager', default='objects')
parser.add_argument('--search', action="store_true")
def handle(self, *args, **options):
self.create = options.get('create')
self.label = options.get('label', self.label)
self.model = options.get('model', self.model)
self.manager = options.get('manager', self.manager)
self.filter = options.get('filter')
self.search = options.get('search')
super().handle(*args, **options)
@property
def model_use(self, *args, **kwargs):
label = kwargs.get('label', self.label)
model = kwargs.get('model', self.model)
return functions.get_model(label, model)
def get_queryset(self, *args, **kwargs):
manager = kwargs.get('manager', self.manager)
model = self.model_use
return getattr(model, manager).filter(**dict(x.split(',') for x in self.filter.split(';')) if self.filter else {})
def do(self):
self.each_objects()
def each_objects(self):
qs = self.get_queryset()
self.total = len(qs)
for obj in qs:
self.current_object = obj
self.set_position()
self.progress_bar()
self.on_object(obj)
def on_object(self, object):
raise NotImplementedError("Command should implement method on_object(self, obj)")
class CSVModelCommand(ModelBaseCommand):
column_for_current = None
def add_arguments(self, parser):
parser.add_argument('--csv')
parser.add_argument('--delimiter', default=',')
parser.add_argument('--quotechar', default='"')
parser.add_argument('--quoting', default=csv.QUOTE_ALL)
super().add_arguments(parser)
def handle(self, *args, **options):
self.csvfile = options.get('csv')
self.delimiter = options.get('delimiter')
self.quotechar = options.get('quotechar')
self.quoting = options.get('quoting')
if not os.path.isfile(self.csvfile):
raise CommandError('CSV "%s" does not exist' % self.csv)
super().handle(*args, **options)
def prepare_fields(self, fields):
if hasattr(self, 'fields'):
ofields = self.fields
rfields = {value: key for key, value in self.fields.items()}
self.fields = {}
self.reverse = {}
for field in fields:
self.fields[field] = ofields[field] if field in ofields else field
if field in rfields:
self.reverse[rfields[field]] = field
else:
self.reverse[field] = field
self.fields = {field: field for field in fields}
else:
self.fields = self.reverse = {field: field for field in fields}
def do(self):
self.total = len(open(self.csvfile).readlines())-1
with open(self.csvfile, encoding=self.encoding) as csvfile:
reader = csv.DictReader(csvfile, delimiter=self.delimiter)
self.prepare_fields(reader.fieldnames)
for row in reader:
self.set_position()
if self.column_for_current:
self.current_info = row[self.reverse['extension']]
self.progress_bar()
self.on_row(row)
def on_row(self, row):
raise NotImplementedError("Command should implement method on_object(self, obj)") | 0.373647 | 0.065545 |
import os
import re
from first import first
from paramiko.config import SSHConfig
import pyeapi
__all__ = ['Device']
_if_shorten_find_patterns = [
r'Ethernet(?P<E2>\d+)/1',
r'Ethernet(?P<E1>\d+)',
r'Management(?P<M1>\d+)',
r'Port-Channel(?P<PO>\d+)',
r'Vlan(?P<V>\d+)'
]
_if_shorten_replace_patterns = {
'E1': 'E{}',
'E2': 'E{}',
'M1': 'M{}',
'PO': 'Po{}',
'V': 'V{}'
}
_if_shorten_regex = re.compile('|'.join('(%s)' % r for r in _if_shorten_find_patterns))
def _if_shorten_replace_func(mo):
r_name, r_val = first(filter(lambda i: i[1], mo.groupdict().items()))
return _if_shorten_replace_patterns[r_name].format(r_val)
def sorted_interfaces(if_list):
match_numbers = re.compile(r"\d+", re.M)
return sorted(if_list, key=lambda i: tuple(map(int, match_numbers.findall(i))))
class Device(object):
"""
An Arista EOS Device class that provides access via the eAPI.
"""
DEFAULT_TRANSPORT = 'https'
def __init__(self, hostname, username=None, password=None,
transport=None, port=None,
ssh_config_file=None):
self.hostname = hostname
c_args = dict()
c_args['username'] = os.getenv('EOS_USER') or os.getenv('USER') or username
c_args['password'] = os.getenv('EOS_PASSWORD') or os.getenv('PASSWORD') or password
if port:
c_args['port'] = port
ssh_config_file = ssh_config_file or os.getenv('EOS_SSH_CONFIG')
if ssh_config_file:
ssh_config = SSHConfig()
ssh_config.parse(open(ssh_config_file))
found = ssh_config.lookup(hostname)
if 'user' in found:
c_args['username'] = found['user']
if 'hostname' in found:
c_args['host'] = found['hostname']
if 'localforward' in found:
port = int(first(found['localforward']).split()[0])
c_args['port'] = port
c_args['host'] = 'localhost'
else:
c_args['host'] = hostname
c_args['transport'] = transport or self.DEFAULT_TRANSPORT
self.api = pyeapi.connect(**c_args)
def probe(self, timeout=5):
_orig_to = self.api.transport.timeout
self.api.transport.timeout = timeout
try:
self.api.transport.connect()
ok = True
except Exception:
ok = False
finally:
self.api.transport.timeout = _orig_to
return ok
def execute(self, command, encoding='json'):
"""
Execute an operational command, "show version" for example.
Parameters
----------
command : str - command to execute
encoding : str
The return format encoding, defaults to 'json'.
Returns
-------
dict - results of the command
"""
res = self.api.execute(['enable', command], encoding=encoding)
return res['result'][1]
@staticmethod
def shorten_if_name(if_name):
return _if_shorten_regex.sub(_if_shorten_replace_func, if_name) | nrfupytesteos/eos_device.py |
import os
import re
from first import first
from paramiko.config import SSHConfig
import pyeapi
__all__ = ['Device']
_if_shorten_find_patterns = [
r'Ethernet(?P<E2>\d+)/1',
r'Ethernet(?P<E1>\d+)',
r'Management(?P<M1>\d+)',
r'Port-Channel(?P<PO>\d+)',
r'Vlan(?P<V>\d+)'
]
_if_shorten_replace_patterns = {
'E1': 'E{}',
'E2': 'E{}',
'M1': 'M{}',
'PO': 'Po{}',
'V': 'V{}'
}
_if_shorten_regex = re.compile('|'.join('(%s)' % r for r in _if_shorten_find_patterns))
def _if_shorten_replace_func(mo):
r_name, r_val = first(filter(lambda i: i[1], mo.groupdict().items()))
return _if_shorten_replace_patterns[r_name].format(r_val)
def sorted_interfaces(if_list):
match_numbers = re.compile(r"\d+", re.M)
return sorted(if_list, key=lambda i: tuple(map(int, match_numbers.findall(i))))
class Device(object):
"""
An Arista EOS Device class that provides access via the eAPI.
"""
DEFAULT_TRANSPORT = 'https'
def __init__(self, hostname, username=None, password=None,
transport=None, port=None,
ssh_config_file=None):
self.hostname = hostname
c_args = dict()
c_args['username'] = os.getenv('EOS_USER') or os.getenv('USER') or username
c_args['password'] = os.getenv('EOS_PASSWORD') or os.getenv('PASSWORD') or password
if port:
c_args['port'] = port
ssh_config_file = ssh_config_file or os.getenv('EOS_SSH_CONFIG')
if ssh_config_file:
ssh_config = SSHConfig()
ssh_config.parse(open(ssh_config_file))
found = ssh_config.lookup(hostname)
if 'user' in found:
c_args['username'] = found['user']
if 'hostname' in found:
c_args['host'] = found['hostname']
if 'localforward' in found:
port = int(first(found['localforward']).split()[0])
c_args['port'] = port
c_args['host'] = 'localhost'
else:
c_args['host'] = hostname
c_args['transport'] = transport or self.DEFAULT_TRANSPORT
self.api = pyeapi.connect(**c_args)
def probe(self, timeout=5):
_orig_to = self.api.transport.timeout
self.api.transport.timeout = timeout
try:
self.api.transport.connect()
ok = True
except Exception:
ok = False
finally:
self.api.transport.timeout = _orig_to
return ok
def execute(self, command, encoding='json'):
"""
Execute an operational command, "show version" for example.
Parameters
----------
command : str - command to execute
encoding : str
The return format encoding, defaults to 'json'.
Returns
-------
dict - results of the command
"""
res = self.api.execute(['enable', command], encoding=encoding)
return res['result'][1]
@staticmethod
def shorten_if_name(if_name):
return _if_shorten_regex.sub(_if_shorten_replace_func, if_name) | 0.466359 | 0.091992 |
import os
from argparse import ArgumentParser
from pathlib import Path
import random
from collections import Counter
from tqdm import tqdm
import gzip
from lxml import etree as et
import pandas as pd
from frdocs.preprocessing.parsing import parse_reg_xml_tree, FrdocResolver
from frdocs.config import data_dir
'''
This script compiles parsed versions of each document from bulk XML files.
Each parsed document is a pandas dataframe, saved in pickle format. Files are
named by document number so that they can be retreived without looking up the
publication date.
'''
def iter_docs(xml_dir):
for xml_file in tqdm(sorted(os.listdir(xml_dir))):
pub_date = xml_file.split('.')[0]
with gzip.open(xml_dir / xml_file,'rb') as f:
tree = et.parse(f)
volume = int(tree.xpath('.//VOL/text()')[0])
for fr_type in ['NOTICE','PRORULE','RULE']:
for type_element in tree.xpath(f'.//{fr_type}S'):
try:
start_page = int(type_element.xpath('.//PRTPAGE/@P')[0])
except IndexError:
start_page = -1
for doc_element in type_element.xpath(f'.//{fr_type}'):
# doc_tree = et.ElementTree(doc_element)
doc = {
'doc_tree':et.ElementTree(doc_element),
# 'fr_type':fr_type.lower(),
'volume':volume,
'publication_date':pub_date,
'start_page':start_page,
}
# Get end page from page elements
print_pages = [int(page) for page in doc_element.xpath('.//PRTPAGE/@P') if page.isdigit()]
doc['end_page'] = max([start_page] + print_pages)
# End page for this doc is start page for next doc
start_page = doc['end_page']
# Can only get the FR document number from the end of the document
frdoc_elements = doc_element.xpath('./FRDOC')
if not frdoc_elements:
print(f'Warning: Could not find FRDOC element in {xml_file}: {tree.getpath(doc_element)}')
doc['frdoc_string'] = None
elif len(frdoc_elements) > 1:
print(f'Warning: Found {len(frdoc_elements)} FRDOC elements in {xml_file}: {tree.getpath(doc_element)}')
doc['frdoc_string'] = None
else:
doc['frdoc_string'] = ' '.join(frdoc_elements[0].itertext())
yield doc
def main(args):
print('Parsing documents from daily XML files')
xml_dir = Path(data_dir) / 'raw' / 'xml'
parsed_dir = Path(data_dir) / 'parsed'
if not os.path.isdir(parsed_dir):
os.mkdir(parsed_dir)
frdoc_resolver = FrdocResolver()
if not args.force_update:
existing = {f.rsplit('.',1)[0] for f in os.listdir(parsed_dir)}
print(f'Found {len(existing)} existing parsed files ({len(frdoc_resolver.all_frdocs - existing)} remaining to parse)')
else:
existing = set()
n_parsed = 0
frdoc_counts = Counter()
failed = []
for doc in iter_docs(xml_dir):
frdoc = frdoc_resolver(doc)
if frdoc:
frdoc_counts.update([frdoc])
if (frdoc not in existing) or args.force_update:
parsed_df = parse_reg_xml_tree(doc['doc_tree'])
parsed_df.to_pickle(parsed_dir/f'{frdoc}.pkl')
existing.add(frdoc)
n_parsed += 1
else:
failed.append(doc)
print(f'Parsed {n_parsed} new documents')
completeness = len(existing)/len(frdoc_resolver.all_frdocs)
print(f'Database now has parsed documents, covering {100*completeness:.1f}% of frdoc numbers with metadata')
missing = list(frdoc_resolver.all_frdocs - existing)
if missing:
print(f'Missing parsed documents for {len(missing)} frdoc numbers ')
print('Examples include:\n\t' + '\n\t'.join(random.sample(missing,k=min(20,len(missing)))))
n_dups = sum(c > 1 for c in frdoc_counts.values())
print(f'{n_dups} resolved document numbers appear multiple times')
if n_dups:
common_dups = {d:c for d,c in frdoc_counts.most_common(20) if c > 1}
print('Most common examples:\n\t' + '\n\t'.join(f'{d} (x{c})' for d,c in common_dups.items()))
print(f'Failed to resolve frdoc numbers for {len(failed)} documents')
if failed:
print('Examples include:')
for failed_doc in random.sample(failed,k=min(20,len(failed))):
print(failed_doc)
# Add parsed information to index
print('Adding parsing success info to index')
index_df = pd.read_csv(Path(data_dir)/'index.csv')
index_df['parsed'] = index_df['frdoc_number'].isin(existing)
index_df.to_csv(Path(data_dir)/'index.csv',index=False)
if completeness < 1:
missing_df = index_df[~index_df['parsed']]
print('Missing parsed docs by top publication date (top 20):')
print(missing_df.groupby('publication_date')[['frdoc_number']].count().sort_values('frdoc_number',ascending=False).head(20))
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument('--force_update',dest='force_update',action='store_true')
args = parser.parse_args()
main(args) | preprocessing/compile_parsed.py | import os
from argparse import ArgumentParser
from pathlib import Path
import random
from collections import Counter
from tqdm import tqdm
import gzip
from lxml import etree as et
import pandas as pd
from frdocs.preprocessing.parsing import parse_reg_xml_tree, FrdocResolver
from frdocs.config import data_dir
'''
This script compiles parsed versions of each document from bulk XML files.
Each parsed document is a pandas dataframe, saved in pickle format. Files are
named by document number so that they can be retreived without looking up the
publication date.
'''
def iter_docs(xml_dir):
for xml_file in tqdm(sorted(os.listdir(xml_dir))):
pub_date = xml_file.split('.')[0]
with gzip.open(xml_dir / xml_file,'rb') as f:
tree = et.parse(f)
volume = int(tree.xpath('.//VOL/text()')[0])
for fr_type in ['NOTICE','PRORULE','RULE']:
for type_element in tree.xpath(f'.//{fr_type}S'):
try:
start_page = int(type_element.xpath('.//PRTPAGE/@P')[0])
except IndexError:
start_page = -1
for doc_element in type_element.xpath(f'.//{fr_type}'):
# doc_tree = et.ElementTree(doc_element)
doc = {
'doc_tree':et.ElementTree(doc_element),
# 'fr_type':fr_type.lower(),
'volume':volume,
'publication_date':pub_date,
'start_page':start_page,
}
# Get end page from page elements
print_pages = [int(page) for page in doc_element.xpath('.//PRTPAGE/@P') if page.isdigit()]
doc['end_page'] = max([start_page] + print_pages)
# End page for this doc is start page for next doc
start_page = doc['end_page']
# Can only get the FR document number from the end of the document
frdoc_elements = doc_element.xpath('./FRDOC')
if not frdoc_elements:
print(f'Warning: Could not find FRDOC element in {xml_file}: {tree.getpath(doc_element)}')
doc['frdoc_string'] = None
elif len(frdoc_elements) > 1:
print(f'Warning: Found {len(frdoc_elements)} FRDOC elements in {xml_file}: {tree.getpath(doc_element)}')
doc['frdoc_string'] = None
else:
doc['frdoc_string'] = ' '.join(frdoc_elements[0].itertext())
yield doc
def main(args):
print('Parsing documents from daily XML files')
xml_dir = Path(data_dir) / 'raw' / 'xml'
parsed_dir = Path(data_dir) / 'parsed'
if not os.path.isdir(parsed_dir):
os.mkdir(parsed_dir)
frdoc_resolver = FrdocResolver()
if not args.force_update:
existing = {f.rsplit('.',1)[0] for f in os.listdir(parsed_dir)}
print(f'Found {len(existing)} existing parsed files ({len(frdoc_resolver.all_frdocs - existing)} remaining to parse)')
else:
existing = set()
n_parsed = 0
frdoc_counts = Counter()
failed = []
for doc in iter_docs(xml_dir):
frdoc = frdoc_resolver(doc)
if frdoc:
frdoc_counts.update([frdoc])
if (frdoc not in existing) or args.force_update:
parsed_df = parse_reg_xml_tree(doc['doc_tree'])
parsed_df.to_pickle(parsed_dir/f'{frdoc}.pkl')
existing.add(frdoc)
n_parsed += 1
else:
failed.append(doc)
print(f'Parsed {n_parsed} new documents')
completeness = len(existing)/len(frdoc_resolver.all_frdocs)
print(f'Database now has parsed documents, covering {100*completeness:.1f}% of frdoc numbers with metadata')
missing = list(frdoc_resolver.all_frdocs - existing)
if missing:
print(f'Missing parsed documents for {len(missing)} frdoc numbers ')
print('Examples include:\n\t' + '\n\t'.join(random.sample(missing,k=min(20,len(missing)))))
n_dups = sum(c > 1 for c in frdoc_counts.values())
print(f'{n_dups} resolved document numbers appear multiple times')
if n_dups:
common_dups = {d:c for d,c in frdoc_counts.most_common(20) if c > 1}
print('Most common examples:\n\t' + '\n\t'.join(f'{d} (x{c})' for d,c in common_dups.items()))
print(f'Failed to resolve frdoc numbers for {len(failed)} documents')
if failed:
print('Examples include:')
for failed_doc in random.sample(failed,k=min(20,len(failed))):
print(failed_doc)
# Add parsed information to index
print('Adding parsing success info to index')
index_df = pd.read_csv(Path(data_dir)/'index.csv')
index_df['parsed'] = index_df['frdoc_number'].isin(existing)
index_df.to_csv(Path(data_dir)/'index.csv',index=False)
if completeness < 1:
missing_df = index_df[~index_df['parsed']]
print('Missing parsed docs by top publication date (top 20):')
print(missing_df.groupby('publication_date')[['frdoc_number']].count().sort_values('frdoc_number',ascending=False).head(20))
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument('--force_update',dest='force_update',action='store_true')
args = parser.parse_args()
main(args) | 0.338186 | 0.165323 |
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
syncbn = True
data = dict(
videos_per_gpu=4, # total batch size is 8Gpus*4 == 32
workers_per_gpu=4,
train=dict(
type='CtPDataset',
data_source=dict(
type='JsonClsDataSource',
ann_file='ucf101/annotations/train_split_1.json',
),
backend=dict(
type='ZipBackend',
zip_fmt='ucf101/zips/{}.zip',
frame_fmt='img_{:05d}.jpg',
),
frame_sampler=dict(
type='RandomFrameSampler',
num_clips=1,
clip_len=16,
strides=[1, 2, 3, 4, 5],
temporal_jitter=True
),
transform_cfg=[
dict(type='GroupScale', scales=[112, 128, 144]),
dict(type='GroupRandomCrop', out_size=112),
dict(type='GroupFlip', flip_prob=0.50),
dict(
type='PatchMask',
region_sampler=dict(
scales=[16, 24, 28, 32, 48, 64],
ratios=[0.5, 0.67, 0.75, 1.0, 1.33, 1.50, 2.0],
scale_jitter=0.18,
num_rois=3,
),
key_frame_probs=[0.5, 0.3, 0.2],
loc_velocity=3,
size_velocity=0.025,
label_prob=0.8
),
dict(type='RandomHueSaturation', prob=0.25, hue_delta=12, saturation_delta=0.1),
dict(type='DynamicBrightness', prob=0.5, delta=30, num_key_frame_probs=(0.7, 0.3)),
dict(type='DynamicContrast', prob=0.5, delta=0.12, num_key_frame_probs=(0.7, 0.3)),
dict(
type='GroupToTensor',
switch_rgb_channels=True,
div255=True,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225)
)
]
)
)
# optimizer
total_epochs = 300
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
step=[100, 200]
)
checkpoint_config = dict(interval=1, max_keep_ckpts=1, create_symlink=False)
workflow = [('train', 1)]
log_config = dict(
interval=10,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook'),
]
) | configs/ctp/pretraining_runtime_ucf.py | dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
syncbn = True
data = dict(
videos_per_gpu=4, # total batch size is 8Gpus*4 == 32
workers_per_gpu=4,
train=dict(
type='CtPDataset',
data_source=dict(
type='JsonClsDataSource',
ann_file='ucf101/annotations/train_split_1.json',
),
backend=dict(
type='ZipBackend',
zip_fmt='ucf101/zips/{}.zip',
frame_fmt='img_{:05d}.jpg',
),
frame_sampler=dict(
type='RandomFrameSampler',
num_clips=1,
clip_len=16,
strides=[1, 2, 3, 4, 5],
temporal_jitter=True
),
transform_cfg=[
dict(type='GroupScale', scales=[112, 128, 144]),
dict(type='GroupRandomCrop', out_size=112),
dict(type='GroupFlip', flip_prob=0.50),
dict(
type='PatchMask',
region_sampler=dict(
scales=[16, 24, 28, 32, 48, 64],
ratios=[0.5, 0.67, 0.75, 1.0, 1.33, 1.50, 2.0],
scale_jitter=0.18,
num_rois=3,
),
key_frame_probs=[0.5, 0.3, 0.2],
loc_velocity=3,
size_velocity=0.025,
label_prob=0.8
),
dict(type='RandomHueSaturation', prob=0.25, hue_delta=12, saturation_delta=0.1),
dict(type='DynamicBrightness', prob=0.5, delta=30, num_key_frame_probs=(0.7, 0.3)),
dict(type='DynamicContrast', prob=0.5, delta=0.12, num_key_frame_probs=(0.7, 0.3)),
dict(
type='GroupToTensor',
switch_rgb_channels=True,
div255=True,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225)
)
]
)
)
# optimizer
total_epochs = 300
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
step=[100, 200]
)
checkpoint_config = dict(interval=1, max_keep_ckpts=1, create_symlink=False)
workflow = [('train', 1)]
log_config = dict(
interval=10,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook'),
]
) | 0.477798 | 0.212375 |
import settings
import random as rn
def atBat(pitcher, batter):
""" Function to complete an at bat and output result"""
""" return: [out, hit, extra base, runners advance, bb, mss]"""
print('\n')
print('Pitcher: %s' % (settings.cname(pitcher)))
print('Now batting: %s' % (settings.cname(batter)))
print('BT: %i, WT: %i' % (batter.bt, batter.wt))
batter.pa += 1
ss = rn.randint(1, 100)
if pitcher.pitchDie > 0:
pd = rn.randint(1, pitcher.pitchDie)
elif pitcher.pitchDie < 0:
pd = rn.randint(pitcher.pitchDie, -1)
mss = ss + pd
print('MSS: %i' % mss)
if mss <= batter.bt:
hd = rn.randint(1, 20)
print('hd: %s' % hd)
if settings.hitTable[hd][1] != 'N/A':
dd = rn.randint(1, 12)
print('dd: %s' % dd)
if dd == 12:
batter.ab += 1
print('Result: Out!')
return [1, 0, 0, 0, 0, mss, batter]
elif dd in [10, 11]:
hr = settings.hitTable[hd][0]
hra = settings.hitTable[hd][0] - 1
if hra == 0:
hra = 1
ra = hr
res = settings.resultTable[hra]
print(('Result: %s, with great defense! ' +
'Runners advance %i base(s).') % (res, ra))
batter.ab += 1
batter.hits += 1
if hra == 2:
batter.hh += 1
elif hra == 3:
batter.hhh += 1
elif hra == 4:
batter.hr += 1
return [0, hra, 0, ra, 0, mss, batter]
elif dd >= 3 and dd <= 9:
hr = settings.resultTable[settings.hitTable[hd][0]]
ra = settings.hitTable[hd][2]
batter.ab += 1
batter.hits += 1
if hr == 2:
batter.hh += 1
elif hr == 3:
batter.hhh += 1
elif hr == 4:
batter.hr += 1
print('Result: %s! Runners advance %i base(s).'
% (hr, ra))
return [0, settings.hitTable[hd][0], 0, ra, 0, mss, batter]
elif dd < 3:
hr = (settings.resultTable[settings.hitTable[hd][0] +
settings.defTable[dd][1]])
ra = settings.hitTable[hd][2] + settings.defTable[dd][2]
batter.ab += 1
batter.hits += 1
if settings.hitTable[hd][0] == 2:
batter.hh += 1
elif settings.hitTable[hd][0] == 3:
batter.hhh += 1
elif settings.hitTable[hd][0] == 4:
batter.hr += 1
print('Result: %s and an error! Runners advance %i base(s).'
% (hr, ra))
return [0, settings.hitTable[hd][0], 1, ra, 0, mss, batter]
elif settings.hitTable[hd][1] == 'N/A':
hr = settings.resultTable[settings.hitTable[hd][0]]
ra = settings.hitTable[hd][2]
batter.ab += 1
if hr == 2:
batter.hh += 1
elif hr == 3:
batter.hhh += 1
elif hr == 4:
batter.hr += 1
print('Result: %s! Runners advance %i base(s).'
% (hr, ra))
return [0, settings.hitTable[hd][0], 0, ra, 0, mss, batter]
elif mss > batter.bt and mss <= batter.wt:
print("Result: Walk!")
batter.bb += 1
return [0, 'bb', 0, 1, 1, mss, batter]
elif mss > batter.wt:
ot = settings.outTable[abs(mss) % 10]
batter.ab += 1
if ot in (["K", "k"]):
batter.k += 1
print("Out! %s" % ot)
return [1, 0, 0, 0, 0, mss, batter] | atbat.py | import settings
import random as rn
def atBat(pitcher, batter):
""" Function to complete an at bat and output result"""
""" return: [out, hit, extra base, runners advance, bb, mss]"""
print('\n')
print('Pitcher: %s' % (settings.cname(pitcher)))
print('Now batting: %s' % (settings.cname(batter)))
print('BT: %i, WT: %i' % (batter.bt, batter.wt))
batter.pa += 1
ss = rn.randint(1, 100)
if pitcher.pitchDie > 0:
pd = rn.randint(1, pitcher.pitchDie)
elif pitcher.pitchDie < 0:
pd = rn.randint(pitcher.pitchDie, -1)
mss = ss + pd
print('MSS: %i' % mss)
if mss <= batter.bt:
hd = rn.randint(1, 20)
print('hd: %s' % hd)
if settings.hitTable[hd][1] != 'N/A':
dd = rn.randint(1, 12)
print('dd: %s' % dd)
if dd == 12:
batter.ab += 1
print('Result: Out!')
return [1, 0, 0, 0, 0, mss, batter]
elif dd in [10, 11]:
hr = settings.hitTable[hd][0]
hra = settings.hitTable[hd][0] - 1
if hra == 0:
hra = 1
ra = hr
res = settings.resultTable[hra]
print(('Result: %s, with great defense! ' +
'Runners advance %i base(s).') % (res, ra))
batter.ab += 1
batter.hits += 1
if hra == 2:
batter.hh += 1
elif hra == 3:
batter.hhh += 1
elif hra == 4:
batter.hr += 1
return [0, hra, 0, ra, 0, mss, batter]
elif dd >= 3 and dd <= 9:
hr = settings.resultTable[settings.hitTable[hd][0]]
ra = settings.hitTable[hd][2]
batter.ab += 1
batter.hits += 1
if hr == 2:
batter.hh += 1
elif hr == 3:
batter.hhh += 1
elif hr == 4:
batter.hr += 1
print('Result: %s! Runners advance %i base(s).'
% (hr, ra))
return [0, settings.hitTable[hd][0], 0, ra, 0, mss, batter]
elif dd < 3:
hr = (settings.resultTable[settings.hitTable[hd][0] +
settings.defTable[dd][1]])
ra = settings.hitTable[hd][2] + settings.defTable[dd][2]
batter.ab += 1
batter.hits += 1
if settings.hitTable[hd][0] == 2:
batter.hh += 1
elif settings.hitTable[hd][0] == 3:
batter.hhh += 1
elif settings.hitTable[hd][0] == 4:
batter.hr += 1
print('Result: %s and an error! Runners advance %i base(s).'
% (hr, ra))
return [0, settings.hitTable[hd][0], 1, ra, 0, mss, batter]
elif settings.hitTable[hd][1] == 'N/A':
hr = settings.resultTable[settings.hitTable[hd][0]]
ra = settings.hitTable[hd][2]
batter.ab += 1
if hr == 2:
batter.hh += 1
elif hr == 3:
batter.hhh += 1
elif hr == 4:
batter.hr += 1
print('Result: %s! Runners advance %i base(s).'
% (hr, ra))
return [0, settings.hitTable[hd][0], 0, ra, 0, mss, batter]
elif mss > batter.bt and mss <= batter.wt:
print("Result: Walk!")
batter.bb += 1
return [0, 'bb', 0, 1, 1, mss, batter]
elif mss > batter.wt:
ot = settings.outTable[abs(mss) % 10]
batter.ab += 1
if ot in (["K", "k"]):
batter.k += 1
print("Out! %s" % ot)
return [1, 0, 0, 0, 0, mss, batter] | 0.236428 | 0.318651 |
import os
import requests
import json
from rich import print
from rich.console import Console
import webbrowser
from tqdm import tqdm
def clear():
if os.name == 'nt':
os.system("cls")
else:
os.system("clear")
def strip(x):
return x.strip()
def manually_filter(publications):
console = Console()
selected = {}
count = 0
for i, key in enumerate(publications.keys()):
pub = publications[key]
title = pub['paper_title']
info_link = pub['paper_info_link']
pdf_link = pub['paper_link']
abstract = pub['paper_abstract']
clear()
print(f"[bold underline sky_blue2]{title}")
console.print(f"[underline sky_blue3][{i}/{len(publications.keys())}]", end=" ")
console.print("[underline sky_blue3][Info]", style=f"link {info_link}", end=" ")
console.print("[underline sky_blue3][PDF]", style=f"link {pdf_link}")
print("")
print("[bold underline sky_blue3]Abstract")
print(abstract)
print("")
print("[yellow]Would you like to select this publication?")
print("[yellow](y/n) to accept/reject, (p) to view the PDF, (i) for more info.")
response = input("> Response (y/n/p/i): ")
while response == 'p' or response == 'i':
if response == 'p':
webbrowser.open(pdf_link)
elif response == 'i':
webbrowser.open(info_link)
response = input("> Response (y/n/p/i): ")
if response == 'y' or response == 'yes':
selected[count] = pub
count += 1
if response == 'e':
return selected
return selected
def download(url, dest_folder):
file_name = url.split("/")[-1]
file_path = os.path.join(dest_folder, file_name)
resp = requests.get(url, stream=True)
if resp.ok:
with open(file_path, 'wb') as f:
for section in resp.iter_content(chunk_size=1024*8):
if section:
f.write(section)
f.flush()
os.fsync(f.fileno())
else:
print(f"[bold red]Failed to download from {url}")
print(f"[bold red]Details: {resp.status_code} {resp.text}")
if __name__ == "__main__":
if not os.path.isdir("./libraries"):
print("[bold red]Unable to detect the 'libraries' folder. Exiting...")
exit(1)
if len(os.listdir("./libraries")) < 1:
print("[bold red]Unable to detect any CVPR libraries. Exiting...")
exit(1)
print(
"[yellow]Please pick a folder name in which selected publications "
"will be stored."
)
storage_folder = input("> Folder Name: ")
while len(storage_folder) < 1:
storage_folder = input("> Please enter a valid folder name: ")
print("")
print(
"[yellow]Please enter keywords or phrases (separated by a comma) "
"that you wish to filter publications by. \nPress enter if "
"you do not wish to filter any publications."
)
pub_filters = input("> Filter: ")
print("")
if pub_filters == '':
pub_filters = None
else:
pub_filters = list(map(strip, pub_filters.split(",")))
print(
"[yellow]Please select the CVPR libraries you wish to search. "
"To select a library, type the number beside it. \nTo select "
"multiple libraries, separate the numbers with commas."
)
libraries = os.listdir("./libraries")
libraries.sort()
for i, library in enumerate(libraries):
print(f"[bold blue][{i}] {library}")
selected_library_nums = input("> Selected: ")
selected_library_nums = list(map(strip, selected_library_nums.split(",")))
print("")
if len(selected_library_nums) > 1:
print("[yellow]Loading libraries...")
else:
print("[yellow]Loading library...")
selected_libraries = []
for i in selected_library_nums:
if not i.isdigit():
print("[bold red]Library selection is not a number. Exiting...")
exit(1)
i = int(i)
with open(os.path.join("./libraries", libraries[i])) as f:
selected_libraries.append(json.load(f))
publications = {}
if pub_filters is not None:
if len(selected_library_nums) > 1:
print("[yellow]Filtering libraries...")
else:
print("[yellow]Filtering library...")
count = 0
# Loop over all libraries
for library in selected_libraries:
# Loop through each publication in the library
for key in library.keys():
publication = library[key]
# Filter by publication filters specified
for pub_filter in pub_filters:
# Find case insensitive matches in the paper title or abstract
if (pub_filter.lower() in publication['paper_title'].lower()
or pub_filter.lower() in publication['paper_abstract'].lower()):
publications[count] = publication
count += 1
else:
if len(selected_library_nums) > 1:
print("Compiling libraries...")
else:
print("Compiling library...")
count = 0
for library in selected_libraries:
for key in library.keys():
publications[count] = library[key]
count += 1
print("")
if len(publications.keys()) < 1:
print("[bold red]No publications found. Exiting...")
exit(0)
print(
"[yellow]Would you like to manually filter the selected "
"publications? \nIf not, all publication PDFs will be downloaded "
"from the list."
)
manual_filtering = input("> Response (y/n): ")
print("")
if manual_filtering == 'y' or manual_filtering == 'yes':
publications = manually_filter(publications)
if not os.path.exists(storage_folder):
os.makedirs(storage_folder)
json_path = os.path.join(storage_folder, 'publications.json')
with open(json_path, 'w') as f:
json.dump(publications, f, indent=4)
print("")
print("[yellow]Downloading PDFs...")
publication_keys = list(publications.keys())
for i in tqdm(range(len(publication_keys))):
pub = publications[publication_keys[i]]
download(pub['paper_link'], storage_folder)
print("")
print("[bold green]Done!") | explorer.py | import os
import requests
import json
from rich import print
from rich.console import Console
import webbrowser
from tqdm import tqdm
def clear():
if os.name == 'nt':
os.system("cls")
else:
os.system("clear")
def strip(x):
return x.strip()
def manually_filter(publications):
console = Console()
selected = {}
count = 0
for i, key in enumerate(publications.keys()):
pub = publications[key]
title = pub['paper_title']
info_link = pub['paper_info_link']
pdf_link = pub['paper_link']
abstract = pub['paper_abstract']
clear()
print(f"[bold underline sky_blue2]{title}")
console.print(f"[underline sky_blue3][{i}/{len(publications.keys())}]", end=" ")
console.print("[underline sky_blue3][Info]", style=f"link {info_link}", end=" ")
console.print("[underline sky_blue3][PDF]", style=f"link {pdf_link}")
print("")
print("[bold underline sky_blue3]Abstract")
print(abstract)
print("")
print("[yellow]Would you like to select this publication?")
print("[yellow](y/n) to accept/reject, (p) to view the PDF, (i) for more info.")
response = input("> Response (y/n/p/i): ")
while response == 'p' or response == 'i':
if response == 'p':
webbrowser.open(pdf_link)
elif response == 'i':
webbrowser.open(info_link)
response = input("> Response (y/n/p/i): ")
if response == 'y' or response == 'yes':
selected[count] = pub
count += 1
if response == 'e':
return selected
return selected
def download(url, dest_folder):
file_name = url.split("/")[-1]
file_path = os.path.join(dest_folder, file_name)
resp = requests.get(url, stream=True)
if resp.ok:
with open(file_path, 'wb') as f:
for section in resp.iter_content(chunk_size=1024*8):
if section:
f.write(section)
f.flush()
os.fsync(f.fileno())
else:
print(f"[bold red]Failed to download from {url}")
print(f"[bold red]Details: {resp.status_code} {resp.text}")
if __name__ == "__main__":
if not os.path.isdir("./libraries"):
print("[bold red]Unable to detect the 'libraries' folder. Exiting...")
exit(1)
if len(os.listdir("./libraries")) < 1:
print("[bold red]Unable to detect any CVPR libraries. Exiting...")
exit(1)
print(
"[yellow]Please pick a folder name in which selected publications "
"will be stored."
)
storage_folder = input("> Folder Name: ")
while len(storage_folder) < 1:
storage_folder = input("> Please enter a valid folder name: ")
print("")
print(
"[yellow]Please enter keywords or phrases (separated by a comma) "
"that you wish to filter publications by. \nPress enter if "
"you do not wish to filter any publications."
)
pub_filters = input("> Filter: ")
print("")
if pub_filters == '':
pub_filters = None
else:
pub_filters = list(map(strip, pub_filters.split(",")))
print(
"[yellow]Please select the CVPR libraries you wish to search. "
"To select a library, type the number beside it. \nTo select "
"multiple libraries, separate the numbers with commas."
)
libraries = os.listdir("./libraries")
libraries.sort()
for i, library in enumerate(libraries):
print(f"[bold blue][{i}] {library}")
selected_library_nums = input("> Selected: ")
selected_library_nums = list(map(strip, selected_library_nums.split(",")))
print("")
if len(selected_library_nums) > 1:
print("[yellow]Loading libraries...")
else:
print("[yellow]Loading library...")
selected_libraries = []
for i in selected_library_nums:
if not i.isdigit():
print("[bold red]Library selection is not a number. Exiting...")
exit(1)
i = int(i)
with open(os.path.join("./libraries", libraries[i])) as f:
selected_libraries.append(json.load(f))
publications = {}
if pub_filters is not None:
if len(selected_library_nums) > 1:
print("[yellow]Filtering libraries...")
else:
print("[yellow]Filtering library...")
count = 0
# Loop over all libraries
for library in selected_libraries:
# Loop through each publication in the library
for key in library.keys():
publication = library[key]
# Filter by publication filters specified
for pub_filter in pub_filters:
# Find case insensitive matches in the paper title or abstract
if (pub_filter.lower() in publication['paper_title'].lower()
or pub_filter.lower() in publication['paper_abstract'].lower()):
publications[count] = publication
count += 1
else:
if len(selected_library_nums) > 1:
print("Compiling libraries...")
else:
print("Compiling library...")
count = 0
for library in selected_libraries:
for key in library.keys():
publications[count] = library[key]
count += 1
print("")
if len(publications.keys()) < 1:
print("[bold red]No publications found. Exiting...")
exit(0)
print(
"[yellow]Would you like to manually filter the selected "
"publications? \nIf not, all publication PDFs will be downloaded "
"from the list."
)
manual_filtering = input("> Response (y/n): ")
print("")
if manual_filtering == 'y' or manual_filtering == 'yes':
publications = manually_filter(publications)
if not os.path.exists(storage_folder):
os.makedirs(storage_folder)
json_path = os.path.join(storage_folder, 'publications.json')
with open(json_path, 'w') as f:
json.dump(publications, f, indent=4)
print("")
print("[yellow]Downloading PDFs...")
publication_keys = list(publications.keys())
for i in tqdm(range(len(publication_keys))):
pub = publications[publication_keys[i]]
download(pub['paper_link'], storage_folder)
print("")
print("[bold green]Done!") | 0.337531 | 0.11221 |
import importlib.util
import os
import sys
import tempfile
import time
import unittest
from pyflink.common.typeinfo import Types
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.table import StreamTableEnvironment, TableDescriptor, Schema
from dl_on_flink_tensorflow.tf_cluster_config import TFClusterConfig
from dl_on_flink_tensorflow.tf_utils import train, inference, tensorboard
from tests.flink_ml_tensorflow.utils import add_dl_on_flink_jar, \
get_resource_folder, find_jar_path
add_dl_on_flink_jar()
from dl_on_flink_tensorflow.tensorflow_on_flink_mlconf import MLCONSTANTS
def _get_entry(path, func_name):
spec = importlib.util.spec_from_file_location(path, path)
module = importlib.util.module_from_spec(spec)
sys.modules[path] = module
spec.loader.exec_module(module)
return getattr(module, func_name)
class TestTFUtils(unittest.TestCase):
def setUp(self) -> None:
super().setUp()
self.env = StreamExecutionEnvironment.get_execution_environment()
self.env.add_jars("file://{}".format(find_jar_path()))
self.env.set_parallelism(1)
self.t_env = StreamTableEnvironment.create(self.env)
self.statement_set = self.t_env.create_statement_set()
def test_train_without_input(self):
config = TFClusterConfig.new_builder() \
.set_worker_count(2) \
.set_ps_count(1) \
.set_node_entry(
_get_entry(os.path.join(get_resource_folder(), "add.py"),
"map_func")) \
.build()
train(self.statement_set, config)
self.statement_set.execute().wait()
def testIterationTrain(self):
source_table = self.t_env.from_data_stream(
self.env.from_collection([1, 2, 3, 4], Types.INT()))
config = TFClusterConfig.new_builder() \
.set_node_entry(_get_entry(os.path.join(get_resource_folder(),
"print_input_iter.py"),
"map_func")) \
.set_worker_count(1) \
.set_property("input_types", "INT_32") \
.build()
train(self.statement_set, config, source_table, 4)
self.statement_set.execute().wait()
def testIterationTrainWithEarlyTermination(self):
source_table = self.t_env.from_data_stream(
self.env.from_collection([1, 2, 3, 4], Types.INT()))
config = TFClusterConfig.new_builder() \
.set_node_entry(_get_entry(os.path.join(get_resource_folder(),
"print_input_iter.py"),
"map_func")) \
.set_worker_count(1) \
.set_property("input_types", "INT_32") \
.build()
train(self.statement_set, config, source_table, 1024)
self.statement_set.execute().wait()
def test_inference(self):
config = TFClusterConfig.new_builder() \
.set_worker_count(2) \
.set_ps_count(1) \
.set_node_entry(_get_entry(os.path.join(get_resource_folder(),
"input_output.py"),
"map_func")) \
.set_property("input_types",
"INT_32,INT_64,FLOAT_32,FLOAT_64,STRING") \
.set_property("output_types",
"INT_32,INT_64,FLOAT_32,FLOAT_64,STRING") \
.build()
schema = Schema.new_builder() \
.column("f0", "INT") \
.column("f1", "BIGINT") \
.column("f2", "FLOAT") \
.column("f3", "DOUBLE") \
.column("f4", "STRING") \
.build()
descriptor = TableDescriptor.for_connector("datagen") \
.schema(schema) \
.option("number-of-rows", "10") \
.build()
input_table = self.t_env.from_descriptor(descriptor)
output_table = inference(self.statement_set, input_table, config,
schema)
self.statement_set.add_insert(TableDescriptor.for_connector("print")
.build(), output_table)
self.statement_set.execute().wait()
def test_tensorboard(self):
tmpdir = tempfile.gettempdir()
config = TFClusterConfig.new_builder() \
.set_worker_count(2) \
.set_ps_count(1) \
.set_node_entry(_get_entry(os.path.join(get_resource_folder(),
"add_withtb.py"),
"map_func")) \
.set_property(MLCONSTANTS.CHECKPOINT_DIR,
os.path.join(tmpdir, str(time.time_ns()))) \
.build()
train(self.statement_set, config)
tensorboard(self.statement_set, config)
self.statement_set.execute().wait() | dl-on-flink-tensorflow-2.x/python/tests/flink_ml_tensorflow/test_tf_utils.py | import importlib.util
import os
import sys
import tempfile
import time
import unittest
from pyflink.common.typeinfo import Types
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.table import StreamTableEnvironment, TableDescriptor, Schema
from dl_on_flink_tensorflow.tf_cluster_config import TFClusterConfig
from dl_on_flink_tensorflow.tf_utils import train, inference, tensorboard
from tests.flink_ml_tensorflow.utils import add_dl_on_flink_jar, \
get_resource_folder, find_jar_path
add_dl_on_flink_jar()
from dl_on_flink_tensorflow.tensorflow_on_flink_mlconf import MLCONSTANTS
def _get_entry(path, func_name):
spec = importlib.util.spec_from_file_location(path, path)
module = importlib.util.module_from_spec(spec)
sys.modules[path] = module
spec.loader.exec_module(module)
return getattr(module, func_name)
class TestTFUtils(unittest.TestCase):
def setUp(self) -> None:
super().setUp()
self.env = StreamExecutionEnvironment.get_execution_environment()
self.env.add_jars("file://{}".format(find_jar_path()))
self.env.set_parallelism(1)
self.t_env = StreamTableEnvironment.create(self.env)
self.statement_set = self.t_env.create_statement_set()
def test_train_without_input(self):
config = TFClusterConfig.new_builder() \
.set_worker_count(2) \
.set_ps_count(1) \
.set_node_entry(
_get_entry(os.path.join(get_resource_folder(), "add.py"),
"map_func")) \
.build()
train(self.statement_set, config)
self.statement_set.execute().wait()
def testIterationTrain(self):
source_table = self.t_env.from_data_stream(
self.env.from_collection([1, 2, 3, 4], Types.INT()))
config = TFClusterConfig.new_builder() \
.set_node_entry(_get_entry(os.path.join(get_resource_folder(),
"print_input_iter.py"),
"map_func")) \
.set_worker_count(1) \
.set_property("input_types", "INT_32") \
.build()
train(self.statement_set, config, source_table, 4)
self.statement_set.execute().wait()
def testIterationTrainWithEarlyTermination(self):
source_table = self.t_env.from_data_stream(
self.env.from_collection([1, 2, 3, 4], Types.INT()))
config = TFClusterConfig.new_builder() \
.set_node_entry(_get_entry(os.path.join(get_resource_folder(),
"print_input_iter.py"),
"map_func")) \
.set_worker_count(1) \
.set_property("input_types", "INT_32") \
.build()
train(self.statement_set, config, source_table, 1024)
self.statement_set.execute().wait()
def test_inference(self):
config = TFClusterConfig.new_builder() \
.set_worker_count(2) \
.set_ps_count(1) \
.set_node_entry(_get_entry(os.path.join(get_resource_folder(),
"input_output.py"),
"map_func")) \
.set_property("input_types",
"INT_32,INT_64,FLOAT_32,FLOAT_64,STRING") \
.set_property("output_types",
"INT_32,INT_64,FLOAT_32,FLOAT_64,STRING") \
.build()
schema = Schema.new_builder() \
.column("f0", "INT") \
.column("f1", "BIGINT") \
.column("f2", "FLOAT") \
.column("f3", "DOUBLE") \
.column("f4", "STRING") \
.build()
descriptor = TableDescriptor.for_connector("datagen") \
.schema(schema) \
.option("number-of-rows", "10") \
.build()
input_table = self.t_env.from_descriptor(descriptor)
output_table = inference(self.statement_set, input_table, config,
schema)
self.statement_set.add_insert(TableDescriptor.for_connector("print")
.build(), output_table)
self.statement_set.execute().wait()
def test_tensorboard(self):
tmpdir = tempfile.gettempdir()
config = TFClusterConfig.new_builder() \
.set_worker_count(2) \
.set_ps_count(1) \
.set_node_entry(_get_entry(os.path.join(get_resource_folder(),
"add_withtb.py"),
"map_func")) \
.set_property(MLCONSTANTS.CHECKPOINT_DIR,
os.path.join(tmpdir, str(time.time_ns()))) \
.build()
train(self.statement_set, config)
tensorboard(self.statement_set, config)
self.statement_set.execute().wait() | 0.334372 | 0.172555 |
import logging
import mmln
import mmln.infer
import mmln.ground
class Learner:
def __init__(self, network):
self.n = network
self.all_labels = mmln.get_all_labels(network)
self.logger = logging.getLogger(__name__)
def learn(self, model, inf=None):
raise NotImplementedError('This class is abstract.')
class GradientDescent(Learner):
def __init__(self, network, n_steps=25, step_size=1.0, step_schedule=True, scale_gradient=True, average_steps=True):
super(GradientDescent, self).__init__(network)
self.n_steps = n_steps
self.step_size = step_size
self.step_schedule = step_schedule
self.scale_gradient = scale_gradient
self.average_steps = average_steps
def learn(self, model, inf=None):
self.logger.info('Starting learning. Setting up inference.')
if inf is None:
inf = mmln.infer.HLMRF()
manager = mmln.ground.GroundingManager(model, self.n, self.all_labels, inf)
manager.init_all_weights()
self.logger.info('Inference set up. Starting gradient descent.')
observed_potentials = self._get_observed_potentials(manager)
scaling_factor = self._get_scaling_factor(manager)
for i in range(0, self.n_steps):
inferred_potentials = self._get_inferred_potentials(manager)
for weight_map in (model.inter_node_pos, model.inter_node_neg, model.intra_node_pos, model.intra_node_neg):
for (label1, label2), weight in weight_map:
step = ()
def _get_observed_potentials(self, manager):
pass
def _get_inferred_potentials(self, manager):
pass
def _get_scaling_factor(self, manager):
pass
class HomophilyLearner(Learner):
def learn(self, model, inf=None):
model.regularization = 0.01
model.inter_node_pos_same_label_default = 0.1
p = mmln.estimate_p_values_inter_node(self.n)
for label1 in p:
for label2 in p[label1]:
if p[label1][label2] < 0.1:
model.inter_node_pos[(label1, label2)] = 2
elif p[label1][label2] < 0.2:
model.inter_node_pos[(label1, label2)] = 1
elif p[label1][label2] < 0.5:
model.inter_node_pos[(label1, label2)] = 0.5 | mmln/learn.py | import logging
import mmln
import mmln.infer
import mmln.ground
class Learner:
def __init__(self, network):
self.n = network
self.all_labels = mmln.get_all_labels(network)
self.logger = logging.getLogger(__name__)
def learn(self, model, inf=None):
raise NotImplementedError('This class is abstract.')
class GradientDescent(Learner):
def __init__(self, network, n_steps=25, step_size=1.0, step_schedule=True, scale_gradient=True, average_steps=True):
super(GradientDescent, self).__init__(network)
self.n_steps = n_steps
self.step_size = step_size
self.step_schedule = step_schedule
self.scale_gradient = scale_gradient
self.average_steps = average_steps
def learn(self, model, inf=None):
self.logger.info('Starting learning. Setting up inference.')
if inf is None:
inf = mmln.infer.HLMRF()
manager = mmln.ground.GroundingManager(model, self.n, self.all_labels, inf)
manager.init_all_weights()
self.logger.info('Inference set up. Starting gradient descent.')
observed_potentials = self._get_observed_potentials(manager)
scaling_factor = self._get_scaling_factor(manager)
for i in range(0, self.n_steps):
inferred_potentials = self._get_inferred_potentials(manager)
for weight_map in (model.inter_node_pos, model.inter_node_neg, model.intra_node_pos, model.intra_node_neg):
for (label1, label2), weight in weight_map:
step = ()
def _get_observed_potentials(self, manager):
pass
def _get_inferred_potentials(self, manager):
pass
def _get_scaling_factor(self, manager):
pass
class HomophilyLearner(Learner):
def learn(self, model, inf=None):
model.regularization = 0.01
model.inter_node_pos_same_label_default = 0.1
p = mmln.estimate_p_values_inter_node(self.n)
for label1 in p:
for label2 in p[label1]:
if p[label1][label2] < 0.1:
model.inter_node_pos[(label1, label2)] = 2
elif p[label1][label2] < 0.2:
model.inter_node_pos[(label1, label2)] = 1
elif p[label1][label2] < 0.5:
model.inter_node_pos[(label1, label2)] = 0.5 | 0.63409 | 0.293265 |
from jsog3 import jsog
import unittest
class TestJSOG(unittest.TestCase):
def test_encode_reference(self):
inner = { "foo": "bar" }
outer = { "inner1": inner, "inner2": inner }
encoded = jsog.encode(outer)
# Python 3.7+ ensures fields are always processed in order,
# however contents of inner1 and inner2 might be swapped in
# older releases. Do we care?
self.assertEqual(encoded, {
"inner1": {
"@id": "1",
"foo": "bar",
},
'inner2': { '@ref': '1' }
})
def test_decode_reference(self):
JSOGIFIED = '{"@id":"1","foo":"foo","inner1":{"@id":"2","bar":"bar"},"inner2":{"@ref":"2"}}'
parsed = jsog.loads(JSOGIFIED)
inner1 = parsed['inner1']
inner2 = parsed['inner2']
self.assertTrue(inner1 is inner2)
def test_encode_circular(self):
thing = {}
thing['me'] = thing
thing['list'] = [thing]
encoded = jsog.encode(thing)
self.assertEqual(encoded, {
'@id': '1',
'me': { '@ref': '1' },
'list': [ { '@ref': '1' } ],
})
def test_decode_circular(self):
thing = {}
thing['me'] = thing
thing['list'] = [thing]
encoded = jsog.encode(thing)
back = jsog.decode(encoded)
self.assertFalse('@id' in back)
self.assertTrue(back['me'] is back)
self.assertTrue(back['list'][0] is back)
def test_encode_null(self):
encoded = jsog.encode(None)
self.assertEqual(encoded, None)
def test_decode_null(self):
decoded = jsog.decode(None)
self.assertEqual(decoded, None)
def test_decode_plain_json(self):
json = { "foo": "bar" }
decoded = jsog.decode(json)
self.assertEqual(json, decoded)
def test_decode_list_reference(self):
JSOGIFIED = '{"@id":"1","foo":"foo","inner1":{"@id":"2","bar":"bar"},"inner2":[{"@ref":"2"}]}'
parsed = jsog.loads(JSOGIFIED)
inner1 = parsed['inner1']
inner2 = parsed['inner2'][0]
self.assertTrue(inner1 is inner2)
def test_decode_missing_id(self):
with self.assertRaises(ValueError):
json = { "foo": { "@ref": "1" }, "bar": { "@ref": "1" } }
jsog.decode(json)
def test_decode_duplicate_id(self):
with self.assertRaises(ValueError):
json = { "foo": { "@id": "1" }, "bar": { "@id": "1" } }
jsog.decode(json)
if __name__ == '__main__':
unittest.main() | test_jsog.py | from jsog3 import jsog
import unittest
class TestJSOG(unittest.TestCase):
def test_encode_reference(self):
inner = { "foo": "bar" }
outer = { "inner1": inner, "inner2": inner }
encoded = jsog.encode(outer)
# Python 3.7+ ensures fields are always processed in order,
# however contents of inner1 and inner2 might be swapped in
# older releases. Do we care?
self.assertEqual(encoded, {
"inner1": {
"@id": "1",
"foo": "bar",
},
'inner2': { '@ref': '1' }
})
def test_decode_reference(self):
JSOGIFIED = '{"@id":"1","foo":"foo","inner1":{"@id":"2","bar":"bar"},"inner2":{"@ref":"2"}}'
parsed = jsog.loads(JSOGIFIED)
inner1 = parsed['inner1']
inner2 = parsed['inner2']
self.assertTrue(inner1 is inner2)
def test_encode_circular(self):
thing = {}
thing['me'] = thing
thing['list'] = [thing]
encoded = jsog.encode(thing)
self.assertEqual(encoded, {
'@id': '1',
'me': { '@ref': '1' },
'list': [ { '@ref': '1' } ],
})
def test_decode_circular(self):
thing = {}
thing['me'] = thing
thing['list'] = [thing]
encoded = jsog.encode(thing)
back = jsog.decode(encoded)
self.assertFalse('@id' in back)
self.assertTrue(back['me'] is back)
self.assertTrue(back['list'][0] is back)
def test_encode_null(self):
encoded = jsog.encode(None)
self.assertEqual(encoded, None)
def test_decode_null(self):
decoded = jsog.decode(None)
self.assertEqual(decoded, None)
def test_decode_plain_json(self):
json = { "foo": "bar" }
decoded = jsog.decode(json)
self.assertEqual(json, decoded)
def test_decode_list_reference(self):
JSOGIFIED = '{"@id":"1","foo":"foo","inner1":{"@id":"2","bar":"bar"},"inner2":[{"@ref":"2"}]}'
parsed = jsog.loads(JSOGIFIED)
inner1 = parsed['inner1']
inner2 = parsed['inner2'][0]
self.assertTrue(inner1 is inner2)
def test_decode_missing_id(self):
with self.assertRaises(ValueError):
json = { "foo": { "@ref": "1" }, "bar": { "@ref": "1" } }
jsog.decode(json)
def test_decode_duplicate_id(self):
with self.assertRaises(ValueError):
json = { "foo": { "@id": "1" }, "bar": { "@id": "1" } }
jsog.decode(json)
if __name__ == '__main__':
unittest.main() | 0.472683 | 0.443239 |
import argparse
import logging
from collections import Counter
from dataclasses import dataclass
from typing import List
# Parse Arguments
parser = argparse.ArgumentParser()
parser.add_argument('input_file', help='input file to read')
parser.add_argument('--verbosity', help='specify verbosity level (DEBUG|INFO)')
args = parser.parse_args()
verbosity = 'INFO'
if args.verbosity:
verbosity = args.verbosity
logging.getLogger().setLevel(logging.getLevelName(verbosity))
# Helper classes
@dataclass
class Image:
width: int
height: int
raw_image: List[int]
display_charmap = {
0: ' ',
1: '█',
2: '▓',
}
def __post_init__(self):
logging.debug(f'Parsing raw image of length {len(raw_image)}')
self.layers = []
for i in range(0, len(self.raw_image), self.width * self.height):
raw_layer = self.raw_image[i:i + self.width * self.height]
layer = []
for j in range(0, len(raw_layer), self.width):
layer.append(raw_layer[j:j + self.width])
self.layers.append(layer)
def validate_image(self):
least_zeroes = float('inf')
best_product = -1
for index, layer in enumerate(self.layers):
logging.debug(f'Layer {index}')
counter = Counter()
for row in layer:
counter.update(row)
logging.debug((''.join(map(str, row)), counter))
if counter[0] < least_zeroes:
least_zeroes = counter[0]
best_product = counter[1] * counter[2]
logging.debug(f'New best product: {best_product}')
return best_product
def display(self):
output = [[2 for i in range(self.width)] for j in range(self.height)]
for layer in self.layers:
for y, row in enumerate(layer):
for x, pane in enumerate(row):
if output[y][x] == 2 and pane < 2:
output[y][x] = pane
for row in output:
print(''.join(map(self._color_map, row)))
# Helper function to map colors from dict (cannot call map on a dict)
def _color_map(self, color_key):
return self.display_charmap[color_key]
# Load Inputs
input_file = args.input_file
with open(input_file) as f:
file_contents = f.read()[:-1]
width, height, raw_image = file_contents.split(' ')
width, height = int(width), int(height)
raw_image = list(map(int, raw_image))
# Main Logic
image = Image(width, height, raw_image)
best_product = image.validate_image()
logging.info(f'Validation: {best_product}')
image.display() | 2019/day_8/run.py | import argparse
import logging
from collections import Counter
from dataclasses import dataclass
from typing import List
# Parse Arguments
parser = argparse.ArgumentParser()
parser.add_argument('input_file', help='input file to read')
parser.add_argument('--verbosity', help='specify verbosity level (DEBUG|INFO)')
args = parser.parse_args()
verbosity = 'INFO'
if args.verbosity:
verbosity = args.verbosity
logging.getLogger().setLevel(logging.getLevelName(verbosity))
# Helper classes
@dataclass
class Image:
width: int
height: int
raw_image: List[int]
display_charmap = {
0: ' ',
1: '█',
2: '▓',
}
def __post_init__(self):
logging.debug(f'Parsing raw image of length {len(raw_image)}')
self.layers = []
for i in range(0, len(self.raw_image), self.width * self.height):
raw_layer = self.raw_image[i:i + self.width * self.height]
layer = []
for j in range(0, len(raw_layer), self.width):
layer.append(raw_layer[j:j + self.width])
self.layers.append(layer)
def validate_image(self):
least_zeroes = float('inf')
best_product = -1
for index, layer in enumerate(self.layers):
logging.debug(f'Layer {index}')
counter = Counter()
for row in layer:
counter.update(row)
logging.debug((''.join(map(str, row)), counter))
if counter[0] < least_zeroes:
least_zeroes = counter[0]
best_product = counter[1] * counter[2]
logging.debug(f'New best product: {best_product}')
return best_product
def display(self):
output = [[2 for i in range(self.width)] for j in range(self.height)]
for layer in self.layers:
for y, row in enumerate(layer):
for x, pane in enumerate(row):
if output[y][x] == 2 and pane < 2:
output[y][x] = pane
for row in output:
print(''.join(map(self._color_map, row)))
# Helper function to map colors from dict (cannot call map on a dict)
def _color_map(self, color_key):
return self.display_charmap[color_key]
# Load Inputs
input_file = args.input_file
with open(input_file) as f:
file_contents = f.read()[:-1]
width, height, raw_image = file_contents.split(' ')
width, height = int(width), int(height)
raw_image = list(map(int, raw_image))
# Main Logic
image = Image(width, height, raw_image)
best_product = image.validate_image()
logging.info(f'Validation: {best_product}')
image.display() | 0.781122 | 0.198122 |
import hashlib,requests,random,string,json
import time
from app import db
from app.model import Device
from app.model import Member
from app.common.libs.Helper import getCurrentDate, getFormatDate
from sqlalchemy import func, desc
class DeviceService():
@staticmethod
def geneSN( info ):
m = hashlib.md5()
str = "%s" % time.time()
m.update(str.encode("utf-8"))
return m.hexdigest()
@staticmethod
def filterDeviceByEditor(req):
resp = { 'code':20000, 'message':'查询成功', 'data':{}}
mobile = req['mobile'] if 'mobile' in req else ''
sn = req['sn'] if 'sn' in req else ''
page = int( req['page'] ) if 'page' in req else 0
limit = int( req['limit'] ) if 'limit' in req else 0
offset = ( page - 1 ) * limit
# rule = func.concat(Device.sn, Device.position).op('regexp')('.*%s.*'%keywords.replace(' ','.*'))
query = Device.query.filter( Device.member_id == Member.id)\
.filter( Member.mobile.like('%%%s%%'%mobile) )\
.filter( Device.sn.like('%%%s%%'%sn))
total = query.count()
tap_list = query.offset( offset ).limit( limit ).all()
items = []
for tap in tap_list:
items.append({
'name': tap.name,
'number':tap.number,
'position':tap.position,
'sn': tap.sn,
'power': str(tap.power),
'online': tap.online,
'status1': tap.status1,
'status2': tap.status2,
'alias1': tap.alias1,
'alias2': tap.alias2,
'created_time': getFormatDate(tap.created_time),
})
resp['data']['items'] = items
resp['data']['total'] = total
return resp
@staticmethod
def filterDeviceByUser(req, user_info):
resp = { 'code':20000, 'message':'查询成功', 'data':{}}
position = req['position'] if 'position' in req else ''
page = int( req['page'] ) if 'page' in req else 0
limit = int( req['limit'] ) if 'limit' in req else 0
offset = ( page - 1 ) * limit
query = Device.query\
.filter( Member.id == Device.member_id )\
.filter( Member.mobile == user_info.mobile )\
.filter( Device.position.like('%' + position + '%') )
total = query.count()
tap_list = query.offset( offset ).limit( limit ).all()
items = []
for tap in tap_list:
items.append({
'name': tap.name,
'number':tap.number,
'position':tap.position,
'sn': tap.sn,
'power': str(tap.power),
'online': tap.online,
'status1': tap.status1,
'status2': tap.status2,
'alias1': tap.alias1,
'alias2': tap.alias2,
'created_time': getFormatDate(tap.created_time),
})
resp['data']['items'] = items
resp['data']['total'] = total
return resp
@staticmethod
def tapInfo(sn = ''):
resp = { 'code':20000, 'message':'查询成功', 'data':{}}
res = db.session.query(Device, Member)\
.filter( Member.id == Device.member_id )\
.filter( Device.sn == sn )\
.first()
if not res:
resp['code'] = -1
resp['message'] = "信息查询错误"
return jsonify(resp)
tap = res[0]
tap_info = {
'name': tap.name,
'number':tap.number,
'position':tap.position,
'sn': tap.sn,
'power': str(tap.power),
'online': tap.online,
'status1': tap.status1,
'status2': tap.status2,
'alias1': tap.alias1,
'alias2': tap.alias2,
'created_time': getFormatDate(tap.created_time)
}
member = res[1]
user_info = {
'nickname': member.nickname,
'mobile': member.mobile,
'sex': member.sex,
'avatar': member.avatar
}
resp['data']['tap_info'] = tap_info
resp['data']['user_info'] = user_info
return resp | app/common/libs/DeviceService.py |
import hashlib,requests,random,string,json
import time
from app import db
from app.model import Device
from app.model import Member
from app.common.libs.Helper import getCurrentDate, getFormatDate
from sqlalchemy import func, desc
class DeviceService():
@staticmethod
def geneSN( info ):
m = hashlib.md5()
str = "%s" % time.time()
m.update(str.encode("utf-8"))
return m.hexdigest()
@staticmethod
def filterDeviceByEditor(req):
resp = { 'code':20000, 'message':'查询成功', 'data':{}}
mobile = req['mobile'] if 'mobile' in req else ''
sn = req['sn'] if 'sn' in req else ''
page = int( req['page'] ) if 'page' in req else 0
limit = int( req['limit'] ) if 'limit' in req else 0
offset = ( page - 1 ) * limit
# rule = func.concat(Device.sn, Device.position).op('regexp')('.*%s.*'%keywords.replace(' ','.*'))
query = Device.query.filter( Device.member_id == Member.id)\
.filter( Member.mobile.like('%%%s%%'%mobile) )\
.filter( Device.sn.like('%%%s%%'%sn))
total = query.count()
tap_list = query.offset( offset ).limit( limit ).all()
items = []
for tap in tap_list:
items.append({
'name': tap.name,
'number':tap.number,
'position':tap.position,
'sn': tap.sn,
'power': str(tap.power),
'online': tap.online,
'status1': tap.status1,
'status2': tap.status2,
'alias1': tap.alias1,
'alias2': tap.alias2,
'created_time': getFormatDate(tap.created_time),
})
resp['data']['items'] = items
resp['data']['total'] = total
return resp
@staticmethod
def filterDeviceByUser(req, user_info):
resp = { 'code':20000, 'message':'查询成功', 'data':{}}
position = req['position'] if 'position' in req else ''
page = int( req['page'] ) if 'page' in req else 0
limit = int( req['limit'] ) if 'limit' in req else 0
offset = ( page - 1 ) * limit
query = Device.query\
.filter( Member.id == Device.member_id )\
.filter( Member.mobile == user_info.mobile )\
.filter( Device.position.like('%' + position + '%') )
total = query.count()
tap_list = query.offset( offset ).limit( limit ).all()
items = []
for tap in tap_list:
items.append({
'name': tap.name,
'number':tap.number,
'position':tap.position,
'sn': tap.sn,
'power': str(tap.power),
'online': tap.online,
'status1': tap.status1,
'status2': tap.status2,
'alias1': tap.alias1,
'alias2': tap.alias2,
'created_time': getFormatDate(tap.created_time),
})
resp['data']['items'] = items
resp['data']['total'] = total
return resp
@staticmethod
def tapInfo(sn = ''):
resp = { 'code':20000, 'message':'查询成功', 'data':{}}
res = db.session.query(Device, Member)\
.filter( Member.id == Device.member_id )\
.filter( Device.sn == sn )\
.first()
if not res:
resp['code'] = -1
resp['message'] = "信息查询错误"
return jsonify(resp)
tap = res[0]
tap_info = {
'name': tap.name,
'number':tap.number,
'position':tap.position,
'sn': tap.sn,
'power': str(tap.power),
'online': tap.online,
'status1': tap.status1,
'status2': tap.status2,
'alias1': tap.alias1,
'alias2': tap.alias2,
'created_time': getFormatDate(tap.created_time)
}
member = res[1]
user_info = {
'nickname': member.nickname,
'mobile': member.mobile,
'sex': member.sex,
'avatar': member.avatar
}
resp['data']['tap_info'] = tap_info
resp['data']['user_info'] = user_info
return resp | 0.422743 | 0.072933 |
import numpy as np
import pandas as pd
from eskapade import process_manager, DataStore, Link, StatusCode
# numeric datatypes get converted to an index, which is then used for value counting
NUMERIC_SUBSTR = [np.dtype('int'), np.dtype('float'), np.dtype('double')]
# string datatype get treated as categories
STRING_SUBSTR = [np.dtype('str'), np.dtype('object'), np.dtype('bool')]
# timestamps are converted to nanoseconds (int)
TIME_SUBSTR = [np.dtype('datetime64[ns]'), np.datetime64, np.dtype('<M8')]
NUM_NS_DAY = 24 * 3600 * int(1e9)
class HistogramFillerBase(Link):
"""Base class link to fill histograms.
It is possible to do after-filling cleaning of these histograms by rejecting certain
keys or removing inconsistent data types. Timestamp columns are
converted to nanoseconds before the binning is applied. Final histograms
are stored in the datastore.
"""
def __init__(self, **kwargs):
"""Initialize link instance.
Store and do basic check on the attributes of link HistogramFillerBase.
:param str name: name of link
:param str read_key: key of input data to read from data store
:param str store_key: key of output data to store histograms in data store
:param list columns: colums to pick up from input data. (default is all columns)
:param dict bin_specs: dictionaries used for rebinning numeric or timestamp columns
Example bin_specs dictionary is:
>>> bin_specs = {'x': {'bin_width': 1, 'bin_offset': 0},
'y': {'bin_edges': [0, 2, 3, 4, 5, 7, 8]}}
:param dict var_dtype: dict of datatypes of the columns to study from dataframe.
If not provided, try to determine datatypes directy from dataframe.
:param bool store_at_finalize: Store histograms in datastore at finalize(), not at
execute(). Useful when looping over datasets. Default is False.
:param drop_keys dict: dictionary used for dropping specific keys from bins dictionaries of histograms
Example drop_keys dictionary is:
>>> drop_keys = {'x': [1,4,8,19],
'y': ['apple', 'pear', 'tomato'],
'x:y': [(1, 'apple'), (19, 'tomato')]}
"""
# initialize Link, pass name from kwargs
Link.__init__(self, kwargs.pop('name', 'HistogramFillerBase'))
# process and register all relevant kwargs. kwargs are added as attributes of the link.
# second arg is default value for an attribute. key is popped from kwargs.
self._process_kwargs(kwargs,
read_key=None,
store_key=None,
columns=[],
bin_specs={},
var_dtype={},
drop_keys={},
store_at_finalize=False)
self._unit_bin_specs = {'bin_width': 1.0, 'bin_offset': 0.0}
self._unit_timestamp_specs = {'bin_width': pd.Timedelta(days=30).value,
'bin_offset': pd.Timestamp('2010-01-04').value}
# these get filled during execution
self._hists = {}
# initialize attributes
self.all_columns = []
self.str_cols = []
self.num_cols = []
self.dt_cols = []
def initialize(self):
"""Initialize the link."""
# check basic attribute settings
assert isinstance(self.read_key, str) and len(self.read_key), 'read_key has not been set correctly'
if self.store_key is not None:
assert isinstance(self.store_key, str) and len(self.store_key), 'store_key has not been set to string'
# default histogram creation is at execute(). Storage at finalize is useful for
# looping over datasets.
if self.store_at_finalize:
self.logger.debug('Storing (and possible post-processing) at finalize, not execute.')
# check that columns are set correctly.
for i, c in enumerate(self.columns):
if isinstance(c, str):
self.columns[i] = c.split(':')
elif not isinstance(self.columns[i], list):
raise TypeError('Columns "{}" needs to be a string or list of strings'.format(self.columns[i]))
# check for supported data types
for k in self.var_dtype:
try:
self.var_dtype[k] = np.dtype(self.var_dtype[k]).type
if (self.var_dtype[k] is np.str_) or (self.var_dtype[k] is np.string_) or (self.var_dtype[k] is np.object_):
self.var_dtype[k] = str
except BaseException:
raise RuntimeError('unknown assigned datatype to variable "{}"'.format(k))
return StatusCode.Success
def execute(self):
"""Execute the link.
Execute() four things:
* check presence and data type of requested columns
* timestamp variables are converted to nanosec (integers)
* do the actual value counting based on categories and created indices
* then convert to histograms and add to datastore
"""
ds = process_manager.service(DataStore)
# basic checks on contensts of the data frame
if self.read_key not in ds:
raise KeyError('key "{}" not in data store'.format(self.read_key))
df = ds[self.read_key]
self.assert_dataframe(df)
# determine all possible columns, used for comparison below
self.all_columns = self.get_all_columns(df)
# copy all columns from the dataframe?
if not self.columns:
self.columns = self.all_columns
for i, c in enumerate(self.all_columns):
self.columns[i] = [c]
# 1. check presence and data type of requested columns
# sort columns into numerical, timestamp and category based
self.categorize_columns(df)
# 2. timestamp variables are converted to ns here
idf = self.process_columns(df)
# 3. do the actual histogram/counter filling
for c in self.columns:
name = ':'.join(c)
self.logger.debug('Processing column(s) "{col}".', col=name)
self.fill_histogram(idf, c)
# cleanup temp df
del idf
# 4. storage
if not self.store_at_finalize:
self.process_and_store()
return StatusCode.Success
def finalize(self):
"""Finalize the link.
Store Histograms here, if requested.
"""
# convert to histograms and add to datastore
if self.store_at_finalize:
self.process_and_store()
return StatusCode.Success
def process_and_store(self):
"""Store (and possibly process) histogram objects."""
ds = process_manager.service(DataStore)
if self.store_key is not None:
ds[self.store_key] = self._hists
def assert_dataframe(self, df):
"""Check that input data is a filled pandas data frame.
:param df: input (pandas) data frame
"""
if not isinstance(df, pd.DataFrame):
raise TypeError('retrieved object not of type pandas DataFrame')
assert len(df.index) > 0, 'input dataframe is empty'
def get_all_columns(self, data):
"""Retrieve all columns / keys from input data.
:param data: input data sample (pandas dataframe or dict)
:returns: list of columns
:rtype: list
"""
if isinstance(data, pd.DataFrame):
all_columns = sorted(data.columns.tolist())
else:
raise RuntimeError('Cannot determine columns in input data found for {!s}'.format(self))
return all_columns
def get_data_type(self, df, col):
"""Get data type of dataframe column.
:param df: input data frame
:param str col: column
"""
if col not in df.columns:
raise KeyError('column "{0:s}" not in input dataframe'.format(col))
return df[col].dtype
def categorize_columns(self, df):
"""Categorize columns of dataframe by data type.
:param df: input (pandas) data frame
"""
# check presence and data type of requested columns
# sort columns into numerical, timestamp and category based
for c in self.columns:
for col in c:
if col not in df.columns:
raise KeyError('Column "{0:s}" not in dataframe "{1:s}".'.format(col, self.read_key))
dt = self.get_data_type(df, col)
if col not in self.var_dtype:
self.var_dtype[col] = dt.type
if (self.var_dtype[col] is np.str_) or (self.var_dtype[col] is np.string_) or (self.var_dtype[col] is np.object_):
self.var_dtype[col] = str
if not any(dt in types for types in (STRING_SUBSTR, NUMERIC_SUBSTR, TIME_SUBSTR)):
raise TypeError('Cannot process column "{0:s}" of data type "{1!s}".'.format(col, dt))
is_number = isinstance(dt.type(), np.number)
is_timestamp = isinstance(dt.type(), np.datetime64)
colset = self.num_cols if is_number else self.dt_cols if is_timestamp else self.str_cols
if col not in colset:
colset.append(col)
self.logger.debug('Data type of column "{col}" is "{type}".', col=col, type=self.var_dtype[col])
def process_columns(self, df):
"""Process columns before histogram filling.
Specifically, convert timestamp columns to integers
:param df: input (pandas) data frame
:returns: output (pandas) data frame with converted timestamp columns
:rtype: pandas DataFrame
"""
# timestamp variables are converted to ns here
# make temp df for value counting (used below)
idf = df[self.num_cols + self.str_cols].copy(deep=False)
for col in self.dt_cols:
self.logger.debug('Converting column "{col}" of type "{type}" to nanosec.',
col=col, type=self.var_dtype[col])
idf[col] = df[col].apply(to_ns)
return idf
def fill_histogram(self, idf, c):
"""Fill input histogram with column(s) of input dataframe.
:param idf: input data frame used for filling histogram
:param list c: histogram column(s)
"""
return
def drop_requested_keys(self, name, counts):
"""Drop requested keys from counts dictionary.
:param string name: key of drop_keys dict to get array of keys to be dropped
:param dict counts: counts dictionary to drop specific keys from
:returns: count dict without dropped keys
"""
# drop requested keys
if name in self.drop_keys:
keys_to_drop = self.drop_keys[name]
if not isinstance(keys_to_drop, list):
raise TypeError('drop_keys value needs to be a list of values')
for key in keys_to_drop:
if key in counts:
self.logger.debug('Removing key "{key}" with value: "{value}", as requested.',
key=key, value=counts[key])
del counts[key]
return counts
def var_bin_specs(self, c, idx=0):
"""Determine bin_specs to use for variable c.
:param list c: list of variables, or string variable
:param int idx: index of the variable in c, for which to return the bin specs. default is 0.
:return: selected bin_specs of variable
"""
if isinstance(c, str):
c = [c]
n = ':'.join(c)
# determine default bin specs
dt = np.dtype(self.var_dtype[c[idx]])
is_timestamp = isinstance(dt.type(), np.datetime64)
default = self._unit_bin_specs if not is_timestamp else self._unit_timestamp_specs
# get bin specs
if n in self.bin_specs and len(c) > 1 and len(c) == len(self.bin_specs[n]):
result = self.bin_specs[n][idx]
if not result:
result = self.bin_specs.get(c[idx], default)
else:
result = self.bin_specs.get(c[idx], default)
return result
def to_ns(x):
"""Convert input timestamps to nanoseconds (integers).
:param x: value to be converted
:returns: converted value
:rtype: int
"""
if pd.isnull(x):
return 0
try:
return pd.to_datetime(x).value
except Exception:
if hasattr(x, '__str__'):
return pd.to_datetime(str(x)).value
return 0
def to_str(val):
"""Convert input to (array of) string(s).
:param val: value to be converted
:returns: converted value
:rtype: str or np.ndarray
"""
if isinstance(val, str):
return val
elif hasattr(val, '__iter__'):
return np.asarray(list(map(lambda s: s if isinstance(s, str) else str(s) if hasattr(s, '__str__') else '',
val)))
elif hasattr(val, '__str__'):
return str(val)
return ''
def only_str(val):
"""Pass input value or array only if it is a string.
:param val: value to be evaluated
:returns: evaluated value
:rtype: str or np.ndarray
"""
if isinstance(val, str):
return val
elif hasattr(val, '__iter__'):
return np.asarray(list(filter(lambda s: isinstance(s, str), val)))
return None
def only_bool(val):
"""Pass input value or array only if it is a bool.
:param val: value to be evaluated
:returns: evaluated value
:rtype: np.bool or np.ndarray
"""
if isinstance(val, (np.bool_, bool)):
return np.bool(val)
elif hasattr(val, '__iter__') and not isinstance(val, str):
return np.asarray(list(filter(lambda s: isinstance(s, (np.bool_, bool)), val)))
return None
def only_int(val):
"""Pass input val value or array only if it is an integer.
:param val: value to be evaluated
:returns: evaluated value
:rtype: np.int64 or np.ndarray
"""
if isinstance(val, (np.int64, int)):
return np.int64(val)
elif hasattr(val, '__iter__') and not isinstance(val, str):
return np.asarray(list(filter(lambda s: isinstance(s, (np.int64, int)), val)))
return None
def only_float(val):
"""Pass input val value or array only if it is a float.
:param val: value to be evaluated
:returns: evaluated value
:rtype: np.float64 or np.ndarray
"""
if isinstance(val, (np.float64, float)):
return np.float64(val)
elif hasattr(val, '__iter__') and not isinstance(val, str):
return np.asarray(list(filter(lambda s: isinstance(s, (np.float64, float)), val)))
return np.nan
QUANTITY = {str: only_str, np.str_: only_str,
int: only_int, np.int64: only_int,
bool: only_bool, np.bool_: only_bool,
float: only_float, np.float64: only_float,
np.datetime64: only_int}
def value_to_bin_index(val, **kwargs):
"""Convert value to bin index.
Convert a numeric or timestamp column to an integer bin index.
:param bin_width: bin_width value needed to convert column to an integer bin index
:param bin_offset: bin_offset value needed to convert column to an integer bin index
"""
try:
# NOTE this notation also works for timestamps
bin_width = kwargs.get('bin_width', 1)
bin_offset = kwargs.get('bin_offset', 0)
bin_index = int(np.floor((val - bin_offset) / bin_width))
return bin_index
except BaseException:
pass
return val
def value_to_bin_center(val, **kwargs):
"""Convert value to bin center.
Convert a numeric or timestamp column to a common bin center value.
:param bin_width: bin_width value needed to convert column to a common bin center value
:param bin_offset: bin_offset value needed to convert column to a common bin center value
"""
try:
# NOTE this notation also works for timestamps, and does not change the
# unit
bin_width = kwargs.get('bin_width', 1)
bin_offset = kwargs.get('bin_offset', 0)
bin_index = int(np.floor((val - bin_offset) / bin_width))
obj_type = type(bin_width)
return bin_offset + obj_type((bin_index + 0.5) * bin_width)
except BaseException:
pass
return val | python/eskapade/analysis/histogram_filling.py | import numpy as np
import pandas as pd
from eskapade import process_manager, DataStore, Link, StatusCode
# numeric datatypes get converted to an index, which is then used for value counting
NUMERIC_SUBSTR = [np.dtype('int'), np.dtype('float'), np.dtype('double')]
# string datatype get treated as categories
STRING_SUBSTR = [np.dtype('str'), np.dtype('object'), np.dtype('bool')]
# timestamps are converted to nanoseconds (int)
TIME_SUBSTR = [np.dtype('datetime64[ns]'), np.datetime64, np.dtype('<M8')]
NUM_NS_DAY = 24 * 3600 * int(1e9)
class HistogramFillerBase(Link):
"""Base class link to fill histograms.
It is possible to do after-filling cleaning of these histograms by rejecting certain
keys or removing inconsistent data types. Timestamp columns are
converted to nanoseconds before the binning is applied. Final histograms
are stored in the datastore.
"""
def __init__(self, **kwargs):
"""Initialize link instance.
Store and do basic check on the attributes of link HistogramFillerBase.
:param str name: name of link
:param str read_key: key of input data to read from data store
:param str store_key: key of output data to store histograms in data store
:param list columns: colums to pick up from input data. (default is all columns)
:param dict bin_specs: dictionaries used for rebinning numeric or timestamp columns
Example bin_specs dictionary is:
>>> bin_specs = {'x': {'bin_width': 1, 'bin_offset': 0},
'y': {'bin_edges': [0, 2, 3, 4, 5, 7, 8]}}
:param dict var_dtype: dict of datatypes of the columns to study from dataframe.
If not provided, try to determine datatypes directy from dataframe.
:param bool store_at_finalize: Store histograms in datastore at finalize(), not at
execute(). Useful when looping over datasets. Default is False.
:param drop_keys dict: dictionary used for dropping specific keys from bins dictionaries of histograms
Example drop_keys dictionary is:
>>> drop_keys = {'x': [1,4,8,19],
'y': ['apple', 'pear', 'tomato'],
'x:y': [(1, 'apple'), (19, 'tomato')]}
"""
# initialize Link, pass name from kwargs
Link.__init__(self, kwargs.pop('name', 'HistogramFillerBase'))
# process and register all relevant kwargs. kwargs are added as attributes of the link.
# second arg is default value for an attribute. key is popped from kwargs.
self._process_kwargs(kwargs,
read_key=None,
store_key=None,
columns=[],
bin_specs={},
var_dtype={},
drop_keys={},
store_at_finalize=False)
self._unit_bin_specs = {'bin_width': 1.0, 'bin_offset': 0.0}
self._unit_timestamp_specs = {'bin_width': pd.Timedelta(days=30).value,
'bin_offset': pd.Timestamp('2010-01-04').value}
# these get filled during execution
self._hists = {}
# initialize attributes
self.all_columns = []
self.str_cols = []
self.num_cols = []
self.dt_cols = []
def initialize(self):
"""Initialize the link."""
# check basic attribute settings
assert isinstance(self.read_key, str) and len(self.read_key), 'read_key has not been set correctly'
if self.store_key is not None:
assert isinstance(self.store_key, str) and len(self.store_key), 'store_key has not been set to string'
# default histogram creation is at execute(). Storage at finalize is useful for
# looping over datasets.
if self.store_at_finalize:
self.logger.debug('Storing (and possible post-processing) at finalize, not execute.')
# check that columns are set correctly.
for i, c in enumerate(self.columns):
if isinstance(c, str):
self.columns[i] = c.split(':')
elif not isinstance(self.columns[i], list):
raise TypeError('Columns "{}" needs to be a string or list of strings'.format(self.columns[i]))
# check for supported data types
for k in self.var_dtype:
try:
self.var_dtype[k] = np.dtype(self.var_dtype[k]).type
if (self.var_dtype[k] is np.str_) or (self.var_dtype[k] is np.string_) or (self.var_dtype[k] is np.object_):
self.var_dtype[k] = str
except BaseException:
raise RuntimeError('unknown assigned datatype to variable "{}"'.format(k))
return StatusCode.Success
def execute(self):
"""Execute the link.
Execute() four things:
* check presence and data type of requested columns
* timestamp variables are converted to nanosec (integers)
* do the actual value counting based on categories and created indices
* then convert to histograms and add to datastore
"""
ds = process_manager.service(DataStore)
# basic checks on contensts of the data frame
if self.read_key not in ds:
raise KeyError('key "{}" not in data store'.format(self.read_key))
df = ds[self.read_key]
self.assert_dataframe(df)
# determine all possible columns, used for comparison below
self.all_columns = self.get_all_columns(df)
# copy all columns from the dataframe?
if not self.columns:
self.columns = self.all_columns
for i, c in enumerate(self.all_columns):
self.columns[i] = [c]
# 1. check presence and data type of requested columns
# sort columns into numerical, timestamp and category based
self.categorize_columns(df)
# 2. timestamp variables are converted to ns here
idf = self.process_columns(df)
# 3. do the actual histogram/counter filling
for c in self.columns:
name = ':'.join(c)
self.logger.debug('Processing column(s) "{col}".', col=name)
self.fill_histogram(idf, c)
# cleanup temp df
del idf
# 4. storage
if not self.store_at_finalize:
self.process_and_store()
return StatusCode.Success
    def finalize(self):
        """Finalize the link.

        Store Histograms here, if requested (i.e. when store_at_finalize is
        set and storage was skipped during execute()).

        :returns: status code of finalization
        """
        # convert to histograms and add to datastore
        if self.store_at_finalize:
            self.process_and_store()
        return StatusCode.Success
def process_and_store(self):
"""Store (and possibly process) histogram objects."""
ds = process_manager.service(DataStore)
if self.store_key is not None:
ds[self.store_key] = self._hists
    def assert_dataframe(self, df):
        """Check that input data is a filled pandas data frame.

        :param df: input (pandas) data frame
        :raises TypeError: if df is not a pandas DataFrame
        :raises AssertionError: if df has no rows
        """
        if not isinstance(df, pd.DataFrame):
            raise TypeError('retrieved object not of type pandas DataFrame')
        assert len(df.index) > 0, 'input dataframe is empty'
def get_all_columns(self, data):
"""Retrieve all columns / keys from input data.
:param data: input data sample (pandas dataframe or dict)
:returns: list of columns
:rtype: list
"""
if isinstance(data, pd.DataFrame):
all_columns = sorted(data.columns.tolist())
else:
raise RuntimeError('Cannot determine columns in input data found for {!s}'.format(self))
return all_columns
def get_data_type(self, df, col):
"""Get data type of dataframe column.
:param df: input data frame
:param str col: column
"""
if col not in df.columns:
raise KeyError('column "{0:s}" not in input dataframe'.format(col))
return df[col].dtype
def categorize_columns(self, df):
"""Categorize columns of dataframe by data type.
:param df: input (pandas) data frame
"""
# check presence and data type of requested columns
# sort columns into numerical, timestamp and category based
for c in self.columns:
for col in c:
if col not in df.columns:
raise KeyError('Column "{0:s}" not in dataframe "{1:s}".'.format(col, self.read_key))
dt = self.get_data_type(df, col)
if col not in self.var_dtype:
self.var_dtype[col] = dt.type
if (self.var_dtype[col] is np.str_) or (self.var_dtype[col] is np.string_) or (self.var_dtype[col] is np.object_):
self.var_dtype[col] = str
if not any(dt in types for types in (STRING_SUBSTR, NUMERIC_SUBSTR, TIME_SUBSTR)):
raise TypeError('Cannot process column "{0:s}" of data type "{1!s}".'.format(col, dt))
is_number = isinstance(dt.type(), np.number)
is_timestamp = isinstance(dt.type(), np.datetime64)
colset = self.num_cols if is_number else self.dt_cols if is_timestamp else self.str_cols
if col not in colset:
colset.append(col)
self.logger.debug('Data type of column "{col}" is "{type}".', col=col, type=self.var_dtype[col])
def process_columns(self, df):
"""Process columns before histogram filling.
Specifically, convert timestamp columns to integers
:param df: input (pandas) data frame
:returns: output (pandas) data frame with converted timestamp columns
:rtype: pandas DataFrame
"""
# timestamp variables are converted to ns here
# make temp df for value counting (used below)
idf = df[self.num_cols + self.str_cols].copy(deep=False)
for col in self.dt_cols:
self.logger.debug('Converting column "{col}" of type "{type}" to nanosec.',
col=col, type=self.var_dtype[col])
idf[col] = df[col].apply(to_ns)
return idf
def fill_histogram(self, idf, c):
"""Fill input histogram with column(s) of input dataframe.
:param idf: input data frame used for filling histogram
:param list c: histogram column(s)
"""
return
def drop_requested_keys(self, name, counts):
"""Drop requested keys from counts dictionary.
:param string name: key of drop_keys dict to get array of keys to be dropped
:param dict counts: counts dictionary to drop specific keys from
:returns: count dict without dropped keys
"""
# drop requested keys
if name in self.drop_keys:
keys_to_drop = self.drop_keys[name]
if not isinstance(keys_to_drop, list):
raise TypeError('drop_keys value needs to be a list of values')
for key in keys_to_drop:
if key in counts:
self.logger.debug('Removing key "{key}" with value: "{value}", as requested.',
key=key, value=counts[key])
del counts[key]
return counts
def var_bin_specs(self, c, idx=0):
"""Determine bin_specs to use for variable c.
:param list c: list of variables, or string variable
:param int idx: index of the variable in c, for which to return the bin specs. default is 0.
:return: selected bin_specs of variable
"""
if isinstance(c, str):
c = [c]
n = ':'.join(c)
# determine default bin specs
dt = np.dtype(self.var_dtype[c[idx]])
is_timestamp = isinstance(dt.type(), np.datetime64)
default = self._unit_bin_specs if not is_timestamp else self._unit_timestamp_specs
# get bin specs
if n in self.bin_specs and len(c) > 1 and len(c) == len(self.bin_specs[n]):
result = self.bin_specs[n][idx]
if not result:
result = self.bin_specs.get(c[idx], default)
else:
result = self.bin_specs.get(c[idx], default)
return result
def to_ns(x):
    """Convert input timestamp to nanoseconds since epoch (integer).

    Null or unparseable input is mapped to 0 rather than raising, so that
    this function can be applied blindly to a whole column.

    :param x: value to be converted
    :returns: converted value, or 0 when x is null or not a timestamp
    :rtype: int
    """
    if pd.isnull(x):
        return 0
    try:
        return pd.to_datetime(x).value
    except Exception:
        pass
    # second chance: some objects only convert via their string form.
    # FIX: guard this fallback too — previously a parse failure here escaped,
    # contradicting the "return 0 on failure" contract above. (The old
    # hasattr(x, '__str__') check was dead code: every object has __str__.)
    try:
        return pd.to_datetime(str(x)).value
    except Exception:
        return 0
def to_str(val):
    """Convert input to (an array of) string(s).

    :param val: value to be converted
    :returns: converted value
    :rtype: str or np.ndarray
    """
    if isinstance(val, str):
        return val
    if hasattr(val, '__iter__'):
        # element-wise conversion; objects without __str__ map to ''
        def _stringify(s):
            if isinstance(s, str):
                return s
            return str(s) if hasattr(s, '__str__') else ''
        return np.asarray([_stringify(s) for s in val])
    if hasattr(val, '__str__'):
        return str(val)
    return ''
def only_str(val):
    """Pass the input value or array only if it is a string.

    :param val: value to be evaluated
    :returns: the string, an array of the string elements, or None
    :rtype: str or np.ndarray
    """
    if isinstance(val, str):
        return val
    if hasattr(val, '__iter__'):
        # keep only the genuine string elements
        return np.asarray([s for s in val if isinstance(s, str)])
    return None
def only_bool(val):
    """Pass the input value or array only if it is a bool.

    :param val: value to be evaluated
    :returns: the bool, an array of the bool elements, or None
    :rtype: bool or np.ndarray
    """
    if isinstance(val, (np.bool_, bool)):
        # FIX: use plain bool() — the np.bool alias (which was just builtin
        # bool) has been removed from numpy, so np.bool(val) now raises.
        return bool(val)
    if hasattr(val, '__iter__') and not isinstance(val, str):
        return np.asarray([b for b in val if isinstance(b, (np.bool_, bool))])
    return None
def only_int(val):
    """Pass the input value or array only if it is an integer.

    :param val: value to be evaluated
    :returns: the integer, an array of the integer elements, or None
    :rtype: np.int64 or np.ndarray
    """
    if isinstance(val, (np.int64, int)):
        return np.int64(val)
    if hasattr(val, '__iter__') and not isinstance(val, str):
        return np.asarray([i for i in val if isinstance(i, (np.int64, int))])
    return None
def only_float(val):
    """Pass the input value or array only if it is a float.

    :param val: value to be evaluated
    :returns: the float, an array of the float elements, or np.nan
    :rtype: np.float64 or np.ndarray
    """
    if isinstance(val, (np.float64, float)):
        return np.float64(val)
    if hasattr(val, '__iter__') and not isinstance(val, str):
        return np.asarray([f for f in val if isinstance(f, (np.float64, float))])
    # NB: unlike its only_* siblings, this helper falls back to nan, not None
    return np.nan
# Dispatch table mapping a column's resolved datatype to the filter function
# that keeps only values of that type. Timestamps are handled as integer
# nanoseconds, hence np.datetime64 -> only_int.
QUANTITY = {str: only_str, np.str_: only_str,
            int: only_int, np.int64: only_int,
            bool: only_bool, np.bool_: only_bool,
            float: only_float, np.float64: only_float,
            np.datetime64: only_int}
def value_to_bin_index(val, **kwargs):
    """Convert a value to an integer bin index.

    Works for numeric values and for timestamps expressed as integers.

    :param val: value to be converted
    :param bin_width: bin width used for the conversion (default 1)
    :param bin_offset: bin offset used for the conversion (default 0)
    :returns: the bin index, or the unmodified input when it cannot be binned
    """
    bin_width = kwargs.get('bin_width', 1)
    bin_offset = kwargs.get('bin_offset', 0)
    try:
        # NOTE this notation also works for timestamps
        return int(np.floor((val - bin_offset) / bin_width))
    except (TypeError, ValueError):
        # narrowed from BaseException: only conversion errors mean "not binnable"
        return val
def value_to_bin_center(val, **kwargs):
    """Convert a value to the center value of its bin.

    Works for numeric values and for timestamps expressed as integers; the
    result is cast through the type of bin_width, so the unit is unchanged.

    :param val: value to be converted
    :param bin_width: bin width used for the conversion (default 1)
    :param bin_offset: bin offset used for the conversion (default 0)
    :returns: the bin center, or the unmodified input when it cannot be binned
    """
    bin_width = kwargs.get('bin_width', 1)
    bin_offset = kwargs.get('bin_offset', 0)
    try:
        # NOTE this notation also works for timestamps, and does not change the unit
        bin_index = int(np.floor((val - bin_offset) / bin_width))
        # cast through type(bin_width) so e.g. integer widths yield integer centers
        obj_type = type(bin_width)
        return bin_offset + obj_type((bin_index + 0.5) * bin_width)
    except (TypeError, ValueError):
        # narrowed from BaseException: only conversion errors mean "not binnable"
        return val
import json
import os.path
import box
import click
import keyring as keyring_module
from ._compat import get_user, OPEN_PARAMETERS
from .appdir import get_filename
from .env import EnvironAttrDict
from .jsonutils import to_json_file, from_json_file
from .pwd import KeyringAttrDict
from .kwargs import group_kwargs_by_funct, Signature
class Config:
    """Context manager bundling file config, environment and keyring access.

    On enter it exposes ``env`` (os.environ wrapper), optionally ``pwd``
    (keyring-backed) and ``data`` (the JSON config, optionally wrapped by a
    box callable); on exit the config is written back when opened writeable.
    """

    # default config file name inside the application directory
    cfg_name = 'config.json'
    # signatures used to route **kwargs to the right underlying callables
    funct_args = (Signature('open', OPEN_PARAMETERS), click.get_app_dir,
                  Signature('box', box.BOX_PARAMETERS), json.load, json.dump)
    # kwarg names that must never be forwarded
    bad_kwds = {'fp', 'name'}
    safe_kwds = set()

    def __init__(self, app_name, mode='w+', cfg_name=None, box=None,
                 keyring=True, service_name=None, **kwargs):
        """Initialize the config context.

        :param app_name: application name; determines config dir and keyring service
        :param mode: file-like mode string; 'r'/'w'/'+' control read/write access
        :param cfg_name: config file name (defaults to Config.cfg_name)
        :param box: optional callable wrapping the loaded dict (e.g. box.Box)
        :param keyring: True for the default keyring, falsy to disable, or a
            concrete keyring backend instance to install
        :param service_name: keyring service name (defaults to app_name)
        :param kwargs: forwarded to open/get_app_dir/box/json.load/json.dump
        """
        args = (kwargs, Config.funct_args, Config.bad_kwds, Config.safe_kwds)
        self.kwargs = group_kwargs_by_funct(*args)
        self.box = box
        mode = mode or ''
        frozen = kwargs.get('frozen_box')
        # NOTE(review): parses as `('r' in mode) or ('+' in mode and not frozen)`;
        # confirm that a frozen box opened with 'r' is intended to stay readable.
        self.readable = 'r' in mode or '+' in mode and not frozen
        self.writeable = 'w' in mode or '+' in mode
        if self.readable or self.writeable:
            cfg_name = cfg_name or Config.cfg_name
            app_dir_kwargs = self.kwargs['get_app_dir']
            self.filename = get_filename(app_name, cfg_name, **app_dir_kwargs)
        self.keyring = keyring
        if keyring:
            # keyring service is namespaced per application and per OS user
            service = (service_name or app_name) + '_' + get_user()
            KeyringAttrDict.service = service
            KeyringAttrDict.keyring = KeyringAttrDict.keyring or keyring_module
        if keyring and keyring is not True:
            # a concrete backend instance was passed in; install it
            KeyringAttrDict.set_keyring(keyring)

    def __enter__(self):
        """Load environment, keyring and config data; return self."""
        self.env = EnvironAttrDict(os.environ)
        if self.keyring:
            self.pwd = KeyringAttrDict()
        if self.readable or self.writeable:
            self.data = None
            if self.readable:
                json_kwargs = self.kwargs['open']
                json_kwargs.update(self.kwargs['load'])
                self.data = from_json_file(self.filename, **json_kwargs)
                if self.box:
                    self.data = self.box(self.data, **self.kwargs['box'])
            else:
                # NOTE(review): write-only mode calls self.box unconditionally;
                # with box=None this raises TypeError — confirm intended.
                self.data = self.box({}, **self.kwargs['box'])
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Persist config data to disk when opened writeable."""
        if self.writeable:
            json_kwargs = self.kwargs['open']
            json_kwargs.update(self.kwargs['dump'])
            to_json_file(self.data, self.filename, **json_kwargs)
import os.path
import box
import click
import keyring as keyring_module
from ._compat import get_user, OPEN_PARAMETERS
from .appdir import get_filename
from .env import EnvironAttrDict
from .jsonutils import to_json_file, from_json_file
from .pwd import KeyringAttrDict
from .kwargs import group_kwargs_by_funct, Signature
class Config:
    """Context manager bundling file config, environment and keyring access.

    On enter it exposes ``env`` (os.environ wrapper), optionally ``pwd``
    (keyring-backed) and ``data`` (the JSON config, optionally wrapped by a
    box callable); on exit the config is written back when opened writeable.
    """

    # default config file name inside the application directory
    cfg_name = 'config.json'
    # signatures used to route **kwargs to the right underlying callables
    funct_args = (Signature('open', OPEN_PARAMETERS), click.get_app_dir,
                  Signature('box', box.BOX_PARAMETERS), json.load, json.dump)
    # kwarg names that must never be forwarded
    bad_kwds = {'fp', 'name'}
    safe_kwds = set()

    def __init__(self, app_name, mode='w+', cfg_name=None, box=None,
                 keyring=True, service_name=None, **kwargs):
        """Initialize the config context.

        :param app_name: application name; determines config dir and keyring service
        :param mode: file-like mode string; 'r'/'w'/'+' control read/write access
        :param cfg_name: config file name (defaults to Config.cfg_name)
        :param box: optional callable wrapping the loaded dict (e.g. box.Box)
        :param keyring: True for the default keyring, falsy to disable, or a
            concrete keyring backend instance to install
        :param service_name: keyring service name (defaults to app_name)
        :param kwargs: forwarded to open/get_app_dir/box/json.load/json.dump
        """
        args = (kwargs, Config.funct_args, Config.bad_kwds, Config.safe_kwds)
        self.kwargs = group_kwargs_by_funct(*args)
        self.box = box
        mode = mode or ''
        frozen = kwargs.get('frozen_box')
        # NOTE(review): parses as `('r' in mode) or ('+' in mode and not frozen)`;
        # confirm that a frozen box opened with 'r' is intended to stay readable.
        self.readable = 'r' in mode or '+' in mode and not frozen
        self.writeable = 'w' in mode or '+' in mode
        if self.readable or self.writeable:
            cfg_name = cfg_name or Config.cfg_name
            app_dir_kwargs = self.kwargs['get_app_dir']
            self.filename = get_filename(app_name, cfg_name, **app_dir_kwargs)
        self.keyring = keyring
        if keyring:
            # keyring service is namespaced per application and per OS user
            service = (service_name or app_name) + '_' + get_user()
            KeyringAttrDict.service = service
            KeyringAttrDict.keyring = KeyringAttrDict.keyring or keyring_module
        if keyring and keyring is not True:
            # a concrete backend instance was passed in; install it
            KeyringAttrDict.set_keyring(keyring)

    def __enter__(self):
        """Load environment, keyring and config data; return self."""
        self.env = EnvironAttrDict(os.environ)
        if self.keyring:
            self.pwd = KeyringAttrDict()
        if self.readable or self.writeable:
            self.data = None
            if self.readable:
                json_kwargs = self.kwargs['open']
                json_kwargs.update(self.kwargs['load'])
                self.data = from_json_file(self.filename, **json_kwargs)
                if self.box:
                    self.data = self.box(self.data, **self.kwargs['box'])
            else:
                # NOTE(review): write-only mode calls self.box unconditionally;
                # with box=None this raises TypeError — confirm intended.
                self.data = self.box({}, **self.kwargs['box'])
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Persist config data to disk when opened writeable."""
        if self.writeable:
            json_kwargs = self.kwargs['open']
            json_kwargs.update(self.kwargs['dump'])
            to_json_file(self.data, self.filename, **json_kwargs)
from rest_framework.generics import get_object_or_404
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.viewsets import ViewSet
from reversion.models import Version
from datahub.core.audit_utils import diff_versions
class AuditViewSet(ViewSet):
    """Generic view set for audit logs.

    Subclasses must set the queryset class attribute.

    Only the LimitOffsetPagination paginator is supported, and so this is set explicitly.
    """

    queryset = None
    pagination_class = LimitOffsetPagination

    def get_object(self):
        """Get the model object referenced in the URL path."""
        obj = get_object_or_404(self.queryset, pk=self.kwargs['pk'])
        self.check_object_permissions(self.request, obj)
        return obj

    def list(self, request, *args, **kwargs):
        """Lists audit log entries (paginated)."""
        instance = self.get_object()
        return self.create_response(instance)

    def create_response(self, instance):
        """Creates an audit log response.

        Pagination happens over N+1 versions (via _VersionQuerySetProxy) so
        that N adjacent version pairs can be diffed into N log entries.
        """
        paginator = self.pagination_class()
        versions = Version.objects.get_for_object(instance)
        proxied_versions = _VersionQuerySetProxy(versions)
        versions_subset = paginator.paginate_queryset(proxied_versions, self.request)
        # diff adjacent versions: element n is paired with element n + 1
        version_pairs = (
            (versions_subset[n], versions_subset[n + 1]) for n in range(len(versions_subset) - 1)
        )
        results = self._construct_changelog(version_pairs)
        return paginator.get_paginated_response(results)

    @classmethod
    def _construct_changelog(cls, version_pairs):
        """Build a list of change-log dicts from (v_new, v_old) version pairs."""
        changelog = []
        for v_new, v_old in version_pairs:
            version_creator = v_new.revision.user
            model_meta_data = v_new.content_type.model_class()._meta
            creator_repr = None
            if version_creator:
                creator_repr = {
                    'id': str(version_creator.pk),
                    'first_name': version_creator.first_name,
                    'last_name': version_creator.last_name,
                    'name': version_creator.name,
                    'email': version_creator.email,
                }
            changelog.append({
                'id': v_new.id,
                'user': creator_repr,
                'timestamp': v_new.revision.date_created,
                'comment': v_new.revision.get_comment() or '',
                'changes': diff_versions(
                    model_meta_data, v_old.field_dict, v_new.field_dict,
                ),
                # subclass hook; may contribute extra keys per entry
                **cls._get_additional_change_information(v_new),
            })
        return changelog

    @classmethod
    def _get_additional_change_information(cls, v_new):
        """Gets additional information about a change for a change log entry."""
        return {}
class _VersionQuerySetProxy:
"""
Proxies a VersionQuerySet, modifying slicing behaviour to return an extra item.
This is allow the AuditSerializer to use the LimitOffsetPagination class
as N+1 versions are required to produce N audit log entries.
"""
def __init__(self, queryset):
"""Initialises the instance, saving a reference to the underlying query set."""
self.queryset = queryset
def __getitem__(self, item):
"""Handles self[item], forwarding calls to underlying query set.
Where item is a slice, 1 is added to item.stop.
"""
if isinstance(item, slice):
if item.step is not None:
raise TypeError('Slicing with step not supported')
stop = item.stop + 1 if item.stop is not None else None
return self.queryset[item.start:stop]
return self.queryset[item]
def count(self):
"""
Gets the count of the query set, minus 1. This is due to N audit log entries
being generated from N+1 query set results.
The return value is always non-negative.
"""
return max(self.queryset.count() - 1, 0) | datahub/core/audit.py | from rest_framework.generics import get_object_or_404
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.viewsets import ViewSet
from reversion.models import Version
from datahub.core.audit_utils import diff_versions
class AuditViewSet(ViewSet):
    """Generic view set for audit logs.

    Subclasses must set the queryset class attribute.

    Only the LimitOffsetPagination paginator is supported, and so this is set explicitly.
    """

    queryset = None
    pagination_class = LimitOffsetPagination

    def get_object(self):
        """Get the model object referenced in the URL path."""
        obj = get_object_or_404(self.queryset, pk=self.kwargs['pk'])
        self.check_object_permissions(self.request, obj)
        return obj

    def list(self, request, *args, **kwargs):
        """Lists audit log entries (paginated)."""
        instance = self.get_object()
        return self.create_response(instance)

    def create_response(self, instance):
        """Creates an audit log response.

        Pagination happens over N+1 versions (via _VersionQuerySetProxy) so
        that N adjacent version pairs can be diffed into N log entries.
        """
        paginator = self.pagination_class()
        versions = Version.objects.get_for_object(instance)
        proxied_versions = _VersionQuerySetProxy(versions)
        versions_subset = paginator.paginate_queryset(proxied_versions, self.request)
        # diff adjacent versions: element n is paired with element n + 1
        version_pairs = (
            (versions_subset[n], versions_subset[n + 1]) for n in range(len(versions_subset) - 1)
        )
        results = self._construct_changelog(version_pairs)
        return paginator.get_paginated_response(results)

    @classmethod
    def _construct_changelog(cls, version_pairs):
        """Build a list of change-log dicts from (v_new, v_old) version pairs."""
        changelog = []
        for v_new, v_old in version_pairs:
            version_creator = v_new.revision.user
            model_meta_data = v_new.content_type.model_class()._meta
            creator_repr = None
            if version_creator:
                creator_repr = {
                    'id': str(version_creator.pk),
                    'first_name': version_creator.first_name,
                    'last_name': version_creator.last_name,
                    'name': version_creator.name,
                    'email': version_creator.email,
                }
            changelog.append({
                'id': v_new.id,
                'user': creator_repr,
                'timestamp': v_new.revision.date_created,
                'comment': v_new.revision.get_comment() or '',
                'changes': diff_versions(
                    model_meta_data, v_old.field_dict, v_new.field_dict,
                ),
                # subclass hook; may contribute extra keys per entry
                **cls._get_additional_change_information(v_new),
            })
        return changelog

    @classmethod
    def _get_additional_change_information(cls, v_new):
        """Gets additional information about a change for a change log entry."""
        return {}
class _VersionQuerySetProxy:
"""
Proxies a VersionQuerySet, modifying slicing behaviour to return an extra item.
This is allow the AuditSerializer to use the LimitOffsetPagination class
as N+1 versions are required to produce N audit log entries.
"""
def __init__(self, queryset):
"""Initialises the instance, saving a reference to the underlying query set."""
self.queryset = queryset
def __getitem__(self, item):
"""Handles self[item], forwarding calls to underlying query set.
Where item is a slice, 1 is added to item.stop.
"""
if isinstance(item, slice):
if item.step is not None:
raise TypeError('Slicing with step not supported')
stop = item.stop + 1 if item.stop is not None else None
return self.queryset[item.start:stop]
return self.queryset[item]
def count(self):
"""
Gets the count of the query set, minus 1. This is due to N audit log entries
being generated from N+1 query set results.
The return value is always non-negative.
"""
return max(self.queryset.count() - 1, 0) | 0.879341 | 0.232125 |
from unittest import TestCase
from traits.testing.unittest_tools import UnittestTools
from force_bdss.api import DataValue
from surfactant_example.tests.probe_classes.probe_ingredients import (
ProbePrimaryIngredient,
)
from surfactant_example.tests.probe_classes.probe_fragments import (
ProbePrimarySurfactant,
ProbePositiveIon,
ProbeSolvent
)
from surfactant_example.surfactant_plugin import SurfactantPlugin
from surfactant_example.ingredient import Ingredient
class TestIngredient(TestCase):
    """Unit tests for the attributes exposed by a probe primary ingredient."""

    def setUp(self):
        self.ingredient = ProbePrimaryIngredient()

    def test___init__(self):
        """Check the probe ingredient's derived attributes after construction."""
        self.assertEqual(
            "Positive Ion Primary Surfactant", self.ingredient.name
        )
        # zero net charge goes together with the neutral flag
        self.assertEqual(0, self.ingredient.charge)
        self.assertTrue(self.ingredient.neutral)
        self.assertEqual(140, self.ingredient.mass)
        self.assertEqual(3, self.ingredient.atom_count)
class TestIngredientsDataSource(TestCase, UnittestTools):
    """Unit tests for the ingredient data source: slots, run and notification."""

    def setUp(self):
        self.plugin = SurfactantPlugin()
        self.factory = self.plugin.data_source_factories[0]
        self.data_source = self.factory.create_data_source()

    def test_basic_function_surfactant(self):
        """A two-fragment surfactant model produces a matching Ingredient."""
        model = self.factory.create_model()
        model.name = "Test Ingredient"
        model.role = "Surfactant"
        model.price = 10
        model.n_fragments = 2
        input_values = [ProbePrimarySurfactant(),
                        ProbePositiveIon()]
        in_slots = self.data_source.slots(model)[0]
        self.assertEqual(2, len(in_slots))
        data_values = [
            DataValue(type=slot.type, value=value)
            for slot, value in zip(in_slots, input_values)
        ]
        res = self.data_source.run(model, data_values)
        self.assertEqual(1, len(res))
        self.assertEqual("INGREDIENT", res[0].type)
        ingredient = res[0].value
        self.assertIsInstance(ingredient, Ingredient)
        self.assertEqual("Test Ingredient", ingredient.name)
        self.assertEqual("Surfactant", ingredient.role)
        self.assertEqual(2, len(ingredient.fragments))
        self.assertEqual(10, ingredient.price)

    def test_basic_function_solvent(self):
        """A one-fragment solvent model produces a matching Ingredient and fires an event."""
        model = self.factory.create_model()
        # FIX: was "<NAME>" (an anonymisation placeholder); the assertion
        # below expects "Test Solvent", so the model must be named that.
        model.name = "Test Solvent"
        model.role = "Solvent"
        model.price = 1
        model.n_fragments = 1
        input_values = [ProbeSolvent()]
        in_slots = self.data_source.slots(model)[0]
        self.assertEqual(1, len(in_slots))
        data_values = [
            DataValue(type=slot.type, value=value)
            for slot, value in zip(in_slots, input_values)
        ]
        # run() must notify exactly one 'event' trait change on the model
        with self.assertTraitChanges(model, "event", count=1):
            res = self.data_source.run(model, data_values)
        self.assertEqual(1, len(res))
        self.assertEqual("INGREDIENT", res[0].type)
        ingredient = res[0].value
        self.assertIsInstance(ingredient, Ingredient)
        self.assertEqual("Test Solvent", ingredient.name)
        self.assertEqual("Solvent", ingredient.role)
        self.assertEqual(1, len(ingredient.fragments))
        self.assertEqual(1, ingredient.price)

    def test_slots(self):
        """Input slots track n_fragments and are all of type FRAGMENT."""
        model = self.factory.create_model()
        model.n_fragments = 2
        in_slots = self.data_source.slots(model)[0]
        self.assertEqual(model.n_fragments, len(in_slots))
        types = ["FRAGMENT", "FRAGMENT"]
        for type_, slot in zip(types, in_slots):
            self.assertEqual(type_, slot.type)

    def test_notify_ingredient(self):
        """notify_ingredient fires exactly one 'event' trait change."""
        model = self.factory.create_model()
        ingredient = ProbePrimaryIngredient()
        with self.assertTraitChanges(model, "event", count=1):
            model.notify_ingredient(ingredient)
from traits.testing.unittest_tools import UnittestTools
from force_bdss.api import DataValue
from surfactant_example.tests.probe_classes.probe_ingredients import (
ProbePrimaryIngredient,
)
from surfactant_example.tests.probe_classes.probe_fragments import (
ProbePrimarySurfactant,
ProbePositiveIon,
ProbeSolvent
)
from surfactant_example.surfactant_plugin import SurfactantPlugin
from surfactant_example.ingredient import Ingredient
class TestIngredient(TestCase):
    """Unit tests for the attributes exposed by a probe primary ingredient."""

    def setUp(self):
        self.ingredient = ProbePrimaryIngredient()

    def test___init__(self):
        """Check the probe ingredient's derived attributes after construction."""
        self.assertEqual(
            "Positive Ion Primary Surfactant", self.ingredient.name
        )
        # zero net charge goes together with the neutral flag
        self.assertEqual(0, self.ingredient.charge)
        self.assertTrue(self.ingredient.neutral)
        self.assertEqual(140, self.ingredient.mass)
        self.assertEqual(3, self.ingredient.atom_count)
class TestIngredientsDataSource(TestCase, UnittestTools):
    """Unit tests for the ingredient data source: slots, run and notification."""

    def setUp(self):
        self.plugin = SurfactantPlugin()
        self.factory = self.plugin.data_source_factories[0]
        self.data_source = self.factory.create_data_source()

    def test_basic_function_surfactant(self):
        """A two-fragment surfactant model produces a matching Ingredient."""
        model = self.factory.create_model()
        model.name = "Test Ingredient"
        model.role = "Surfactant"
        model.price = 10
        model.n_fragments = 2
        input_values = [ProbePrimarySurfactant(),
                        ProbePositiveIon()]
        in_slots = self.data_source.slots(model)[0]
        self.assertEqual(2, len(in_slots))
        data_values = [
            DataValue(type=slot.type, value=value)
            for slot, value in zip(in_slots, input_values)
        ]
        res = self.data_source.run(model, data_values)
        self.assertEqual(1, len(res))
        self.assertEqual("INGREDIENT", res[0].type)
        ingredient = res[0].value
        self.assertIsInstance(ingredient, Ingredient)
        self.assertEqual("Test Ingredient", ingredient.name)
        self.assertEqual("Surfactant", ingredient.role)
        self.assertEqual(2, len(ingredient.fragments))
        self.assertEqual(10, ingredient.price)

    def test_basic_function_solvent(self):
        """A one-fragment solvent model produces a matching Ingredient and fires an event."""
        model = self.factory.create_model()
        # FIX: was "<NAME>" (an anonymisation placeholder); the assertion
        # below expects "Test Solvent", so the model must be named that.
        model.name = "Test Solvent"
        model.role = "Solvent"
        model.price = 1
        model.n_fragments = 1
        input_values = [ProbeSolvent()]
        in_slots = self.data_source.slots(model)[0]
        self.assertEqual(1, len(in_slots))
        data_values = [
            DataValue(type=slot.type, value=value)
            for slot, value in zip(in_slots, input_values)
        ]
        # run() must notify exactly one 'event' trait change on the model
        with self.assertTraitChanges(model, "event", count=1):
            res = self.data_source.run(model, data_values)
        self.assertEqual(1, len(res))
        self.assertEqual("INGREDIENT", res[0].type)
        ingredient = res[0].value
        self.assertIsInstance(ingredient, Ingredient)
        self.assertEqual("Test Solvent", ingredient.name)
        self.assertEqual("Solvent", ingredient.role)
        self.assertEqual(1, len(ingredient.fragments))
        self.assertEqual(1, ingredient.price)

    def test_slots(self):
        """Input slots track n_fragments and are all of type FRAGMENT."""
        model = self.factory.create_model()
        model.n_fragments = 2
        in_slots = self.data_source.slots(model)[0]
        self.assertEqual(model.n_fragments, len(in_slots))
        types = ["FRAGMENT", "FRAGMENT"]
        for type_, slot in zip(types, in_slots):
            self.assertEqual(type_, slot.type)

    def test_notify_ingredient(self):
        """notify_ingredient fires exactly one 'event' trait change."""
        model = self.factory.create_model()
        ingredient = ProbePrimaryIngredient()
        with self.assertTraitChanges(model, "event", count=1):
            model.notify_ingredient(ingredient)
import torch
from torch.nn import functional as F
from facade_project import FACADE_ROT_PROPORTIONS
def dice_loss(logits, true, eps=1e-7):
    """Compute the Sørensen–Dice loss.

    Note that PyTorch optimizers minimize a loss, hence the value returned
    is one minus the (soft) dice coefficient.

    credits to: https://github.com/kevinzakka/pytorch-goodies/blob/master/losses.py

    :param logits: tensor of shape [B, C, H, W]; raw output (logits) of the model
    :param true: tensor of shape [B, 1, H, W] with integer class labels
    :param eps: small constant added to the denominator for numerical stability
    :returns: the Sørensen–Dice loss (scalar tensor)
    """
    num_classes = logits.shape[1]
    if num_classes == 1:
        # binary case: 2-channel one-hot target, positive class first,
        # paired with sigmoid probabilities for both classes
        one_hot = torch.eye(2)[true.squeeze(1)].permute(0, 3, 1, 2).float()
        one_hot = torch.cat([one_hot[:, 1:2, :, :], one_hot[:, 0:1, :, :]], dim=1)
        pos = torch.sigmoid(logits)
        probas = torch.cat([pos, 1 - pos], dim=1)
    else:
        # multi-class case: one-hot target paired with softmax probabilities
        one_hot = torch.eye(num_classes)[true.squeeze(1)].permute(0, 3, 1, 2).float()
        probas = F.softmax(logits, dim=1)
    one_hot = one_hot.type(logits.type())
    # reduce over batch and spatial dims, keep the class dim, then average
    dims = (0,) + tuple(range(2, true.ndimension()))
    intersection = torch.sum(probas * one_hot, dims)
    cardinality = torch.sum(probas + one_hot, dims)
    coeff = (2. * intersection / (cardinality + eps)).mean()
    return 1 - coeff
def facade_criterion(predictions_list, predictions_weights, device, num_classes, use_dice=True, center_factor=90.):
    """
    Criterion for facade parsing.

    Handle 'mask' segmentation and 'heatmaps' regression

    :param predictions_list: list(str), which predictions to minimize (mask and/or heatmaps)
    :param predictions_weights: list(float), weights associated with the loss of single predictions
    :param device: torch device on which loss tensors are created
    :param num_classes: int, number of classes of the mask
    :param use_dice: bool, whether to use dice loss or cross entropy loss
    :param center_factor: float, a factor multiplied to the center heatmap target which would otherwise be too small
        in comparison to width and height.
    :return: function, the criterion to use for training
    """
    assert len(predictions_list) > 0
    assert len(predictions_list) == len(predictions_weights)

    def facade_criterion_closure(outputs, targets):
        # `outputs` packs all predictions along the channel dim; consume the
        # channels in the order given by predictions_list
        losses = []
        output_idx = 0
        for p in predictions_list:
            target = targets[p]
            n_channels = target.size(1)
            if p == 'mask':
                assert n_channels == 1, 'target is a one-channel mask'
                output = outputs[:, output_idx:output_idx + num_classes]
                output_idx += num_classes
                if use_dice:
                    losses.append(dice_loss(output, target))
                else:
                    # class weights: normalised inverse of the class proportions
                    percentages = torch.tensor(FACADE_ROT_PROPORTIONS, device=device)
                    assert num_classes == len(percentages)
                    inv_perc = 1 / percentages
                    weights = inv_perc / inv_perc.sum()
                    losses.append(F.cross_entropy(output, target.squeeze(1), weight=weights))
            elif p == 'heatmaps':
                if n_channels == 3:
                    # this means, there is the center which needs to be scaled
                    # NOTE(review): mutates the caller's target tensor in place
                    target[:, 0] = target[:, 0] * center_factor
                else:
                    assert 1 <= n_channels <= 2, 'only handling center, width and height maps'
                # clamp predictions to non-negative values before regression
                output = F.relu(outputs[:, output_idx:output_idx + n_channels])
                output_idx += n_channels
                losses.append(F.mse_loss(output, target))
        assert output_idx == outputs.size(1), 'we used all the channels available for the loss'
        # weighted sum of the individual losses
        loss = torch.zeros(1, device=device)
        for l, w in zip(losses, predictions_weights):
            loss = loss + l * w
        return loss
    return facade_criterion_closure
from torch.nn import functional as F
from facade_project import FACADE_ROT_PROPORTIONS
def dice_loss(logits, true, eps=1e-7):
    """Compute the Sørensen–Dice loss.

    Note that PyTorch optimizers minimize a loss, hence the value returned
    is one minus the (soft) dice coefficient.

    credits to: https://github.com/kevinzakka/pytorch-goodies/blob/master/losses.py

    :param logits: tensor of shape [B, C, H, W]; raw output (logits) of the model
    :param true: tensor of shape [B, 1, H, W] with integer class labels
    :param eps: small constant added to the denominator for numerical stability
    :returns: the Sørensen–Dice loss (scalar tensor)
    """
    num_classes = logits.shape[1]
    if num_classes == 1:
        # binary case: 2-channel one-hot target, positive class first,
        # paired with sigmoid probabilities for both classes
        one_hot = torch.eye(2)[true.squeeze(1)].permute(0, 3, 1, 2).float()
        one_hot = torch.cat([one_hot[:, 1:2, :, :], one_hot[:, 0:1, :, :]], dim=1)
        pos = torch.sigmoid(logits)
        probas = torch.cat([pos, 1 - pos], dim=1)
    else:
        # multi-class case: one-hot target paired with softmax probabilities
        one_hot = torch.eye(num_classes)[true.squeeze(1)].permute(0, 3, 1, 2).float()
        probas = F.softmax(logits, dim=1)
    one_hot = one_hot.type(logits.type())
    # reduce over batch and spatial dims, keep the class dim, then average
    dims = (0,) + tuple(range(2, true.ndimension()))
    intersection = torch.sum(probas * one_hot, dims)
    cardinality = torch.sum(probas + one_hot, dims)
    coeff = (2. * intersection / (cardinality + eps)).mean()
    return 1 - coeff
def facade_criterion(predictions_list, predictions_weights, device, num_classes, use_dice=True, center_factor=90.):
"""
Criterion for facade parsing.
Handle 'mask' segmentation and 'heatmaps' regression
:param predictions_list: list(str), which predictions to minimize (mask and/or heatmaps)
:param predictions_weights: list(float), weights associated with the loss of single predictions
:param num_classes: int, number of classes of the mask
:param use_dice: bool, whether to use dice loss or cross entropy loss
:param center_factor: float, a factor multiplied to the center heatmap target which would otherwise be too small
in comparision to width and height.
:return: function, the criterion to use for training
"""
assert len(predictions_list) > 0
assert len(predictions_list) == len(predictions_weights)
def facade_criterion_closure(outputs, targets):
losses = []
output_idx = 0
for p in predictions_list:
target = targets[p]
n_channels = target.size(1)
if p == 'mask':
assert n_channels == 1, 'target is a one-channel mask'
output = outputs[:, output_idx:output_idx + num_classes]
output_idx += num_classes
if use_dice:
losses.append(dice_loss(output, target))
else:
percentages = torch.tensor(FACADE_ROT_PROPORTIONS, device=device)
assert num_classes == len(percentages)
inv_perc = 1 / percentages
weights = inv_perc / inv_perc.sum()
losses.append(F.cross_entropy(output, target.squeeze(1), weight=weights))
elif p == 'heatmaps':
if n_channels == 3:
# this means, there is the center which needs to be scaled
target[:, 0] = target[:, 0] * center_factor
else:
assert 1 <= n_channels <= 2, 'only handling center, width and height maps'
output = F.relu(outputs[:, output_idx:output_idx + n_channels])
output_idx += n_channels
losses.append(F.mse_loss(output, target))
assert output_idx == outputs.size(1), 'we used all the channels available for the loss'
loss = torch.zeros(1, device=device)
for l, w in zip(losses, predictions_weights):
loss = loss + l * w
return loss
return facade_criterion_closure | 0.948858 | 0.751261 |
import unittest
from gittoc import mangle_header
class GitTocBasicTestCase(unittest.TestCase):
""" Basic true asserts to see that testing is executed
"""
def setUp(self):
pass
def tearDown(self):
pass
def test_example(self):
self.assertEqual(True, True)
self.assertNotEqual(True, False)
def test_smoke_tests_header_depth(self):
self.assertEqual(mangle_header("Description", 1), "* [Description](#description)")
self.assertEqual(mangle_header("Description", 2), " * [Description](#description)")
self.assertEqual(mangle_header("Description", 3), " * [Description](#description)")
self.assertEqual(mangle_header("Description", 4), " * [Description](#description)")
def test_smoke_tests_labels(self):
self.assertEqual(mangle_header("", 1), "* [](#)")
self.assertEqual(mangle_header("a", 1), "* [a](#a)")
self.assertEqual(mangle_header("A", 1), "* [A](#a)")
self.assertEqual(mangle_header("Abcde", 1), "* [Abcde](#abcde)")
self.assertEqual(mangle_header("Switch --version", 1), "* [Switch --version](#switch---version)")
self.assertEqual(mangle_header("Introduction / Examples", 1), "* [Introduction / Examples](#introduction--examples)")
self.assertEqual(mangle_header("Revision 3 - duration v0.0.1", 1), "* [Revision 3 - duration v0.0.1](#revision-3---duration-v001)")
self.assertEqual(mangle_header("Retrospective in 3 [sec.]", 1), "* [Retrospective in 3 [sec.]](#retrospective-in-3-sec)")
def test_smoke_tests_labels_underscore_char(self):
self.assertEqual(mangle_header("Return result after __exit", 1), "* [Return result after __exit](#return-result-after-__exit)")
self.assertEqual(mangle_header("Parsing text received from DUT (line by line)", 1), "* [Parsing text received from DUT (line by line)](#parsing-text-received-from-dut-line-by-line)")
self.assertEqual(mangle_header("Before Greentea v0.2.0", 1), "* [Before Greentea v0.2.0](#before-greentea-v020)")
self.assertEqual(mangle_header("Using __rdx_line event", 1), "* [Using __rdx_line event](#using-__rdx_line-event)")
def test_headers_depth_1(self):
self.assertEqual(
mangle_header("Example 2 - digest directly from file", 1),
"* [Example 2 - digest directly from file](#example-2---digest-directly-from-file)")
self.assertEqual(
mangle_header("Example 3 - pipe test.txt file content (as in example 2)", 1),
"* [Example 3 - pipe test.txt file content (as in example 2)](#example-3---pipe-testtxt-file-content-as-in-example-2)")
self.assertEqual(
mangle_header("Switch --use-tids example", 1),
"* [Switch --use-tids example](#switch---use-tids-example)")
def test_headers_depth_2(self):
self.assertEqual(
mangle_header("Environment Pre-Check", 2),
" * [Environment Pre-Check](#environment-pre-check)")
if __name__ == '__main__':
unittest.main() | test/basic.py | import unittest
from gittoc import mangle_header
class GitTocBasicTestCase(unittest.TestCase):
""" Basic true asserts to see that testing is executed
"""
def setUp(self):
pass
def tearDown(self):
pass
def test_example(self):
self.assertEqual(True, True)
self.assertNotEqual(True, False)
def test_smoke_tests_header_depth(self):
self.assertEqual(mangle_header("Description", 1), "* [Description](#description)")
self.assertEqual(mangle_header("Description", 2), " * [Description](#description)")
self.assertEqual(mangle_header("Description", 3), " * [Description](#description)")
self.assertEqual(mangle_header("Description", 4), " * [Description](#description)")
def test_smoke_tests_labels(self):
self.assertEqual(mangle_header("", 1), "* [](#)")
self.assertEqual(mangle_header("a", 1), "* [a](#a)")
self.assertEqual(mangle_header("A", 1), "* [A](#a)")
self.assertEqual(mangle_header("Abcde", 1), "* [Abcde](#abcde)")
self.assertEqual(mangle_header("Switch --version", 1), "* [Switch --version](#switch---version)")
self.assertEqual(mangle_header("Introduction / Examples", 1), "* [Introduction / Examples](#introduction--examples)")
self.assertEqual(mangle_header("Revision 3 - duration v0.0.1", 1), "* [Revision 3 - duration v0.0.1](#revision-3---duration-v001)")
self.assertEqual(mangle_header("Retrospective in 3 [sec.]", 1), "* [Retrospective in 3 [sec.]](#retrospective-in-3-sec)")
def test_smoke_tests_labels_underscore_char(self):
self.assertEqual(mangle_header("Return result after __exit", 1), "* [Return result after __exit](#return-result-after-__exit)")
self.assertEqual(mangle_header("Parsing text received from DUT (line by line)", 1), "* [Parsing text received from DUT (line by line)](#parsing-text-received-from-dut-line-by-line)")
self.assertEqual(mangle_header("Before Greentea v0.2.0", 1), "* [Before Greentea v0.2.0](#before-greentea-v020)")
self.assertEqual(mangle_header("Using __rdx_line event", 1), "* [Using __rdx_line event](#using-__rdx_line-event)")
def test_headers_depth_1(self):
self.assertEqual(
mangle_header("Example 2 - digest directly from file", 1),
"* [Example 2 - digest directly from file](#example-2---digest-directly-from-file)")
self.assertEqual(
mangle_header("Example 3 - pipe test.txt file content (as in example 2)", 1),
"* [Example 3 - pipe test.txt file content (as in example 2)](#example-3---pipe-testtxt-file-content-as-in-example-2)")
self.assertEqual(
mangle_header("Switch --use-tids example", 1),
"* [Switch --use-tids example](#switch---use-tids-example)")
def test_headers_depth_2(self):
self.assertEqual(
mangle_header("Environment Pre-Check", 2),
" * [Environment Pre-Check](#environment-pre-check)")
if __name__ == '__main__':
unittest.main() | 0.710327 | 0.716448 |
from .wps_sleep import Sleep
from .wps_meta import Meta
from .wps_preproc_example import PreprocessExample
from .wps_consecdrydays import ConsecDryDays
from .wps_cvdp import CVDP
from .wps_ensclus import EnsClus
from .wps_shapeselect import ShapeSelect
from .wps_blocking import Blocking
from .wps_zmnam import ZMNAM
from .wps_teleconnections import Teleconnections
from .wps_weather_regimes import WeatherRegimes
from .wps_modes_variability import ModesVariability
from .wps_combined_indices import CombinedIndices
from .wps_multimodel_products import MultimodelProducts
from .wps_heatwaves_coldwaves import HeatwavesColdwaves
from .wps_diurnal_temperature_index import DiurnalTemperatureIndex
from .wps_capacity_factor import CapacityFactor
from .wps_extreme_index import ExtremeIndex
from .wps_drought_indicator import DroughtIndicator
from .wps_quantilebias import QuantileBias
from .wps_toymodel import Toymodel
from .wps_rainfarm import RainFARM
from .wps_hyint import HyInt
from .wps_perfmetrics import Perfmetrics
from .wps_smpi import SMPI
from .wps_extreme_events import ExtremeEvents
__all__ = sorted(
[
"CVDP",
"EnsClus",
# "Sleep",
"Blocking",
"PreprocessExample",
"ZMNAM",
"Teleconnections",
"WeatherRegimes",
"ModesVariability",
"CombinedIndices",
"MultimodelProducts",
"HeatwavesColdwaves",
"DiurnalTemperatureIndex",
"CapacityFactor",
"ExtremeIndex",
"DroughtIndicator",
"ConsecDryDays",
"ShapeSelect",
"QuantileBias",
"RainFARM",
"Toymodel",
"HyInt",
]
)
processes = sorted(
[
Meta(),
CVDP(),
EnsClus(),
# Sleep(),
Blocking(),
PreprocessExample(),
ZMNAM(),
Teleconnections(),
WeatherRegimes(),
ModesVariability(),
CombinedIndices(),
MultimodelProducts(),
HeatwavesColdwaves(),
DiurnalTemperatureIndex(),
CapacityFactor(),
ExtremeIndex(),
DroughtIndicator(),
ConsecDryDays(),
ShapeSelect(),
QuantileBias(),
RainFARM(),
Toymodel(),
HyInt(),
Perfmetrics(),
SMPI(),
ExtremeEvents(),
],
key=lambda process: process.title,
) | c3s_magic_wps/processes/__init__.py | from .wps_sleep import Sleep
from .wps_meta import Meta
from .wps_preproc_example import PreprocessExample
from .wps_consecdrydays import ConsecDryDays
from .wps_cvdp import CVDP
from .wps_ensclus import EnsClus
from .wps_shapeselect import ShapeSelect
from .wps_blocking import Blocking
from .wps_zmnam import ZMNAM
from .wps_teleconnections import Teleconnections
from .wps_weather_regimes import WeatherRegimes
from .wps_modes_variability import ModesVariability
from .wps_combined_indices import CombinedIndices
from .wps_multimodel_products import MultimodelProducts
from .wps_heatwaves_coldwaves import HeatwavesColdwaves
from .wps_diurnal_temperature_index import DiurnalTemperatureIndex
from .wps_capacity_factor import CapacityFactor
from .wps_extreme_index import ExtremeIndex
from .wps_drought_indicator import DroughtIndicator
from .wps_quantilebias import QuantileBias
from .wps_toymodel import Toymodel
from .wps_rainfarm import RainFARM
from .wps_hyint import HyInt
from .wps_perfmetrics import Perfmetrics
from .wps_smpi import SMPI
from .wps_extreme_events import ExtremeEvents
__all__ = sorted(
[
"CVDP",
"EnsClus",
# "Sleep",
"Blocking",
"PreprocessExample",
"ZMNAM",
"Teleconnections",
"WeatherRegimes",
"ModesVariability",
"CombinedIndices",
"MultimodelProducts",
"HeatwavesColdwaves",
"DiurnalTemperatureIndex",
"CapacityFactor",
"ExtremeIndex",
"DroughtIndicator",
"ConsecDryDays",
"ShapeSelect",
"QuantileBias",
"RainFARM",
"Toymodel",
"HyInt",
]
)
processes = sorted(
[
Meta(),
CVDP(),
EnsClus(),
# Sleep(),
Blocking(),
PreprocessExample(),
ZMNAM(),
Teleconnections(),
WeatherRegimes(),
ModesVariability(),
CombinedIndices(),
MultimodelProducts(),
HeatwavesColdwaves(),
DiurnalTemperatureIndex(),
CapacityFactor(),
ExtremeIndex(),
DroughtIndicator(),
ConsecDryDays(),
ShapeSelect(),
QuantileBias(),
RainFARM(),
Toymodel(),
HyInt(),
Perfmetrics(),
SMPI(),
ExtremeEvents(),
],
key=lambda process: process.title,
) | 0.482185 | 0.101991 |
import argparse
import subprocess
from os import path
from scapy.all import *
from scapy.utils import rdpcap
from br24_driver import multicast_socket
import time
import StringIO
import binascii
import struct
def reassemble_packet(fragment_list):
buffer=StringIO.StringIO()
for pkt in sorted(fragment_list, key = lambda pkt:pkt['IP'].frag):
buffer.seek(pkt['IP'].frag*8)
buffer.write(pkt['IP'].payload)
b = buffer.getvalue()
# the first 64 bits are the udp header
# TODO compute checksum
packet_length = struct.unpack('>H',b[4:6])[0]
return b[8:packet_length]
if __name__=="__main__":
interface_ip = '192.168.8.2'
#interface_ip = None
scale = 1.0
mcastsocket = {}
mcastsocket['172.16.58.3'] = multicast_socket('172.16.58.3', 6678, name="scan_data", iface_ip = interface_ip)
mcastsocket['172.16.31.10'] = multicast_socket('172.16.31.10', 6679, name="command_response" , iface_ip = interface_ip)
mcastsocket['192.168.127.12'] = multicast_socket('192.168.127.12', 6680, name="command_request", iface_ip = interface_ip)
try:
parser = argparse.ArgumentParser("Replay packets from a pcap_file")
parser.add_argument("pcap_file_path", type=str, help ="The path of the pcap file to replay")
args = parser.parse_args()
pcap_path, pcap_file_name = path.split(args.pcap_file_path)
pcap_file_name, ext = path.splitext(pcap_file_name)
print (pcap_path, pcap_file_name)
subprocess.call(['editcap','-c','1024','-F','libpcap',args.pcap_file_path,'_'+pcap_file_name+'_out.pcap'])
out,err = subprocess.Popen(['ls | grep '+pcap_file_name+'_out'], stdout=subprocess.PIPE, shell=True).communicate()
fragments = {}
for pcap_file in out.splitlines():
print 'Processing %s'%(pcap_file)
pkts = rdpcap(pcap_file)
timestamp = pkts[0].time
for pkt in pkts:
if pkt.haslayer('IP'):
dst = pkt['IP'].dst
if dst in mcastsocket.keys():
print "id: %d offset: %d"%(pkt['IP'].id,pkt['IP'].frag*8)
time.sleep((pkt.time - timestamp)*scale)
timestamp = pkt.time
if pkt['IP'].flags == 1:
#print [(pkt.time, fragment_id, fragments[fragment_id].len) for fragment_id in fragments.keys()]
if pkt['IP'].frag == 0:
#fragments[pkt['IP'].id] = [pkt]
#print pkt['IP'].payload
buffer=StringIO.StringIO()
buffer.seek(pkt['IP'].frag*8)
buffer.write(pkt['IP'].payload)
fragments[pkt['IP'].id] = buffer
else:
if pkt['IP'].id not in fragments.keys():
continue
#fragments[pkt['IP'].id].append(pkt)
buffer=fragments[pkt['IP'].id]
buffer.seek(pkt['IP'].frag*8)
buffer.write(pkt['IP'].payload)
fragments[pkt['IP'].id] = buffer
else:
frags = fragments.pop(pkt['IP'].id,None)
if frags is None:
mcastsocket[dst].write(pkt.load)
else:
#frags.append(pkt)
frags.seek(pkt['IP'].frag*8)
frags.write(pkt['IP'].payload)
#mcastsocket[dst].write(reassemble_packet(frags))
payload = frags.getvalue()
packet_length = struct.unpack('>H',payload[4:6])[0]
#print (packet_length,len(payload))
mcastsocket[dst].write(payload[8:packet_length])
subprocess.call(['rm _'+pcap_file_name+'*'], shell =True)
except KeyboardInterrupt:
subprocess.call(['rm _'+pcap_file_name+'*'], shell =True) | replay_pcap.py | import argparse
import subprocess
from os import path
from scapy.all import *
from scapy.utils import rdpcap
from br24_driver import multicast_socket
import time
import StringIO
import binascii
import struct
def reassemble_packet(fragment_list):
buffer=StringIO.StringIO()
for pkt in sorted(fragment_list, key = lambda pkt:pkt['IP'].frag):
buffer.seek(pkt['IP'].frag*8)
buffer.write(pkt['IP'].payload)
b = buffer.getvalue()
# the first 64 bits are the udp header
# TODO compute checksum
packet_length = struct.unpack('>H',b[4:6])[0]
return b[8:packet_length]
if __name__=="__main__":
interface_ip = '192.168.8.2'
#interface_ip = None
scale = 1.0
mcastsocket = {}
mcastsocket['172.16.58.3'] = multicast_socket('172.16.58.3', 6678, name="scan_data", iface_ip = interface_ip)
mcastsocket['172.16.31.10'] = multicast_socket('172.16.31.10', 6679, name="command_response" , iface_ip = interface_ip)
mcastsocket['192.168.127.12'] = multicast_socket('192.168.127.12', 6680, name="command_request", iface_ip = interface_ip)
try:
parser = argparse.ArgumentParser("Replay packets from a pcap_file")
parser.add_argument("pcap_file_path", type=str, help ="The path of the pcap file to replay")
args = parser.parse_args()
pcap_path, pcap_file_name = path.split(args.pcap_file_path)
pcap_file_name, ext = path.splitext(pcap_file_name)
print (pcap_path, pcap_file_name)
subprocess.call(['editcap','-c','1024','-F','libpcap',args.pcap_file_path,'_'+pcap_file_name+'_out.pcap'])
out,err = subprocess.Popen(['ls | grep '+pcap_file_name+'_out'], stdout=subprocess.PIPE, shell=True).communicate()
fragments = {}
for pcap_file in out.splitlines():
print 'Processing %s'%(pcap_file)
pkts = rdpcap(pcap_file)
timestamp = pkts[0].time
for pkt in pkts:
if pkt.haslayer('IP'):
dst = pkt['IP'].dst
if dst in mcastsocket.keys():
print "id: %d offset: %d"%(pkt['IP'].id,pkt['IP'].frag*8)
time.sleep((pkt.time - timestamp)*scale)
timestamp = pkt.time
if pkt['IP'].flags == 1:
#print [(pkt.time, fragment_id, fragments[fragment_id].len) for fragment_id in fragments.keys()]
if pkt['IP'].frag == 0:
#fragments[pkt['IP'].id] = [pkt]
#print pkt['IP'].payload
buffer=StringIO.StringIO()
buffer.seek(pkt['IP'].frag*8)
buffer.write(pkt['IP'].payload)
fragments[pkt['IP'].id] = buffer
else:
if pkt['IP'].id not in fragments.keys():
continue
#fragments[pkt['IP'].id].append(pkt)
buffer=fragments[pkt['IP'].id]
buffer.seek(pkt['IP'].frag*8)
buffer.write(pkt['IP'].payload)
fragments[pkt['IP'].id] = buffer
else:
frags = fragments.pop(pkt['IP'].id,None)
if frags is None:
mcastsocket[dst].write(pkt.load)
else:
#frags.append(pkt)
frags.seek(pkt['IP'].frag*8)
frags.write(pkt['IP'].payload)
#mcastsocket[dst].write(reassemble_packet(frags))
payload = frags.getvalue()
packet_length = struct.unpack('>H',payload[4:6])[0]
#print (packet_length,len(payload))
mcastsocket[dst].write(payload[8:packet_length])
subprocess.call(['rm _'+pcap_file_name+'*'], shell =True)
except KeyboardInterrupt:
subprocess.call(['rm _'+pcap_file_name+'*'], shell =True) | 0.06711 | 0.101145 |
import os
import base64
import random
import string
import functools
from datetime import datetime, timedelta
import jinja2
import jsonschema
from pony.orm import db_session
from werkzeug.exceptions import HTTPException, Forbidden, UnprocessableEntity
from flask import request, current_app, jsonify, make_response, session, send_from_directory, abort, render_template
from flask_babel import gettext
from flask_login import current_user
from ..utils.exceptions import InternalError, BadRequestError
def dictslice(d, keys):
return {k: v for k, v in d.items() if k in keys}
def generate_session_id():
s = string.ascii_lowercase + string.digits
return ''.join(random.choice(s) for _ in range(32))
def user_session(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
if 'smilepack_session' in request.cookies:
first_visit = False
session_id = str(request.cookies['smilepack_session'])[:32]
else:
first_visit = True
session_id = generate_session_id()
result = func(session_id, first_visit, *args, **kwargs)
if not first_visit:
return result
response = current_app.make_response(result)
response.set_cookie('smilepack_session', value=session_id, expires=datetime.now() + timedelta(365 * 10))
return response
return decorator
def json_answer(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
try:
resp = jsonify(func(*args, **kwargs))
except jsonschema.ValidationError as exc:
resp = jsonify(error=exc.message, at=tuple(exc.path))
resp.status_code = 422
except BadRequestError as exc:
if exc.at:
resp = jsonify(error=exc.message, at=exc.at)
else:
resp = jsonify(error=exc.message)
resp.status_code = 422
except InternalError as exc:
resp = jsonify(error=str(exc))
resp.status_code = 500
except HTTPException as exc:
resp = jsonify(error=exc.description)
resp.status_code = exc.code
return resp
return decorator
def csrf_protect(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
if request.method == 'GET':
pass # TODO:
elif request.method != 'OPTIONS': # TODO: recheck security
token = session.get('csrf_token')
if not token or (request.json or request.form).get('csrf_token') != token:
raise Forbidden('Invalid csrf_token')
return func(*args, **kwargs)
return decorator
def for_admin(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
if current_user.is_anonymous or not current_user.is_admin:
abort(403)
return func(*args, **kwargs)
return decorator
def default_crossdomain(methods=('GET',)):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
def decorator(f):
def wrapped_function(*args, **kwargs):
if request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
origin = request.headers.get('Origin')
if not origin:
return resp
origins = current_app.config['API_ORIGINS']
cred_origins = current_app.config['API_ALLOW_CREDENTIALS_FOR']
origins_all = '*' in origins
cred_origins_all = '*' in cred_origins
h = resp.headers
ok = False
if cred_origins_all and (origins_all or origin in origins) or origin in cred_origins:
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Credentials'] = 'true'
ok = True
elif origin in origins:
h['Access-Control-Allow-Origin'] = origin
ok = True
elif origins_all:
h['Access-Control-Allow-Origin'] = '*'
ok = True
if ok:
h['Access-Control-Allow-Methods'] = methods
h['Access-Control-Max-Age'] = str(21600)
return resp
f.provide_automatic_options = False
return functools.update_wrapper(wrapped_function, f)
return decorator
def csrf_token(reset=False):
if reset or not session.get('csrf_token'):
session['csrf_token'] = base64.b64encode(os.urandom(24)).decode('ascii')
return session['csrf_token']
def csrf_token_field():
token = jinja2.escape(csrf_token())
field = '<input type="hidden" name="csrf_token" value="{token}" />'.format(token=token)
return jinja2.Markup(field)
def favicon_link_tag():
url = current_app.config.get('FAVICON_URL')
if not url:
return ''
url = jinja2.escape(url)
meta = '<link href="{url}" rel="shortcut icon" />'.format(url=url)
return jinja2.Markup(meta)
def _handle_bad_request_error(error):
return UnprocessableEntity(str(error))
def _handle_validation_error(error):
return UnprocessableEntity('{}: {}'.format(tuple(error.path), error.message))
def _add_nocache_header(response):
response.cache_control.max_age = 0
return response
def _page403(e):
return render_template('errors/403.html'), 403
def _page404(e):
return render_template('errors/404.html'), 404
def _page500(e):
best = request.accept_mimetypes.best_match(['application/json', 'text/html'])
if best == 'application/json' and request.accept_mimetypes[best] > request.accept_mimetypes['text/html']:
error_text = str(gettext('Oops, something went wrong'))
error_text += ' (' + str(gettext('Internal Server Error')) + ')'
resp = jsonify({'error': error_text})
resp.status_code = 500
return resp
else:
return render_template('errors/500.html', is_index=request.endpoint == 'pages.index'), 500
def configure_for_app(app, package_root):
app.jinja_env.globals['csrf_token'] = csrf_token
app.jinja_env.globals['csrf_token_field'] = csrf_token_field
app.jinja_env.globals['favicon_link_tag'] = favicon_link_tag
app.register_error_handler(BadRequestError, _handle_bad_request_error)
app.register_error_handler(jsonschema.ValidationError, _handle_validation_error)
app.errorhandler(403)(db_session(_page403)) # db_session is needed for current_user object
app.errorhandler(404)(db_session(_page404))
app.errorhandler(500)(db_session(_page500))
app.after_request(_add_nocache_header)
# Webpack assets
@app.route("/assets/<path:filename>")
def send_asset(filename):
return send_from_directory(os.path.join(package_root, "public"), filename) | smilepack/views/utils.py |
import os
import base64
import random
import string
import functools
from datetime import datetime, timedelta
import jinja2
import jsonschema
from pony.orm import db_session
from werkzeug.exceptions import HTTPException, Forbidden, UnprocessableEntity
from flask import request, current_app, jsonify, make_response, session, send_from_directory, abort, render_template
from flask_babel import gettext
from flask_login import current_user
from ..utils.exceptions import InternalError, BadRequestError
def dictslice(d, keys):
return {k: v for k, v in d.items() if k in keys}
def generate_session_id():
s = string.ascii_lowercase + string.digits
return ''.join(random.choice(s) for _ in range(32))
def user_session(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
if 'smilepack_session' in request.cookies:
first_visit = False
session_id = str(request.cookies['smilepack_session'])[:32]
else:
first_visit = True
session_id = generate_session_id()
result = func(session_id, first_visit, *args, **kwargs)
if not first_visit:
return result
response = current_app.make_response(result)
response.set_cookie('smilepack_session', value=session_id, expires=datetime.now() + timedelta(365 * 10))
return response
return decorator
def json_answer(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
try:
resp = jsonify(func(*args, **kwargs))
except jsonschema.ValidationError as exc:
resp = jsonify(error=exc.message, at=tuple(exc.path))
resp.status_code = 422
except BadRequestError as exc:
if exc.at:
resp = jsonify(error=exc.message, at=exc.at)
else:
resp = jsonify(error=exc.message)
resp.status_code = 422
except InternalError as exc:
resp = jsonify(error=str(exc))
resp.status_code = 500
except HTTPException as exc:
resp = jsonify(error=exc.description)
resp.status_code = exc.code
return resp
return decorator
def csrf_protect(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
if request.method == 'GET':
pass # TODO:
elif request.method != 'OPTIONS': # TODO: recheck security
token = session.get('csrf_token')
if not token or (request.json or request.form).get('csrf_token') != token:
raise Forbidden('Invalid csrf_token')
return func(*args, **kwargs)
return decorator
def for_admin(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
if current_user.is_anonymous or not current_user.is_admin:
abort(403)
return func(*args, **kwargs)
return decorator
def default_crossdomain(methods=('GET',)):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
def decorator(f):
def wrapped_function(*args, **kwargs):
if request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
origin = request.headers.get('Origin')
if not origin:
return resp
origins = current_app.config['API_ORIGINS']
cred_origins = current_app.config['API_ALLOW_CREDENTIALS_FOR']
origins_all = '*' in origins
cred_origins_all = '*' in cred_origins
h = resp.headers
ok = False
if cred_origins_all and (origins_all or origin in origins) or origin in cred_origins:
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Credentials'] = 'true'
ok = True
elif origin in origins:
h['Access-Control-Allow-Origin'] = origin
ok = True
elif origins_all:
h['Access-Control-Allow-Origin'] = '*'
ok = True
if ok:
h['Access-Control-Allow-Methods'] = methods
h['Access-Control-Max-Age'] = str(21600)
return resp
f.provide_automatic_options = False
return functools.update_wrapper(wrapped_function, f)
return decorator
def csrf_token(reset=False):
if reset or not session.get('csrf_token'):
session['csrf_token'] = base64.b64encode(os.urandom(24)).decode('ascii')
return session['csrf_token']
def csrf_token_field():
token = jinja2.escape(csrf_token())
field = '<input type="hidden" name="csrf_token" value="{token}" />'.format(token=token)
return jinja2.Markup(field)
def favicon_link_tag():
url = current_app.config.get('FAVICON_URL')
if not url:
return ''
url = jinja2.escape(url)
meta = '<link href="{url}" rel="shortcut icon" />'.format(url=url)
return jinja2.Markup(meta)
def _handle_bad_request_error(error):
return UnprocessableEntity(str(error))
def _handle_validation_error(error):
return UnprocessableEntity('{}: {}'.format(tuple(error.path), error.message))
def _add_nocache_header(response):
response.cache_control.max_age = 0
return response
def _page403(e):
return render_template('errors/403.html'), 403
def _page404(e):
return render_template('errors/404.html'), 404
def _page500(e):
best = request.accept_mimetypes.best_match(['application/json', 'text/html'])
if best == 'application/json' and request.accept_mimetypes[best] > request.accept_mimetypes['text/html']:
error_text = str(gettext('Oops, something went wrong'))
error_text += ' (' + str(gettext('Internal Server Error')) + ')'
resp = jsonify({'error': error_text})
resp.status_code = 500
return resp
else:
return render_template('errors/500.html', is_index=request.endpoint == 'pages.index'), 500
def configure_for_app(app, package_root):
app.jinja_env.globals['csrf_token'] = csrf_token
app.jinja_env.globals['csrf_token_field'] = csrf_token_field
app.jinja_env.globals['favicon_link_tag'] = favicon_link_tag
app.register_error_handler(BadRequestError, _handle_bad_request_error)
app.register_error_handler(jsonschema.ValidationError, _handle_validation_error)
app.errorhandler(403)(db_session(_page403)) # db_session is needed for current_user object
app.errorhandler(404)(db_session(_page404))
app.errorhandler(500)(db_session(_page500))
app.after_request(_add_nocache_header)
# Webpack assets
@app.route("/assets/<path:filename>")
def send_asset(filename):
return send_from_directory(os.path.join(package_root, "public"), filename) | 0.264833 | 0.054853 |
from pyRMSD.RMSDCalculator import RMSDCalculator
from pyRMSD.condensedMatrix import CondensedMatrix
from pyproct.driver.parameters import ProtocolParameters
class RMSDMatrixBuilder(object):
def __init__(self):
pass
@classmethod
def build(cls, data_handler, matrix_creation_parameters):
"""
Generates a matrix with the method used in the handler creation.
@param trajectory_handler:
@param matrix_creation_parameters:
@return: The created matrix.
"""
calculator_type = matrix_creation_parameters.get_value("calculator_type",
default_value = "QTRFIT_OMP_CALCULATOR")
calculator_options = matrix_creation_parameters.get_value("calculator_options",
default_value = ProtocolParameters({"number_of_threads":8,
"blocks_per_grid":8,
"threads_per_block":32}))
calculator_options = ProtocolParameters(calculator_options)
structure = data_handler.get_data()
fit_selection_coordsets = structure.getFittingCoordinates()
calc_selection_coordsets = structure.getCalculationCoordinates()
if calc_selection_coordsets is None:
calculator = RMSDCalculator(calculatorType = calculator_type,
fittingCoordsets = fit_selection_coordsets)
else:
symm_groups = []
if "symmetries" in matrix_creation_parameters:
# Then prepare it to handle calculation symmetries
# Description of equivalences must have the same number of atoms
symm_groups = cls.process_symm_groups(matrix_creation_parameters,
structure,
calc_selection_coordsets)
print "Using symmetries", symm_groups
calculator = RMSDCalculator(calculatorType = calculator_type,
fittingCoordsets = fit_selection_coordsets,
calculationCoordsets = calc_selection_coordsets,
calcSymmetryGroups = symm_groups)
try:
calculator.setNumberOfOpenMPThreads(calculator_options.get_value("number_of_threads",
default_value = 8))
except KeyError:
pass
try:
calculator.setCUDAKernelThreadsPerBlock(calculator_options.get_value("threads_per_block",
default_value = 32),
calculator_options.get_value("blocks_per_grid",
default_value = 8))
except KeyError:
pass
rmsds = calculator.pairwiseRMSDMatrix()
return CondensedMatrix(rmsds)
@classmethod
def process_symm_groups(cls, matrix_parameters, structure, calc_selection_coordsets):
symm_groups = []
for equivalence_id in matrix_parameters["symmetries"]:
# Example: [["name C10", "name C30"],["name O7", "name O20"]]
symm_group = cls.process_group(equivalence_id,
matrix_parameters,
structure,
calc_selection_coordsets)
symm_groups.append(symm_group)
return symm_groups
@classmethod
def process_group(cls, equivalence_id, matrix_parameters, structure, calc_selection_coordsets):
common_selection = matrix_parameters.get_value("symmetries.%s.common"%equivalence_id,
default_value= "")
# This one is mandatory
if not "equivalences" in matrix_parameters["symmetries"][equivalence_id]:
print "[ERROR RMSDMatrixBuilder:process_group] It is mandatory to define the atom equivalences of a symmetry group (%s)."%equivalence_id
exit(-1)
atom_selections = matrix_parameters["symmetries"][equivalence_id]["equivalences"]
def build_selection(common, atom_selection):
if common == "":
return atom_selection
else:
return "%s and %s"%(common_selection, atom_selection)
symm_group = []
for atom_selection in atom_selections:
atom1_coords = cls.select_one_atom( structure,
build_selection(common_selection, atom_selection[0]))
atom1_index = cls.locate_index(atom1_coords, calc_selection_coordsets)
atom2_coords = cls.select_one_atom( structure,
build_selection(common_selection, atom_selection[1]))
atom2_index = cls.locate_index(atom2_coords, calc_selection_coordsets)
symm_group.append((atom1_index, atom2_index))
return tuple(symm_group)
@classmethod
def select_one_atom(self, structure, selection):
# We pick coordinates only for first frame
coordsets = structure.getSelectionCoordinates(selection)[0]
if len(coordsets) == 0:
print "[ERROR RMSDMatrixBuilder:select_one_atom] Selection returned 0 atoms (%s)."%selection
exit(-1)
if len(coordsets) > 1:
print "[ERROR RMSDMatrixBuilder:select_one_atom] Selection returned more than one atom (%s)."%selection
exit(-1)
return coordsets[0]
@classmethod
def locate_index(cls,atom_coords, coordsets):
for i,coord in enumerate(coordsets[0]): # <- we work with the first frame only
if coord[0] == atom_coords[0] and coord[1] == atom_coords[1] and coord[2] == atom_coords[2]:
return i
return None | pyproct/data/matrix/protein/cases/rmsd/cartesiansCase.py | from pyRMSD.RMSDCalculator import RMSDCalculator
from pyRMSD.condensedMatrix import CondensedMatrix
from pyproct.driver.parameters import ProtocolParameters
class RMSDMatrixBuilder(object):
def __init__(self):
pass
@classmethod
def build(cls, data_handler, matrix_creation_parameters):
"""
Generates a matrix with the method used in the handler creation.
@param trajectory_handler:
@param matrix_creation_parameters:
@return: The created matrix.
"""
calculator_type = matrix_creation_parameters.get_value("calculator_type",
default_value = "QTRFIT_OMP_CALCULATOR")
calculator_options = matrix_creation_parameters.get_value("calculator_options",
default_value = ProtocolParameters({"number_of_threads":8,
"blocks_per_grid":8,
"threads_per_block":32}))
calculator_options = ProtocolParameters(calculator_options)
structure = data_handler.get_data()
fit_selection_coordsets = structure.getFittingCoordinates()
calc_selection_coordsets = structure.getCalculationCoordinates()
if calc_selection_coordsets is None:
calculator = RMSDCalculator(calculatorType = calculator_type,
fittingCoordsets = fit_selection_coordsets)
else:
symm_groups = []
if "symmetries" in matrix_creation_parameters:
# Then prepare it to handle calculation symmetries
# Description of equivalences must have the same number of atoms
symm_groups = cls.process_symm_groups(matrix_creation_parameters,
structure,
calc_selection_coordsets)
print "Using symmetries", symm_groups
calculator = RMSDCalculator(calculatorType = calculator_type,
fittingCoordsets = fit_selection_coordsets,
calculationCoordsets = calc_selection_coordsets,
calcSymmetryGroups = symm_groups)
try:
calculator.setNumberOfOpenMPThreads(calculator_options.get_value("number_of_threads",
default_value = 8))
except KeyError:
pass
try:
calculator.setCUDAKernelThreadsPerBlock(calculator_options.get_value("threads_per_block",
default_value = 32),
calculator_options.get_value("blocks_per_grid",
default_value = 8))
except KeyError:
pass
rmsds = calculator.pairwiseRMSDMatrix()
return CondensedMatrix(rmsds)
@classmethod
def process_symm_groups(cls, matrix_parameters, structure, calc_selection_coordsets):
symm_groups = []
for equivalence_id in matrix_parameters["symmetries"]:
# Example: [["name C10", "name C30"],["name O7", "name O20"]]
symm_group = cls.process_group(equivalence_id,
matrix_parameters,
structure,
calc_selection_coordsets)
symm_groups.append(symm_group)
return symm_groups
@classmethod
def process_group(cls, equivalence_id, matrix_parameters, structure, calc_selection_coordsets):
common_selection = matrix_parameters.get_value("symmetries.%s.common"%equivalence_id,
default_value= "")
# This one is mandatory
if not "equivalences" in matrix_parameters["symmetries"][equivalence_id]:
print "[ERROR RMSDMatrixBuilder:process_group] It is mandatory to define the atom equivalences of a symmetry group (%s)."%equivalence_id
exit(-1)
atom_selections = matrix_parameters["symmetries"][equivalence_id]["equivalences"]
def build_selection(common, atom_selection):
if common == "":
return atom_selection
else:
return "%s and %s"%(common_selection, atom_selection)
symm_group = []
for atom_selection in atom_selections:
atom1_coords = cls.select_one_atom( structure,
build_selection(common_selection, atom_selection[0]))
atom1_index = cls.locate_index(atom1_coords, calc_selection_coordsets)
atom2_coords = cls.select_one_atom( structure,
build_selection(common_selection, atom_selection[1]))
atom2_index = cls.locate_index(atom2_coords, calc_selection_coordsets)
symm_group.append((atom1_index, atom2_index))
return tuple(symm_group)
@classmethod
def select_one_atom(self, structure, selection):
# We pick coordinates only for first frame
coordsets = structure.getSelectionCoordinates(selection)[0]
if len(coordsets) == 0:
print "[ERROR RMSDMatrixBuilder:select_one_atom] Selection returned 0 atoms (%s)."%selection
exit(-1)
if len(coordsets) > 1:
print "[ERROR RMSDMatrixBuilder:select_one_atom] Selection returned more than one atom (%s)."%selection
exit(-1)
return coordsets[0]
@classmethod
def locate_index(cls,atom_coords, coordsets):
for i,coord in enumerate(coordsets[0]): # <- we work with the first frame only
if coord[0] == atom_coords[0] and coord[1] == atom_coords[1] and coord[2] == atom_coords[2]:
return i
return None | 0.796965 | 0.646251 |
"""
This module contains the 'password' node menu.
"""
from textwrap import dedent
from evennia.server.models import ServerConfig
from menu.character import (
_text_choose_characters,
_options_choose_characters,
_login)
from menu.email_address import text_email_address
from typeclasses.scripts import WrongPassword
## Constants
LEN_PASSWD = 6
def password(caller, input):
"""Ask the user to enter the password to this player.
This is assuming the user exists (see 'create_username' and
'create_password'). This node "loops" if needed: if the
user specifies a wrong password, offers the user to try
again or to go back by entering 'b'.
If the password is correct, then login.
"""
caller.msg(echo=True)
input = input.strip()
text = ""
options = (
{
"key": "_default",
"desc": "Enter your password.",
"goto": "password",
},
)
# Check the password
player = caller.db._player
# If the account is locked, the user has to wait (maximum
# 3 seconds) before retrying
if player.db._locked:
text = "|gPlease wait, you cannot enter your password yet.|n"
return text, options
caller.msg(echo=True)
bans = ServerConfig.objects.conf("server_bans")
banned = bans and (any(tup[0] == player.name.lower() for tup in bans) \
or any(tup[2].match(caller.address) for tup in bans if tup[2]))
if not player.check_password(input):
caller.msg(echo=False)
text = dedent("""
|rIncorrect password.|n
Type |yb|n to go back to the login screen.
Or wait 3 seconds before trying a new password.
""".strip("\n"))
# Loops on the same node
player.scripts.add(WrongPassword)
scripts = player.scripts.get("wrong_password")
if scripts:
script = scripts[0]
script.db.session = caller
else:
print "Cannot retrieve the 'wrong_password' script."
options = (
{
"key": "b",
"desc": "Go back to the login screen.",
"goto": "start",
},
{
"key": "_default",
"desc": "Enter your password again.",
"goto": "password",
},
)
elif banned:
# This is a banned IP or name!
string = dedent("""
|rYou have been banned and cannot continue from here.|n
If you feel this ban is in error, please email an admin.
""".strip("\n"))
caller.msg(string)
caller.sessionhandler.disconnect(
caller, "Good bye! Disconnecting...")
else:
# The password is correct, we can log into the player.
if not player.email:
# Redirects to the node to set an e-mail address
text = text_email_address(player)
options = (
{
"key": "_default",
"desc": "Enter your e-mail address.",
"goto": "email_address",
},
)
elif not player.db.valid:
# Redirects to the node for the validation code
text = "Enter your received validation code."
options = (
{
"key": "_default",
"desc": "Enter your validation code.",
"goto": "validate_account",
},
)
else:
_login(caller, player)
text = ""
options = _options_choose_characters(player)
return text, options | menu/password.py | """
This module contains the 'password' node menu.
"""
from textwrap import dedent
from evennia.server.models import ServerConfig
from menu.character import (
_text_choose_characters,
_options_choose_characters,
_login)
from menu.email_address import text_email_address
from typeclasses.scripts import WrongPassword
## Constants
LEN_PASSWD = 6
def password(caller, input):
"""Ask the user to enter the password to this player.
This is assuming the user exists (see 'create_username' and
'create_password'). This node "loops" if needed: if the
user specifies a wrong password, offers the user to try
again or to go back by entering 'b'.
If the password is correct, then login.
"""
caller.msg(echo=True)
input = input.strip()
text = ""
options = (
{
"key": "_default",
"desc": "Enter your password.",
"goto": "password",
},
)
# Check the password
player = caller.db._player
# If the account is locked, the user has to wait (maximum
# 3 seconds) before retrying
if player.db._locked:
text = "|gPlease wait, you cannot enter your password yet.|n"
return text, options
caller.msg(echo=True)
bans = ServerConfig.objects.conf("server_bans")
banned = bans and (any(tup[0] == player.name.lower() for tup in bans) \
or any(tup[2].match(caller.address) for tup in bans if tup[2]))
if not player.check_password(input):
caller.msg(echo=False)
text = dedent("""
|rIncorrect password.|n
Type |yb|n to go back to the login screen.
Or wait 3 seconds before trying a new password.
""".strip("\n"))
# Loops on the same node
player.scripts.add(WrongPassword)
scripts = player.scripts.get("wrong_password")
if scripts:
script = scripts[0]
script.db.session = caller
else:
print "Cannot retrieve the 'wrong_password' script."
options = (
{
"key": "b",
"desc": "Go back to the login screen.",
"goto": "start",
},
{
"key": "_default",
"desc": "Enter your password again.",
"goto": "password",
},
)
elif banned:
# This is a banned IP or name!
string = dedent("""
|rYou have been banned and cannot continue from here.|n
If you feel this ban is in error, please email an admin.
""".strip("\n"))
caller.msg(string)
caller.sessionhandler.disconnect(
caller, "Good bye! Disconnecting...")
else:
# The password is correct, we can log into the player.
if not player.email:
# Redirects to the node to set an e-mail address
text = text_email_address(player)
options = (
{
"key": "_default",
"desc": "Enter your e-mail address.",
"goto": "email_address",
},
)
elif not player.db.valid:
# Redirects to the node for the validation code
text = "Enter your received validation code."
options = (
{
"key": "_default",
"desc": "Enter your validation code.",
"goto": "validate_account",
},
)
else:
_login(caller, player)
text = ""
options = _options_choose_characters(player)
return text, options | 0.46393 | 0.141489 |
from flask.ext.assets import Bundle
js_filters = ['jsmin']
css_filters = ['cssmin']
js_libs = Bundle(
'js/lib/jquery-1.9.1.min.js',
'js/lib/underscore-min.js',
'js/lib/backbone-min.js',
'js/lib/bootstrap.min.js',
output='gen/js/libs.js'
)
css = Bundle(
'css/bootstrap/css/bootstrap.min.css',
'css/bootstrap/css/bootstrap-responsive.min.css',
Bundle('css/datepicker.css',
'css/flod.css',
filters=css_filters),
filters=['cssrewrite'],
output='gen/css/bootstrap.css'
)
organisationpage_js = Bundle(
'js/lib/serialize_object.js',
'js/src/notifier.js',
'js/src/activity_code_models.js',
'js/src/modal.js',
'js/src/organisation-activity.js',
'js/src/organisation-model.js',
'js/src/organisation-form.js',
filters=js_filters,
output='gen/js/organisation.js'
)
organisation_email = Bundle(
'js/src/modal.js',
'js/src/email.js',
filters=js_filters,
output='gen/js/email.js'
)
organisation_export = Bundle(
'js/src/export_report.js',
filters=js_filters,
output='gen/js/export_report.js'
)
registerorganisation_js = Bundle(
'js/src/modal.js',
'js/src/register_org.js',
filters=js_filters,
output='gen/js/register_organisation.js'
)
profilepage_js = Bundle(
'js/src/notifier.js',
'js/lib/serialize_object.js',
'js/src/profile_page.js',
filters=js_filters,
output='gen/js/profile.js'
)
organisation_members_js = Bundle(
'js/src/modal.js',
'js/src/notifier.js',
'js/src/organisation-members.js',
filters=js_filters,
output='gen/js/profile.js'
)
umbrella_organisation_js = Bundle(
Bundle('js/src/notifier.js',
'js/src/organisation-model.js',
'js/src/Message.js',
'js/src/FormValidationMixin.js',
'js/src/UmbrellaOrganisation.js',
'js/src/umbrella_organisation_page/MemberOrganisationsView.js',
'js/src/umbrella_organisation_page/ResponsiblePersonsView.js',
'js/src/umbrella_organisation_page/InformationView.js',
'js/src/umbrella_organisation_page/Router.js',
'js/src/umbrella_organisation_page/MainPage.js',
'js/src/umbrella_organisation_page/UmbrellaOrganisationHeaderView.js',
filters=js_filters),
output='gen/js/umbrella_organisation.js'
)
organisation_internal_notes_js = Bundle(
'js/lib/moment.min.js',
Bundle(
'js/lib/moment-nb.js',
'js/src/organisation-model.js',
'js/src/organisation_internal_notes.js',
filters=js_filters,
output='gen/js/organisation_internal_notes.js'
)
) | flod_aktor_frontend/assetbundle.py | from flask.ext.assets import Bundle
js_filters = ['jsmin']
css_filters = ['cssmin']
js_libs = Bundle(
'js/lib/jquery-1.9.1.min.js',
'js/lib/underscore-min.js',
'js/lib/backbone-min.js',
'js/lib/bootstrap.min.js',
output='gen/js/libs.js'
)
css = Bundle(
'css/bootstrap/css/bootstrap.min.css',
'css/bootstrap/css/bootstrap-responsive.min.css',
Bundle('css/datepicker.css',
'css/flod.css',
filters=css_filters),
filters=['cssrewrite'],
output='gen/css/bootstrap.css'
)
organisationpage_js = Bundle(
'js/lib/serialize_object.js',
'js/src/notifier.js',
'js/src/activity_code_models.js',
'js/src/modal.js',
'js/src/organisation-activity.js',
'js/src/organisation-model.js',
'js/src/organisation-form.js',
filters=js_filters,
output='gen/js/organisation.js'
)
organisation_email = Bundle(
'js/src/modal.js',
'js/src/email.js',
filters=js_filters,
output='gen/js/email.js'
)
organisation_export = Bundle(
'js/src/export_report.js',
filters=js_filters,
output='gen/js/export_report.js'
)
registerorganisation_js = Bundle(
'js/src/modal.js',
'js/src/register_org.js',
filters=js_filters,
output='gen/js/register_organisation.js'
)
profilepage_js = Bundle(
'js/src/notifier.js',
'js/lib/serialize_object.js',
'js/src/profile_page.js',
filters=js_filters,
output='gen/js/profile.js'
)
organisation_members_js = Bundle(
'js/src/modal.js',
'js/src/notifier.js',
'js/src/organisation-members.js',
filters=js_filters,
output='gen/js/profile.js'
)
umbrella_organisation_js = Bundle(
Bundle('js/src/notifier.js',
'js/src/organisation-model.js',
'js/src/Message.js',
'js/src/FormValidationMixin.js',
'js/src/UmbrellaOrganisation.js',
'js/src/umbrella_organisation_page/MemberOrganisationsView.js',
'js/src/umbrella_organisation_page/ResponsiblePersonsView.js',
'js/src/umbrella_organisation_page/InformationView.js',
'js/src/umbrella_organisation_page/Router.js',
'js/src/umbrella_organisation_page/MainPage.js',
'js/src/umbrella_organisation_page/UmbrellaOrganisationHeaderView.js',
filters=js_filters),
output='gen/js/umbrella_organisation.js'
)
organisation_internal_notes_js = Bundle(
'js/lib/moment.min.js',
Bundle(
'js/lib/moment-nb.js',
'js/src/organisation-model.js',
'js/src/organisation_internal_notes.js',
filters=js_filters,
output='gen/js/organisation_internal_notes.js'
)
) | 0.432782 | 0.039343 |
import os
import matplotlib.pyplot as plt
import numpy as np
import argparse
import argparse
import cv2
import tensorflow as tf
from tensorflow import keras
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
IMAGE_WIDTH = 300
IMAGE_HEIGHT = 300
model = keras.models.load_model("C:/Users/jrj00/Desktop/tttt/tfg/modeltest/modelPb")
prototxtPath = os.path.sep.join([".", "deploy.prototxt.txt"])
weightsPath = os.path.sep.join([".", "res10_300x300_ssd_iter_140000.caffemodel"])
net = cv2.dnn.readNet(prototxtPath, weightsPath)
image = cv2.imread("C:/Users/jrj00/Desktop/tttt/58.jpg")
#orig = image.copy()
#video = cv2.VideoCapture(0)
while True:
#grab, image = video.read()
#video.release()
(h, w) = image.shape[:2]
blob = cv2.dnn.blobFromImage(image, 1.0, (IMAGE_WIDTH, IMAGE_HEIGHT),
(104.0, 177.0, 123.0))
net.setInput(blob)
detections = net.forward()
dShape = detections.shape[2]
for i in range(0, dShape):
confidence = detections[0, 0, i, 2]
if confidence > 0.6:
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
(startX, startY) = (max(0, startX), max(0, startY))
(endX, endY) = (min(w - 1, endX), min(h - 1, endY))
face = image[startY:endY, startX:endX]
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
face = cv2.resize(face, (IMAGE_WIDTH, IMAGE_HEIGHT))
#face = keras.preprocessing.image.img_to_array(face)
face = keras.applications.mobilenet_v2.preprocess_input(face)
face = np.expand_dims(face, axis=0)
(mask, withoutMask) = model.predict(face)[0]
print("Mascarilla {} Sin mascarilla {}".format(mask, withoutMask))
label = "Mascarilla" if mask > withoutMask else "Sin Mascarilla"
color = (0, 255, 0) if label == "Mascarilla" else (0, 0, 255)
label = "{}: {:.2f}%".format(label, max(mask, withoutMask)
* 100, min(mask, withoutMask) * 100)
cv2.putText(image, label, (startX, startY - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 2)
cv2.rectangle(image, (startX, startY), (endX, endY), color, 1)
cv2.imshow("Out.png", image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break | PythonScripts/TensorFlow/tsDetector.py | import os
import matplotlib.pyplot as plt
import numpy as np
import argparse
import argparse
import cv2
import tensorflow as tf
from tensorflow import keras
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
IMAGE_WIDTH = 300
IMAGE_HEIGHT = 300
model = keras.models.load_model("C:/Users/jrj00/Desktop/tttt/tfg/modeltest/modelPb")
prototxtPath = os.path.sep.join([".", "deploy.prototxt.txt"])
weightsPath = os.path.sep.join([".", "res10_300x300_ssd_iter_140000.caffemodel"])
net = cv2.dnn.readNet(prototxtPath, weightsPath)
image = cv2.imread("C:/Users/jrj00/Desktop/tttt/58.jpg")
#orig = image.copy()
#video = cv2.VideoCapture(0)
while True:
#grab, image = video.read()
#video.release()
(h, w) = image.shape[:2]
blob = cv2.dnn.blobFromImage(image, 1.0, (IMAGE_WIDTH, IMAGE_HEIGHT),
(104.0, 177.0, 123.0))
net.setInput(blob)
detections = net.forward()
dShape = detections.shape[2]
for i in range(0, dShape):
confidence = detections[0, 0, i, 2]
if confidence > 0.6:
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
(startX, startY) = (max(0, startX), max(0, startY))
(endX, endY) = (min(w - 1, endX), min(h - 1, endY))
face = image[startY:endY, startX:endX]
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
face = cv2.resize(face, (IMAGE_WIDTH, IMAGE_HEIGHT))
#face = keras.preprocessing.image.img_to_array(face)
face = keras.applications.mobilenet_v2.preprocess_input(face)
face = np.expand_dims(face, axis=0)
(mask, withoutMask) = model.predict(face)[0]
print("Mascarilla {} Sin mascarilla {}".format(mask, withoutMask))
label = "Mascarilla" if mask > withoutMask else "Sin Mascarilla"
color = (0, 255, 0) if label == "Mascarilla" else (0, 0, 255)
label = "{}: {:.2f}%".format(label, max(mask, withoutMask)
* 100, min(mask, withoutMask) * 100)
cv2.putText(image, label, (startX, startY - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.75, color, 2)
cv2.rectangle(image, (startX, startY), (endX, endY), color, 1)
cv2.imshow("Out.png", image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break | 0.429071 | 0.168104 |
import logging
import sys
import os
class Loggable():
'''This class has methods to manage the logger object of its child class instances.'''
def __init__(self):
self.name = None
self.workdir = None
self.logfile_path = None
def _setup_logger(self, name, workdir):
'''Sets up the logger
Args:
name (str): name of the class instance
workdir (str): path to directory where log will be placed
Returns:
Logger: an instance of a logger object which will log to the log file of the instance as well as to std out
'''
self.name = name
self.workdir = workdir
self.logfile_path = os.path.join(self.workdir, self.name + '.log')
# log file handler
logger = logging.getLogger(self.name)
logger.propagate = False
if not logger.hasHandlers() or not os.path.exists(self.logfile_path):
f = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
fh = logging.FileHandler(self.logfile_path)
fh.setFormatter(f)
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
#log to stream handler
sh = logging.StreamHandler(stream=sys.stdout)
sh.setLevel(logging.DEBUG)
logger.addHandler(sh)
logger.setLevel(logging.DEBUG)
return logger
def _close_all_loggers(self):
'''Close all file handlers belonging to all loggers associated in the current logging tree'''
loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]
for logger in loggers:
for handler in logger.handlers:
if isinstance(handler, logging.FileHandler):
handler.close()
def get_log(self):
'''Get logs as str
Returns:
logs (str): what is written in the log file
'''
log_path = self.logfile_path
return open(log_path).read() | copinicoos/loggable.py | import logging
import sys
import os
class Loggable():
'''This class has methods to manage the logger object of its child class instances.'''
def __init__(self):
self.name = None
self.workdir = None
self.logfile_path = None
def _setup_logger(self, name, workdir):
'''Sets up the logger
Args:
name (str): name of the class instance
workdir (str): path to directory where log will be placed
Returns:
Logger: an instance of a logger object which will log to the log file of the instance as well as to std out
'''
self.name = name
self.workdir = workdir
self.logfile_path = os.path.join(self.workdir, self.name + '.log')
# log file handler
logger = logging.getLogger(self.name)
logger.propagate = False
if not logger.hasHandlers() or not os.path.exists(self.logfile_path):
f = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
fh = logging.FileHandler(self.logfile_path)
fh.setFormatter(f)
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
#log to stream handler
sh = logging.StreamHandler(stream=sys.stdout)
sh.setLevel(logging.DEBUG)
logger.addHandler(sh)
logger.setLevel(logging.DEBUG)
return logger
def _close_all_loggers(self):
'''Close all file handlers belonging to all loggers associated in the current logging tree'''
loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]
for logger in loggers:
for handler in logger.handlers:
if isinstance(handler, logging.FileHandler):
handler.close()
def get_log(self):
'''Get logs as str
Returns:
logs (str): what is written in the log file
'''
log_path = self.logfile_path
return open(log_path).read() | 0.42668 | 0.06078 |
import numpy as np
from math import log
class StateRelation(object):
"""
Abstract state relation object that contains the generally used atributes
in state relations (b,Dc).
Attributes
----------
b : float
Rate and state empirical parameter b.
Dc : float
Critical slip distance.
state : float
State variable.
"""
def __init__(self):
self.b = None
self.Dc = None
self.state = None
def velocity_component(self, system):
"""
General velocity contribution from a given state variable
Notes
-----
.. math::
V_\\text{contribution} = b \\text{ln}\\left(\\frac{V_0 \\theta}{D_c}\\right)
"""
return self.b * np.log(system.vref * self.state / self.Dc)
class DieterichState(StateRelation):
"""
The slowness or Dieterich state relation as proposed by [#Dieterich1979]_.
Notes
-----
.. math::
\\frac{d\\theta}{dt} = 1 - \\frac{V_\\text{slider} \\theta}{D_c}
.. [#Dieterich1979] <NAME>. "Modeling of rock friction: 1. Experimental
results and constitutive equations." Journal of Geophysical
Research: Solid Earth (19782012) 84.B5 (1979): 2161-2168.
"""
def __str__(self):
s = 'Dieterich State Relation\n'
s += f'b = {self.b}\n'
s += f'Dc = {self.Dc}\n'
return s
def set_steady_state(self, system):
self.state = self.Dc/system.vref
def evolve_state(self, system):
return 1. - system.v * self.state / self.Dc
class RuinaState(StateRelation):
"""
The slip or Ruina state relation as proposed by [#Ruina1983]_.
Notes
-----
.. math::
\\frac{d\theta}{dt} = -\\frac{V_\\text{slider} \\theta}{D_c}
\\text{ln}\left(\\frac{V_\\text{slider} \\theta}{D_c}\\right)
.. [#Ruina1983] <NAME>. "Slip instability and state variable friction laws."
J. geophys. Res 88.10 (1983): 359-10.
"""
def __str__(self):
s = 'Ruina State Relation\n'
s += f'b = {self.b}\n'
s += f'Dc = {self.Dc}\n'
return s
def set_steady_state(self, system):
self.state = self.Dc/system.vref
def evolve_state(self, system):
return -1 * (system.v * self.state / self.Dc) * log(system.v * self.state / self.Dc)
class PrzState(StateRelation):
"""
The PRZ state relation as proposed by [#PRZ1995]_:
Notes
-----
.. math::
\\frac{d\\theta}{dt} = 1 - \left(\\frac{V_\\text{slider} \\theta}{2D_c}\\right) ^2
.. [#PRZ1995] <NAME>., <NAME>., and <NAME>.
"Self-healing slip pulse on a frictional surface."
Journal of the Mechanics and Physics of Solids 43.9 (1995): 1461-1495.
"""
def __str__(self):
s = 'PRZ State Relation\n'
s += f'b = {self.b}\n'
s += f'Dc = {self.Dc}\n'
return s
def set_steady_state(self, system):
self.state = 2 * self.Dc / system.v
self.prz_vref = system.vref/(2*self.Dc)
def evolve_state(self, system):
return 1. - (system.v * self.state / (2 * self.Dc))**2
def velocity_component(self, system):
"""
Perrin-Rice velocity contribution
.. math::
V_\\text{contribution} = b \\text{ln}\left(V_{\\text{prz}0} \\theta\\right)
"""
return self.b * np.log(self.prz_vref * self.state)
class NagataState(StateRelation):
"""
The Nagata state relation as proposed by [#Nagata2012]_:
Notes
-----
.. math::
\\frac{d\\theta}{dt} = 1 - \\frac{V_\\text{slider} \\theta}{D_c}
- \\frac{c}{b}\\theta\\frac{d\mu}{dt}
.. [#Nagata2012] <NAME>., <NAME>., <NAME>., "A revised rate-and-state
-dependent friction law obtained by constraining constitutive and
evolution laws separately with laboratory data," Journal of Geophysical
Research: Solid Earth, vol 117, 2012.
"""
def __str__(self):
s = 'Nagata State Relation\n'
s += f'b = {self.b}\n'
s += f'c = {self.c}\n'
s += f'Dc = {self.Dc}\n'
return s
def __init__(self):
StateRelation.__init__(self)
self.c = None
def set_steady_state(self, system):
self.state = self.Dc / system.vref
def evolve_state(self, system):
return 1. - (system.v * self.state / self.Dc) - \
(self.c / self.b * self.state * system.dmu_dt) | rsfmodel/staterelations.py | import numpy as np
from math import log
class StateRelation(object):
"""
Abstract state relation object that contains the generally used atributes
in state relations (b,Dc).
Attributes
----------
b : float
Rate and state empirical parameter b.
Dc : float
Critical slip distance.
state : float
State variable.
"""
def __init__(self):
self.b = None
self.Dc = None
self.state = None
def velocity_component(self, system):
"""
General velocity contribution from a given state variable
Notes
-----
.. math::
V_\\text{contribution} = b \\text{ln}\\left(\\frac{V_0 \\theta}{D_c}\\right)
"""
return self.b * np.log(system.vref * self.state / self.Dc)
class DieterichState(StateRelation):
"""
The slowness or Dieterich state relation as proposed by [#Dieterich1979]_.
Notes
-----
.. math::
\\frac{d\\theta}{dt} = 1 - \\frac{V_\\text{slider} \\theta}{D_c}
.. [#Dieterich1979] <NAME>. "Modeling of rock friction: 1. Experimental
results and constitutive equations." Journal of Geophysical
Research: Solid Earth (19782012) 84.B5 (1979): 2161-2168.
"""
def __str__(self):
s = 'Dieterich State Relation\n'
s += f'b = {self.b}\n'
s += f'Dc = {self.Dc}\n'
return s
def set_steady_state(self, system):
self.state = self.Dc/system.vref
def evolve_state(self, system):
return 1. - system.v * self.state / self.Dc
class RuinaState(StateRelation):
"""
The slip or Ruina state relation as proposed by [#Ruina1983]_.
Notes
-----
.. math::
\\frac{d\theta}{dt} = -\\frac{V_\\text{slider} \\theta}{D_c}
\\text{ln}\left(\\frac{V_\\text{slider} \\theta}{D_c}\\right)
.. [#Ruina1983] <NAME>. "Slip instability and state variable friction laws."
J. geophys. Res 88.10 (1983): 359-10.
"""
def __str__(self):
s = 'Ruina State Relation\n'
s += f'b = {self.b}\n'
s += f'Dc = {self.Dc}\n'
return s
def set_steady_state(self, system):
self.state = self.Dc/system.vref
def evolve_state(self, system):
return -1 * (system.v * self.state / self.Dc) * log(system.v * self.state / self.Dc)
class PrzState(StateRelation):
"""
The PRZ state relation as proposed by [#PRZ1995]_:
Notes
-----
.. math::
\\frac{d\\theta}{dt} = 1 - \left(\\frac{V_\\text{slider} \\theta}{2D_c}\\right) ^2
.. [#PRZ1995] <NAME>., <NAME>., and <NAME>.
"Self-healing slip pulse on a frictional surface."
Journal of the Mechanics and Physics of Solids 43.9 (1995): 1461-1495.
"""
def __str__(self):
s = 'PRZ State Relation\n'
s += f'b = {self.b}\n'
s += f'Dc = {self.Dc}\n'
return s
def set_steady_state(self, system):
self.state = 2 * self.Dc / system.v
self.prz_vref = system.vref/(2*self.Dc)
def evolve_state(self, system):
return 1. - (system.v * self.state / (2 * self.Dc))**2
def velocity_component(self, system):
"""
Perrin-Rice velocity contribution
.. math::
V_\\text{contribution} = b \\text{ln}\left(V_{\\text{prz}0} \\theta\\right)
"""
return self.b * np.log(self.prz_vref * self.state)
class NagataState(StateRelation):
"""
The Nagata state relation as proposed by [#Nagata2012]_:
Notes
-----
.. math::
\\frac{d\\theta}{dt} = 1 - \\frac{V_\\text{slider} \\theta}{D_c}
- \\frac{c}{b}\\theta\\frac{d\mu}{dt}
.. [#Nagata2012] <NAME>., <NAME>., <NAME>., "A revised rate-and-state
-dependent friction law obtained by constraining constitutive and
evolution laws separately with laboratory data," Journal of Geophysical
Research: Solid Earth, vol 117, 2012.
"""
def __str__(self):
s = 'Nagata State Relation\n'
s += f'b = {self.b}\n'
s += f'c = {self.c}\n'
s += f'Dc = {self.Dc}\n'
return s
def __init__(self):
StateRelation.__init__(self)
self.c = None
def set_steady_state(self, system):
self.state = self.Dc / system.vref
def evolve_state(self, system):
return 1. - (system.v * self.state / self.Dc) - \
(self.c / self.b * self.state * system.dmu_dt) | 0.888463 | 0.562297 |
import os
from typing import Any, Dict
import torch
from transformers.file_utils import WEIGHTS_NAME
from sparseml.pytorch.optim.manager import ScheduledModifierManager
__all__ = [
"RECIPE_NAME",
"preprocess_state_dict",
"load_recipe",
]
RECIPE_NAME = "recipe.yaml"
def load_recipe(pretrained_model_name_or_path: str) -> str:
"""
Get path to recipe from the model directory
:param pretrained_model_name_or_path: path to model directory
:return: path to recipe
"""
recipe = None
if pretrained_model_name_or_path is not None:
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
if os.path.isdir(pretrained_model_name_or_path):
if os.path.isfile(os.path.join(pretrained_model_name_or_path, RECIPE_NAME)):
recipe = os.path.join(pretrained_model_name_or_path, RECIPE_NAME)
return recipe
def preprocess_state_dict(pretrained_model_name_or_path: str) -> Dict[str, Any]:
    """
    Restore original parameter names that were changed by QAT process

    QAT wraps quantized modules, which adds a ``.module`` segment to their
    weight/bias keys; when the saved recipe contains a QuantizationModifier,
    that segment is stripped so the checkpoint matches the un-wrapped model.

    :param pretrained_model_name_or_path: name or path to model
    :return: the (possibly rewritten) state dict, or None when no local
        checkpoint directory / weights file is found
    """
    state_dict = None
    if pretrained_model_name_or_path is not None:
        pretrained_model_name_or_path = str(pretrained_model_name_or_path)
        if os.path.isdir(pretrained_model_name_or_path):
            if os.path.isfile(os.path.join(pretrained_model_name_or_path, RECIPE_NAME)):
                # A recipe is present: inspect it to see whether quantization
                # (QAT) was applied during training.
                recipe = os.path.join(pretrained_model_name_or_path, RECIPE_NAME)
                manager = ScheduledModifierManager.from_yaml(recipe)
                modifiers = [m.__class__.__name__ for m in manager.modifiers]
                is_qat_recipe = "QuantizationModifier" in modifiers
            else:
                is_qat_recipe = False
            if os.path.isfile(
                os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
            ):
                archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
                # map_location="cpu": the state dict is only rewritten here,
                # never executed on a device.
                state_dict = torch.load(archive_file, map_location="cpu")
                # Only QAT checkpoints carry the ".module" wrapper keys.
                removed_keys = (
                    [
                        key
                        for key in state_dict
                        if (
                            key.endswith(".module.weight")
                            or key.endswith(".module.bias")
                        )
                    ]
                    if is_qat_recipe
                    else []
                )
                for key in removed_keys:
                    new_key = key.replace(".module", "")
                    state_dict[new_key] = state_dict[key]
                    state_dict.pop(key)
    # FIX: dataset-extraction residue fused onto this return line in the dump
    # was stripped so the function parses.
    return state_dict
from typing import Any, Dict
import torch
from transformers.file_utils import WEIGHTS_NAME
from sparseml.pytorch.optim.manager import ScheduledModifierManager
__all__ = [
"RECIPE_NAME",
"preprocess_state_dict",
"load_recipe",
]
RECIPE_NAME = "recipe.yaml"
def load_recipe(pretrained_model_name_or_path: str) -> str:
"""
Get path to recipe from the model directory
:param pretrained_model_name_or_path: path to model directory
:return: path to recipe
"""
recipe = None
if pretrained_model_name_or_path is not None:
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
if os.path.isdir(pretrained_model_name_or_path):
if os.path.isfile(os.path.join(pretrained_model_name_or_path, RECIPE_NAME)):
recipe = os.path.join(pretrained_model_name_or_path, RECIPE_NAME)
return recipe
def preprocess_state_dict(pretrained_model_name_or_path: str) -> Dict[str, Any]:
"""
Restore original parameter names that were changed by QAT process
:param pretrained_model_name_or_path: name or path to model
"""
state_dict = None
if pretrained_model_name_or_path is not None:
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
if os.path.isdir(pretrained_model_name_or_path):
if os.path.isfile(os.path.join(pretrained_model_name_or_path, RECIPE_NAME)):
recipe = os.path.join(pretrained_model_name_or_path, RECIPE_NAME)
manager = ScheduledModifierManager.from_yaml(recipe)
modifiers = [m.__class__.__name__ for m in manager.modifiers]
is_qat_recipe = "QuantizationModifier" in modifiers
else:
is_qat_recipe = False
if os.path.isfile(
os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
):
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
state_dict = torch.load(archive_file, map_location="cpu")
removed_keys = (
[
key
for key in state_dict
if (
key.endswith(".module.weight")
or key.endswith(".module.bias")
)
]
if is_qat_recipe
else []
)
for key in removed_keys:
new_key = key.replace(".module", "")
state_dict[new_key] = state_dict[key]
state_dict.pop(key)
return state_dict | 0.669421 | 0.154631 |
from benker.box import Box
#: Default tiles used to draw a :class:`~benker.grid.Grid`.
#:
#: Keys are tuples (*left*, *top*, *right*, *bottom*) : which represent the
#: presence (if ``True``) or absence (if ``False``) : of the border.
#: Values are the string representation of the tiles,
#: "XXXXXXXXX" will be replaced by the cell content.
TILES = {
(True, True, True, True): """\
+-----------+
| XXXXXXXXX |
+-----------+
""",
(True, True, True, False): """\
+-----------+
| XXXXXXXXX |
""",
(True, True, False, True): """\
+-----------
| XXXXXXXXX
+-----------
""",
(True, True, False, False): """\
+-----------
| XXXXXXXXX
""",
(True, False, True, True): """\
| |
| XXXXXXXXX |
+-----------+
""",
(True, False, True, False): """\
| |
| XXXXXXXXX |
""",
(True, False, False, True): """\
|
| XXXXXXXXX
+-----------
""",
(True, False, False, False): """\
|
| XXXXXXXXX
""",
(False, True, True, True): """\
------------+
XXXXXXXXX |
------------+
""",
(False, True, True, False): """\
------------+
XXXXXXXXX |
""",
(False, True, False, True): """\
------------
XXXXXXXXX
------------
""",
(False, True, False, False): """\
------------
XXXXXXXXX
""",
(False, False, True, True): """\
|
XXXXXXXXX |
------------+
""",
(False, False, True, False): """\
|
XXXXXXXXX |
""",
(False, False, False, True): """\
XXXXXXXXX
------------
""",
(False, False, False, False): """\
XXXXXXXXX
""",
}
def iter_tiles(grid, tiles=None):
    """Yield one list of ASCII tiles per row of *grid*.

    Each tile is chosen from *tiles* (default :data:`TILES`) according to
    which borders of the owning cell box touch the current position; the
    "XXXXXXXXX" placeholder is replaced by the cell text at the box centre,
    and by blanks elsewhere.
    """
    tile_map = tiles or TILES
    bb = grid.bounding_box
    for y in range(bb.min.y, bb.max.y + 1):
        tile_row = []
        for x in range(bb.min.x, bb.max.x + 1):
            if (x, y) in grid:
                cell = grid[(x, y)]
                box, label = cell.box, str(cell)
            else:
                # Position not owned by any cell: an empty 1x1 box.
                box, label = Box(x, y), ""
            key = (box.min.x == x, box.min.y == y, bb.max.x == x, bb.max.y == y)
            tile = tile_map[key]
            at_centre = ((box.min.x + box.max.x) // 2 == x
                         and (box.min.y + box.max.y) // 2 == y)
            filler = "{0:^9}".format(label)[:9] if at_centre else " " * 9
            tile_row.append(tile.replace('XXXXXXXXX', filler))
        yield tile_row
def iter_lines(grid, tiles=None):
    """Yield the grid drawing line by line.

    For each row of tiles produced by :func:`iter_tiles`, joins the matching
    text line of every tile in the row (empty lines are dropped first).
    """
    for tile_row in iter_tiles(grid, tiles):
        split_tiles = [[ln for ln in t.splitlines() if ln] for t in tile_row]
        for line_idx in range(len(split_tiles[0])):
            yield "".join(t[line_idx] for t in split_tiles)
def draw(grid, tiles=None):
    """
    Draw a grid using a collection of tiles.

    :type grid: benker.grid.Grid
    :param grid: Grid to draw.
    :param tiles:
        Collection of tiles, use :data:`~benker.drawing.TILES` if not provided.
    :return: String representation of the grid ("" for an empty/falsy grid).
    """
    # FIX: dataset-extraction residue fused onto the final line in the dump
    # was stripped so the function parses.
    if not grid:
        return ""
    return "\n".join(iter_lines(grid, tiles))
#: Default tiles used to draw a :class:`~benker.grid.Grid`.
#:
#: Keys are tuples (*left*, *top*, *right*, *bottom*) : which represent the
#: presence (if ``True``) or absence (if ``False``) : of the border.
#: Values are the string representation of the tiles,
#: "XXXXXXXXX" will be replaced by the cell content.
TILES = {
(True, True, True, True): """\
+-----------+
| XXXXXXXXX |
+-----------+
""",
(True, True, True, False): """\
+-----------+
| XXXXXXXXX |
""",
(True, True, False, True): """\
+-----------
| XXXXXXXXX
+-----------
""",
(True, True, False, False): """\
+-----------
| XXXXXXXXX
""",
(True, False, True, True): """\
| |
| XXXXXXXXX |
+-----------+
""",
(True, False, True, False): """\
| |
| XXXXXXXXX |
""",
(True, False, False, True): """\
|
| XXXXXXXXX
+-----------
""",
(True, False, False, False): """\
|
| XXXXXXXXX
""",
(False, True, True, True): """\
------------+
XXXXXXXXX |
------------+
""",
(False, True, True, False): """\
------------+
XXXXXXXXX |
""",
(False, True, False, True): """\
------------
XXXXXXXXX
------------
""",
(False, True, False, False): """\
------------
XXXXXXXXX
""",
(False, False, True, True): """\
|
XXXXXXXXX |
------------+
""",
(False, False, True, False): """\
|
XXXXXXXXX |
""",
(False, False, False, True): """\
XXXXXXXXX
------------
""",
(False, False, False, False): """\
XXXXXXXXX
""",
}
def iter_tiles(grid, tiles=None):
tiles = tiles or TILES
bb = grid.bounding_box
for row_idx in range(bb.min.y, bb.max.y + 1):
row = []
for col_idx in range(bb.min.x, bb.max.x + 1):
coord = col_idx, row_idx
if coord in grid:
cell = grid[coord]
box = cell.box
text = str(cell)
else:
box = Box(col_idx, row_idx)
text = ""
left = box.min.x == col_idx
top = box.min.y == row_idx
right = bb.max.x == col_idx
bottom = bb.max.y == row_idx
tile = tiles[(left, top, right, bottom)]
if (box.min.x + box.max.x) // 2 == col_idx and (box.min.y + box.max.y) // 2 == row_idx:
title = "{0:^9}".format(str(text))[:9]
tile = tile.replace('XXXXXXXXX', title)
else:
tile = tile.replace('XXXXXXXXX', ' ' * 9)
row.append(tile)
yield row
def iter_lines(grid, tiles=None):
for row in iter_tiles(grid, tiles):
tiles = [list(filter(None, tile.splitlines())) for tile in row]
size = len(tiles[0])
for index in range(size):
yield "".join(tile[index] for tile in tiles)
def draw(grid, tiles=None):
"""
Draw a grid using a collection of tiles.
:type grid: benker.grid.Grid
:param grid: Grid to draw.
:param tiles:
Collection of tiles, use :data:`~benker.drawing.TILES` if not provided.
:return: String representation of the grid.
"""
if grid:
return "\n".join(iter_lines(grid, tiles))
return "" | 0.880912 | 0.438244 |
from commandTree import commandTree
import re
import traceback
class msgParse:
    """Parses incoming chat messages and dispatches registered commands and
    regex triggers.

    Commands live in a ``commandTree``; triggers map a compiled regex onto an
    existing command path, appending the full regex match as the last
    argument. (Dataset-extraction residue fused onto the final line of this
    class in the dump was stripped so the file parses.)
    """

    def __init__(self):
        # Root of the command tree; commands are grafted on via
        # registerCommand().
        self._command = commandTree(None, None, None, None)
        self._trigger = []
        self.registerCommand([(u"help", "Displays the help for registered Commands", self._help)])

    def _getPermission(self, fromUser):  # TODO!
        # Placeholder: every user currently gets permission level 1.
        return 1

    def _executeCommand(self, cmd, fromUser):
        """Look up *cmd* in the command tree and execute it.

        :return: a (text, level) tuple, or None when no matching command
            (or no executable function) exists.
        """
        (c, param) = self._command.findByName(cmd[:])
        if c is None or c._executeFunction is None:
            return None
        if not c.isPermitted(self._getPermission(fromUser)):
            return ("You are lacking the permission to execute this Command", 2)
        try:
            return c._executeFunction(param, fromUser)
        except Exception:
            # Surface the traceback to the caller instead of crashing.
            return (traceback.format_exc(), 3)

    def _executeTrigger(self, messageText, fromUser):
        """Run every registered regex trigger against *messageText*.

        :return: joined (text, level) of all firing triggers, or None.
        """
        returnLines = []
        returnRange = 1
        for pattern, command_path in self._trigger:
            for mat in pattern.finditer(messageText):
                result = self._executeCommand(command_path + [mat.group(0)], fromUser)
                if result is None:
                    # ROBUSTNESS FIX: a trigger whose command disappeared used
                    # to crash with TypeError on None; skip it instead.
                    continue
                returnLines.append(result[0])
                returnRange = max(returnRange, result[1])
        if returnLines:
            return ("\n".join(returnLines), returnRange)
        return None

    def parseMessage(self, message):
        """Dispatch *message*: try an exact command first, then triggers."""
        messageText = message['body']
        comRet = self._executeCommand(messageText.split(" "), message.get_from())
        if comRet is None:
            comRet = self._executeTrigger(messageText, message.get_from())
        return comRet

    def registerCommand(self, cmd):
        """Register a command path; *cmd* is a list of (name, help, func) tuples.

        :return: True on success, False when integration fails.
        """
        try:
            cmdname = " ".join([str(c[0]) for c in cmd])
            self._command.integrateCMD(cmd)
            print("Command has been registered: %s" % (cmdname))
            return True
        except Exception as e:
            # BUG FIX: the original printed Exception.message, which does not
            # exist in Python 3 and raised AttributeError inside the handler,
            # so registration failures crashed instead of returning False.
            print(e)
            return False

    def registerTrigger(self, regex, cmd):
        """Map *regex* onto the existing command path *cmd*."""
        # NOTE(review): findByName appears to return a tuple even on a miss
        # (see _executeCommand), so this guard may always pass — verify
        # against commandTree.
        if self._command.findByName(cmd[:]) is not None:
            newTrigger = (re.compile(regex), cmd)
            self._trigger.append(newTrigger)
            print("Trigger has been registered: %s maps to %s" % (regex, cmd))
            return True
        print("Trigger has not been registered: Command missing")
        return False

    def _help(self, cmd, fromUser):
        """Return the help text (manpage) for *cmd*, or for 'help' itself."""
        (c, param) = self._command.findByName(cmd)
        if c is not None:
            return ("Help:\n\n.\n" + c.manpage(-1), 1)
        return self._help(["help"], fromUser)
import re
import traceback
class msgParse:
def __init__(self):
self._command = commandTree(None, None, None, None)
self._trigger = []
self.registerCommand([(u"help", "Displays the help for registered Commands", self._help)])
pass
def _getPermission(self, fromUser): # TODO!
return 1
def _executeCommand(self, cmd, fromUser):
(c, param) = self._command.findByName(cmd[:])
if c is not None and c._executeFunction is not None:
if not c.isPermitted(self._getPermission(fromUser)):
return ("You are lacking the permission to execute this Command", 2)
try:
return c._executeFunction(param, fromUser)
except Exception as e:
return (traceback.format_exc(), 3)
else:
return None
def _executeTrigger(self, messageText, fromUser):
returnLines = []
returnRange = 1
for trigger in self._trigger:
for mat in trigger[0].finditer(messageText):
tmpreturn = self._executeCommand(trigger[1] + [mat.group(0)], fromUser)
returnLines.append(tmpreturn[0])
returnRange = tmpreturn[1] if tmpreturn[1] >= returnRange else returnRange
if len(returnLines) > 0:
return ("\n".join(returnLines), returnRange)
return None
def parseMessage(self, message):
messageText = message['body']
comRet = self._executeCommand(messageText.split(" "), message.get_from()) #['from'])
if comRet is None:
comRet = self._executeTrigger(messageText, message.get_from())
return comRet
def registerCommand(self, cmd):
try:
cmdname = " ".join([str(c[0]) for c in cmd])
self._command.integrateCMD(cmd)
print("Command has been registered: %s" % (cmdname))
return True
except Exception:
print(Exception.message)
return False
def registerTrigger(self, regex, cmd):
if self._command.findByName(cmd[:]) is not None:
newTrigger = (re.compile(regex), cmd)
self._trigger.append(newTrigger)
print("Trigger has been registered: %s maps to %s" % (regex, cmd))
return True
print("Trigger has not been registered: Command missing")
return False
def _help(self, cmd, fromUser):
(c, param) = self._command.findByName(cmd)
if c is not None:
return ("Help:\n\n.\n" + c.manpage(-1), 1)
return self._help(["help"], fromUser) | 0.218253 | 0.067701 |
from dataclasses import dataclass
from logging import getLogger
import cv2
import numpy
from PIL import Image
from app.library.pillow import pil2cv
def matching_template(image, templ, *, method=cv2.TM_CCOEFF_NORMED):
    """Run OpenCV template matching of *templ* over *image*.

    :return: the raw score map produced by :func:`cv2.matchTemplate`.
    """
    score_map = cv2.matchTemplate(image, templ, method)
    return score_map
def multi_scale_matching_template(image,
                                  templ,
                                  linspace,
                                  *,
                                  method=cv2.TM_CCOEFF_NORMED,
                                  logger=None,
                                  debug=False):
    """Locate *templ* inside *image* across the scales in *linspace*.

    Both images are converted to grayscale before matching.

    :return: ((startX, startY), (endX, endY)) in original-image coordinates,
        or None when the template fits at no scale.
    """
    logger = logger or getLogger(__name__)
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray_templ = cv2.cvtColor(templ, cv2.COLOR_BGR2GRAY)
    return _multi_scale_matching_template_impl(gray_image,
                                               gray_templ,
                                               linspace,
                                               method=method)
def _multi_scale_matching_template_impl(image,
                                        templ,
                                        linspace,
                                        *,
                                        method=cv2.TM_CCOEFF_NORMED):
    """Scan the scales in *linspace* (largest first) and keep the best match.

    :return: the bounding box ((startX, startY), (endX, endY)) mapped back
        to original-image coordinates, or None if the template never fits.
    """
    tH, tW = templ.shape[:2]
    best = None
    for scale in linspace[::-1]:
        resized = resize(image, int(image.shape[1] * scale))
        ratio = image.shape[1] / float(resized.shape[1])
        # Once the resized image is smaller than the template, every later
        # (smaller) scale will be too — stop scanning.
        if resized.shape[0] < tH or resized.shape[1] < tW:
            break
        score_map = matching_template(resized, templ, method=method)
        _, max_val, _, max_loc = cv2.minMaxLoc(score_map)
        if best is None or max_val > best[0]:
            best = (max_val, max_loc, ratio)
    if best is None:
        return None
    _, max_loc, ratio = best
    start = (int(max_loc[0] * ratio), int(max_loc[1] * ratio))
    end = (int((max_loc[0] + tW) * ratio), int((max_loc[1] + tH) * ratio))
    return start, end
@dataclass(frozen=True)
class MultiScaleMatchingTemplateResult:
    """Raw template-matching score map produced at a single search scale."""

    # Width ratio mapping resized-image coordinates back to the original.
    ratio: float
    # Score map returned by cv2.matchTemplate (a numpy array in practice —
    # confirm with callers). FIX: the original annotated this field with the
    # list *literal* ``[]``, which is not a valid type annotation.
    result: object
def multi_scale_matching_template_impl(
    image: Image,
    templ: Image,
    *,
    linspace=numpy.linspace(1.0, 1.1, 10),
    method=cv2.TM_CCOEFF_NORMED,
):
    """Collect the raw score map at every scale, not only the best hit.

    :return: list of :class:`MultiScaleMatchingTemplateResult`, one per
        scale the template still fits into (largest scale first).
    """
    cv_image = pil2cv(image)
    cv_templ = pil2cv(templ)
    tH, tW = cv_templ.shape[:2]
    gray_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
    gray_templ = cv2.cvtColor(cv_templ, cv2.COLOR_BGR2GRAY)
    collected = []
    for scale in linspace[::-1]:
        resized = resize(gray_image, int(gray_image.shape[1] * scale))
        ratio = gray_image.shape[1] / float(resized.shape[1])
        if resized.shape[0] < tH or resized.shape[1] < tW:
            break
        collected.append(MultiScaleMatchingTemplateResult(
            ratio, matching_template(resized, gray_templ, method=method)))
    return collected
def resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    """Resize *image* preserving its aspect ratio.

    Exactly one of *width*/*height* should be given; if both are None the
    image is returned unchanged; if both are given, *height* wins (matching
    the original behavior).

    :param inter: OpenCV interpolation flag (default ``cv2.INTER_AREA``).
    :return: the resized image (or *image* itself when no size was given).
    """
    (h, w) = image.shape[:2]
    if width is None and height is None:
        return image
    if width is None:
        r = height / float(h)
        dim = (int(w * r), height)
    else:
        r = width / float(w)
        dim = (width, int(h * r))
    # BUG FIX: the original called cv2.resize(image, dim, inter), passing the
    # interpolation flag as the positional third argument, which is `dst`,
    # not `interpolation`. Pass the flag by keyword. (Also stripped the
    # dataset-extraction residue fused onto this line in the dump.)
    return cv2.resize(image, dim, interpolation=inter)
from logging import getLogger
import cv2
import numpy
from PIL import Image
from app.library.pillow import pil2cv
def matching_template(image, templ, *, method=cv2.TM_CCOEFF_NORMED):
return cv2.matchTemplate(image, templ, method)
def multi_scale_matching_template(image,
templ,
linspace,
*,
method=cv2.TM_CCOEFF_NORMED,
logger=None,
debug=False):
logger = logger or getLogger(__name__)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
templ = cv2.cvtColor(templ, cv2.COLOR_BGR2GRAY)
loc = _multi_scale_matching_template_impl(image,
templ,
linspace,
method=method)
return loc
def _multi_scale_matching_template_impl(image,
templ,
linspace,
*,
method=cv2.TM_CCOEFF_NORMED):
(tH, tW) = templ.shape[:2]
found = None
for scale in linspace[::-1]:
resized = resize(image, int(image.shape[1] * scale))
r = image.shape[1] / float(resized.shape[1])
if resized.shape[0] < tH or resized.shape[1] < tW:
break
result = matching_template(resized, templ, method=method)
(_, maxVal, _, maxLoc) = cv2.minMaxLoc(result)
if found is None or maxVal > found[0]:
found = (maxVal, maxLoc, r)
if found is None:
return None
(_, maxLoc, r) = found
(startX, startY) = (int(maxLoc[0] * r), int(maxLoc[1] * r))
(endX, endY) = (int((maxLoc[0] + tW) * r), int((maxLoc[1] + tH) * r))
return (startX, startY), (endX, endY)
@dataclass(frozen=True)
class MultiScaleMatchingTemplateResult:
ratio: float
result: []
def multi_scale_matching_template_impl(
image: Image,
templ: Image,
*,
linspace=numpy.linspace(1.0, 1.1, 10),
method=cv2.TM_CCOEFF_NORMED,
):
cv2_image = pil2cv(image)
cv2_templ = pil2cv(templ)
(tH, tW) = cv2_templ.shape[:2]
gray_cv2_image = cv2.cvtColor(cv2_image, cv2.COLOR_BGR2GRAY)
gray_cv2_templ = cv2.cvtColor(cv2_templ, cv2.COLOR_BGR2GRAY)
results = []
for scale in linspace[::-1]:
resized = resize(gray_cv2_image, int(gray_cv2_image.shape[1] * scale))
r = gray_cv2_image.shape[1] / float(resized.shape[1])
if resized.shape[0] < tH or resized.shape[1] < tW:
break
result = matching_template(resized, gray_cv2_templ, method=method)
results.append(MultiScaleMatchingTemplateResult(r, result))
return results
def resize(image, width=None, height=None, inter=cv2.INTER_AREA):
(h, w) = image.shape[:2]
if width is None and height is None:
return image
if width is None:
r = height / float(h)
dim = (int(w * r), height)
else:
r = width / float(w)
dim = (width, int(h * r))
return cv2.resize(image, dim, inter) | 0.696371 | 0.219651 |
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import str
from textwrap import dedent
from pants.backend.python.targets.python_library import PythonLibrary
from pants.base.exceptions import TaskError
from pants_test.backend.python.tasks.python_task_test_base import PythonTaskTestBase
from pants.contrib.python.checks.tasks.checkstyle.checker import PythonCheckStyleTask
from pants.contrib.python.checks.tasks.checkstyle.variable_names_subsystem import \
VariableNamesSubsystem
class PythonCheckStyleTaskTest(PythonTaskTestBase):
    """End-to-end tests for PythonCheckStyleTask using the variable-names plugin.

    FIX: dataset-extraction residue fused after ``str(nits[0]))`` on the final
    line of this class in the dump was stripped so the file parses.
    NOTE(review): the dump's indentation (including inside the dedent()
    literals and expected nit strings) was mangled; relative indentation was
    reconstructed conventionally — confirm against upstream.
    """

    @classmethod
    def task_type(cls):
        return PythonCheckStyleTask

    def setUp(self):
        super(PythonCheckStyleTaskTest, self).setUp()
        # Start from a clean plugin registry so tests do not leak plugins
        # into each other.
        PythonCheckStyleTask.clear_plugins()
        PythonCheckStyleTask.register_plugin('variable-names', VariableNamesSubsystem)

    def tearDown(self):
        super(PythonCheckStyleTaskTest, self).tearDown()
        PythonCheckStyleTask.clear_plugins()

    def test_no_sources(self):
        # With no targets, execute() short-circuits and returns None.
        task = self.create_task(self.context())
        self.assertEqual(None, task.execute())

    def test_pass(self):
        # An UpperCamelCase class name satisfies the variable-names checker.
        self.create_file('a/python/pass.py', contents=dedent("""
            class UpperCase:
                pass
            """))
        target = self.make_target('a/python:pass', PythonLibrary, sources=['pass.py'])
        context = self.context(target_roots=[target])
        task = self.create_task(context)
        self.assertEqual(0, task.execute())

    def test_failure(self):
        # A snake_case class name violates the checker and raises TaskError.
        self.create_file('a/python/fail.py', contents=dedent("""
            class lower_case:
                pass
            """))
        target = self.make_target('a/python:fail', PythonLibrary, sources=['fail.py'])
        context = self.context(target_roots=[target])
        task = self.create_task(context)
        with self.assertRaises(TaskError) as task_error:
            task.execute()
        self.assertIn('1 Python Style issues found', str(task_error.exception))

    def test_suppressed_file_passes(self):
        self.create_file('a/python/fail.py', contents=dedent("""
            class lower_case:
                pass
            """))
        # The suppression file exempts this path from the variable-names check.
        suppression_file = self.create_file('suppress.txt', contents=dedent("""
            a/python/fail\.py::variable-names"""))
        target = self.make_target('a/python:fail', PythonLibrary, sources=['fail.py'])
        self.set_options(suppress=suppression_file)
        context = self.context(target_roots=[target], )
        task = self.create_task(context)
        self.assertEqual(0, task.execute())

    def test_failure_fail_false(self):
        # With fail=False a violation is reported via exit code, not TaskError.
        self.create_file('a/python/fail.py', contents=dedent("""
            class lower_case:
                pass
            """))
        target = self.make_target('a/python:fail', PythonLibrary, sources=['fail.py'])
        self.set_options(fail=False)
        context = self.context(target_roots=[target])
        task = self.create_task(context)
        self.assertEqual(1, task.execute())

    def test_syntax_error(self):
        # Unparseable sources are reported as style failures, not crashes.
        self.create_file('a/python/error.py', contents=dedent("""
            invalid python
            """))
        target = self.make_target('a/python:error', PythonLibrary, sources=['error.py'])
        self.set_options(fail=False)
        context = self.context(target_roots=[target])
        task = self.create_task(context)
        self.assertEqual(1, task.execute())

    def test_failure_print_nit(self):
        self.create_file('a/python/fail.py', contents=dedent("""
            class lower_case:
                pass
            """))
        target = self.make_target('a/python:fail', PythonLibrary, sources=['fail.py'])
        context = self.context(target_roots=[target])
        task = self.create_task(context)
        nits = list(task.get_nits('a/python/fail.py'))
        self.assertEqual(1, len(nits))
        self.assertEqual(
            """T000:ERROR a/python/fail.py:002 Classes must be UpperCamelCased\n"""
            """ |class lower_case:""",
            str(nits[0]))

    def test_syntax_error_nit(self):
        self.create_file('a/python/error.py', contents=dedent("""
            invalid python
            """))
        target = self.make_target('a/python:error', PythonLibrary, sources=['error.py'])
        self.set_options(fail=False)
        context = self.context(target_roots=[target])
        task = self.create_task(context)
        nits = list(task.get_nits('a/python/error.py'))
        self.assertEqual(1, len(nits))
        self.assertEqual("""E901:ERROR a/python/error.py:002 SyntaxError: invalid syntax\n"""
                         """ |\n"""
                         """ |invalid python\n"""
                         """ |""",
                         str(nits[0]))
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import str
from textwrap import dedent
from pants.backend.python.targets.python_library import PythonLibrary
from pants.base.exceptions import TaskError
from pants_test.backend.python.tasks.python_task_test_base import PythonTaskTestBase
from pants.contrib.python.checks.tasks.checkstyle.checker import PythonCheckStyleTask
from pants.contrib.python.checks.tasks.checkstyle.variable_names_subsystem import \
VariableNamesSubsystem
class PythonCheckStyleTaskTest(PythonTaskTestBase):
@classmethod
def task_type(cls):
return PythonCheckStyleTask
def setUp(self):
super(PythonCheckStyleTaskTest, self).setUp()
PythonCheckStyleTask.clear_plugins()
PythonCheckStyleTask.register_plugin('variable-names', VariableNamesSubsystem)
def tearDown(self):
super(PythonCheckStyleTaskTest, self).tearDown()
PythonCheckStyleTask.clear_plugins()
def test_no_sources(self):
task = self.create_task(self.context())
self.assertEqual(None, task.execute())
def test_pass(self):
self.create_file('a/python/pass.py', contents=dedent("""
class UpperCase:
pass
"""))
target = self.make_target('a/python:pass', PythonLibrary, sources=['pass.py'])
context = self.context(target_roots=[target])
task = self.create_task(context)
self.assertEqual(0, task.execute())
def test_failure(self):
self.create_file('a/python/fail.py', contents=dedent("""
class lower_case:
pass
"""))
target = self.make_target('a/python:fail', PythonLibrary, sources=['fail.py'])
context = self.context(target_roots=[target])
task = self.create_task(context)
with self.assertRaises(TaskError) as task_error:
task.execute()
self.assertIn('1 Python Style issues found', str(task_error.exception))
def test_suppressed_file_passes(self):
self.create_file('a/python/fail.py', contents=dedent("""
class lower_case:
pass
"""))
suppression_file = self.create_file('suppress.txt', contents=dedent("""
a/python/fail\.py::variable-names"""))
target = self.make_target('a/python:fail', PythonLibrary, sources=['fail.py'])
self.set_options(suppress=suppression_file)
context = self.context(target_roots=[target], )
task = self.create_task(context)
self.assertEqual(0, task.execute())
def test_failure_fail_false(self):
self.create_file('a/python/fail.py', contents=dedent("""
class lower_case:
pass
"""))
target = self.make_target('a/python:fail', PythonLibrary, sources=['fail.py'])
self.set_options(fail=False)
context = self.context(target_roots=[target])
task = self.create_task(context)
self.assertEqual(1, task.execute())
def test_syntax_error(self):
self.create_file('a/python/error.py', contents=dedent("""
invalid python
"""))
target = self.make_target('a/python:error', PythonLibrary, sources=['error.py'])
self.set_options(fail=False)
context = self.context(target_roots=[target])
task = self.create_task(context)
self.assertEqual(1, task.execute())
def test_failure_print_nit(self):
self.create_file('a/python/fail.py', contents=dedent("""
class lower_case:
pass
"""))
target = self.make_target('a/python:fail', PythonLibrary, sources=['fail.py'])
context = self.context(target_roots=[target])
task = self.create_task(context)
nits = list(task.get_nits('a/python/fail.py'))
self.assertEqual(1, len(nits))
self.assertEqual(
"""T000:ERROR a/python/fail.py:002 Classes must be UpperCamelCased\n"""
""" |class lower_case:""",
str(nits[0]))
def test_syntax_error_nit(self):
self.create_file('a/python/error.py', contents=dedent("""
invalid python
"""))
target = self.make_target('a/python:error', PythonLibrary, sources=['error.py'])
self.set_options(fail=False)
context = self.context(target_roots=[target])
task = self.create_task(context)
nits = list(task.get_nits('a/python/error.py'))
self.assertEqual(1, len(nits))
self.assertEqual("""E901:ERROR a/python/error.py:002 SyntaxError: invalid syntax\n"""
""" |\n"""
""" |invalid python\n"""
""" |""",
str(nits[0])) | 0.687735 | 0.189277 |
import pytest
from typing import Dict
import os
from ml_gym.blueprints.component_factory import ComponentFactory, Injector
from outlier_detection.constructables.constructables import CustomDatasetRepositoryConstructable, AtisFactoryConstructable
from outlier_detection.blueprints.mlp_blueprint import MLPCollator
from data_stack.dataset.reporting import DatasetIteratorReportGenerator
from ml_gym.io.config_parser import YAMLConfigLoader
def is_ci_deployment() -> bool:
    """Return True when running inside the CI deployment environment.

    Decided by the ``od_deployment_type`` environment variable being
    exactly ``"CI"``.
    """
    deployment_type = os.getenv("od_deployment_type")
    return deployment_type == "CI"
class TestAtisIterator:
    """Fixture-driven integration tests for the ATIS dataset pipeline.

    All heavy tests are skipped under CI (see :func:`is_ci_deployment`).
    FIX: dataset-extraction residue fused onto the final ``print(report)``
    line in the dump was stripped so the file parses.
    """

    @pytest.fixture
    def full_config(self) -> Dict:
        # Load the pipeline definition that sits next to this test module.
        directory = os.path.dirname(os.path.abspath(__file__))
        config_path = os.path.join(directory, "atis_full_config.yml")
        config = YAMLConfigLoader.load(config_path)
        return config

    @pytest.fixture
    def components(self, full_config) -> Dict:
        # Build the full component graph from the YAML config, injecting the
        # MLP collator and registering the ATIS-specific constructables.
        component_names = list(full_config.keys())
        injection_mapping = {"id_mlp_standard_collator": MLPCollator}
        injector = Injector(injection_mapping)
        component_factory = ComponentFactory(injector)
        component_factory.register_component_type("DATASET_REPOSITORY", "DEFAULT", CustomDatasetRepositoryConstructable)
        component_factory.register_component_type("DATASET_FACTORY", "ATIS", AtisFactoryConstructable)
        components = component_factory.build_components_from_config(full_config, component_names)
        return components

    @pytest.mark.skipif(is_ci_deployment(), reason="CI deployment")
    def test_dataset_iterator_from_constructable(self, components):
        train_dataset_iterator = components["dataset_iterators"]["train"]
        val_dataset_iterator = components["dataset_iterators"]["val"]
        test_dataset_iterator = components["dataset_iterators"]["test"]
        sample, target, tag = train_dataset_iterator[0]
        # Expected split sizes of the ATIS corpus used by this project.
        assert len(train_dataset_iterator) == 4274
        assert len(val_dataset_iterator) == 572
        assert len(test_dataset_iterator) == 586
        assert list(sample.shape) == [100] and isinstance(target, str)

    @pytest.mark.skipif(is_ci_deployment(), reason="CI deployment")
    def test_mapped_to_outlier_labels_iterator_from_constructable(self, components: Dict, full_config: Dict):
        # After mapping, only the configured "new" labels may remain; the
        # labels that were mapped away must not appear in any sample.
        dataset_iterators = components["mapped_to_outlier_labels_iterator"]
        previous_labels = [label for mapping in full_config["mapped_to_outlier_labels_iterator"]["config"]["mappings"]
                           for label in mapping["previous_labels"]]
        new_labels = [mapping["new_label"] for mapping in full_config["mapped_to_outlier_labels_iterator"]["config"]["mappings"]]
        non_existing_labels = [label for label in previous_labels if label not in new_labels]
        for _, iterator in dataset_iterators.items():
            assert all([sample[iterator.dataset_meta.target_pos] not in non_existing_labels for sample in iterator])
            assert all([sample[iterator.dataset_meta.target_pos] in new_labels for sample in iterator])

    @pytest.mark.skipif(is_ci_deployment(), reason="CI deployment")
    def test_full_pipeline(self, components: Dict):
        # Batches from the data loaders must cover the full splits.
        data_loaders = components["data_loaders"]
        assert sum([len(b.samples) for b in iter(data_loaders["train"])]) == 4274
        assert sum([len(b.samples) for b in iter(data_loaders["val"])]) == 572
        assert sum([len(b.samples) for b in iter(data_loaders["test"])]) == 586
        highest_level_iterator_train = components["dataset_iterators"]["train"]
        report = DatasetIteratorReportGenerator.generate_report(
            highest_level_iterator_train, DatasetIteratorReportGenerator.ReportFormat.YAML)
        print(report)
from typing import Dict
import os
from ml_gym.blueprints.component_factory import ComponentFactory, Injector
from outlier_detection.constructables.constructables import CustomDatasetRepositoryConstructable, AtisFactoryConstructable
from outlier_detection.blueprints.mlp_blueprint import MLPCollator
from data_stack.dataset.reporting import DatasetIteratorReportGenerator
from ml_gym.io.config_parser import YAMLConfigLoader
def is_ci_deployment() -> bool:
return os.getenv("od_deployment_type") == "CI"
class TestAtisIterator:
@pytest.fixture
def full_config(self) -> Dict:
directory = os.path.dirname(os.path.abspath(__file__))
config_path = os.path.join(directory, "atis_full_config.yml")
config = YAMLConfigLoader.load(config_path)
return config
@pytest.fixture
def components(self, full_config) -> Dict:
component_names = list(full_config.keys())
injection_mapping = {"id_mlp_standard_collator": MLPCollator}
injector = Injector(injection_mapping)
component_factory = ComponentFactory(injector)
component_factory.register_component_type("DATASET_REPOSITORY", "DEFAULT", CustomDatasetRepositoryConstructable)
component_factory.register_component_type("DATASET_FACTORY", "ATIS", AtisFactoryConstructable)
components = component_factory.build_components_from_config(full_config, component_names)
return components
@pytest.mark.skipif(is_ci_deployment(), reason="CI deployment")
def test_dataset_iterator_from_constructable(self, components):
train_dataset_iterator = components["dataset_iterators"]["train"]
val_dataset_iterator = components["dataset_iterators"]["val"]
test_dataset_iterator = components["dataset_iterators"]["test"]
sample, target, tag = train_dataset_iterator[0]
assert len(train_dataset_iterator) == 4274
assert len(val_dataset_iterator) == 572
assert len(test_dataset_iterator) == 586
assert list(sample.shape) == [100] and isinstance(target, str)
@pytest.mark.skipif(is_ci_deployment(), reason="CI deployment")
def test_mapped_to_outlier_labels_iterator_from_constructable(self, components: Dict, full_config: Dict):
dataset_iterators = components["mapped_to_outlier_labels_iterator"]
previous_labels = [label for mapping in full_config["mapped_to_outlier_labels_iterator"]["config"]["mappings"]
for label in mapping["previous_labels"]]
new_labels = [mapping["new_label"] for mapping in full_config["mapped_to_outlier_labels_iterator"]["config"]["mappings"]]
non_existing_labels = [label for label in previous_labels if label not in new_labels]
for _, iterator in dataset_iterators.items():
assert all([sample[iterator.dataset_meta.target_pos] not in non_existing_labels for sample in iterator])
assert all([sample[iterator.dataset_meta.target_pos] in new_labels for sample in iterator])
@pytest.mark.skipif(is_ci_deployment(), reason="CI deployment")
def test_full_pipeline(self, components: Dict):
data_loaders = components["data_loaders"]
assert sum([len(b.samples) for b in iter(data_loaders["train"])]) == 4274
assert sum([len(b.samples) for b in iter(data_loaders["val"])]) == 572
assert sum([len(b.samples) for b in iter(data_loaders["test"])]) == 586
highest_level_iterator_train = components["dataset_iterators"]["train"]
report = DatasetIteratorReportGenerator.generate_report(
highest_level_iterator_train, DatasetIteratorReportGenerator.ReportFormat.YAML)
print(report) | 0.703346 | 0.38743 |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import skimage as sk
def show_image(image, figsize=(12,12)):
"""
simply display an Image
"""
plt.figure(figsize=figsize)
plt.imshow(image, cmap='gray')
def grid_view(video, **kwargs):
"""
make a grid of 3 columns to display each frame of the video
"""
nrow = len(video)//3+1*(len(video)%3>0)
fig, axes = plt.subplots(nrow, 3,
figsize=(15,5*nrow))
for i, im in enumerate(video):
axes.flatten()[i].imshow(im, **kwargs)
return axes
def display_video(video, figsize=(8,8), filename=None, **kwargs):
"""
Display an animation of the video
If filename is given (as a string), it saves the animation as a file
Think about %matplotlib notebook in jupyter notebooks
"""
fig = plt.figure(figsize=figsize)
# ims is a list of lists
# each row is a list of artists to draw in the current frame;
# here we are just animating one artist, the image, in
# each frame
ims = []
for v in video:
im = plt.imshow(v, animated=True, **kwargs)
ims.append([im])
ani = animation.ArtistAnimation(fig, ims, interval=40, blit=True,
repeat_delay=100)
if not filename is None:
try:
ani.save(filename)
except:
print("Could not save")
pass
return ani
def plot_frame_stat(video, stat=np.mean, ax=None, **kwargs):
"""
Compute the stat (np.mean, np.median, np.sum...) of each frame
and plot the evolution in time
You can also pass a lambda function, e.g.: (lambda x: np.min(x)/10)
"""
ax = plt.gca() if ax is None else ax
if not 'label' in kwargs:
kwargs['label'] = str(stat)
ax.plot(list(map(stat,video)), **kwargs)
ax.set_xlabel("Frame #")
ax.legend()
return ax
def plot_blobs(blobs, ax=None):
"""
Plot the blobs in an image
"""
ax = plt.gca() if ax is None else ax
for b in blobs:
circle = (plt.Circle((b[1], b[0]), np.sqrt(2) * b[2], color='r', fill=False))
ax.add_artist(circle)
return ax
def plot_blobs_scatter(blobs, ax=None, **kwargs):
"""
Version of blob plotting using only scatter and not Circle
"""
ax = plt.gca() if ax is None else ax
assert 's' not in kwargs, "the size of the rings is given by blobs array"
if len(blobs) > 0:
ax.scatter(blobs[:,1], blobs[:,0], s=blobs[:,2]*np.sqrt(2), **kwargs)
return ax
def display_video_blobs(video, blobs, figsize=(8,8), filename=None, **kwargs):
"""
Display an animation of the video
If filename is given (as a string), it saves the animation as a file
Think about %matplotlib notebook in jupyter notebooks
"""
fig, ax = plt.subplots(figsize=figsize)
# frame with most blobs
blob_max = blobs[np.array([len(b) for b in blobs]).argmax()]
# create as many circle
patches = []
for i in range(len(blob_max)):
b = blob_max[i]
patches.append(plt.Circle((b[1], b[0]), b[2]*np.sqrt(2), color='r', fill=False))
def init():
for patch in patches:
patch.center = (0, 0)
ax.add_patch(patch)
return patches,
def animate(i):
[patch.set_visible(False) for patch in patches]
ax.imshow(video[i], cmap='gray')
blob = blobs[i]
if len(blob)>0:
for j, b in enumerate(blob):
x = b[1]
y = b[0]
r = b[2] * np.sqrt(2)
patches[j].center = (x,y)
patches[j].set_radius(r)
patches[j].set_visible(True)
return patches,
anim = animation.FuncAnimation(fig, animate,
init_func=init,
frames=len(video),
interval=20,
# blit=True,
repeat=True,)
if not filename is None:
try:
ani.save(filename)
except:
print("Could not save")
pass
return anim
def plot_hist_size_blobs(blobs, ax=None, **kwargs):
"""
plot an histogram of the blobs sizes
"""
ax = plt.gca() if ax is None else ax
all_radius = [b[2] for blob in blobs for b in blob if len(blob)>0]
ax.hist(all_radius, **kwargs)
return ax
def plot_number_blob(blobs, ax=None, **kwargs):
"""
plot the number of blobs as a function of frames
"""
ax = plt.gca() if ax is None else ax
number_blob = [len(blob) for blob in blobs]
ax.plot(number_blob, 'o-')
ax.set_xlabel("Frame")
return ax | scripts/display.py | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import skimage as sk
def show_image(image, figsize=(12,12)):
"""
simply display an Image
"""
plt.figure(figsize=figsize)
plt.imshow(image, cmap='gray')
def grid_view(video, **kwargs):
"""
make a grid of 3 columns to display each frame of the video
"""
nrow = len(video)//3+1*(len(video)%3>0)
fig, axes = plt.subplots(nrow, 3,
figsize=(15,5*nrow))
for i, im in enumerate(video):
axes.flatten()[i].imshow(im, **kwargs)
return axes
def display_video(video, figsize=(8,8), filename=None, **kwargs):
"""
Display an animation of the video
If filename is given (as a string), it saves the animation as a file
Think about %matplotlib notebook in jupyter notebooks
"""
fig = plt.figure(figsize=figsize)
# ims is a list of lists
# each row is a list of artists to draw in the current frame;
# here we are just animating one artist, the image, in
# each frame
ims = []
for v in video:
im = plt.imshow(v, animated=True, **kwargs)
ims.append([im])
ani = animation.ArtistAnimation(fig, ims, interval=40, blit=True,
repeat_delay=100)
if not filename is None:
try:
ani.save(filename)
except:
print("Could not save")
pass
return ani
def plot_frame_stat(video, stat=np.mean, ax=None, **kwargs):
"""
Compute the stat (np.mean, np.median, np.sum...) of each frame
and plot the evolution in time
You can also pass a lambda function, e.g.: (lambda x: np.min(x)/10)
"""
ax = plt.gca() if ax is None else ax
if not 'label' in kwargs:
kwargs['label'] = str(stat)
ax.plot(list(map(stat,video)), **kwargs)
ax.set_xlabel("Frame #")
ax.legend()
return ax
def plot_blobs(blobs, ax=None):
"""
Plot the blobs in an image
"""
ax = plt.gca() if ax is None else ax
for b in blobs:
circle = (plt.Circle((b[1], b[0]), np.sqrt(2) * b[2], color='r', fill=False))
ax.add_artist(circle)
return ax
def plot_blobs_scatter(blobs, ax=None, **kwargs):
"""
Version of blob plotting using only scatter and not Circle
"""
ax = plt.gca() if ax is None else ax
assert 's' not in kwargs, "the size of the rings is given by blobs array"
if len(blobs) > 0:
ax.scatter(blobs[:,1], blobs[:,0], s=blobs[:,2]*np.sqrt(2), **kwargs)
return ax
def display_video_blobs(video, blobs, figsize=(8,8), filename=None, **kwargs):
"""
Display an animation of the video
If filename is given (as a string), it saves the animation as a file
Think about %matplotlib notebook in jupyter notebooks
"""
fig, ax = plt.subplots(figsize=figsize)
# frame with most blobs
blob_max = blobs[np.array([len(b) for b in blobs]).argmax()]
# create as many circle
patches = []
for i in range(len(blob_max)):
b = blob_max[i]
patches.append(plt.Circle((b[1], b[0]), b[2]*np.sqrt(2), color='r', fill=False))
def init():
for patch in patches:
patch.center = (0, 0)
ax.add_patch(patch)
return patches,
def animate(i):
[patch.set_visible(False) for patch in patches]
ax.imshow(video[i], cmap='gray')
blob = blobs[i]
if len(blob)>0:
for j, b in enumerate(blob):
x = b[1]
y = b[0]
r = b[2] * np.sqrt(2)
patches[j].center = (x,y)
patches[j].set_radius(r)
patches[j].set_visible(True)
return patches,
anim = animation.FuncAnimation(fig, animate,
init_func=init,
frames=len(video),
interval=20,
# blit=True,
repeat=True,)
if not filename is None:
try:
ani.save(filename)
except:
print("Could not save")
pass
return anim
def plot_hist_size_blobs(blobs, ax=None, **kwargs):
"""
plot an histogram of the blobs sizes
"""
ax = plt.gca() if ax is None else ax
all_radius = [b[2] for blob in blobs for b in blob if len(blob)>0]
ax.hist(all_radius, **kwargs)
return ax
def plot_number_blob(blobs, ax=None, **kwargs):
"""
plot the number of blobs as a function of frames
"""
ax = plt.gca() if ax is None else ax
number_blob = [len(blob) for blob in blobs]
ax.plot(number_blob, 'o-')
ax.set_xlabel("Frame")
return ax | 0.703753 | 0.726231 |
import imp
import os.path
import sys
import unittest
def _GetDirAbove(dirname):
"""Returns the directory "above" this file containing |dirname| (which must
also be "above" this file)."""
path = os.path.abspath(__file__)
while True:
path, tail = os.path.split(path)
assert tail
if tail == dirname:
return path
try:
imp.find_module("mojom")
except ImportError:
sys.path.append(os.path.join(_GetDirAbove("pylib"), "pylib"))
from mojom.generate import generator
class StringManipulationTest(unittest.TestCase):
"""generator contains some string utilities, this tests only those."""
def testSplitCamelCase(self):
self.assertEquals(["camel", "case"], generator.SplitCamelCase("CamelCase"))
self.assertEquals(["url", "loader", "factory"],
generator.SplitCamelCase('URLLoaderFactory'))
self.assertEquals(["get99", "entries"],
generator.SplitCamelCase('Get99Entries'))
self.assertEquals(["get99entries"],
generator.SplitCamelCase('Get99entries'))
def testToCamel(self):
self.assertEquals("CamelCase", generator.ToCamel("camel_case"))
self.assertEquals("CAMELCASE", generator.ToCamel("CAMEL_CASE"))
self.assertEquals("camelCase",
generator.ToCamel("camel_case", lower_initial=True))
self.assertEquals("CamelCase", generator.ToCamel(
"camel case", delimiter=' '))
self.assertEquals("CaMelCaSe", generator.ToCamel("caMel_caSe"))
self.assertEquals("L2Tp", generator.ToCamel("l2tp", digits_split=True))
self.assertEquals("l2tp", generator.ToCamel("l2tp", lower_initial=True))
def testToSnakeCase(self):
self.assertEquals("snake_case", generator.ToLowerSnakeCase("SnakeCase"))
self.assertEquals("snake_case", generator.ToLowerSnakeCase("snakeCase"))
self.assertEquals("snake_case", generator.ToLowerSnakeCase("SnakeCASE"))
self.assertEquals("snake_d3d11_case",
generator.ToLowerSnakeCase("SnakeD3D11Case"))
self.assertEquals("snake_d3d11_case",
generator.ToLowerSnakeCase("SnakeD3d11Case"))
self.assertEquals("snake_d3d11_case",
generator.ToLowerSnakeCase("snakeD3d11Case"))
self.assertEquals("SNAKE_CASE", generator.ToUpperSnakeCase("SnakeCase"))
self.assertEquals("SNAKE_CASE", generator.ToUpperSnakeCase("snakeCase"))
self.assertEquals("SNAKE_CASE", generator.ToUpperSnakeCase("SnakeCASE"))
self.assertEquals("SNAKE_D3D11_CASE",
generator.ToUpperSnakeCase("SnakeD3D11Case"))
self.assertEquals("SNAKE_D3D11_CASE",
generator.ToUpperSnakeCase("SnakeD3d11Case"))
self.assertEquals("SNAKE_D3D11_CASE",
generator.ToUpperSnakeCase("snakeD3d11Case"))
if __name__ == "__main__":
unittest.main() | mojo/public/tools/mojom/mojom/generate/generator_unittest.py |
import imp
import os.path
import sys
import unittest
def _GetDirAbove(dirname):
"""Returns the directory "above" this file containing |dirname| (which must
also be "above" this file)."""
path = os.path.abspath(__file__)
while True:
path, tail = os.path.split(path)
assert tail
if tail == dirname:
return path
try:
imp.find_module("mojom")
except ImportError:
sys.path.append(os.path.join(_GetDirAbove("pylib"), "pylib"))
from mojom.generate import generator
class StringManipulationTest(unittest.TestCase):
"""generator contains some string utilities, this tests only those."""
def testSplitCamelCase(self):
self.assertEquals(["camel", "case"], generator.SplitCamelCase("CamelCase"))
self.assertEquals(["url", "loader", "factory"],
generator.SplitCamelCase('URLLoaderFactory'))
self.assertEquals(["get99", "entries"],
generator.SplitCamelCase('Get99Entries'))
self.assertEquals(["get99entries"],
generator.SplitCamelCase('Get99entries'))
def testToCamel(self):
self.assertEquals("CamelCase", generator.ToCamel("camel_case"))
self.assertEquals("CAMELCASE", generator.ToCamel("CAMEL_CASE"))
self.assertEquals("camelCase",
generator.ToCamel("camel_case", lower_initial=True))
self.assertEquals("CamelCase", generator.ToCamel(
"camel case", delimiter=' '))
self.assertEquals("CaMelCaSe", generator.ToCamel("caMel_caSe"))
self.assertEquals("L2Tp", generator.ToCamel("l2tp", digits_split=True))
self.assertEquals("l2tp", generator.ToCamel("l2tp", lower_initial=True))
def testToSnakeCase(self):
self.assertEquals("snake_case", generator.ToLowerSnakeCase("SnakeCase"))
self.assertEquals("snake_case", generator.ToLowerSnakeCase("snakeCase"))
self.assertEquals("snake_case", generator.ToLowerSnakeCase("SnakeCASE"))
self.assertEquals("snake_d3d11_case",
generator.ToLowerSnakeCase("SnakeD3D11Case"))
self.assertEquals("snake_d3d11_case",
generator.ToLowerSnakeCase("SnakeD3d11Case"))
self.assertEquals("snake_d3d11_case",
generator.ToLowerSnakeCase("snakeD3d11Case"))
self.assertEquals("SNAKE_CASE", generator.ToUpperSnakeCase("SnakeCase"))
self.assertEquals("SNAKE_CASE", generator.ToUpperSnakeCase("snakeCase"))
self.assertEquals("SNAKE_CASE", generator.ToUpperSnakeCase("SnakeCASE"))
self.assertEquals("SNAKE_D3D11_CASE",
generator.ToUpperSnakeCase("SnakeD3D11Case"))
self.assertEquals("SNAKE_D3D11_CASE",
generator.ToUpperSnakeCase("SnakeD3d11Case"))
self.assertEquals("SNAKE_D3D11_CASE",
generator.ToUpperSnakeCase("snakeD3d11Case"))
if __name__ == "__main__":
unittest.main() | 0.485112 | 0.244831 |
import os
import argparse
from camera_trap_classifier.config.logging import setup_logging
from camera_trap_classifier.data.inventory import DatasetInventoryMaster
# Different functions depending on input values
def csv(args):
""" Import From CSV """
params = {'path': args['path'],
'image_path_col_list': args['image_fields'],
'capture_id_col': args['capture_id_field'],
'attributes_col_list': args['label_fields'],
'meta_col_list': args['meta_data_fields']}
dinv = DatasetInventoryMaster()
dinv.create_from_source('csv', params)
return dinv
def json(args):
""" Import From Json """
params = {'path': args['path']}
dinv = DatasetInventoryMaster()
dinv.create_from_source('json', params)
return dinv
def class_dir(args):
""" Import From Class Dirs"""
params = {'path': args['path']}
dinv = DatasetInventoryMaster()
dinv.create_from_source('image_dir', params)
return dinv
def panthera(args):
""" Import From panthera """
params = {'path': args['path']}
dinv = DatasetInventoryMaster()
dinv.create_from_source('panthera', params)
return dinv
def main():
# Parse command line arguments
parser = argparse.ArgumentParser(prog='CREATE DATSET INVENTORY')
parser.add_argument(
"-log_outdir", type=str, required=False, default=None,
help="The directory to write logfiles to (defaults to export dir)")
parser.add_argument(
"-discard_missing", default=False,
action='store_true', required=False,
help="whether to discard records with any missing label entries")
subparsers = parser.add_subparsers(help='sub-command help')
# create parser for csv input
parser_csv = subparsers.add_parser('csv',
help='specifcy if input is a csv file')
parser_csv.add_argument("-path", type=str, required=True,
help="the full path of the csv file")
parser_csv.add_argument("-export_path", type=str, required=True,
help="the full path to a json file which will contain\
the dataset inventory \
(e.g. /my_data/dataset_inventory.json)")
parser_csv.add_argument("-capture_id_field", type=str, required=True,
help="the name of the csv column with the \
capture id")
parser_csv.add_argument('-image_fields', nargs='+', type=str,
help='the name of the csv columns with paths to \
the images (more than one possible)',
required=True)
parser_csv.add_argument('-label_fields', nargs='+', type=str,
help='the name of the csv columns with paths to \
label attributes (more than one possible)',
required=True)
parser_csv.add_argument('-meta_data_fields', nargs='+', type=str,
help='the name of the csv columns with paths to \
meta data attributes (more than one poss.)',
required=False)
parser_csv.set_defaults(func=csv)
# create parser for json input
parser_json = subparsers.add_parser('json', help='if input is a json file')
parser_json.add_argument("-path", type=str, required=True)
parser_json.add_argument("-export_path", type=str, required=True,
help="the full path to a json file which will contain\
the dataset inventory \
(e.g. /my_data/dataset_inventory.json)")
parser_json.set_defaults(func=json)
# create parser for json input
parser_class_dirs = subparsers.add_parser(
'dir',
help='if input is a directory with class directories')
parser_class_dirs.add_argument("-path", type=str, required=True)
parser_class_dirs.add_argument(
"-export_path", type=str, required=True,
help="the full path to a json file which will contain\
the dataset inventory \
(e.g. /my_data/dataset_inventory.json)")
parser_class_dirs.set_defaults(func=class_dir)
# create parser for panthera input
parser_panthera = subparsers.add_parser(
'panthera',
help='if input is a panthera csv file')
parser_panthera.add_argument("-path", type=str, required=True)
parser_panthera.add_argument(
"-export_path", type=str, required=True,
help="the full path to a json file which will contain\
the dataset inventory \
(e.g. /my_data/dataset_inventory.json)")
parser_panthera.set_defaults(func=panthera)
# Parse command line arguments
args = vars(parser.parse_args())
# Configure Logging
if args['log_outdir'] is None:
args['log_outdir'] = os.path.split(args['export_path'])[0]
setup_logging(log_output_path=args['log_outdir'])
print("Using arguments:")
for k, v in args.items():
print("Arg: %s: %s" % (k, v))
dinv = args['func'](args)
if args['discard_missing']:
dinv._remove_records_with_any_missing_label()
dinv.log_stats()
dinv.export_to_json(json_path=args['export_path'])
if __name__ == '__main__':
main() | camera_trap_classifier/create_dataset_inventory.py | import os
import argparse
from camera_trap_classifier.config.logging import setup_logging
from camera_trap_classifier.data.inventory import DatasetInventoryMaster
# Different functions depending on input values
def csv(args):
""" Import From CSV """
params = {'path': args['path'],
'image_path_col_list': args['image_fields'],
'capture_id_col': args['capture_id_field'],
'attributes_col_list': args['label_fields'],
'meta_col_list': args['meta_data_fields']}
dinv = DatasetInventoryMaster()
dinv.create_from_source('csv', params)
return dinv
def json(args):
""" Import From Json """
params = {'path': args['path']}
dinv = DatasetInventoryMaster()
dinv.create_from_source('json', params)
return dinv
def class_dir(args):
""" Import From Class Dirs"""
params = {'path': args['path']}
dinv = DatasetInventoryMaster()
dinv.create_from_source('image_dir', params)
return dinv
def panthera(args):
""" Import From panthera """
params = {'path': args['path']}
dinv = DatasetInventoryMaster()
dinv.create_from_source('panthera', params)
return dinv
def main():
# Parse command line arguments
parser = argparse.ArgumentParser(prog='CREATE DATSET INVENTORY')
parser.add_argument(
"-log_outdir", type=str, required=False, default=None,
help="The directory to write logfiles to (defaults to export dir)")
parser.add_argument(
"-discard_missing", default=False,
action='store_true', required=False,
help="whether to discard records with any missing label entries")
subparsers = parser.add_subparsers(help='sub-command help')
# create parser for csv input
parser_csv = subparsers.add_parser('csv',
help='specifcy if input is a csv file')
parser_csv.add_argument("-path", type=str, required=True,
help="the full path of the csv file")
parser_csv.add_argument("-export_path", type=str, required=True,
help="the full path to a json file which will contain\
the dataset inventory \
(e.g. /my_data/dataset_inventory.json)")
parser_csv.add_argument("-capture_id_field", type=str, required=True,
help="the name of the csv column with the \
capture id")
parser_csv.add_argument('-image_fields', nargs='+', type=str,
help='the name of the csv columns with paths to \
the images (more than one possible)',
required=True)
parser_csv.add_argument('-label_fields', nargs='+', type=str,
help='the name of the csv columns with paths to \
label attributes (more than one possible)',
required=True)
parser_csv.add_argument('-meta_data_fields', nargs='+', type=str,
help='the name of the csv columns with paths to \
meta data attributes (more than one poss.)',
required=False)
parser_csv.set_defaults(func=csv)
# create parser for json input
parser_json = subparsers.add_parser('json', help='if input is a json file')
parser_json.add_argument("-path", type=str, required=True)
parser_json.add_argument("-export_path", type=str, required=True,
help="the full path to a json file which will contain\
the dataset inventory \
(e.g. /my_data/dataset_inventory.json)")
parser_json.set_defaults(func=json)
# create parser for json input
parser_class_dirs = subparsers.add_parser(
'dir',
help='if input is a directory with class directories')
parser_class_dirs.add_argument("-path", type=str, required=True)
parser_class_dirs.add_argument(
"-export_path", type=str, required=True,
help="the full path to a json file which will contain\
the dataset inventory \
(e.g. /my_data/dataset_inventory.json)")
parser_class_dirs.set_defaults(func=class_dir)
# create parser for panthera input
parser_panthera = subparsers.add_parser(
'panthera',
help='if input is a panthera csv file')
parser_panthera.add_argument("-path", type=str, required=True)
parser_panthera.add_argument(
"-export_path", type=str, required=True,
help="the full path to a json file which will contain\
the dataset inventory \
(e.g. /my_data/dataset_inventory.json)")
parser_panthera.set_defaults(func=panthera)
# Parse command line arguments
args = vars(parser.parse_args())
# Configure Logging
if args['log_outdir'] is None:
args['log_outdir'] = os.path.split(args['export_path'])[0]
setup_logging(log_output_path=args['log_outdir'])
print("Using arguments:")
for k, v in args.items():
print("Arg: %s: %s" % (k, v))
dinv = args['func'](args)
if args['discard_missing']:
dinv._remove_records_with_any_missing_label()
dinv.log_stats()
dinv.export_to_json(json_path=args['export_path'])
if __name__ == '__main__':
main() | 0.466116 | 0.22806 |
import logging
from flask import Blueprint, jsonify
from flask_pydantic import validate
from datastore_version_manager.domain import (
pending_operations,
draft_dataset,
version_bumper,
datastore_versions
)
from datastore_version_manager.exceptions.exceptions import (
ForbiddenOperation
)
from datastore_version_manager.api.request_models import (
NewPendingOperationRequest,
UpdatePendingOperationRequest,
ApplyBumpManifestoRequest
)
logger = logging.getLogger()
command_api = Blueprint('command_api', __name__)
@command_api.route('/pending-operations', methods=['GET'])
@validate()
def get_pending_operations():
logger.info('GET /pending-operations')
return jsonify(pending_operations.get_datastructure_updates())
@command_api.route('/pending-operations', methods=['POST'])
@validate()
def add_pending_operation(body: NewPendingOperationRequest):
logger.info(f'POST /pending-operations with body {body}')
operation_type = body.operationType
if operation_type == 'ADD_OR_CHANGE_DATA':
dataset_name = body.datasetName
description = body.description
draft_dataset.add_new_draft_dataset(operation_type, dataset_name, description)
return {"message": "OK"}
elif operation_type == 'PATCH_METADATA':
dataset_name = body.datasetName
description = body.description
draft_dataset.add_new_draft_dataset(operation_type, dataset_name, description)
return {"message": "OK"}
elif operation_type == 'REMOVE':
dataset_name = body.datasetName
description = body.description
pending_operations.add_new(dataset_name, 'REMOVE', 'PENDING_DELETE', description)
return {"message": "OK"}
else:
raise ForbiddenOperation(f"Forbidden operation: {operation_type}")
@command_api.route('/pending-operations/<dataset_name>', methods=['DELETE'])
@validate()
def delete_pending_operation(dataset_name):
logger.info(f'DELETE /pending-operations/{dataset_name}')
pending_operations.remove(dataset_name)
return {"message": "OK"}
@command_api.route('/pending-operations/<dataset_name>', methods=['PUT'])
@validate()
def update_pending_operation(dataset_name, body: UpdatePendingOperationRequest):
logger.info(f'PUT /pending-operations/{dataset_name} with body {body}')
release_status = body.releaseStatus
description = body.description
draft_dataset.update_pending_operation(dataset_name, release_status, description)
return {"message": "OK"}
@command_api.route('/datastore/bump', methods=['GET'])
@validate()
def get_bump_manifesto():
return jsonify(version_bumper.get_bump_manifesto())
@command_api.route('/datastore/bump', methods=['POST'])
@validate()
def apply_bump_manifesto(body: ApplyBumpManifestoRequest):
desc = body.description
client_bump_manifesto = [op.dict() for op in body.pendingOperations]
version_bumper.apply_bump_manifesto(client_bump_manifesto, desc)
return {"message": "OK"}
@command_api.route('/released-datasets', methods=['GET'])
@validate()
def get_released_datasets():
logger.info(f'GET /released-datasets')
return jsonify(datastore_versions.get_released_datasets()) | datastore_version_manager/api/command_api.py | import logging
from flask import Blueprint, jsonify
from flask_pydantic import validate
from datastore_version_manager.domain import (
pending_operations,
draft_dataset,
version_bumper,
datastore_versions
)
from datastore_version_manager.exceptions.exceptions import (
ForbiddenOperation
)
from datastore_version_manager.api.request_models import (
NewPendingOperationRequest,
UpdatePendingOperationRequest,
ApplyBumpManifestoRequest
)
logger = logging.getLogger()
command_api = Blueprint('command_api', __name__)
@command_api.route('/pending-operations', methods=['GET'])
@validate()
def get_pending_operations():
logger.info('GET /pending-operations')
return jsonify(pending_operations.get_datastructure_updates())
@command_api.route('/pending-operations', methods=['POST'])
@validate()
def add_pending_operation(body: NewPendingOperationRequest):
logger.info(f'POST /pending-operations with body {body}')
operation_type = body.operationType
if operation_type == 'ADD_OR_CHANGE_DATA':
dataset_name = body.datasetName
description = body.description
draft_dataset.add_new_draft_dataset(operation_type, dataset_name, description)
return {"message": "OK"}
elif operation_type == 'PATCH_METADATA':
dataset_name = body.datasetName
description = body.description
draft_dataset.add_new_draft_dataset(operation_type, dataset_name, description)
return {"message": "OK"}
elif operation_type == 'REMOVE':
dataset_name = body.datasetName
description = body.description
pending_operations.add_new(dataset_name, 'REMOVE', 'PENDING_DELETE', description)
return {"message": "OK"}
else:
raise ForbiddenOperation(f"Forbidden operation: {operation_type}")
@command_api.route('/pending-operations/<dataset_name>', methods=['DELETE'])
@validate()
def delete_pending_operation(dataset_name):
logger.info(f'DELETE /pending-operations/{dataset_name}')
pending_operations.remove(dataset_name)
return {"message": "OK"}
@command_api.route('/pending-operations/<dataset_name>', methods=['PUT'])
@validate()
def update_pending_operation(dataset_name, body: UpdatePendingOperationRequest):
logger.info(f'PUT /pending-operations/{dataset_name} with body {body}')
release_status = body.releaseStatus
description = body.description
draft_dataset.update_pending_operation(dataset_name, release_status, description)
return {"message": "OK"}
@command_api.route('/datastore/bump', methods=['GET'])
@validate()
def get_bump_manifesto():
return jsonify(version_bumper.get_bump_manifesto())
@command_api.route('/datastore/bump', methods=['POST'])
@validate()
def apply_bump_manifesto(body: ApplyBumpManifestoRequest):
desc = body.description
client_bump_manifesto = [op.dict() for op in body.pendingOperations]
version_bumper.apply_bump_manifesto(client_bump_manifesto, desc)
return {"message": "OK"}
@command_api.route('/released-datasets', methods=['GET'])
@validate()
def get_released_datasets():
logger.info(f'GET /released-datasets')
return jsonify(datastore_versions.get_released_datasets()) | 0.33372 | 0.095687 |
from typing import Any, List, Optional, Tuple, Union
import numpy
from QDTK.Operator import OCoef as Coeff
from QDTK.Operator import Operator
from QDTK.Operator import OTerm as Term
from mlxtk.dvr import DVRSpecification
from mlxtk.tools.diagonalize import diagonalize_1b_operator
from mlxtk.tools.operator import get_operator_matrix
class OperatorSpecification:
"""Object used to specify how to construct an operator acting on degrees
of freedom.
"""
def __init__(
self,
dofs: List[DVRSpecification],
coefficients: List[Any],
terms: List[Any],
table: Union[str, List[str]],
):
self.dofs = dofs
self.coefficients = coefficients
self.terms = terms
if isinstance(table, str):
self.table = [table]
else:
self.table = table
def __add__(self, other):
if not isinstance(other, OperatorSpecification):
raise RuntimeError(
"other object must be of type " "OperatorSpecification as well"
)
cpy = OperatorSpecification(
self.dofs, self.coefficients, self.terms, self.table
)
cpy.__iadd__(other)
return cpy
def __radd__(self, other):
return self.__add__(other)
def __iadd__(self, other):
if not isinstance(other, OperatorSpecification):
raise RuntimeError(
"other object must be of type " "OperatorSpecification as well"
)
if self.dofs != other.dofs:
raise ValueError("dofs differ")
if not set(self.coefficients.keys()).isdisjoint(set(other.coefficients.keys())):
raise ValueError("coefficient names are not unique")
if not set(self.terms.keys()).isdisjoint(set(other.terms.keys())):
raise ValueError("term names are not unique")
self.coefficients = {**self.coefficients, **other.coefficients}
self.terms = {**self.terms, **other.terms}
self.table += other.table
return self
def __imul__(self, other):
for name in self.coefficients:
self.coefficients[name] *= other
return self
def __mul__(self, other):
cpy = OperatorSpecification(
self.dofs, self.coefficients, self.terms, self.table
)
cpy.__imul__(other)
return cpy
def __rmul__(self, other):
return self.__mul__(other)
def __itruediv__(self, other):
for name in self.coefficients:
self.coefficients[name] /= other
return self
def __truediv__(self, other):
cpy = OperatorSpecification(
self.dofs, self.coefficients, self.terms, self.table
)
cpy.__itruediv__(other)
return cpy
def get_operator(self) -> Operator:
op = Operator()
op.define_grids([dof.get() for dof in self.dofs])
for coeff in self.coefficients:
op.addLabel(coeff, Coeff(self.coefficients[coeff]))
for term in self.terms:
op.addLabel(term, Term(self.terms[term]))
op.readTable("\n".join(self.table))
return op
def get_matrix(self) -> numpy.ndarray:
return get_operator_matrix(self.get_operator())
def diagonalize(
self, number_eigenfunctions: Optional[int] = None
) -> Tuple[numpy.ndarray, numpy.ndarray]:
evals, evecs = diagonalize_1b_operator(self.get_matrix(), number_eigenfunctions)
return evals, numpy.array(evecs) | mlxtk/operator/operator_specification.py | from typing import Any, List, Optional, Tuple, Union
import numpy
from QDTK.Operator import OCoef as Coeff
from QDTK.Operator import Operator
from QDTK.Operator import OTerm as Term
from mlxtk.dvr import DVRSpecification
from mlxtk.tools.diagonalize import diagonalize_1b_operator
from mlxtk.tools.operator import get_operator_matrix
class OperatorSpecification:
    """Object used to specify how to construct an operator acting on degrees
    of freedom.

    Holds DVR grids (``dofs``), named ``coefficients``, named ``terms`` and
    the operator ``table``.  Arithmetic operators combine and scale
    specifications; ``get_operator`` materializes a QDTK ``Operator``.

    Bug fixes relative to the original:
    * ``__add__`` let the copy alias ``self.table``; ``a + b`` therefore
      extended ``a.table`` in place.
    * ``__mul__`` / ``__truediv__`` let the copy alias ``self.coefficients``;
      scaling the copy in place mutated the original's coefficients.
    All copy-producing operators now shallow-copy the containers.
    Forward-reference annotations are quoted so the class does not require
    the annotated types at class-creation time.
    """

    def __init__(
        self,
        dofs: "List[DVRSpecification]",
        coefficients: "List[Any]",
        terms: "List[Any]",
        table: "Union[str, List[str]]",
    ):
        self.dofs = dofs
        # NOTE(review): despite the List annotations, coefficients/terms are
        # used as name -> value mappings throughout — TODO confirm and fix
        # the annotations upstream.
        self.coefficients = coefficients
        self.terms = terms
        # Normalize the table to a list of rows.
        if isinstance(table, str):
            self.table = [table]
        else:
            self.table = table

    def __add__(self, other):
        if not isinstance(other, OperatorSpecification):
            raise RuntimeError(
                "other object must be of type " "OperatorSpecification as well"
            )
        # Shallow-copy the containers so the in-place merge below cannot
        # touch our own state.
        cpy = OperatorSpecification(
            self.dofs, dict(self.coefficients), dict(self.terms), list(self.table)
        )
        cpy.__iadd__(other)
        return cpy

    def __radd__(self, other):
        return self.__add__(other)

    def __iadd__(self, other):
        if not isinstance(other, OperatorSpecification):
            raise RuntimeError(
                "other object must be of type " "OperatorSpecification as well"
            )
        if self.dofs != other.dofs:
            raise ValueError("dofs differ")
        if not set(self.coefficients.keys()).isdisjoint(set(other.coefficients.keys())):
            raise ValueError("coefficient names are not unique")
        if not set(self.terms.keys()).isdisjoint(set(other.terms.keys())):
            raise ValueError("term names are not unique")
        self.coefficients = {**self.coefficients, **other.coefficients}
        self.terms = {**self.terms, **other.terms}
        # Rebind instead of extending in place, mirroring the dict merges
        # above (avoids mutating a list that may be shared).
        self.table = self.table + other.table
        return self

    def __imul__(self, other):
        for name in self.coefficients:
            self.coefficients[name] *= other
        return self

    def __mul__(self, other):
        cpy = OperatorSpecification(
            self.dofs, dict(self.coefficients), dict(self.terms), list(self.table)
        )
        cpy.__imul__(other)
        return cpy

    def __rmul__(self, other):
        return self.__mul__(other)

    def __itruediv__(self, other):
        for name in self.coefficients:
            self.coefficients[name] /= other
        return self

    def __truediv__(self, other):
        cpy = OperatorSpecification(
            self.dofs, dict(self.coefficients), dict(self.terms), list(self.table)
        )
        cpy.__itruediv__(other)
        return cpy

    def get_operator(self) -> "Operator":
        """Materialize this specification as a QDTK ``Operator``."""
        op = Operator()
        op.define_grids([dof.get() for dof in self.dofs])
        for coeff in self.coefficients:
            op.addLabel(coeff, Coeff(self.coefficients[coeff]))
        for term in self.terms:
            op.addLabel(term, Term(self.terms[term]))
        op.readTable("\n".join(self.table))
        return op

    def get_matrix(self) -> "numpy.ndarray":
        """Return the dense matrix representation of this operator."""
        return get_operator_matrix(self.get_operator())

    def diagonalize(
        self, number_eigenfunctions: "Optional[int]" = None
    ) -> "Tuple[numpy.ndarray, numpy.ndarray]":
        """Diagonalize the one-body matrix; returns (eigenvalues, eigenvectors)."""
        evals, evecs = diagonalize_1b_operator(self.get_matrix(), number_eigenfunctions)
        return evals, numpy.array(evecs)
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
    """Entry point for the report.

    Returns the (columns, data, message, chart) tuple expected by frappe's
    query-report runner; all report parts are empty when nothing matches.
    """
    conditions = get_conditions(filters)
    rows = get_data(conditions, filters)
    if not rows:
        return [], [], None, []
    return get_columns(), rows, None, get_chart_data(rows)
def get_conditions(filters):
    """Build the extra SQL WHERE fragment for the report filters.

    Each fragment carries a leading space: the original version glued
    ``...%(warehouse)sAND company...`` together when both filters were set,
    producing invalid SQL.  Values stay as pyformat placeholders, so they
    are parameterized by frappe.db.sql.
    """
    filters = filters or {}  # tolerate None from the report runner
    conditions = ""
    if filters.get("warehouse"):
        conditions += " AND warehouse in %(warehouse)s"
    if filters.get("company"):
        conditions += " AND company = %(company)s"
    return conditions
def get_data(conditions, filters):
    """Fetch all Bin rows with a negative projected quantity.

    ``conditions`` is the pre-built fragment from get_conditions() and is
    interpolated into the WHERE clause; the filter *values* are passed
    separately via ``filters`` so they stay parameterized.
    """
    data = frappe.db.sql("""
        SELECT
            bin.warehouse,
            bin.item_code,
            bin.actual_qty ,
            bin.ordered_qty ,
            bin.planned_qty ,
            bin.reserved_qty ,
            bin.reserved_qty_for_production,
            bin.projected_qty ,
            warehouse.company,
            item.item_name ,
            item.description
        FROM
            `tabBin` bin,
            `tabWarehouse` warehouse,
            `tabItem` item
        WHERE
            bin.projected_qty<0
            AND warehouse.name = bin.warehouse
            AND bin.item_code=item.name
            {0}
        ORDER BY bin.projected_qty;""".format(conditions), filters, as_dict=1)
    return data
def get_chart_data(data):
    """Build the bar-chart payload: projected qty for (at most) 10 items."""
    top_rows = data[:10]  # chart shows at most the 10 most negative items
    labels = [row.get("item_code") for row in top_rows]
    datapoints = [row.get("projected_qty") for row in top_rows]
    return {
        "data": {
            "labels": labels,
            "datasets": [
                {
                    "name": _("Projected Qty"),
                    "values": datapoints
                }
            ]
        },
        "type": "bar"
    }
def get_columns():
    """Column definitions for the report (same columns, built via helpers)."""

    def link_col(label, fieldname, options, width):
        # Link column pointing at another DocType.
        return {
            "label": label,
            "fieldname": fieldname,
            "fieldtype": "Link",
            "options": options,
            "width": width
        }

    def qty_col(label, fieldname):
        # Float quantity column, UOM-convertible.
        return {
            "label": label,
            "fieldname": fieldname,
            "fieldtype": "Float",
            "width": 120,
            "convertible": "qty"
        }

    def data_col(label, fieldname, width):
        return {
            "label": label,
            "fieldname": fieldname,
            "fieldtype": "Data",
            "width": width
        }

    return [
        link_col(_("Warehouse"), "warehouse", "Warehouse", 150),
        link_col(_("Item"), "item_code", "Item", 150),
        qty_col(_("Actual Quantity"), "actual_qty"),
        qty_col(_("Ordered Quantity"), "ordered_qty"),
        qty_col(_("Planned Quantity"), "planned_qty"),
        qty_col(_("Reserved Quantity"), "reserved_qty"),
        qty_col(_("Reserved Quantity for Production"), "reserved_qty_for_production"),
        qty_col(_("Projected Quantity"), "projected_qty"),
        link_col(_("Company"), "company", "Company", 120),
        data_col(_("Item Name"), "item_name", 100),
        data_col(_("Description"), "description", 120),
    ]
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
columns = get_columns()
conditions = get_conditions(filters)
data = get_data(conditions, filters)
if not data:
return [], [], None, []
chart_data = get_chart_data(data)
return columns, data, None, chart_data
def get_conditions(filters):
    """Build the extra SQL WHERE fragment for the report filters.

    Each fragment carries a leading space: the original version glued
    ``...%(warehouse)sAND company...`` together when both filters were set,
    producing invalid SQL.  Values stay as pyformat placeholders, so they
    are parameterized by frappe.db.sql.
    """
    filters = filters or {}  # tolerate None from the report runner
    conditions = ""
    if filters.get("warehouse"):
        conditions += " AND warehouse in %(warehouse)s"
    if filters.get("company"):
        conditions += " AND company = %(company)s"
    return conditions
def get_data(conditions, filters):
data = frappe.db.sql("""
SELECT
bin.warehouse,
bin.item_code,
bin.actual_qty ,
bin.ordered_qty ,
bin.planned_qty ,
bin.reserved_qty ,
bin.reserved_qty_for_production,
bin.projected_qty ,
warehouse.company,
item.item_name ,
item.description
FROM
`tabBin` bin,
`tabWarehouse` warehouse,
`tabItem` item
WHERE
bin.projected_qty<0
AND warehouse.name = bin.warehouse
AND bin.item_code=item.name
{0}
ORDER BY bin.projected_qty;""".format(conditions), filters, as_dict=1)
return data
def get_chart_data(data):
labels, datapoints = [], []
for row in data:
labels.append(row.get("item_code"))
datapoints.append(row.get("projected_qty"))
if len(data) > 10:
labels = labels[:10]
datapoints = datapoints[:10]
return {
"data": {
"labels": labels,
"datasets":[
{
"name": _("Projected Qty"),
"values": datapoints
}
]
},
"type": "bar"
}
def get_columns():
columns = [
{
"label": _("Warehouse"),
"fieldname": "warehouse",
"fieldtype": "Link",
"options": "Warehouse",
"width": 150
},
{
"label": _("Item"),
"fieldname": "item_code",
"fieldtype": "Link",
"options": "Item",
"width": 150
},
{
"label": _("Actual Quantity"),
"fieldname": "actual_qty",
"fieldtype": "Float",
"width": 120,
"convertible": "qty"
},
{
"label": _("Ordered Quantity"),
"fieldname": "ordered_qty",
"fieldtype": "Float",
"width": 120,
"convertible": "qty"
},
{
"label": _("Planned Quantity"),
"fieldname": "planned_qty",
"fieldtype": "Float",
"width": 120,
"convertible": "qty"
},
{
"label": _("Reserved Quantity"),
"fieldname": "reserved_qty",
"fieldtype": "Float",
"width": 120,
"convertible": "qty"
},
{
"label": _("Reserved Quantity for Production"),
"fieldname": "reserved_qty_for_production",
"fieldtype": "Float",
"width": 120,
"convertible": "qty"
},
{
"label": _("Projected Quantity"),
"fieldname": "projected_qty",
"fieldtype": "Float",
"width": 120,
"convertible": "qty"
},
{
"label": _("Company"),
"fieldname": "company",
"fieldtype": "Link",
"options": "Company",
"width": 120
},
{
"label": _("Item Name"),
"fieldname": "item_name",
"fieldtype": "Data",
"width": 100
},
{
"label": _("Description"),
"fieldname": "description",
"fieldtype": "Data",
"width": 120
}
]
return columns | 0.528777 | 0.259356 |
import hashlib
import os
import random
import re
import numpy as np
from os.path import join, isdir
from scipy.io.wavfile import read, write
from pathlib import Path
MAX_NUM_WAVS_PER_CLASS = 2**27 - 1  # ~134M; resolution of the hash bucketing
UNKNOWN = '_unknown_'


def which_set(filename, validation_percentage, testing_percentage):
    """Deterministically assign a sample file to a data partition.

    The partition is derived from a stable hash of the file name, so the
    assignment survives dataset growth and long-run restarts.  Anything
    after '_nohash_' in the name is ignored, keeping close variations of
    the same recording (e.g. 'bobby_nohash_0.wav' and 'bobby_nohash_1.wav')
    in the same partition.

    Args:
        filename: File path of the data sample.
        validation_percentage: Share of the data used for validation (0-100).
        testing_percentage: Share of the data used for testing (0-100).

    Returns:
        One of 'training', 'validation' or 'testing'.
    """
    base_name = os.path.basename(filename)
    # Strip the variation suffix so related recordings hash identically.
    grouping_name = re.sub(r'_nohash_.*$', '', base_name)
    # Map the SHA1 of the grouping name onto [0, 100] and bucket it.
    digest = hashlib.sha1(grouping_name.encode('utf-8')).hexdigest()
    percentage_hash = (int(digest, 16) % (MAX_NUM_WAVS_PER_CLASS + 1)) * (
        100.0 / MAX_NUM_WAVS_PER_CLASS)
    if percentage_hash < validation_percentage:
        return 'validation'
    if percentage_hash < validation_percentage + testing_percentage:
        return 'testing'
    return 'training'
def load_dataset(dataset_path, val_percentage=10., test_percentage=10.):
    """
    Return a list of dataset for training, validation and testing.

    Scans ``dataset_path``, where each sub-directory name is a label and
    contains the .wav samples for that label; files that are not .wav or
    whose size differs from a 1 s recording (32044 bytes) are discarded.

    :param dataset_path: the path of the dir within the directory named as the label of the contained samples
    :param val_percentage: the percentage of validation desired
    :param test_percentage: the percentage of testing desired
    :return: (x_train, y_train, x_val, y_val, x_test, y_test, info) where
        ``info`` holds per-label counters and bookkeeping about discarded files
    """
    x_train, y_train, x_val, y_val, x_test, y_test = [], [], [], [], [], []
    info = {
        "dataset_path": dataset_path,
        "tot_sample": 0,
        "discarded_sample": 0,
        "labels": [],
        "counters": {}
    }
    # load all the labels (one sub-directory per label)
    for lab in os.listdir(dataset_path):
        if isdir(join(dataset_path, lab)):
            info["labels"].append(lab)
            info["counters"][lab] = 0
    # collect relative paths ("label/file.wav") of all usable samples
    path_list = []
    for label_dir in os.listdir(dataset_path):
        if isdir(join(dataset_path, label_dir)):
            for file in os.listdir(join(dataset_path, label_dir)):
                # filter all file that are not .wav and with duration different of 1s (32044 bytes)
                if file.endswith(".wav") and Path(join(dataset_path, label_dir, file)).stat().st_size == 32044:
                    path_list.append(join(label_dir, file))
                    info["tot_sample"] += 1
                    info["counters"][label_dir] += 1
                else:
                    info["discarded_sample"] += 1
    # shuffle
    random.shuffle(path_list)
    # split train validation and test (hash-based, stable across runs)
    for sample in path_list:
        data = os.path.basename(sample)
        label = sample.split(os.sep)[-2]
        if which_set(sample, val_percentage, test_percentage) == 'training':
            x_train.append(data)
            y_train.append(label)
        elif which_set(sample, val_percentage, test_percentage) == 'validation':
            x_val.append(data)
            y_val.append(label)
        elif which_set(sample, val_percentage, test_percentage) == 'testing':
            x_test.append(data)
            y_test.append(label)
        else:
            raise Exception("which_set fail! Debug the method.")
    return x_train, y_train, x_val, y_val, x_test, y_test, info
def dataset_generator(x_, y_, info, wanted_words, batch_size=1000, unknown_percentage=10., tot_size=-1, balanced=False):
    """
    This method select the samples for train, validation and test set batches. Moreover it read the audio and the resp
    label. It need the wanted_words list to differentiate sample with label that are not in wanted_words.
    :param x_: the sample of the dataset
    :param y_: the resp labels of the sample
    :param info: contain dataset_path, tot_sample, counters for each label
    :param wanted_words: the list of wanted words of the model
    :param batch_size: the size of each yielded batch
    :param unknown_percentage: the percentage of unknown samples added to each batch
    :param tot_size: used to set a limit to the batch size
    :param balanced: boolean, if True, each yielded batch has a balanced number of samples for each label in wanted
    words, otherwise each sample of the dataset is added to the batch.
    :return: a generator that yield one batch at a time with two components: 'audio' and 'label'.
    """
    # adjust tot_size (from 1 to |x_|) and batch_size (from 1 to tot_size)
    if tot_size <= 0 or tot_size > len(x_):
        tot_size = len(x_)
    if batch_size <= 0 or batch_size > tot_size:
        batch_size = tot_size
    # check if all label are available in the dataset
    for label in wanted_words:
        if label != UNKNOWN and label not in y_:
            raise Exception("The specified label '{}' is not available in the dataset.".format(label))
    # add UNKNOWN label to the dataset if not present
    # NOTE(review): this appends to and sorts the caller's list in place —
    # confirm callers expect wanted_words to be mutated.
    if unknown_percentage > 0.0 and UNKNOWN not in wanted_words:
        wanted_words.append(UNKNOWN)
    # alphabetically sort all the label
    wanted_words.sort()
    # calculate the max number of samples for each label
    l_percentage = (100 - unknown_percentage)/(len(wanted_words) - 1)  # -1 is because 'unknown' in ww,
    max_size = {}
    for label in wanted_words:
        if label == UNKNOWN:
            # unknown budget is capped by how many non-wanted samples exist
            max_size[UNKNOWN] = min(int(unknown_percentage*batch_size/100)+1,
                                    info['tot_sample'] - sum([info['counters'][label_] if UNKNOWN != label_ else 0 for label_ in wanted_words]))
        else:
            max_size[label] = min(int(l_percentage*batch_size/100)+1, info['counters'][label])
    sample_counter = {label: 0 for label in wanted_words}
    # max_iterations = int(tot_size/batch_size)+1
    inner_index = 0
    round_ = 0  # incremented each time inner_index >= len(x_)
    step = 0
    while True:  # the generator can generate batch forever, cycling the whole dataset
        xy_numpy = {'audio': [], 'label': []}
        batch_index = 0
        while batch_index < batch_size:
            if inner_index >= len(x_):  # the entire dataset has been consumed, restart from the beginning
                print("Complete a tour of the whole dataset.")
                round_ += 1
                inner_index = 0  # TODO: is it the best choise? Should the list be shuffled?
                if not balanced:
                    break
            label = y_[inner_index] if y_[inner_index] in wanted_words else UNKNOWN  # the label of the current sample
            # add the sample to the yield batch if needed
            if balanced:
                # evaluate if this label has too much samples in the current batch
                if sample_counter[label] < max_size[label]:
                    sample_counter[label] += 1
                    fs, data = read(join(info["dataset_path"], y_[inner_index], x_[inner_index]))
                    xy_numpy['audio'].append(data)
                    label_index = wanted_words.index(label)
                    xy_numpy['label'].append(label_index)
                    batch_index += 1
            else:
                sample_counter[label] += 1
                fs, data = read(join(info["dataset_path"], y_[inner_index], x_[inner_index]))
                xy_numpy['audio'].append(data)
                label_index = wanted_words.index(label)
                xy_numpy['label'].append(label_index)
                batch_index += 1
            inner_index += 1
        if len(xy_numpy['label']) == 0:  # happen when complete a tour of the whole dataset and at the same time,
            continue  # complete the batch. It will restart the dataset without yielding a void batch
        step += 1
        print("round {:3.0f}, step {} , examined {}/{} , batch_size {} ".format(round_, step, inner_index, len(x_),
                                                                                len(xy_numpy['label'])))
        yield xy_numpy
        sample_counter = {label: 0 for label in wanted_words}  # clean sample counter
    print("dataset_generator end!")  # it should never end
if __name__ == "__main__":
    # Manual smoke test against a local copy of the Speech Commands dataset.
    file_path = "trainset/speech_commands_v0.02/bird/9ff2d2f4_nohash_0.wav"
    print(which_set(file_path, 20., 30.))
    dataset_path = "trainset/speech_commands_v0.02"
    x_train, y_train, x_val, y_val, x_test, y_test, info = load_dataset(dataset_path, val_percentage=10.,
                                                                        test_percentage=10.)
    # show a few training samples
    for x_ in x_train[:10]:
print(x_) | dataset_utils.py | import hashlib
import os
import random
import re
import numpy as np
from os.path import join, isdir
from scipy.io.wavfile import read, write
from pathlib import Path
MAX_NUM_WAVS_PER_CLASS = 2**27 - 1 # ~134M
UNKNOWN = '_unknown_'
def which_set(filename, validation_percentage, testing_percentage):
"""Determines which data partition the file should belong to.
We want to keep files in the same training, validation, or testing sets even
if new ones are added over time. This makes it less likely that testing
samples will accidentally be reused in training when long runs are restarted
for example. To keep this stability, a hash of the filename is taken and used
to determine which set it should belong to. This determination only depends on
the name and the set proportions, so it won't change as other files are added.
It's also useful to associate particular files as related (for example words
spoken by the same person), so anything after '_nohash_' in a filename is
ignored for set determination. This ensures that 'bobby_nohash_0.wav' and
'bobby_nohash_1.wav' are always in the same set, for example.
Args:
filename: File path of the data sample.
validation_percentage: How much of the data set to use for validation (from 0 to 100).
testing_percentage: How much of the data set to use for testing (from 0 to 100).
Returns:
String, one of 'training', 'validation', or 'testing'.
"""
base_name = os.path.basename(filename)
# We want to ignore anything after '_nohash_' in the file name when
# deciding which set to put a wav in, so the data set creator has a way of
# grouping wavs that are close variations of each other.
hash_name = re.sub(r'_nohash_.*$', '', base_name)
# This looks a bit magical, but we need to decide whether this file should
# go into the training, testing, or validation sets, and we want to keep
# existing files in the same set even if more files are subsequently
# added.
# To do that, we need a stable way of deciding based on just the file name
# itself, so we do a hash of that and then use that to generate a
# probability value that we use to assign it.
hash_name_hashed = hashlib.sha1(hash_name.encode('utf-8')).hexdigest()
percentage_hash = ((int(hash_name_hashed, 16) %
(MAX_NUM_WAVS_PER_CLASS + 1)) *
(100.0 / MAX_NUM_WAVS_PER_CLASS))
if percentage_hash < validation_percentage:
result = 'validation'
elif percentage_hash < (testing_percentage + validation_percentage):
result = 'testing'
else:
result = 'training'
return result
def load_dataset(dataset_path, val_percentage=10., test_percentage=10.):
"""
Return a list of dataset for training, validation and testing
:param dataset_path: the path of the dir within the directory named as the label of the contained samples
:param val_percentage: the percentage of validation desired
:param test_percentage: the percentage of testing desired
:return:
"""
x_train, y_train, x_val, y_val, x_test, y_test = [], [], [], [], [], []
info = {
"dataset_path": dataset_path,
"tot_sample": 0,
"discarded_sample": 0,
"labels": [],
"counters": {}
}
# load all the labels
for lab in os.listdir(dataset_path):
if isdir(join(dataset_path, lab)):
info["labels"].append(lab)
info["counters"][lab] = 0
# load all path, input:
path_list = []
for label_dir in os.listdir(dataset_path):
if isdir(join(dataset_path, label_dir)):
for file in os.listdir(join(dataset_path, label_dir)):
# filter all file that are not .wav and with duration different of 1s (32044 bytes)
if file.endswith(".wav") and Path(join(dataset_path, label_dir, file)).stat().st_size == 32044:
path_list.append(join(label_dir, file))
info["tot_sample"] += 1
info["counters"][label_dir] += 1
else:
info["discarded_sample"] += 1
# shuffle
random.shuffle(path_list)
# split train validation and test
for sample in path_list:
data = os.path.basename(sample)
label = sample.split(os.sep)[-2]
if which_set(sample, val_percentage, test_percentage) == 'training':
x_train.append(data)
y_train.append(label)
elif which_set(sample, val_percentage, test_percentage) == 'validation':
x_val.append(data)
y_val.append(label)
elif which_set(sample, val_percentage, test_percentage) == 'testing':
x_test.append(data)
y_test.append(label)
else:
raise Exception("which_set fail! Debug the method.")
return x_train, y_train, x_val, y_val, x_test, y_test, info
def dataset_generator(x_, y_, info, wanted_words, batch_size=1000, unknown_percentage=10., tot_size=-1, balanced=False):
"""
This method select the samples for train, validation and test set batches. Moreover it read the audio and the resp
label. It need the wanted_words list to differentiate sample with label that are not in wanted_words.
:param x_: the sample of the dataset
:param y_: the resp labels of the sample
:param info: contain dataset_path, tot_sample, counters for each label
:param wanted_words: the list of wanted words of the model
:param batch_size: the size of each yielded batch
:param unknown_percentage: the percentage of unknown samples added to each batch
:param tot_size: used to set a limit to the batch size
:param balanced: boolean, if True, each yielded batch has a balanced number of samples for each label in wanted
words, otherwise each sample of the dataset is added to the batch.
:return: a generator that yield one batch at a time with two components: 'audio' and 'label'.
"""
# adjust tot_size (from 1 to |x_|) and batch_size (from 1 to tot_size)
if tot_size <= 0 or tot_size > len(x_):
tot_size = len(x_)
if batch_size <= 0 or batch_size > tot_size:
batch_size = tot_size
# check if all label are available in the dataset
for label in wanted_words:
if label != UNKNOWN and label not in y_:
raise Exception("The specified label '{}' is not available in the dataset.".format(label))
# add UNKNOWN label to the dataset if not present
if unknown_percentage > 0.0 and UNKNOWN not in wanted_words:
wanted_words.append(UNKNOWN)
# alphabetically sort all the label
wanted_words.sort()
# calculate the max number of samples for each label
l_percentage = (100 - unknown_percentage)/(len(wanted_words) - 1) # -1 is because 'unknown' in ww,
max_size = {}
for label in wanted_words:
if label == UNKNOWN:
max_size[UNKNOWN] = min(int(unknown_percentage*batch_size/100)+1,
info['tot_sample'] - sum([info['counters'][label_] if UNKNOWN != label_ else 0 for label_ in wanted_words]))
else:
max_size[label] = min(int(l_percentage*batch_size/100)+1, info['counters'][label])
sample_counter = {label: 0 for label in wanted_words}
# max_iterations = int(tot_size/batch_size)+1
inner_index = 0
round_ = 0 # incremented each time inner_index >= len(x_)
step = 0
while True: # the generator can generate batch forever, cycling the whole dataset
xy_numpy = {'audio': [], 'label': []}
batch_index = 0
while batch_index < batch_size:
if inner_index >= len(x_): # the entire dataset has been consumed, restart from the beginning
print("Complete a tour of the whole dataset.")
round_ += 1
inner_index = 0 # TODO: is it the best choise? Should the list be shuffled?
if not balanced:
break
label = y_[inner_index] if y_[inner_index] in wanted_words else UNKNOWN # the label of the current sample
# add the sample to the yield batch if needed
if balanced:
# evaluate if this label has too much samples in the current batch
if sample_counter[label] < max_size[label]:
sample_counter[label] += 1
fs, data = read(join(info["dataset_path"], y_[inner_index], x_[inner_index]))
xy_numpy['audio'].append(data)
label_index = wanted_words.index(label)
xy_numpy['label'].append(label_index)
batch_index += 1
else:
sample_counter[label] += 1
fs, data = read(join(info["dataset_path"], y_[inner_index], x_[inner_index]))
xy_numpy['audio'].append(data)
label_index = wanted_words.index(label)
xy_numpy['label'].append(label_index)
batch_index += 1
inner_index += 1
if len(xy_numpy['label']) == 0: # happen when complete a tour of the whole dataset and at the same time,
continue # complete the batch. It will restart the dataset without yielding a void batch
step += 1
print("round {:3.0f}, step {} , examined {}/{} , batch_size {} ".format(round_, step, inner_index, len(x_),
len(xy_numpy['label'])))
yield xy_numpy
sample_counter = {label: 0 for label in wanted_words} # clean sample counter
print("dataset_generator end!") # it should never end
if __name__ == "__main__":
file_path = "trainset/speech_commands_v0.02/bird/9ff2d2f4_nohash_0.wav"
print(which_set(file_path, 20., 30.))
dataset_path = "trainset/speech_commands_v0.02"
x_train, y_train, x_val, y_val, x_test, y_test, info = load_dataset(dataset_path, val_percentage=10.,
test_percentage=10.)
for x_ in x_train[:10]:
print(x_) | 0.591369 | 0.454048 |
from amqplib import client_0_8 as amqp
import sys
import random
import time
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--host", action="store", type="string", dest="host", default="localhost")
parser.add_option("--port", action="store", type="int", dest="port", default=5672)
parser.add_option("--ssl", action="store_true", dest="ssl", default=False)
parser.add_option("--vhost", action="store", type="string", dest="vhost", default="/")
parser.add_option("--queue", action="store", type="string", dest="queue", default="monitoring_queue")
parser.add_option("--user", action="store", type="string", dest="user", default="guest")
parser.add_option("--password", action="store", type="string", dest="password", default="<PASSWORD>")
parser.add_option("--critical", action="store", type="float", dest="critical", metavar="SECONDS", default=4.0)
parser.add_option("--warning", action="store", type="float", dest="warning", metavar="SECONDS", default=2.0)
(options, args) = parser.parse_args(sys.argv)
# Connection details go here
amqpServer = "%s:%i" % (options.host, options.port)
amqpQueue = "%s" % options.queue
amqpVhost = options.vhost
amqpSsl = options.ssl
amqpUid = options.user
# Fix: read the password from the parsed options (the previous redacted
# placeholder was not valid Python).
amqpPass = options.password
# Number of seconds before message is considered timed out
timeout = options.critical
# Number of seconds before the received message is considered late and a warning is raised
receivedTimeWarning = options.warning
# Function to check the header of a passed message and check it. If it matches the sent message
# the function checks the time it took to arrive and exits with the apropriate state. If the message does not
# match the sent message ID it is discarded.
def receive_callback(msg):
    """Check a received message against the one we sent and report timing.

    If the message id matches, close the connection and exit with a Nagios
    status based on the round-trip time; otherwise discard the message and
    keep polling via pull_message().

    Fixes: print statements converted to parenthesized calls (valid under
    both Python 2 and 3); the exact boundary timeDiff == receivedTimeWarning
    previously fell through all three branches after closing the channel.
    """
    recTime = time.time()
    recMessageID = msg.application_headers['messID']
    timeDiff = recTime - sendTime
    if recMessageID == messageID:
        amqpChan.close()
        amqpConn.close()
        if timeDiff > timeout:
            print("CRITICAL - Test message received in %s seconds|roundtrip=%s" % (timeDiff, timeDiff))
            sys.exit(2)
        if timeDiff > receivedTimeWarning:
            print("WARNING - Test message received in %s seconds|roundtrip=%s" % (timeDiff, timeDiff))
            sys.exit(1)
        print("OK - Test message received in %s seconds|roundtrip=%s" % (timeDiff, timeDiff))
        sys.exit(0)
    pull_message()
# Funtion to pull a single message from the queue and continue checking for messages until the timeout is reached
def pull_message():
    """Poll the queue until the test message arrives or `timeout` expires.

    Polls every 0.1 s with basic_get, acks anything received and hands it
    to receive_callback; exits CRITICAL (2) if nothing matched in time.
    (print converted to a parenthesized call for Python 2/3 compatibility.)
    """
    slept = 0
    sleepInterval = 0.1
    while slept < timeout:
        msg = amqpChan.basic_get(amqpQueue)
        if msg is not None:
            amqpChan.basic_ack(msg.delivery_tag)
            receive_callback(msg)
        time.sleep(sleepInterval)
        slept += sleepInterval
    print("Timeout (%s seconds) expired while waiting for test message." % timeout)
    amqpChan.close()
    amqpConn.close()
    sys.exit(2)
# Connect to the AMQP resource; a connection failure surfaces as an
# unhandled exception (treated as a failed check by the monitoring system).
amqpConn = amqp.Connection(host=amqpServer, userid=amqpUid, password=amqpPass,
                           virtual_host=amqpVhost, insist=False, ssl=amqpSsl)
amqpChan = amqpConn.channel()
amqpChan.queue_declare(queue=amqpQueue, durable=True, auto_delete=False)
amqpChan.exchange_declare(exchange=amqpQueue, type="direct", durable=True, auto_delete=False)
amqpChan.queue_bind(queue=amqpQueue, exchange=amqpQueue, routing_key=amqpQueue)
# Generate a random message id and publish a single non-persistent message.
messageID = str(random.randint(1, 1000000))
testMsg = amqp.Message(messageID, application_headers={'messID': messageID})
testMsg.properties["delivery_mode"] = 1
sendTime = time.time()
amqpChan.basic_publish(testMsg, exchange=amqpQueue, routing_key=amqpQueue)
pull_message()
import sys
import random
import time
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--host", action="store", type="string", dest="host", default="localhost")
parser.add_option("--port", action="store", type="int", dest="port", default=5672)
parser.add_option("--ssl", action="store_true", dest="ssl", default=False)
parser.add_option("--vhost", action="store", type="string", dest="vhost", default="/")
parser.add_option("--queue", action="store", type="string", dest="queue", default="monitoring_queue")
parser.add_option("--user", action="store", type="string", dest="user", default="guest")
parser.add_option("--password", action="store", type="string", dest="password", default="<PASSWORD>")
parser.add_option("--critical", action="store", type="float", dest="critical", metavar="SECONDS", default=4.0)
parser.add_option("--warning", action="store", type="float", dest="warning", metavar="SECONDS", default=2.0)
(options, args) = parser.parse_args(sys.argv)
# Connection details go here
amqpServer = "%s:%i" % (options.host, options.port)
amqpQueue = "%s" % options.queue
amqpVhost = options.vhost
amqpSsl = options.ssl
amqpUid = options.user
# Fix: read the password from the parsed options (the previous redacted
# placeholder was not valid Python).
amqpPass = options.password
# Number of seconds before message is considered timed out
timeout = options.critical
# Number of seconds before the received message is considered late and a warning is raised
receivedTimeWarning = options.warning
# Function to check the header of a passed message and check it. If it matches the sent message
# the function checks the time it took to arrive and exits with the apropriate state. If the message does not
# match the sent message ID it is discarded.
def receive_callback(msg):
    """Check a received message against the one we sent and report timing.

    If the message id matches, close the connection and exit with a Nagios
    status based on the round-trip time; otherwise discard the message and
    keep polling via pull_message().

    Fixes: print statements converted to parenthesized calls (valid under
    both Python 2 and 3); the exact boundary timeDiff == receivedTimeWarning
    previously fell through all three branches after closing the channel.
    """
    recTime = time.time()
    recMessageID = msg.application_headers['messID']
    timeDiff = recTime - sendTime
    if recMessageID == messageID:
        amqpChan.close()
        amqpConn.close()
        if timeDiff > timeout:
            print("CRITICAL - Test message received in %s seconds|roundtrip=%s" % (timeDiff, timeDiff))
            sys.exit(2)
        if timeDiff > receivedTimeWarning:
            print("WARNING - Test message received in %s seconds|roundtrip=%s" % (timeDiff, timeDiff))
            sys.exit(1)
        print("OK - Test message received in %s seconds|roundtrip=%s" % (timeDiff, timeDiff))
        sys.exit(0)
    pull_message()
# Funtion to pull a single message from the queue and continue checking for messages until the timeout is reached
def pull_message():
    """Poll the queue until the test message arrives or `timeout` expires.

    Polls every 0.1 s with basic_get, acks anything received and hands it
    to receive_callback; exits CRITICAL (2) if nothing matched in time.
    (print converted to a parenthesized call for Python 2/3 compatibility.)
    """
    slept = 0
    sleepInterval = 0.1
    while slept < timeout:
        msg = amqpChan.basic_get(amqpQueue)
        if msg is not None:
            amqpChan.basic_ack(msg.delivery_tag)
            receive_callback(msg)
        time.sleep(sleepInterval)
        slept += sleepInterval
    print("Timeout (%s seconds) expired while waiting for test message." % timeout)
    amqpChan.close()
    amqpConn.close()
    sys.exit(2)
# Connect to the AMQP resource; a connection failure surfaces as an
# unhandled exception (treated as a failed check by the monitoring system).
amqpConn = amqp.Connection(host=amqpServer, userid=amqpUid, password=amqpPass,
                           virtual_host=amqpVhost, insist=False, ssl=amqpSsl)
amqpChan = amqpConn.channel()
amqpChan.queue_declare(queue=amqpQueue, durable=True, auto_delete=False)
amqpChan.exchange_declare(exchange=amqpQueue, type="direct", durable=True, auto_delete=False)
amqpChan.queue_bind(queue=amqpQueue, exchange=amqpQueue, routing_key=amqpQueue)
# Generate a random message id and publish a single non-persistent message.
messageID = str(random.randint(1, 1000000))
testMsg = amqp.Message(messageID, application_headers={'messID': messageID})
testMsg.properties["delivery_mode"] = 1
sendTime = time.time()
amqpChan.basic_publish(testMsg, exchange=amqpQueue, routing_key=amqpQueue)
pull_message()
import unittest
import vcr
from shapely.geometry import shape
from stactools.core.utils.antimeridian import Strategy
from stactools.landsat.stac import create_stac_item
from tests.data import TEST_GEOMETRY_PATHS
class GeometryTest(unittest.TestCase):
    def _assert_rings_match(self, expected_ring, actual_ring):
        """Assert two polygon rings agree pointwise to 7 decimal places."""
        for expected_point, actual_point in zip(expected_ring, actual_ring):
            self.assertAlmostEqual(expected_point[0], actual_point[0], 7)
            self.assertAlmostEqual(expected_point[1], actual_point[1], 7)

    @staticmethod
    def _one_sided(longitudes):
        """True when all longitudes are non-negative or all non-positive."""
        return (all(lon >= 0 for lon in longitudes)
                or all(lon <= 0 for lon in longitudes))

    def test_stored_stac(self):
        """USGS STAC geometry is used when use_usgs_geometry=True and a STAC
        file exists in storage. Expected ring copied from the SR STAC file in
        the tests/data-files/assets2 directory.
        """
        xml_href = TEST_GEOMETRY_PATHS["stac_in_storage"]
        expected_ring = [[70.70487567768816, -80.24577066106241],
                         [75.274690570951, -81.80839588279174],
                         [63.466214893229875, -82.43020599776871],
                         [60.660355360178656, -80.76338417289233],
                         [70.70487567768816, -80.24577066106241]]
        item = create_stac_item(xml_href, use_usgs_geometry=True)
        self._assert_rings_match(expected_ring, item.geometry["coordinates"][0])

    @vcr.use_cassette(TEST_GEOMETRY_PATHS["vcr_cassette"])
    def test_api_stac(self):
        """USGS STAC geometry is used when the STAC does not exist in storage
        but is served by the USGS API. Expected ring copied from STAC
        generated by the USGS STAC API.
        """
        xml_href = TEST_GEOMETRY_PATHS["stac_not_in_storage"]
        expected_ring = [[-124.27364628436257, 48.508467268961375],
                         [-124.89607929858929, 46.80220745164398],
                         [-122.53800038880695, 46.37691124870954],
                         [-121.83985903460558, 48.078084372791],
                         [-124.27364628436257, 48.508467268961375]]
        item = create_stac_item(xml_href, use_usgs_geometry=True)
        self._assert_rings_match(expected_ring, item.geometry["coordinates"][0])

    def test_ang(self):
        """Geometry is generated from the "ANG.txt" file data when
        use_usgs_geometry=False. Expected ring copied from the Planetary
        Computer STAC API, which serves Items built from "ANG.txt" files.
        """
        xml_href = TEST_GEOMETRY_PATHS["stac_in_storage"]
        expected_ring = [[77.41721421, -81.41837295],
                         [65.95800182, -82.94593976],
                         [56.05168383, -81.25621974],
                         [67.44881125, -79.72178205],
                         [77.41721421, -81.41837295]]
        item = create_stac_item(xml_href, use_usgs_geometry=False)
        self._assert_rings_match(expected_ring, item.geometry["coordinates"][0])

    def test_antimeridian(self):
        """A scene spanning the antimeridian is normalized to one sign."""
        xml_href = TEST_GEOMETRY_PATHS["antimeridian"]
        crossing_ring = [[-179.70358951407547, 52.750507455036264],
                         [179.96672360880183, 52.00163609753924],
                         [-177.89334479610974, 50.62805205289558],
                         [-179.9847165338706, 51.002602948712465],
                         [-179.70358951407547, 52.750507455036264]]
        crossing_lons = [lon for lon, _lat in crossing_ring]
        item = create_stac_item(xml_href,
                                legacy_l8=False,
                                use_usgs_geometry=True,
                                antimeridian_strategy=Strategy.NORMALIZE)
        item_lons = [lon for lon, _lat in item.geometry["coordinates"][0]]
        # The raw ring mixes longitude signs; the normalized ring must not.
        self.assertFalse(self._one_sided(crossing_lons))
        self.assertTrue(self._one_sided(item_lons))

    def test_presplit_antimeridian_normalize(self):
        """An item whose geometry is already split along the antimeridian does
        not trigger the stactools antimeridian MultiPolygon value error under
        the NORMALIZE strategy.
        """
        xml_href = TEST_GEOMETRY_PATHS["presplit_antimeridian"]
        expected = shape({
            "type": "Polygon",
            "coordinates": [[[-180.09482763892856, 60.95119752303177],
                             [-180.0, 60.93687820884834],
                             [-176.7016565170453, 60.43881649233896],
                             [-175.51498801955913, 61.95528671380596],
                             [-179.06386088310478, 62.491727331163695],
                             [-180.0, 61.09289443379927],
                             [-180.09482763892856, 60.95119752303177]]]
        })
        item = create_stac_item(xml_href,
                                use_usgs_geometry=True,
                                legacy_l8=False,
                                antimeridian_strategy=Strategy.NORMALIZE)
        self.assertEqual(shape(item.geometry), expected)

    def test_presplit_antimeridian_split(self):
        """An item whose geometry is already split along the antimeridian does
        not trigger the stactools antimeridian MultiPolygon value error under
        the SPLIT strategy.
        """
        xml_href = TEST_GEOMETRY_PATHS["presplit_antimeridian"]
        expected = shape({
            "type": "MultiPolygon",
            "coordinates": [[[[180.0, 60.93687820884834],
                              [180.0, 61.09289443379927],
                              [179.90517236107144, 60.95119752303177],
                              [180.0, 60.93687820884834]]],
                            [[[-180.0, 61.09289443379927],
                              [-180.0, 60.93687820884834],
                              [-176.7016565170453, 60.43881649233896],
                              [-175.51498801955913, 61.95528671380596],
                              [-179.06386088310478, 62.491727331163695],
                              [-180.0, 61.09289443379927]]]]
        })
        item = create_stac_item(xml_href,
                                use_usgs_geometry=True,
                                legacy_l8=False,
                                antimeridian_strategy=Strategy.SPLIT)
        self.assertEqual(shape(item.geometry), expected)
import vcr
from shapely.geometry import shape
from stactools.core.utils.antimeridian import Strategy
from stactools.landsat.stac import create_stac_item
from tests.data import TEST_GEOMETRY_PATHS
class GeometryTest(unittest.TestCase):
    def _assert_rings_match(self, expected_ring, actual_ring):
        """Assert two polygon rings agree pointwise to 7 decimal places."""
        for expected_point, actual_point in zip(expected_ring, actual_ring):
            self.assertAlmostEqual(expected_point[0], actual_point[0], 7)
            self.assertAlmostEqual(expected_point[1], actual_point[1], 7)

    @staticmethod
    def _one_sided(longitudes):
        """True when all longitudes are non-negative or all non-positive."""
        return (all(lon >= 0 for lon in longitudes)
                or all(lon <= 0 for lon in longitudes))

    def test_stored_stac(self):
        """USGS STAC geometry is used when use_usgs_geometry=True and a STAC
        file exists in storage. Expected ring copied from the SR STAC file in
        the tests/data-files/assets2 directory.
        """
        xml_href = TEST_GEOMETRY_PATHS["stac_in_storage"]
        expected_ring = [[70.70487567768816, -80.24577066106241],
                         [75.274690570951, -81.80839588279174],
                         [63.466214893229875, -82.43020599776871],
                         [60.660355360178656, -80.76338417289233],
                         [70.70487567768816, -80.24577066106241]]
        item = create_stac_item(xml_href, use_usgs_geometry=True)
        self._assert_rings_match(expected_ring, item.geometry["coordinates"][0])

    @vcr.use_cassette(TEST_GEOMETRY_PATHS["vcr_cassette"])
    def test_api_stac(self):
        """USGS STAC geometry is used when the STAC does not exist in storage
        but is served by the USGS API. Expected ring copied from STAC
        generated by the USGS STAC API.
        """
        xml_href = TEST_GEOMETRY_PATHS["stac_not_in_storage"]
        expected_ring = [[-124.27364628436257, 48.508467268961375],
                         [-124.89607929858929, 46.80220745164398],
                         [-122.53800038880695, 46.37691124870954],
                         [-121.83985903460558, 48.078084372791],
                         [-124.27364628436257, 48.508467268961375]]
        item = create_stac_item(xml_href, use_usgs_geometry=True)
        self._assert_rings_match(expected_ring, item.geometry["coordinates"][0])

    def test_ang(self):
        """Geometry is generated from the "ANG.txt" file data when
        use_usgs_geometry=False. Expected ring copied from the Planetary
        Computer STAC API, which serves Items built from "ANG.txt" files.
        """
        xml_href = TEST_GEOMETRY_PATHS["stac_in_storage"]
        expected_ring = [[77.41721421, -81.41837295],
                         [65.95800182, -82.94593976],
                         [56.05168383, -81.25621974],
                         [67.44881125, -79.72178205],
                         [77.41721421, -81.41837295]]
        item = create_stac_item(xml_href, use_usgs_geometry=False)
        self._assert_rings_match(expected_ring, item.geometry["coordinates"][0])

    def test_antimeridian(self):
        """A scene spanning the antimeridian is normalized to one sign."""
        xml_href = TEST_GEOMETRY_PATHS["antimeridian"]
        crossing_ring = [[-179.70358951407547, 52.750507455036264],
                         [179.96672360880183, 52.00163609753924],
                         [-177.89334479610974, 50.62805205289558],
                         [-179.9847165338706, 51.002602948712465],
                         [-179.70358951407547, 52.750507455036264]]
        crossing_lons = [lon for lon, _lat in crossing_ring]
        item = create_stac_item(xml_href,
                                legacy_l8=False,
                                use_usgs_geometry=True,
                                antimeridian_strategy=Strategy.NORMALIZE)
        item_lons = [lon for lon, _lat in item.geometry["coordinates"][0]]
        # The raw ring mixes longitude signs; the normalized ring must not.
        self.assertFalse(self._one_sided(crossing_lons))
        self.assertTrue(self._one_sided(item_lons))

    def test_presplit_antimeridian_normalize(self):
        """An item whose geometry is already split along the antimeridian does
        not trigger the stactools antimeridian MultiPolygon value error under
        the NORMALIZE strategy.
        """
        xml_href = TEST_GEOMETRY_PATHS["presplit_antimeridian"]
        expected = shape({
            "type": "Polygon",
            "coordinates": [[[-180.09482763892856, 60.95119752303177],
                             [-180.0, 60.93687820884834],
                             [-176.7016565170453, 60.43881649233896],
                             [-175.51498801955913, 61.95528671380596],
                             [-179.06386088310478, 62.491727331163695],
                             [-180.0, 61.09289443379927],
                             [-180.09482763892856, 60.95119752303177]]]
        })
        item = create_stac_item(xml_href,
                                use_usgs_geometry=True,
                                legacy_l8=False,
                                antimeridian_strategy=Strategy.NORMALIZE)
        self.assertEqual(shape(item.geometry), expected)

    def test_presplit_antimeridian_split(self):
        """An item whose geometry is already split along the antimeridian does
        not trigger the stactools antimeridian MultiPolygon value error under
        the SPLIT strategy.
        """
        xml_href = TEST_GEOMETRY_PATHS["presplit_antimeridian"]
        expected = shape({
            "type": "MultiPolygon",
            "coordinates": [[[[180.0, 60.93687820884834],
                              [180.0, 61.09289443379927],
                              [179.90517236107144, 60.95119752303177],
                              [180.0, 60.93687820884834]]],
                            [[[-180.0, 61.09289443379927],
                              [-180.0, 60.93687820884834],
                              [-176.7016565170453, 60.43881649233896],
                              [-175.51498801955913, 61.95528671380596],
                              [-179.06386088310478, 62.491727331163695],
                              [-180.0, 61.09289443379927]]]]
        })
        item = create_stac_item(xml_href,
                                use_usgs_geometry=True,
                                legacy_l8=False,
                                antimeridian_strategy=Strategy.SPLIT)
        self.assertEqual(shape(item.geometry), expected)
import scipy
import scipy.sparse.linalg
import torch
import torch.nn as nn
from hodgeautograd import HodgeEigensystem
class HodgeNetModel(nn.Module):
    """Main HodgeNet model.

    Consumes a batch of meshes and produces learned spectral features per
    vertex, optionally pooled to triangles or to one feature per mesh.
    """

    def __init__(self, num_edge_features, num_triangle_features,
                 num_output_features=32, num_eigenvectors=64,
                 num_extra_eigenvectors=16, mesh_feature=False, min_star=1e-2,
                 resample_to_triangles=False, num_bdry_edge_features=None,
                 num_vector_dimensions=1):
        super().__init__()
        self.num_triangle_features = num_triangle_features
        self.hodgefunc = HodgeEigensystem.apply
        self.num_eigenvectors = num_eigenvectors
        self.num_extra_eigenvectors = num_extra_eigenvectors
        self.num_output_features = num_output_features
        self.min_star = min_star
        self.resample_to_triangles = resample_to_triangles
        self.mesh_feature = mesh_feature
        self.num_vector_dimensions = num_vector_dimensions

        # Each star network emits one flattened k x k block per element.
        flat_block_dim = num_vector_dimensions ** 2
        # Interior-edge features -> per-edge star-1 block.
        self.to_star1 = self._make_mlp(num_edge_features, 32, flat_block_dim)
        # Optional boundary-edge branch, built only when such features exist.
        if num_bdry_edge_features is not None:
            self.to_star1_bdry = self._make_mlp(
                num_bdry_edge_features, 32, flat_block_dim)
        else:
            self.to_star1_bdry = None
        # Triangle features -> per-triangle star-0 contribution.
        self.to_star0_tri = self._make_mlp(
            num_triangle_features, 32, flat_block_dim)
        # Scalar eigenvalue -> per-eigenvalue feature row (HKS/WKS-style).
        self.eigenvalue_to_matrix = self._make_mlp(
            1, num_output_features, num_output_features)

    @staticmethod
    def _make_mlp(in_features, hidden, out_features):
        """Four (Linear, BatchNorm1d, LeakyReLU) stages plus a linear head.

        Layer order matches the original inline Sequential definitions, so
        state-dict keys are unchanged.
        """
        layers = []
        width = in_features
        for _ in range(4):
            layers.extend([nn.Linear(width, hidden),
                           nn.BatchNorm1d(hidden),
                           nn.LeakyReLU()])
            width = hidden
        layers.append(nn.Linear(width, out_features))
        return nn.Sequential(*layers)

    def _spd_blocks(self, flat):
        """Reshape flat rows into k x k blocks and make them positive definite.

        Each block B becomes B @ B^T (semidefinite) plus min_star on the
        diagonal (strictly positive definite).
        """
        k = self.num_vector_dimensions
        blocks = flat.view(-1, k, k)
        blocks = torch.einsum('ijk,ilk->ijl', blocks, blocks)
        return blocks + torch.eye(k)[None].to(blocks) * self.min_star

    def gather_star0(self, mesh, star0_tri):
        """Compute the per-vertex star-0 block by summing over incident triangles."""
        num_vertices = mesh['vertices'].shape[0]
        accum = torch.zeros(num_vertices, star0_tri.shape[1]).to(star0_tri)
        # Scatter each triangle's contribution onto its three corner vertices.
        for corner in range(3):
            accum.index_add_(0, mesh['triangles'][:, corner], star0_tri)
        return self._spd_blocks(accum)

    def compute_mesh_eigenfunctions(self, mesh, star0, star1, bdry=False):
        """Compute eigenvalues/eigenvectors of the learned operator per mesh."""
        num_meshes = len(mesh)
        flat_inputs = []
        for m, s0, s1 in zip(mesh, star0, star1):
            differential = m['int_d01']
            if bdry:
                # Stack boundary rows under the interior differential.
                differential = scipy.sparse.vstack([differential, m['bdry_d01']])
            flat_inputs.extend([s0, s1, differential])
        outputs = self.hodgefunc(num_meshes, self.num_eigenvectors,
                                 self.num_extra_eigenvectors, *flat_inputs)
        # Outputs alternate (eigenvalues, eigenvectors) per mesh.
        eigenvalues = [outputs[2 * i] for i in range(num_meshes)]
        eigenvectors = [outputs[2 * i + 1] for i in range(num_meshes)]
        return eigenvalues, eigenvectors

    def forward(self, batch):
        """Return features for a batch of meshes, concatenated along dim 0."""
        num_meshes = len(batch)

        # Per-triangle star-0 predictions, split per mesh, gathered to vertices.
        tri_feats = torch.cat(
            [mesh['triangle_features'] for mesh in batch], dim=0)
        tri_counts = [mesh['triangles'].shape[0] for mesh in batch]
        star0_split = [
            self.gather_star0(mesh, chunk)
            for mesh, chunk in zip(
                batch, torch.split(self.to_star0_tri(tri_feats),
                                   tri_counts, dim=0))]

        # Interior-edge star-1 blocks.
        edge_feats = torch.cat(
            [mesh['int_edge_features'] for mesh in batch], dim=0)
        int_counts = [mesh['int_d01'].shape[0] for mesh in batch]
        star1_split = list(torch.split(
            self._spd_blocks(self.to_star1(edge_feats)), int_counts, dim=0))

        # Boundary-edge star-1 blocks, appended after the interior rows.
        if self.to_star1_bdry is not None:
            bdry_feats = torch.cat(
                [mesh['bdry_edge_features'] for mesh in batch], dim=0)
            bdry_counts = [mesh['bdry_d01'].shape[0] for mesh in batch]
            bdry_split = torch.split(
                self._spd_blocks(self.to_star1_bdry(bdry_feats)),
                bdry_counts, dim=0)
            star1_split = [torch.cat([interior, boundary], dim=0)
                           for interior, boundary
                           in zip(star1_split, bdry_split)]

        eigenvalues, eigenvectors = self.compute_mesh_eigenfunctions(
            batch, star0_split, star1_split,
            bdry=self.to_star1_bdry is not None)

        # Map every eigenvalue through the learned nonlinearity (HKS/WKS-like).
        processed = self.eigenvalue_to_matrix(
            torch.stack(eigenvalues).view(-1, 1)).view(
            num_meshes, -1, self.num_output_features)

        features = []
        for i in range(num_meshes):
            # Outer products of eigenvectors, weighted by the learned matrix.
            outer = torch.einsum(
                'ijk,ijl->ijkl', eigenvectors[i], eigenvectors[i])
            feat = torch.einsum(
                'ijkp,jl->ilkp', outer, processed[i]).flatten(start_dim=1)
            if self.resample_to_triangles:
                # Max-pool vertex features onto each triangle's corners.
                feat = feat[batch[i]['triangles']].max(1)[0]
            if self.mesh_feature:
                # Global max-pool to a single per-mesh feature row.
                feat = feat.max(0, keepdim=True)[0]
            features.append(feat)
        return torch.cat(features, dim=0)
import scipy.sparse.linalg
import torch
import torch.nn as nn
from hodgeautograd import HodgeEigensystem
class HodgeNetModel(nn.Module):
    """Main HodgeNet model.

    Consumes a batch of meshes and produces learned spectral features per
    vertex, optionally pooled to triangles or to one feature per mesh.
    """

    def __init__(self, num_edge_features, num_triangle_features,
                 num_output_features=32, num_eigenvectors=64,
                 num_extra_eigenvectors=16, mesh_feature=False, min_star=1e-2,
                 resample_to_triangles=False, num_bdry_edge_features=None,
                 num_vector_dimensions=1):
        super().__init__()
        self.num_triangle_features = num_triangle_features
        self.hodgefunc = HodgeEigensystem.apply
        self.num_eigenvectors = num_eigenvectors
        self.num_extra_eigenvectors = num_extra_eigenvectors
        self.num_output_features = num_output_features
        self.min_star = min_star
        self.resample_to_triangles = resample_to_triangles
        self.mesh_feature = mesh_feature
        self.num_vector_dimensions = num_vector_dimensions

        # Each star network emits one flattened k x k block per element.
        flat_block_dim = num_vector_dimensions ** 2
        # Interior-edge features -> per-edge star-1 block.
        self.to_star1 = self._make_mlp(num_edge_features, 32, flat_block_dim)
        # Optional boundary-edge branch, built only when such features exist.
        if num_bdry_edge_features is not None:
            self.to_star1_bdry = self._make_mlp(
                num_bdry_edge_features, 32, flat_block_dim)
        else:
            self.to_star1_bdry = None
        # Triangle features -> per-triangle star-0 contribution.
        self.to_star0_tri = self._make_mlp(
            num_triangle_features, 32, flat_block_dim)
        # Scalar eigenvalue -> per-eigenvalue feature row (HKS/WKS-style).
        self.eigenvalue_to_matrix = self._make_mlp(
            1, num_output_features, num_output_features)

    @staticmethod
    def _make_mlp(in_features, hidden, out_features):
        """Four (Linear, BatchNorm1d, LeakyReLU) stages plus a linear head.

        Layer order matches the original inline Sequential definitions, so
        state-dict keys are unchanged.
        """
        layers = []
        width = in_features
        for _ in range(4):
            layers.extend([nn.Linear(width, hidden),
                           nn.BatchNorm1d(hidden),
                           nn.LeakyReLU()])
            width = hidden
        layers.append(nn.Linear(width, out_features))
        return nn.Sequential(*layers)

    def _spd_blocks(self, flat):
        """Reshape flat rows into k x k blocks and make them positive definite.

        Each block B becomes B @ B^T (semidefinite) plus min_star on the
        diagonal (strictly positive definite).
        """
        k = self.num_vector_dimensions
        blocks = flat.view(-1, k, k)
        blocks = torch.einsum('ijk,ilk->ijl', blocks, blocks)
        return blocks + torch.eye(k)[None].to(blocks) * self.min_star

    def gather_star0(self, mesh, star0_tri):
        """Compute the per-vertex star-0 block by summing over incident triangles."""
        num_vertices = mesh['vertices'].shape[0]
        accum = torch.zeros(num_vertices, star0_tri.shape[1]).to(star0_tri)
        # Scatter each triangle's contribution onto its three corner vertices.
        for corner in range(3):
            accum.index_add_(0, mesh['triangles'][:, corner], star0_tri)
        return self._spd_blocks(accum)

    def compute_mesh_eigenfunctions(self, mesh, star0, star1, bdry=False):
        """Compute eigenvalues/eigenvectors of the learned operator per mesh."""
        num_meshes = len(mesh)
        flat_inputs = []
        for m, s0, s1 in zip(mesh, star0, star1):
            differential = m['int_d01']
            if bdry:
                # Stack boundary rows under the interior differential.
                differential = scipy.sparse.vstack([differential, m['bdry_d01']])
            flat_inputs.extend([s0, s1, differential])
        outputs = self.hodgefunc(num_meshes, self.num_eigenvectors,
                                 self.num_extra_eigenvectors, *flat_inputs)
        # Outputs alternate (eigenvalues, eigenvectors) per mesh.
        eigenvalues = [outputs[2 * i] for i in range(num_meshes)]
        eigenvectors = [outputs[2 * i + 1] for i in range(num_meshes)]
        return eigenvalues, eigenvectors

    def forward(self, batch):
        """Return features for a batch of meshes, concatenated along dim 0."""
        num_meshes = len(batch)

        # Per-triangle star-0 predictions, split per mesh, gathered to vertices.
        tri_feats = torch.cat(
            [mesh['triangle_features'] for mesh in batch], dim=0)
        tri_counts = [mesh['triangles'].shape[0] for mesh in batch]
        star0_split = [
            self.gather_star0(mesh, chunk)
            for mesh, chunk in zip(
                batch, torch.split(self.to_star0_tri(tri_feats),
                                   tri_counts, dim=0))]

        # Interior-edge star-1 blocks.
        edge_feats = torch.cat(
            [mesh['int_edge_features'] for mesh in batch], dim=0)
        int_counts = [mesh['int_d01'].shape[0] for mesh in batch]
        star1_split = list(torch.split(
            self._spd_blocks(self.to_star1(edge_feats)), int_counts, dim=0))

        # Boundary-edge star-1 blocks, appended after the interior rows.
        if self.to_star1_bdry is not None:
            bdry_feats = torch.cat(
                [mesh['bdry_edge_features'] for mesh in batch], dim=0)
            bdry_counts = [mesh['bdry_d01'].shape[0] for mesh in batch]
            bdry_split = torch.split(
                self._spd_blocks(self.to_star1_bdry(bdry_feats)),
                bdry_counts, dim=0)
            star1_split = [torch.cat([interior, boundary], dim=0)
                           for interior, boundary
                           in zip(star1_split, bdry_split)]

        eigenvalues, eigenvectors = self.compute_mesh_eigenfunctions(
            batch, star0_split, star1_split,
            bdry=self.to_star1_bdry is not None)

        # Map every eigenvalue through the learned nonlinearity (HKS/WKS-like).
        processed = self.eigenvalue_to_matrix(
            torch.stack(eigenvalues).view(-1, 1)).view(
            num_meshes, -1, self.num_output_features)

        features = []
        for i in range(num_meshes):
            # Outer products of eigenvectors, weighted by the learned matrix.
            outer = torch.einsum(
                'ijk,ijl->ijkl', eigenvectors[i], eigenvectors[i])
            feat = torch.einsum(
                'ijkp,jl->ilkp', outer, processed[i]).flatten(start_dim=1)
            if self.resample_to_triangles:
                # Max-pool vertex features onto each triangle's corners.
                feat = feat[batch[i]['triangles']].max(1)[0]
            if self.mesh_feature:
                # Global max-pool to a single per-mesh feature row.
                feat = feat.max(0, keepdim=True)[0]
            features.append(feat)
        return torch.cat(features, dim=0)
from datetime import datetime
import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from receipt_tracker.repo.models import Seller, Buyer, Receipt
from receipt_tracker.repo.sql_repo import Base
from receipt_tracker.repo.sql_repo import SQLRepo
@pytest.fixture(scope='session')
def db_session_empty():
    """Session-scoped SQLAlchemy session on a fresh in-memory SQLite DB."""
    engine = create_engine('sqlite:///:memory:')
    Base.metadata.create_all(engine)
    factory = sessionmaker(bind=engine)
    session = factory()
    yield session
    session.close()
@pytest.fixture(scope='function')
def db_data_init():
    """Fresh, unsaved Buyer and Seller rows for seeding a test database."""
    buyer_names = ['<NAME>', '<NAME>', '<NAME>']
    seller_names = ['Steam', '<NAME>', 'Amazon',
                    'Always Clean Coin Laundry', 'Eagle Dynamics']
    return ([Buyer(name=name) for name in buyer_names]
            + [Seller(name=name) for name in seller_names])
@pytest.fixture(scope='function')
def db_data_receipts():
    """Fresh, unsaved Receipt rows referencing the seeded buyers/sellers."""
    specs = [
        dict(date=datetime(2020, 8, 16), seller_id=1, total=9.67,
             buyer_id=1, description='Steam game'),
        dict(date=datetime(2020, 8, 17), seller_id=2, total=17.86,
             buyer_id=1, description='Groceries'),
        dict(date=datetime(2020, 8, 18), seller_id=3, total=57.36,
             buyer_id=2, description='Random amazon purchases'),
        dict(date=datetime(2020, 8, 19), seller_id=4, total=2.50,
             buyer_id=2),
    ]
    return [Receipt(**spec) for spec in specs]
@pytest.fixture(scope='function')
def db_session(db_session_empty, db_data_init, db_data_receipts):
    """Session pre-populated with buyers/sellers and receipts; cleaned up after.

    Fixes over the original: delete child Receipt rows before their parent
    Buyer/Seller rows (the original order breaks as soon as foreign-key
    enforcement is enabled), and commit the cleanup so it is durable for the
    next test sharing the session-scoped session.
    """
    db_session_empty.add_all(db_data_init)
    db_session_empty.commit()
    db_session_empty.add_all(db_data_receipts)
    db_session_empty.commit()
    yield db_session_empty
    # Teardown: children first, then parents, then persist the deletes.
    db_session_empty.query(Receipt).delete()
    db_session_empty.query(Buyer).delete()
    db_session_empty.query(Seller).delete()
    db_session_empty.commit()
@pytest.fixture(scope='function')
def repo(db_data_init, db_data_receipts):
    """SQLRepo backed by an in-memory SQLite DB, pre-seeded with test data."""
    settings = {'sqlalchemy.url': 'sqlite:///:memory:'}
    sql_repo = SQLRepo(settings)
    session = sql_repo.init_db()
    session.add_all(list(db_data_init) + list(db_data_receipts))
    session.commit()
    yield sql_repo
    session.remove()
import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from receipt_tracker.repo.models import Seller, Buyer, Receipt
from receipt_tracker.repo.sql_repo import Base
from receipt_tracker.repo.sql_repo import SQLRepo
@pytest.fixture(scope='session')
def db_session_empty():
    """Session-scoped SQLAlchemy session on a fresh in-memory SQLite DB."""
    engine = create_engine('sqlite:///:memory:')
    Base.metadata.create_all(engine)
    factory = sessionmaker(bind=engine)
    session = factory()
    yield session
    session.close()
@pytest.fixture(scope='function')
def db_data_init():
    """Fresh, unsaved Buyer and Seller rows for seeding a test database."""
    buyer_names = ['<NAME>', '<NAME>', '<NAME>']
    seller_names = ['Steam', '<NAME>', 'Amazon',
                    'Always Clean Coin Laundry', 'Eagle Dynamics']
    return ([Buyer(name=name) for name in buyer_names]
            + [Seller(name=name) for name in seller_names])
@pytest.fixture(scope='function')
def db_data_receipts():
    """Fresh, unsaved Receipt rows referencing the seeded buyers/sellers."""
    specs = [
        dict(date=datetime(2020, 8, 16), seller_id=1, total=9.67,
             buyer_id=1, description='Steam game'),
        dict(date=datetime(2020, 8, 17), seller_id=2, total=17.86,
             buyer_id=1, description='Groceries'),
        dict(date=datetime(2020, 8, 18), seller_id=3, total=57.36,
             buyer_id=2, description='Random amazon purchases'),
        dict(date=datetime(2020, 8, 19), seller_id=4, total=2.50,
             buyer_id=2),
    ]
    return [Receipt(**spec) for spec in specs]
@pytest.fixture(scope='function')
def db_session(db_session_empty, db_data_init, db_data_receipts):
    """Session pre-populated with buyers/sellers and receipts; cleaned up after.

    Fixes over the original: delete child Receipt rows before their parent
    Buyer/Seller rows (the original order breaks as soon as foreign-key
    enforcement is enabled), and commit the cleanup so it is durable for the
    next test sharing the session-scoped session.
    """
    db_session_empty.add_all(db_data_init)
    db_session_empty.commit()
    db_session_empty.add_all(db_data_receipts)
    db_session_empty.commit()
    yield db_session_empty
    # Teardown: children first, then parents, then persist the deletes.
    db_session_empty.query(Receipt).delete()
    db_session_empty.query(Buyer).delete()
    db_session_empty.query(Seller).delete()
    db_session_empty.commit()
@pytest.fixture(scope='function')
def repo(db_data_init, db_data_receipts):
    """SQLRepo backed by an in-memory SQLite DB, pre-seeded with test data."""
    settings = {'sqlalchemy.url': 'sqlite:///:memory:'}
    sql_repo = SQLRepo(settings)
    session = sql_repo.init_db()
    session.add_all(list(db_data_init) + list(db_data_receipts))
    session.commit()
    yield sql_repo
    session.remove()
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from .managers import UserManager
from django.conf import settings
class User(AbstractBaseUser, PermissionsMixin):
    """Custom auth user logging in by `userid`; `name` and `email` required."""
    objects = UserManager()
    # NOTE(review): `objects.name` reads an attribute off a fresh UserManager
    # instance before the manager is bound to the model; it is unlikely to be
    # a meaningful per-row default -- confirm the intended default value.
    userid = models.CharField(default=objects.name,
                              max_length=20,
                              null=True,
                              unique=True)
    email = models.EmailField(max_length=255, unique=True)
    name = models.CharField(max_length=20, null=False, unique=True)
    is_active = models.BooleanField(default=True)
    is_admin = models.BooleanField(default=False)
    is_superuser = models.BooleanField(default=False)
    is_staff = models.BooleanField(default=False)
    date_joined = models.DateTimeField(auto_now_add=True)
    # Authenticate by `userid`; `name` and `email` are prompted for by
    # `createsuperuser`.
    USERNAME_FIELD = 'userid'
    REQUIRED_FIELDS = ['name', 'email']
    def __str__(self):
        return self.name
class Writing(models.Model):
    """An article: title, body text, category, photo URL and a scrap counter."""
    # Original comment (Korean): "작성한사람이 없다" = "there is no author" --
    # Writing has no FK to its author; `writer` is a free-form CharField.
    # NOTE(review): assigning UserManager() to a non-user model rebinds that
    # manager to Writing -- confirm this is intentional.
    objects = UserManager()
    title = models.CharField(max_length=100, verbose_name='기사 제목')
    writer = models.CharField(default=objects.name, max_length=20, null=True)
    pub_date = models.DateTimeField(default=timezone.now, verbose_name='작성일')
    scrap = models.IntegerField(default=0, verbose_name='스크랩 수')
    text = models.CharField(max_length=1000, verbose_name='기사 내용')
    category = models.CharField(default='', max_length=50, verbose_name='카테고리')
    photo = models.CharField(default='', max_length=500, verbose_name='사진 url')
    @property
    def scrap_update(self):
        # Side-effecting property: increments and saves the scrap count on
        # attribute *read*. NOTE(review): a plain method would be clearer, but
        # the property form is kept to preserve the caller-visible interface.
        self.scrap += 1
        self.save()
    def __str__(self):
        return self.title
    class Meta:
        # Newest articles first.
        ordering = ['-pub_date']
class ScrapList(models.Model):
    """A user's scrap (bookmark) of an article, denormalized from Writing."""
    objects = UserManager()
    # NOTE(review): `objects.name` default -- same concern as on User.userid.
    user_info = models.CharField(default=objects.name,
                                 max_length=20,
                                 verbose_name='사용자 id')
    # user_scrap_id = models.IntegerField(default=0, verbose_name='스크랩 id')
    title = models.CharField(max_length=100, verbose_name='기사 제목')
    # `article_id` is a plain integer, not a ForeignKey to Writing.
    article_id = models.IntegerField(default=0, verbose_name='기사 번호')
    category = models.CharField(default='', max_length=50, verbose_name='카테고리')
    writer = models.CharField(default=objects.name, max_length=20, null=True)
    scrap_date = models.DateTimeField(default=timezone.now,
                                      verbose_name='스크랩 날짜')
    scrap = models.IntegerField(default=0, verbose_name='스크랩 수')
    def __str__(self):
        return self.title
from django.utils import timezone
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from .managers import UserManager
from django.conf import settings
class User(AbstractBaseUser, PermissionsMixin):
    """Custom auth user logging in by `userid`; `name` and `email` required."""
    objects = UserManager()
    # NOTE(review): `objects.name` reads an attribute off a fresh UserManager
    # instance before the manager is bound to the model; it is unlikely to be
    # a meaningful per-row default -- confirm the intended default value.
    userid = models.CharField(default=objects.name,
                              max_length=20,
                              null=True,
                              unique=True)
    email = models.EmailField(max_length=255, unique=True)
    name = models.CharField(max_length=20, null=False, unique=True)
    is_active = models.BooleanField(default=True)
    is_admin = models.BooleanField(default=False)
    is_superuser = models.BooleanField(default=False)
    is_staff = models.BooleanField(default=False)
    date_joined = models.DateTimeField(auto_now_add=True)
    # Authenticate by `userid`; `name` and `email` are prompted for by
    # `createsuperuser`.
    USERNAME_FIELD = 'userid'
    REQUIRED_FIELDS = ['name', 'email']
    def __str__(self):
        return self.name
class Writing(models.Model):
    """An article: title, body text, category, photo URL and a scrap counter."""
    # Original comment (Korean): "작성한사람이 없다" = "there is no author" --
    # Writing has no FK to its author; `writer` is a free-form CharField.
    # NOTE(review): assigning UserManager() to a non-user model rebinds that
    # manager to Writing -- confirm this is intentional.
    objects = UserManager()
    title = models.CharField(max_length=100, verbose_name='기사 제목')
    writer = models.CharField(default=objects.name, max_length=20, null=True)
    pub_date = models.DateTimeField(default=timezone.now, verbose_name='작성일')
    scrap = models.IntegerField(default=0, verbose_name='스크랩 수')
    text = models.CharField(max_length=1000, verbose_name='기사 내용')
    category = models.CharField(default='', max_length=50, verbose_name='카테고리')
    photo = models.CharField(default='', max_length=500, verbose_name='사진 url')
    @property
    def scrap_update(self):
        # Side-effecting property: increments and saves the scrap count on
        # attribute *read*. NOTE(review): a plain method would be clearer, but
        # the property form is kept to preserve the caller-visible interface.
        self.scrap += 1
        self.save()
    def __str__(self):
        return self.title
    class Meta:
        # Newest articles first.
        ordering = ['-pub_date']
class ScrapList(models.Model):
    """A user's scrap (bookmark) of an article, denormalized from Writing."""
    objects = UserManager()
    # NOTE(review): `objects.name` default -- same concern as on User.userid.
    user_info = models.CharField(default=objects.name,
                                 max_length=20,
                                 verbose_name='사용자 id')
    # user_scrap_id = models.IntegerField(default=0, verbose_name='스크랩 id')
    title = models.CharField(max_length=100, verbose_name='기사 제목')
    # `article_id` is a plain integer, not a ForeignKey to Writing.
    article_id = models.IntegerField(default=0, verbose_name='기사 번호')
    category = models.CharField(default='', max_length=50, verbose_name='카테고리')
    writer = models.CharField(default=objects.name, max_length=20, null=True)
    scrap_date = models.DateTimeField(default=timezone.now,
                                      verbose_name='스크랩 날짜')
    scrap = models.IntegerField(default=0, verbose_name='스크랩 수')
    def __str__(self):
        return self.title
import subprocess
from pyprobe.sensors.pegasus.controller import PegasusControllerParser
from pyprobe.sensors.process_helper import get_outputs_of_process
__author__ = '<NAME>'
def reformat_smart_values(data=None):
    """
    Reformat Pegasus SMART output so it matches regular smartctl output.

    The Pegasus tool wraps every logical line across two physical lines and
    inserts "======" separator lines that plain smartctl does not emit; this
    re-joins the wrapped lines, drops the separators, and trims any columns
    beyond the ten that the smartctl parser expects.

    :param data: raw promiseutil SMART output.
    :type data: str | None
    :return: content compatible with the smartctl parser, or None for
        empty/missing input.
    :rtype: str | None
    """
    if data is None or len(data.strip()) == 0:
        return None
    lines = data.splitlines()
    body_start = _determine_start(lines)
    # Keep the preamble but drop the two separator lines, and re-join the
    # wrapped two-line attribute-table header into one line.
    result = lines[0:body_start - 4:]
    result.append("{}{}".format(lines[body_start - 3], lines[body_start - 2]))
    body = lines[body_start::]
    # Attribute rows are wrapped across pairs of lines; re-join each pair.
    # (Was `xrange`, which exists only on Python 2 -- `range` is a drop-in.)
    for l in range(0, len(body), 2):
        new_line = "{}{}".format(body[l], body[l + 1]) if l < len(body) - 1 else body[l]
        # Strip any extra columns after the ten regular smartctl columns
        # (e.g. lifetime min/max temperature) so later parsing is not confused.
        cols = new_line.split()
        if len(cols) > 10:
            result.append(' '.join(cols[0:10:]))
        elif len(new_line.strip()) > 0:
            result.append(new_line)
    return "\n".join(result) + "\n"


def _determine_start(lines):
    """
    Determine the index of the first SMART attribute line.

    The attribute section starts directly after the second "======"
    separator line.

    :param lines: the input split into lines.
    :type lines: list[str]
    :return: index of the first attribute line, or None when fewer than two
        separator lines are present.
    :rtype: int | None
    """
    separators_seen = 0
    for index, line in enumerate(lines):
        if line.startswith("======"):
            separators_seen += 1
            if separators_seen == 2:
                return index + 1
    return None
def determine_executable(configuration):
return configuration.get('executable', "/usr/bin/promiseutil")
def determine_controllers(configuration):
"""
:type configuration: dict[str, str]
:return: a list of controllers
:rtype: list[PegasusController] | None
"""
executable = determine_executable(configuration)
proc = subprocess.Popen("LC_ALL=C {0} -C spath".format(executable), shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_, out = get_outputs_of_process(proc)
if proc.returncode != 0:
return []
parser = PegasusControllerParser(out)
return parser.controllers | src/pyprobe/sensors/pegasus/helper.py | import subprocess
from pyprobe.sensors.pegasus.controller import PegasusControllerParser
from pyprobe.sensors.process_helper import get_outputs_of_process
__author__ = '<NAME>'
def reformat_smart_values(data=None):
"""
Reformats the SMART output of the Pegasus device so that it is compatible with the Parser for smartctl output.
This is necessary, because the Pegasus device has wrapped lines and extra content that regular smartctl does not
have.
:param data: the input.
:type data: str | None
:return: content that should be compatible with smartctl
:rtype: str | None
"""
if data is None or len(data.strip()) == 0:
return None
lines = data.splitlines()
""" :type: list[str] """
body_start = _determine_start(lines)
# remove separator lines as they don't appear in regular smartctl outputs so that the parser does not get
# confused
result = lines[0:body_start - 4:]
result.append("{}{}".format(lines[body_start - 3], lines[body_start - 2]))
body = lines[body_start::]
for l in xrange(0, len(body), 2):
new_line = "{}{}".format(body[l], body[l + 1]) if l < len(body) - 1 else body[l]
# some lines might have additional information after the regular smartctl columns (e.g. lifetime min/max
# temperature. These should be stripped for latter processing.
cols = new_line.split()
if len(cols) > 10:
result.append(' '.join(cols[0:10:]))
elif len(new_line.strip()) > 0:
result.append(new_line)
return "\n".join(result) + "\n"
def _determine_start(lines):
"""
Determines where the section listing all SMART attributes begins.
:param lines: the input split into lines.
:type lines: list[str]
:return: the index of the first attribute.
:rtype: int
"""
cnt = 0
for idx, val in enumerate(lines):
if lines[idx].startswith("======"):
cnt += 1
if cnt == 2:
return idx + 1
def determine_executable(configuration):
return configuration.get('executable', "/usr/bin/promiseutil")
def determine_controllers(configuration):
"""
:type configuration: dict[str, str]
:return: a list of controllers
:rtype: list[PegasusController] | None
"""
executable = determine_executable(configuration)
proc = subprocess.Popen("LC_ALL=C {0} -C spath".format(executable), shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_, out = get_outputs_of_process(proc)
if proc.returncode != 0:
return []
parser = PegasusControllerParser(out)
return parser.controllers | 0.603465 | 0.322153 |
import torch as t
import torch.nn as nn
import torch.nn.functional as F
from model.layer import VGG19featureLayer
from functools import reduce
class WGANLoss(nn.Module):
def __init__(self):
super(WGANLoss, self).__init__()
def __call__(self, input, target):
d_loss = (input - target).mean()
g_loss = -(input.mean())
return {'g_loss': g_loss, 'd_loss': d_loss}
class IDMRFLoss(nn.Module):
def __init__(self, featureLayer=VGG19featureLayer):
super(IDMRFLoss, self).__init__()
self.featureLayer = featureLayer()
self.style_layers = {'relu3_2': 1.0, 'relu4_2': 1.0}
self.content_layers = {'relu4_2': 1.0}
self.bias = 1.0
self.nn_stretch_sigma = 0.5
self.l_s = 1.0
self.l_c = 1.0
def patch_extraction(self, featureMaps):
size = 1
stride = 1
patches_as_depth_vectors = featureMaps.unfold(2, size, stride).unfold(3, size, stride)
self.patches_OIHW = patches_as_depth_vectors.permute(0, 2, 3, 1, 4, 5)
dims = self.patches_OIHW.size()
self.patches_OIHW = self.patches_OIHW.view(-1, dims[3], dims[4], dims[5])
return self.patches_OIHW
def exp_norm_relative_dist(self, relative):
dist = t.exp((self.bias - relative)/self.nn_stretch_sigma)
self.cs_NCHW = dist / t.sum(dist, dim=1, keepdim=True)
return self.cs_NCHW
def mrf_loss(self, gen, target):
meanTemp = t.mean(target, 1, keepdim=True)
gen_feats = gen - meanTemp
gen_norm = t.norm(gen_feats, p=2, dim=1, keepdim=True)
target_feats= target - meanTemp
target_norm = t.norm(target_feats, p=2, dim=1, keepdim=True)
gen_normalized = gen_feats / gen_norm
target_normalized = target_feats / target_norm
cosine_dist_l = []
BatchSize = target.size(0)
for i in range(BatchSize):
target_feat_i = target_normalized[i:i+1, :, :, :]
gen_feat_i = gen_normalized[i:i+1, :, :, :]
patches_OIHW = self.patch_extraction(target_feat_i)
cosine_dist_l.append(F.conv2d(gen_feat_i, patches_OIHW))
cosine_dist = - (t.cat(cosine_dist_l, dim=0) - 1) / 2
relative_dist = (cosine_dist) / (t.min(cosine_dist, dim=1, keepdim=True)[0] + 1e-5)
rela_dist = self.exp_norm_relative_dist(relative_dist)
dims_div_mrf = rela_dist.size()
k_max_nc = t.max(rela_dist.view(dims_div_mrf[0], dims_div_mrf[1], -1), dim=2)[0]
div_mrf = t.mean(k_max_nc, dim=1)
div_mrf_sum = t.sum(-t.log(div_mrf))
return div_mrf_sum
def forward(self, gen, target):
gen_feats = self.featureLayer(gen)
tar_feats = self.featureLayer(target)
style_loss_list=[]
for layer in self.style_layers:
style_loss_list.append(self.style_layers[layer] * self.mrf_loss(gen_feats[layer], tar_feats[layer]))
self.style_loss = reduce(lambda x, y: x+y, style_loss_list) * self.l_s
content_loss_list=[]
for layer in self.content_layers:
content_loss_list.append(self.content_layers[layer] * self.mrf_loss(gen_feats[layer], tar_feats[layer]))
self.content_loss = reduce(lambda x, y: x+y, content_loss_list) * self.l_c
return self.style_loss + self.content_loss | Image Inpainting/model/loss.py | import torch as t
import torch.nn as nn
import torch.nn.functional as F
from model.layer import VGG19featureLayer
from functools import reduce
class WGANLoss(nn.Module):
def __init__(self):
super(WGANLoss, self).__init__()
def __call__(self, input, target):
d_loss = (input - target).mean()
g_loss = -(input.mean())
return {'g_loss': g_loss, 'd_loss': d_loss}
class IDMRFLoss(nn.Module):
def __init__(self, featureLayer=VGG19featureLayer):
super(IDMRFLoss, self).__init__()
self.featureLayer = featureLayer()
self.style_layers = {'relu3_2': 1.0, 'relu4_2': 1.0}
self.content_layers = {'relu4_2': 1.0}
self.bias = 1.0
self.nn_stretch_sigma = 0.5
self.l_s = 1.0
self.l_c = 1.0
def patch_extraction(self, featureMaps):
size = 1
stride = 1
patches_as_depth_vectors = featureMaps.unfold(2, size, stride).unfold(3, size, stride)
self.patches_OIHW = patches_as_depth_vectors.permute(0, 2, 3, 1, 4, 5)
dims = self.patches_OIHW.size()
self.patches_OIHW = self.patches_OIHW.view(-1, dims[3], dims[4], dims[5])
return self.patches_OIHW
def exp_norm_relative_dist(self, relative):
dist = t.exp((self.bias - relative)/self.nn_stretch_sigma)
self.cs_NCHW = dist / t.sum(dist, dim=1, keepdim=True)
return self.cs_NCHW
def mrf_loss(self, gen, target):
meanTemp = t.mean(target, 1, keepdim=True)
gen_feats = gen - meanTemp
gen_norm = t.norm(gen_feats, p=2, dim=1, keepdim=True)
target_feats= target - meanTemp
target_norm = t.norm(target_feats, p=2, dim=1, keepdim=True)
gen_normalized = gen_feats / gen_norm
target_normalized = target_feats / target_norm
cosine_dist_l = []
BatchSize = target.size(0)
for i in range(BatchSize):
target_feat_i = target_normalized[i:i+1, :, :, :]
gen_feat_i = gen_normalized[i:i+1, :, :, :]
patches_OIHW = self.patch_extraction(target_feat_i)
cosine_dist_l.append(F.conv2d(gen_feat_i, patches_OIHW))
cosine_dist = - (t.cat(cosine_dist_l, dim=0) - 1) / 2
relative_dist = (cosine_dist) / (t.min(cosine_dist, dim=1, keepdim=True)[0] + 1e-5)
rela_dist = self.exp_norm_relative_dist(relative_dist)
dims_div_mrf = rela_dist.size()
k_max_nc = t.max(rela_dist.view(dims_div_mrf[0], dims_div_mrf[1], -1), dim=2)[0]
div_mrf = t.mean(k_max_nc, dim=1)
div_mrf_sum = t.sum(-t.log(div_mrf))
return div_mrf_sum
def forward(self, gen, target):
gen_feats = self.featureLayer(gen)
tar_feats = self.featureLayer(target)
style_loss_list=[]
for layer in self.style_layers:
style_loss_list.append(self.style_layers[layer] * self.mrf_loss(gen_feats[layer], tar_feats[layer]))
self.style_loss = reduce(lambda x, y: x+y, style_loss_list) * self.l_s
content_loss_list=[]
for layer in self.content_layers:
content_loss_list.append(self.content_layers[layer] * self.mrf_loss(gen_feats[layer], tar_feats[layer]))
self.content_loss = reduce(lambda x, y: x+y, content_loss_list) * self.l_c
return self.style_loss + self.content_loss | 0.907114 | 0.426381 |
import numpy as np
import cv2 as cv
import glob
import matplotlib.pyplot as plt
import sys
from PIL import Image
import argparse
import os
# convenience code to annotate calibration points by clicking
# python localization.py dataset/beach/map.png dataset/beach/calib_map.txt --click
if '--click' in sys.argv:
input_fname = sys.argv[1]
output_fname = sys.argv[2]
img = cv.imread(input_fname)
img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
fig = plt.figure()
clicked_points = []
def update():
plt.clf()
plt.imshow(img, cmap='gray')
plt.plot([p[0] for p in clicked_points], [p[1] for p in clicked_points], 'rx')
fig.canvas.draw()
def onclick(event):
x,y = int(np.round(event.xdata)), int(np.round(event.ydata))
print('clicked',x,y)
clicked_points.append([x, y])
update()
def onkey(event):
if event.key==' ':
np.savetxt(output_fname, clicked_points)
sys.exit(1)
fig.canvas.mpl_connect('button_press_event', onclick)
fig.canvas.mpl_connect('key_press_event', onkey)
update()
plt.show()
sys.exit(1)
# code to project 2D detections on to an overhead map
parser = argparse.ArgumentParser()
parser.add_argument("--datasets", type=str, default="beach", required=True, help="Dataset directory name")
parser.add_argument( '--map', type=str, required=True, help='satellite image')
parser.add_argument("--gridSize", type=int, default=10, help="size of each square in the calibration grid")
parser.add_argument("--stepSize", type=int, default=1, help="density of drawing calibration grid")
flags = parser.parse_args()
# visualize calibration points in overhead map
plt.figure()
map_ax = plt.gca()
# oriented bounding box of calibration area in overhead map
calib_map = flags.map.replace('.png', '.txt')
calib_map = np.loadtxt(calib_map)
map_origin = calib_map[0]
map_x_vector = calib_map[1] - calib_map[0]
map_y_vector = calib_map[2] - calib_map[0]
maxX = 330
maxY = 130
x = 0
while x <= maxX:
p1 = map_origin + 1.0 * x / maxX * map_x_vector
p2 = map_origin + map_y_vector + 1.0 * x / maxX * map_x_vector
plt.plot([p1[0], p2[0]], [p1[1], p2[1]], 'k', linewidth=1, alpha=0.5)
x += flags.gridSize
y = 0
while y <= maxY:
p1 = map_origin + 1.0 * y / maxY * map_y_vector
p2 = map_origin + map_x_vector + 1.0 * y / maxY * map_y_vector
plt.plot([p1[0], p2[0]], [p1[1], p2[1]], 'k', linewidth=1, alpha=0.5)
y += flags.gridSize
# image of overhead map / satellite image
map_fname = flags.map
map_np = np.array(Image.open(map_fname))
plt.imshow(map_np)
plt.annotate('(%d,%d)'%(0,0), xy=(calib_map[0,0], calib_map[0,1]), xytext=(20, 20), textcoords='offset points', ha='right', va='bottom',
bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5), arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0'))
plt.annotate('(%d,%d)'%(maxX,0), xy=(calib_map[1,0], calib_map[1,1]), xytext=(20, 20), textcoords='offset points', ha='right', va='bottom',
bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5), arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0'))
plt.annotate('(%d,%d)'%(0,maxY), xy=(calib_map[2,0], calib_map[2,1]), xytext=(20, 20), textcoords='offset points', ha='right', va='bottom',
bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5), arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0'))
plt.annotate('(%d,%d)'%(maxX,maxY), xy=(calib_map[3,0], calib_map[3,1]), xytext=(20, 20), textcoords='offset points', ha='right', va='bottom',
bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5), arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0'))
plt.annotate('CCTV #2, #3, #4\nLAT=38.1899232\nLON=128.6040349', xy=(258, 424), xytext=(-30, -30), textcoords='offset points', ha='right', va='top',
bbox=dict(boxstyle='round,pad=0.5', fc='red', alpha=0.5), arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0'))
plt.annotate('CCTV #1, #5, #6\nLAT=38.1893355\nLON=128.6047293', xy=(367, 583), xytext=(-30, -30), textcoords='offset points', ha='right', va='top',
bbox=dict(boxstyle='round,pad=0.5', fc='red', alpha=0.5), arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0'))
for dataset in flags.datasets.split(','):
print('Loading from', dataset)
# calibration points in world coordinates (assume Z coordinate of 0)
calib_3d = dataset + '/calib_3d.txt'
# corresponding calibration point coordinates in the input image
calib_2d = dataset + '/calib_2d.txt'
calib_3d = np.loadtxt(calib_3d)
calib_3d = np.hstack((calib_3d, np.zeros((len(calib_3d), 1)))).astype(np.float32)
calib_2d = np.loadtxt(calib_2d).astype(np.float32)
calib_2d = calib_2d.reshape(-1,1,2)
img_idx = 1
while True:
# input image
img_fname = dataset + '/%d.png' % img_idx
# input semantic segmentation mask
label_fname = dataset + '/prediction%d.png' % img_idx
if os.path.exists(label_fname):
break
img_idx += 1
image_np = np.array(Image.open(img_fname))
gray = np.mean(image_np, axis=2)
mask = np.array(Image.open(label_fname))
print('Input', image_np.shape, image_np.dtype, mask.shape, mask.dtype)
image_np[:,:,0] = gray
image_np[:,:,1] = gray
image_np[:,:,2] = gray
image_np[mask] = 0.5 * image_np[mask] + [0, 128, 0]
# solve for camera parameters
_, mtx, _, rvecs, tvecs = cv.calibrateCamera([calib_3d], [calib_2d], gray.shape[::-1], None, None, flags=cv.CALIB_ZERO_TANGENT_DIST+cv.CALIB_FIX_K1+cv.CALIB_FIX_K2+cv.CALIB_FIX_K3)
fx = mtx[0,0]
fy = mtx[1,1]
cx = mtx[0,2]
cy = mtx[1,2]
R, _ = cv.Rodrigues(rvecs[0])
T = tvecs[0]
print('Camera parameters:')
print(fx,fy,cx,cy)
print(rvecs, R)
print(tvecs)
calib_2d_reprojected, _ = cv.projectPoints(calib_3d, rvecs[0], tvecs[0], mtx, np.zeros(5,dtype=np.float32))
# get detection centroids from semantic segmentation mask
min_cluster = 10
max_cluster = 1000
dt_2D = []
ret, dt_com = cv.connectedComponents(mask.astype(np.uint8))
for i in range(1, dt_com.max()+1):
if np.sum(dt_com==i) > min_cluster and np.sum(dt_com==i) < max_cluster:
my, mx = np.nonzero(dt_com==i)
dt_2D.append([np.mean(mx), np.mean(my)])
# project detection centroids to world coordinates at Z=0
dt_3D = np.zeros((len(dt_2D), 3), dtype=np.float32)
for i in range(len(dt_2D)):
u,v = dt_2D[i]
u = (u - cx) / fx
v = (v - cy) / fy
N = R.dot([0,0,1])
z = N.dot(T) / (N[0]*u + N[1]*v + N[2])
xyz = np.array([z*u, z*v, z])
dt_3D[i,:] = R.T.dot(xyz - T).flatten()
# visualize calibration points and detection centroids in input image
plt.figure()
plt.plot(calib_2d[:,:,0], calib_2d[:,:,1], 'ro')
plt.plot(calib_2d_reprojected[:,:,0], calib_2d_reprojected[:,:,1], 'bx')
plt.imshow(image_np)
for i in range(len(calib_3d)):
plt.annotate('(%d,%d)'%(calib_3d[i,0],calib_3d[i,1]), xy=(calib_2d[i,0,0], calib_2d[i,0,1]), xytext=(20, 20), textcoords='offset points', ha='right', va='bottom',
bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5), arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0'))
x1 = int(np.floor(1.0 * calib_3d[:,0].min() / flags.gridSize)) * flags.gridSize
x2 = int(np.ceil(1.0 * calib_3d[:,0].max() / flags.gridSize)) * flags.gridSize
y1 = int(np.floor(1.0 * calib_3d[:,1].min() / flags.gridSize)) * flags.gridSize
y2 = int(np.ceil(1.0 * calib_3d[:,1].max() / flags.gridSize)) * flags.gridSize
# draw grid lines
x = x1
while x <= x2:
pts = np.array([[x,y,0] for y in range(y1, y2+flags.stepSize, flags.stepSize)], dtype=np.float32)
projected, _ = cv.projectPoints(pts, rvecs[0], tvecs[0], mtx, np.zeros(5,dtype=np.float32))
plt.plot(projected[:,:,0], projected[:,:,1], 'k')
x += flags.gridSize
y = y1
while y <= y2:
pts = np.array([[x,y,0] for x in range(x1, x2+flags.stepSize, flags.stepSize)], dtype=np.float32)
projected, _ = cv.projectPoints(pts, rvecs[0], tvecs[0], mtx, np.zeros(5,dtype=np.float32))
plt.plot(projected[:,:,0], projected[:,:,1], 'k')
y += flags.gridSize
# add detection centroids to overhead map
dt_map = np.zeros((len(dt_3D), 2))
for i in range(len(dt_3D)):
dt_map[i] = map_origin + dt_3D[i,0] / maxX * map_x_vector + dt_3D[i,1] / maxY * map_y_vector
map_ax.plot(dt_map[:,0], dt_map[:,1], 'x', linewidth=3, label='CCTV'+dataset.split('_')[-1])
map_ax.legend()
plt.show() | localization.py | import numpy as np
import cv2 as cv
import glob
import matplotlib.pyplot as plt
import sys
from PIL import Image
import argparse
import os
# convenience code to annotate calibration points by clicking
# python localization.py dataset/beach/map.png dataset/beach/calib_map.txt --click
if '--click' in sys.argv:
input_fname = sys.argv[1]
output_fname = sys.argv[2]
img = cv.imread(input_fname)
img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
fig = plt.figure()
clicked_points = []
def update():
plt.clf()
plt.imshow(img, cmap='gray')
plt.plot([p[0] for p in clicked_points], [p[1] for p in clicked_points], 'rx')
fig.canvas.draw()
def onclick(event):
x,y = int(np.round(event.xdata)), int(np.round(event.ydata))
print('clicked',x,y)
clicked_points.append([x, y])
update()
def onkey(event):
if event.key==' ':
np.savetxt(output_fname, clicked_points)
sys.exit(1)
fig.canvas.mpl_connect('button_press_event', onclick)
fig.canvas.mpl_connect('key_press_event', onkey)
update()
plt.show()
sys.exit(1)
# code to project 2D detections on to an overhead map
parser = argparse.ArgumentParser()
parser.add_argument("--datasets", type=str, default="beach", required=True, help="Dataset directory name")
parser.add_argument( '--map', type=str, required=True, help='satellite image')
parser.add_argument("--gridSize", type=int, default=10, help="size of each square in the calibration grid")
parser.add_argument("--stepSize", type=int, default=1, help="density of drawing calibration grid")
flags = parser.parse_args()
# visualize calibration points in overhead map
plt.figure()
map_ax = plt.gca()
# oriented bounding box of calibration area in overhead map
calib_map = flags.map.replace('.png', '.txt')
calib_map = np.loadtxt(calib_map)
map_origin = calib_map[0]
map_x_vector = calib_map[1] - calib_map[0]
map_y_vector = calib_map[2] - calib_map[0]
maxX = 330
maxY = 130
x = 0
while x <= maxX:
p1 = map_origin + 1.0 * x / maxX * map_x_vector
p2 = map_origin + map_y_vector + 1.0 * x / maxX * map_x_vector
plt.plot([p1[0], p2[0]], [p1[1], p2[1]], 'k', linewidth=1, alpha=0.5)
x += flags.gridSize
y = 0
while y <= maxY:
p1 = map_origin + 1.0 * y / maxY * map_y_vector
p2 = map_origin + map_x_vector + 1.0 * y / maxY * map_y_vector
plt.plot([p1[0], p2[0]], [p1[1], p2[1]], 'k', linewidth=1, alpha=0.5)
y += flags.gridSize
# image of overhead map / satellite image
map_fname = flags.map
map_np = np.array(Image.open(map_fname))
plt.imshow(map_np)
plt.annotate('(%d,%d)'%(0,0), xy=(calib_map[0,0], calib_map[0,1]), xytext=(20, 20), textcoords='offset points', ha='right', va='bottom',
bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5), arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0'))
plt.annotate('(%d,%d)'%(maxX,0), xy=(calib_map[1,0], calib_map[1,1]), xytext=(20, 20), textcoords='offset points', ha='right', va='bottom',
bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5), arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0'))
plt.annotate('(%d,%d)'%(0,maxY), xy=(calib_map[2,0], calib_map[2,1]), xytext=(20, 20), textcoords='offset points', ha='right', va='bottom',
bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5), arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0'))
plt.annotate('(%d,%d)'%(maxX,maxY), xy=(calib_map[3,0], calib_map[3,1]), xytext=(20, 20), textcoords='offset points', ha='right', va='bottom',
bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5), arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0'))
plt.annotate('CCTV #2, #3, #4\nLAT=38.1899232\nLON=128.6040349', xy=(258, 424), xytext=(-30, -30), textcoords='offset points', ha='right', va='top',
bbox=dict(boxstyle='round,pad=0.5', fc='red', alpha=0.5), arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0'))
plt.annotate('CCTV #1, #5, #6\nLAT=38.1893355\nLON=128.6047293', xy=(367, 583), xytext=(-30, -30), textcoords='offset points', ha='right', va='top',
bbox=dict(boxstyle='round,pad=0.5', fc='red', alpha=0.5), arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0'))
for dataset in flags.datasets.split(','):
print('Loading from', dataset)
# calibration points in world coordinates (assume Z coordinate of 0)
calib_3d = dataset + '/calib_3d.txt'
# corresponding calibration point coordinates in the input image
calib_2d = dataset + '/calib_2d.txt'
calib_3d = np.loadtxt(calib_3d)
calib_3d = np.hstack((calib_3d, np.zeros((len(calib_3d), 1)))).astype(np.float32)
calib_2d = np.loadtxt(calib_2d).astype(np.float32)
calib_2d = calib_2d.reshape(-1,1,2)
img_idx = 1
while True:
# input image
img_fname = dataset + '/%d.png' % img_idx
# input semantic segmentation mask
label_fname = dataset + '/prediction%d.png' % img_idx
if os.path.exists(label_fname):
break
img_idx += 1
image_np = np.array(Image.open(img_fname))
gray = np.mean(image_np, axis=2)
mask = np.array(Image.open(label_fname))
print('Input', image_np.shape, image_np.dtype, mask.shape, mask.dtype)
image_np[:,:,0] = gray
image_np[:,:,1] = gray
image_np[:,:,2] = gray
image_np[mask] = 0.5 * image_np[mask] + [0, 128, 0]
# solve for camera parameters
_, mtx, _, rvecs, tvecs = cv.calibrateCamera([calib_3d], [calib_2d], gray.shape[::-1], None, None, flags=cv.CALIB_ZERO_TANGENT_DIST+cv.CALIB_FIX_K1+cv.CALIB_FIX_K2+cv.CALIB_FIX_K3)
fx = mtx[0,0]
fy = mtx[1,1]
cx = mtx[0,2]
cy = mtx[1,2]
R, _ = cv.Rodrigues(rvecs[0])
T = tvecs[0]
print('Camera parameters:')
print(fx,fy,cx,cy)
print(rvecs, R)
print(tvecs)
calib_2d_reprojected, _ = cv.projectPoints(calib_3d, rvecs[0], tvecs[0], mtx, np.zeros(5,dtype=np.float32))
# get detection centroids from semantic segmentation mask
min_cluster = 10
max_cluster = 1000
dt_2D = []
ret, dt_com = cv.connectedComponents(mask.astype(np.uint8))
for i in range(1, dt_com.max()+1):
if np.sum(dt_com==i) > min_cluster and np.sum(dt_com==i) < max_cluster:
my, mx = np.nonzero(dt_com==i)
dt_2D.append([np.mean(mx), np.mean(my)])
# project detection centroids to world coordinates at Z=0
dt_3D = np.zeros((len(dt_2D), 3), dtype=np.float32)
for i in range(len(dt_2D)):
u,v = dt_2D[i]
u = (u - cx) / fx
v = (v - cy) / fy
N = R.dot([0,0,1])
z = N.dot(T) / (N[0]*u + N[1]*v + N[2])
xyz = np.array([z*u, z*v, z])
dt_3D[i,:] = R.T.dot(xyz - T).flatten()
# visualize calibration points and detection centroids in input image
plt.figure()
plt.plot(calib_2d[:,:,0], calib_2d[:,:,1], 'ro')
plt.plot(calib_2d_reprojected[:,:,0], calib_2d_reprojected[:,:,1], 'bx')
plt.imshow(image_np)
for i in range(len(calib_3d)):
plt.annotate('(%d,%d)'%(calib_3d[i,0],calib_3d[i,1]), xy=(calib_2d[i,0,0], calib_2d[i,0,1]), xytext=(20, 20), textcoords='offset points', ha='right', va='bottom',
bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5), arrowprops=dict(arrowstyle = '->', connectionstyle='arc3,rad=0'))
x1 = int(np.floor(1.0 * calib_3d[:,0].min() / flags.gridSize)) * flags.gridSize
x2 = int(np.ceil(1.0 * calib_3d[:,0].max() / flags.gridSize)) * flags.gridSize
y1 = int(np.floor(1.0 * calib_3d[:,1].min() / flags.gridSize)) * flags.gridSize
y2 = int(np.ceil(1.0 * calib_3d[:,1].max() / flags.gridSize)) * flags.gridSize
# draw grid lines
x = x1
while x <= x2:
pts = np.array([[x,y,0] for y in range(y1, y2+flags.stepSize, flags.stepSize)], dtype=np.float32)
projected, _ = cv.projectPoints(pts, rvecs[0], tvecs[0], mtx, np.zeros(5,dtype=np.float32))
plt.plot(projected[:,:,0], projected[:,:,1], 'k')
x += flags.gridSize
y = y1
while y <= y2:
pts = np.array([[x,y,0] for x in range(x1, x2+flags.stepSize, flags.stepSize)], dtype=np.float32)
projected, _ = cv.projectPoints(pts, rvecs[0], tvecs[0], mtx, np.zeros(5,dtype=np.float32))
plt.plot(projected[:,:,0], projected[:,:,1], 'k')
y += flags.gridSize
# add detection centroids to overhead map
dt_map = np.zeros((len(dt_3D), 2))
for i in range(len(dt_3D)):
dt_map[i] = map_origin + dt_3D[i,0] / maxX * map_x_vector + dt_3D[i,1] / maxY * map_y_vector
map_ax.plot(dt_map[:,0], dt_map[:,1], 'x', linewidth=3, label='CCTV'+dataset.split('_')[-1])
map_ax.legend()
plt.show() | 0.259169 | 0.340239 |
import csv
from etilog.models import ImpactEvent
from etilog.models import SustainabilityDomain, SustainabilityTendency, SustainabilityTag
from etilog.models import Company
from etilog.models import Reference
def exp_csv_nlp(response):
writer = get_csvwriter(response)
header = ['ID',
'date_published',
'topicID',
'topicName',
'categoryID',
'categoryName',
'tendencyID',
'tendencyName',
'CompanyID',
'CompanyName',
'ReferenceID',
'ReferenceName',
'URL',
'pub_text',
'date_text',
'article_title'
]
val_names = ['id',
'date_published',
'sust_tags__id',
'sust_tags__name',
'sust_domain__id',
'sust_domain__name',
'sust_tendency__id',
'sust_tendency__name',
'company__id',
'company__name',
'reference__id',
'reference__name',
'source_url',
'article_text',
'date_text',
'article_title'
]
nr_ok = [1, 11]
val_ie = ImpactEvent.objects.filter(result_parse_html__in=nr_ok
).exclude(article_text__isnull=True
).exclude(article_text__exact=''
).values_list(*val_names)
writer.writerow(header)
for ie in val_ie:
if len(ie[7]) > 60000: # length libreoffice
print('length ', ie[0])
continue
writer.writerow(ie)
return response
def exp_csv_basedata(response):
writer = get_csvwriter(response)
def writerow(modelname, header, vallist):
writer.writerow(modelname)
writer.writerow(header)
for row in vallist:
writer.writerow(row)
# topics
modelname = ['TOPICS', ]
header = ['ID',
'NAME',
'CATEGORY_ID',
'CATEGORY_NAME',
'TENDENCY_ID',
'TENDENCY_NAME',
]
val_names = ['id',
'name',
'sust_domains',
'sust_domains__name',
'sust_tendency',
'sust_tendency__name',
]
vallist = SustainabilityTag.objects.values_list(*val_names)
writerow(modelname, header, vallist)
# domains
modelname = ['CATEGORY', ]
header = ['ID',
'NAME',
]
val_names = ['id',
'name',
]
vallist = SustainabilityDomain.objects.values_list(*val_names)
writerow(modelname, header, vallist)
# tendency
modelname = ['TENDENCY', ]
header = ['ID',
'NAME',
]
val_names = ['id',
'name',
]
vallist = SustainabilityTendency.objects.values_list(*val_names)
writerow(modelname, header, vallist)
# companies
modelname = ['COMPANIES / ORGANISATIONS', ]
header = ['ID',
'NAME',
]
val_names = ['id',
'name',
]
vallist = Company.objects.values_list(*val_names)
writerow(modelname, header, vallist)
# references
modelname = ['REFERENCE', ]
header = ['ID',
'NAME',
]
val_names = ['id',
'name',
]
vallist = Reference.objects.values_list(*val_names)
writerow(modelname, header, vallist)
return response
def get_csvwriter(response):
DELIMITER = 'ÿ'
writer = csv.writer(response, delimiter=DELIMITER)
return writer
def extract_err_file(response):
nonerr_li = [0, 1]
q_ie_err = ImpactEvent.objects.exclude(result_parse_html__in=nonerr_li
).order_by('updated_at'
).values_list('id', 'result_parse_html', 'updated_at')
q_ie_nonparse = ImpactEvent.objects.filter(result_parse_html=0
).values_list('id', 'result_parse_html', 'updated_at')
q_ie_success = ImpactEvent.objects.filter(result_parse_html=1
).values_list('id', 'result_parse_html', 'updated_at')
rows = [('id', 'errornr', 'updated_at')]
rows.extend(q_ie_err)
header = [('id', 'nonparsed', 'updated_at')]
rows.extend(header)
rows.extend(q_ie_nonparse)
header = [('id', 'success', 'updated_at')]
rows.extend(header)
rows.extend(q_ie_success)
DELIMITER = ';'
csvwriter = csv.writer(response, delimiter=DELIMITER)
for row in rows:
csvwriter.writerow(row)
return response | impexport/Logic/ViewExport.py | import csv
from etilog.models import ImpactEvent
from etilog.models import SustainabilityDomain, SustainabilityTendency, SustainabilityTag
from etilog.models import Company
from etilog.models import Reference
def exp_csv_nlp(response):
writer = get_csvwriter(response)
header = ['ID',
'date_published',
'topicID',
'topicName',
'categoryID',
'categoryName',
'tendencyID',
'tendencyName',
'CompanyID',
'CompanyName',
'ReferenceID',
'ReferenceName',
'URL',
'pub_text',
'date_text',
'article_title'
]
val_names = ['id',
'date_published',
'sust_tags__id',
'sust_tags__name',
'sust_domain__id',
'sust_domain__name',
'sust_tendency__id',
'sust_tendency__name',
'company__id',
'company__name',
'reference__id',
'reference__name',
'source_url',
'article_text',
'date_text',
'article_title'
]
nr_ok = [1, 11]
val_ie = ImpactEvent.objects.filter(result_parse_html__in=nr_ok
).exclude(article_text__isnull=True
).exclude(article_text__exact=''
).values_list(*val_names)
writer.writerow(header)
for ie in val_ie:
if len(ie[7]) > 60000: # length libreoffice
print('length ', ie[0])
continue
writer.writerow(ie)
return response
def exp_csv_basedata(response):
writer = get_csvwriter(response)
def writerow(modelname, header, vallist):
writer.writerow(modelname)
writer.writerow(header)
for row in vallist:
writer.writerow(row)
# topics
modelname = ['TOPICS', ]
header = ['ID',
'NAME',
'CATEGORY_ID',
'CATEGORY_NAME',
'TENDENCY_ID',
'TENDENCY_NAME',
]
val_names = ['id',
'name',
'sust_domains',
'sust_domains__name',
'sust_tendency',
'sust_tendency__name',
]
vallist = SustainabilityTag.objects.values_list(*val_names)
writerow(modelname, header, vallist)
# domains
modelname = ['CATEGORY', ]
header = ['ID',
'NAME',
]
val_names = ['id',
'name',
]
vallist = SustainabilityDomain.objects.values_list(*val_names)
writerow(modelname, header, vallist)
# tendency
modelname = ['TENDENCY', ]
header = ['ID',
'NAME',
]
val_names = ['id',
'name',
]
vallist = SustainabilityTendency.objects.values_list(*val_names)
writerow(modelname, header, vallist)
# companies
modelname = ['COMPANIES / ORGANISATIONS', ]
header = ['ID',
'NAME',
]
val_names = ['id',
'name',
]
vallist = Company.objects.values_list(*val_names)
writerow(modelname, header, vallist)
# references
modelname = ['REFERENCE', ]
header = ['ID',
'NAME',
]
val_names = ['id',
'name',
]
vallist = Reference.objects.values_list(*val_names)
writerow(modelname, header, vallist)
return response
def get_csvwriter(response):
DELIMITER = 'ÿ'
writer = csv.writer(response, delimiter=DELIMITER)
return writer
def extract_err_file(response):
nonerr_li = [0, 1]
q_ie_err = ImpactEvent.objects.exclude(result_parse_html__in=nonerr_li
).order_by('updated_at'
).values_list('id', 'result_parse_html', 'updated_at')
q_ie_nonparse = ImpactEvent.objects.filter(result_parse_html=0
).values_list('id', 'result_parse_html', 'updated_at')
q_ie_success = ImpactEvent.objects.filter(result_parse_html=1
).values_list('id', 'result_parse_html', 'updated_at')
rows = [('id', 'errornr', 'updated_at')]
rows.extend(q_ie_err)
header = [('id', 'nonparsed', 'updated_at')]
rows.extend(header)
rows.extend(q_ie_nonparse)
header = [('id', 'success', 'updated_at')]
rows.extend(header)
rows.extend(q_ie_success)
DELIMITER = ';'
csvwriter = csv.writer(response, delimiter=DELIMITER)
for row in rows:
csvwriter.writerow(row)
return response | 0.278649 | 0.205456 |
import os
import random
import sys
from typing import Tuple
import cv2
import numpy as np
from numpy.lib.financial import ipmt
import pandas as pd
import torch
from torch.utils.data.dataset import Dataset
from easydict import EasyDict as edict
import matplotlib.pyplot as plt
def rand_uniform_strong(min, max):
    """Draw a uniform random float in [min, max]; bounds may be given in either order."""
    if min > max:
        min, max = max, min
    return min + random.random() * (max - min)
def rand_scale(s):
    """Return a random scale factor in [1, s], inverted (1/scale) half of the time."""
    scale = rand_uniform_strong(1, s)
    return scale if random.randint(0, 1) % 2 else 1.0 / scale
def rand_precalc_random(min, max, random_part):
    """Map *random_part* (expected in [0, 1]) linearly onto [min, max].

    Bounds may be supplied in either order.
    """
    if max < min:
        min, max = max, min
    return min + random_part * (max - min)
def fill_truth_detection(bboxes, num_boxes, classes, flip, dx, dy, sx, sy, net_w, net_h):
    """Shift, clip, filter and rescale ground-truth boxes for a cropped image.

    Args:
        bboxes: float ndarray of shape (N, 5): x1, y1, x2, y2, class_id.
        num_boxes: maximum number of boxes to keep.
        classes: number of valid class ids (keeps 0 <= class_id < classes).
        flip: truthy to mirror boxes horizontally in network coordinates.
        dx, dy: crop offset subtracted from the coordinates.
        sx, sy: crop width/height used for clipping.
        net_w, net_h: network input size the boxes are rescaled to.

    Returns:
        (bboxes, min_w_h): the surviving boxes and the smallest box side in
        crop coordinates; min_w_h is 10000 when no box survives.
    """
    if bboxes.shape[0] == 0:
        return bboxes, 10000
    np.random.shuffle(bboxes)
    # translate into crop coordinates and clip to the crop rectangle
    bboxes[:, [0, 2]] -= dx
    bboxes[:, [1, 3]] -= dy
    bboxes[:, [0, 2]] = np.clip(bboxes[:, [0, 2]], 0, sx)
    bboxes[:, [1, 3]] = np.clip(bboxes[:, [1, 3]], 0, sy)
    # drop boxes squashed onto a crop border (zero width or zero height);
    # a vectorized mask replaces the original O(n^2) list.remove() loop
    degenerate = (
        ((bboxes[:, 1] == sy) & (bboxes[:, 3] == sy))
        | ((bboxes[:, 0] == sx) & (bboxes[:, 2] == sx))
        | ((bboxes[:, 1] == 0) & (bboxes[:, 3] == 0))
        | ((bboxes[:, 0] == 0) & (bboxes[:, 2] == 0))
    )
    bboxes = bboxes[~degenerate]
    if bboxes.shape[0] == 0:
        return bboxes, 10000
    # keep only valid class ids
    bboxes = bboxes[np.where((bboxes[:, 4] < classes) & (bboxes[:, 4] >= 0))[0]]
    if bboxes.shape[0] == 0:
        # fix: the original fell through to .min() on an empty array here,
        # which raises ValueError when every remaining class id is invalid
        return bboxes, 10000
    if bboxes.shape[0] > num_boxes:
        bboxes = bboxes[:num_boxes]
    min_w_h = np.array([bboxes[:, 2] - bboxes[:, 0], bboxes[:, 3] - bboxes[:, 1]]).min()
    # rescale to network input size
    bboxes[:, [0, 2]] *= net_w / sx
    bboxes[:, [1, 3]] *= net_h / sy
    if flip:
        temp = net_w - bboxes[:, 0]
        bboxes[:, 0] = net_w - bboxes[:, 2]
        bboxes[:, 2] = temp
    return bboxes, min_w_h
def rect_intersection(a, b):
    """Intersection of two [x1, y1, x2, y2] rectangles (may be degenerate/inverted)."""
    return [
        max(a[0], b[0]),
        max(a[1], b[1]),
        min(a[2], b[2]),
        min(a[3], b[3]),
    ]
def image_data_augmentation(
    mat, w, h, pleft, ptop, swidth, sheight, flip, dhue, dsat, dexp, gaussian_noise, blur, truth
):
    """Crop *mat* to the (pleft, ptop, swidth, sheight) window, resize to
    (w, h), and apply flip / HSV jitter / blur / gaussian-noise augmentation.

    Any exception raised along the way is swallowed by the bare except at the
    bottom and the ORIGINAL, un-augmented image is returned with only a
    printed warning.
    """
    try:
        img = mat
        oh, ow, _ = img.shape
        pleft, ptop, swidth, sheight = int(pleft), int(ptop), int(swidth), int(sheight)
        # crop
        src_rect = [pleft, ptop, swidth + pleft, sheight + ptop] # x1,y1,x2,y2
        img_rect = [0, 0, ow, oh]
        new_src_rect = rect_intersection(src_rect, img_rect) # intersection of crop and image
        # where the visible part of the image lands inside the crop canvas
        dst_rect = [
            max(0, -pleft),
            max(0, -ptop),
            max(0, -pleft) + new_src_rect[2] - new_src_rect[0],
            max(0, -ptop) + new_src_rect[3] - new_src_rect[1],
        ]
        # cv2.Mat sized
        # NOTE(review): this fast path compares x-extent to shape[0] (rows)
        # and y-extent to shape[1] (cols), which looks transposed; it only
        # decides whether the padded-crop branch can be skipped. TODO confirm.
        if (
            src_rect[0] == 0
            and src_rect[1] == 0
            and src_rect[2] == img.shape[0]
            and src_rect[3] == img.shape[1]
        ):
            sized = cv2.resize(img, (w, h), cv2.INTER_LINEAR)
        else:
            # pad the out-of-image area with the mean colour, then paste the
            # visible region into the crop canvas
            cropped = np.zeros([sheight, swidth, 3])
            cropped[
                :,
                :,
            ] = np.mean(img, axis=(0, 1))
            cropped[dst_rect[1] : dst_rect[3], dst_rect[0] : dst_rect[2]] = img[
                new_src_rect[1] : new_src_rect[3], new_src_rect[0] : new_src_rect[2]
            ]
            # resize
            sized = cv2.resize(cropped, (w, h), cv2.INTER_LINEAR)
        # flip
        if flip:
            # cv2.Mat cropped
            sized = cv2.flip(sized, 1) # 0 - x-axis, 1 - y-axis, -1 - both axes (x & y)
        # HSV augmentation
        # cv2.COLOR_BGR2HSV, cv2.COLOR_RGB2HSV, cv2.COLOR_HSV2BGR, cv2.COLOR_HSV2RGB
        if dsat != 1 or dexp != 1 or dhue != 0:
            if img.shape[2] >= 3:
                hsv_src = cv2.cvtColor(sized.astype(np.float32), cv2.COLOR_RGB2HSV) # RGB to HSV
                hsv = cv2.split(hsv_src)
                hsv[1] *= dsat
                hsv[2] *= dexp
                hsv[0] += 179 * dhue
                hsv_src = cv2.merge(hsv)
                sized = np.clip(
                    cv2.cvtColor(hsv_src, cv2.COLOR_HSV2RGB), 0, 255
                ) # HSV to RGB (the same as previous)
            else:
                # fewer than 3 channels: only exposure can be applied
                sized *= dexp
        if blur:
            if blur == 1:
                dst = cv2.GaussianBlur(sized, (17, 17), 0)
                # cv2.bilateralFilter(sized, dst, 17, 75, 75)
            else:
                ksize = (blur / 2) * 2 + 1
                dst = cv2.GaussianBlur(sized, (ksize, ksize), 0)
            if blur == 1:
                # NOTE(review): this "blur background only" branch looks
                # broken: numpy arrays have no .cols/.rows, and `roi` is
                # called before it is ever assigned. It only appears to work
                # because the bare except below swallows the error, falling
                # back to the raw image. TODO confirm intended behaviour.
                img_rect = [0, 0, sized.cols, sized.rows]
                for b in truth:
                    left = (b.x - b.w / 2.0) * sized.shape[1]
                    width = b.w * sized.shape[1]
                    top = (b.y - b.h / 2.0) * sized.shape[0]
                    height = b.h * sized.shape[0]
                    roi(left, top, width, height)
                    roi = roi & img_rect
                    dst[roi[0] : roi[0] + roi[2], roi[1] : roi[1] + roi[3]] = sized[
                        roi[0] : roi[0] + roi[2], roi[1] : roi[1] + roi[3]
                    ]
            sized = dst
        if gaussian_noise:
            # NOTE(review): np.array(sized.shape) is just the 3-element shape
            # vector, not an image-sized buffer, so cv2.randn here cannot add
            # per-pixel noise — np.zeros(sized.shape) was likely intended.
            # TODO confirm.
            noise = np.array(sized.shape)
            gaussian_noise = min(gaussian_noise, 127)
            gaussian_noise = max(gaussian_noise, 0)
            cv2.randn(noise, 0, gaussian_noise) # mean and variance
            sized = sized + noise
    except:
        # swallow-all: any augmentation failure falls back to the raw image
        print("OpenCV can't augment image: " + str(w) + " x " + str(h))
        sized = mat
    return sized
def filter_truth(bboxes, dx, dy, sx, sy, xd, yd):
    """Translate boxes into a mosaic cell, clip them, and drop degenerate ones.

    Args:
        bboxes: float ndarray (N, 5): x1, y1, x2, y2, class_id. Modified in place.
        dx, dy: offset subtracted from the coordinates (source position).
        sx, sy: cell width/height used for clipping.
        xd, yd: offset added afterwards (destination position in the mosaic).

    Returns:
        The surviving boxes, shifted into mosaic coordinates.
    """
    # translate into cell coordinates and clip to the cell rectangle
    bboxes[:, [0, 2]] -= dx
    bboxes[:, [1, 3]] -= dy
    bboxes[:, [0, 2]] = np.clip(bboxes[:, [0, 2]], 0, sx)
    bboxes[:, [1, 3]] = np.clip(bboxes[:, [1, 3]], 0, sy)
    # a box collapsed onto a cell border has zero area: discard it
    # (vectorized mask replaces the original O(n^2) list.remove() loop)
    degenerate = (
        ((bboxes[:, 1] == sy) & (bboxes[:, 3] == sy))
        | ((bboxes[:, 0] == sx) & (bboxes[:, 2] == sx))
        | ((bboxes[:, 1] == 0) & (bboxes[:, 3] == 0))
        | ((bboxes[:, 0] == 0) & (bboxes[:, 2] == 0))
    )
    bboxes = bboxes[~degenerate]
    # shift into the destination cell of the mosaic canvas
    bboxes[:, [0, 2]] += xd
    bboxes[:, [1, 3]] += yd
    return bboxes
def blend_truth_mosaic(
    out_img, img, bboxes, w, h, cut_x, cut_y, i_mixup, left_shift, right_shift, top_shift, bot_shift
):
    """Paste *img* into one quadrant of the (h, w) mosaic canvas *out_img*.

    i_mixup selects the quadrant (0 = top-left, 1 = top-right, 2 = bottom-left,
    3 = bottom-right); (cut_x, cut_y) is the crossing point of the four cells,
    and the *_shift values choose which window of *img* gets sampled. Boxes
    are re-filtered with filter_truth so only those visible in the pasted
    region survive, shifted into mosaic coordinates.

    Returns:
        (out_img, bboxes): the updated canvas and the surviving boxes.
    """
    # clamp the shifts so the sampled window stays inside img
    left_shift = min(left_shift, w - cut_x)
    top_shift = min(top_shift, h - cut_y)
    right_shift = min(right_shift, cut_x)
    bot_shift = min(bot_shift, cut_y)
    if i_mixup == 0:
        # top-left cell
        bboxes = filter_truth(bboxes, left_shift, top_shift, cut_x, cut_y, 0, 0)
        out_img[:cut_y, :cut_x] = img[top_shift : top_shift + cut_y, left_shift : left_shift + cut_x]
    if i_mixup == 1:
        # top-right cell
        bboxes = filter_truth(bboxes, cut_x - right_shift, top_shift, w - cut_x, cut_y, cut_x, 0)
        out_img[:cut_y, cut_x:] = img[top_shift : top_shift + cut_y, cut_x - right_shift : w - right_shift]
    if i_mixup == 2:
        # bottom-left cell
        bboxes = filter_truth(bboxes, left_shift, cut_y - bot_shift, cut_x, h - cut_y, 0, cut_y)
        out_img[cut_y:, :cut_x] = img[cut_y - bot_shift : h - bot_shift, left_shift : left_shift + cut_x]
    if i_mixup == 3:
        # bottom-right cell
        bboxes = filter_truth(
            bboxes, cut_x - right_shift, cut_y - bot_shift, w - cut_x, h - cut_y, cut_x, cut_y
        )
        out_img[cut_y:, cut_x:] = img[
            cut_y - bot_shift : h - bot_shift, cut_x - right_shift : w - right_shift
        ]
    return out_img, bboxes
def draw_box(img, bboxes):
    """Draw every bounding box onto *img* as a 2 px green rectangle and return it."""
    green = (0, 255, 0)
    for box in bboxes:
        top_left = (box[0], box[1])
        bottom_right = (box[2], box[3])
        img = cv2.rectangle(img, top_left, bottom_right, green, 2)
    return img
class Yolo_dataset(Dataset):
    """YOLOv4 detection dataset.

    Each line of the label file is:
        <image path> <x1,y1,x2,y2,cls> [<x1,y1,x2,y2,cls> ...]
    In training mode samples are randomly cropped / flipped / colour-jittered
    and optionally combined via MixUp (cfg.mixup == 1) or 4-image Mosaic
    (cfg.mixup == 3); in eval mode COCO-style targets are returned.
    """

    def __init__(self, lable_path, cfg, train=True):
        """
        Args:
            lable_path: annotation file path (format above).
            cfg: config namespace (w, h, jitter, hue, saturation, exposure,
                flip, blur, gaussian, letter_box, mixup, boxes, classes,
                dataset_dir, ...).
            train: when False, __getitem__ returns COCO-style eval targets.

        Raises:
            ValueError: for unsupported cfg.mixup combinations.
        """
        super(Yolo_dataset, self).__init__()
        if cfg.mixup == 2:
            # fix: a bare `raise` with no active exception only produced
            # "RuntimeError: No active exception to re-raise"
            raise ValueError("cutmix=1 - isn't supported for Detector")
        elif cfg.mixup == 3 and cfg.letter_box:
            # fix: this branch re-tested mixup == 2 and was unreachable; per
            # its own message it must reject mosaic (mixup == 3) + letter_box
            raise ValueError("Combination: letter_box=1 & mosaic=1 - isn't supported, use only 1 of these parameters")
        self.cfg = cfg
        self.train = train
        truth = {}
        # fix: close the label file deterministically (handle was leaked)
        with open(lable_path, "r", encoding="utf-8") as f:
            for line in f:
                data = line.split(" ")
                truth[data[0]] = []
                for i in data[1:]:
                    truth[data[0]].append([int(float(j)) for j in i.split(",")])
        self.truth = truth
        self.imgs = list(self.truth.keys())

    def __len__(self):
        return len(self.truth.keys())

    def __getitem__(self, index):
        """Return one augmented (image, padded_boxes) training pair."""
        if not self.train:
            return self._get_val_item(index)
        img_path = self.imgs[index]
        # fix: np.float was removed in NumPy 1.24; it aliased builtin float
        bboxes = np.array(self.truth.get(img_path), dtype=np.float64)
        img_path = os.path.join(self.cfg.dataset_dir, img_path)
        use_mixup = self.cfg.mixup
        if random.randint(0, 1):
            use_mixup = 0
        if use_mixup == 3:
            # pick the mosaic crossing point away from the canvas borders
            min_offset = 0.2
            cut_x = random.randint(int(self.cfg.w * min_offset), int(self.cfg.w * (1 - min_offset)))
            cut_y = random.randint(int(self.cfg.h * min_offset), int(self.cfg.h * (1 - min_offset)))
        dhue, dsat, dexp, flip, blur = 0, 0, 0, 0, 0
        gaussian_noise = 0
        out_img = np.zeros([self.cfg.h, self.cfg.w, 3])
        out_bboxes = []
        # one pass for plain samples, two for mixup, four for mosaic
        for i in range(use_mixup + 1):
            if i != 0:
                img_path = random.choice(list(self.truth.keys()))
                bboxes = np.array(self.truth.get(img_path), dtype=np.float64)
                img_path = os.path.join(self.cfg.dataset_dir, img_path)
            img = cv2.imread(img_path)
            if img is None:
                # fix: the None check used to run only AFTER cvtColor, which
                # would already have crashed on an unreadable image
                continue
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            oh, ow, oc = img.shape
            # jitter amplitudes (np.int alias also removed in NumPy 1.24)
            dh, dw, dc = np.array(np.array([oh, ow, oc]) * self.cfg.jitter, dtype=int)
            dhue = rand_uniform_strong(-self.cfg.hue, self.cfg.hue)
            dsat = rand_scale(self.cfg.saturation)
            dexp = rand_scale(self.cfg.exposure)
            pleft = random.randint(-dw, dw)
            pright = random.randint(-dw, dw)
            ptop = random.randint(-dh, dh)
            pbot = random.randint(-dh, dh)
            flip = random.randint(0, 1) if self.cfg.flip else 0
            if self.cfg.blur:
                tmp_blur = random.randint(0, 2)  # 0 - disable, 1 - blur background, 2 - blur the whole image
                if tmp_blur == 0:
                    blur = 0
                elif tmp_blur == 1:
                    blur = 1
                else:
                    blur = self.cfg.blur
            if self.cfg.gaussian and random.randint(0, 1):
                gaussian_noise = self.cfg.gaussian
            else:
                gaussian_noise = 0
            if self.cfg.letter_box:
                # grow the crop so the letter-boxed aspect ratio matches the net
                img_ar = ow / oh
                net_ar = self.cfg.w / self.cfg.h
                result_ar = img_ar / net_ar
                if result_ar > 1:  # sheight - should be increased
                    oh_tmp = ow / net_ar
                    delta_h = (oh_tmp - oh) / 2
                    ptop = ptop - delta_h
                    pbot = pbot - delta_h
                else:  # swidth - should be increased
                    ow_tmp = oh * net_ar
                    delta_w = (ow_tmp - ow) / 2
                    pleft = pleft - delta_w
                    pright = pright - delta_w
            swidth = ow - pleft - pright
            sheight = oh - ptop - pbot
            truth, min_w_h = fill_truth_detection(
                bboxes,
                self.cfg.boxes,
                self.cfg.classes,
                flip,
                pleft,
                ptop,
                swidth,
                sheight,
                self.cfg.w,
                self.cfg.h,
            )
            if (min_w_h / 8) < blur and blur > 1:  # disable blur if one of the objects is too small
                blur = min_w_h / 8
            ai = image_data_augmentation(
                img,
                self.cfg.w,
                self.cfg.h,
                pleft,
                ptop,
                swidth,
                sheight,
                flip,
                dhue,
                dsat,
                dexp,
                gaussian_noise,
                blur,
                truth,
            )
            if use_mixup == 0:
                out_img = ai
                out_bboxes = truth
            if use_mixup == 1:
                if i == 0:
                    # remember the first sample to blend with the second one
                    old_img = ai.copy()
                    old_truth = truth.copy()
                elif i == 1:
                    out_img = cv2.addWeighted(ai, 0.5, old_img, 0.5)
                    out_bboxes = np.concatenate([old_truth, truth], axis=0)
            elif use_mixup == 3:
                if flip:
                    pleft, pright = pright, pleft
                left_shift = int(min(cut_x, max(0, (-int(pleft) * self.cfg.w / swidth))))
                top_shift = int(min(cut_y, max(0, (-int(ptop) * self.cfg.h / sheight))))
                right_shift = int(min((self.cfg.w - cut_x), max(0, (-int(pright) * self.cfg.w / swidth))))
                bot_shift = int(min(self.cfg.h - cut_y, max(0, (-int(pbot) * self.cfg.h / sheight))))
                out_img, out_bbox = blend_truth_mosaic(
                    out_img,
                    ai,
                    truth.copy(),
                    self.cfg.w,
                    self.cfg.h,
                    cut_x,
                    cut_y,
                    i,
                    left_shift,
                    right_shift,
                    top_shift,
                    bot_shift,
                )
                out_bboxes.append(out_bbox)
        if use_mixup == 3:
            out_bboxes = np.concatenate(out_bboxes, axis=0)
        # pad/truncate to a fixed number of boxes so samples batch uniformly
        out_bboxes1 = np.zeros([self.cfg.boxes, 5])
        out_bboxes1[: min(out_bboxes.shape[0], self.cfg.boxes)] = out_bboxes[
            : min(out_bboxes.shape[0], self.cfg.boxes)
        ]
        return out_img, out_bboxes1

    def _get_val_item(self, index):
        """Return an (image, target) pair in COCO evaluation format."""
        img_path = self.imgs[index]
        bboxes_with_cls_id = np.array(self.truth.get(img_path), dtype=np.float64)
        img = cv2.imread(os.path.join(self.cfg.dataset_dir, img_path))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        num_objs = len(bboxes_with_cls_id)
        target = {}
        # convert [x1, y1, x2, y2] to COCO [x, y, w, h]
        boxes = bboxes_with_cls_id[..., :4]
        boxes[..., 2:] = boxes[..., 2:] - boxes[..., :2]
        target["boxes"] = torch.as_tensor(boxes, dtype=torch.float32)
        target["labels"] = torch.as_tensor(bboxes_with_cls_id[..., -1].flatten(), dtype=torch.int64)
        target["image_id"] = torch.tensor([get_image_id(img_path)])
        target["area"] = (target["boxes"][:, 3]) * (target["boxes"][:, 2])
        target["iscrowd"] = torch.zeros((num_objs,), dtype=torch.int64)
        return img, target
class Yolo_BEV_dataset(Dataset):
    """BEV pytorch dataset to load KITTI."""

    def __init__(self, config: edict, split: str = "train") -> None:
        """
        Args:
            config (edict): Easy directory configuration file
            split (str): Split to load. Can be ["train", "test", "val"]. Default = train.
        """
        super(Yolo_BEV_dataset, self).__init__()
        self.cfg = config
        self.split = split
        # read images paths
        self.img_paths = []
        with open(os.path.join(self.cfg.dataset_dir, f"{split}_split.txt"), "r") as f:
            for line in f:
                self.img_paths.append(line.strip())
        # read labels
        column_types = {
            "ID": str,
            "alpha": float,
            "3D_d": float,
            "3D_l": float,
            "3D_w": float,
            "cos": float,
            "sin": float,
            "type": str,
        }
        self.labels = pd.read_csv(
            os.path.join(self.cfg.dataset_dir, f"{split}_split.csv"), dtype=column_types
        )
        # extra params
        self.fov = 82  # KITTI fov [TODO: need to adapt to new datasets]
        self.base_width = 864
        self.base_height = 135
        # fix: np.float was removed in NumPy 1.24; it aliased builtin float
        self.canvas = np.zeros(shape=(self.cfg.height, self.cfg.width, self.cfg.channels), dtype=np.float64)
        # class-name -> numeric id mapping from the names file
        self.mapping = {}
        with open(self.cfg.names_path, "r") as f:
            for i, line in enumerate(f):
                # NOTE(review): `line` keeps its trailing newline here, so the
                # keys look like "Car\n" — confirm whether .strip() was intended.
                self.mapping[line] = float(i)

    def __len__(self) -> int:
        """Number of elements in dataset

        Returns:
            int: Number of elements in dataset
        """
        return len(self.img_paths)

    def __getitem__(self, idx: int) -> Tuple[np.ndarray, np.ndarray]:
        """Get single item from dataset

        Args:
            idx (int): Sample index

        Returns:
            Tuple[np.ndarray, np.ndarray]: img tensor and labels. Returns
                labels == [[-1,...,-1,"None"]] if no label is present for an img.
        """
        # read image (fix: np.float alias replaced, see __init__)
        img = cv2.imread(self.img_paths[idx])
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB).astype(np.float64)
        img /= 255.0
        # rescale image to input size and position it to canvas center
        new_w = int(self.base_width * (self.fov / 360.0))
        new_w = new_w if new_w % 2 == 0 else new_w - 1
        new_h = int((img.shape[0] / img.shape[1]) * new_w)
        new_h = new_h if new_h % 2 == 0 else new_h - 1
        img = cv2.resize(img, (new_w, new_h))
        # define padding borders
        tb_border = (self.canvas.shape[0] - new_h) // 2  # top/bottom border
        lr_border = (self.canvas.shape[1] - new_w) // 2  # left/right border
        # fit image into canvas
        canvas = self.canvas.copy()
        canvas[tb_border : canvas.shape[0] - tb_border, lr_border : canvas.shape[1] - lr_border, :] = img
        img = canvas
        # read labels
        label_id = self.img_paths[idx].split("/")[-1].split(".")[0]
        matches = self.labels[self.labels["ID"] == label_id]
        # check if img has labels
        if matches.empty:
            labels = np.array([-1.0 for _ in range(7)])
        else:
            matches = matches.loc[:, matches.columns != "ID"]
            # NOTE(review): the CSV declares a "type" column, not "cls", so
            # this replace may be a no-op — verify against the label files.
            matches = matches.replace({"cls": self.mapping})
            labels = matches.to_numpy().astype(np.float64)
        return img, labels
def get_image_id(filename: str) -> int:
    """
    Convert a string to a integer.
    Make sure that the images and the `image_id`s are in one-one correspondence.
    There are already `image_id`s in annotations of the COCO dataset,
    in which case this function is unnecessary.
    For creating one's own `get_image_id` function, one can refer to
    https://github.com/google/automl/blob/master/efficientdet/dataset/create_pascal_tfrecord.py#L86
    or refer to the following code (where the filenames are like 'level1_123.jpg')
    >>> lv, no = os.path.splitext(os.path.basename(filename))[0].split("_")
    >>> lv = lv.replace("level", "")
    >>> no = f"{int(no):04d}"
    >>> return int(lv+no)
    """
    # fix: removed the sample implementation that followed this raise — it
    # was unreachable dead code and is already preserved in the docstring.
    raise NotImplementedError("Create your own 'get_image_id' function")
if __name__ == "__main__":
from cfg import Cfg
import matplotlib.pyplot as plt
random.seed(2020)
np.random.seed(2020)
Cfg.dataset_dir = "/mnt/e/Dataset"
dataset = Yolo_dataset(Cfg.train_label, Cfg)
for i in range(100):
out_img, out_bboxes = dataset.__getitem__(i)
a = draw_box(out_img.copy(), out_bboxes.astype(np.int32))
plt.imshow(a.astype(np.int32))
plt.show() | dataset.py | import os
import random
import sys
from typing import Tuple
import cv2
import numpy as np
from numpy.lib.financial import ipmt
import pandas as pd
import torch
from torch.utils.data.dataset import Dataset
from easydict import EasyDict as edict
import matplotlib.pyplot as plt
def rand_uniform_strong(min, max):
if min > max:
swap = min
min = max
max = swap
return random.random() * (max - min) + min
def rand_scale(s):
scale = rand_uniform_strong(1, s)
if random.randint(0, 1) % 2:
return scale
return 1.0 / scale
def rand_precalc_random(min, max, random_part):
if max < min:
swap = min
min = max
max = swap
return (random_part * (max - min)) + min
def fill_truth_detection(bboxes, num_boxes, classes, flip, dx, dy, sx, sy, net_w, net_h):
if bboxes.shape[0] == 0:
return bboxes, 10000
np.random.shuffle(bboxes)
bboxes[:, 0] -= dx
bboxes[:, 2] -= dx
bboxes[:, 1] -= dy
bboxes[:, 3] -= dy
bboxes[:, 0] = np.clip(bboxes[:, 0], 0, sx)
bboxes[:, 2] = np.clip(bboxes[:, 2], 0, sx)
bboxes[:, 1] = np.clip(bboxes[:, 1], 0, sy)
bboxes[:, 3] = np.clip(bboxes[:, 3], 0, sy)
out_box = list(
np.where(
((bboxes[:, 1] == sy) & (bboxes[:, 3] == sy))
| ((bboxes[:, 0] == sx) & (bboxes[:, 2] == sx))
| ((bboxes[:, 1] == 0) & (bboxes[:, 3] == 0))
| ((bboxes[:, 0] == 0) & (bboxes[:, 2] == 0))
)[0]
)
list_box = list(range(bboxes.shape[0]))
for i in out_box:
list_box.remove(i)
bboxes = bboxes[list_box]
if bboxes.shape[0] == 0:
return bboxes, 10000
bboxes = bboxes[np.where((bboxes[:, 4] < classes) & (bboxes[:, 4] >= 0))[0]]
if bboxes.shape[0] > num_boxes:
bboxes = bboxes[:num_boxes]
min_w_h = np.array([bboxes[:, 2] - bboxes[:, 0], bboxes[:, 3] - bboxes[:, 1]]).min()
bboxes[:, 0] *= net_w / sx
bboxes[:, 2] *= net_w / sx
bboxes[:, 1] *= net_h / sy
bboxes[:, 3] *= net_h / sy
if flip:
temp = net_w - bboxes[:, 0]
bboxes[:, 0] = net_w - bboxes[:, 2]
bboxes[:, 2] = temp
return bboxes, min_w_h
def rect_intersection(a, b):
minx = max(a[0], b[0])
miny = max(a[1], b[1])
maxx = min(a[2], b[2])
maxy = min(a[3], b[3])
return [minx, miny, maxx, maxy]
def image_data_augmentation(
mat, w, h, pleft, ptop, swidth, sheight, flip, dhue, dsat, dexp, gaussian_noise, blur, truth
):
try:
img = mat
oh, ow, _ = img.shape
pleft, ptop, swidth, sheight = int(pleft), int(ptop), int(swidth), int(sheight)
# crop
src_rect = [pleft, ptop, swidth + pleft, sheight + ptop] # x1,y1,x2,y2
img_rect = [0, 0, ow, oh]
new_src_rect = rect_intersection(src_rect, img_rect) # 交集
dst_rect = [
max(0, -pleft),
max(0, -ptop),
max(0, -pleft) + new_src_rect[2] - new_src_rect[0],
max(0, -ptop) + new_src_rect[3] - new_src_rect[1],
]
# cv2.Mat sized
if (
src_rect[0] == 0
and src_rect[1] == 0
and src_rect[2] == img.shape[0]
and src_rect[3] == img.shape[1]
):
sized = cv2.resize(img, (w, h), cv2.INTER_LINEAR)
else:
cropped = np.zeros([sheight, swidth, 3])
cropped[
:,
:,
] = np.mean(img, axis=(0, 1))
cropped[dst_rect[1] : dst_rect[3], dst_rect[0] : dst_rect[2]] = img[
new_src_rect[1] : new_src_rect[3], new_src_rect[0] : new_src_rect[2]
]
# resize
sized = cv2.resize(cropped, (w, h), cv2.INTER_LINEAR)
# flip
if flip:
# cv2.Mat cropped
sized = cv2.flip(sized, 1) # 0 - x-axis, 1 - y-axis, -1 - both axes (x & y)
# HSV augmentation
# cv2.COLOR_BGR2HSV, cv2.COLOR_RGB2HSV, cv2.COLOR_HSV2BGR, cv2.COLOR_HSV2RGB
if dsat != 1 or dexp != 1 or dhue != 0:
if img.shape[2] >= 3:
hsv_src = cv2.cvtColor(sized.astype(np.float32), cv2.COLOR_RGB2HSV) # RGB to HSV
hsv = cv2.split(hsv_src)
hsv[1] *= dsat
hsv[2] *= dexp
hsv[0] += 179 * dhue
hsv_src = cv2.merge(hsv)
sized = np.clip(
cv2.cvtColor(hsv_src, cv2.COLOR_HSV2RGB), 0, 255
) # HSV to RGB (the same as previous)
else:
sized *= dexp
if blur:
if blur == 1:
dst = cv2.GaussianBlur(sized, (17, 17), 0)
# cv2.bilateralFilter(sized, dst, 17, 75, 75)
else:
ksize = (blur / 2) * 2 + 1
dst = cv2.GaussianBlur(sized, (ksize, ksize), 0)
if blur == 1:
img_rect = [0, 0, sized.cols, sized.rows]
for b in truth:
left = (b.x - b.w / 2.0) * sized.shape[1]
width = b.w * sized.shape[1]
top = (b.y - b.h / 2.0) * sized.shape[0]
height = b.h * sized.shape[0]
roi(left, top, width, height)
roi = roi & img_rect
dst[roi[0] : roi[0] + roi[2], roi[1] : roi[1] + roi[3]] = sized[
roi[0] : roi[0] + roi[2], roi[1] : roi[1] + roi[3]
]
sized = dst
if gaussian_noise:
noise = np.array(sized.shape)
gaussian_noise = min(gaussian_noise, 127)
gaussian_noise = max(gaussian_noise, 0)
cv2.randn(noise, 0, gaussian_noise) # mean and variance
sized = sized + noise
except:
print("OpenCV can't augment image: " + str(w) + " x " + str(h))
sized = mat
return sized
def filter_truth(bboxes, dx, dy, sx, sy, xd, yd):
bboxes[:, 0] -= dx
bboxes[:, 2] -= dx
bboxes[:, 1] -= dy
bboxes[:, 3] -= dy
bboxes[:, 0] = np.clip(bboxes[:, 0], 0, sx)
bboxes[:, 2] = np.clip(bboxes[:, 2], 0, sx)
bboxes[:, 1] = np.clip(bboxes[:, 1], 0, sy)
bboxes[:, 3] = np.clip(bboxes[:, 3], 0, sy)
out_box = list(
np.where(
((bboxes[:, 1] == sy) & (bboxes[:, 3] == sy))
| ((bboxes[:, 0] == sx) & (bboxes[:, 2] == sx))
| ((bboxes[:, 1] == 0) & (bboxes[:, 3] == 0))
| ((bboxes[:, 0] == 0) & (bboxes[:, 2] == 0))
)[0]
)
list_box = list(range(bboxes.shape[0]))
for i in out_box:
list_box.remove(i)
bboxes = bboxes[list_box]
bboxes[:, 0] += xd
bboxes[:, 2] += xd
bboxes[:, 1] += yd
bboxes[:, 3] += yd
return bboxes
def blend_truth_mosaic(
out_img, img, bboxes, w, h, cut_x, cut_y, i_mixup, left_shift, right_shift, top_shift, bot_shift
):
left_shift = min(left_shift, w - cut_x)
top_shift = min(top_shift, h - cut_y)
right_shift = min(right_shift, cut_x)
bot_shift = min(bot_shift, cut_y)
if i_mixup == 0:
bboxes = filter_truth(bboxes, left_shift, top_shift, cut_x, cut_y, 0, 0)
out_img[:cut_y, :cut_x] = img[top_shift : top_shift + cut_y, left_shift : left_shift + cut_x]
if i_mixup == 1:
bboxes = filter_truth(bboxes, cut_x - right_shift, top_shift, w - cut_x, cut_y, cut_x, 0)
out_img[:cut_y, cut_x:] = img[top_shift : top_shift + cut_y, cut_x - right_shift : w - right_shift]
if i_mixup == 2:
bboxes = filter_truth(bboxes, left_shift, cut_y - bot_shift, cut_x, h - cut_y, 0, cut_y)
out_img[cut_y:, :cut_x] = img[cut_y - bot_shift : h - bot_shift, left_shift : left_shift + cut_x]
if i_mixup == 3:
bboxes = filter_truth(
bboxes, cut_x - right_shift, cut_y - bot_shift, w - cut_x, h - cut_y, cut_x, cut_y
)
out_img[cut_y:, cut_x:] = img[
cut_y - bot_shift : h - bot_shift, cut_x - right_shift : w - right_shift
]
return out_img, bboxes
def draw_box(img, bboxes):
for b in bboxes:
img = cv2.rectangle(img, (b[0], b[1]), (b[2], b[3]), (0, 255, 0), 2)
return img
class Yolo_dataset(Dataset):
def __init__(self, lable_path, cfg, train=True):
super(Yolo_dataset, self).__init__()
if cfg.mixup == 2:
print("cutmix=1 - isn't supported for Detector")
raise
elif cfg.mixup == 2 and cfg.letter_box:
print("Combination: letter_box=1 & mosaic=1 - isn't supported, use only 1 of these parameters")
raise
self.cfg = cfg
self.train = train
truth = {}
f = open(lable_path, "r", encoding="utf-8")
for line in f.readlines():
data = line.split(" ")
truth[data[0]] = []
for i in data[1:]:
truth[data[0]].append([int(float(j)) for j in i.split(",")])
self.truth = truth
self.imgs = list(self.truth.keys())
def __len__(self):
return len(self.truth.keys())
def __getitem__(self, index):
if not self.train:
return self._get_val_item(index)
img_path = self.imgs[index]
bboxes = np.array(self.truth.get(img_path), dtype=np.float)
img_path = os.path.join(self.cfg.dataset_dir, img_path)
use_mixup = self.cfg.mixup
if random.randint(0, 1):
use_mixup = 0
if use_mixup == 3:
min_offset = 0.2
cut_x = random.randint(int(self.cfg.w * min_offset), int(self.cfg.w * (1 - min_offset)))
cut_y = random.randint(int(self.cfg.h * min_offset), int(self.cfg.h * (1 - min_offset)))
r1, r2, r3, r4, r_scale = 0, 0, 0, 0, 0
dhue, dsat, dexp, flip, blur = 0, 0, 0, 0, 0
gaussian_noise = 0
out_img = np.zeros([self.cfg.h, self.cfg.w, 3])
out_bboxes = []
for i in range(use_mixup + 1):
if i != 0:
img_path = random.choice(list(self.truth.keys()))
bboxes = np.array(self.truth.get(img_path), dtype=np.float)
img_path = os.path.join(self.cfg.dataset_dir, img_path)
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if img is None:
continue
oh, ow, oc = img.shape
dh, dw, dc = np.array(np.array([oh, ow, oc]) * self.cfg.jitter, dtype=np.int)
dhue = rand_uniform_strong(-self.cfg.hue, self.cfg.hue)
dsat = rand_scale(self.cfg.saturation)
dexp = rand_scale(self.cfg.exposure)
pleft = random.randint(-dw, dw)
pright = random.randint(-dw, dw)
ptop = random.randint(-dh, dh)
pbot = random.randint(-dh, dh)
flip = random.randint(0, 1) if self.cfg.flip else 0
if self.cfg.blur:
tmp_blur = random.randint(0, 2) # 0 - disable, 1 - blur background, 2 - blur the whole image
if tmp_blur == 0:
blur = 0
elif tmp_blur == 1:
blur = 1
else:
blur = self.cfg.blur
if self.cfg.gaussian and random.randint(0, 1):
gaussian_noise = self.cfg.gaussian
else:
gaussian_noise = 0
if self.cfg.letter_box:
img_ar = ow / oh
net_ar = self.cfg.w / self.cfg.h
result_ar = img_ar / net_ar
# print(" ow = %d, oh = %d, w = %d, h = %d, img_ar = %f, net_ar = %f, result_ar = %f \n", ow, oh, w, h, img_ar, net_ar, result_ar);
if result_ar > 1: # sheight - should be increased
oh_tmp = ow / net_ar
delta_h = (oh_tmp - oh) / 2
ptop = ptop - delta_h
pbot = pbot - delta_h
# print(" result_ar = %f, oh_tmp = %f, delta_h = %d, ptop = %f, pbot = %f \n", result_ar, oh_tmp, delta_h, ptop, pbot);
else: # swidth - should be increased
ow_tmp = oh * net_ar
delta_w = (ow_tmp - ow) / 2
pleft = pleft - delta_w
pright = pright - delta_w
# printf(" result_ar = %f, ow_tmp = %f, delta_w = %d, pleft = %f, pright = %f \n", result_ar, ow_tmp, delta_w, pleft, pright);
swidth = ow - pleft - pright
sheight = oh - ptop - pbot
truth, min_w_h = fill_truth_detection(
bboxes,
self.cfg.boxes,
self.cfg.classes,
flip,
pleft,
ptop,
swidth,
sheight,
self.cfg.w,
self.cfg.h,
)
if (min_w_h / 8) < blur and blur > 1: # disable blur if one of the objects is too small
blur = min_w_h / 8
ai = image_data_augmentation(
img,
self.cfg.w,
self.cfg.h,
pleft,
ptop,
swidth,
sheight,
flip,
dhue,
dsat,
dexp,
gaussian_noise,
blur,
truth,
)
if use_mixup == 0:
out_img = ai
out_bboxes = truth
if use_mixup == 1:
if i == 0:
old_img = ai.copy()
old_truth = truth.copy()
elif i == 1:
out_img = cv2.addWeighted(ai, 0.5, old_img, 0.5)
out_bboxes = np.concatenate([old_truth, truth], axis=0)
elif use_mixup == 3:
if flip:
tmp = pleft
pleft = pright
pright = tmp
left_shift = int(min(cut_x, max(0, (-int(pleft) * self.cfg.w / swidth))))
top_shift = int(min(cut_y, max(0, (-int(ptop) * self.cfg.h / sheight))))
right_shift = int(min((self.cfg.w - cut_x), max(0, (-int(pright) * self.cfg.w / swidth))))
bot_shift = int(min(self.cfg.h - cut_y, max(0, (-int(pbot) * self.cfg.h / sheight))))
out_img, out_bbox = blend_truth_mosaic(
out_img,
ai,
truth.copy(),
self.cfg.w,
self.cfg.h,
cut_x,
cut_y,
i,
left_shift,
right_shift,
top_shift,
bot_shift,
)
out_bboxes.append(out_bbox)
# print(img_path)
if use_mixup == 3:
out_bboxes = np.concatenate(out_bboxes, axis=0)
out_bboxes1 = np.zeros([self.cfg.boxes, 5])
out_bboxes1[: min(out_bboxes.shape[0], self.cfg.boxes)] = out_bboxes[
: min(out_bboxes.shape[0], self.cfg.boxes)
]
return out_img, out_bboxes1
def _get_val_item(self, index):
""""""
img_path = self.imgs[index]
bboxes_with_cls_id = np.array(self.truth.get(img_path), dtype=np.float)
img = cv2.imread(os.path.join(self.cfg.dataset_dir, img_path))
# img_height, img_width = img.shape[:2]
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# img = cv2.resize(img, (self.cfg.w, self.cfg.h))
# img = torch.from_numpy(img.transpose(2, 0, 1)).float().div(255.0).unsqueeze(0)
num_objs = len(bboxes_with_cls_id)
target = {}
# boxes to coco format
boxes = bboxes_with_cls_id[..., :4]
boxes[..., 2:] = boxes[..., 2:] - boxes[..., :2] # box width, box height
target["boxes"] = torch.as_tensor(boxes, dtype=torch.float32)
target["labels"] = torch.as_tensor(bboxes_with_cls_id[..., -1].flatten(), dtype=torch.int64)
target["image_id"] = torch.tensor([get_image_id(img_path)])
target["area"] = (target["boxes"][:, 3]) * (target["boxes"][:, 2])
target["iscrowd"] = torch.zeros((num_objs,), dtype=torch.int64)
return img, target
class Yolo_BEV_dataset(Dataset):
"""BEV pytorch dataset to load KITTI"""
def __init__(self, config: edict, split: str = "train") -> None:
"""
Args:
config (edict): Easy directory configuration file
split (str): Split to load. Can be ["train", "test", "val"]. Default = train.
"""
super(Yolo_BEV_dataset, self).__init__()
self.cfg = config
self.split = split
# read images paths
self.img_paths = []
with open(os.path.join(self.cfg.dataset_dir, f"{split}_split.txt"), "r") as f:
for line in f:
self.img_paths.append(line.strip())
# read labels
column_types = {
"ID": str,
"alpha": float,
"3D_d": float,
"3D_l": float,
"3D_w": float,
"cos": float,
"sin": float,
"type": str,
}
self.labels = pd.read_csv(
os.path.join(self.cfg.dataset_dir, f"{split}_split.csv"), dtype=column_types
)
# extra params
self.fov = 82 # KITTI fov [TODO: need to adapt to new datasets]
self.base_width = 864
self.base_height = 135
self.canvas = np.zeros(shape=(self.cfg.height, self.cfg.width, self.cfg.channels), dtype=np.float)
self.mapping = {}
with open(self.cfg.names_path, "r") as f:
for i, line in enumerate(f):
self.mapping[line] = float(i)
def __len__(self) -> int:
"""Number of elements in dataset
Returns:
int: Number of elements in dataset
"""
return len(self.img_paths)
def __getitem__(self, idx: int) -> Tuple[np.ndarray, np.ndarray]:
"""Get single item from dataset
Args:
idx (int): Sample index
Returns:
Tuple[np.ndarray, np.ndarray]: img tensor and labels. Returns
labels == [[-1,...,-1,"None"]] if no label is present for an img.
"""
############# read image
img = cv2.imread(self.img_paths[idx])
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB).astype(np.float)
img /= 255.0
# rescale image to input size and position it to canvas center
new_w = int(self.base_width * (self.fov / 360.0))
new_w = new_w if new_w % 2 == 0 else new_w - 1
new_h = int((img.shape[0] / img.shape[1]) * new_w)
new_h = new_h if new_h % 2 == 0 else new_h - 1
img = cv2.resize(img, (new_w, new_h))
# define padding borders
tb_border = (self.canvas.shape[0] - new_h) // 2 # top/bottom border
lr_border = (self.canvas.shape[1] - new_w) // 2 # left/right border
# fit image into canvas
canvas = self.canvas.copy()
canvas[tb_border : canvas.shape[0] - tb_border, lr_border : canvas.shape[1] - lr_border, :] = img
img = canvas
############## read labels
label_id = self.img_paths[idx].split("/")[-1].split(".")[0]
matches = self.labels[self.labels["ID"] == label_id]
# check if img has labels
if matches.empty:
labels = np.array([-1.0 for _ in range(7)])
else:
matches = matches.loc[:, matches.columns != "ID"]
matches = matches.replace({"cls": self.mapping})
labels = matches.to_numpy().astype(np.float)
return img, labels
def get_image_id(filename: str) -> int:
"""
Convert a string to a integer.
Make sure that the images and the `image_id`s are in one-one correspondence.
There are already `image_id`s in annotations of the COCO dataset,
in which case this function is unnecessary.
For creating one's own `get_image_id` function, one can refer to
https://github.com/google/automl/blob/master/efficientdet/dataset/create_pascal_tfrecord.py#L86
or refer to the following code (where the filenames are like 'level1_123.jpg')
>>> lv, no = os.path.splitext(os.path.basename(filename))[0].split("_")
>>> lv = lv.replace("level", "")
>>> no = f"{int(no):04d}"
>>> return int(lv+no)
"""
raise NotImplementedError("Create your own 'get_image_id' function")
lv, no = os.path.splitext(os.path.basename(filename))[0].split("_")
lv = lv.replace("level", "")
no = f"{int(no):04d}"
return int(lv + no)
if __name__ == "__main__":
from cfg import Cfg
import matplotlib.pyplot as plt
random.seed(2020)
np.random.seed(2020)
Cfg.dataset_dir = "/mnt/e/Dataset"
dataset = Yolo_dataset(Cfg.train_label, Cfg)
for i in range(100):
out_img, out_bboxes = dataset.__getitem__(i)
a = draw_box(out_img.copy(), out_bboxes.astype(np.int32))
plt.imshow(a.astype(np.int32))
plt.show() | 0.482673 | 0.378115 |
from __future__ import unicode_literals
from ckeditor.fields import RichTextField
from collections import defaultdict
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from enum import Enum, unique
from localflavor.us.models import USStateField
from local_groups.models import Group
from pages.models import AlertLevels
from phonenumber_field.modelfields import PhoneNumberField
from wagtail.wagtailadmin.edit_handlers import FieldPanel
from wagtail.wagtailcore.fields import RichTextField as WagtailRichTextField
from wagtail.wagtailsnippets.models import register_snippet
import datetime
import logging
# Module-level logger for the nominations app.
logger = logging.getLogger(__name__)
class Nomination(models.Model):
    """
    A nomination form is filled out by the group with basic information about
    the group and what it will do to help the candidate.
    """
    # TODO: move this into application model
    group_nomination_process = models.TextField(
        max_length=500,
        blank=False,
        null=True,
        verbose_name="Briefly describe your group's nomination process"
    )
    STATUSES = (
        ('incomplete', 'Incomplete'),
        ('complete', 'Complete'),
    )
    status = models.CharField(
        max_length=16,
        choices=STATUSES,
        default='incomplete',
        blank=True
    )
    def __unicode__(self):
        # The reverse one-to-one accessor `self.application` raises when no
        # Application is attached yet, so fall back to a pk-based label.
        # (Was a bare `except:`, which would also swallow KeyboardInterrupt
        # and SystemExit; narrowed to Exception.)
        try:
            return self.application.candidate_first_name + ' ' + self.application.candidate_last_name + ' - ' + ' Nomination'
        except Exception:
            return 'Nomination ' + str(self.pk)
    def save(self, *args, **kwargs):
        """Save the nomination, then cascade derived updates.

        If an application is attached, re-save it so its derived status stays
        in sync; on first save, seed one blank response per NominationQuestion.
        """
        super(Nomination, self).save(*args, **kwargs)
        if hasattr(self, 'application'):
            self.application.save()
        if self.nominationresponse_set.count() == 0:
            for q in NominationQuestion.objects.all():
                self.nominationresponse_set.create(question=q)
class NominationQuestion(models.Model):
    """A question on the group nomination form.

    Nomination.save() auto-creates one NominationResponse per question.
    """
    text = models.TextField()
    def __unicode__(self):
        return self.text
class NominationResponse(models.Model):
    """A group's answer to one NominationQuestion on a nomination form."""
    nomination = models.ForeignKey(Nomination)
    question = models.ForeignKey(NominationQuestion)
    # Free-text answer supplied by the group.
    response = models.TextField(max_length=1000)
    def __unicode__(self):
        # `unicode` builtin: this module targets Python 2.
        return unicode(self.question)
@register_snippet
@python_2_unicode_compatible # provide equivalent __unicode__ and __str__ methods on Python 2
class NominationsPlatformAlert(models.Model):
    """Site-wide alert banner shown on nominations platform pages.

    Managed as a Wagtail snippet; `show` toggles visibility and `alert_level`
    selects the Bootstrap 3 alert style.
    """
    content = WagtailRichTextField()
    show = models.BooleanField(
        default=False,
        help_text='Show alert on nominations platform pages.'
    )
    alert_level = models.IntegerField(
        choices=[x.value for x in AlertLevels],
        default=AlertLevels.warning.value[0],
        blank=False,
        null=False,
        help_text="""
        Set the alert style corresponding to Bootstrap 3 alert levels.
        See: https://getbootstrap.com/docs/3.3/components/#alerts-dismissible
        """
    )
    panels = [
        FieldPanel('content'),
        FieldPanel('show'),
        FieldPanel('alert_level')
    ]
    def __str__(self):
        return self.content
class Questionnaire(models.Model):
    """
    A platform questionnaire is filled out by the candidate with basic information and in-depth policy positions.
    """
    STATUSES = (
        ('incomplete', 'Incomplete'),
        ('complete', 'Complete'),
        ('sent', 'Sent to Candidate'),
    )
    status = models.CharField(max_length=16, choices=STATUSES, default='incomplete', blank=True)
    # Candidate Information and Social Media
    candidate_first_name = models.CharField(max_length=255, null=True, blank=False, verbose_name="Candidate First Name")
    candidate_last_name = models.CharField(max_length=255, null=True, blank=False, verbose_name="Candidate Last Name")
    candidate_bio = models.TextField(max_length=1000, blank=False, null=False, verbose_name = "Candidate Bio")
    candidate_email = models.EmailField(null=True, blank=False, verbose_name="Candidate Email", max_length=255)
    candidate_phone = PhoneNumberField(null=True, blank=True, verbose_name="Candidate Phone Number")
    candidate_office = models.CharField(null=True, max_length=255, blank=False, verbose_name="Candidate Office")
    candidate_district = models.CharField(null=True, max_length=255, blank=True, verbose_name="Candidate District")
    candidate_party = models.CharField(null=True, max_length=255, blank=False, verbose_name="Candidate Party Affiliation")
    candidate_held_office = models.NullBooleanField(
        verbose_name="Has the candidate ever held public office?"
    )
    candidate_is_member = models.NullBooleanField(
        verbose_name="Is candidate a member of Our Revolution?"
    )
    candidate_city = models.CharField(null=True, max_length=255, blank=True, verbose_name="Candidate City")
    candidate_state = USStateField(max_length=2, null=True, blank=False, verbose_name="Candidate State")
    general_election_date = models.DateField(verbose_name = 'General Election Date', null = True, blank = False)
    primary_election_date = models.DateField(verbose_name = 'Primary Election Date', null = True, blank = True)
    candidate_website_url = models.URLField(null=True, blank=True, verbose_name="Candidate Website URL", max_length=255)
    candidate_volunteer_url = models.URLField(null=True, blank=True, verbose_name="Candidate Volunteer URL", max_length=255)
    candidate_donate_url = models.URLField(null=True, blank=True, verbose_name="Candidate Donate URL", max_length=255)
    candidate_facebook_url = models.URLField(null=True, blank=True, verbose_name="Candidate Facebook URL", max_length=255)
    candidate_twitter_url = models.URLField(null=True, blank=True, verbose_name="Candidate Twitter URL", max_length=255)
    candidate_instagram_url = models.URLField(null=True, blank=True, verbose_name="Candidate Instagram URL", max_length=255)
    candidate_youtube_url = models.URLField(null=True, blank=True, verbose_name="Candidate YouTube URL", max_length=255)
    # Whether the candidate (vs. a group rep) filled this out themselves.
    completed_by_candidate = models.NullBooleanField(null=True, blank=True)
    def __unicode__(self):
        return '%s %s | %s | %s [%s]' % (
            self.candidate_first_name,
            self.candidate_last_name,
            self.candidate_office,
            self.general_election_date,
            self.id,
        )
    """Get response to question about issues, or None"""
    def _campaign_issues(self, *args, **kwargs):
        # Free-text position on the designated "issues" question (the id
        # comes from settings); None when unanswered.
        response = self.response_set.filter(
            question_id=settings.NOMINATIONS_QUESTION_ISSUES_ID,
        ).first()
        position = response.position if response else None
        return position
    campaign_issues = property(_campaign_issues)
    def save(self, skip_application_save=False, *args, **kwargs):
        # On first save, seed one blank Response per Question.
        super(Questionnaire, self).save(*args, **kwargs)
        if self.response_set.count() == 0:
            for q in Question.objects.all():
                self.response_set.create(question=q)
        '''
        Save the application(s) attached to a questionnaire when the
        questionnaire is saved.
        '''
        if not skip_application_save:
            for app in self.application_set.all():
                app.save()
class Question(models.Model):
    """A policy question on the candidate questionnaire.

    Questionnaire.save() auto-creates one Response per question.
    """
    text = models.TextField(verbose_name="Question Text")
    include_multi_choice = models.BooleanField(default=True, verbose_name="Include Multiple Choice Selection")
    def __unicode__(self):
        return self.text
class Response(models.Model):
    """A candidate's answer to one questionnaire Question."""
    # NOTE(review): letter codes are not in alphabetical order; the tuple
    # order presumably drives display order ('b' = Strongly Disagree last)
    # -- confirm before "fixing".
    QUESTIONNAIRE_CHOICES = (
        ('a', 'Strongly Agree'),
        ('c', 'Somewhat Agree'),
        ('d', 'Somewhat Disagree'),
        ('b', 'Strongly Disagree'),
    )
    questionnaire = models.ForeignKey(Questionnaire)
    question = models.ForeignKey(Question)
    response = models.CharField(max_length=1, blank=False, null=False, choices=QUESTIONNAIRE_CHOICES)
    position = models.TextField(max_length=1000, blank=True, null=True,verbose_name="Candidate's position on this issue:")
    def __unicode__(self):
        # `unicode` builtin: this module targets Python 2.
        return unicode(self.question)
@unique
class ApplicationType(Enum):
    """Level of support requested; values are (id, label) pairs used as choices."""
    basic = (1, 'Basic Support')
    priority = (2, 'Priority Support')
class Application(models.Model):
    """
    An application is a single submission for an endorsement. Each application
    consists of a group nomination and a candidate questionnaire, and has a
    many-to-one relationship with a group.
    """
    # See http://www.ncsl.org/research/elections-and-campaigns/primary-types.aspx
    primary_election_type_choices = (
        (1, 'Closed Primary'),
        (2, 'Partially Closed Primary'),
        (3, 'Partially Open Primary'),
        (4, 'Open to Unaffiliated Voters Primary'),
        (5, 'Open Primary'),
        (6, 'Top-Two Primary'),
        (7, 'Presidential Primary'),
        (99, 'Other'),
    )
    staff_recommendation_choices = (
        (1, 'Recommend to Endorse'),
        (2, 'Recommend Not to Endorse'),
        (3, 'No Recommendation'),
    )
    application_type = models.IntegerField(
        blank=True,
        choices=[x.value for x in ApplicationType],
        null=True,
    )
    """Django User to use instead of legacy auth0 user"""
    auth_user = models.ForeignKey(
        User,
        blank=True,
        null=True,
    )
    fundraising_date_of_filing = models.DateField(
        blank=True,
        null=True,
        verbose_name='Filing Date for Fundraising Report'
    )
    fundraising_date_accessed = models.DateField(
        blank=True,
        null=True,
        verbose_name='Date fundraising information was accessed'
    )
    fundraising_source_url = models.URLField(
        blank=True,
        max_length=255,
        null=True,
        verbose_name='Fundraising Source URL'
    )
    """Legacy field for auth0 user id"""
    user_id = models.CharField(max_length=255, null=True, blank=True)
    create_dt = models.DateTimeField(auto_now_add=True)
    submitted_dt = models.DateTimeField(
        null=True,
        blank=True,
        verbose_name='Submitted at'
    )
    nomination = models.OneToOneField(
        Nomination,
        on_delete=models.CASCADE,
        primary_key=False,
        null=True,
        blank=True,
        related_name='application',
        verbose_name='Group Nomination Form:',
    )
    primary_election_type = models.IntegerField(
        blank=True,
        choices=primary_election_type_choices,
        null=True,
    )
    questionnaire = models.ForeignKey(
        Questionnaire,
        on_delete=models.SET_NULL,
        null=True,
        blank=True
    )
    group = models.ForeignKey(Group, to_field="group_id")
    rep_email = models.EmailField(
        null=True,
        blank=False,
        verbose_name="Contact Email",
        max_length=254
    )
    rep_first_name = models.CharField(
        max_length=35,
        null=True,
        blank=False,
        verbose_name="First Name"
    )
    rep_last_name = models.CharField(
        max_length=35,
        null=True,
        blank=False,
        verbose_name="Last Name"
    )
    rep_phone = PhoneNumberField(
        null=True,
        blank=True,
        verbose_name="Phone Number"
    )
    # TODO: change to foreign key and create new object for each new candidate,
    # implement autocomplete to minimize duplicate candidates
    candidate_first_name = models.CharField(
        max_length=255,
        null=True,
        blank=False,
        verbose_name="Candidate First Name"
    )
    candidate_last_name = models.CharField(
        max_length=255,
        null=True,
        blank=False,
        verbose_name="Candidate Last Name"
    )
    candidate_office = models.CharField(
        null=True,
        max_length=255,
        blank=False,
        verbose_name="Candidate Office"
    )
    candidate_district = models.CharField(
        null=True,
        max_length=255,
        blank=True,
        verbose_name="Candidate District"
    )
    candidate_city = models.CharField(
        null=True,
        max_length=255,
        blank=True,
        verbose_name="Candidate City"
    )
    candidate_state = USStateField(max_length=2, null=True, blank=False)
    authorized_email = models.EmailField(
        null=True,
        blank=True,
        verbose_name="Authorized Email",
        max_length=254
    )
    # TODO TECH-840 convert statuses to integer fields
    STATUSES = (
        (
            'needs-group-form-and-questionnaire',
            'Needs Group Form and Questionnaire'
        ),
        ('needs-questionnaire', 'Needs Questionnaire'),
        ('needs-group-form', 'Needs Group Form'),
        # Deprecated as of 2019-01-08
        # ('incomplete', 'Needs Submission'),
        ('submitted', 'Submitted'),
        ('needs-research', 'Needs Research'),
        ('needs-staff-review', 'Needs Staff Review'),
        ('under-review', 'Under Review'),
        ('approved', 'Endorsed'),
        ('removed', 'Not Endorsed'),
        ('expired', 'Expired'),
        ('hold', 'Hold'),
    )
    # Statuses that signify whether a group can still edit an application
    EDITABLE_STATUSES = [
        'needs-group-form-and-questionnaire',
        'needs-questionnaire',
        'needs-group-form',
    ]
    status = models.CharField(
        max_length=64,
        choices=STATUSES,
        default='needs-group-form-and-questionnaire'
    )
    # Volunteer Data Entry
    vol_incumbent = models.NullBooleanField(
        null=True,
        blank=True,
        verbose_name='Incumbent?'
    )
    vol_dem_challenger = models.NullBooleanField(
        null=True,
        blank=True,
        verbose_name='If primary, who are the Democratic challengers?'
    )
    # TODO: rename to vol_other_candidates and remove old field from code
    # and db after a/b deploy issues are resolved
    # legacy field
    vol_other_progressives = models.TextField(
        null=True,
        blank=True,
        max_length=500,
        verbose_name='Other candidates running:',
        help_text='Please indicate party affiliation and other progressives. Max length 500 characters.'
    )
    vol_polling = models.TextField(
        null=True,
        blank=True,
        max_length=500,
        verbose_name='Polling:'
    )
    vol_endorsements = models.TextField(
        null=True,
        blank=True,
        max_length=500,
        verbose_name='Endorsements:'
    )
    vol_advantage = models.CharField(
        null=True,
        blank=True,
        max_length=50,
        verbose_name='Previous Election D% or R% Advantage:'
    )
    vol_turnout = models.CharField(
        null=True,
        blank=True,
        max_length=10,
        verbose_name='Previous Election Year Turnout:'
    )
    vol_win_number = models.IntegerField(
        null=True,
        blank=True,
        verbose_name='Win Number:'
    )
    vol_fundraising = models.IntegerField(
        null=True,
        blank=True,
        verbose_name='How much money fundraised?'
    )
    #legacy field
    vol_opponent_fundraising = models.IntegerField(
        null=True,
        blank=True,
        verbose_name='How much competitors have fundraised?'
    )
    vol_crimes = models.TextField(
        null=True,
        blank=True,
        max_length=500,
        verbose_name='Crimes or Scandals (please add links to source):'
    )
    vol_notes = models.TextField(
        null=True,
        blank=True,
        max_length=1000,
        verbose_name='Volunteer Notes:',
        help_text='Max length 1000 characters.'
    )
    # Staff only research fields
    CLASSIFICATIONS = (
        ('1', 'I'),
        ('2', 'II'),
        ('3', 'III'),
    )
    VET_STATUSES = (
        ('0', 'Pending'),
        ('1', 'Passed'),
        ('2', 'Failed'),
        ('3', 'Not Submitted'),
    )
    """TODO: remove?"""
    RECOMMENDATIONS = (
        ('1', 'Endorse'),
        ('2', 'Do Not Endorse')
    )
    classification_level = models.CharField(
        max_length=64,
        choices=CLASSIFICATIONS,
        default='1'
    )
    staff = models.CharField(
        max_length=64,
        blank=True,
        null=True,
    )
    """TODO: remove?"""
    recommendation = models.CharField(
        max_length=64,
        choices=RECOMMENDATIONS,
        default='1'
    )
    staff_bio = RichTextField(
        null=True,
        blank=True,
        verbose_name='Candidate Bio:',
        help_text='This will prepopulate from the candidate questionnaire if left blank.'
    )
    staff_recommendation = models.IntegerField(
        blank=True,
        choices=staff_recommendation_choices,
        null=True,
    )
    stand_out_information = RichTextField(
        blank=True,
        null=True,
    )
    state_of_the_race = RichTextField(
        null=True,
        blank=True,
        verbose_name='State of the Race:',
    )
    local_group_info = RichTextField(
        null=True,
        blank=True,
        verbose_name='OR Local Group Info:',
        help_text='This will prepopulate from the local group\'s endorsement process if left blank.'
    )
    staff_notes = RichTextField(
        null=True,
        blank=True,
        verbose_name='Staff Notes or Flags:',
        help_text='This will prepopulate from volunteer notes if left blank.'
    )
    vet_status = models.CharField(
        max_length=64,
        choices=VET_STATUSES,
        default='0'
    )
    vet = RichTextField(
        null=True,
        blank=True,
        verbose_name='Vet Details:',
    )
    local_support = RichTextField(
        null=True,
        blank=True,
        verbose_name='Local Support:',
        help_text='This will prepopulate from the local group\'s support question if left blank.'
    )
    def __unicode__(self):
        return unicode(self.group) + ' - ' + self.candidate_first_name + ' ' + self.candidate_last_name
    def _candidate_name(self):
        # Full display name; assumes both name fields are populated.
        return self.candidate_first_name + ' ' + self.candidate_last_name
    candidate_name = property(_candidate_name)
    '''
    Group candidates by party and return list
    '''
    def _candidates_by_party(self):
        # Bucket related ApplicationCandidate rows by party code, ordered
        # by party then name.
        candidates = defaultdict(list)
        for application_candidate in self.applicationcandidate_set.order_by(
            'party',
            'first_name',
            'last_name'
        ):
            candidates[application_candidate.party].append(
                application_candidate
            )
        # NOTE(review): this returns the *bound method* dict.items, not its
        # result. Django templates call callables during variable
        # resolution, so it works in templates; Python callers must invoke
        # the returned value. Confirm intent before changing.
        return candidates.items
    candidates_by_party = property(_candidates_by_party)
    def auto_populate_research_fields(self):
        """Auto-populate staff write-up fields from already present info"""
        if self.questionnaire:
            if self.questionnaire.candidate_bio and not self.staff_bio:
                self.staff_bio = self.questionnaire.candidate_bio
        if self.nomination:
            if self.nomination.group_nomination_process and not self.local_group_info:
                self.local_group_info = self.nomination.group_nomination_process
            # question ID 8 is "What actions will the group take
            # and how many people have agreed to volunteer/support?
            question = self.nomination.nominationresponse_set.filter(
                question_id=8
            ).first()
            if question and not self.local_support:
                self.local_support = question.response.encode('utf-8')
        if self.vol_notes and not self.staff_notes:
            self.staff_notes = self.vol_notes
    def create_related_objects(self):
        """Create related nomination and questionnaire for application."""
        if not self.nomination:
            self.nomination = Nomination.objects.create()
        if not self.questionnaire:
            self.questionnaire = Questionnaire.objects.create()
    def generate_application_status(self):
        """
        Returns a generated status based on completion of various items.
        Nomination is filled out by the group with basic information about
        the group and what it will do to help the candidate.
        Questionnaire is filled out by the candidate with basic information and
        in-depth policy positions.
        """
        # Status is only derived while the group can still edit; once staff
        # move it past submission the stored value is authoritative.
        if self.status in self.EDITABLE_STATUSES:
            if self.nomination.status == 'incomplete':
                if self.questionnaire.status == 'complete':
                    status = 'needs-group-form'
                else:
                    status = 'needs-group-form-and-questionnaire'
            else:
                # nomination complete
                if self.questionnaire.status == 'complete':
                    # questionnaire complete
                    """
                    Set as submitted if nomination + questionnaire are complete
                    """
                    status = 'submitted'
                else:
                    # needs questionaire
                    status = 'needs-questionnaire'
        else:
            status = self.status
        return status
    def is_editable(self):
        """Returns whether a group can edit this application."""
        if self.status in self.EDITABLE_STATUSES:
            return True
        else:
            return False
    def save(self, *args, **kwargs):
        # Ensure related nomination/questionnaire exist before deriving
        # status from them.
        if not self.nomination or not self.questionnaire:
            self.create_related_objects()
        self.auto_populate_research_fields()
        self.status = self.generate_application_status()
        if self.status == 'submitted' and self.submitted_dt is None:
            # NOTE(review): naive local timestamp; confirm USE_TZ is off or
            # switch to django.utils.timezone.now().
            self.submitted_dt = datetime.datetime.now()
        super(Application, self).save(*args, **kwargs)
    class Meta:
        permissions = (
            (
                "bulk_change_application_status",
                "Can bulk change status of applications"
            ),
            (
                "export_pdf_application",
                "Can export to pdf"
            ),
            (
                "admin_application",
                "Can admin override application data"
            ),
        )
        verbose_name = 'Candidate Application'
class ApplicationCandidate(models.Model):
    """One candidate in the race an Application concerns (incl. opponents)."""
    party_choices = (
        (1, 'Democratic Party'),
        (2, 'Green Party'),
        (3, 'Independent/No Party Affiliation'),
        (4, 'Republican Party'),
        (5, 'Libertarian Party'),
        (6, 'Vermont Progressive Party'),
        (99, 'Other'),
    )
    application = models.ForeignKey(Application, on_delete=models.CASCADE)
    description = models.CharField(
        blank=True,
        max_length=500,
        null=True,
    )
    first_name = models.CharField(
        blank=True,
        max_length=255,
        null=True,
    )
    fundraising = models.IntegerField(
        blank=True,
        null=True,
        verbose_name='Cash on Hand'
    )
    last_name = models.CharField(
        blank=True,
        max_length=255,
        null=True,
    )
    party = models.IntegerField(
        blank=True,
        choices=party_choices,
        null=True,
    )
    website_url = models.URLField(
        blank=True,
        max_length=255,
        null=True,
    )
    def _name(self):
        """Display name from whichever name parts are set; None if neither."""
        parts = [part for part in (self.first_name, self.last_name) if part]
        if not parts:
            return None
        return ' '.join(parts)
    name = property(_name)
    def __unicode__(self):
        label = str(self.id)
        if self.name:
            label += ' ' + self.name
        return label
class InitiativeApplication(models.Model):
    """
    An application submitted by a local group asking for endorsement of a
    ballot initiative.
    """
    # Django User to use instead of legacy auth0 user
    auth_user = models.ForeignKey(
        User,
        blank=True,
        null=True,
    )
    # Legacy Auth0 user id
    user_id = models.CharField(max_length=255, null=True, blank=True)
    create_dt = models.DateTimeField(auto_now_add=True)
    submitted_dt = models.DateTimeField(null=True, blank=True, verbose_name = 'Submitted at')
    group = models.ForeignKey(Group, to_field="group_id")
    rep_email = models.EmailField(null=True, blank=False, verbose_name="Contact Email", max_length=254)
    rep_first_name = models.CharField(max_length=35, null=True, blank=False, verbose_name="First Name")
    # verbose_name was a broken "<NAME>" placeholder; restored to match
    # Application.rep_last_name.
    rep_last_name = models.CharField(max_length=35, null=True, blank=False, verbose_name="Last Name")
    rep_phone = PhoneNumberField(null=True, blank=True, verbose_name="Phone Number")
    # Stray leading space removed from the label text.
    name = models.CharField(max_length=254, null=True, blank=False, verbose_name="Initiative Name")
    election_date = models.DateField(verbose_name = 'Election Date', null = True, blank = False)
    website_url = models.URLField(null=True, blank=False, verbose_name="Initiative Website URL", max_length=255)
    volunteer_url = models.URLField(null=True, blank=True, verbose_name="Volunteer URL", max_length=255)
    donate_url = models.URLField(null=True, blank=True, verbose_name="Donate URL", max_length=255)
    city = models.CharField(max_length=254, null=True, blank=True)
    county = models.CharField(max_length=254, null=True, blank=True)
    state = USStateField(max_length=2, null=True, blank=False, verbose_name="State")
    description = models.TextField(max_length=500, blank=False, null=True, verbose_name = "What would the initiative do?")
    question = models.TextField(max_length=500, blank=True, null=True, verbose_name = "How will the question appear on the ballot?")
    vote = models.NullBooleanField(null=True, blank=True, verbose_name='How to vote:')
    additional_info = models.TextField(max_length=500, blank=True, null=True, verbose_name = "Any additional information you want to share?")
    LOCALITIES = (
        ('city', 'Citywide'),
        ('county', 'Countywide'),
        ('state', 'Statewide'),
    )
    locality = models.CharField(max_length=16, choices=LOCALITIES, default='state', verbose_name='Is this initiative:')
    STATUSES = (
        ('incomplete', 'Incomplete'),
        ('submitted', 'Submitted'),
        ('needs-research','Needs Research'),
        ('needs-staff-review', 'Needs Staff Review'),
        ('approved', 'Endorsed'),
        ('removed', 'Not Endorsed')
    )
    status = models.CharField(max_length=64, choices=STATUSES, default='submitted')
    def __unicode__(self):
        return unicode(self.group) + ' - ' + self.name
    def save(self, *args, **kwargs):
        """Stamp submitted_dt the first time the application is submitted."""
        # NOTE(review): naive local time, consistent with Application.save();
        # consider django.utils.timezone.now() if USE_TZ is enabled.
        if self.status == 'submitted' and self.submitted_dt is None:
            self.submitted_dt = datetime.datetime.now()
        super(InitiativeApplication, self).save(*args, **kwargs)
    class Meta:
        verbose_name = 'Ballot Initiative Application'
from ckeditor.fields import RichTextField
from collections import defaultdict
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from enum import Enum, unique
from localflavor.us.models import USStateField
from local_groups.models import Group
from pages.models import AlertLevels
from phonenumber_field.modelfields import PhoneNumberField
from wagtail.wagtailadmin.edit_handlers import FieldPanel
from wagtail.wagtailcore.fields import RichTextField as WagtailRichTextField
from wagtail.wagtailsnippets.models import register_snippet
import datetime
import logging
# Module-level logger for the nominations app.
logger = logging.getLogger(__name__)
class Nomination(models.Model):
    """
    A nomination form is filled out by the group with basic information about
    the group and what it will do to help the candidate.
    """
    # TODO: move this into application model
    group_nomination_process = models.TextField(
        max_length=500,
        blank=False,
        null=True,
        verbose_name="Briefly describe your group's nomination process"
    )
    STATUSES = (
        ('incomplete', 'Incomplete'),
        ('complete', 'Complete'),
    )
    status = models.CharField(
        max_length=16,
        choices=STATUSES,
        default='incomplete',
        blank=True
    )
    def __unicode__(self):
        # The reverse one-to-one accessor `self.application` raises when no
        # Application is attached yet, so fall back to a pk-based label.
        # (Was a bare `except:`, which would also swallow KeyboardInterrupt
        # and SystemExit; narrowed to Exception.)
        try:
            return self.application.candidate_first_name + ' ' + self.application.candidate_last_name + ' - ' + ' Nomination'
        except Exception:
            return 'Nomination ' + str(self.pk)
    def save(self, *args, **kwargs):
        """Save the nomination, then cascade derived updates.

        If an application is attached, re-save it so its derived status stays
        in sync; on first save, seed one blank response per NominationQuestion.
        """
        super(Nomination, self).save(*args, **kwargs)
        if hasattr(self, 'application'):
            self.application.save()
        if self.nominationresponse_set.count() == 0:
            for q in NominationQuestion.objects.all():
                self.nominationresponse_set.create(question=q)
class NominationQuestion(models.Model):
    """A question on the group nomination form.

    Nomination.save() auto-creates one NominationResponse per question.
    """
    text = models.TextField()
    def __unicode__(self):
        return self.text
class NominationResponse(models.Model):
    """A group's answer to one NominationQuestion on a nomination form."""
    nomination = models.ForeignKey(Nomination)
    question = models.ForeignKey(NominationQuestion)
    # Free-text answer supplied by the group.
    response = models.TextField(max_length=1000)
    def __unicode__(self):
        # `unicode` builtin: this module targets Python 2.
        return unicode(self.question)
@register_snippet
@python_2_unicode_compatible # provide equivalent __unicode__ and __str__ methods on Python 2
class NominationsPlatformAlert(models.Model):
    """Site-wide alert banner shown on nominations platform pages.

    Managed as a Wagtail snippet; `show` toggles visibility and `alert_level`
    selects the Bootstrap 3 alert style.
    """
    content = WagtailRichTextField()
    show = models.BooleanField(
        default=False,
        help_text='Show alert on nominations platform pages.'
    )
    alert_level = models.IntegerField(
        choices=[x.value for x in AlertLevels],
        default=AlertLevels.warning.value[0],
        blank=False,
        null=False,
        help_text="""
        Set the alert style corresponding to Bootstrap 3 alert levels.
        See: https://getbootstrap.com/docs/3.3/components/#alerts-dismissible
        """
    )
    panels = [
        FieldPanel('content'),
        FieldPanel('show'),
        FieldPanel('alert_level')
    ]
    def __str__(self):
        return self.content
class Questionnaire(models.Model):
    """
    A platform questionnaire is filled out by the candidate with basic information and in-depth policy positions.
    """
    STATUSES = (
        ('incomplete', 'Incomplete'),
        ('complete', 'Complete'),
        ('sent', 'Sent to Candidate'),
    )
    status = models.CharField(max_length=16, choices=STATUSES, default='incomplete', blank=True)
    # Candidate Information and Social Media
    candidate_first_name = models.CharField(max_length=255, null=True, blank=False, verbose_name="Candidate First Name")
    candidate_last_name = models.CharField(max_length=255, null=True, blank=False, verbose_name="Candidate Last Name")
    candidate_bio = models.TextField(max_length=1000, blank=False, null=False, verbose_name = "Candidate Bio")
    candidate_email = models.EmailField(null=True, blank=False, verbose_name="Candidate Email", max_length=255)
    candidate_phone = PhoneNumberField(null=True, blank=True, verbose_name="Candidate Phone Number")
    candidate_office = models.CharField(null=True, max_length=255, blank=False, verbose_name="Candidate Office")
    candidate_district = models.CharField(null=True, max_length=255, blank=True, verbose_name="Candidate District")
    candidate_party = models.CharField(null=True, max_length=255, blank=False, verbose_name="Candidate Party Affiliation")
    candidate_held_office = models.NullBooleanField(
        verbose_name="Has the candidate ever held public office?"
    )
    candidate_is_member = models.NullBooleanField(
        verbose_name="Is candidate a member of Our Revolution?"
    )
    candidate_city = models.CharField(null=True, max_length=255, blank=True, verbose_name="Candidate City")
    candidate_state = USStateField(max_length=2, null=True, blank=False, verbose_name="Candidate State")
    general_election_date = models.DateField(verbose_name = 'General Election Date', null = True, blank = False)
    primary_election_date = models.DateField(verbose_name = 'Primary Election Date', null = True, blank = True)
    candidate_website_url = models.URLField(null=True, blank=True, verbose_name="Candidate Website URL", max_length=255)
    candidate_volunteer_url = models.URLField(null=True, blank=True, verbose_name="Candidate Volunteer URL", max_length=255)
    candidate_donate_url = models.URLField(null=True, blank=True, verbose_name="Candidate Donate URL", max_length=255)
    candidate_facebook_url = models.URLField(null=True, blank=True, verbose_name="Candidate Facebook URL", max_length=255)
    candidate_twitter_url = models.URLField(null=True, blank=True, verbose_name="Candidate Twitter URL", max_length=255)
    candidate_instagram_url = models.URLField(null=True, blank=True, verbose_name="Candidate Instagram URL", max_length=255)
    candidate_youtube_url = models.URLField(null=True, blank=True, verbose_name="Candidate YouTube URL", max_length=255)
    # Whether the candidate (vs. a group rep) filled this out themselves.
    completed_by_candidate = models.NullBooleanField(null=True, blank=True)
    def __unicode__(self):
        return '%s %s | %s | %s [%s]' % (
            self.candidate_first_name,
            self.candidate_last_name,
            self.candidate_office,
            self.general_election_date,
            self.id,
        )
    """Get response to question about issues, or None"""
    def _campaign_issues(self, *args, **kwargs):
        # Free-text position on the designated "issues" question (the id
        # comes from settings); None when unanswered.
        response = self.response_set.filter(
            question_id=settings.NOMINATIONS_QUESTION_ISSUES_ID,
        ).first()
        position = response.position if response else None
        return position
    campaign_issues = property(_campaign_issues)
    def save(self, skip_application_save=False, *args, **kwargs):
        # On first save, seed one blank Response per Question.
        super(Questionnaire, self).save(*args, **kwargs)
        if self.response_set.count() == 0:
            for q in Question.objects.all():
                self.response_set.create(question=q)
        '''
        Save the application(s) attached to a questionnaire when the
        questionnaire is saved.
        '''
        if not skip_application_save:
            for app in self.application_set.all():
                app.save()
class Question(models.Model):
    """A policy question on the candidate questionnaire.

    Questionnaire.save() auto-creates one Response per question.
    """
    text = models.TextField(verbose_name="Question Text")
    include_multi_choice = models.BooleanField(default=True, verbose_name="Include Multiple Choice Selection")
    def __unicode__(self):
        return self.text
class Response(models.Model):
    """A candidate's answer to one questionnaire Question."""
    # NOTE(review): letter codes are not in alphabetical order; the tuple
    # order presumably drives display order ('b' = Strongly Disagree last)
    # -- confirm before "fixing".
    QUESTIONNAIRE_CHOICES = (
        ('a', 'Strongly Agree'),
        ('c', 'Somewhat Agree'),
        ('d', 'Somewhat Disagree'),
        ('b', 'Strongly Disagree'),
    )
    questionnaire = models.ForeignKey(Questionnaire)
    question = models.ForeignKey(Question)
    response = models.CharField(max_length=1, blank=False, null=False, choices=QUESTIONNAIRE_CHOICES)
    position = models.TextField(max_length=1000, blank=True, null=True,verbose_name="Candidate's position on this issue:")
    def __unicode__(self):
        # `unicode` builtin: this module targets Python 2.
        return unicode(self.question)
@unique
class ApplicationType(Enum):
    """Level of support requested; values are (id, label) pairs used as choices."""
    basic = (1, 'Basic Support')
    priority = (2, 'Priority Support')
class Application(models.Model):
    """
    An application is a single submission for an endorsement. Each application
    consists of a group nomination and a candidate questionnaire, and has a
    many-to-one relationship with a group.
    """
    # See http://www.ncsl.org/research/elections-and-campaigns/primary-types.aspx
    primary_election_type_choices = (
        (1, 'Closed Primary'),
        (2, 'Partially Closed Primary'),
        (3, 'Partially Open Primary'),
        (4, 'Open to Unaffiliated Voters Primary'),
        (5, 'Open Primary'),
        (6, 'Top-Two Primary'),
        (7, 'Presidential Primary'),
        (99, 'Other'),
    )
    staff_recommendation_choices = (
        (1, 'Recommend to Endorse'),
        (2, 'Recommend Not to Endorse'),
        (3, 'No Recommendation'),
    )
    # Choices come from the (id, label) tuple values of the ApplicationType enum.
    application_type = models.IntegerField(
        blank=True,
        choices=[x.value for x in ApplicationType],
        null=True,
    )
    """Django User to use instead of legacy auth0 user"""
    auth_user = models.ForeignKey(
        User,
        blank=True,
        null=True,
    )
    fundraising_date_of_filing = models.DateField(
        blank=True,
        null=True,
        verbose_name='Filing Date for Fundraising Report'
    )
    fundraising_date_accessed = models.DateField(
        blank=True,
        null=True,
        verbose_name='Date fundraising information was accessed'
    )
    fundraising_source_url = models.URLField(
        blank=True,
        max_length=255,
        null=True,
        verbose_name='Fundraising Source URL'
    )
    """Legacy field for auth0 user id"""
    user_id = models.CharField(max_length=255, null=True, blank=True)
    create_dt = models.DateTimeField(auto_now_add=True)
    # Stamped once by save() the first time the status becomes 'submitted'.
    submitted_dt = models.DateTimeField(
        null=True,
        blank=True,
        verbose_name='Submitted at'
    )
    nomination = models.OneToOneField(
        Nomination,
        on_delete=models.CASCADE,
        primary_key=False,
        null=True,
        blank=True,
        related_name='application',
        verbose_name='Group Nomination Form:',
    )
    primary_election_type = models.IntegerField(
        blank=True,
        choices=primary_election_type_choices,
        null=True,
    )
    questionnaire = models.ForeignKey(
        Questionnaire,
        on_delete=models.SET_NULL,
        null=True,
        blank=True
    )
    group = models.ForeignKey(Group, to_field="group_id")
    # Group representative (submitter) contact details.
    rep_email = models.EmailField(
        null=True,
        blank=False,
        verbose_name="Contact Email",
        max_length=254
    )
    rep_first_name = models.CharField(
        max_length=35,
        null=True,
        blank=False,
        verbose_name="First Name"
    )
    rep_last_name = models.CharField(
        max_length=35,
        null=True,
        blank=False,
        verbose_name="Last Name"
    )
    rep_phone = PhoneNumberField(
        null=True,
        blank=True,
        verbose_name="Phone Number"
    )
    # TODO: change to foreign key and create new object for each new candidate,
    # implement autocomplete to minimize duplicate candidates
    candidate_first_name = models.CharField(
        max_length=255,
        null=True,
        blank=False,
        verbose_name="Candidate First Name"
    )
    candidate_last_name = models.CharField(
        max_length=255,
        null=True,
        blank=False,
        verbose_name="Candidate Last Name"
    )
    candidate_office = models.CharField(
        null=True,
        max_length=255,
        blank=False,
        verbose_name="Candidate Office"
    )
    candidate_district = models.CharField(
        null=True,
        max_length=255,
        blank=True,
        verbose_name="Candidate District"
    )
    candidate_city = models.CharField(
        null=True,
        max_length=255,
        blank=True,
        verbose_name="Candidate City"
    )
    candidate_state = USStateField(max_length=2, null=True, blank=False)
    authorized_email = models.EmailField(
        null=True,
        blank=True,
        verbose_name="Authorized Email",
        max_length=254
    )
    # TODO TECH-840 convert statuses to integer fields
    STATUSES = (
        (
            'needs-group-form-and-questionnaire',
            'Needs Group Form and Questionnaire'
        ),
        ('needs-questionnaire', 'Needs Questionnaire'),
        ('needs-group-form', 'Needs Group Form'),
        # Deprecated as of 2019-01-08
        # ('incomplete', 'Needs Submission'),
        ('submitted', 'Submitted'),
        ('needs-research', 'Needs Research'),
        ('needs-staff-review', 'Needs Staff Review'),
        ('under-review', 'Under Review'),
        ('approved', 'Endorsed'),
        ('removed', 'Not Endorsed'),
        ('expired', 'Expired'),
        ('hold', 'Hold'),
    )
    # Statuses that signify whether a group can still edit an application
    EDITABLE_STATUSES = [
        'needs-group-form-and-questionnaire',
        'needs-questionnaire',
        'needs-group-form',
    ]
    status = models.CharField(
        max_length=64,
        choices=STATUSES,
        default='needs-group-form-and-questionnaire'
    )
    # Volunteer Data Entry
    vol_incumbent = models.NullBooleanField(
        null=True,
        blank=True,
        verbose_name='Incumbent?'
    )
    # NOTE(review): boolean field with a free-text question label — confirm the
    # field type matches how the form actually uses it.
    vol_dem_challenger = models.NullBooleanField(
        null=True,
        blank=True,
        verbose_name='If primary, who are the Democratic challengers?'
    )
    # TODO: rename to vol_other_candidates and remove old field from code
    # and db after a/b deploy issues are resolved
    # legacy field
    vol_other_progressives = models.TextField(
        null=True,
        blank=True,
        max_length=500,
        verbose_name='Other candidates running:',
        help_text='Please indicate party affiliation and other progressives. Max length 500 characters.'
    )
    vol_polling = models.TextField(
        null=True,
        blank=True,
        max_length=500,
        verbose_name='Polling:'
    )
    vol_endorsements = models.TextField(
        null=True,
        blank=True,
        max_length=500,
        verbose_name='Endorsements:'
    )
    vol_advantage = models.CharField(
        null=True,
        blank=True,
        max_length=50,
        verbose_name='Previous Election D% or R% Advantage:'
    )
    vol_turnout = models.CharField(
        null=True,
        blank=True,
        max_length=10,
        verbose_name='Previous Election Year Turnout:'
    )
    vol_win_number = models.IntegerField(
        null=True,
        blank=True,
        verbose_name='Win Number:'
    )
    vol_fundraising = models.IntegerField(
        null=True,
        blank=True,
        verbose_name='How much money fundraised?'
    )
    #legacy field
    vol_opponent_fundraising = models.IntegerField(
        null=True,
        blank=True,
        verbose_name='How much competitors have fundraised?'
    )
    vol_crimes = models.TextField(
        null=True,
        blank=True,
        max_length=500,
        verbose_name='Crimes or Scandals (please add links to source):'
    )
    vol_notes = models.TextField(
        null=True,
        blank=True,
        max_length=1000,
        verbose_name='Volunteer Notes:',
        help_text='Max length 1000 characters.'
    )
    # Staff only research fields
    CLASSIFICATIONS = (
        ('1', 'I'),
        ('2', 'II'),
        ('3', 'III'),
    )
    VET_STATUSES = (
        ('0', 'Pending'),
        ('1', 'Passed'),
        ('2', 'Failed'),
        ('3', 'Not Submitted'),
    )
    """TODO: remove?"""
    RECOMMENDATIONS = (
        ('1', 'Endorse'),
        ('2', 'Do Not Endorse')
    )
    classification_level = models.CharField(
        max_length=64,
        choices=CLASSIFICATIONS,
        default='1'
    )
    staff = models.CharField(
        max_length=64,
        blank=True,
        null=True,
    )
    """TODO: remove?"""
    recommendation = models.CharField(
        max_length=64,
        choices=RECOMMENDATIONS,
        default='1'
    )
    staff_bio = RichTextField(
        null=True,
        blank=True,
        verbose_name='Candidate Bio:',
        help_text='This will prepopulate from the candidate questionnaire if left blank.'
    )
    staff_recommendation = models.IntegerField(
        blank=True,
        choices=staff_recommendation_choices,
        null=True,
    )
    stand_out_information = RichTextField(
        blank=True,
        null=True,
    )
    state_of_the_race = RichTextField(
        null=True,
        blank=True,
        verbose_name='State of the Race:',
    )
    local_group_info = RichTextField(
        null=True,
        blank=True,
        verbose_name='OR Local Group Info:',
        help_text='This will prepopulate from the local group\'s endorsement process if left blank.'
    )
    staff_notes = RichTextField(
        null=True,
        blank=True,
        verbose_name='Staff Notes or Flags:',
        help_text='This will prepopulate from volunteer notes if left blank.'
    )
    vet_status = models.CharField(
        max_length=64,
        choices=VET_STATUSES,
        default='0'
    )
    vet = RichTextField(
        null=True,
        blank=True,
        verbose_name='Vet Details:',
    )
    local_support = RichTextField(
        null=True,
        blank=True,
        verbose_name='Local Support:',
        help_text='This will prepopulate from the local group\'s support question if left blank.'
    )
    def __unicode__(self):
        # "Group - First Last" display label.
        return unicode(self.group) + ' - ' + self.candidate_first_name + ' ' + self.candidate_last_name
    def _candidate_name(self):
        # Assumes both name parts are set; None in either raises TypeError.
        return self.candidate_first_name + ' ' + self.candidate_last_name
    candidate_name = property(_candidate_name)
    '''
    Group candidates by party and return list
    '''
    def _candidates_by_party(self):
        candidates = defaultdict(list)
        for application_candidate in self.applicationcandidate_set.order_by(
            'party',
            'first_name',
            'last_name'
        ):
            candidates[application_candidate.party].append(
                application_candidate
            )
        # NOTE(review): returns the bound .items method, not its result; Django
        # templates auto-call callables, but non-template callers must invoke it.
        return candidates.items
    candidates_by_party = property(_candidates_by_party)
    def auto_populate_research_fields(self):
        """Auto-populate staff write-up fields from already present info"""
        if self.questionnaire:
            if self.questionnaire.candidate_bio and not self.staff_bio:
                self.staff_bio = self.questionnaire.candidate_bio
        if self.nomination:
            if self.nomination.group_nomination_process and not self.local_group_info:
                self.local_group_info = self.nomination.group_nomination_process
            # question ID 8 is "What actions will the group take
            # and how many people have agreed to volunteer/support?
            question = self.nomination.nominationresponse_set.filter(
                question_id=8
            ).first()
            if question and not self.local_support:
                self.local_support = question.response.encode('utf-8')
        if self.vol_notes and not self.staff_notes:
            self.staff_notes = self.vol_notes
    def create_related_objects(self):
        """Create related nomination and questionnaire for application."""
        if not self.nomination:
            self.nomination = Nomination.objects.create()
        if not self.questionnaire:
            self.questionnaire = Questionnaire.objects.create()
    def generate_application_status(self):
        """
        Returns a generated status based on completion of various items.
        Nomination is filled out by the group with basic information about
        the group and what it will do to help the candidate.
        Questionnaire is filled out by the candidate with basic information and
        in-depth policy positions.
        """
        # Only regenerate while the group can still edit; later (staff-owned)
        # statuses are passed through unchanged.
        if self.status in self.EDITABLE_STATUSES:
            if self.nomination.status == 'incomplete':
                if self.questionnaire.status == 'complete':
                    status = 'needs-group-form'
                else:
                    status = 'needs-group-form-and-questionnaire'
            else:
                # nomination complete
                if self.questionnaire.status == 'complete':
                    # questionnaire complete
                    """
                    Set as submitted if nomination + questionnaire are complete
                    """
                    status = 'submitted'
                else:
                    # needs questionaire
                    status = 'needs-questionnaire'
        else:
            status = self.status
        return status
    def is_editable(self):
        """Returns whether a group can edit this application."""
        if self.status in self.EDITABLE_STATUSES:
            return True
        else:
            return False
    def save(self, *args, **kwargs):
        # Ensure related objects exist, sync derived fields, then stamp the
        # submission time exactly once when the status first becomes 'submitted'.
        if not self.nomination or not self.questionnaire:
            self.create_related_objects()
        self.auto_populate_research_fields()
        self.status = self.generate_application_status()
        if self.status == 'submitted' and self.submitted_dt is None:
            self.submitted_dt = datetime.datetime.now()
        super(Application, self).save(*args, **kwargs)
    class Meta:
        permissions = (
            (
                "bulk_change_application_status",
                "Can bulk change status of applications"
            ),
            (
                "export_pdf_application",
                "Can export to pdf"
            ),
            (
                "admin_application",
                "Can admin override application data"
            ),
        )
        verbose_name = 'Candidate Application'
class ApplicationCandidate(models.Model):
    """Information about candidates in a race related to an application."""
    party_choices = (
        (1, 'Democratic Party'),
        (2, 'Green Party'),
        (3, 'Independent/No Party Affiliation'),
        (4, 'Republican Party'),
        (5, 'Libertarian Party'),
        (6, 'Vermont Progressive Party'),
        (99, 'Other'),
    )
    application = models.ForeignKey(Application, on_delete=models.CASCADE)
    description = models.CharField(blank=True, max_length=500, null=True)
    first_name = models.CharField(blank=True, max_length=255, null=True)
    fundraising = models.IntegerField(blank=True, null=True, verbose_name='Cash on Hand')
    last_name = models.CharField(blank=True, max_length=255, null=True)
    party = models.IntegerField(blank=True, choices=party_choices, null=True)
    website_url = models.URLField(blank=True, max_length=255, null=True)
    def _name(self):
        """Full name if both parts are set, either part alone, else None."""
        parts = [part for part in (self.first_name, self.last_name) if part]
        return ' '.join(parts) if parts else None
    name = property(_name)
    def __unicode__(self):
        label = str(self.id)
        if self.name:
            label = label + ' ' + self.name
        return label
class InitiativeApplication(models.Model):
    """
    A group's application for endorsement of a ballot initiative.

    Mirrors the candidate Application flow: submitted by a group
    representative, then moved through research/review statuses.
    """
    # Django User to use instead of legacy auth0 user
    auth_user = models.ForeignKey(
        User,
        blank=True,
        null=True,
    )
    # Legacy Auth0 user id
    user_id = models.CharField(max_length=255, null=True, blank=True)
    create_dt = models.DateTimeField(auto_now_add=True)
    # Stamped once by save() when the application is first saved as 'submitted'.
    submitted_dt = models.DateTimeField(null=True, blank=True, verbose_name='Submitted at')
    group = models.ForeignKey(Group, to_field="group_id")
    # Group representative (submitter) contact details.
    rep_email = models.EmailField(null=True, blank=False, verbose_name="Contact Email", max_length=254)
    rep_first_name = models.CharField(max_length=35, null=True, blank=False, verbose_name="First Name")
    # FIX: verbose_name was the anonymization placeholder "<NAME>"; restored to
    # "Last Name" for consistency with Application.rep_last_name.
    rep_last_name = models.CharField(max_length=35, null=True, blank=False, verbose_name="Last Name")
    rep_phone = PhoneNumberField(null=True, blank=True, verbose_name="Phone Number")
    # FIX: dropped stray leading space from the verbose_name label.
    name = models.CharField(max_length=254, null=True, blank=False, verbose_name="Initiative Name")
    election_date = models.DateField(verbose_name='Election Date', null=True, blank=False)
    website_url = models.URLField(null=True, blank=False, verbose_name="Initiative Website URL", max_length=255)
    volunteer_url = models.URLField(null=True, blank=True, verbose_name="Volunteer URL", max_length=255)
    donate_url = models.URLField(null=True, blank=True, verbose_name="Donate URL", max_length=255)
    city = models.CharField(max_length=254, null=True, blank=True)
    county = models.CharField(max_length=254, null=True, blank=True)
    state = USStateField(max_length=2, null=True, blank=False, verbose_name="State")
    description = models.TextField(max_length=500, blank=False, null=True, verbose_name="What would the initiative do?")
    question = models.TextField(max_length=500, blank=True, null=True, verbose_name="How will the question appear on the ballot?")
    vote = models.NullBooleanField(null=True, blank=True, verbose_name='How to vote:')
    additional_info = models.TextField(max_length=500, blank=True, null=True, verbose_name="Any additional information you want to share?")
    LOCALITIES = (
        ('city', 'Citywide'),
        ('county', 'Countywide'),
        ('state', 'Statewide'),
    )
    locality = models.CharField(max_length=16, choices=LOCALITIES, default='state', verbose_name='Is this initiative:')
    STATUSES = (
        ('incomplete', 'Incomplete'),
        ('submitted', 'Submitted'),
        ('needs-research', 'Needs Research'),
        ('needs-staff-review', 'Needs Staff Review'),
        ('approved', 'Endorsed'),
        ('removed', 'Not Endorsed')
    )
    status = models.CharField(max_length=64, choices=STATUSES, default='submitted')
    def __unicode__(self):
        return unicode(self.group) + ' - ' + self.name
    def save(self, *args, **kwargs):
        # Record the submission time exactly once.
        if self.status == 'submitted' and self.submitted_dt is None:
            self.submitted_dt = datetime.datetime.now()
        super(InitiativeApplication, self).save(*args, **kwargs)
    class Meta:
        # FIX: removed dataset-join junk ("| 0.452052 | 0.118589") that had been
        # appended to this line, which made it a syntax error.
        verbose_name = 'Ballot Initiative Application'
from enum import Enum
class MenuCategoryEnum(Enum):
    """
    Enum for categories, for item type menu <category name>: <category id>
    """
    # Opaque base64-encoded category id (decodes to "category:<n>").
    MENU_TYPE = 'Y2F0ZWdvcnk6MjQ2NQ=='
class MenuItemCategoryEnum(Enum):
    """
    Enum for categories, for item type menu item <category name>: <category id>
    """
    # Opaque base64-encoded category id (decodes to "category:<n>").
    PRODUCT_CODE = 'Y2F0ZWdvcnk6MjQ2Nw=='
class PreparationEnum(Enum):
    """
    Enum for preparations. <preparation name>: <preparation id>
    """
    # Opaque base64-encoded preparation ids.
    STANDALONE = 'cHJlcGFyYXRpb246MjgzMzQ='
    TWO_OUNCE_RAM = 'cHJlcGFyYXRpb246MjgxMTU='
    THREE_OUNCE_RAM = 'cHJlcGFyYXRpb246MjgxMTQ='
    CORE_RECIPE = 'cHJlcGFyYXRpb246MzEzNjk='
class IngredientCategoryValueEnum(Enum):
    """
    Enum for Ingredient CategoryValues. <categoryValue name>: <categoryValue id>
    """
    # Opaque base64-encoded categoryValue id.
    FOOD_PACKAGE = 'Y2F0ZWdvcnlWYWx1ZToxNDAxNQ=='
class IngredientCategoryTagTypeEnum(Enum):
    """
    Enum for category tag types at ingredient (itemType) level. <category name>: <category id>
    """
    ACCOUNTING_TAG = 'Y2F0ZWdvcnk6MjQyMA=='
    # ingredient only tag, separate value used for recipe bin weight
    # (see RecipeCategoryTagTypeEnum.BIN_WEIGHT_TAG for the recipe-level id)
    BIN_WEIGHT_TAG = 'Y2F0ZWdvcnk6MzExOA=='
class RecipeCategoryTagTypeEnum(Enum):
    """
    Enum for category tag types at recipe (itemType) level. <category name>: <category id>
    """
    # Opaque base64-encoded category ids.
    PROTEIN_TYPE_TAG = 'Y2F0ZWdvcnk6MjUwOA=='
    MEAL_TYPE_TAG = 'Y2F0ZWdvcnk6MjQyMg=='
    MEAL_CONTAINER_TAG = 'Y2F0ZWdvcnk6MjU2Nw=='
    PROTEIN_ADDON_TAG = 'Y2F0ZWdvcnk6MjU4MQ=='
    BASE_MEAL_SLUG_TAG = 'Y2F0ZWdvcnk6MjYyMA=='
    BASE_MEAL_TAG = 'Y2F0ZWdvcnk6MjU4OQ=='
    HIGHLIGHT_ONE_TAG = 'Y2F0ZWdvcnk6MjU3OA=='
    HIGHLIGHT_TWO_TAG = 'Y2F0ZWdvcnk6MzA0OQ=='
    NO_NUTRITION_ON_WEBSITE_TAG = 'Y2F0ZWdvcnk6MzA2Ng=='
    # recipe only tag, separate value used for ingredient bin weight
    BIN_WEIGHT_TAG = 'Y2F0ZWdvcnk6MzExOQ=='
class RecipeMediaEnum(Enum):
    """
    Enum for RecipeMedia. <field name>: <field value>
    """
    # Caption strings (plain values, not encoded ids).
    MENU_CAPTION = 'menu'
    PLATE_CAPTION = 'plating'
class DietaryFlagEnum(Enum):
    """Enum for Dietary Flags. <flag name>: <flag id>"""
    # Opaque base64-encoded flag ids (decode to "dietaryFlag:<n>").
    BEEF = 'ZGlldGFyeUZsYWc6MTM3'
    CELERY = 'ZGlldGFyeUZsYWc6MTA='
    COCONUT = 'ZGlldGFyeUZsYWc6OTc='
    CRUSTANCEANS = 'ZGlldGFyeUZsYWc6MTQ='
    EGGS = 'ZGlldGFyeUZsYWc6Mw=='
    FISH = 'ZGlldGFyeUZsYWc6Nw=='
    GLUTEN = 'ZGlldGFyeUZsYWc6MTE='
    LAMB = 'ZGlldGFyeUZsYWc6MTM1'
    LUPIN = 'ZGlldGFyeUZsYWc6MTI='
    MILK = 'ZGlldGFyeUZsYWc6MQ=='
    MOLLUSCS = 'ZGlldGFyeUZsYWc6MTM='
    MUSTARD = 'ZGlldGFyeUZsYWc6MTU='
    NON_VEGAN = 'ZGlldGFyeUZsYWc6OTk='
    PEANUTS = 'ZGlldGFyeUZsYWc6NA=='
    PORK = 'ZGlldGFyeUZsYWc6OTg='
    SESAME_SEEDS = 'ZGlldGFyeUZsYWc6MTY='
    SHELLFISH = 'ZGlldGFyeUZsYWc6OA=='
    SMOKED_MEATS = 'ZGlldGFyeUZsYWc6MTM2'
    SOY_BEANS = 'ZGlldGFyeUZsYWc6Ng=='
    SULPHITES = 'ZGlldGFyeUZsYWc6MTc='
    TREE_NUTS = 'ZGlldGFyeUZsYWc6NQ=='
    WHEAT = 'ZGlldGFyeUZsYWc6Mg=='
class QuantityUnitEnum(Enum):
    """Enum for quantity units. <unit name>: <unit id>"""
    OZ = 'dW5pdDoz'  # base64 for "unit:3"
    # FIX: removed dataset-join junk ("| galley/enums.py | from enum import
    # Enum") that had been appended to this line, which made it a syntax error.
    LB = 'dW5pdDo0'  # base64 for "unit:4"
class MenuCategoryEnum(Enum):
    """
    Enum for categories, for item type menu <category name>: <category id>
    """
    # Opaque base64-encoded category id (decodes to "category:<n>").
    MENU_TYPE = 'Y2F0ZWdvcnk6MjQ2NQ=='
class MenuItemCategoryEnum(Enum):
    """
    Enum for categories, for item type menu item <category name>: <category id>
    """
    # Opaque base64-encoded category id (decodes to "category:<n>").
    PRODUCT_CODE = 'Y2F0ZWdvcnk6MjQ2Nw=='
class PreparationEnum(Enum):
    """
    Enum for preparations. <preparation name>: <preparation id>
    """
    # Opaque base64-encoded preparation ids.
    STANDALONE = 'cHJlcGFyYXRpb246MjgzMzQ='
    TWO_OUNCE_RAM = 'cHJlcGFyYXRpb246MjgxMTU='
    THREE_OUNCE_RAM = 'cHJlcGFyYXRpb246MjgxMTQ='
    CORE_RECIPE = 'cHJlcGFyYXRpb246MzEzNjk='
class IngredientCategoryValueEnum(Enum):
    """
    Enum for Ingredient CategoryValues. <categoryValue name>: <categoryValue id>
    """
    # Opaque base64-encoded categoryValue id.
    FOOD_PACKAGE = 'Y2F0ZWdvcnlWYWx1ZToxNDAxNQ=='
class IngredientCategoryTagTypeEnum(Enum):
    """
    Enum for category tag types at ingredient (itemType) level. <category name>: <category id>
    """
    ACCOUNTING_TAG = 'Y2F0ZWdvcnk6MjQyMA=='
    # ingredient only tag, separate value used for recipe bin weight
    BIN_WEIGHT_TAG = 'Y2F0ZWdvcnk6MzExOA=='
class RecipeCategoryTagTypeEnum(Enum):
    """
    Enum for category tag types at recipe (itemType) level. <category name>: <category id>
    """
    # Opaque base64-encoded category ids.
    PROTEIN_TYPE_TAG = 'Y2F0ZWdvcnk6MjUwOA=='
    MEAL_TYPE_TAG = 'Y2F0ZWdvcnk6MjQyMg=='
    MEAL_CONTAINER_TAG = 'Y2F0ZWdvcnk6MjU2Nw=='
    PROTEIN_ADDON_TAG = 'Y2F0ZWdvcnk6MjU4MQ=='
    BASE_MEAL_SLUG_TAG = 'Y2F0ZWdvcnk6MjYyMA=='
    BASE_MEAL_TAG = 'Y2F0ZWdvcnk6MjU4OQ=='
    HIGHLIGHT_ONE_TAG = 'Y2F0ZWdvcnk6MjU3OA=='
    HIGHLIGHT_TWO_TAG = 'Y2F0ZWdvcnk6MzA0OQ=='
    NO_NUTRITION_ON_WEBSITE_TAG = 'Y2F0ZWdvcnk6MzA2Ng=='
    # recipe only tag, separate value used for ingredient bin weight
    BIN_WEIGHT_TAG = 'Y2F0ZWdvcnk6MzExOQ=='
class RecipeMediaEnum(Enum):
    """
    Enum for RecipeMedia. <field name>: <field value>
    """
    # Caption strings (plain values, not encoded ids).
    MENU_CAPTION = 'menu'
    PLATE_CAPTION = 'plating'
class DietaryFlagEnum(Enum):
    """Enum for Dietary Flags. <flag name>: <flag id>"""
    # Opaque base64-encoded flag ids (decode to "dietaryFlag:<n>").
    BEEF = 'ZGlldGFyeUZsYWc6MTM3'
    CELERY = 'ZGlldGFyeUZsYWc6MTA='
    COCONUT = 'ZGlldGFyeUZsYWc6OTc='
    CRUSTANCEANS = 'ZGlldGFyeUZsYWc6MTQ='
    EGGS = 'ZGlldGFyeUZsYWc6Mw=='
    FISH = 'ZGlldGFyeUZsYWc6Nw=='
    GLUTEN = 'ZGlldGFyeUZsYWc6MTE='
    LAMB = 'ZGlldGFyeUZsYWc6MTM1'
    LUPIN = 'ZGlldGFyeUZsYWc6MTI='
    MILK = 'ZGlldGFyeUZsYWc6MQ=='
    MOLLUSCS = 'ZGlldGFyeUZsYWc6MTM='
    MUSTARD = 'ZGlldGFyeUZsYWc6MTU='
    NON_VEGAN = 'ZGlldGFyeUZsYWc6OTk='
    PEANUTS = 'ZGlldGFyeUZsYWc6NA=='
    PORK = 'ZGlldGFyeUZsYWc6OTg='
    SESAME_SEEDS = 'ZGlldGFyeUZsYWc6MTY='
    SHELLFISH = 'ZGlldGFyeUZsYWc6OA=='
    SMOKED_MEATS = 'ZGlldGFyeUZsYWc6MTM2'
    SOY_BEANS = 'ZGlldGFyeUZsYWc6Ng=='
    SULPHITES = 'ZGlldGFyeUZsYWc6MTc='
    TREE_NUTS = 'ZGlldGFyeUZsYWc6NQ=='
    WHEAT = 'ZGlldGFyeUZsYWc6Mg=='
class QuantityUnitEnum(Enum):
    """Enum for quantity units. <unit name>: <unit id>"""
    OZ = 'dW5pdDoz'  # base64 for "unit:3"
    # FIX: removed dataset-join junk ("| 0.33928 | 0.090253") appended to this
    # line, which made it a syntax error.
    LB = 'dW5pdDo0'  # base64 for "unit:4"
import os
import shutil
################### Directory Helper #############################3
def removeDirectory(path, verbose=True):
    """Recursively delete *path* if it is a directory.

    If *path* is not an existing directory, optionally report that there is
    nothing to remove. FIXES: removed the dead `if (True):` wrapper (the
    interactive confirmation it guarded is long commented out) and the
    "romved" typo in the message.
    """
    if os.path.isdir(path):
        shutil.rmtree(path)
    elif verbose:
        print("No directory to be removed")
def makeDirectory(path, verbose=True):
    """Create the single directory *path*, reporting success or failure.

    Failure (directory already exists, missing parent, permissions) is
    swallowed; with verbose=True a message is printed either way.
    """
    try:
        os.mkdir(path)
    except OSError:
        if verbose:
            print("Creation of the directory %s failed" % path)
        return
    if verbose:
        print("Successfully created the directory %s " % path)
def resetParentDirectory(path, verbose=False):
    """Wipe and recreate the PARENT directory of *path* (trailing '/' ignored)."""
    parent = '/'.join(path.rstrip("/").split("/")[:-1])
    removeDirectory(parent, verbose)
    makeDirectory(parent, verbose)
def resetDirectory(path, verbose=False):
    """Wipe *path* (if present) and recreate it empty."""
    removeDirectory(path, verbose)
    makeDirectory(path, verbose)
def create_hierarchy(folder_path):
    """Create every level of the '/'-separated *folder_path*, top-down.

    Each level is announced with a print and created via makeDirectory
    (which swallows "already exists" errors), so pre-existing levels are fine.
    """
    folders = folder_path.split("/")
    current = folders[0]
    print("creating directory", current)
    makeDirectory(current)
    for child in folders[1:]:
        current = current + "/" + child
        print("creating directory", current)
        makeDirectory(current)
################### Plot Helper #############################3
import matplotlib.pyplot as plt
import time
def plot_result(data, unit, legend):
    """Plot one total-reward value per recorded point and save the figure.

    Point i is drawn at episode i*unit + 1; the figure is written to a
    timestamped "result..." file in the working directory.
    """
    print("\nLearning Performance:\n")
    episodes = [index * unit + 1 for index in range(len(data))]
    plt.figure(num=1)
    fig, ax = plt.subplots()
    plt.plot(episodes, data)
    plt.title('performance')
    plt.legend(legend)
    plt.xlabel("Episodes")
    plt.ylabel("total rewards")
    plt.savefig("result" + time.strftime("%d-%m-%Y_%H:%M:%S"))
def createPlotFor(data, x_label="Default X label", y_label="Default Y label",
                  fileName=None, color='b.', save=False, show=True, title=None, y_scale="linear", x_scale="linear"):
    """Plot one or more labelled series with matplotlib.

    *data* is either a list of (x, y, label) triples or a mapping
    {label: y_values}, in which case x defaults to range(len(y_values)).
    Optionally saves to "<fileName><x_label>vs<y_label>.png" and/or shows
    the figure; the current figure is cleared afterwards either way.
    """
    if isinstance(data, dict):
        data = [(range(len(series)), series, str(name)) for name, series in data.items()]
    import matplotlib.pyplot as plt
    fileName = fileName or "default_plot_name"
    for series_x, series_y, series_label in data:
        plt.plot(series_x, series_y, label=series_label)
    plt.xlabel("$" + x_label + "$")
    plt.ylabel("$" + y_label + "$")
    plt.yscale(y_scale)
    plt.xscale(x_scale)
    plt.title(title)
    plt.legend()
    if save:
        plt.savefig(fileName + x_label + "vs" + y_label + ".png")
    if show:
        plt.show()
    plt.gcf().clear()
def createPlotlyPlotFor(data, x_label="Default X label", y_label="Default Y label",
                        fileName=None, color='b.', save=False, show=True, title=None, y_scale="linear",
                        x_scale="linear", mode='lines'):
    """Plot one or more labelled series with plotly.

    *data* is either a list of (x, y, label) triples or a mapping
    {label: y_values}, in which case x defaults to list(range(len(y_values))).
    Optionally writes "<fileName><x_label>vs<y_label>.html" (without opening a
    browser) and/or shows the figure interactively.
    """
    import plotly.graph_objects as go
    import plotly
    if isinstance(data, dict):
        data = [(list(range(len(v))), v, str(k)) for k, v in data.items()]
    fileName = fileName or "default_plot_name"
    fig = go.Figure()
    for x, y, label in data:
        fig.add_trace(go.Scatter(x=x, y=y,
                                 mode=mode,
                                 name=label))
    # Edit the layout
    fig.update_layout(title=title,
                      xaxis_title=x_label,
                      yaxis_title=y_label)
    if save:
        plotly.offline.plot(fig,
                            filename=fileName + x_label + "vs" + y_label + '.html',
                            auto_open=False)
    if show:
        fig.show()
    # FIX: this line had dataset-join junk appended ("| utils.py | import os"),
    # which made it a syntax error; restored the plain print.
    print("figure shown")
import shutil
################### Directory Helper #############################3
def removeDirectory(path, verbose=True):
    """Recursively delete *path* if it is an existing directory."""
    if (os.path.isdir(path)):
        # The interactive confirmation this guarded is commented out, leaving a
        # dead always-true condition.
        if (True): # input("are you sure you want to remove this directory? (Y / N): " + path) == "Y" ):
            shutil.rmtree(path)
    else:
        if (verbose):
            # NOTE(review): message contains the typo "romved".
            print("No Directory to be romved")
def makeDirectory(path, verbose=True):
    """Create the single directory *path*; any OSError (e.g. it already
    exists) is swallowed, with optional printed diagnostics either way."""
    try:
        os.mkdir(path)
    except OSError:
        if (verbose):
            print("Creation of the directory %s failed" % path)
    else:
        if (verbose):
            print("Successfully created the directory %s " % path)
def resetParentDirectory(path, verbose=False):
    """Wipe and recreate the PARENT directory of *path* (trailing '/' ignored)."""
    path = '/'.join(path.rstrip("/").split("/")[:-1])
    removeDirectory(path, verbose)
    makeDirectory(path, verbose)
def resetDirectory(path, verbose=False):
    """Wipe *path* (if present) and recreate it empty."""
    removeDirectory(path, verbose)
    makeDirectory(path, verbose)
def create_hierarchy(folder_path):
    """Create every level of the '/'-separated *folder_path*, top-down,
    recursing one path component at a time."""
    def create_hierarchy_fn(child_folders, parent_folder):
        # Create the current level, then recurse with the next component
        # appended; makeDirectory swallows "already exists" errors.
        print("creating directory", parent_folder)
        makeDirectory(parent_folder)
        if len(child_folders) > 0:
            parent_folder = parent_folder + "/" + child_folders[0]
            return create_hierarchy_fn(child_folders[1:], parent_folder)
        else:
            return
    folders = folder_path.split("/")
    create_hierarchy_fn(folders[1:], folders[0])
################### Plot Helper #############################3
import matplotlib.pyplot as plt
import time
def plot_result(data, unit, legend):
    """Plot one total-reward value per recorded point (point i at episode
    i*unit + 1) and save to a timestamped "result..." file.

    NOTE(review): plt.figure(num=1) is immediately superseded by
    plt.subplots(), which opens a fresh figure; also the strftime pattern
    contains ':' which is not a valid filename character on Windows.
    """
    print("\nLearning Performance:\n")
    episodes = []
    for i in range(len(data)):
        episodes.append(i * unit + 1)
    plt.figure(num=1)
    fig, ax = plt.subplots()
    plt.plot(episodes, data)
    plt.title('performance')
    plt.legend(legend)
    plt.xlabel("Episodes")
    plt.ylabel("total rewards")
    plt.savefig("result" + time.strftime("%d-%m-%Y_%H:%M:%S"))
def createPlotFor(data, x_label="Default X label", y_label="Default Y label",
                  fileName=None, color='b.', save=False, show=True, title=None, y_scale="linear", x_scale="linear"):
    """Plot one or more labelled series with matplotlib.

    data: [(x,y,label), (x1,y1,label1) , ....]
    or
    {label:y, label2:y2 . . . }  (x defaults to range(len(y)))
    Optionally saves "<fileName><x_label>vs<y_label>.png" and/or shows the
    figure; the current figure is cleared afterwards either way.
    """
    if isinstance(data, dict):
        data = [(range(len(v)), v, str(k)) for k, v in data.items()]
    import matplotlib.pyplot as plt
    fileName = fileName or "default_plot_name"
    for x, y, label in data:
        plt.plot(x, y, label=label)
    # Axis labels are wrapped in '$...$' so matplotlib renders them as math text.
    plt.xlabel("$" + x_label + "$")
    plt.ylabel("$" + y_label + "$")
    plt.yscale(y_scale)
    plt.xscale(x_scale)
    plt.title(title)
    plt.legend()
    if (save):
        plt.savefig(fileName + x_label + "vs" + y_label + ".png")
    if (show):
        plt.show()
    plt.gcf().clear()
def createPlotlyPlotFor(data, x_label="Default X label", y_label="Default Y label",
                        fileName=None, color='b.', save=False, show=True, title=None, y_scale="linear",
                        x_scale="linear", mode='lines'):
    """Plot one or more labelled series with plotly.

    *data* is either a list of (x, y, label) triples or a mapping
    {label: y_values}, in which case x defaults to list(range(len(y_values))).
    Optionally writes "<fileName><x_label>vs<y_label>.html" (without opening a
    browser) and/or shows the figure interactively.
    """
    import plotly.graph_objects as go
    import plotly
    if isinstance(data, dict):
        data = [(list(range(len(v))), v, str(k)) for k, v in data.items()]
    fileName = fileName or "default_plot_name"
    fig = go.Figure()
    for x, y, label in data:
        fig.add_trace(go.Scatter(x=x, y=y,
                                 mode=mode,
                                 name=label))
    # Edit the layout
    fig.update_layout(title=title,
                      xaxis_title=x_label,
                      yaxis_title=y_label)
    if save:
        plotly.offline.plot(fig,
                            filename=fileName + x_label + "vs" + y_label + '.html',
                            auto_open=False)
    if show:
        fig.show()
    # FIX: this line had dataset-join junk appended ("| 0.330039 | 0.328233"),
    # which made it a syntax error; restored the plain print.
    print("figure shown")
import logging
import sys
import re
import plugins
from version import __version__
from commands import command
logger = logging.getLogger(__name__)
# "resource" is POSIX-only; on platforms without it (e.g. Windows) the import
# failure is downgraded to a warning instead of crashing the module.
try:
    import resource
except ImportError:
    logger.warning("resource is unavailable on your system")
def _initialise(bot):
    """Deliberate no-op: prevents this module's commands from being
    automatically added by the plugin loader."""
    pass
@command.register
def help(bot, event, cmd=None, *args):
    """list supported commands, /bot help <command> will show additional details"""
    help_lines = []
    link_to_guide = bot.get_config_suboption(event.conv_id, 'link_to_guide')
    admins_list = bot.get_config_suboption(event.conv_id, 'admins')
    help_chat_id = event.user.id_.chat_id
    help_conv_id = event.conv_id
    # Commands available to the (possibly impersonated) user/conversation,
    # split into "admin" and "user" buckets.
    commands = command.get_available_commands(bot, help_chat_id, help_conv_id)
    commands_admin = commands["admin"]
    commands_nonadmin = commands["user"]
    # Overview mode: no command given, or an admin asked to "impersonate"
    # another chat id (and optionally another conversation) to see the command
    # list from that user's perspective.
    if not cmd or (cmd=="impersonate" and event.user.id_.chat_id in admins_list):
        if cmd == "impersonate":
            if len(args) == 1:
                [help_chat_id] = args
            elif len(args) == 2:
                [help_chat_id, help_conv_id] = args
            else:
                raise ValueError("impersonation: supply chat id and optional conversation id")
            help_lines.append(_('<b>Impersonation:</b><br />'
                                '<b><pre>{}</pre></b><br />'
                                '<b><pre>{}</pre></b><br />').format( help_chat_id,
                                                                     help_conv_id ))
        if len(commands_nonadmin) > 0:
            help_lines.append(_('<b>User commands:</b>'))
            help_lines.append(', '.join(sorted(commands_nonadmin)))
        if link_to_guide:
            help_lines.append('')
            help_lines.append(_('<i>For more information, please see: {}</i>').format(link_to_guide))
        if len(commands_admin) > 0:
            help_lines.append('')
            help_lines.append(_('<b>Admin commands:</b>'))
            help_lines.append(', '.join(sorted(commands_admin)))
        help_lines.append("")
        help_lines.append("<b>Command-specific help:</b>")
        help_lines.append("/bot help <command name>")
        # Short aliases (< 9 chars) the bot answers to.
        bot_aliases = [ _alias for _alias in bot._handlers.bot_command if len(_alias) < 9 ]
        if len(bot_aliases) > 1:
            help_lines.append("")
            help_lines.append("<b>My short-hand names:</b>")
            help_lines.append(', '.join(sorted(bot_aliases)))
    else:
        # Detail mode: show the docstring of one specific command; exact match
        # is preferred, then a lowercase match, otherwise "unknown command".
        if cmd in command.commands and (cmd in commands_admin or cmd in commands_nonadmin):
            command_fn = command.commands[cmd]
        elif cmd.lower() in command.commands and (cmd in commands_admin or cmd in commands_nonadmin):
            command_fn = command.commands[cmd.lower()]
        else:
            yield from command.unknown_command(bot, event)
            return
        if "__doc__" in dir(command_fn) and command_fn.__doc__:
            _docstring = command_fn.__doc__.strip()
        else:
            _docstring = "_{}_".format(_("command help not available"))
        """docstrings: apply (very) limited markdown-like formatting to command help"""
        # simple bullet lists
        _docstring = re.sub(r'\n +\* +', '\n* ', _docstring)
        """docstrings: handle generic whitespace
            manually parse line-breaks: single break -> space; multiple breaks -> paragraph
            XXX: the markdown parser is iffy on line-break processing"""
        # turn standalone linebreaks into space, preserves multiple linebreaks
        _docstring = re.sub(r"(?<!\n)\n(?= *[^ \t\n\r\f\v\*])", " ", _docstring)
        # convert multiple consecutive spaces into single space
        _docstring = re.sub(r" +", " ", _docstring)
        # convert consecutive linebreaks into double linebreak (pseudo-paragraph)
        _docstring = re.sub(r" *\n\n+ *(?!\*)", "\n\n", _docstring)
        help_lines.append("<b>{}</b>: {}".format(command_fn.__name__, _docstring))
    # replace /bot with the first alias in the command handler
    # XXX: [botalias] maintained backward compatibility, please avoid using it
    help_lines = [ re.sub(r"(?<!\S)\/bot(?!\S)", bot._handlers.bot_command[0], _line)
                   for _line in help_lines ]
    # Help text goes to the user privately; the conversation only gets a short
    # public notice.
    yield from bot.coro_send_to_user_and_conversation(
        event.user.id_.chat_id,
        event.conv_id,
        "<br />".join(help_lines), # via private message
        _("<i>{}, I've sent you some help ;)</i>") # public message
            .format(event.user.full_name))
@command.register(admin=True)
def locale(bot, event, *args):
    """set bot localisation"""
    if not args:
        message = _("language code required")
    else:
        # "reload" anywhere in the args forces a fresh (non-reused) locale load.
        reuse = "reload" not in args
        if bot.set_locale(args[0], reuse=reuse):
            message = _("locale set to: {}".format(args[0]))
        else:
            message = _("locale unchanged")
    yield from bot.coro_send_message(event.conv, message)
@command.register
def ping(bot, event, *args):
    """reply to a ping"""
    reply = 'pong'
    yield from bot.coro_send_message(event.conv, reply)
    return { "api.response": reply }
@command.register
def optout(bot, event, *args):
    """toggle opt-out of bot private messages globally or on a per-conversation basis:
    * /bot optout - toggles global optout on/off, or displays per-conversation optouts
    * /bot optout [name|convid] - toggles per-conversation optout (overrides global settings)
    * /bot optout all - clears per-conversation opt-out and forces global optout"""
    chat_id = event.user.id_.chat_id
    # ensure the per-user memory subtree exists before reading/writing it
    bot.initialise_memory(chat_id, "user_data")
    # stored optout is either a bool (global) or a list of conv ids (per-conversation)
    optout = False
    if bot.memory.exists(["user_data", chat_id, "optout"]):
        optout = bot.memory.get_by_path(["user_data", chat_id, "optout"])
    # target_conv: False = no argument given, "all" = force global, else a conv id
    target_conv = False
    if args:
        search_string = ' '.join(args).strip()
        if search_string == 'all':
            target_conv = "all"
        else:
            search_results = []
            if( search_string in bot.conversations.catalog
                    and bot.conversations.catalog[search_string]['type'] == "GROUP" ):
                # directly match convid of a group conv
                target_conv = search_string
            else:
                # search for conversation title text, must return single group
                for conv_id, conv_data in bot.conversations.get("text:{0}".format(search_string)).items():
                    if conv_data['type'] == "GROUP":
                        search_results.append(conv_id)
                num_of_results = len(search_results)
                if num_of_results == 1:
                    target_conv = search_results[0]
                else:
                    # zero or multiple matches: refuse to guess, tell the user
                    yield from bot.coro_send_message(
                        event.conv,
                        _("<i>{}, search did not match a single group conversation</i>").format(event.user.full_name))
                    return
    # state machine: transition the stored optout based on its current type
    # and the requested target
    type_optout = type(optout)
    if type_optout is list:
        if not target_conv:
            if not optout:
                # force global optout
                optout = True
            else:
                # user will receive list of opted-out conversations
                pass
        elif target_conv.lower() == 'all':
            # convert list optout to bool optout
            optout = True
        elif target_conv in optout:
            # remove existing conversation optout
            optout.remove(target_conv)
        elif target_conv in bot.conversations.catalog:
            # optout from a specific conversation; dedupe via set round-trip
            optout.append(target_conv)
            optout = list(set(optout))
    elif type_optout is bool:
        if not target_conv:
            # toggle global optout
            optout = not optout
        elif target_conv.lower() == 'all':
            # force global optout
            optout = True
        elif target_conv in bot.conversations.catalog:
            # convert bool optout to list optout
            optout = [ target_conv ]
        else:
            raise ValueError('no conversation was matched')
    else:
        raise TypeError('unrecognised {} for optout, value={}'.format(type_optout, optout))
    bot.memory.set_by_path(["user_data", chat_id, "optout"], optout)
    bot.memory.save()
    # default message assumes opted-in; overridden below when an optout is active
    message = _('<i>{}, you <b>opted-in</b> for bot private messages</i>').format(event.user.full_name)
    if isinstance(optout, bool) and optout:
        message = _('<i>{}, you <b>opted-out</b> from bot private messages</i>').format(event.user.full_name)
    elif isinstance(optout, list) and optout:
        message = _('<i>{}, you are <b>opted-out</b> from the following conversations:\n{}</i>').format(
            event.user.full_name,
            "\n".join([ "* {}".format(bot.conversations.get_name(conv_id))
                        for conv_id in optout ]))
    yield from bot.coro_send_message(event.conv, message)
@command.register
def version(bot, event, *args):
    """get the version of the bot"""
    version_text = _("Bot Version: <b>{}</b>").format(__version__)
    yield from bot.coro_send_message(event.conv, version_text)
@command.register(admin=True)
def resourcememory(bot, event, *args):
    """print basic information about memory usage with resource library"""
    if "resource" not in sys.modules:
        # the module failed to import at file load time (e.g. on Windows)
        yield from bot.coro_send_message(event.conv, "<i>resource module not available</i>")
        return
    # http://fa.bianp.net/blog/2013/different-ways-to-get-memory-consumption-or-lessons-learned-from-memory_profiler/
    # ru_maxrss units differ by platform, so the divisor does too
    divisor = 1024. * 1024. if sys.platform == 'darwin' else 1024.
    peak_mb = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / divisor
    message = "memory (resource): {} MB".format(peak_mb)
    logger.info(message)
    yield from bot.coro_send_message(event.conv, "<b>" + message + "</b>")
@command.register_unknown
def unknown_command(bot, event, *args):
    """handle unknown commands"""
    conv_id = event.conv.id_
    config_silent = bot.get_config_suboption(conv_id, 'silentmode')
    tagged_silent = "silent" in bot.tags.useractive(event.user_id.chat_id, conv_id)
    # stay quiet when either the conversation config or a user tag asks for it
    if config_silent or tagged_silent:
        return
    yield from bot.coro_send_message(
        event.conv, _('{}: Unknown Command').format(event.user.full_name))
@command.register_blocked
def blocked_command(bot, event, *args):
"""handle blocked commands"""
config_silent = bot.get_config_suboption(event.conv.id_, 'silentmode')
tagged_silent = "silent" in bot.tags.useractive(event.user_id.chat_id, event.conv.id_)
if not (config_silent or tagged_silent):
yield from bot.coro_send_message(event.conv, _('{}: Can\'t do that.').format(
event.user.full_name)) | hangupsbot/commands/basic.py | import logging
import sys
import re
import plugins
from version import __version__
from commands import command
logger = logging.getLogger(__name__)
try:
import resource
except ImportError:
logger.warning("resource is unavailable on your system")
def _initialise(bot): pass # prevents commands from being automatically added
@command.register
def help(bot, event, cmd=None, *args):
    """list supported commands, /bot help <command> will show additional details

    With no argument, lists the commands available to the calling user
    (and admin commands where applicable). With a command name, shows that
    command's docstring with light markdown-like formatting applied.
    NOTE: intentionally shadows the ``help`` builtin inside this module.
    """
    help_lines = []
    link_to_guide = bot.get_config_suboption(event.conv_id, 'link_to_guide')
    admins_list = bot.get_config_suboption(event.conv_id, 'admins')
    help_chat_id = event.user.id_.chat_id
    help_conv_id = event.conv_id
    commands = command.get_available_commands(bot, help_chat_id, help_conv_id)
    commands_admin = commands["admin"]
    commands_nonadmin = commands["user"]
    if not cmd or (cmd=="impersonate" and event.user.id_.chat_id in admins_list):
        # full command listing (admins may impersonate another user/conversation)
        if cmd == "impersonate":
            if len(args) == 1:
                [help_chat_id] = args
            elif len(args) == 2:
                [help_chat_id, help_conv_id] = args
            else:
                raise ValueError("impersonation: supply chat id and optional conversation id")
            help_lines.append(_('<b>Impersonation:</b><br />'
                                '<b><pre>{}</pre></b><br />'
                                '<b><pre>{}</pre></b><br />').format( help_chat_id,
                                                                     help_conv_id ))
        if len(commands_nonadmin) > 0:
            help_lines.append(_('<b>User commands:</b>'))
            help_lines.append(', '.join(sorted(commands_nonadmin)))
        if link_to_guide:
            help_lines.append('')
            help_lines.append(_('<i>For more information, please see: {}</i>').format(link_to_guide))
        if len(commands_admin) > 0:
            help_lines.append('')
            help_lines.append(_('<b>Admin commands:</b>'))
            help_lines.append(', '.join(sorted(commands_admin)))
        help_lines.append("")
        help_lines.append("<b>Command-specific help:</b>")
        help_lines.append("/bot help <command name>")
        # only advertise aliases shorter than 9 characters
        bot_aliases = [ _alias for _alias in bot._handlers.bot_command if len(_alias) < 9 ]
        if len(bot_aliases) > 1:
            help_lines.append("")
            help_lines.append("<b>My short-hand names:</b>")
            help_lines.append(', '.join(sorted(bot_aliases)))
    else:
        # per-command help: resolve the handler by exact name, then lowercase
        if cmd in command.commands and (cmd in commands_admin or cmd in commands_nonadmin):
            command_fn = command.commands[cmd]
        elif cmd.lower() in command.commands and (cmd in commands_admin or cmd in commands_nonadmin):
            command_fn = command.commands[cmd.lower()]
        else:
            yield from command.unknown_command(bot, event)
            return
        if "__doc__" in dir(command_fn) and command_fn.__doc__:
            _docstring = command_fn.__doc__.strip()
        else:
            _docstring = "_{}_".format(_("command help not available"))
        """docstrings: apply (very) limited markdown-like formatting to command help"""
        # simple bullet lists
        _docstring = re.sub(r'\n +\* +', '\n* ', _docstring)
        """docstrings: handle generic whitespace
        manually parse line-breaks: single break -> space; multiple breaks -> paragraph
        XXX: the markdown parser is iffy on line-break processing"""
        # turn standalone linebreaks into space, preserves multiple linebreaks
        _docstring = re.sub(r"(?<!\n)\n(?= *[^ \t\n\r\f\v\*])", " ", _docstring)
        # convert multiple consecutive spaces into single space
        _docstring = re.sub(r" +", " ", _docstring)
        # convert consecutive linebreaks into double linebreak (pseudo-paragraph)
        _docstring = re.sub(r" *\n\n+ *(?!\*)", "\n\n", _docstring)
        help_lines.append("<b>{}</b>: {}".format(command_fn.__name__, _docstring))
    # replace /bot with the first alias in the command handler
    # XXX: [botalias] maintained backward compatibility, please avoid using it
    help_lines = [ re.sub(r"(?<!\S)\/bot(?!\S)", bot._handlers.bot_command[0], _line)
                   for _line in help_lines ]
    yield from bot.coro_send_to_user_and_conversation(
        event.user.id_.chat_id,
        event.conv_id,
        "<br />".join(help_lines), # via private message
        _("<i>{}, I've sent you some help ;)</i>") # public message
        .format(event.user.full_name))
@command.register(admin=True)
def locale(bot, event, *args):
"""set bot localisation"""
if len(args) > 0:
if bot.set_locale(args[0], reuse = (False if "reload" in args else True)):
message = _("locale set to: {}".format(args[0]))
else:
message = _("locale unchanged")
else:
message = _("language code required")
yield from bot.coro_send_message(event.conv, message)
@command.register
def ping(bot, event, *args):
"""reply to a ping"""
yield from bot.coro_send_message(event.conv, 'pong')
return { "api.response": "pong" }
@command.register
def optout(bot, event, *args):
"""toggle opt-out of bot private messages globally or on a per-conversation basis:
* /bot optout - toggles global optout on/off, or displays per-conversation optouts
* /bot optout [name|convid] - toggles per-conversation optout (overrides global settings)
* /bot optout all - clears per-conversation opt-out and forces global optout"""
chat_id = event.user.id_.chat_id
bot.initialise_memory(chat_id, "user_data")
optout = False
if bot.memory.exists(["user_data", chat_id, "optout"]):
optout = bot.memory.get_by_path(["user_data", chat_id, "optout"])
target_conv = False
if args:
search_string = ' '.join(args).strip()
if search_string == 'all':
target_conv = "all"
else:
search_results = []
if( search_string in bot.conversations.catalog
and bot.conversations.catalog[search_string]['type'] == "GROUP" ):
# directly match convid of a group conv
target_conv = search_string
else:
# search for conversation title text, must return single group
for conv_id, conv_data in bot.conversations.get("text:{0}".format(search_string)).items():
if conv_data['type'] == "GROUP":
search_results.append(conv_id)
num_of_results = len(search_results)
if num_of_results == 1:
target_conv = search_results[0]
else:
yield from bot.coro_send_message(
event.conv,
_("<i>{}, search did not match a single group conversation</i>").format(event.user.full_name))
return
type_optout = type(optout)
if type_optout is list:
if not target_conv:
if not optout:
# force global optout
optout = True
else:
# user will receive list of opted-out conversations
pass
elif target_conv.lower() == 'all':
# convert list optout to bool optout
optout = True
elif target_conv in optout:
# remove existing conversation optout
optout.remove(target_conv)
elif target_conv in bot.conversations.catalog:
# optout from a specific conversation
optout.append(target_conv)
optout = list(set(optout))
elif type_optout is bool:
if not target_conv:
# toggle global optout
optout = not optout
elif target_conv.lower() == 'all':
# force global optout
optout = True
elif target_conv in bot.conversations.catalog:
# convert bool optout to list optout
optout = [ target_conv ]
else:
raise ValueError('no conversation was matched')
else:
raise TypeError('unrecognised {} for optout, value={}'.format(type_optout, optout))
bot.memory.set_by_path(["user_data", chat_id, "optout"], optout)
bot.memory.save()
message = _('<i>{}, you <b>opted-in</b> for bot private messages</i>').format(event.user.full_name)
if isinstance(optout, bool) and optout:
message = _('<i>{}, you <b>opted-out</b> from bot private messages</i>').format(event.user.full_name)
elif isinstance(optout, list) and optout:
message = _('<i>{}, you are <b>opted-out</b> from the following conversations:\n{}</i>').format(
event.user.full_name,
"\n".join([ "* {}".format(bot.conversations.get_name(conv_id))
for conv_id in optout ]))
yield from bot.coro_send_message(event.conv, message)
@command.register
def version(bot, event, *args):
"""get the version of the bot"""
yield from bot.coro_send_message(event.conv, _("Bot Version: <b>{}</b>").format(__version__))
@command.register(admin=True)
def resourcememory(bot, event, *args):
"""print basic information about memory usage with resource library"""
if "resource" not in sys.modules:
yield from bot.coro_send_message(event.conv, "<i>resource module not available</i>")
return
# http://fa.bianp.net/blog/2013/different-ways-to-get-memory-consumption-or-lessons-learned-from-memory_profiler/
rusage_denom = 1024.
if sys.platform == 'darwin':
# ... it seems that in OSX the output is different units ...
rusage_denom = rusage_denom * rusage_denom
mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / rusage_denom
message = "memory (resource): {} MB".format(mem)
logger.info(message)
yield from bot.coro_send_message(event.conv, "<b>" + message + "</b>")
@command.register_unknown
def unknown_command(bot, event, *args):
"""handle unknown commands"""
config_silent = bot.get_config_suboption(event.conv.id_, 'silentmode')
tagged_silent = "silent" in bot.tags.useractive(event.user_id.chat_id, event.conv.id_)
if not (config_silent or tagged_silent):
yield from bot.coro_send_message( event.conv,
_('{}: Unknown Command').format(event.user.full_name) )
@command.register_blocked
def blocked_command(bot, event, *args):
"""handle blocked commands"""
config_silent = bot.get_config_suboption(event.conv.id_, 'silentmode')
tagged_silent = "silent" in bot.tags.useractive(event.user_id.chat_id, event.conv.id_)
if not (config_silent or tagged_silent):
yield from bot.coro_send_message(event.conv, _('{}: Can\'t do that.').format(
event.user.full_name)) | 0.236781 | 0.092115 |
# Batch zonal statistics: for every shapefile in shpDir, compute per-feature
# 'mean' and 'sum' of every raster in rsterDir, dump the tables to CSV and
# join the mean columns back onto a copy of the shapefile.
# NOTE(review): written for Python 2 (print statements).
import geopandas as gpd
import pandas as pd
import glob
import os
from rasterstats import zonal_stats
# working directories (hard-coded; procDir appears unused below)
rsterDir=r'C:\Students\Hanan\Thesis_work\Data\Processing\Lebanon\LB_Planet\LB_Rasters'
mydir=r'C:\Students\Hanan\Thesis_work\Data\Processing\Lebanon\LB_Planet\LB_Rasters'
shpDir=r'C:\Students\Hanan\Thesis_work\Data\Processing\Lebanon\LB_Planet\shp'
procDir=r'C:\Students\Hanan\Thesis_work\Data\Processing\Idaho\ID_Planet\GridStats'
os.chdir(shpDir)
for shp in glob.glob('*.shp'):
    print shp
    shpNmeList=shp.split('.')
    shpNme=shpNmeList[0]
    rasterList=[]
    gdf=gpd.read_file(shp)
    # synthesize a unique FID per feature: "<shapefile name>_<row index>"
    gdf['FID']=None
    counterShp=0
    while counterShp<len(gdf):
        gdf.loc[counterShp,'FID']=shpNme+'_'+str(counterShp)
        counterShp+=1
    os.chdir(rsterDir)
    # NOTE(review): the raster glob below is commented out, so rasterList
    # stays empty and the per-raster loops never execute -- confirm intent.
##    for rast in glob.glob('*'):
##        rasterList.append(rast)
    statOpsList=['mean','sum']
    for statVar in statOpsList:
        print statVar
        # one output column per raster (extension stripped), plus FID
        cols=[]
        cols.append('FID')
        for rster in rasterList:
            rster=rster.split('.')
            rster=rster[0]
            cols.append(rster)
        df=pd.DataFrame(columns=cols)
        counter=0
        while counter<len(gdf):
            df.loc[counter,'FID']=gdf.loc[counter,'FID']
            counter+=1
        for rster in rasterList:
            os.chdir(rsterDir)
            stats=zonal_stats(gdf,rster,stats=statVar)
            rster=rster.split('.')
            rster=rster[0]
            print rster
            # copy each feature's statistic into the row with the matching
            # FID (FIDs are unique by construction, so exactly one row matches;
            # the inner scan is O(n^2) but correct)
            counter1=0
            while counter1<len(stats):
                stat=stats[counter1]
                fld=df.loc[counter1,'FID']
                counter2=0
                while counter2<len(df):
                    if df.loc[counter2,'FID']==fld:
                        df.loc[counter2,rster]=stat[statVar]
                    counter2+=1
                counter1+=1
        os.chdir(mydir)
        df.to_csv(shpNme+'_'+statVar+'.csv')
        if statVar=='mean':
            # join the mean columns back onto the geometry and save a copy
            for col in cols:
                if col!='FID':
                    df[col]=df[col].astype(float)
            gdfFinal=gdf.join(df.set_index('FID'),on='FID')
            print list(gdfFinal)
            gdfFinal.to_file(shpNme+'_stats.shp',driver='ESRI Shapefile')
os.chdir(shpDir) | Hanan_Stats.py | import geopandas as gpd
import pandas as pd
import glob
import os
from rasterstats import zonal_stats
rsterDir=r'C:\Students\Hanan\Thesis_work\Data\Processing\Lebanon\LB_Planet\LB_Rasters'
mydir=r'C:\Students\Hanan\Thesis_work\Data\Processing\Lebanon\LB_Planet\LB_Rasters'
shpDir=r'C:\Students\Hanan\Thesis_work\Data\Processing\Lebanon\LB_Planet\shp'
procDir=r'C:\Students\Hanan\Thesis_work\Data\Processing\Idaho\ID_Planet\GridStats'
os.chdir(shpDir)
for shp in glob.glob('*.shp'):
print shp
shpNmeList=shp.split('.')
shpNme=shpNmeList[0]
rasterList=[]
gdf=gpd.read_file(shp)
gdf['FID']=None
counterShp=0
while counterShp<len(gdf):
gdf.loc[counterShp,'FID']=shpNme+'_'+str(counterShp)
counterShp+=1
os.chdir(rsterDir)
## for rast in glob.glob('*'):
## rasterList.append(rast)
statOpsList=['mean','sum']
for statVar in statOpsList:
print statVar
cols=[]
cols.append('FID')
for rster in rasterList:
rster=rster.split('.')
rster=rster[0]
cols.append(rster)
df=pd.DataFrame(columns=cols)
counter=0
while counter<len(gdf):
df.loc[counter,'FID']=gdf.loc[counter,'FID']
counter+=1
for rster in rasterList:
os.chdir(rsterDir)
stats=zonal_stats(gdf,rster,stats=statVar)
rster=rster.split('.')
rster=rster[0]
print rster
counter1=0
while counter1<len(stats):
stat=stats[counter1]
fld=df.loc[counter1,'FID']
counter2=0
while counter2<len(df):
if df.loc[counter2,'FID']==fld:
df.loc[counter2,rster]=stat[statVar]
counter2+=1
counter1+=1
os.chdir(mydir)
df.to_csv(shpNme+'_'+statVar+'.csv')
if statVar=='mean':
for col in cols:
if col!='FID':
df[col]=df[col].astype(float)
gdfFinal=gdf.join(df.set_index('FID'),on='FID')
print list(gdfFinal)
gdfFinal.to_file(shpNme+'_stats.shp',driver='ESRI Shapefile')
os.chdir(shpDir) | 0.052122 | 0.138345 |
import torch
class Beam(object):
"""Class for managing the internals of the beam search process.
Takes care of beams, back pointers, and scores.
Args:
size (int): Number of beams to use.
pad (int): Magic integer in output vocab.
bos (int): Magic integer in output vocab.
eos (int): Magic integer in output vocab.
n_best (int): Don't stop until at least this many beams have
reached EOS.
cuda (bool): use gpu
global_scorer (onmt.translate.GNMTGlobalScorer): Scorer instance.
min_length (int): Shortest acceptable generation, not counting
begin-of-sentence or end-of-sentence.
"""
def __init__(self, size, bos, eos,
n_best=1, cuda=False,
min_length=0,
stepwise_penalty=False,
block_ngram_repeat=0,
exclusion_tokens=set()):
self.size = size
self.tt = torch
# The score for each translation on the beam.
self.scores = self.tt.FloatTensor(size).zero_()
self.all_scores = []
self.all_probs = [] # all_probs = sequence * beam * vocab
# The backpointers at each time-step.
self.prev_ks = []
# The outputs at each time-step.
self.next_ys = [self.tt.LongTensor(size).fill_(bos)]
# Has EOS topped the beam yet.
self._eos = eos
# Time and k pair for finished.
self.finished = []
# Minimum prediction length
self.min_length = min_length
@property
def current_predictions(self):
return self.next_ys[-1]
@property
def current_origin(self):
"""Get the backpointers for the current timestep."""
return self.prev_ks[-1]
def advance(self, word_probs, attn_out):
"""
Given prob over words for every last beam `wordLk` and attention
`attn_out`: Compute and update the beam search.
Args:
word_probs (FloatTensor): probs of advancing from the last step
``(K, words)`` beam_width * words
attn_out (FloatTensor): attention at the last step
Returns:
bool: True if beam search is complete.
"""
word_probs_temp = word_probs.clone()
num_words = word_probs.size(1)
# force the output to be longer than self.min_length
cur_len = len(self.next_ys)
if cur_len <= self.min_length:
# assumes there are len(word_probs) predictions OTHER
# than EOS that are greater than -1e20
for k in range(len(word_probs)):
word_probs[k][self._eos] = -1e20
# Sum the previous scores.
if len(self.prev_ks) > 0:
beam_scores = word_probs + self.scores.unsqueeze(1) # beam_width * words
# Don't let EOS have children.
for i in range(self.size):
if self.next_ys[-1][i] == self._eos:
beam_scores[i] = -1e20
else:
beam_scores = word_probs[0]
flat_beam_scores = beam_scores.view(-1)
best_scores, best_scores_id = flat_beam_scores.topk(self.size, 0, True, True)
self.all_probs.append(word_probs_temp)
self.all_scores.append(self.scores)
self.scores = best_scores
# best_scores_id is flattened beam x word array, so calculate which
# word and beam each score came from
prev_k = best_scores_id / num_words
self.prev_ks.append(prev_k)
self.next_ys.append((best_scores_id - prev_k * num_words))
for i in range(self.size):
if self.next_ys[-1][i] == self._eos:
length = len(self.next_ys) - 1
score = self.scores[i] / length
self.finished.append((score, length, i))
# End condition is when top-of-beam is EOS and no global score.
if self.next_ys[-1][0] == self._eos:
self.all_scores.append(self.scores)
def sort_finished(self):
if len(self.finished) == 0:
for i in range(self.n_best):
length = len(self.next_ys) - 1
score = self.scores[i] / length
self.finished.append((score, length, i))
self.finished = sorted(self.finished, key=lambda finished: finished[0], reverse=True)
scores = [sc for sc, _, _ in self.finished]
ks = [(t, k) for _, t, k in self.finished]
return scores, ks
def get_hyp(self, timestep, k):
"""Walk back to construct the full hypothesis."""
hyp, key_index, probability = [], [], []
for j in range(len(self.prev_ks[:timestep]) - 1, -1, -1):
hyp.append(self.next_ys[j + 1][k])
key_index.append(k)
probability.append(self.all_probs[j][k])
k = self.prev_ks[j][k]
return hyp[::-1], key_index[::-1], probability[::-1]
def fill_empty_sequence(self, stack, max_length):
for i in range(stack.size(0), max_length):
stack = torch.cat([stack, stack[0].unsqueeze(0)])
return stack | models/listen_attend_and_spell/beam_search.py | import torch
class Beam(object):
"""Class for managing the internals of the beam search process.
Takes care of beams, back pointers, and scores.
Args:
size (int): Number of beams to use.
pad (int): Magic integer in output vocab.
bos (int): Magic integer in output vocab.
eos (int): Magic integer in output vocab.
n_best (int): Don't stop until at least this many beams have
reached EOS.
cuda (bool): use gpu
global_scorer (onmt.translate.GNMTGlobalScorer): Scorer instance.
min_length (int): Shortest acceptable generation, not counting
begin-of-sentence or end-of-sentence.
"""
def __init__(self, size, bos, eos,
n_best=1, cuda=False,
min_length=0,
stepwise_penalty=False,
block_ngram_repeat=0,
exclusion_tokens=set()):
self.size = size
self.tt = torch
# The score for each translation on the beam.
self.scores = self.tt.FloatTensor(size).zero_()
self.all_scores = []
self.all_probs = [] # all_probs = sequence * beam * vocab
# The backpointers at each time-step.
self.prev_ks = []
# The outputs at each time-step.
self.next_ys = [self.tt.LongTensor(size).fill_(bos)]
# Has EOS topped the beam yet.
self._eos = eos
# Time and k pair for finished.
self.finished = []
# Minimum prediction length
self.min_length = min_length
    @property
    def current_predictions(self):
        """Tokens chosen at the most recent step, one entry per beam."""
        return self.next_ys[-1]
    @property
    def current_origin(self):
        """Get the backpointers for the current timestep.

        Maps each current beam to its parent beam index at the previous
        step; raises IndexError before the first advance() (prev_ks empty).
        """
        return self.prev_ks[-1]
def advance(self, word_probs, attn_out):
"""
Given prob over words for every last beam `wordLk` and attention
`attn_out`: Compute and update the beam search.
Args:
word_probs (FloatTensor): probs of advancing from the last step
``(K, words)`` beam_width * words
attn_out (FloatTensor): attention at the last step
Returns:
bool: True if beam search is complete.
"""
word_probs_temp = word_probs.clone()
num_words = word_probs.size(1)
# force the output to be longer than self.min_length
cur_len = len(self.next_ys)
if cur_len <= self.min_length:
# assumes there are len(word_probs) predictions OTHER
# than EOS that are greater than -1e20
for k in range(len(word_probs)):
word_probs[k][self._eos] = -1e20
# Sum the previous scores.
if len(self.prev_ks) > 0:
beam_scores = word_probs + self.scores.unsqueeze(1) # beam_width * words
# Don't let EOS have children.
for i in range(self.size):
if self.next_ys[-1][i] == self._eos:
beam_scores[i] = -1e20
else:
beam_scores = word_probs[0]
flat_beam_scores = beam_scores.view(-1)
best_scores, best_scores_id = flat_beam_scores.topk(self.size, 0, True, True)
self.all_probs.append(word_probs_temp)
self.all_scores.append(self.scores)
self.scores = best_scores
# best_scores_id is flattened beam x word array, so calculate which
# word and beam each score came from
prev_k = best_scores_id / num_words
self.prev_ks.append(prev_k)
self.next_ys.append((best_scores_id - prev_k * num_words))
for i in range(self.size):
if self.next_ys[-1][i] == self._eos:
length = len(self.next_ys) - 1
score = self.scores[i] / length
self.finished.append((score, length, i))
# End condition is when top-of-beam is EOS and no global score.
if self.next_ys[-1][0] == self._eos:
self.all_scores.append(self.scores)
def sort_finished(self):
if len(self.finished) == 0:
for i in range(self.n_best):
length = len(self.next_ys) - 1
score = self.scores[i] / length
self.finished.append((score, length, i))
self.finished = sorted(self.finished, key=lambda finished: finished[0], reverse=True)
scores = [sc for sc, _, _ in self.finished]
ks = [(t, k) for _, t, k in self.finished]
return scores, ks
def get_hyp(self, timestep, k):
"""Walk back to construct the full hypothesis."""
hyp, key_index, probability = [], [], []
for j in range(len(self.prev_ks[:timestep]) - 1, -1, -1):
hyp.append(self.next_ys[j + 1][k])
key_index.append(k)
probability.append(self.all_probs[j][k])
k = self.prev_ks[j][k]
return hyp[::-1], key_index[::-1], probability[::-1]
def fill_empty_sequence(self, stack, max_length):
for i in range(stack.size(0), max_length):
stack = torch.cat([stack, stack[0].unsqueeze(0)])
return stack | 0.905766 | 0.398231 |
import os
import sys
from os.path import abspath, dirname, expanduser, getsize, isfile, splitext
from typing import List, TypedDict
from zipfile import ZipFile
ROOT = dirname(dirname(abspath(__file__)))
class ProcessResponse(TypedDict):
    """Typed shape of the JSON returned by the compression service's
    'process' endpoint.

    Field meanings below are inferred from the field names only --
    confirm against the service's API reference.
    """
    download_filename: str        # server-side name of the file to download
    filesize: int                 # input size in bytes (presumed)
    output_filesize: int          # produced output size in bytes (presumed)
    output_filenumber: int        # number of files produced
    output_extensions: List[str]  # extensions of the produced files
    timer: str                    # processing duration reported by the server
    status: str                   # textual job status
def sizeof_fmt(size: float, prec: int = 1) -> str:
    """Render a byte count as a short human-readable string.

    Walks up the binary-prefix units, dividing by 1024 until the value
    drops below 1024 or units run out (https://stackoverflow.com/a/1094933).

    Args:
        size (float): File size in bytes.
        prec (int): Decimal places in the result. Defaults to 1.

    Returns:
        str: Formatted size, e.g. ``"1.5 KB"``.
    """
    labels = ("B", "KB", "MB", "GB", "TB", "PB")
    value = size
    chosen = labels[0]
    for chosen in labels:
        if abs(value) < 1024:
            break
        value /= 1024
    return f"{value:3.{prec}f} {chosen}"
def load_dotenv(filepath: str = f"{ROOT}/.env") -> None:
    """Parse environment variables in .env into os.environ.

    Blank lines and ``#`` comments are skipped. Only the first ``=``
    separates key from value, so values may themselves contain ``=``.

    Args:
        filepath (str, optional): Path to .env file. Defaults to './.env'.
    """
    if not isfile(filepath) or getsize(filepath) == 0:
        return
    with open(filepath) as dotenv:
        for line in dotenv:
            line = line.strip()
            # skip blanks and comments -- the original crashed on a bare
            # newline ("".split("=") yields one element, unpack fails)
            if not line or line.startswith("#"):
                continue
            # maxsplit=1: don't truncate values that contain "="
            key, val = line.split("=", 1)
            os.environ[key] = val
def del_or_keep_compressed(
    pdfs: List[str], downloaded_file: str, inplace: bool, suffix: str
) -> None:
    """Check whether compressed PDFs are smaller than original. If so, relocate each
    compressed file to same directory as the original either with suffix appended to
    file name or overwriting the original if inplace=True.

    Args:
        pdfs (list[str]): File paths to PDFs uploaded to iLovePDF.
        downloaded_file (str): Path to file downloaded from iLovePDF servers. Will be
            PDF or ZIP depending on if single or multiple files were uploaded.
        inplace (bool): Whether to overwrite original PDFs with compressed ones if
            smaller.
        suffix (str): String to insert after filename and before extension of compressed
            PDFs. Used only if inplace=False.
    """
    if len(pdfs) == 1:
        compressed_files = [downloaded_file]
    else:
        # multiple files come back as a ZIP; extract into the CWD.
        # NOTE(review): pairing below assumes sorted zip member names align
        # with the order of `pdfs` -- confirm the upload order guarantees this
        archive = ZipFile(downloaded_file)
        compressed_files = sorted(archive.namelist())
        archive.extractall()
    trash_path = f"{expanduser('~')}/.Trash"
    for idx, (orig_path, compr_path) in enumerate(zip(pdfs, compressed_files), 1):
        orig_size = getsize(orig_path)
        compressed_size = getsize(compr_path)
        diff = orig_size - compressed_size
        if diff > 0:
            print(
                f"{idx}/{len(pdfs)} Compressed PDF '{orig_path}' is "
                f"{sizeof_fmt(diff)} ({diff / orig_size:.1%}) smaller than original "
                f"file ({sizeof_fmt(compressed_size)} vs {sizeof_fmt(orig_size)})."
            )
            if inplace:
                # move original PDF to trash on macOS (for later retrieval if necessary)
                # simply let os.rename() overwrite existing PDF on other platforms
                if sys.platform == "darwin":
                    print("Using compressed file. Old file moved to trash.\n")
                    orig_file_name = os.path.split(orig_path)[1]
                    os.rename(orig_path, f"{trash_path}/{orig_file_name}")
                else:
                    print("Using compressed file.\n")
                os.rename(compr_path, orig_path)
            elif suffix:
                base_name, ext = splitext(orig_path)
                new_path = f"{base_name}{suffix}{ext}"
                if isfile(new_path):
                    # avoid clobbering: append -2, -3, ... until a free name exists
                    counter = 2
                    while isfile(f"{base_name}{suffix}-{counter}{ext}"):
                        counter += 1
                    new_path = f"{base_name}{suffix}-{counter}{ext}"
                os.rename(compr_path, new_path)
            # NOTE(review): when inplace is False and suffix is empty, the
            # smaller compressed file is left in the CWD -- confirm intended
        else:
            print(
                f"{idx}/{len(pdfs)} Compressed '{orig_path}' no smaller than "
                "original file. Keeping original."
            )
            os.remove(compr_path)
    # check needed since if single PDF was processed, the file will have been moved
    if isfile(downloaded_file):
os.remove(downloaded_file) | pdf_compressor/utils.py | import os
import sys
from os.path import abspath, dirname, expanduser, getsize, isfile, splitext
from typing import List, TypedDict
from zipfile import ZipFile
ROOT = dirname(dirname(abspath(__file__)))
class ProcessResponse(TypedDict):
download_filename: str
filesize: int
output_filesize: int
output_filenumber: int
output_extensions: List[str]
timer: str
status: str
def sizeof_fmt(size: float, prec: int = 1) -> str:
"""Convert file size to human readable format (https://stackoverflow.com/a/1094933).
Args:
size (int): File size in bytes.
prec (int): Floating point precision in returned string. Defaults to 1.
Returns:
str: File size in human-readable format.
"""
for unit in ["B", "KB", "MB", "GB", "TB", "PB"]:
if abs(size) < 1024:
break
size /= 1024
return f"{size:3.{prec}f} {unit}"
def load_dotenv(filepath: str = f"{ROOT}/.env") -> None:
"""Parse environment variables in .env into os.environ.
Args:
filepath (str, optional): Path to .env file. Defaults to './.env'.
"""
if not isfile(filepath) or getsize(filepath) == 0:
return
with open(filepath) as dotenv:
for line in dotenv:
if line.startswith("#"):
continue
key, val = line.replace("\n", "").split("=")
os.environ[key] = val
def del_or_keep_compressed(
pdfs: List[str], downloaded_file: str, inplace: bool, suffix: str
) -> None:
"""Check whether compressed PDFs are smaller than original. If so, relocate each
compressed file to same directory as the original either with suffix appended to
file name or overwriting the original if inplace=True.
Args:
pdfs (list[str]): File paths to PDFs uploaded to iLovePDF.
downloaded_file (str): Path to file downloaded from iLovePDF servers. Will be
PDF or ZIP depending on if single or multiple files were uploaded.
inplace (bool): Whether to overwrite original PDFs with compressed ones if
smaller.
suffix (str): String to insert after filename and before extension of compressed
PDFs. Used only if inplace=False.
"""
if len(pdfs) == 1:
compressed_files = [downloaded_file]
else:
archive = ZipFile(downloaded_file)
compressed_files = sorted(archive.namelist())
archive.extractall()
trash_path = f"{expanduser('~')}/.Trash"
for idx, (orig_path, compr_path) in enumerate(zip(pdfs, compressed_files), 1):
orig_size = getsize(orig_path)
compressed_size = getsize(compr_path)
diff = orig_size - compressed_size
if diff > 0:
print(
f"{idx}/{len(pdfs)} Compressed PDF '{orig_path}' is "
f"{sizeof_fmt(diff)} ({diff / orig_size:.1%}) smaller than original "
f"file ({sizeof_fmt(compressed_size)} vs {sizeof_fmt(orig_size)})."
)
if inplace:
# move original PDF to trash on macOS (for later retrieval if necessary)
# simply let os.rename() overwrite existing PDF on other platforms
if sys.platform == "darwin":
print("Using compressed file. Old file moved to trash.\n")
orig_file_name = os.path.split(orig_path)[1]
os.rename(orig_path, f"{trash_path}/{orig_file_name}")
else:
print("Using compressed file.\n")
os.rename(compr_path, orig_path)
elif suffix:
base_name, ext = splitext(orig_path)
new_path = f"{base_name}{suffix}{ext}"
if isfile(new_path):
counter = 2
while isfile(f"{base_name}{suffix}-{counter}{ext}"):
counter += 1
new_path = f"{base_name}{suffix}-{counter}{ext}"
os.rename(compr_path, new_path)
else:
print(
f"{idx}/{len(pdfs)} Compressed '{orig_path}' no smaller than "
"original file. Keeping original."
)
os.remove(compr_path)
# check needed since if single PDF was processed, the file will have been moved
if isfile(downloaded_file):
os.remove(downloaded_file) | 0.612889 | 0.329783 |
from datetime import datetime, timedelta
from random import randrange
from uuid import uuid4
from flask import Blueprint, jsonify, request, url_for
from flask_login import current_user, login_required
from ..helper import admin_required_decorator as admin_required
from ..helper.youtube import build_youtube_api
from ..models import Callback, Channel
from ..tasks import channels_renew
api_channel_blueprint = Blueprint("api_channel", __name__)
@api_channel_blueprint.route("/search")
@login_required
def search():
    """Search YouTube for channels matching the ``query`` request argument.

    Returns a JSON list of up to 30 channels, each entry holding
    ``title``, ``id`` (the YouTube channel id) and ``thumbnail`` URL.
    """
    query = request.args.get("query")
    response = (
        build_youtube_api()
        .search()
        .list(part="snippet", maxResults=30, q=query, type="channel")
        .execute()
    )
    # Build the trimmed result list straight from the API items.
    # (The original dead assignment `results = response` was removed.)
    results = [
        {
            "title": item["snippet"]["title"],
            "id": item["snippet"]["channelId"],
            "thumbnail": item["snippet"]["thumbnails"]["high"]["url"],
        }
        for item in response["items"]
    ]
    return jsonify(results)
@api_channel_blueprint.route("/renew-all")
@login_required
def renew_all():
    """Renew Subscription Info, Both Hub and Info.

    Query args:
        execution: 0 (default) renews every channel in one immediate bulk
            task; any other value schedules one task per channel with a
            countdown derived from that channel's expiration; -2
            additionally randomizes each countdown to spread the load.

    Returns JSON: for execution == 0 the task id plus its status URL;
    otherwise a mapping of channel id -> scheduled task id.
    """
    execution = int(request.args.to_dict().get("execution", 0))
    interval = 60 * 60 * 24 * 4  # renewal interval passed to per-channel tasks (seconds)
    if execution == 0:
        # One bulk task covering all channels, started immediately.
        task = channels_renew.apply_async(
            args=[[channel.id for channel in Channel.query.all()]]
        )
        response = {
            "id": task.id,
            "status": url_for("api_task.status", task_id=task.id),
        }
    else:
        response = {}
        for channel in Channel.query.all():
            expiration = channel.expiration
            if expiration is None:
                # Expiration is not available yet (Channel just init)
                # Set ETA to four days later
                countdown = 60 * 60 * 24 * 4
            elif expiration > datetime.now() + timedelta(days=1):
                # Expiration is more than one day
                # Set ETA to one day before expiration
                countdown = expiration - timedelta(days=1) - datetime.now()
                countdown = countdown.total_seconds()
            else:
                # Expiration is less than one day
                # Set ETA to now
                countdown = 0
            if execution == -2 and countdown > 0:
                # Randomize the ETA within the window to avoid all
                # renewals firing at the same moment.
                countdown = randrange(int(countdown))
            task = channels_renew.apply_async(
                args=[[channel.id], interval],
                countdown=countdown,
                task_id=f"renew_{channel.id}_{str(uuid4())[:8]}",
            )
            response[channel.id] = task.id
    return jsonify(response)
@api_channel_blueprint.route("/callbacks")
@login_required
@admin_required
def callbacks_all():
    """List hub callbacks from the last ``days`` days (default 3), newest first."""
    days = int(request.args.to_dict().get("days", 3))
    cutoff = (datetime.now() - timedelta(days=days)).strftime("%Y-%m-%d")
    recent = (
        Callback.query.filter(Callback.timestamp >= cutoff)
        .order_by(Callback.timestamp.desc())
    )
    return jsonify([dict(callback) for callback in recent])
@api_channel_blueprint.route("/<channel_id>/status")
@login_required
def status(channel_id):
    """From Hub fetch Status"""
    # 404 for unknown channels; otherwise refresh and return hub status.
    return jsonify(Channel.query.get_or_404(channel_id).refresh())
@api_channel_blueprint.route("/<channel_id>/subscribe")
@login_required
def subscribe(channel_id):
    """Subscribe to a Channel"""
    result = current_user.subscribe_to(channel_id)
    return jsonify(result)
@api_channel_blueprint.route("/<channel_id>/unsubscribe")
@login_required
def unsubscribe(channel_id):
    """Unsubscribe to a Channel"""
    # NOTE(review): "unbsubscribe" looks misspelled — confirm the user
    # model really defines the method under this name before renaming
    # either side.
    return jsonify(current_user.unbsubscribe(channel_id))
@api_channel_blueprint.route("/<channel_id>/fetch-videos")
@login_required
@admin_required
def fetch_videos(channel_id):
    """Force-fetch a channel's videos (admin only).

    TODO: deprecate this endpoint.
    """
    target = Channel.query.get_or_404(channel_id)
    return jsonify(target.fetch_videos())
@api_channel_blueprint.route("/<channel_id>/callbacks")
@login_required
@admin_required
def callbacks(channel_id):
    """List up to 50 stored hub callbacks for one channel (admin only)."""
    channel = Channel.query.get_or_404(channel_id)
    callbacks = channel.callbacks.limit(50)
    # Each Callback row is dict-convertible; serialize for the JSON reply.
    response = list(map(dict, callbacks))
return jsonify(response) | tubee/routes/api_channel.py | from datetime import datetime, timedelta
from random import randrange
from uuid import uuid4
from flask import Blueprint, jsonify, request, url_for
from flask_login import current_user, login_required
from ..helper import admin_required_decorator as admin_required
from ..helper.youtube import build_youtube_api
from ..models import Callback, Channel
from ..tasks import channels_renew
api_channel_blueprint = Blueprint("api_channel", __name__)
@api_channel_blueprint.route("/search")
@login_required
def search():
    """Search YouTube for channels matching the ``query`` request argument.

    Returns a JSON list of up to 30 channels, each entry holding
    ``title``, ``id`` (the YouTube channel id) and ``thumbnail`` URL.
    """
    query = request.args.get("query")
    response = (
        build_youtube_api()
        .search()
        .list(part="snippet", maxResults=30, q=query, type="channel")
        .execute()
    )
    # Build the trimmed result list straight from the API items.
    # (The original dead assignment `results = response` was removed.)
    results = [
        {
            "title": item["snippet"]["title"],
            "id": item["snippet"]["channelId"],
            "thumbnail": item["snippet"]["thumbnails"]["high"]["url"],
        }
        for item in response["items"]
    ]
    return jsonify(results)
@api_channel_blueprint.route("/renew-all")
@login_required
def renew_all():
    """Renew Subscription Info, Both Hub and Info.

    Query args:
        execution: 0 (default) renews every channel in one immediate bulk
            task; any other value schedules one task per channel with a
            countdown derived from that channel's expiration; -2
            additionally randomizes each countdown to spread the load.

    Returns JSON: for execution == 0 the task id plus its status URL;
    otherwise a mapping of channel id -> scheduled task id.
    """
    execution = int(request.args.to_dict().get("execution", 0))
    interval = 60 * 60 * 24 * 4  # renewal interval passed to per-channel tasks (seconds)
    if execution == 0:
        # One bulk task covering all channels, started immediately.
        task = channels_renew.apply_async(
            args=[[channel.id for channel in Channel.query.all()]]
        )
        response = {
            "id": task.id,
            "status": url_for("api_task.status", task_id=task.id),
        }
    else:
        response = {}
        for channel in Channel.query.all():
            expiration = channel.expiration
            if expiration is None:
                # Expiration is not available yet (Channel just init)
                # Set ETA to four days later
                countdown = 60 * 60 * 24 * 4
            elif expiration > datetime.now() + timedelta(days=1):
                # Expiration is more than one day
                # Set ETA to one day before expiration
                countdown = expiration - timedelta(days=1) - datetime.now()
                countdown = countdown.total_seconds()
            else:
                # Expiration is less than one day
                # Set ETA to now
                countdown = 0
            if execution == -2 and countdown > 0:
                # Randomize the ETA within the window to avoid all
                # renewals firing at the same moment.
                countdown = randrange(int(countdown))
            task = channels_renew.apply_async(
                args=[[channel.id], interval],
                countdown=countdown,
                task_id=f"renew_{channel.id}_{str(uuid4())[:8]}",
            )
            response[channel.id] = task.id
    return jsonify(response)
@api_channel_blueprint.route("/callbacks")
@login_required
@admin_required
def callbacks_all():
    """List hub callbacks from the last ``days`` days (default 3), newest first."""
    days = int(request.args.to_dict().get("days", 3))
    cutoff = (datetime.now() - timedelta(days=days)).strftime("%Y-%m-%d")
    recent = (
        Callback.query.filter(Callback.timestamp >= cutoff)
        .order_by(Callback.timestamp.desc())
    )
    return jsonify([dict(callback) for callback in recent])
@api_channel_blueprint.route("/<channel_id>/status")
@login_required
def status(channel_id):
    """From Hub fetch Status"""
    # 404 for unknown channels; otherwise refresh and return hub status.
    return jsonify(Channel.query.get_or_404(channel_id).refresh())
@api_channel_blueprint.route("/<channel_id>/subscribe")
@login_required
def subscribe(channel_id):
    """Subscribe to a Channel"""
    result = current_user.subscribe_to(channel_id)
    return jsonify(result)
@api_channel_blueprint.route("/<channel_id>/unsubscribe")
@login_required
def unsubscribe(channel_id):
    """Unsubscribe to a Channel"""
    # NOTE(review): "unbsubscribe" looks misspelled — confirm the user
    # model really defines the method under this name before renaming
    # either side.
    return jsonify(current_user.unbsubscribe(channel_id))
@api_channel_blueprint.route("/<channel_id>/fetch-videos")
@login_required
@admin_required
def fetch_videos(channel_id):
    """Force-fetch a channel's videos (admin only).

    TODO: deprecate this endpoint.
    """
    target = Channel.query.get_or_404(channel_id)
    return jsonify(target.fetch_videos())
@api_channel_blueprint.route("/<channel_id>/callbacks")
@login_required
@admin_required
def callbacks(channel_id):
    """List up to 50 stored hub callbacks for one channel (admin only)."""
    channel = Channel.query.get_or_404(channel_id)
    callbacks = channel.callbacks.limit(50)
    # Each Callback row is dict-convertible; serialize for the JSON reply.
    response = list(map(dict, callbacks))
return jsonify(response) | 0.387227 | 0.061593 |
from .DFNDataReleases import get_relevant_info
from .DFNDataReleases import dir_path as REPO_DIR
from collections import defaultdict
import os
import json
def get_naf_paths(project,language,verbose=0):
    """Collect NAF reference-text paths per event type.

    :param project: the project under which the NAF files are generated.
    :param language: the language of the reference texts.
    :type project: string
    :type language: string
    :return: dict mapping event type -> set of NAF file paths
    """
    info = get_relevant_info(repo_dir=REPO_DIR,
                             project=project,
                             load_jsons=True)
    paths_per_event_type = defaultdict(set)
    for incident in info['proj2inc'][project]:
        event_type = info['inc2type'][incident]
        for doc in info['inc2lang2doc'][incident][language]:
            naf_path = os.path.join(info["unstructured"], language, f"{doc}.naf")
            assert os.path.exists(naf_path), f"{naf_path} does not exist on disk"
            paths_per_event_type[event_type].add(naf_path)
    if verbose >= 2:
        for event_type, paths in paths_per_event_type.items():
            print(f'{event_type}: {len(paths)} reference texts')
    return paths_per_event_type
def get_lang2doc2dct_info(project,language, verbose):
    """Load the per-document historical-distance mapping for a project.

    Reads ``lang2doc2dct_info.json`` from the project's statistics folder
    and returns the parsed dictionary.
    """
    info = get_relevant_info(repo_dir=REPO_DIR,
                             project=project,
                             load_jsons=True)
    json_path = os.path.join(info['project_statistics'], 'lang2doc2dct_info.json')
    assert os.path.exists(json_path), f"{json_path} does not exist on disk"
    with open(json_path, "r") as handle:
        distances = json.load(handle)
    if verbose >= 1:
        print(f"lang2doc2dct_info contains historical distance for {len(distances[language])} documents")
    return distances
def analysis_paths(time_bucket_config, absolute_path, experiment, balanced_classes, verbose):
    """Build the canonical folder/file paths for one experiment run.

    The time-bucket folder name is the sorted bucket labels joined with
    '---' plus a '+balanced'/'+unbalanced' suffix; every artifact path
    (corpus, splits, reports, model, error analysis) hangs off it.
    Returns a dict keyed by human-readable artifact name.
    """
    keys = list(time_bucket_config.keys())
    folder_basename = '---'.join(sorted(keys))
    if balanced_classes:
        balanced = "balanced"
    else:
        balanced = "unbalanced"
    folder_basename = f"{folder_basename}+{balanced}"
    paths_dict = {}
    folder = f"{absolute_path}/{folder_basename}"
    paths_dict['time bucket folder'] = folder
    json_path = f"{absolute_path}/{folder_basename}/sampled_corpus.json"
    paths_dict['sampled corpus'] = json_path
    train_path = f"{absolute_path}/{folder_basename}/titles_train.pkl"
    paths_dict['train path'] = train_path
    dev_path = f"{absolute_path}/{folder_basename}/titles_dev.pkl"
    paths_dict['dev path'] = dev_path
    test_path = f"{absolute_path}/{folder_basename}/titles_test.pkl"
    paths_dict['test path'] = test_path
    dev_report = f"{absolute_path}/{folder_basename}/{experiment}/dev_report.txt"
    paths_dict['dev report'] = dev_report
    test_report = f"{absolute_path}/{folder_basename}/{experiment}/test_report.txt"
    paths_dict['test report'] = test_report
    unknown_path = f"{absolute_path}/{folder_basename}/unknown_distance.json"
    paths_dict['unknown distance'] = unknown_path
    model_path = f"{absolute_path}/{folder_basename}/{experiment}/model.pkl"
    paths_dict['model'] = model_path
    experiment_folder = f"{absolute_path}/{folder_basename}/{experiment}"
    paths_dict["experiment folder"] = experiment_folder
    error_analysis_path = f"{experiment_folder}/error_analysis.xlsx"
    paths_dict["error analysis path"] = error_analysis_path
    if verbose >= 1:
        print(folder)
return paths_dict | path_utils.py | from .DFNDataReleases import get_relevant_info
from .DFNDataReleases import dir_path as REPO_DIR
from collections import defaultdict
import os
import json
def get_naf_paths(project,language,verbose=0):
    """Collect NAF reference-text paths per event type.

    :param project: the project under which the NAF files are generated.
    :param language: the language of the reference texts.
    :type project: string
    :type language: string
    :return: dict mapping event type -> set of NAF file paths
    """
    info = get_relevant_info(repo_dir=REPO_DIR,
                             project=project,
                             load_jsons=True)
    paths_per_event_type = defaultdict(set)
    for incident in info['proj2inc'][project]:
        event_type = info['inc2type'][incident]
        for doc in info['inc2lang2doc'][incident][language]:
            naf_path = os.path.join(info["unstructured"], language, f"{doc}.naf")
            assert os.path.exists(naf_path), f"{naf_path} does not exist on disk"
            paths_per_event_type[event_type].add(naf_path)
    if verbose >= 2:
        for event_type, paths in paths_per_event_type.items():
            print(f'{event_type}: {len(paths)} reference texts')
    return paths_per_event_type
def get_lang2doc2dct_info(project,language, verbose):
    """Load the per-document historical-distance mapping for a project.

    Reads ``lang2doc2dct_info.json`` from the project's statistics folder
    and returns the parsed dictionary.
    """
    info = get_relevant_info(repo_dir=REPO_DIR,
                             project=project,
                             load_jsons=True)
    json_path = os.path.join(info['project_statistics'], 'lang2doc2dct_info.json')
    assert os.path.exists(json_path), f"{json_path} does not exist on disk"
    with open(json_path, "r") as handle:
        distances = json.load(handle)
    if verbose >= 1:
        print(f"lang2doc2dct_info contains historical distance for {len(distances[language])} documents")
    return distances
def analysis_paths(time_bucket_config, absolute_path, experiment, balanced_classes, verbose):
    """Build the canonical folder/file paths for one experiment run.

    The time-bucket folder name is the sorted bucket labels joined with
    '---' plus a '+balanced'/'+unbalanced' suffix; every artifact path
    (corpus, splits, reports, model, error analysis) hangs off it.
    Returns a dict keyed by human-readable artifact name.
    """
    keys = list(time_bucket_config.keys())
    folder_basename = '---'.join(sorted(keys))
    if balanced_classes:
        balanced = "balanced"
    else:
        balanced = "unbalanced"
    folder_basename = f"{folder_basename}+{balanced}"
    paths_dict = {}
    folder = f"{absolute_path}/{folder_basename}"
    paths_dict['time bucket folder'] = folder
    json_path = f"{absolute_path}/{folder_basename}/sampled_corpus.json"
    paths_dict['sampled corpus'] = json_path
    train_path = f"{absolute_path}/{folder_basename}/titles_train.pkl"
    paths_dict['train path'] = train_path
    dev_path = f"{absolute_path}/{folder_basename}/titles_dev.pkl"
    paths_dict['dev path'] = dev_path
    test_path = f"{absolute_path}/{folder_basename}/titles_test.pkl"
    paths_dict['test path'] = test_path
    dev_report = f"{absolute_path}/{folder_basename}/{experiment}/dev_report.txt"
    paths_dict['dev report'] = dev_report
    test_report = f"{absolute_path}/{folder_basename}/{experiment}/test_report.txt"
    paths_dict['test report'] = test_report
    unknown_path = f"{absolute_path}/{folder_basename}/unknown_distance.json"
    paths_dict['unknown distance'] = unknown_path
    model_path = f"{absolute_path}/{folder_basename}/{experiment}/model.pkl"
    paths_dict['model'] = model_path
    experiment_folder = f"{absolute_path}/{folder_basename}/{experiment}"
    paths_dict["experiment folder"] = experiment_folder
    error_analysis_path = f"{experiment_folder}/error_analysis.xlsx"
    paths_dict["error analysis path"] = error_analysis_path
    if verbose >= 1:
        print(folder)
return paths_dict | 0.500732 | 0.327695 |
import sqlite3
import glob
class Run:
    """One solver run parsed from a ``run-*`` output file."""
    def __init__(self):
        self.n = 0  # puzzle dimension
        self.hints = 0  # number of given hints
        # Grid fields arrive pre-quoted for SQL interpolation ("'...'")
        # or as the literal text 'NULL' when the run timed out.
        self.solution_grid = None
        self.input_grid = None
        self.output_grid = None
        self.time_millis = 0  # wall-clock solve time in milliseconds
    def __str__(self):
        """Render every field for quick debugging output."""
        return "{n = %d, hints = %d, solution = %s, input = %s, output = %s, time = %d}" % \
        (self.n, self.hints, self.solution_grid, self.input_grid, self.output_grid, self.time_millis)
    def insert(self, cursor):
        """Insert this run as a row of the ``runs`` table.

        HACK: the SQL is built by %-interpolation, which only works
        because the grid fields are pre-quoted upstream; it is unsafe
        in general (quoting bugs / SQL injection) — prefer a
        parameterized query ("values (?, ?, ?, ?, ?, ?)") if reused.
        """
        cursor.execute("insert into runs (n, hints, solution_grid, input_grid, output_grid, time_millis) values (%d, %d, %s, %s, %s, %d)" %\
        (self.n, self.hints, self.solution_grid, self.input_grid, self.output_grid, self.time_millis))
def split_list(lst):
    """Split a flat list into sub-lists on empty-string separators.

    Consecutive separators yield no empty groups; unlike the original,
    a trailing separator (or an empty input) no longer produces a
    spurious empty group at the end, matching the mid-list behaviour.

    Args:
        lst: iterable of strings; '' marks a group boundary.

    Returns:
        List of non-empty lists holding the non-separator items in order.
    """
    ret = []
    current = []
    for item in lst:
        if item == '':
            if current:
                ret.append(current)
                current = []
        else:
            current.append(item)
    # Flush the final group only if it actually holds anything.
    if current:
        ret.append(current)
    return ret
def load_run(fname):
    """Parse one solver-run output file into a Run instance.

    The file name must look like ``run-<n>-<hints>-<number>.<ext>``.
    The body holds blank-line-separated groups: solution grid, input
    grid, output grid, then a ``time: <millis>`` line; a file starting
    with ``Timeout`` has no grids and gets the 600 s cap instead.

    Returns the populated Run, or None if the file cannot be read.
    """
    try:
        _, n, nhints, number = fname.split('-')
        number, _ = number.split('.')
        # `with` guarantees the handle is closed (the original leaked it).
        with open(fname) as fd:
            lines = [x.strip() for x in fd.readlines()]
        ret_run = Run()
        lines = split_list(lines)
        ret_run.n = int(n)
        ret_run.hints = int(nhints)
        if lines[0][0] == 'Timeout':
            # Grids stored as the SQL literal NULL; cap time at 10 min.
            ret_run.solution_grid = 'NULL'
            ret_run.input_grid = 'NULL'
            ret_run.output_grid = 'NULL'
            ret_run.time_millis = 600000
        else:
            # Grids are pre-quoted for direct SQL interpolation later.
            ret_run.solution_grid = "'" + ''.join(lines[0]) + "'"
            ret_run.input_grid = "'" + ''.join(lines[1]) + "'"
            ret_run.output_grid = "'" + ''.join(lines[2]) + "'"
            ret_run.time_millis = int(lines[3][0][6:])
        return ret_run
    except IOError as e:
        print(e)
        return None
# Script entry: parse every run-* file in the working directory and
# insert it into the local SQLite database.
connection = sqlite3.connect('database.db')
cursor = connection.cursor()
filenames = glob.glob('run*')
for fname in filenames:
    run = load_run(fname)
    print (run)
    # NOTE(review): load_run returns None when a file cannot be read,
    # which would make this call raise AttributeError — confirm intended.
    run.insert(cursor)
connection.commit()
connection.close() | runs/database.py | import sqlite3
import glob
class Run:
    """One solver run parsed from a ``run-*`` output file."""
    def __init__(self):
        self.n = 0  # puzzle dimension
        self.hints = 0  # number of given hints
        # Grid fields arrive pre-quoted for SQL interpolation ("'...'")
        # or as the literal text 'NULL' when the run timed out.
        self.solution_grid = None
        self.input_grid = None
        self.output_grid = None
        self.time_millis = 0  # wall-clock solve time in milliseconds
    def __str__(self):
        """Render every field for quick debugging output."""
        return "{n = %d, hints = %d, solution = %s, input = %s, output = %s, time = %d}" % \
        (self.n, self.hints, self.solution_grid, self.input_grid, self.output_grid, self.time_millis)
    def insert(self, cursor):
        """Insert this run as a row of the ``runs`` table.

        HACK: the SQL is built by %-interpolation, which only works
        because the grid fields are pre-quoted upstream; it is unsafe
        in general (quoting bugs / SQL injection) — prefer a
        parameterized query ("values (?, ?, ?, ?, ?, ?)") if reused.
        """
        cursor.execute("insert into runs (n, hints, solution_grid, input_grid, output_grid, time_millis) values (%d, %d, %s, %s, %s, %d)" %\
        (self.n, self.hints, self.solution_grid, self.input_grid, self.output_grid, self.time_millis))
def split_list(lst):
    """Split a flat list into sub-lists on empty-string separators.

    Consecutive separators yield no empty groups; unlike the original,
    a trailing separator (or an empty input) no longer produces a
    spurious empty group at the end, matching the mid-list behaviour.

    Args:
        lst: iterable of strings; '' marks a group boundary.

    Returns:
        List of non-empty lists holding the non-separator items in order.
    """
    ret = []
    current = []
    for item in lst:
        if item == '':
            if current:
                ret.append(current)
                current = []
        else:
            current.append(item)
    # Flush the final group only if it actually holds anything.
    if current:
        ret.append(current)
    return ret
def load_run(fname):
    """Parse one solver-run output file into a Run instance.

    The file name must look like ``run-<n>-<hints>-<number>.<ext>``.
    The body holds blank-line-separated groups: solution grid, input
    grid, output grid, then a ``time: <millis>`` line; a file starting
    with ``Timeout`` has no grids and gets the 600 s cap instead.

    Returns the populated Run, or None if the file cannot be read.
    """
    try:
        _, n, nhints, number = fname.split('-')
        number, _ = number.split('.')
        # `with` guarantees the handle is closed (the original leaked it).
        with open(fname) as fd:
            lines = [x.strip() for x in fd.readlines()]
        ret_run = Run()
        lines = split_list(lines)
        ret_run.n = int(n)
        ret_run.hints = int(nhints)
        if lines[0][0] == 'Timeout':
            # Grids stored as the SQL literal NULL; cap time at 10 min.
            ret_run.solution_grid = 'NULL'
            ret_run.input_grid = 'NULL'
            ret_run.output_grid = 'NULL'
            ret_run.time_millis = 600000
        else:
            # Grids are pre-quoted for direct SQL interpolation later.
            ret_run.solution_grid = "'" + ''.join(lines[0]) + "'"
            ret_run.input_grid = "'" + ''.join(lines[1]) + "'"
            ret_run.output_grid = "'" + ''.join(lines[2]) + "'"
            ret_run.time_millis = int(lines[3][0][6:])
        return ret_run
    except IOError as e:
        print(e)
        return None
# Script entry: parse every run-* file in the working directory and
# insert it into the local SQLite database.
connection = sqlite3.connect('database.db')
cursor = connection.cursor()
filenames = glob.glob('run*')
for fname in filenames:
    run = load_run(fname)
    print (run)
    # NOTE(review): load_run returns None when a file cannot be read,
    # which would make this call raise AttributeError — confirm intended.
    run.insert(cursor)
connection.commit()
connection.close() | 0.238639 | 0.165796 |
import argparse
import matplotlib as mpl
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.colors import ListedColormap
import matplotlib.gridspec as gridspec
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.cbook
import scipy
from scipy import cluster
from scipy.spatial import distance
from scipy.cluster import hierarchy
from GraphFangLibrary import collect_sum_two_nucleotides
import GraphTableLibrary
import GlobalVariables
# Reduce each id/tissue x location to get a corresponding tissue/id list of associations
def collect_index_by_association(dataframe,yItem,xItem):
	"""Collect, per (methLoc, yItem), the unique xItem values seen there.

	Returns a (plus, minus) pair of data frames with columns
	['methLoc', yItem, '<xItem>_r'], the last holding the unique xItem
	values grouped per location/yItem pair. NOTE: Python 2 code.
	"""
	# check re-methFreq process
	# Separate by strand: 'C' rows go to the Plus frame, 'G' rows to the
	# Minus frame, both restricted to the element window plus the
	# methylation flank on either side.
	PlusMeth = dataframe.loc[(dataframe['Cytosine'] == 'C') & (dataframe['methLoc'] >= (GlobalVariables.plotLineLocationThree-GlobalVariables.methylationflank)) & (dataframe['methLoc'] <= (GlobalVariables.plotLineLocationFour+GlobalVariables.methylationflank))]
	MinusMeth = dataframe.loc[(dataframe['Cytosine'] == 'G') & (dataframe['methLoc'] >= (GlobalVariables.plotLineLocationThree-GlobalVariables.methylationflank)) & (dataframe['methLoc'] <= (GlobalVariables.plotLineLocationFour+GlobalVariables.methylationflank))]
	# Subset just columns to use
	PlussubMeth = PlusMeth[['methLoc',yItem,xItem]]
	MinussubMeth = MinusMeth[['methLoc',yItem,xItem]]
	# methFreq = row count per (location, yItem, xItem) combination.
	PlussubMeth['methFreq'] = PlussubMeth.groupby(['methLoc',yItem,xItem])['methLoc'].transform('count')
	MinussubMeth['methFreq'] = MinussubMeth.groupby(['methLoc',yItem,xItem])['methLoc'].transform('count')
	# Grouping a collection of a values in the xItem column
	PlusgroupMeth = PlussubMeth.join(PlussubMeth.groupby(['methLoc',yItem])[xItem].unique(),on=['methLoc',yItem],rsuffix='_r')
	MinusgroupMeth = MinussubMeth.join(MinussubMeth.groupby(['methLoc',yItem])[xItem].unique(),on=['methLoc',yItem],rsuffix='_r')
	# Just the new xItem list, and location
	PlusxMeth = PlusgroupMeth[['methLoc',yItem,'{0}_r'.format(xItem)]].drop_duplicates(['methLoc',yItem],keep='last')
	MinusxMeth = MinusgroupMeth[['methLoc',yItem,'{0}_r'.format(xItem)]].drop_duplicates(['methLoc',yItem],keep='last')
	print 'Collected list of {0}s associated with Location and {1}'.format(xItem,yItem)
	return PlusxMeth,MinusxMeth
# Get Tissue x Id
def collect_tissue_by_id_dataframe(dataframe,yItem,zItem):
	"""Cross-tabulate methylation counts of yItem against zItem.

	Returns a (plus, minus) pair of float data frames whose rows are
	yItem values, columns zItem values, and cells the highest observed
	count for each pair (0 where absent). NOTE: Python 2 code.
	"""
	# Separate by strand
	PlusMeth = dataframe.loc[(dataframe['Cytosine'] == 'C') & (dataframe['methLoc'] >= (GlobalVariables.plotLineLocationThree-GlobalVariables.methylationflank)) & (dataframe['methLoc'] <= (GlobalVariables.plotLineLocationFour+GlobalVariables.methylationflank))]
	MinusMeth = dataframe.loc[(dataframe['Cytosine'] == 'G') & (dataframe['methLoc'] >= (GlobalVariables.plotLineLocationThree-GlobalVariables.methylationflank)) & (dataframe['methLoc'] <= (GlobalVariables.plotLineLocationFour+GlobalVariables.methylationflank))]
	# Subset just columns to use
	PlussubMeth = PlusMeth[[yItem,zItem]]
	MinussubMeth = MinusMeth[[yItem,zItem]]
	# methFreq = row count per (yItem, zItem) combination.
	PlussubMeth['methFreq'] = PlussubMeth.groupby([yItem,zItem])[yItem].transform('count')
	MinussubMeth['methFreq'] = MinussubMeth.groupby([yItem,zItem])[yItem].transform('count')
	# Sort ascending, in order to only use the highest value with keep = last
	PlussortMeth = PlussubMeth.sort_values(['methFreq'],ascending=True)
	MinussortMeth = MinussubMeth.sort_values(['methFreq'],ascending=True)
	PlusdupMeth = PlussortMeth.drop_duplicates(['methFreq',yItem,zItem],keep='last')
	MinusdupMeth = MinussortMeth.drop_duplicates(['methFreq',yItem,zItem],keep='last')
	# Pivot the data frame so that each tissue/cell type is a column
	PluspivotMeth = pd.pivot_table(PlusdupMeth,index=[yItem],columns=[zItem],values='methFreq',fill_value=0)
	MinuspivotMeth = pd.pivot_table(MinusdupMeth,index=[yItem],columns=[zItem],values='methFreq',fill_value=0)
	PluspivotMeth.columns.name = None
	MinuspivotMeth.columns.name = None
	# Remove the index column name
	PluspivotMeth.index.name = None
	MinuspivotMeth.index.name = None
	PlusfloatMeth = PluspivotMeth[PluspivotMeth.columns].astype(float)
	MinusfloatMeth = MinuspivotMeth[MinuspivotMeth.columns].astype(float)
	print 'Collected {0} by {1} into data frame for Frequency'.format(yItem,zItem)
	return PlusfloatMeth,MinusfloatMeth
# Transform the Frequency, Percentage and Coverage data into graphable data frames, returning just the info for the element
def collect_methylation_by_index(dataframe,yItem):
	"""Pivot per-location methylation counts into a yItem-by-position frame.

	Returns a (plus, minus) pair of transposed float frames restricted
	to the element window plus methylation flank. NOTE: Python 2 code.
	"""
	# x item is methLoc, y item is either tissue or id, z item is coverage, percentage, or frequency
	# NOTE(review): `num` is not defined anywhere visible in this module —
	# this line raises NameError as written; likely meant to come from
	# GlobalVariables. Confirm before fixing.
	new_index = range(0,num)
	# Separate by strand
	PlusMeth = dataframe.loc[dataframe['Cytosine'] == 'C']
	MinusMeth = dataframe.loc[dataframe['Cytosine'] == 'G']
	# Subset just columns to use
	PlussubMeth = PlusMeth[['methLoc',yItem]]
	MinussubMeth = MinusMeth[['methLoc',yItem]]
	# methFreq = row count per (location, yItem) combination.
	PlussubMeth['methFreq'] = PlussubMeth.groupby(['methLoc',yItem])['methLoc'].transform('count')
	MinussubMeth['methFreq'] = MinussubMeth.groupby(['methLoc',yItem])['methLoc'].transform('count')
	# Sort ascending, in order to only use the highest value with keep = last
	PlussortMeth = PlussubMeth.sort_values(['methLoc'],ascending=True)
	MinussortMeth = MinussubMeth.sort_values(['methLoc'],ascending=True)
	PlusdupMeth = PlussortMeth.drop_duplicates(['methLoc',yItem,'methFreq'],keep='last')
	MinusdupMeth = MinussortMeth.drop_duplicates(['methLoc',yItem,'methFreq'],keep='last')
	# Pivot the data frame so that each tissue/cell type is a column
	PluspivotMeth = pd.pivot_table(PlusdupMeth,index='methLoc',columns=[yItem],values='methFreq',fill_value=0)
	MinuspivotMeth = pd.pivot_table(MinusdupMeth,index='methLoc',columns=[yItem],values='methFreq',fill_value=0)
	PluspivotMeth.columns.name = None
	MinuspivotMeth.columns.name = None
	# Give new index, using the methLocations
	PlusindexMeth = PluspivotMeth.reindex(new_index,fill_value=0)
	MinusindexMeth = MinuspivotMeth.reindex(new_index,fill_value=0)
	# Remove the index column name
	PlusindexMeth.index.name = None
	MinusindexMeth.index.name = None
	# Get just the element
	Pluselement = PlusindexMeth[(GlobalVariables.plotLineLocationThree-GlobalVariables.methylationflank):(GlobalVariables.plotLineLocationFour+GlobalVariables.methylationflank)]
	Minuselement = MinusindexMeth[(GlobalVariables.plotLineLocationThree-GlobalVariables.methylationflank):(GlobalVariables.plotLineLocationFour+GlobalVariables.methylationflank)]
	# Transpose the data frame for easy input into the heatamp
	PlustransMeth = Pluselement.T
	MinustransMeth = Minuselement.T
	PlustransMeth = PlustransMeth[PlustransMeth.columns].astype(float)
	MinustransMeth = MinustransMeth[MinustransMeth.columns].astype(float)
	print 'Converted {0} by Frequency into data frame'.format(yItem)
	return PlustransMeth,MinustransMeth
# Make dictionary for row and column colors based on standard deviation
def make_dictionary_for_colors(ATelement,huslPalette):
	"""Map std-deviation octile bins of ATelement onto huslPalette colors.

	Returns (elementColors, positionColors): color series for the
	per-column stds and per-row (axis=1) stds respectively.
	NOTE: Python 2 code.
	"""
	# Bin the standard deviations (across rows and across columns) into
	# 8 quantile labels, then map each label to a palette color.
	ATQcutPosition = pd.qcut(ATelement.std(axis=1),q=8,labels=False)
	ATQcutElement = pd.qcut(ATelement.std(),q=8,labels=False)
	lutElement = dict(zip(ATQcutElement.unique(), huslPalette))
	elementColors = ATQcutElement.map(lutElement)
	lutPosition = dict(zip(ATQcutPosition.unique(), huslPalette))
	positionColors = ATQcutPosition.map(lutPosition)
	print 'Made dictionary for standard deviation'
	return elementColors,positionColors
# Make some graphs for fangs
def graph_cluster(dfWindow,ranWindow,pdMeth,rnMeth,names,fileName):
    """
    Build 'Cluster_<fileName>.pdf': clustered heatmaps of AT content and
    methylation frequency for elements and their random-region controls.

    Pages, in order:
      1-2   mean AT content per element / per random region
      3-6   methylation frequency, tissue x position (plus/minus strand,
            element/random)
      7-10  methylation frequency, element x position (plus/minus strand,
            element/random)
      11-14 methylation frequency, element x tissue (plus/minus strand,
            element/random)

    dfWindow/ranWindow feed the AT-content heatmaps via
    collect_sum_two_nucleotides; pdMeth/rnMeth feed the methylation heatmaps;
    names selects the nucleotide columns and fileName labels the output PDF.
    Returns None; the PDF is written as a side effect.
    """
    plt.figure(figsize=(7,7))
    # Get group, mean and standard deviation for AT content
    ATgroup,ATmean,ATstd = collect_sum_two_nucleotides(dfWindow,names,'A','T')
    ranATgroup,ranATmean,ranATstd = collect_sum_two_nucleotides(ranWindow,names,'A','T')
    # Slice down to just the element plus the methylation flanks
    ATelement = ATgroup.T[(GlobalVariables.plotLineLocationThree-GlobalVariables.methylationflank):(GlobalVariables.plotLineLocationFour+GlobalVariables.methylationflank)]
    ranATelement = ranATgroup.T[(GlobalVariables.plotLineLocationThree-GlobalVariables.methylationflank):(GlobalVariables.plotLineLocationFour+GlobalVariables.methylationflank)]
    print('Extracted just element and methylation flank, size {0}'.format(len(ATelement)))
    # Title info (adjacent string literals concatenate to ' - UCES')
    info = str(fileName) + ', '+ str(len(ATgroup.index)) + ' - ' "UCES"
    # Plot settings
    sns.set_style('ticks')
    plt.suptitle(info,fontsize=10)
    pp = PdfPages('Cluster_{0}.pdf'.format(fileName))
    sns.set_palette("husl",n_colors=8)
    # Row/column colors flag elements/positions with similar standard deviation
    huslPalette = sns.husl_palette(8, s=.45)
    elementColors,positionColors = make_dictionary_for_colors(ATelement,huslPalette)
    heatmap0 = sns.clustermap(ATelement.T,cmap='RdPu',vmin=0,vmax=100,xticklabels=50,col_cluster=False,row_colors=elementColors,col_colors=positionColors)
    _finish_heatmap_page(heatmap0,pp,'{0} UCEs'.format(len(ATelement.T.index)),'Mean AT Content per Element',ylabel_size=8)
    ranelementColors,ranpositionColors = make_dictionary_for_colors(ranATelement,huslPalette)
    heatmap1 = sns.clustermap(ranATelement.T,cmap='RdPu',vmin=0,vmax=100,xticklabels=50,col_cluster=False,row_colors=ranelementColors,col_colors=ranpositionColors)
    _finish_heatmap_page(heatmap1,pp,'{0} UCEs'.format(len(ranATelement.T.index)),'Mean AT Content per Random Region',ylabel_size=8)
    print('Plotted cluster plot for mean AT content for all elements and random regions')
    # Frequency x Tissue x ID x Location data frames (element plus meth flanks)
    FreqPlusID,FreqMinusID = collect_methylation_by_index(pdMeth,'id')
    FreqPlusTis,FreqMinusTis = collect_methylation_by_index(pdMeth,'tissue')
    XPlus,XMinus = collect_tissue_by_id_dataframe(pdMeth,'id','tissue')
    ranFreqPlusID,ranFreqMinusID = collect_methylation_by_index(rnMeth,'id')
    ranFreqPlusTis,ranFreqMinusTis = collect_methylation_by_index(rnMeth,'tissue')
    ranXPlus,ranXMinus = collect_tissue_by_id_dataframe(rnMeth,'id','tissue')
    # Remove UCEs without methylation within the element - only for ID group
    FreqPlusID = FreqPlusID[(FreqPlusID.T != 0).any()]
    FreqMinusID = FreqMinusID[(FreqMinusID.T != 0).any()]
    ranFreqPlusID = ranFreqPlusID[(ranFreqPlusID.T != 0).any()]
    ranFreqMinusID = ranFreqMinusID[(ranFreqMinusID.T != 0).any()]
    # Tissue x position frequency heatmaps
    heatmap2 = sns.clustermap(FreqPlusTis,cmap='RdPu',xticklabels=50,col_cluster=False)
    _finish_heatmap_page(heatmap2,pp,'Sample','Methylation Frequency on Plus Strand for Elements',show_yticklabels=True)
    heatmap3 = sns.clustermap(FreqMinusTis,cmap='RdPu',xticklabels=50,col_cluster=False)
    _finish_heatmap_page(heatmap3,pp,'Sample','Methylation Frequency on Minus Strand for Elements',show_yticklabels=True)
    print('Plotted methylation frequency for tissue types x position, for element')
    heatmap4 = sns.clustermap(ranFreqPlusTis,cmap='RdPu',xticklabels=50,col_cluster=False)
    _finish_heatmap_page(heatmap4,pp,'Sample','Methylation Frequency on Plus Strand for Random Regions',show_yticklabels=True)
    heatmap5 = sns.clustermap(ranFreqMinusTis,cmap='RdPu',xticklabels=50,col_cluster=False)
    _finish_heatmap_page(heatmap5,pp,'Sample','Methylation Frequency on Minus Strand for Random Regions',show_yticklabels=True)
    print('Plotted methylation frequency for tissue types x position, for random regions')
    # Element x position frequency heatmaps
    heatmap6 = sns.clustermap(FreqPlusID,cmap='RdPu',xticklabels=50,col_cluster=False)
    _finish_heatmap_page(heatmap6,pp,'{0} Elements'.format(len(FreqPlusID.index)),'Methylation Frequency on Plus Strand for Elements',show_yticklabels=True)
    heatmap7 = sns.clustermap(FreqMinusID,cmap='RdPu',xticklabels=50,col_cluster=False)
    _finish_heatmap_page(heatmap7,pp,'{0} Elements'.format(len(FreqMinusID.index)),'Methylation Frequency on Minus Strand for Elements',show_yticklabels=True)
    print('Plotted methylation frequency for element x position , element')
    heatmap8 = sns.clustermap(ranFreqPlusID,cmap='RdPu',xticklabels=50,col_cluster=False)
    _finish_heatmap_page(heatmap8,pp,'{0} Elements'.format(len(ranFreqPlusID.index)),'Methylation Frequency on Plus Strand for Random Regions')
    # (fixes the 'StrandStrand' typo that was in this page's title)
    heatmap9 = sns.clustermap(ranFreqMinusID,cmap='RdPu',xticklabels=50,col_cluster=False)
    _finish_heatmap_page(heatmap9,pp,'{0} Elements'.format(len(ranFreqMinusID.index)),'Methylation Frequency on Minus Strand for Random Regions')
    print('Plotted methylation frequency for element x position , random regions')
    # Element x tissue frequency heatmaps (both axes clustered, no guide lines)
    heatmap10 = sns.clustermap(XPlus,cmap='RdPu')
    _finish_heatmap_page(heatmap10,pp,'{0} Elements'.format(len(FreqPlusID.index)),'Methylation Frequency on Plus Strand for Elements',xlabel='Sample',show_yticklabels=True,element_lines=False)
    heatmap11 = sns.clustermap(XMinus,cmap='RdPu')
    _finish_heatmap_page(heatmap11,pp,'{0} Elements'.format(len(FreqMinusID.index)),'Methylation Frequency on Minus Strand for Elements',xlabel='Sample',show_yticklabels=True,element_lines=False)
    print('Plotted methylation frequency for element x tissue type , element')
    heatmap12 = sns.clustermap(ranXPlus,cmap='RdPu')
    _finish_heatmap_page(heatmap12,pp,'{0} Elements'.format(len(ranFreqPlusID.index)),'Methylation Frequency on Plus Strand for Random Regions',xlabel='Sample',element_lines=False)
    heatmap13 = sns.clustermap(ranXMinus,cmap='RdPu')
    _finish_heatmap_page(heatmap13,pp,'{0} Elements'.format(len(ranFreqMinusID.index)),'Methylation Frequency on Minus Strand for Random Regions',xlabel='Sample',element_lines=False)
    print('Plotted methylation frequency for element x position , random regions')
    # Final save of the current figure (as the original flow did) and close
    sns.despine()
    pp.savefig()
    pp.close()

def _finish_heatmap_page(grid,pp,ylabel,title,xlabel='Position',ylabel_size=10,show_yticklabels=False,element_lines=True):
    """
    Apply the styling shared by every clustermap page, then save the page.

    grid: seaborn ClusterGrid; pp: open PdfPages to append the page to.
    show_yticklabels keeps the row labels (rotated flat) instead of hiding the
    y ticks; element_lines draws the dashed boundary guides at the four global
    plot-line positions.
    """
    ax = grid.ax_heatmap
    if show_yticklabels:
        ax.set_yticklabels(ax.get_yticklabels(),rotation=0)
    else:
        ax.set_yticks([])
    ax.yaxis.tick_right()
    ax.set_ylabel(ylabel,size=ylabel_size)
    ax.set_xlabel(xlabel,size=10)
    ax.tick_params(labelsize=10)
    if element_lines:
        # Green dashed guides marking the element boundaries and flanks
        boundaries = ((GlobalVariables.plotLineLocationOneFull,'#96c85b'),
            (GlobalVariables.plotLineLocationTwoFull,'#5fc85b'),
            (GlobalVariables.plotLineLocationThreeFull,'#5fc85b'),
            (GlobalVariables.plotLineLocationFourFull,'#96c85b'))
        for location,color in boundaries:
            ax.axvline(x=location,linewidth=.05,linestyle='dashed',color=color,alpha=0.5)
    ax.set_title(title,size=12)
    sns.despine()
    pp.savefig()
def main(dfWindow,ranWindow,pdMeth,rnMeth,names,fileName):
    """Entry point: log the run and delegate straight to graph_cluster."""
    # print statement converted to print() for Python 2/3 compatibility
    print('Running graph_clusterLibrary')
    graph_cluster(dfWindow,ranWindow,pdMeth,rnMeth,names,fileName)
if __name__ == "__main__":
    main()

import argparse
import matplotlib as mpl
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.colors import ListedColormap
import matplotlib.gridspec as gridspec
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.cbook
import scipy
from scipy import cluster
from scipy.spatial import distance
from scipy.cluster import hierarchy
from GraphFangLibrary import collect_sum_two_nucleotides
import GraphTableLibrary
import GlobalVariables
# Reduce each id/tissue x location to get a corresponding tissue/id list of associations
def collect_index_by_association(dataframe,yItem,xItem):
    """
    For each methylation location x yItem pair, count its occurrences and
    collect the unique xItem values associated with it, one frame per strand.

    Returns (PlusxMeth, MinusxMeth): frames with one row per (methLoc, yItem)
    and columns ['methLoc', yItem, '<xItem>_r'], where '<xItem>_r' holds the
    array of unique xItem values seen at that location/yItem.
    """
    # check re-methFreq process
    # Separate by strand ('C' = plus, 'G' = minus), restricted to the element
    # plus the methylation flanks
    PlusMeth = dataframe.loc[(dataframe['Cytosine'] == 'C') & (dataframe['methLoc'] >= (GlobalVariables.plotLineLocationThree-GlobalVariables.methylationflank)) & (dataframe['methLoc'] <= (GlobalVariables.plotLineLocationFour+GlobalVariables.methylationflank))]
    MinusMeth = dataframe.loc[(dataframe['Cytosine'] == 'G') & (dataframe['methLoc'] >= (GlobalVariables.plotLineLocationThree-GlobalVariables.methylationflank)) & (dataframe['methLoc'] <= (GlobalVariables.plotLineLocationFour+GlobalVariables.methylationflank))]
    # Subset just the columns to use; .copy() so the count column added below
    # does not trigger pandas' SettingWithCopy warning
    PlussubMeth = PlusMeth[['methLoc',yItem,xItem]].copy()
    MinussubMeth = MinusMeth[['methLoc',yItem,xItem]].copy()
    # methFreq = number of records sharing the same (methLoc, yItem, xItem)
    PlussubMeth['methFreq'] = PlussubMeth.groupby(['methLoc',yItem,xItem])['methLoc'].transform('count')
    MinussubMeth['methFreq'] = MinussubMeth.groupby(['methLoc',yItem,xItem])['methLoc'].transform('count')
    # Attach, per (methLoc, yItem), the unique collection of xItem values
    PlusgroupMeth = PlussubMeth.join(PlussubMeth.groupby(['methLoc',yItem])[xItem].unique(),on=['methLoc',yItem],rsuffix='_r')
    MinusgroupMeth = MinussubMeth.join(MinussubMeth.groupby(['methLoc',yItem])[xItem].unique(),on=['methLoc',yItem],rsuffix='_r')
    # Keep just the new xItem list and location, one row per (methLoc, yItem)
    PlusxMeth = PlusgroupMeth[['methLoc',yItem,'{0}_r'.format(xItem)]].drop_duplicates(['methLoc',yItem],keep='last')
    MinusxMeth = MinusgroupMeth[['methLoc',yItem,'{0}_r'.format(xItem)]].drop_duplicates(['methLoc',yItem],keep='last')
    # print statement converted to print() for Python 2/3 compatibility
    print('Collected list of {0}s associated with Location and {1}'.format(xItem,yItem))
    return PlusxMeth,MinusxMeth
# Get Tissue x Id
def collect_tissue_by_id_dataframe(dataframe,yItem,zItem):
    """
    Count methylation records for each yItem x zItem pair (e.g. id x tissue)
    and pivot into a yItem-indexed, zItem-columned float frame per strand.

    Only records inside the element plus the methylation flanks are counted;
    'C' cytosines are treated as plus strand and 'G' as minus strand.

    Returns (PlusfloatMeth, MinusfloatMeth): float frames with missing pairs
    filled with 0.
    """
    # Separate by strand, restricted to the element plus methylation flanks
    PlusMeth = dataframe.loc[(dataframe['Cytosine'] == 'C') & (dataframe['methLoc'] >= (GlobalVariables.plotLineLocationThree-GlobalVariables.methylationflank)) & (dataframe['methLoc'] <= (GlobalVariables.plotLineLocationFour+GlobalVariables.methylationflank))]
    MinusMeth = dataframe.loc[(dataframe['Cytosine'] == 'G') & (dataframe['methLoc'] >= (GlobalVariables.plotLineLocationThree-GlobalVariables.methylationflank)) & (dataframe['methLoc'] <= (GlobalVariables.plotLineLocationFour+GlobalVariables.methylationflank))]
    # Subset just the columns to use; .copy() so the count column added below
    # does not trigger pandas' SettingWithCopy warning
    PlussubMeth = PlusMeth[[yItem,zItem]].copy()
    MinussubMeth = MinusMeth[[yItem,zItem]].copy()
    # methFreq = number of records sharing the same (yItem, zItem) pair
    PlussubMeth['methFreq'] = PlussubMeth.groupby([yItem,zItem])[yItem].transform('count')
    MinussubMeth['methFreq'] = MinussubMeth.groupby([yItem,zItem])[yItem].transform('count')
    # Sort ascending so keep='last' retains the highest frequency per pair
    PlussortMeth = PlussubMeth.sort_values(['methFreq'],ascending=True)
    MinussortMeth = MinussubMeth.sort_values(['methFreq'],ascending=True)
    PlusdupMeth = PlussortMeth.drop_duplicates(['methFreq',yItem,zItem],keep='last')
    MinusdupMeth = MinussortMeth.drop_duplicates(['methFreq',yItem,zItem],keep='last')
    # Pivot so that each zItem (tissue/cell type) becomes a column
    PluspivotMeth = pd.pivot_table(PlusdupMeth,index=[yItem],columns=[zItem],values='methFreq',fill_value=0)
    MinuspivotMeth = pd.pivot_table(MinusdupMeth,index=[yItem],columns=[zItem],values='methFreq',fill_value=0)
    PluspivotMeth.columns.name = None
    MinuspivotMeth.columns.name = None
    # Remove the index column name
    PluspivotMeth.index.name = None
    MinuspivotMeth.index.name = None
    PlusfloatMeth = PluspivotMeth[PluspivotMeth.columns].astype(float)
    MinusfloatMeth = MinuspivotMeth[MinuspivotMeth.columns].astype(float)
    # print statement converted to print() for Python 2/3 compatibility
    print('Collected {0} by {1} into data frame for Frequency'.format(yItem,zItem))
    return PlusfloatMeth,MinusfloatMeth
# Transform the Frequency, Percentage and Coverage data into graphable data frames, returning just the info for the element
def collect_methylation_by_index(dataframe,yItem):
    """
    Transform per-record methylation data into per-position frequency frames,
    one per strand, restricted to the element plus the methylation flanks.

    Rows of the result are yItem values (tissue or id), columns are positions,
    values are counts of records at that position for that yItem (0 where no
    methylation was observed).

    Returns (PlustransMeth, MinustransMeth) as float frames.
    """
    # x item is methLoc, y item is either tissue or id, values are frequencies
    # NOTE(review): 'num' is not defined in this function or its parameters -
    # presumably a module-level window length set elsewhere in the file;
    # confirm before reuse.
    new_index = range(0,num)
    # Separate by strand ('C' = plus, 'G' = minus)
    PlusMeth = dataframe.loc[dataframe['Cytosine'] == 'C']
    MinusMeth = dataframe.loc[dataframe['Cytosine'] == 'G']
    # Subset just the columns to use; .copy() so the count column added below
    # does not trigger pandas' SettingWithCopy warning
    PlussubMeth = PlusMeth[['methLoc',yItem]].copy()
    MinussubMeth = MinusMeth[['methLoc',yItem]].copy()
    # methFreq = number of records at the same location for the same yItem
    PlussubMeth['methFreq'] = PlussubMeth.groupby(['methLoc',yItem])['methLoc'].transform('count')
    MinussubMeth['methFreq'] = MinussubMeth.groupby(['methLoc',yItem])['methLoc'].transform('count')
    # Sort ascending, in order to only use the highest value with keep='last'
    PlussortMeth = PlussubMeth.sort_values(['methLoc'],ascending=True)
    MinussortMeth = MinussubMeth.sort_values(['methLoc'],ascending=True)
    PlusdupMeth = PlussortMeth.drop_duplicates(['methLoc',yItem,'methFreq'],keep='last')
    MinusdupMeth = MinussortMeth.drop_duplicates(['methLoc',yItem,'methFreq'],keep='last')
    # Pivot so each tissue/cell type (yItem value) becomes a column
    PluspivotMeth = pd.pivot_table(PlusdupMeth,index='methLoc',columns=[yItem],values='methFreq',fill_value=0)
    MinuspivotMeth = pd.pivot_table(MinusdupMeth,index='methLoc',columns=[yItem],values='methFreq',fill_value=0)
    PluspivotMeth.columns.name = None
    MinuspivotMeth.columns.name = None
    # Reindex over the full window so every position appears (0 where absent)
    PlusindexMeth = PluspivotMeth.reindex(new_index,fill_value=0)
    MinusindexMeth = MinuspivotMeth.reindex(new_index,fill_value=0)
    # Remove the index column name
    PlusindexMeth.index.name = None
    MinusindexMeth.index.name = None
    # Slice down to just the element plus the methylation flanks
    Pluselement = PlusindexMeth[(GlobalVariables.plotLineLocationThree-GlobalVariables.methylationflank):(GlobalVariables.plotLineLocationFour+GlobalVariables.methylationflank)]
    Minuselement = MinusindexMeth[(GlobalVariables.plotLineLocationThree-GlobalVariables.methylationflank):(GlobalVariables.plotLineLocationFour+GlobalVariables.methylationflank)]
    # Transpose for easy input into the heatmap (samples become rows);
    # astype(float) replaces the redundant df[df.columns].astype(float) form
    PlustransMeth = Pluselement.T.astype(float)
    MinustransMeth = Minuselement.T.astype(float)
    # print statement converted to print() for Python 2/3 compatibility
    print('Converted {0} by Frequency into data frame'.format(yItem))
    return PlustransMeth,MinustransMeth
# Make dictionary for row and column colors based on standard deviation
def make_dictionary_for_colors(ATelement,huslPalette):
    """
    Map per-position and per-element standard deviations to palette colors.

    ATelement is a positions-x-elements frame: std(axis=1) scores each row
    (position) and std() scores each column (element).  Each set of scores is
    binned into 8 quantiles with pd.qcut, and each quantile label is paired
    with one entry of huslPalette (an 8-color sequence).

    Returns (elementColors, positionColors): Series of colors aligned with
    ATelement's columns and rows respectively, suitable for seaborn's
    row_colors/col_colors arguments.
    """
    # qcut with labels=False yields integer octile codes 0-7
    ATQcutPosition = pd.qcut(ATelement.std(axis=1),q=8,labels=False)
    ATQcutElement = pd.qcut(ATelement.std(),q=8,labels=False)
    # zip pairs the codes (in order of first appearance) with the palette
    lutElement = dict(zip(ATQcutElement.unique(), huslPalette))
    elementColors = ATQcutElement.map(lutElement)
    lutPosition = dict(zip(ATQcutPosition.unique(), huslPalette))
    positionColors = ATQcutPosition.map(lutPosition)
    # print statement converted to print() for Python 2/3 compatibility
    print('Made dictionary for standard deviation')
    return elementColors,positionColors
# Make some graphs for fangs
def _draw_boundary_lines(ax):
    # Dashed vertical guides at the four element/flank boundary positions.
    boundaries = (
        (GlobalVariables.plotLineLocationOneFull, '#96c85b'),
        (GlobalVariables.plotLineLocationTwoFull, '#5fc85b'),
        (GlobalVariables.plotLineLocationThreeFull, '#5fc85b'),
        (GlobalVariables.plotLineLocationFourFull, '#96c85b'),
    )
    for location, line_color in boundaries:
        ax.axvline(x=location, linewidth=.05, linestyle='dashed', color=line_color, alpha=0.5)

def _finish_heatmap(pdf, grid, title, ylabel, ylabel_size=10, xlabel='Position',
                    keep_yticklabels=True, boundary_lines=True):
    # Apply the shared axis styling to one clustermap page and save it to the PDF.
    # NOTE(review): the original wrapped each call below in plt.setp(...); setp()
    # received the *return value* of an already-executed call (usually None),
    # which at best did nothing and at worst dumped property listings to stdout.
    # The calls are now made directly - the rendered figures are unchanged.
    ax = grid.ax_heatmap
    if keep_yticklabels:
        ax.set_yticklabels(ax.get_yticklabels(), rotation=0)
    else:
        ax.set_yticks([])
    ax.yaxis.tick_right()
    ax.set_ylabel(ylabel, size=ylabel_size)
    ax.set_xlabel(xlabel, size=10)
    ax.tick_params(labelsize=10)
    if boundary_lines:
        _draw_boundary_lines(ax)
    ax.set_title(title, size=12)
    sns.despine()
    pdf.savefig()

def graph_cluster(dfWindow, ranWindow, pdMeth, rnMeth, names, fileName):
    """
    Render clustered heatmaps and save them to 'Cluster_<fileName>.pdf'.

    Pages: mean AT content per element and per random region, then
    methylation frequency by tissue x position, element id x position,
    and element id x tissue, for plus and minus strands.

    Parameters:
    dfWindow  - windowed nucleotide data for the elements (UCEs)
    ranWindow - windowed nucleotide data for the random regions
    pdMeth    - methylation table for the elements
    rnMeth    - methylation table for the random regions
    names     - forwarded to collect_sum_two_nucleotides (group labels - TODO confirm)
    fileName  - label used in the suptitle and in the output PDF name

    Returns None; the only output is the multi-page PDF on disk.
    """
    plt.figure(figsize=(7,7))
    # Get group, mean and standard deviation for AT
    ATgroup, ATmean, ATstd = collect_sum_two_nucleotides(dfWindow, names, 'A', 'T')
    ranATgroup, ranATmean, ranATstd = collect_sum_two_nucleotides(ranWindow, names, 'A', 'T')
    # Slice down to just the element plus the methylation flanks
    ATelement = ATgroup.T[(GlobalVariables.plotLineLocationThree-GlobalVariables.methylationflank):(GlobalVariables.plotLineLocationFour+GlobalVariables.methylationflank)]
    ranATelement = ranATgroup.T[(GlobalVariables.plotLineLocationThree-GlobalVariables.methylationflank):(GlobalVariables.plotLineLocationFour+GlobalVariables.methylationflank)]
    # Parenthesized prints throughout: identical output under Python 2, valid Python 3
    print('Extracted just element and methylation flank, size {0}'.format(len(ATelement)))
    # Title info
    info = str(fileName) + ', ' + str(len(ATgroup.index)) + ' - UCES'
    # Plot settings
    sns.set_style('ticks')
    plt.suptitle(info, fontsize=10)
    pp = PdfPages('Cluster_{0}.pdf'.format(fileName))
    sns.set_palette("husl", n_colors=8)  # (len(nucLine)*2)
    # Row/column colors bin elements/positions by standard-deviation octile
    huslPalette = sns.husl_palette(8, s=.45)
    elementColors, positionColors = make_dictionary_for_colors(ATelement, huslPalette)
    heatmap0 = sns.clustermap(ATelement.T, cmap='RdPu', vmin=0, vmax=100, xticklabels=50, col_cluster=False, row_colors=elementColors, col_colors=positionColors)
    _finish_heatmap(pp, heatmap0, 'Mean AT Content per Element',
                    '{0} UCEs'.format(len(ATelement.T.index)), ylabel_size=8, keep_yticklabels=False)
    # ATOrdered = heatmap0.dendrogram_row.reordered_ind
    ranelementColors, ranpositionColors = make_dictionary_for_colors(ranATelement, huslPalette)
    heatmap1 = sns.clustermap(ranATelement.T, cmap='RdPu', vmin=0, vmax=100, xticklabels=50, col_cluster=False, row_colors=ranelementColors, col_colors=ranpositionColors)
    _finish_heatmap(pp, heatmap1, 'Mean AT Content per Random Region',
                    '{0} UCEs'.format(len(ranATelement.T.index)), ylabel_size=8, keep_yticklabels=False)
    # ranATOrdered = heatmap1.dendrogram_row.reordered_ind
    print('Plotted cluster plot for mean AT content for all elements and random regions')
    # Various combinations to plot on heatmaps, just for element plus methylation flanks
    # Frequency x Tissue x ID x Location
    FreqPlusID, FreqMinusID = collect_methylation_by_index(pdMeth, 'id')
    FreqPlusTis, FreqMinusTis = collect_methylation_by_index(pdMeth, 'tissue')
    XPlus, XMinus = collect_tissue_by_id_dataframe(pdMeth, 'id', 'tissue')
    ranFreqPlusID, ranFreqMinusID = collect_methylation_by_index(rnMeth, 'id')
    ranFreqPlusTis, ranFreqMinusTis = collect_methylation_by_index(rnMeth, 'tissue')
    ranXPlus, ranXMinus = collect_tissue_by_id_dataframe(rnMeth, 'id', 'tissue')
    # Remove UCEs without methylation within the element - only for the ID group
    FreqPlusID = FreqPlusID[(FreqPlusID.T != 0).any()]
    FreqMinusID = FreqMinusID[(FreqMinusID.T != 0).any()]
    ranFreqPlusID = ranFreqPlusID[(ranFreqPlusID.T != 0).any()]
    ranFreqMinusID = ranFreqMinusID[(ranFreqMinusID.T != 0).any()]
    # Methylation frequency: tissue x position
    heatmap2 = sns.clustermap(FreqPlusTis, cmap='RdPu', xticklabels=50, col_cluster=False)
    _finish_heatmap(pp, heatmap2, 'Methylation Frequency on Plus Strand for Elements', 'Sample')
    heatmap3 = sns.clustermap(FreqMinusTis, cmap='RdPu', xticklabels=50, col_cluster=False)
    _finish_heatmap(pp, heatmap3, 'Methylation Frequency on Minus Strand for Elements', 'Sample')
    print('Plotted methylation frequency for tissue types x position, for element')
    heatmap4 = sns.clustermap(ranFreqPlusTis, cmap='RdPu', xticklabels=50, col_cluster=False)
    _finish_heatmap(pp, heatmap4, 'Methylation Frequency on Plus Strand for Random Regions', 'Sample')
    heatmap5 = sns.clustermap(ranFreqMinusTis, cmap='RdPu', xticklabels=50, col_cluster=False)
    _finish_heatmap(pp, heatmap5, 'Methylation Frequency on Minus Strand for Random Regions', 'Sample')
    print('Plotted methylation frequency for tissue types x position, for random regions')
    # Methylation frequency: element id x position
    heatmap6 = sns.clustermap(FreqPlusID, cmap='RdPu', xticklabels=50, col_cluster=False)
    _finish_heatmap(pp, heatmap6, 'Methylation Frequency on Plus Strand for Elements',
                    '{0} Elements'.format(len(FreqPlusID.index)))
    heatmap7 = sns.clustermap(FreqMinusID, cmap='RdPu', xticklabels=50, col_cluster=False)
    _finish_heatmap(pp, heatmap7, 'Methylation Frequency on Minus Strand for Elements',
                    '{0} Elements'.format(len(FreqMinusID.index)))
    print('Plotted methylation frequency for element x position , element')
    heatmap8 = sns.clustermap(ranFreqPlusID, cmap='RdPu', xticklabels=50, col_cluster=False)
    _finish_heatmap(pp, heatmap8, 'Methylation Frequency on Plus Strand for Random Regions',
                    '{0} Elements'.format(len(ranFreqPlusID.index)), keep_yticklabels=False)
    # Fixed title typo from the original ('Minus StrandStrand')
    heatmap9 = sns.clustermap(ranFreqMinusID, cmap='RdPu', xticklabels=50, col_cluster=False)
    _finish_heatmap(pp, heatmap9, 'Methylation Frequency on Minus Strand for Random Regions',
                    '{0} Elements'.format(len(ranFreqMinusID.index)), keep_yticklabels=False)
    print('Plotted methylation frequency for element x position , random regions')
    # Methylation frequency: element id x tissue (no positional guide lines)
    heatmap10 = sns.clustermap(XPlus, cmap='RdPu')
    _finish_heatmap(pp, heatmap10, 'Methylation Frequency on Plus Strand for Elements',
                    '{0} Elements'.format(len(FreqPlusID.index)), xlabel='Sample', boundary_lines=False)
    heatmap11 = sns.clustermap(XMinus, cmap='RdPu')
    _finish_heatmap(pp, heatmap11, 'Methylation Frequency on Minus Strand for Elements',
                    '{0} Elements'.format(len(FreqMinusID.index)), xlabel='Sample', boundary_lines=False)
    print('Plotted methylation frequency for element x tissue type , element')
    heatmap12 = sns.clustermap(ranXPlus, cmap='RdPu')
    _finish_heatmap(pp, heatmap12, 'Methylation Frequency on Plus Strand for Random Regions',
                    '{0} Elements'.format(len(ranFreqPlusID.index)), xlabel='Sample',
                    keep_yticklabels=False, boundary_lines=False)
    heatmap13 = sns.clustermap(ranXMinus, cmap='RdPu')
    _finish_heatmap(pp, heatmap13, 'Methylation Frequency on Minus Strand for Random Regions',
                    '{0} Elements'.format(len(ranFreqMinusID.index)), xlabel='Sample',
                    keep_yticklabels=False, boundary_lines=False)
    # Fixed copy-paste log message from the original (said 'element x position')
    print('Plotted methylation frequency for element x tissue type , random regions')
    # Preserved from the original: re-save the current page once more before closing.
    sns.despine()
    pp.savefig()
    pp.close()
def main(dfWindow, ranWindow, pdMeth, rnMeth, names, fileName):
    """Entry point: log the run and delegate to graph_cluster, which writes the PDF."""
    # Parenthesized print: identical output under Python 2, valid under Python 3
    print('Running graph_clusterLibrary')
    graph_cluster(dfWindow, ranWindow, pdMeth, rnMeth, names, fileName)
if __name__ == "__main__":
main() | 0.291586 | 0.29598 |
import requests
import re
import random
import configparser
from bs4 import BeautifulSoup
from flask import Flask, request, abort
from imgurpython import ImgurClient
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import *
app = Flask(__name__)

# Runtime configuration: credentials and endpoints live in config.ini
# (sections: line_bot, imgur_api, other_api), not in the source.
config = configparser.ConfigParser()
config.read("config.ini")

# LINE Messaging API client and webhook-signature validator.
line_bot_api = LineBotApi(config['line_bot']['Channel_Access_Token'])
handler = WebhookHandler(config['line_bot']['Channel_Secret'])

# Imgur credentials/album used by the "Logo" command (random image reply).
client_id = config['imgur_api']['Client_ID']
client_secret = config['imgur_api']['Client_Secret']
album_id = config['imgur_api']['Album_ID']

# External image API endpoint; read here but not used in this chunk - TODO confirm usage elsewhere.
API_Get_Image = config['other_api']['API_Get_Image']
@app.route("/callback", methods=['POST'])
def callback():
    """LINE webhook endpoint: verify the request signature and dispatch events.

    Returns 'ok' (HTTP 200) on success; aborts with HTTP 400 when the
    X-Line-Signature check fails.
    """
    # get X-Line-Signature header value (KeyError -> Flask 400 if header missing)
    signature = request.headers['X-Line-Signature']
    # get request body as text
    body = request.get_data(as_text=True)
    # print("body:",body)
    app.logger.info("Request body: " + body)
    # handle webhook body: the handler validates the signature and routes each
    # event to the functions registered with @handler.add below
    try:
        handler.handle(body, signature)
    except InvalidSignatureError:
        abort(400)
    return 'ok'
def venue():
    """Return the bilingual (English/Chinese) venue description for the summit."""
    lines = [
        "National Taiwan University of Science and Technology",
        "國立台灣科技大學",
        "No.43, Keelung Rd., Sec.4, Da'an Dist., Taipei, Taiwan",
        "台灣台北市大安區基隆路四段43號",
    ]
    return "\n".join(lines)
def susenews():
    """Scrape www.suse.com/c/news and return a bullet list of dates and headlines.

    Each entry is rendered as '• <date>\n<headline>'; entries are separated by
    blank lines. Dates and headlines are paired by position in the page.
    """
    target_url = 'https://www.suse.com/c/news/'
    print('Start parsing news ...')
    session = requests.session()
    response = session.get(target_url, verify=False)
    response.encoding = 'utf-8'
    page = BeautifulSoup(response.text, 'html.parser')
    dates = [node.getText() for node in page.select('div .col-sm-3 p.date')]
    subjects = [node.getText() for node in page.select('div .col-sm-8 .content')]
    pieces = []
    for idx, raw_date in enumerate(dates):
        # Bullet + date (whitespace stripped), then the matching headline
        pieces.append(u'\u2022' + " " + raw_date.replace('\t', '').replace('\n', '') + '\n')
        pieces.append(subjects[idx].replace('\n', ''))
        if idx != len(dates) - 1:
            pieces.append('\n\n')
    return "".join(pieces)
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
    """Route incoming text messages to the matching summit-bot command.

    Supported commands: "Logo", "Venue", "YouTube", "News", "About";
    any other text gets a greeting. Each command branch replies via the
    LINE API and returns 0; the fallback reply returns None.
    """
    print("event.reply_token:", event.reply_token)
    print("event.message.text:", event.message.text)
    if event.message.text == "Logo":
        # Reply with a random image from the configured Imgur album.
        client = ImgurClient(client_id, client_secret)
        images = client.get_album_images(album_id)
        index = random.randint(0, len(images) - 1)
        url = images[index].link
        #line_bot_api.reply_message(
        #    event.reply_token,
        #    TextSendMessage(text=url))
        image_message = ImageSendMessage(
            original_content_url=url,
            preview_image_url=url
        )
        line_bot_api.reply_message(
            event.reply_token, image_message)
        return 0
    if event.message.text == "Venue":
        # NOTE(review): the venue() text and the image message below are built
        # but never sent - only the LocationSendMessage reply goes out.
        content = venue()
        #line_bot_api.reply_message(
        #    event.reply_token,
        #    TextSendMessage(text=content))
        image_message = ImageSendMessage(
            original_content_url='https://charleswang.us/opensuse-line-bot/taiwan-tech5.jpg',
            preview_image_url='https://charleswang.us/opensuse-line-bot/taiwan-tech3.jpg'
        )
        #line_bot_api.reply_message(
        #    event.reply_token, image_message)
        message = LocationSendMessage(
            title='台灣科技大學國際大樓',
            address='10607 臺北市大安區基隆路 4 段 43 號',
            latitude=25.013162196759016,
            longitude=121.54029257962338
        )
        line_bot_api.reply_message(event.reply_token, message)
        #line_bot_api.push_message(
        #    event.push_token,
        #    TextSendMessage(text=content))
        #line_bot_api.replySticker(event.reply_token, { packageId: '1', stickerId: '1' })
        return 0
    if event.message.text == "YouTube":
        # Scrape the openSUSE TV channel and reply with two (possibly identical)
        # randomly chosen video links.
        target_url = 'https://www.youtube.com/user/opensusetv/videos'
        rs = requests.session()
        res = rs.get(target_url, verify=False)
        soup = BeautifulSoup(res.text, 'html.parser')
        seqs = ['https://www.youtube.com{}'.format(data.find('a')['href']) for data in soup.select('.yt-lockup-title')]
        line_bot_api.reply_message(
            event.reply_token, [
                TextSendMessage(text=seqs[random.randint(0, len(seqs) - 1)]),
                TextSendMessage(text=seqs[random.randint(0, len(seqs) - 1)])
            ])
        return 0
    if event.message.text == "News":
        # Latest SUSE news headlines (see susenews()).
        content = susenews()
        line_bot_api.reply_message(
            event.reply_token,
            TextSendMessage(text=content))
        return 0
    if event.message.text == "About":
        # Summit description (Chinese) plus registration link and author credit.
        content = "openSUSE 亞洲高峰會是 openSUSE 社群 ( 即:貢獻者跟使用者 ) 很重要的活動之一,那些平常都在線上交流的人,現在可以一起面對面,與來自世界各地的高手進行交流,社群成員將會分享他們最新的知識、經驗,並學習關於 openSUSE FLOSS 的技術。這次在台北的活動是 openSUSE 亞洲高峰會的第五次,繼 2014 年首次的亞洲高峰會是在北京之後,過去的亞洲高峰有來自中國、台灣、印度、印度尼西亞、日本、南韓等國的參加。"
        content += "\n\nRegistration: https://coscup2018.kktix.cc/events/coscup2018regist"
        content += "\n\nLINE Bot Created by:\n<NAME> (<EMAIL>)"
        line_bot_api.reply_message(
            event.reply_token,
            TextSendMessage(text=content))
        return 0
    # Default: greet unrecognized input.
    line_bot_api.reply_message(
        event.reply_token,
        TextSendMessage(text="Hello from openSUSE.Asia Summit 2018!"))
@handler.add(MessageEvent, message=StickerMessage)
def handle_sticker_message(event):
    """Reply to any incoming sticker with a random sticker from package 1."""
    print("package_id:", event.message.package_id)
    print("sticker_id:", event.message.sticker_id)
    # ref. https://developers.line.me/media/messaging-api/sticker_list.pdf
    valid_ids = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 21, 100, 101, 102, 103, 104, 105, 106,
                 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125,
                 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 401, 402]
    pick = random.randint(0, len(valid_ids) - 1)
    chosen_id = str(valid_ids[pick])
    print(pick)
    reply = StickerSendMessage(
        package_id='1',
        sticker_id=chosen_id
    )
    line_bot_api.reply_message(
        event.reply_token,
        reply)
if __name__ == '__main__':
app.run() | app.py | import requests
import re
import random
import configparser
from bs4 import BeautifulSoup
from flask import Flask, request, abort
from imgurpython import ImgurClient
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import *
app = Flask(__name__)

# Runtime configuration: credentials and endpoints live in config.ini
# (sections: line_bot, imgur_api, other_api), not in the source.
config = configparser.ConfigParser()
config.read("config.ini")

# LINE Messaging API client and webhook-signature validator.
line_bot_api = LineBotApi(config['line_bot']['Channel_Access_Token'])
handler = WebhookHandler(config['line_bot']['Channel_Secret'])

# Imgur credentials/album used by the "Logo" command (random image reply).
client_id = config['imgur_api']['Client_ID']
client_secret = config['imgur_api']['Client_Secret']
album_id = config['imgur_api']['Album_ID']

# External image API endpoint; read here but not used in this chunk - TODO confirm usage elsewhere.
API_Get_Image = config['other_api']['API_Get_Image']
@app.route("/callback", methods=['POST'])
def callback():
    """LINE webhook endpoint: verify the request signature and dispatch events.

    Returns 'ok' (HTTP 200) on success; aborts with HTTP 400 when the
    X-Line-Signature check fails.
    """
    # get X-Line-Signature header value (KeyError -> Flask 400 if header missing)
    signature = request.headers['X-Line-Signature']
    # get request body as text
    body = request.get_data(as_text=True)
    # print("body:",body)
    app.logger.info("Request body: " + body)
    # handle webhook body: the handler validates the signature and routes each
    # event to the functions registered with @handler.add below
    try:
        handler.handle(body, signature)
    except InvalidSignatureError:
        abort(400)
    return 'ok'
def venue():
    """Return the bilingual (English/Chinese) venue description for the summit."""
    lines = [
        "National Taiwan University of Science and Technology",
        "國立台灣科技大學",
        "No.43, Keelung Rd., Sec.4, Da'an Dist., Taipei, Taiwan",
        "台灣台北市大安區基隆路四段43號",
    ]
    return "\n".join(lines)
def susenews():
    """Scrape www.suse.com/c/news and return a bullet list of dates and headlines.

    Each entry is rendered as '• <date>\n<headline>'; entries are separated by
    blank lines. Dates and headlines are paired by position in the page.
    """
    target_url = 'https://www.suse.com/c/news/'
    print('Start parsing news ...')
    session = requests.session()
    response = session.get(target_url, verify=False)
    response.encoding = 'utf-8'
    page = BeautifulSoup(response.text, 'html.parser')
    dates = [node.getText() for node in page.select('div .col-sm-3 p.date')]
    subjects = [node.getText() for node in page.select('div .col-sm-8 .content')]
    pieces = []
    for idx, raw_date in enumerate(dates):
        # Bullet + date (whitespace stripped), then the matching headline
        pieces.append(u'\u2022' + " " + raw_date.replace('\t', '').replace('\n', '') + '\n')
        pieces.append(subjects[idx].replace('\n', ''))
        if idx != len(dates) - 1:
            pieces.append('\n\n')
    return "".join(pieces)
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
    """Route incoming text messages to the matching summit-bot command.

    Supported commands: "Logo", "Venue", "YouTube", "News", "About";
    any other text gets a greeting. Each command branch replies via the
    LINE API and returns 0; the fallback reply returns None.
    """
    print("event.reply_token:", event.reply_token)
    print("event.message.text:", event.message.text)
    if event.message.text == "Logo":
        # Reply with a random image from the configured Imgur album.
        client = ImgurClient(client_id, client_secret)
        images = client.get_album_images(album_id)
        index = random.randint(0, len(images) - 1)
        url = images[index].link
        #line_bot_api.reply_message(
        #    event.reply_token,
        #    TextSendMessage(text=url))
        image_message = ImageSendMessage(
            original_content_url=url,
            preview_image_url=url
        )
        line_bot_api.reply_message(
            event.reply_token, image_message)
        return 0
    if event.message.text == "Venue":
        # NOTE(review): the venue() text and the image message below are built
        # but never sent - only the LocationSendMessage reply goes out.
        content = venue()
        #line_bot_api.reply_message(
        #    event.reply_token,
        #    TextSendMessage(text=content))
        image_message = ImageSendMessage(
            original_content_url='https://charleswang.us/opensuse-line-bot/taiwan-tech5.jpg',
            preview_image_url='https://charleswang.us/opensuse-line-bot/taiwan-tech3.jpg'
        )
        #line_bot_api.reply_message(
        #    event.reply_token, image_message)
        message = LocationSendMessage(
            title='台灣科技大學國際大樓',
            address='10607 臺北市大安區基隆路 4 段 43 號',
            latitude=25.013162196759016,
            longitude=121.54029257962338
        )
        line_bot_api.reply_message(event.reply_token, message)
        #line_bot_api.push_message(
        #    event.push_token,
        #    TextSendMessage(text=content))
        #line_bot_api.replySticker(event.reply_token, { packageId: '1', stickerId: '1' })
        return 0
    if event.message.text == "YouTube":
        # Scrape the openSUSE TV channel and reply with two (possibly identical)
        # randomly chosen video links.
        target_url = 'https://www.youtube.com/user/opensusetv/videos'
        rs = requests.session()
        res = rs.get(target_url, verify=False)
        soup = BeautifulSoup(res.text, 'html.parser')
        seqs = ['https://www.youtube.com{}'.format(data.find('a')['href']) for data in soup.select('.yt-lockup-title')]
        line_bot_api.reply_message(
            event.reply_token, [
                TextSendMessage(text=seqs[random.randint(0, len(seqs) - 1)]),
                TextSendMessage(text=seqs[random.randint(0, len(seqs) - 1)])
            ])
        return 0
    if event.message.text == "News":
        # Latest SUSE news headlines (see susenews()).
        content = susenews()
        line_bot_api.reply_message(
            event.reply_token,
            TextSendMessage(text=content))
        return 0
    if event.message.text == "About":
        # Summit description (Chinese) plus registration link and author credit.
        content = "openSUSE 亞洲高峰會是 openSUSE 社群 ( 即:貢獻者跟使用者 ) 很重要的活動之一,那些平常都在線上交流的人,現在可以一起面對面,與來自世界各地的高手進行交流,社群成員將會分享他們最新的知識、經驗,並學習關於 openSUSE FLOSS 的技術。這次在台北的活動是 openSUSE 亞洲高峰會的第五次,繼 2014 年首次的亞洲高峰會是在北京之後,過去的亞洲高峰有來自中國、台灣、印度、印度尼西亞、日本、南韓等國的參加。"
        content += "\n\nRegistration: https://coscup2018.kktix.cc/events/coscup2018regist"
        content += "\n\nLINE Bot Created by:\n<NAME> (<EMAIL>)"
        line_bot_api.reply_message(
            event.reply_token,
            TextSendMessage(text=content))
        return 0
    # Default: greet unrecognized input.
    line_bot_api.reply_message(
        event.reply_token,
        TextSendMessage(text="Hello from openSUSE.Asia Summit 2018!"))
@handler.add(MessageEvent, message=StickerMessage)
def handle_sticker_message(event):
    """Reply to any incoming sticker with a random sticker from package 1."""
    print("package_id:", event.message.package_id)
    print("sticker_id:", event.message.sticker_id)
    # ref. https://developers.line.me/media/messaging-api/sticker_list.pdf
    valid_ids = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 21, 100, 101, 102, 103, 104, 105, 106,
                 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125,
                 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 401, 402]
    pick = random.randint(0, len(valid_ids) - 1)
    chosen_id = str(valid_ids[pick])
    print(pick)
    reply = StickerSendMessage(
        package_id='1',
        sticker_id=chosen_id
    )
    line_bot_api.reply_message(
        event.reply_token,
        reply)
if __name__ == '__main__':
app.run() | 0.115224 | 0.053775 |
import json
import math
import os
import pickle
import random
from multiprocessing import Pool
from pathlib import Path
import pandas as pd
import torch
import torch.nn.functional as F
import torch.utils.data
from augmentation.augmentation_methods import \
NoiseAugmentor, RirAugmentor, CodecAugmentor, \
LowpassAugmentor, HighpassAugmentor, ReverbAugmentor, \
HilbertAugmentor
from complex_data_parser import get_path_by_glob, parse_complex_data
from src.meldataset import load_wav
from textgrid_parsing import parse_textgrid
PHI = (1 + math.sqrt(5))/2
MAX_WAV_VALUE = 32768.0
labels_to_use = ['speaker', 'sex', 'mic-brand']
timed_labels_to_use = ['phones']
label_groups = {
'content': ['speaker', 'sex', 'phones'],
'style': ['mic-brand']
}
augmentation_label_groups = {
'content': [],
'style': ['noise', 'rir', 'lowpass', 'highpass', 'reverb', 'codec', 'hilbert']
}
class MultilabelWaveDataset(torch.utils.data.Dataset):
def __init__(self, data_dir, cache_dir, name, source, segment_length, sampling_rate, embedding_size,
augmentation_config=None, disable_wavs=False, split=True, size=None,
fine_tuning=False, deterministic=False):
self.data_dir = data_dir
self.cache_dir = cache_dir
self.name = name
self.source = source
self.segment_length = segment_length
self.embedding_size = embedding_size
self.sampling_rate = sampling_rate
self.split = split
self.fine_tuning = fine_tuning
self.size = size
self.deterministic = deterministic
self.random = random.Random()
self.disable_wavs = disable_wavs
self.should_augment = augmentation_config is not None
if self.should_augment:
self.aug_options = augmentation_config['options']
self.aug_probs = augmentation_config['probs']
print('Creating [{}] dataset:'.format(self.name))
name_path = Path(os.path.join(cache_dir, name))
if not name_path.exists():
os.mkdir(name_path)
cache_path = Path(os.path.join(cache_dir, name, 'labels_cache'))
if not name_path.exists():
os.mkdir(cache_path)
config_path = f'**/data_configs/{source}/*.json'
self.files_with_labels = self.do_with_pickle_cache(lambda: self.get_files_with_labels(cache_dir, config_path),
os.path.join(cache_dir, name, 'files_with_labels.pickle'))
if self.size is None:
self.size = len(self.files_with_labels)
self.label_options_weights = self.do_with_pickle_cache(self.get_all_label_options_weights,
os.path.join(cache_dir, name, 'label_options_weights.pickle'))
base_prob = self.aug_probs['prob']
sub_probs = self.aug_probs['sub_probs']
for augmentation, augmentation_labels in self.aug_options.items():
sub_prob = sub_probs[augmentation]['prob']
option_prob = 1.0/len(augmentation_labels)
self.label_options_weights[augmentation] = {'none': base_prob*(1-sub_prob), **{
label: base_prob*sub_prob*option_prob for label in augmentation_labels
}}
all_label_groups = {key: [*label_groups[key], *augmentation_label_groups[key]] for key in label_groups.keys()}
self.label_options_weights_groups = {
key: {label: self.label_options_weights[label] for label in label_group}
for key, label_group in all_label_groups.items()
}
self.label_options_groups = {
key: {label: tuple(value.keys()) for label, value in label_group.items()}
for key, label_group in self.label_options_weights_groups.items()
}
self.label_options = {
key: tuple(label_group.keys())
for key, label_group in self.label_options_weights.items()
}
self.label_weights_groups = {
key: {label: tuple(value.values()) for label, value in label_group.items()}
for key, label_group in self.label_options_weights_groups.items()
}
self.label_weights = {
key: tuple(label_group.values())
for key, label_group in self.label_options_weights.items()
}
if self.should_augment:
self.aug_methods = {
'noise': NoiseAugmentor(self.data_dir, self.label_options).augment,
'rir': RirAugmentor(self.data_dir).augment,
'reverb': ReverbAugmentor(self.sampling_rate).augment,
'lowpass': LowpassAugmentor(self.sampling_rate).augment,
'highpass': HighpassAugmentor(self.sampling_rate).augment,
'codec': CodecAugmentor(self.sampling_rate).augment,
'hilbert': HilbertAugmentor(self.sampling_rate).augment
}
print('Dataset [{}] is ready!\n'.format(self.name))
@staticmethod
def do_with_pickle_cache(func, pickle_path):
pickle_path = Path(pickle_path)
if pickle_path.exists():
with open(pickle_path, 'rb') as pickle_file:
result = pickle.load(pickle_file)
else:
if not pickle_path.parent.exists():
pickle_path.parent.mkdir(parents=True, exist_ok=True)
result = func()
with open(pickle_path, 'wb') as pickle_file:
pickle.dump(result, pickle_file)
return result
@staticmethod
def create_pickle_cache(func, pickle_path):
pickle_path = Path(pickle_path)
if not pickle_path.exists():
if not pickle_path.parent.exists():
pickle_path.parent.mkdir(parents=True, exist_ok=True)
result = func()
with open(pickle_path, 'wb') as pickle_file:
pickle.dump(result, pickle_file)
    def get_all_label_options_weights(self):
        """Build {label: {option: weight}} over the whole dataset.

        Untimed labels are weighted by their normalized value counts; timed
        labels are averaged over per-item normalized counts computed in a
        process pool. Items whose timed labels failed to parse are dropped
        from files_with_labels and its on-disk pickle is rewritten.
        """
        all_label_options = {}
        for col in labels_to_use:
            all_label_options[col] = dict(self.files_with_labels[col].value_counts(normalize=True))
        with Pool(16) as pool:
            for label in timed_labels_to_use:
                all_label_options[label] = dict()
            # Workers return either a per-label counts dict or the Exception
            # they hit (see get_timed_labels_value_counts_by_index).
            results = pool.map(self.get_timed_labels_value_counts_by_index, range(len(self)))
            rows_to_remove = []
            for i, result in enumerate(results):
                if isinstance(result, Exception):
                    rows_to_remove.append(i)
                else:
                    for label in timed_labels_to_use:
                        for key, value in result[label].items():
                            if key not in all_label_options[label]:
                                all_label_options[label][key] = 0
                            all_label_options[label][key] += value
            # Average the accumulated per-item fractions.
            for label in timed_labels_to_use:
                for key in all_label_options[label]:
                    all_label_options[label][key] /= len(results)
            if len(rows_to_remove) > 0:
                # Drop unparsable rows and refresh the cached file list so the
                # next run agrees with the weights computed here.
                self.files_with_labels = self.files_with_labels.drop(rows_to_remove).reset_index(drop=True)
                pickle_path = os.path.join(self.cache_dir, self.source, 'files_with_labels.pickle')
                with open(pickle_path, 'wb') as pickle_file:
                    pickle.dump(self.files_with_labels, pickle_file)
        all_label_options_weights = all_label_options
        return all_label_options_weights
    def get_timed_labels_value_counts_by_index(self, i):
        """Value-counts for item i's timed labels; returns the exception on failure.

        Returning (not raising) the exception lets the Pool.map caller in
        get_all_label_options_weights mark the row for removal instead of
        aborting the whole scan.
        """
        try:
            labels, timed_labels = self.get_timed_labels(i)
            return self.get_labels_value_counts(timed_labels)
        except Exception as e:
            print('Item {} failed to get timed labels: [{}]'.format(i, e))
            return e
def get_labels_value_counts(self, timed_labels):
result = {}
for label in timed_labels_to_use:
result[label] = dict(timed_labels[label]['text'].value_counts(normalize=True))
return result
    def get_files_with_labels(self, main_dir, config_path):
        """Scan each subdirectory of main_dir for data-config JSON files and
        concatenate the parsed per-file label tables into one DataFrame.

        Returns None when no config was parsed successfully.
        """
        main_dir = Path(main_dir)
        subdir_list = [path for path in main_dir.glob('*/')]
        results = None
        for subdir in subdir_list:
            try:
                config_files = [path for path in subdir.glob(config_path)]
                for config_file in config_files:
                    config = config_file.read_text()
                    config_dict = json.loads(config)
                    print('Loading [{}]...'.format(config_dict['name']))
                    complex_data = parse_complex_data(subdir, config_dict['config'], config_dict['result'])
                    print('[{}] loaded successfully!'.format(config_dict['name']))
                    if results is None:
                        results = complex_data
                    else:
                        results = pd.concat([results, complex_data], axis=0, ignore_index=True)
            except Exception as e:
                # Deliberate best-effort: a malformed/missing config skips the
                # subdirectory rather than failing the whole dataset build.
                print(e)
                print('Data config was not found or invalid, moving on.')
                continue
        return results
    def get_timed_labels(self, index):
        """Return (untimed labels, timed labels) for one item.

        Timed labels are parsed from the item's TextGrid file and filtered
        down to timed_labels_to_use.
        """
        all_labels = self.files_with_labels.iloc[[index]].squeeze()
        labels = self.get_labels(index)
        timed_labels = parse_textgrid(all_labels['subdir'], all_labels['textgrid'])
        return labels, {key: value for key, value in timed_labels.items() if key in timed_labels_to_use}
def get_labels(self, index):
labels = self.files_with_labels[labels_to_use].iloc[[index]].squeeze()
return labels
    def get_grouped_labels(self, index):
        """Return the item's untimed labels bucketed by label_groups section."""
        labels = self.get_labels(index)
        grouped_labels = {group: labels.filter(group_labels).to_dict() for group, group_labels in label_groups.items()}
        return grouped_labels
    def __getitem__(self, index):
        """Fetch one (possibly augmented) item.

        In deterministic mode the RNG is reseeded per index so repeated reads
        of the same index are reproducible.
        """
        if self.deterministic:
            self.random.seed(index)
        if self.size < len(self.files_with_labels):
            # When only a subset is used, stride through the full file list
            # with a golden-ratio-derived step so the subset is spread across
            # the data instead of being a prefix.
            index = (int(len(self.files_with_labels) / PHI) * index) % len(self.files_with_labels)
        return self.get_augmented_item(index)
def get_augmented_item(self, index):
wav, wav_path, time_labels, grouped_labels = self.get_cut_item(index)
if self.should_augment:
wav, time_labels, grouped_labels = self.augment_item(wav, time_labels, grouped_labels)
return wav, wav_path, time_labels, grouped_labels
    def create_pickle_label(self, index):
        """Warm the on-disk label cache for one item (no load on cache hit)."""
        return self.create_pickle_cache(
            lambda: self.get_fresh_label(index),
            os.path.join(self.cache_dir, self.source, 'labels_cache', '{}.pickle'.format(index))
        )
    def get_pickle_label(self, index):
        """Load the item's label tensors, computing and caching them if absent."""
        return self.do_with_pickle_cache(
            lambda: self.get_fresh_label(index),
            os.path.join(self.cache_dir, self.source, 'labels_cache', '{}.pickle'.format(index))
        )
    def get_fresh_label(self, index):
        """Compute the per-frame label tensors for one item from scratch."""
        labels, timed_labels = self.get_timed_labels(index)
        segmented_timed_labels = self.get_segmented_timed_labels(timed_labels)
        all_segmented_labels = self.add_segmented_labels(segmented_timed_labels, labels)
        segmented_tensor = self.convert_segmented_labels_to_tensor(all_segmented_labels, label_groups)
        return segmented_tensor
def __len__(self):
return min(len(self.files_with_labels), self.size)
def get_segmented_timed_labels(self, timed_labels):
return pd.concat(
[
self.get_segmented_timed_labels_for_single(label_name, timed_label)
for label_name, timed_label in timed_labels.items()
],
axis=1
)
    def get_segmented_timed_labels_for_single(self, label_name, timed_label):
        """Resample one timed label onto fixed frames of embedding_size samples.

        Emits one row per frame of embedding_size / sampling_rate seconds,
        repeating the current interval's text until the frame clock passes
        that interval's 'end', then advancing to the next interval.

        NOTE(review): assumes timed_label rows have 'text' and 'end' columns
        and that 'end' values are increasing — confirm against parse_textgrid.
        """
        result_rows = []
        # Duration of one embedding frame, in seconds.
        time_interval = self.embedding_size / self.sampling_rate
        current_index = 0
        current_time = 0
        while current_index < len(timed_label):
            result_rows.append({label_name: timed_label.iloc[[current_index]].squeeze()['text']})
            current_time += time_interval
            if current_time > timed_label.iloc[[current_index]].squeeze()['end']:
                current_index += 1
        return pd.DataFrame(result_rows)
def add_segmented_labels(self, segmented_timed_labels, labels):
for col in labels.axes[0]:
segmented_timed_labels[col] = labels[col]
return segmented_timed_labels
def convert_segmented_labels_to_tensor(self, all_segmented_labels, given_label_groups):
all_tensors = {}
for key, labels in given_label_groups.items():
tensors = {}
for col in labels:
if col in all_segmented_labels:
index_tensor = torch.tensor(
all_segmented_labels[col].apply(lambda x: self.label_options[col].index(x)).tolist(),
dtype=torch.int64
)
tensors[col] = index_tensor
all_tensors[key] = tensors
return all_tensors
    def get_wav(self, index):
        """Load the item's waveform (or silence when wavs are disabled).

        Returns (1-D float tensor, path-as-string). Raises ValueError when the
        file's sample rate differs from the dataset's.
        """
        wav_path = get_path_by_glob(self.cache_dir, self.files_with_labels.iloc[[index]].squeeze()['wav'])
        if self.disable_wavs:
            # Debug/disabled mode: a zero segment of the target length.
            return torch.zeros((self.segment_length,)), str(wav_path)
        audio, sampling_rate = load_wav(wav_path)
        if sampling_rate != self.sampling_rate:
            raise ValueError("{} SR doesn't match target {} SR".format(
                sampling_rate, self.sampling_rate))
        audio = torch.FloatTensor(audio)
        return audio.squeeze(0), str(wav_path)
    def get_cut_item(self, index):
        """Return a segment_length crop of the item's wav plus aligned labels.

        The wav and the per-frame label tensors are cut (or zero-padded)
        together so one label frame always covers exactly embedding_size
        samples.
        """
        wav, wav_path = self.get_wav(index)
        pickle_label_groups = self.get_pickle_label(index)
        length = wav.size(0)
        embedded_segment_length = self.segment_length // self.embedding_size
        # Usable frame count: limited by both the audio and the label tensors.
        embedded_length = min(length // self.embedding_size,
                              next(iter(next(iter(pickle_label_groups.values())).values())).size(0))
        trimed_length = embedded_length * self.embedding_size
        trimed_start = 0
        if len(wav) > trimed_length:
            # Trim the wav to a whole number of label frames.
            wav = wav[trimed_start:trimed_start + trimed_length]
            length = wav.size(0)
        # print(length, self.segment_length, embedded_length, embedded_segment_length)
        if length >= self.segment_length:
            # Long enough: choose a random frame-aligned crop start.
            max_embedded_start = embedded_length - embedded_segment_length
            embedded_start = self.random.randint(0, max_embedded_start)
            start = embedded_start * self.embedding_size
            # print('trim: ', start, embedded_start)
        else:
            # Too short: split the missing frames randomly into prefix and
            # postfix padding so content position varies between reads.
            embedded_padding = embedded_segment_length - embedded_length
            prefix_embedded_padding = self.random.randint(0, embedded_padding)
            postfix_embedded_padding = embedded_padding - prefix_embedded_padding
            padding = embedded_padding * self.embedding_size
            prefix_padding = prefix_embedded_padding * self.embedding_size
            postfix_padding = postfix_embedded_padding * self.embedding_size
        for key, group in pickle_label_groups.items():
            for label, label_item in group.items():
                label_item = label_item[0:embedded_length]
                if length >= self.segment_length:
                    cut_label_item = label_item[embedded_start:embedded_start + embedded_segment_length]
                else:
                    # Constant pad value is 0, i.e. each label's first option.
                    cut_label_item = torch.nn.functional.pad(label_item,
                                                             (prefix_embedded_padding, postfix_embedded_padding),
                                                             'constant')
                group[label] = cut_label_item
        if length >= self.segment_length:
            wav = wav[start:start + self.segment_length]
        else:
            wav = torch.nn.functional.pad(wav, (prefix_padding, postfix_padding), 'constant')
        grouped_labels = self.get_grouped_labels(index)
        return wav, wav_path, pickle_label_groups, grouped_labels
    def augment_item(self, cut_wav, cut_label, grouped_labels):
        """Apply the configured augmentation chain to one cut item.

        Builds a per-frame 'augmented_label' frame alongside the wav, records
        each chosen augmentation value in grouped_labels, then folds the
        augmentation columns back into cut_label as index tensors.
        """
        options = self.aug_options
        probs = self.aug_probs
        methods = self.aug_methods
        # Frame count is taken from any one of the cut label tensors.
        (length,) = next(iter(next(iter(cut_label.values())).values())).size()
        augmented_wav = cut_wav
        augmented_label = pd.DataFrame(['none'] * length, columns=['none'])
        # One global coin toss gates the whole chain; each augmentation then
        # rolls its own sub-probability inside augment_item_with.
        should_augment = probs['prob'] > self.random.random()
        for augmentation in options.keys():
            augmented_wav, augmented_label, value = self.augment_item_with(augmented_wav, augmented_label, cut_label,
                                                                           methods, options,
                                                                           probs, augmentation, should_augment)
            for section, current_label_groups in augmentation_label_groups.items():
                if augmentation in current_label_groups:
                    grouped_labels[section][augmentation] = value
        augmentation_tensors = self.convert_segmented_labels_to_tensor(augmented_label, augmentation_label_groups)
        for key in cut_label.keys():
            current_augmentation = augmentation_tensors[key]
            for label, value in current_augmentation.items():
                cut_label[key][label] = value
        return augmented_wav, cut_label, grouped_labels
def augment_item_with(self, augmented_wav, augmented_label, cut_label, methods, options, probs, aug_type,
should=True):
value = 'none'
probs = probs['sub_probs'][aug_type]
values = options[aug_type]
aug_method = methods[aug_type]
if should and probs['prob'] > self.random.random():
value = self.random.choice(values)
augmented_label, augmented_wav, value = aug_method(
self.random,
augmented_label,
cut_label,
augmented_wav,
value,
self.disable_wavs
)
augmented_label[aug_type] = value
return augmented_wav, augmented_label, value | src/speech_distillation/multilabel_wave_dataset.py | import json
import math
import os
import pickle
import random
from multiprocessing import Pool
from pathlib import Path
import pandas as pd
import torch
import torch.nn.functional as F
import torch.utils.data
from augmentation.augmentation_methods import \
NoiseAugmentor, RirAugmentor, CodecAugmentor, \
LowpassAugmentor, HighpassAugmentor, ReverbAugmentor, \
HilbertAugmentor
from complex_data_parser import get_path_by_glob, parse_complex_data
from src.meldataset import load_wav
from textgrid_parsing import parse_textgrid
# Golden ratio: used by __getitem__ as a quasi-random stride over the dataset.
PHI = (1 + math.sqrt(5))/2
# Full-scale magnitude of 16-bit PCM audio.
MAX_WAV_VALUE = 32768.0
# Per-file (untimed) label columns read from the parsed data configs.
labels_to_use = ['speaker', 'sex', 'mic-brand']
# TextGrid tiers that are resampled into per-frame labels.
timed_labels_to_use = ['phones']
# How dataset labels are bucketed for downstream consumers.
label_groups = {
    'content': ['speaker', 'sex', 'phones'],
    'style': ['mic-brand']
}
# Augmentation labels are all treated as 'style'.
augmentation_label_groups = {
    'content': [],
    'style': ['noise', 'rir', 'lowpass', 'highpass', 'reverb', 'codec', 'hilbert']
}
class MultilabelWaveDataset(torch.utils.data.Dataset):
def __init__(self, data_dir, cache_dir, name, source, segment_length, sampling_rate, embedding_size,
augmentation_config=None, disable_wavs=False, split=True, size=None,
fine_tuning=False, deterministic=False):
self.data_dir = data_dir
self.cache_dir = cache_dir
self.name = name
self.source = source
self.segment_length = segment_length
self.embedding_size = embedding_size
self.sampling_rate = sampling_rate
self.split = split
self.fine_tuning = fine_tuning
self.size = size
self.deterministic = deterministic
self.random = random.Random()
self.disable_wavs = disable_wavs
self.should_augment = augmentation_config is not None
if self.should_augment:
self.aug_options = augmentation_config['options']
self.aug_probs = augmentation_config['probs']
print('Creating [{}] dataset:'.format(self.name))
name_path = Path(os.path.join(cache_dir, name))
if not name_path.exists():
os.mkdir(name_path)
cache_path = Path(os.path.join(cache_dir, name, 'labels_cache'))
if not name_path.exists():
os.mkdir(cache_path)
config_path = f'**/data_configs/{source}/*.json'
self.files_with_labels = self.do_with_pickle_cache(lambda: self.get_files_with_labels(cache_dir, config_path),
os.path.join(cache_dir, name, 'files_with_labels.pickle'))
if self.size is None:
self.size = len(self.files_with_labels)
self.label_options_weights = self.do_with_pickle_cache(self.get_all_label_options_weights,
os.path.join(cache_dir, name, 'label_options_weights.pickle'))
base_prob = self.aug_probs['prob']
sub_probs = self.aug_probs['sub_probs']
for augmentation, augmentation_labels in self.aug_options.items():
sub_prob = sub_probs[augmentation]['prob']
option_prob = 1.0/len(augmentation_labels)
self.label_options_weights[augmentation] = {'none': base_prob*(1-sub_prob), **{
label: base_prob*sub_prob*option_prob for label in augmentation_labels
}}
all_label_groups = {key: [*label_groups[key], *augmentation_label_groups[key]] for key in label_groups.keys()}
self.label_options_weights_groups = {
key: {label: self.label_options_weights[label] for label in label_group}
for key, label_group in all_label_groups.items()
}
self.label_options_groups = {
key: {label: tuple(value.keys()) for label, value in label_group.items()}
for key, label_group in self.label_options_weights_groups.items()
}
self.label_options = {
key: tuple(label_group.keys())
for key, label_group in self.label_options_weights.items()
}
self.label_weights_groups = {
key: {label: tuple(value.values()) for label, value in label_group.items()}
for key, label_group in self.label_options_weights_groups.items()
}
self.label_weights = {
key: tuple(label_group.values())
for key, label_group in self.label_options_weights.items()
}
if self.should_augment:
self.aug_methods = {
'noise': NoiseAugmentor(self.data_dir, self.label_options).augment,
'rir': RirAugmentor(self.data_dir).augment,
'reverb': ReverbAugmentor(self.sampling_rate).augment,
'lowpass': LowpassAugmentor(self.sampling_rate).augment,
'highpass': HighpassAugmentor(self.sampling_rate).augment,
'codec': CodecAugmentor(self.sampling_rate).augment,
'hilbert': HilbertAugmentor(self.sampling_rate).augment
}
print('Dataset [{}] is ready!\n'.format(self.name))
@staticmethod
def do_with_pickle_cache(func, pickle_path):
pickle_path = Path(pickle_path)
if pickle_path.exists():
with open(pickle_path, 'rb') as pickle_file:
result = pickle.load(pickle_file)
else:
if not pickle_path.parent.exists():
pickle_path.parent.mkdir(parents=True, exist_ok=True)
result = func()
with open(pickle_path, 'wb') as pickle_file:
pickle.dump(result, pickle_file)
return result
@staticmethod
def create_pickle_cache(func, pickle_path):
pickle_path = Path(pickle_path)
if not pickle_path.exists():
if not pickle_path.parent.exists():
pickle_path.parent.mkdir(parents=True, exist_ok=True)
result = func()
with open(pickle_path, 'wb') as pickle_file:
pickle.dump(result, pickle_file)
def get_all_label_options_weights(self):
all_label_options = {}
for col in labels_to_use:
all_label_options[col] = dict(self.files_with_labels[col].value_counts(normalize=True))
with Pool(16) as pool:
for label in timed_labels_to_use:
all_label_options[label] = dict()
results = pool.map(self.get_timed_labels_value_counts_by_index, range(len(self)))
rows_to_remove = []
for i, result in enumerate(results):
if isinstance(result, Exception):
rows_to_remove.append(i)
else:
for label in timed_labels_to_use:
for key, value in result[label].items():
if key not in all_label_options[label]:
all_label_options[label][key] = 0
all_label_options[label][key] += value
for label in timed_labels_to_use:
for key in all_label_options[label]:
all_label_options[label][key] /= len(results)
if len(rows_to_remove) > 0:
self.files_with_labels = self.files_with_labels.drop(rows_to_remove).reset_index(drop=True)
pickle_path = os.path.join(self.cache_dir, self.source, 'files_with_labels.pickle')
with open(pickle_path, 'wb') as pickle_file:
pickle.dump(self.files_with_labels, pickle_file)
all_label_options_weights = all_label_options
return all_label_options_weights
def get_timed_labels_value_counts_by_index(self, i):
try:
labels, timed_labels = self.get_timed_labels(i)
return self.get_labels_value_counts(timed_labels)
except Exception as e:
print('Item {} failed to get timed labels: [{}]'.format(i, e))
return e
def get_labels_value_counts(self, timed_labels):
result = {}
for label in timed_labels_to_use:
result[label] = dict(timed_labels[label]['text'].value_counts(normalize=True))
return result
def get_files_with_labels(self, main_dir, config_path):
main_dir = Path(main_dir)
subdir_list = [path for path in main_dir.glob('*/')]
results = None
for subdir in subdir_list:
try:
config_files = [path for path in subdir.glob(config_path)]
for config_file in config_files:
config = config_file.read_text()
config_dict = json.loads(config)
print('Loading [{}]...'.format(config_dict['name']))
complex_data = parse_complex_data(subdir, config_dict['config'], config_dict['result'])
print('[{}] loaded successfully!'.format(config_dict['name']))
if results is None:
results = complex_data
else:
results = pd.concat([results, complex_data], axis=0, ignore_index=True)
except Exception as e:
print(e)
print('Data config was not found or invalid, moving on.')
continue
return results
def get_timed_labels(self, index):
all_labels = self.files_with_labels.iloc[[index]].squeeze()
labels = self.get_labels(index)
timed_labels = parse_textgrid(all_labels['subdir'], all_labels['textgrid'])
return labels, {key: value for key, value in timed_labels.items() if key in timed_labels_to_use}
def get_labels(self, index):
labels = self.files_with_labels[labels_to_use].iloc[[index]].squeeze()
return labels
def get_grouped_labels(self, index):
labels = self.get_labels(index)
grouped_labels = {group: labels.filter(group_labels).to_dict() for group, group_labels in label_groups.items()}
return grouped_labels
def __getitem__(self, index):
if self.deterministic:
self.random.seed(index)
if self.size < len(self.files_with_labels):
index = (int(len(self.files_with_labels) / PHI) * index) % len(self.files_with_labels)
return self.get_augmented_item(index)
def get_augmented_item(self, index):
wav, wav_path, time_labels, grouped_labels = self.get_cut_item(index)
if self.should_augment:
wav, time_labels, grouped_labels = self.augment_item(wav, time_labels, grouped_labels)
return wav, wav_path, time_labels, grouped_labels
def create_pickle_label(self, index):
return self.create_pickle_cache(
lambda: self.get_fresh_label(index),
os.path.join(self.cache_dir, self.source, 'labels_cache', '{}.pickle'.format(index))
)
def get_pickle_label(self, index):
return self.do_with_pickle_cache(
lambda: self.get_fresh_label(index),
os.path.join(self.cache_dir, self.source, 'labels_cache', '{}.pickle'.format(index))
)
def get_fresh_label(self, index):
labels, timed_labels = self.get_timed_labels(index)
segmented_timed_labels = self.get_segmented_timed_labels(timed_labels)
all_segmented_labels = self.add_segmented_labels(segmented_timed_labels, labels)
segmented_tensor = self.convert_segmented_labels_to_tensor(all_segmented_labels, label_groups)
return segmented_tensor
def __len__(self):
return min(len(self.files_with_labels), self.size)
def get_segmented_timed_labels(self, timed_labels):
return pd.concat(
[
self.get_segmented_timed_labels_for_single(label_name, timed_label)
for label_name, timed_label in timed_labels.items()
],
axis=1
)
def get_segmented_timed_labels_for_single(self, label_name, timed_label):
result_rows = []
time_interval = self.embedding_size / self.sampling_rate
current_index = 0
current_time = 0
while current_index < len(timed_label):
result_rows.append({label_name: timed_label.iloc[[current_index]].squeeze()['text']})
current_time += time_interval
if current_time > timed_label.iloc[[current_index]].squeeze()['end']:
current_index += 1
return pd.DataFrame(result_rows)
def add_segmented_labels(self, segmented_timed_labels, labels):
for col in labels.axes[0]:
segmented_timed_labels[col] = labels[col]
return segmented_timed_labels
def convert_segmented_labels_to_tensor(self, all_segmented_labels, given_label_groups):
all_tensors = {}
for key, labels in given_label_groups.items():
tensors = {}
for col in labels:
if col in all_segmented_labels:
index_tensor = torch.tensor(
all_segmented_labels[col].apply(lambda x: self.label_options[col].index(x)).tolist(),
dtype=torch.int64
)
tensors[col] = index_tensor
all_tensors[key] = tensors
return all_tensors
def get_wav(self, index):
wav_path = get_path_by_glob(self.cache_dir, self.files_with_labels.iloc[[index]].squeeze()['wav'])
if self.disable_wavs:
return torch.zeros((self.segment_length,)), str(wav_path)
audio, sampling_rate = load_wav(wav_path)
if sampling_rate != self.sampling_rate:
raise ValueError("{} SR doesn't match target {} SR".format(
sampling_rate, self.sampling_rate))
audio = torch.FloatTensor(audio)
return audio.squeeze(0), str(wav_path)
def get_cut_item(self, index):
wav, wav_path = self.get_wav(index)
pickle_label_groups = self.get_pickle_label(index)
length = wav.size(0)
embedded_segment_length = self.segment_length // self.embedding_size
embedded_length = min(length // self.embedding_size,
next(iter(next(iter(pickle_label_groups.values())).values())).size(0))
trimed_length = embedded_length * self.embedding_size
trimed_start = 0
if len(wav) > trimed_length:
wav = wav[trimed_start:trimed_start + trimed_length]
length = wav.size(0)
# print(length, self.segment_length, embedded_length, embedded_segment_length)
if length >= self.segment_length:
max_embedded_start = embedded_length - embedded_segment_length
embedded_start = self.random.randint(0, max_embedded_start)
start = embedded_start * self.embedding_size
# print('trim: ', start, embedded_start)
else:
embedded_padding = embedded_segment_length - embedded_length
prefix_embedded_padding = self.random.randint(0, embedded_padding)
postfix_embedded_padding = embedded_padding - prefix_embedded_padding
padding = embedded_padding * self.embedding_size
prefix_padding = prefix_embedded_padding * self.embedding_size
postfix_padding = postfix_embedded_padding * self.embedding_size
for key, group in pickle_label_groups.items():
for label, label_item in group.items():
label_item = label_item[0:embedded_length]
if length >= self.segment_length:
cut_label_item = label_item[embedded_start:embedded_start + embedded_segment_length]
else:
cut_label_item = torch.nn.functional.pad(label_item,
(prefix_embedded_padding, postfix_embedded_padding),
'constant')
group[label] = cut_label_item
if length >= self.segment_length:
wav = wav[start:start + self.segment_length]
else:
wav = torch.nn.functional.pad(wav, (prefix_padding, postfix_padding), 'constant')
grouped_labels = self.get_grouped_labels(index)
return wav, wav_path, pickle_label_groups, grouped_labels
def augment_item(self, cut_wav, cut_label, grouped_labels):
options = self.aug_options
probs = self.aug_probs
methods = self.aug_methods
(length,) = next(iter(next(iter(cut_label.values())).values())).size()
augmented_wav = cut_wav
augmented_label = pd.DataFrame(['none'] * length, columns=['none'])
should_augment = probs['prob'] > self.random.random()
for augmentation in options.keys():
augmented_wav, augmented_label, value = self.augment_item_with(augmented_wav, augmented_label, cut_label,
methods, options,
probs, augmentation, should_augment)
for section, current_label_groups in augmentation_label_groups.items():
if augmentation in current_label_groups:
grouped_labels[section][augmentation] = value
augmentation_tensors = self.convert_segmented_labels_to_tensor(augmented_label, augmentation_label_groups)
for key in cut_label.keys():
current_augmentation = augmentation_tensors[key]
for label, value in current_augmentation.items():
cut_label[key][label] = value
return augmented_wav, cut_label, grouped_labels
def augment_item_with(self, augmented_wav, augmented_label, cut_label, methods, options, probs, aug_type,
should=True):
value = 'none'
probs = probs['sub_probs'][aug_type]
values = options[aug_type]
aug_method = methods[aug_type]
if should and probs['prob'] > self.random.random():
value = self.random.choice(values)
augmented_label, augmented_wav, value = aug_method(
self.random,
augmented_label,
cut_label,
augmented_wav,
value,
self.disable_wavs
)
augmented_label[aug_type] = value
return augmented_wav, augmented_label, value | 0.575588 | 0.103477 |
from flask_sqlalchemy import SQLAlchemy
from dataclasses import dataclass
db = SQLAlchemy()
@dataclass
class User(db.Model):
    """Application user account.

    NOTE(review): the password column appears to hold plain text (no hashing
    visible in this module) — confirm hashing happens before storage.
    """
    __tablename__ = 'users'
    # Dataclass annotations make model instances serializable by field.
    id: int
    name: str
    email: str
    password: str
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50), unique=True)
    email = db.Column(db.String(120), unique=True)
    password = db.Column(db.String(120), unique=False)
    # One-to-many links: event signups and admin records for this user.
    events = db.relationship('EventSignup', backref='user', lazy=True)
    admin = db.relationship('Admin', backref='users', lazy=True)
    def __init__(self, name=None, email=None, password=None):
        self.name = name
        self.email = email
        self.password = password
    def __repr__(self):
        return '<User %r>' % self.name
@dataclass
class Event(db.Model):
    """An event users can sign up for.

    NOTE(review): start_time/end_time are stored as strings, not DateTime —
    confirm callers parse and compare them consistently.
    """
    __tablename__ = 'events'
    id: int
    name: str
    location: str
    start_time: str
    end_time: str
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50), unique=False)
    location = db.Column(db.String(50), unique=False)
    start_time = db.Column(db.String(120), unique=False)
    end_time = db.Column(db.String(120), unique=False)
    # Signups linking users to this event.
    users = db.relationship('EventSignup', backref='events', lazy=True)
    def __init__(self, name=None, location=None, start_time=None, end_time=None):
        self.name = name
        self.location = location
        self.start_time = start_time
        self.end_time = end_time
    def __repr__(self):
        return '<event %r>' % self.name
@dataclass
class EventSignup(db.Model):
    """Join table: one row per (event, user) signup.

    The unique constraint stops a user signing up for the same event twice.
    """
    __tablename__ = 'event_signups'
    __table_args__ = (
        db.UniqueConstraint('event_id', 'user_id', name='unique_event_user'),
    )
    id: int
    event_id: int
    user_id: int
    id = db.Column(db.Integer, primary_key=True)
    event_id = db.Column(db.Integer, db.ForeignKey('events.id'), nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
    def __init__(self, event_id=None, user_id=None):
        self.event_id = event_id
        self.user_id = user_id
    def __repr__(self):
        return '<EventSignup %r>' % self.id
@dataclass
class Admin(db.Model):
    """Marks a user as an administrator via a unique admin key."""
    __tablename__ = 'admins'
    id: int
    user_id: int
    admin_key: str
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
    admin_key = db.Column(db.String(60), unique=True)
    def __init__(self, user_id=None, admin_key=None):
        self.user_id = user_id
        self.admin_key = admin_key
    def __repr__(self):
        return '<Admin %r>' % self.id
@dataclass
class EmailSender(db.Model):
    """SMTP credentials used to send outgoing mail.

    NOTE(review): the password is stored in plain text — consider a secrets
    store or encryption at rest.
    """
    __tablename__ = 'email_senders'
    id: int
    smtp_server: str
    port: int
    # Fixed: was annotated `int`; the column is db.String(120).
    sender_email: str
    password: str
    id = db.Column(db.Integer, primary_key=True)
    smtp_server = db.Column(db.String(60), unique=False)
    port = db.Column(db.Integer, unique=False)
    sender_email = db.Column(db.String(120), unique=False)
    password = db.Column(db.String(60), unique=False)
    def __init__(self, smtp_server=None, port=None, sender_email=None, password=None):
        # Fixed: the default had been mangled to the literal `<PASSWORD>`
        # (a redaction artifact), which is a syntax error; None matches the
        # other optional constructor arguments.
        self.smtp_server = smtp_server
        self.port = port
        self.sender_email = sender_email
        self.password = password
    def __repr__(self):
        return '<EmailSender %r>' % self.id
from dataclasses import dataclass
db = SQLAlchemy()
@dataclass
class User(db.Model):
__tablename__ = 'users'
id: int
name: str
email: str
password: str
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50), unique=True)
email = db.Column(db.String(120), unique=True)
password = db.Column(db.String(120), unique=False)
events = db.relationship('EventSignup', backref='user', lazy=True)
admin = db.relationship('Admin', backref='users', lazy=True)
def __init__(self, name=None, email=None, password=None):
self.name = name
self.email = email
self.password = password
def __repr__(self):
return '<User %r>' % self.name
@dataclass
class Event(db.Model):
__tablename__ = 'events'
id: int
name: str
location: str
start_time: str
end_time: str
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50), unique=False)
location = db.Column(db.String(50), unique=False)
start_time = db.Column(db.String(120), unique=False)
end_time = db.Column(db.String(120), unique=False)
users = db.relationship('EventSignup', backref='events', lazy=True)
def __init__(self, name=None, location=None, start_time=None, end_time=None):
self.name = name
self.location = location
self.start_time = start_time
self.end_time = end_time
def __repr__(self):
return '<event %r>' % self.name
@dataclass
class EventSignup(db.Model):
__tablename__ = 'event_signups'
__table_args__ = (
db.UniqueConstraint('event_id', 'user_id', name='unique_event_user'),
)
id: int
event_id: int
user_id: int
id = db.Column(db.Integer, primary_key=True)
event_id = db.Column(db.Integer, db.ForeignKey('events.id'), nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
def __init__(self, event_id=None, user_id=None):
self.event_id = event_id
self.user_id = user_id
def __repr__(self):
return '<EventSignup %r>' % self.id
@dataclass
class Admin(db.Model):
__tablename__ = 'admins'
id: int
user_id: int
admin_key: str
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
admin_key = db.Column(db.String(60), unique=True)
def __init__(self, user_id=None, admin_key=None):
self.user_id = user_id
self.admin_key = admin_key
def __repr__(self):
return '<Admin %r>' % self.id
@dataclass
class EmailSender(db.Model):
    """SMTP credentials used to send outgoing mail.

    NOTE(review): the password is stored in plain text — consider a secrets
    store or encryption at rest.
    """
    __tablename__ = 'email_senders'
    id: int
    smtp_server: str
    port: int
    # Fixed: was annotated `int`; the column is db.String(120).
    sender_email: str
    password: str
    id = db.Column(db.Integer, primary_key=True)
    smtp_server = db.Column(db.String(60), unique=False)
    port = db.Column(db.Integer, unique=False)
    sender_email = db.Column(db.String(120), unique=False)
    password = db.Column(db.String(60), unique=False)
    def __init__(self, smtp_server=None, port=None, sender_email=None, password=None):
        # Fixed: the default had been mangled to the literal `<PASSWORD>`
        # (a redaction artifact), which is a syntax error; None matches the
        # other optional constructor arguments.
        self.smtp_server = smtp_server
        self.port = port
        self.sender_email = sender_email
        self.password = password
    def __repr__(self):
        return '<EmailSender %r>' % self.id
import argparse, os, glob, tqdm, zipfile, webrtcvad
import soundfile as sf
from get_vad import *
class segmentor():
    """Cuts meeting audio into VAD speech segments written as wav files."""
    def __init__(self, merge_margin, cut_margin, res_path, vad_setting=0):
        self.merge_margin = merge_margin
        self.cut_margin = cut_margin
        self.vad = webrtcvad.Vad(vad_setting)
        self.sr = 16000
        self.res_path = res_path
    def get_segment(self, audio, meeting_name):
        """Run VAD over one file and write its padded segments to disk."""
        out = get_seg(audio, self.merge_margin, self.vad)
        self.seg_wav(audio, out, meeting_name, margin=self.cut_margin, sr=self.sr)
        return
    def seg_wav(self, audio, segments, meeting_name, margin=0.25, sr=16000):
        """Write one wav per (start, end) second pair in segments.

        Each segment is padded by `margin` seconds on both sides and clamped
        to the signal bounds; files land in res_path/meeting_name with the
        sample range encoded in the filename.
        """
        s, f = sf.read(audio)
        audio_name = os.path.basename(audio)[:-4]
        save_path = os.path.join(self.res_path, meeting_name)
        os.makedirs(save_path, exist_ok=True)
        for segment in segments:
            st, en = segment
            st = int((st - margin) * sr)
            en = int((en + margin) * sr)
            if st < 0:
                st = 0
            if en > s.shape[0] or en < 0:
                en = s.shape[0]
            this_seg = s[st:en]
            fname = os.path.join(save_path, '{}_{}_{}_{}.wav'.format(meeting_name, audio_name, st, en))
            # Bug fix: the output rate was hard-coded to 16000, silently
            # ignoring the `sr` argument for non-default rates.
            sf.write(fname, this_seg, sr)
        return
def get_zip(zip_dir, meeting, result_dir):
    """Zip every segmented wav for `meeting` and return the archive path."""
    os.makedirs(zip_dir, exist_ok=True)
    zip_path = os.path.join(zip_dir, meeting + '.zip')
    wav_files = glob.glob(os.path.join(result_dir, meeting, '*.wav'))
    # Bug fix: the original never closed the ZipFile; the context manager
    # guarantees the central directory is flushed so the archive is valid.
    with zipfile.ZipFile(zip_path, 'w') as zipf:
        for wav_file in tqdm.tqdm(wav_files):
            zipf.write(wav_file, arcname=os.path.basename(wav_file))
    return zip_path
def main(args):
    """Segment each meeting's audio with VAD, zip the segments, and emit a decode script."""
    tool_path = os.path.normpath(args.tool_path)
    am_path = os.path.normpath(args.am_path)
    decode_path = os.path.normpath(args.decode_path)
    # Create some directories.
    result_dir = os.path.join(decode_path, 'vad')
    zip_dir = os.path.join(result_dir, 'zip')
    os.makedirs(zip_dir, exist_ok=True)
    decoding_cmd = os.path.join(decode_path, 'decoding_cmd')
    os.makedirs(decoding_cmd, exist_ok=True)
    decoding_result = os.path.join(decode_path, 'decoding_result')
    os.makedirs(decoding_result, exist_ok=True)
    # Input is assumed to be single-channel audio prepared by an earlier step.
    with open(decoding_cmd + '/zip_list.scp', 'w') as f:
        # Each 'overlap*' directory under input_path is one meeting.
        meeting = glob.glob(os.path.join(args.input_path, 'overlap*'))
        print(args.input_path)
        for meet in meeting:
            meeting_name = os.path.basename(meet)
            # Do segmentation for every wav in this meeting.
            seg = segmentor(args.merge_margin, args.cut_margin, res_path=result_dir, vad_setting=0)
            all_wav = glob.glob(meet + '/*.wav')
            for audio in tqdm.tqdm(all_wav):
                seg.get_segment(audio, meeting_name)
            # Zip up the segmented files and add the zip location to the output file list.
            zip_file = get_zip(zip_dir, meeting_name, result_dir)
            f.write(zip_file + '\n')
    # Create an ASR script.
    with open(os.path.join(decoding_cmd, 'decode.sh'), 'w') as f:
        cmd = 'sh {} {} {} . {}'.format(os.path.join(tool_path, 'run_asr_continuous.sh'),
                                        os.path.join(decoding_cmd, 'zip_list.scp'),
                                        decoding_result,
                                        am_path)
        f.write(cmd + '\n')
        # Sort the CTM output; CSS/multi-stream runs keep the channel field.
        if args.multi_stream:
            cmd = 'python {} --with_channel --inputdir {} --outputdir {}'.format(os.path.normpath(os.path.join(tool_path, '../python/sortctm.py')),
                                                                                decoding_result,
                                                                                decoding_result + '.sorted')
        else:
            cmd = 'python {} --inputdir {} --outputdir {}'.format(os.path.normpath(os.path.join(tool_path, '../python/sortctm.py')),
                                                                  decoding_result,
                                                                  decoding_result + '.sorted')
        f.write(cmd + '\n')
        # Hand ownership of the results back to the invoking user
        # (presumably the script runs as root inside a container — confirm).
        cmd = 'chown -R {}:{} {}'.format(os.getuid(), os.getgid(), decoding_result)
        f.write(cmd + '\n')
        cmd = 'chown -R {}:{} {}'.format(os.getuid(), os.getgid(), decoding_result + '.sorted')
        f.write(cmd + '\n')
def make_argparse():
    """Build the command-line parser for ASR input generation.

    Returns:
        argparse.ArgumentParser: parser covering the required paths and the
        optional segmentation / multi-stream settings.
    """
    # The original built two parsers and immediately discarded the first
    # (the one carrying the description); keep a single described parser.
    parser = argparse.ArgumentParser(description='Generate ASR input files')
    parser.add_argument('--input_path', metavar='<path>', required=True,
                        help='Directory where input audio files are retrieved.')
    parser.add_argument('--decode_path', metavar='<path>', required=True,
                        help='Directory in which decoding is to be performed')
    parser.add_argument('--tool_path', metavar='<path>', required=True)
    parser.add_argument('--am_path', metavar='<path>', required=True)
    parser.add_argument('--cut_margin', default=0.25, metavar='<float>', type=float,
                        help='Segmentation parameter.')
    parser.add_argument('--merge_margin', default=1, metavar='<float>', type=float,
                        help='Segmentation parameter.')
    parser.add_argument('--multi_stream', action='store_true',
                        help='Set this flag when processing CSS (or multi-stream) outputs.')
    return parser
if __name__ == '__main__':
    # CLI entry point: parse arguments, then run the pipeline.
    parser = make_argparse()
    args = parser.parse_args()
main(args) | asr/python/gen_asrinput_continuous.py | import argparse, os, glob, tqdm, zipfile, webrtcvad
import soundfile as sf
from get_vad import *
class segmentor():
    """Splits meeting audio into VAD-derived segments written as wav files."""

    def __init__(self, merge_margin, cut_margin, res_path, vad_setting=0):
        self.merge_margin = merge_margin   # max gap (s) merged into one segment
        self.cut_margin = cut_margin       # padding (s) added around each cut
        self.vad = webrtcvad.Vad(vad_setting)
        self.sr = 16000                    # assumed input sample rate — TODO confirm
        self.res_path = res_path

    def get_segment(self, audio, meeting_name):
        """Run VAD over *audio* and write the resulting segment wavs."""
        out = get_seg(audio, self.merge_margin, self.vad)
        self.seg_wav(audio, out, meeting_name, margin=self.cut_margin, sr=self.sr)

    def seg_wav(self, audio, segments, meeting_name, margin=0.25, sr=16000):
        """Cut (start, end)-second *segments* out of *audio* and save each one."""
        samples, file_sr = sf.read(audio)
        audio_name = os.path.basename(audio)[:-4]
        save_path = self.res_path + '/' + meeting_name
        os.makedirs(save_path, exist_ok=True)
        for st_sec, en_sec in segments:
            st = int((st_sec - margin) * sr)
            en = int((en_sec + margin) * sr)
            if st < 0:
                st = 0
            if en > samples.shape[0] or en < 0:
                en = samples.shape[0]
            this_seg = samples[st:en]
            fname = save_path + '/' + meeting_name + '_' + audio_name + '_' + str(st) + '_' + str(en) + '.wav'
            # Write with the requested rate instead of a hard-coded 16000 so a
            # non-default *sr* stays consistent between slicing and writing.
            sf.write(fname, this_seg, sr)
def get_zip(zip_dir, meeting, result_dir):
    """Zip all segment wavs of *meeting* into <zip_dir>/<meeting>.zip and return that path."""
    os.makedirs(zip_dir, exist_ok=True)
    zip_path = zip_dir + '/' + meeting + '.zip'
    wavs = glob.glob(result_dir + '/' + meeting + '/*.wav')
    # Context manager guarantees the archive is flushed and closed; the
    # original leaked the handle and relied on GC to finalize the zip.
    with zipfile.ZipFile(zip_path, 'w') as zipf:
        for wav in tqdm.tqdm(wavs):
            zipf.write(wav, arcname=os.path.basename(wav))
    return zip_path
def main(args):
    """Segment each meeting's audio with VAD, zip the segments, and emit a decode script."""
    tool_path = os.path.normpath(args.tool_path)
    am_path = os.path.normpath(args.am_path)
    decode_path = os.path.normpath(args.decode_path)
    # Create some directories.
    result_dir = os.path.join(decode_path, 'vad')
    zip_dir = os.path.join(result_dir, 'zip')
    os.makedirs(zip_dir, exist_ok=True)
    decoding_cmd = os.path.join(decode_path, 'decoding_cmd')
    os.makedirs(decoding_cmd, exist_ok=True)
    decoding_result = os.path.join(decode_path, 'decoding_result')
    os.makedirs(decoding_result, exist_ok=True)
    # Input is assumed to be single-channel audio prepared by an earlier step.
    with open(decoding_cmd + '/zip_list.scp', 'w') as f:
        # Each 'overlap*' directory under input_path is one meeting.
        meeting = glob.glob(os.path.join(args.input_path, 'overlap*'))
        print(args.input_path)
        for meet in meeting:
            meeting_name = os.path.basename(meet)
            # Do segmentation for every wav in this meeting.
            seg = segmentor(args.merge_margin, args.cut_margin, res_path=result_dir, vad_setting=0)
            all_wav = glob.glob(meet + '/*.wav')
            for audio in tqdm.tqdm(all_wav):
                seg.get_segment(audio, meeting_name)
            # Zip up the segmented files and add the zip location to the output file list.
            zip_file = get_zip(zip_dir, meeting_name, result_dir)
            f.write(zip_file + '\n')
    # Create an ASR script.
    with open(os.path.join(decoding_cmd, 'decode.sh'), 'w') as f:
        cmd = 'sh {} {} {} . {}'.format(os.path.join(tool_path, 'run_asr_continuous.sh'),
                                        os.path.join(decoding_cmd, 'zip_list.scp'),
                                        decoding_result,
                                        am_path)
        f.write(cmd + '\n')
        # Sort the CTM output; CSS/multi-stream runs keep the channel field.
        if args.multi_stream:
            cmd = 'python {} --with_channel --inputdir {} --outputdir {}'.format(os.path.normpath(os.path.join(tool_path, '../python/sortctm.py')),
                                                                                decoding_result,
                                                                                decoding_result + '.sorted')
        else:
            cmd = 'python {} --inputdir {} --outputdir {}'.format(os.path.normpath(os.path.join(tool_path, '../python/sortctm.py')),
                                                                  decoding_result,
                                                                  decoding_result + '.sorted')
        f.write(cmd + '\n')
        # Hand ownership of the results back to the invoking user
        # (presumably the script runs as root inside a container — confirm).
        cmd = 'chown -R {}:{} {}'.format(os.getuid(), os.getgid(), decoding_result)
        f.write(cmd + '\n')
        cmd = 'chown -R {}:{} {}'.format(os.getuid(), os.getgid(), decoding_result + '.sorted')
        f.write(cmd + '\n')
def make_argparse():
    """Build the command-line parser for ASR input generation.

    Returns:
        argparse.ArgumentParser: parser covering the required paths and the
        optional segmentation / multi-stream settings.
    """
    # The original built two parsers and immediately discarded the first
    # (the one carrying the description); keep a single described parser.
    parser = argparse.ArgumentParser(description='Generate ASR input files')
    parser.add_argument('--input_path', metavar='<path>', required=True,
                        help='Directory where input audio files are retrieved.')
    parser.add_argument('--decode_path', metavar='<path>', required=True,
                        help='Directory in which decoding is to be performed')
    parser.add_argument('--tool_path', metavar='<path>', required=True)
    parser.add_argument('--am_path', metavar='<path>', required=True)
    parser.add_argument('--cut_margin', default=0.25, metavar='<float>', type=float,
                        help='Segmentation parameter.')
    parser.add_argument('--merge_margin', default=1, metavar='<float>', type=float,
                        help='Segmentation parameter.')
    parser.add_argument('--multi_stream', action='store_true',
                        help='Set this flag when processing CSS (or multi-stream) outputs.')
    return parser
if __name__ == '__main__':
    # CLI entry point: parse arguments, then run the pipeline.
    parser = make_argparse()
    args = parser.parse_args()
main(args) | 0.333829 | 0.064742 |
from typing import List
def findKthLargest(nums: List[int], k: int) -> int:
    """Return the k-th largest element via a full sort (O(n log n))."""
    ordered = sorted(nums)
    return ordered[-k]
# Quick-select variant, average O(n): shrink k by the size of each discarded tail.
def findKthLargest_partition(nums: List[int], k: int) -> int:
    """Return the k-th largest element, partitioning *nums* in place."""

    def partition(lo: int, hi: int):
        if lo > hi:
            return
        pivot = nums[hi]
        store = lo
        for j in range(lo, hi):
            if nums[j] < pivot:
                nums[j], nums[store] = nums[store], nums[j]
                store += 1
        nums[store], nums[hi] = nums[hi], nums[store]
        # Pivot index plus the count of elements now in nums[store:hi+1],
        # all of which are >= pivot.
        return store, hi - store + 1

    lo, hi = 0, len(nums) - 1
    while True:
        pos, tail = partition(lo, hi)
        if tail > k:
            # More than k candidates to the right of the pivot: search there.
            lo = pos + 1
        elif tail < k:
            # The whole tail is larger than the answer: drop it, shrink k.
            k -= tail
            hi = pos - 1
        else:
            return nums[pos]
# A cleaner quick-select: search directly for the target index instead of
# mutating k, which lets the loop follow the usual lo/hi convergence pattern.
def findKthLargest_qs(nums: List[int], k: int) -> int:
    """Return the k-th largest element by selecting index len(nums) - k."""
    target = len(nums) - k  # index of the answer in sorted order

    def partition(lo: int, hi: int) -> int:
        pivot = nums[hi]
        store = lo
        for j in range(lo, hi):
            if nums[j] < pivot:
                nums[j], nums[store] = nums[store], nums[j]
                store += 1
        nums[store], nums[hi] = nums[hi], nums[store]
        return store

    lo, hi = 0, len(nums) - 1
    while lo <= hi:
        pos = partition(lo, hi)
        if pos < target:
            lo = pos + 1
        else:
            hi = pos - 1
    # Comparing the pivot index itself against lo/hi is what makes the
    # loop converge exactly on the target index.
    return nums[lo]
# Heap solution: O(n + (n - k) log n) — heapify is linear, then pop until
# k elements remain. (The original comment claimed O(k*logn).)
import heapq
def findKthLargest_heap(nums: List[int], k: int) -> int:
    """Return the k-th largest element using a min-heap.

    Works on a copy: the original heapified *nums* in place and popped it
    down to k elements, silently destroying the caller's list.
    """
    heap = list(nums)
    heapq.heapify(heap)
    while len(heap) > k:
        heapq.heappop(heap)
    return heapq.heappop(heap)
if __name__ == "__main__":
    # Smoke tests: expected output is 1 and 4.
    print(findKthLargest_partition([1],1))
    print(findKthLargest_heap([3,2,3,1,2,4,5,5,6], 4))
print(findKthLargest_qs([3,2,3,1,2,4,5,5,6], 4)) | PythonSolutions/215_kth_largest_element_in_an_array.py | from typing import List
def findKthLargest(nums: List[int], k: int) -> int:
    """Return the k-th largest element via a full sort (O(n log n))."""
    ordered = sorted(nums)
    return ordered[-k]
# Quick-select variant, average O(n): shrink k by the size of each discarded tail.
def findKthLargest_partition(nums: List[int], k: int) -> int:
    """Return the k-th largest element, partitioning *nums* in place."""

    def partition(lo: int, hi: int):
        if lo > hi:
            return
        pivot = nums[hi]
        store = lo
        for j in range(lo, hi):
            if nums[j] < pivot:
                nums[j], nums[store] = nums[store], nums[j]
                store += 1
        nums[store], nums[hi] = nums[hi], nums[store]
        # Pivot index plus the count of elements now in nums[store:hi+1],
        # all of which are >= pivot.
        return store, hi - store + 1

    lo, hi = 0, len(nums) - 1
    while True:
        pos, tail = partition(lo, hi)
        if tail > k:
            # More than k candidates to the right of the pivot: search there.
            lo = pos + 1
        elif tail < k:
            # The whole tail is larger than the answer: drop it, shrink k.
            k -= tail
            hi = pos - 1
        else:
            return nums[pos]
# A cleaner quick-select: search directly for the target index instead of
# mutating k, which lets the loop follow the usual lo/hi convergence pattern.
def findKthLargest_qs(nums: List[int], k: int) -> int:
    """Return the k-th largest element by selecting index len(nums) - k."""
    target = len(nums) - k  # index of the answer in sorted order

    def partition(lo: int, hi: int) -> int:
        pivot = nums[hi]
        store = lo
        for j in range(lo, hi):
            if nums[j] < pivot:
                nums[j], nums[store] = nums[store], nums[j]
                store += 1
        nums[store], nums[hi] = nums[hi], nums[store]
        return store

    lo, hi = 0, len(nums) - 1
    while lo <= hi:
        pos = partition(lo, hi)
        if pos < target:
            lo = pos + 1
        else:
            hi = pos - 1
    # Comparing the pivot index itself against lo/hi is what makes the
    # loop converge exactly on the target index.
    return nums[lo]
# Heap solution: O(n + (n - k) log n) — heapify is linear, then pop until
# k elements remain. (The original comment claimed O(k*logn).)
import heapq
def findKthLargest_heap(nums: List[int], k: int) -> int:
    """Return the k-th largest element using a min-heap.

    Works on a copy: the original heapified *nums* in place and popped it
    down to k elements, silently destroying the caller's list.
    """
    heap = list(nums)
    heapq.heapify(heap)
    while len(heap) > k:
        heapq.heappop(heap)
    return heapq.heappop(heap)
if __name__ == "__main__":
    # Smoke tests: expected output is 1 and 4.
    print(findKthLargest_partition([1],1))
    print(findKthLargest_heap([3,2,3,1,2,4,5,5,6], 4))
print(findKthLargest_qs([3,2,3,1,2,4,5,5,6], 4)) | 0.70304 | 0.606265 |
import os
import sys
import requests
import json
from flask import jsonify, request, make_response, send_from_directory
from kazoo import client as kz_client
from flask import request
from flask_pymongo import PyMongo
from flask_jwt_extended import (create_access_token, create_refresh_token,
jwt_required, jwt_refresh_token_required, get_jwt_identity,get_jwt_claims)
from app import app
from flask_jwt_extended import JWTManager
from flask_bcrypt import Bcrypt
import logging
ROOT_PATH = os.path.dirname(os.path.realpath(__file__))
os.environ.update({'ROOT_PATH': ROOT_PATH})
sys.path.append(os.path.join(ROOT_PATH, 'modules'))
import connexion
from connexion import NoContent
from jwt import(encode,decode)
# Port variable to run the server on.
PORT = os.environ.get('PORT')
flask_bcrypt = Bcrypt(app.app)
jwt = JWTManager(app.app)
# Docker: the Mongo connection string is injected via the DB environment variable.
app.app.config['MONGO_URI'] = os.environ.get('DB')
mongo = PyMongo(app.app)
# Zookeeper path under which live auth instances register themselves.
NODE_PATH = "/auth"
# NOTE(review): 'ZK' looks like a placeholder host string — confirm it resolves
# (e.g. a Docker service name) or should come from the environment.
kz = kz_client.KazooClient('ZK')
def my_listener(state):
    """Print each Zookeeper connection-state transition."""
    if state == kz_client.KazooState.LOST:
        # Session expired: ephemeral registrations are gone.
        print("State: LOST!")
        return
    if state == kz_client.KazooState.SUSPENDED:
        # Temporarily disconnected from the ensemble.
        print("State: SUSPENDED!")
        return
    print("State: CONNECTED!")
    print("END OF ELSE!")
def make_zk_node():
    """Best-effort creation of the /auth parent node plus an ephemeral child named after PORT.

    Both steps are wrapped individually: the parent usually exists already,
    and a failure there must not prevent registering this instance.
    """
    try:
        print("In making parent_node")
        kz.ensure_path('/')
        parent_node = kz.create(NODE_PATH, b"root")
        print(parent_node)
        print("Try making parent_node: Success!")
    except Exception as e:
        # Include the exception so operators can tell "already exists"
        # apart from real failures (the original discarded it).
        print("Try making parent_node: Exception!", e)
    try:
        print("In making child_node")
        kz.ensure_path(NODE_PATH)
        # Ephemeral: the node vanishes when this session ends, which is how
        # peers discover which instances are alive.
        app_logic_node = kz.create(NODE_PATH + "/" + PORT, ephemeral=True, value=b"a value")
        print(app_logic_node)
        print("Try making child_node: Success!")
    except Exception as e:
        print("Try making child_node: Exception!", e)
# Wire up state logging, connect to Zookeeper, then register this instance.
logging.basicConfig()
kz.add_listener(my_listener)
kz.start(timeout=60)
make_zk_node()
def decode_token(token):
    ''' Work-around to x-bearerInfoFunction required by connexion '''
    # connexion only needs a dict back; no validation happens here.
    return dict(token=token)
# NOTE(review): the decorator was redacted upstream ("<EMAIL>.route"); restored
# as the Flask/connexion-style route registration — confirm against the original.
@app.route('/auth', methods=['POST'])
def login():
    ''' auth login endpoint.

    Verifies the bcrypt-hashed password for the posted email, stores a JWT in
    logged_in_users and returns it; 400 on bad credentials.
    '''
    req_body = request.get_json()
    email, password = req_body['email'], req_body['password']
    user = mongo.db.reg_users.find_one({'email': email})
    if user and flask_bcrypt.check_password_hash(user['password'], password):
        # NOTE(review): the JWT payload embeds the whole request body,
        # including the plaintext password — consider encoding only the email.
        token = encode(req_body, os.environ.get('SECRET'), algorithm='HS256')
        mongo.db.logged_in_users.insert_one({"email": email, "token": token})
        # The original printed the decoded token (and thus the password) to
        # stdout; that debug leak is removed.
        resp = jsonify({'token': token})
        resp.headers['Access-Control-Allow-Origin'] = 'http://localhost:4000'
        return resp, 200
    resp = jsonify({'message': "Log in Failed"})
    resp.headers['Access-Control-Allow-Origin'] = 'http://localhost:4000'
    return resp, 400
def logout(my_email):
    ''' logout user endpoint '''
    # Token arrives as "Authorization: Bearer <token>".
    token = request.headers['Authorization'].split()[1]
    # Deleting the (email, token) pair invalidates the session server-side.
    db_response = mongo.db.logged_in_users.delete_one({'email': my_email, 'token': token})
    if db_response.deleted_count == 1:
        resp = jsonify({'message': "Successfully logged out"})
        resp.headers['Access-Control-Allow-Origin'] = 'http://localhost:4000'
        return resp, 200
    resp = jsonify({'message': "Log out Failed"})
    resp.headers['Access-Control-Allow-Origin'] = 'http://localhost:4000'
    return resp, 400
def register():
    ''' register user endpoint.

    Stores the user in reg_users with a bcrypt-hashed password, then mirrors
    the account into the app-logic service.
    '''
    req_body = request.get_json()
    name, email, password = req_body['name'], req_body['email'], req_body['password']
    # The hasher was redacted upstream ("<PASSWORD>"); flask_bcrypt is the
    # module-level hasher that login() checks against.
    password_hash = flask_bcrypt.generate_password_hash(password)
    # Insert registered user only if the email is not taken.
    if mongo.db.reg_users.find_one({'email': email}):
        resp = jsonify({'message': "Failed to registered user:Email already in use!!"})
        resp.headers['Access-Control-Allow-Origin'] = 'http://localhost:4000'
        return resp, 400
    mongo.db.reg_users.insert_one({"name": name, "email": email, "password": password_hash})
    URL = "http://app_logic_service:4010/api/users/add"
    data = {"email": email, "name": name}
    # requests sets Content-Type automatically for json=; the old 'headers'
    # dict was built but never passed, so it is dropped.
    r = requests.post(URL, json=data)
    if r.status_code != 200:
        resp = jsonify({'message': "Failed to registered user"})
        resp.headers['Access-Control-Allow-Origin'] = 'http://localhost:4000'
        return resp, 400
    resp = jsonify({'message': "Successfully registered user"})
    resp.headers['Access-Control-Allow-Origin'] = 'http://localhost:4000'
    return resp, 200
def authenticate():
    ''' auth request endpoint.

    Decodes the bearer token and confirms the (email, token) pair is still
    present in logged_in_users, i.e. the session was not logged out.
    '''
    # Get token from "Authorization: Bearer <token>" part of header
    token = request.headers['Authorization'].split()[1]
    email = decode(token, os.environ.get('SECRET'), algorithms=['HS256'])['email']
    logged_in_user = mongo.db.logged_in_users.find_one({'email': email, 'token': token})
    if logged_in_user:
        resp = jsonify({'message': "Successfully authenticated user"})
        resp.headers['Access-Control-Allow-Origin'] = '*'  # 'http://localhost:4000'
        return resp, 200
    resp = jsonify({'message': "Failed to authenticate user"})
    resp.headers['Access-Control-Allow-Origin'] = '*'  # 'http://localhost:4000'
    return resp, 400
if __name__ == '__main__':
app.run(os.environ.get('PORT')) | auth_service/index.py | import os
import sys
import requests
import json
from flask import jsonify, request, make_response, send_from_directory
from kazoo import client as kz_client
from flask import request
from flask_pymongo import PyMongo
from flask_jwt_extended import (create_access_token, create_refresh_token,
jwt_required, jwt_refresh_token_required, get_jwt_identity,get_jwt_claims)
from app import app
from flask_jwt_extended import JWTManager
from flask_bcrypt import Bcrypt
import logging
ROOT_PATH = os.path.dirname(os.path.realpath(__file__))
os.environ.update({'ROOT_PATH': ROOT_PATH})
sys.path.append(os.path.join(ROOT_PATH, 'modules'))
import connexion
from connexion import NoContent
from jwt import(encode,decode)
# Port variable to run the server on.
PORT = os.environ.get('PORT')
flask_bcrypt = Bcrypt(app.app)
jwt = JWTManager(app.app)
# Docker: the Mongo connection string is injected via the DB environment variable.
app.app.config['MONGO_URI'] = os.environ.get('DB')
mongo = PyMongo(app.app)
# Zookeeper path under which live auth instances register themselves.
NODE_PATH = "/auth"
# NOTE(review): 'ZK' looks like a placeholder host string — confirm it resolves
# (e.g. a Docker service name) or should come from the environment.
kz = kz_client.KazooClient('ZK')
def my_listener(state):
    """Print each Zookeeper connection-state transition."""
    if state == kz_client.KazooState.LOST:
        # Session expired: ephemeral registrations are gone.
        print("State: LOST!")
        return
    if state == kz_client.KazooState.SUSPENDED:
        # Temporarily disconnected from the ensemble.
        print("State: SUSPENDED!")
        return
    print("State: CONNECTED!")
    print("END OF ELSE!")
def make_zk_node():
    """Best-effort creation of the /auth parent node plus an ephemeral child named after PORT.

    Both steps are wrapped individually: the parent usually exists already,
    and a failure there must not prevent registering this instance.
    """
    try:
        print("In making parent_node")
        kz.ensure_path('/')
        parent_node = kz.create(NODE_PATH, b"root")
        print(parent_node)
        print("Try making parent_node: Success!")
    except Exception as e:
        # Include the exception so operators can tell "already exists"
        # apart from real failures (the original discarded it).
        print("Try making parent_node: Exception!", e)
    try:
        print("In making child_node")
        kz.ensure_path(NODE_PATH)
        # Ephemeral: the node vanishes when this session ends, which is how
        # peers discover which instances are alive.
        app_logic_node = kz.create(NODE_PATH + "/" + PORT, ephemeral=True, value=b"a value")
        print(app_logic_node)
        print("Try making child_node: Success!")
    except Exception as e:
        print("Try making child_node: Exception!", e)
# Wire up state logging, connect to Zookeeper, then register this instance.
logging.basicConfig()
kz.add_listener(my_listener)
kz.start(timeout=60)
make_zk_node()
def decode_token(token):
    ''' Work-around to x-bearerInfoFunction required by connexion '''
    # connexion only needs a dict back; no validation happens here.
    return dict(token=token)
# NOTE(review): the decorator was redacted upstream ("<EMAIL>.route"); restored
# as the Flask/connexion-style route registration — confirm against the original.
@app.route('/auth', methods=['POST'])
def login():
    ''' auth login endpoint.

    Verifies the bcrypt-hashed password for the posted email, stores a JWT in
    logged_in_users and returns it; 400 on bad credentials.
    '''
    req_body = request.get_json()
    email, password = req_body['email'], req_body['password']
    user = mongo.db.reg_users.find_one({'email': email})
    if user and flask_bcrypt.check_password_hash(user['password'], password):
        # NOTE(review): the JWT payload embeds the whole request body,
        # including the plaintext password — consider encoding only the email.
        token = encode(req_body, os.environ.get('SECRET'), algorithm='HS256')
        mongo.db.logged_in_users.insert_one({"email": email, "token": token})
        # The original printed the decoded token (and thus the password) to
        # stdout; that debug leak is removed.
        resp = jsonify({'token': token})
        resp.headers['Access-Control-Allow-Origin'] = 'http://localhost:4000'
        return resp, 200
    resp = jsonify({'message': "Log in Failed"})
    resp.headers['Access-Control-Allow-Origin'] = 'http://localhost:4000'
    return resp, 400
def logout(my_email):
    ''' logout user endpoint '''
    # Token arrives as "Authorization: Bearer <token>".
    token = request.headers['Authorization'].split()[1]
    # Deleting the (email, token) pair invalidates the session server-side.
    db_response = mongo.db.logged_in_users.delete_one({'email': my_email, 'token': token})
    if db_response.deleted_count == 1:
        resp = jsonify({'message': "Successfully logged out"})
        resp.headers['Access-Control-Allow-Origin'] = 'http://localhost:4000'
        return resp, 200
    resp = jsonify({'message': "Log out Failed"})
    resp.headers['Access-Control-Allow-Origin'] = 'http://localhost:4000'
    return resp, 400
def register():
    ''' register user endpoint.

    Stores the user in reg_users with a bcrypt-hashed password, then mirrors
    the account into the app-logic service.
    '''
    req_body = request.get_json()
    name, email, password = req_body['name'], req_body['email'], req_body['password']
    # The hasher was redacted upstream ("<PASSWORD>"); flask_bcrypt is the
    # module-level hasher that login() checks against.
    password_hash = flask_bcrypt.generate_password_hash(password)
    # Insert registered user only if the email is not taken.
    if mongo.db.reg_users.find_one({'email': email}):
        resp = jsonify({'message': "Failed to registered user:Email already in use!!"})
        resp.headers['Access-Control-Allow-Origin'] = 'http://localhost:4000'
        return resp, 400
    mongo.db.reg_users.insert_one({"name": name, "email": email, "password": password_hash})
    URL = "http://app_logic_service:4010/api/users/add"
    data = {"email": email, "name": name}
    # requests sets Content-Type automatically for json=; the old 'headers'
    # dict was built but never passed, so it is dropped.
    r = requests.post(URL, json=data)
    if r.status_code != 200:
        resp = jsonify({'message': "Failed to registered user"})
        resp.headers['Access-Control-Allow-Origin'] = 'http://localhost:4000'
        return resp, 400
    resp = jsonify({'message': "Successfully registered user"})
    resp.headers['Access-Control-Allow-Origin'] = 'http://localhost:4000'
    return resp, 200
def authenticate():
    ''' auth request endpoint.

    Decodes the bearer token and confirms the (email, token) pair is still
    present in logged_in_users, i.e. the session was not logged out.
    '''
    # Get token from "Authorization: Bearer <token>" part of header
    token = request.headers['Authorization'].split()[1]
    email = decode(token, os.environ.get('SECRET'), algorithms=['HS256'])['email']
    logged_in_user = mongo.db.logged_in_users.find_one({'email': email, 'token': token})
    if logged_in_user:
        resp = jsonify({'message': "Successfully authenticated user"})
        resp.headers['Access-Control-Allow-Origin'] = '*'  # 'http://localhost:4000'
        return resp, 200
    resp = jsonify({'message': "Failed to authenticate user"})
    resp.headers['Access-Control-Allow-Origin'] = '*'  # 'http://localhost:4000'
    return resp, 400
if __name__ == '__main__':
app.run(os.environ.get('PORT')) | 0.191706 | 0.046899 |
import weakref
from AppKit import *
from vanillaBase import VanillaBaseObject, _breakCycles
from nsSubclasses import getNSSubclass
# Maps vanilla edge names onto the AppKit NSRectEdge constants NSPopover expects.
_edgeMap = {
    "left": NSMinXEdge,
    "right": NSMaxXEdge,
    "top": NSMinYEdge,
    "bottom": NSMaxYEdge
}
# Older PyObjC/OS X releases do not expose the NSPopoverBehavior constants;
# fall back to their raw values so the module still imports there.
try:
    NSPopoverBehaviorApplicationDefined
except NameError:
    NSPopoverBehaviorApplicationDefined = 0
    NSPopoverBehaviorTransient = 1
    NSPopoverBehaviorSemitransient = 2
# Maps vanilla behavior names onto the NSPopoverBehavior constants.
_behaviorMap = {
    "applicationDefined": NSPopoverBehaviorApplicationDefined,
    "transient": NSPopoverBehaviorTransient,
    "semitransient": NSPopoverBehaviorSemitransient
}
class VanillaPopoverContentView(NSView):
    # Lets vanilla containers treat the popover's view like any other content view.
    def _getContentView(self):
        return self
class VanillaPopoverDelegate(NSObject):
    # NSPopover delegate that forwards show/close notifications to the vanilla
    # wrapper's bindings. vanillaWrapper is a weakref set by Popover.__init__.
    def popoverWillShow_(self, notification):
        self.vanillaWrapper()._alertBindings("will show")

    def popoverDidShow_(self, notification):
        self.vanillaWrapper()._alertBindings("did show")

    def popoverWillClose_(self, notification):
        self.vanillaWrapper()._alertBindings("will close")

    def popoverDidClose_(self, notification):
        self.vanillaWrapper()._alertBindings("did close")
class Popover(VanillaBaseObject):

    """
    A popover capable of containing controls.

    **size** Tuple of form *(width, height)* representing the size of the content
    in the popover.

    **parentView** The parent view that the popover should pop out from. This can be either
    a vanilla object or an instance of NSView or NSView subclass.

    **preferredEdge** The edge of the parent view that you want the popover
    to pop out from. These are the options:

    +------------+
    | *"left"*   |
    +------------+
    | *"right"*  |
    +------------+
    | *"top"*    |
    +------------+
    | *"bottom"* |
    +------------+

    **behavior** The desired behavior of the popover. These are the options:

    +------------------------+-----------------------------------------------------+
    | *"applicationDefined"* | Corresponds to NSPopoverBehaviorApplicationDefined. |
    +------------------------+-----------------------------------------------------+
    | *"transient"*          | Corresponds to NSPopoverBehaviorTransient.          |
    +------------------------+-----------------------------------------------------+
    | *"semitransient"*      | Corresponds to NSPopoverBehaviorSemitransient.      |
    +------------------------+-----------------------------------------------------+
    """

    nsPopoverClass = NSPopover
    contentViewClass = VanillaPopoverContentView
    contentViewControllerClass = NSViewController

    def __init__(self, size, parentView=None, preferredEdge="top", behavior="semitransient"):
        if isinstance(parentView, VanillaBaseObject):
            parentView = parentView._getContentView()
        self._parentView = parentView
        self._preferredEdge = preferredEdge
        # event name -> list of callbacks; initialized here so bind() does
        # not raise AttributeError before the first event is registered.
        self._bindings = {}
        # content view and controller
        self._nsObject = getNSSubclass(self.contentViewClass).alloc().initWithFrame_(((0, 0), size))
        self._contentViewController = self.contentViewControllerClass.alloc().init()
        self._contentViewController.setView_(self._nsObject)
        # popover
        cls = getNSSubclass(self.nsPopoverClass)
        self._popover = cls.alloc().init()
        self._popover.setContentViewController_(self._contentViewController)
        self._popover.setBehavior_(_behaviorMap[behavior])
        # delegate; a weakref avoids a retain cycle between delegate and self
        self._delegate = VanillaPopoverDelegate.alloc().init()
        self._delegate.vanillaWrapper = weakref.ref(self)
        self._popover.setDelegate_(self._delegate)

    def __del__(self):
        self._breakCycles()

    def _breakCycles(self):
        super(Popover, self)._breakCycles()
        view = self._getContentView()
        if view is not None:
            _breakCycles(view)
        self._contentViewController = None
        self._popover = None
        self._parentView = None
        self._delegate = None

    def open(self, parentView=None, preferredEdge=None, relativeRect=None):
        """
        Open the popover. If desired, the **parentView** may be specified.
        If not, the values assigned during init will be used. Additionally,
        a rect of form (x, y, width, height) may be specified to indicate
        where the popover should pop out from. If not provided, the parent
        view's bounds will be used.
        """
        if isinstance(parentView, VanillaBaseObject):
            parentView = parentView._getContentView()
        if parentView is None:
            parentView = self._parentView
        if relativeRect is not None:
            if not isinstance(relativeRect, NSRect):
                x, y, w, h = relativeRect
                relativeRect = NSMakeRect(x, y, w, h)
        else:
            # NSZeroRect makes NSPopover use the positioning view's bounds.
            relativeRect = NSZeroRect
        if preferredEdge is None:
            preferredEdge = self._preferredEdge
        preferredEdge = _edgeMap[preferredEdge]
        self._popover.showRelativeToRect_ofView_preferredEdge_(relativeRect, parentView, preferredEdge)

    def close(self):
        """
        Close the popover.

        Once a popover has been closed it can not be re-opened.
        """
        self._popover.close()

    def resize(self, width, height):
        """
        Change the size of the popover to **width** and **height**.
        """
        self._popover.setContentSize_((width, height))

    def bind(self, event, callback):
        """
        Bind a callback to an event.

        **event** A string representing the desired event. The options are:

        +----------------+-----------------------------------------------+
        | *"will show"*  | Called immediately before the popover shows.  |
        +----------------+-----------------------------------------------+
        | *"did show"*   | Called immediately after the popover shows.   |
        +----------------+-----------------------------------------------+
        | *"will close"* | Called immediately before the popover closes. |
        +----------------+-----------------------------------------------+
        | *"did close"*  | Called immediately after the popover closes.  |
        +----------------+-----------------------------------------------+
        """
        if event not in self._bindings:
            self._bindings[event] = []
        self._bindings[event].append(callback)

    def unbind(self, event, callback):
        """
        Unbind a callback from an event.

        **event** A string representing the desired event.
        Refer to *bind* for the options.

        **callback** The callback that has been bound to the event.
        """
        self._bindings[event].remove(callback)

    def _alertBindings(self, key):
        # Invoke every callback registered for *key*. The original returned
        # after the first callback, silently dropping additional bindings
        # (its own XXX comment flagged exactly this).
        if hasattr(self, "_bindings"):
            for callback in self._bindings.get(key, []):
                callback(self)
from AppKit import *
from vanillaBase import VanillaBaseObject, _breakCycles
from nsSubclasses import getNSSubclass
# Maps vanilla edge names onto the AppKit NSRectEdge constants NSPopover expects.
_edgeMap = {
    "left": NSMinXEdge,
    "right": NSMaxXEdge,
    "top": NSMinYEdge,
    "bottom": NSMaxYEdge
}
# Older PyObjC/OS X releases do not expose the NSPopoverBehavior constants;
# fall back to their raw values so the module still imports there.
try:
    NSPopoverBehaviorApplicationDefined
except NameError:
    NSPopoverBehaviorApplicationDefined = 0
    NSPopoverBehaviorTransient = 1
    NSPopoverBehaviorSemitransient = 2
# Maps vanilla behavior names onto the NSPopoverBehavior constants.
_behaviorMap = {
    "applicationDefined": NSPopoverBehaviorApplicationDefined,
    "transient": NSPopoverBehaviorTransient,
    "semitransient": NSPopoverBehaviorSemitransient
}
class VanillaPopoverContentView(NSView):
    # Lets vanilla containers treat the popover's view like any other content view.
    def _getContentView(self):
        return self
class VanillaPopoverDelegate(NSObject):
    # NSPopover delegate that forwards show/close notifications to the vanilla
    # wrapper's bindings. vanillaWrapper is a weakref set by Popover.__init__.
    def popoverWillShow_(self, notification):
        self.vanillaWrapper()._alertBindings("will show")

    def popoverDidShow_(self, notification):
        self.vanillaWrapper()._alertBindings("did show")

    def popoverWillClose_(self, notification):
        self.vanillaWrapper()._alertBindings("will close")

    def popoverDidClose_(self, notification):
        self.vanillaWrapper()._alertBindings("did close")
class Popover(VanillaBaseObject):
"""
A popover capable of containing controls.
**size** Tuple of form *(width, height)* representing the size of the content
in the popover.
**size** The parent view that the popover should pop out from. This can be either
a vanilla object or an instance of NSView or NSView subclass.
**preferredEdge** The edge of the parent view that you want the popover
to pop out from. These are the options:
+------------+
| *"left"* |
+------------+
| *"right"* |
+------------+
| *"top"* |
+------------+
| *"bottom"* |
+------------+
**behavior** The desired behavior of the popover. These are the options:
+------------------------+-----------------------------------------------------+
| *"applicationDefined"* | Corresponds to NSPopoverBehaviorApplicationDefined. |
+------------------------+-----------------------------------------------------+
| *"transient"* | Corresponds to NSPopoverBehaviorTransient. |
+------------------------+-----------------------------------------------------+
| *"semitransient"* | Corresponds to NSPopoverBehaviorSemitransient. |
+------------------------+-----------------------------------------------------+
"""
nsPopoverClass = NSPopover
contentViewClass = VanillaPopoverContentView
contentViewControllerClass = NSViewController
def __init__(self, size, parentView=None, preferredEdge="top", behavior="semitransient"):
if isinstance(parentView, VanillaBaseObject):
parentView = parentView._getContentView()
self._parentView = parentView
self._preferredEdge = preferredEdge
# content view and controller
self._nsObject = getNSSubclass(self.contentViewClass).alloc().initWithFrame_(((0, 0), size))
self._contentViewController = self.contentViewControllerClass.alloc().init()
self._contentViewController.setView_(self._nsObject)
# popover
cls = getNSSubclass(self.nsPopoverClass)
self._popover = cls.alloc().init()
self._popover.setContentViewController_(self._contentViewController)
self._popover.setBehavior_(_behaviorMap[behavior])
# delegate
self._delegate = VanillaPopoverDelegate.alloc().init()
self._delegate.vanillaWrapper = weakref.ref(self)
self._popover.setDelegate_(self._delegate)
def __del__(self):
self._breakCycles()
def _breakCycles(self):
super(Popover, self)._breakCycles()
view = self._getContentView()
if view is not None:
_breakCycles(view)
self._contentViewController = None
self._popover = None
self._parentView = None
self._delegate = None
def open(self, parentView=None, preferredEdge=None, relativeRect=None):
"""
Open the popover. If desired, the **parentView** may be specified.
If not, the values assigned during init will be used. Additionally,
a rect of form (x, y, width, height) may be specified to indicate
where the popover shoulw pop out from. If not provided, the parent
view's bounds will be used.
"""
if isinstance(parentView, VanillaBaseObject):
parentView = parentView._getContentView()
if parentView is None:
parentView = self._parentView
if relativeRect is not None:
if not isinstance(relativeRect, NSRect):
x, y, w, h = relativeRect
relativeRect = NSMakeRect(x, y, w, h)
else:
relativeRect = NSZeroRect
if preferredEdge is None:
preferredEdge = self._preferredEdge
preferredEdge = _edgeMap[preferredEdge]
self._popover.showRelativeToRect_ofView_preferredEdge_(relativeRect, parentView, preferredEdge)
def close(self):
"""
Close the popover.
Once a popover has been closed it can not be re-opened.
"""
self._popover.close()
def resize(self, width, height):
"""
Change the size of the popover to **width** and **height**.
"""
self._popover.setContentSize_((width, height))
def bind(self, event, callback):
    """
    Bind a callback to an event.

    **event** A string representing the desired event. The options are:

    +----------------+-----------------------------------------------+
    | *"will show"*  | Called immediately before the popover shows.  |
    +----------------+-----------------------------------------------+
    | *"did show"*   | Called immediately after the popover shows.   |
    +----------------+-----------------------------------------------+
    | *"will close"* | Called immediately before the popover closes. |
    +----------------+-----------------------------------------------+
    | *"did close"*  | Called immediately after the popover closes.  |
    +----------------+-----------------------------------------------+

    **callback** A callable taking a single argument: this object.
    """
    # setdefault creates the callback list on the first bind for an event;
    # it replaces the previous "if event not in ..." two-step.
    self._bindings.setdefault(event, []).append(callback)
def unbind(self, event, callback):
    """
    Unbind a callback from an event.

    **event** A string representing the desired event.
    Refer to *bind* for the options.

    **callback** The callback that has been bound to the event.

    Raises KeyError if nothing was ever bound to *event*, and
    ValueError if *callback* is not bound to *event*.
    """
    self._bindings[event].remove(callback)
def _alertBindings(self, key):
if hasattr(self, "_bindings"):
if key in self._bindings:
for callback in self._bindings[key]:
# XXX why return? there could be more than one binding.
return callback(self) | 0.654895 | 0.301626 |
__author__ = 'schmidm'
import random
def make_html_file(examples, filename):
    """
    Visualize the attention of a model in an HTML file.

    :param examples: iterable of 6-tuples
        (question, context_words, context_attention, answers,
         answers_attention, correct_answer_index).
    :param filename: path of the HTML file to write.
    :return: None
    """
    def attention_to_rgb(attention):
        # Map attention in [0, 1] to a red shade: 0 -> white, 1 -> pure red.
        red = 255
        green = int(255 * (1 - attention))
        blue = int(255 * (1 - attention))
        return 'rgb(%s,%s,%s)' % (str(red), str(green), str(blue))

    # 'with' guarantees the handle is closed even if a write fails
    # (the original left the file open on exceptions).
    with open(filename, 'w') as out_file:
        out_file.write('''<!DOCTYPE html>
<head>
<link rel="stylesheet" href="//code.jquery.com/ui/1.10.4/themes/smoothness/jquery-ui.css" />
<script src="http://ajax.googleapis.com/ajax/libs/jquery/2.1.3/jquery.min.js"></script>
<script src="http://code.jquery.com/ui/1.10.4/jquery-ui.js"></script>
</head><html>
<body><h1>Awesome Network Output</h1>
<script>
function highlight(x)
{
//alert(x)
$("." + x).addClass('foo')
//$(".foo").css({'font-weight': 600})
$(".foo").animate({
color: "blue"
}, {duration: 200} );
}
function unhighlight()
{
$(".foo").css({'font-weight': "normal"})
$(".foo").animate({
color: "black"
}, {duration: 200} );
$(".foo").removeClass('foo')
}
</script>
''')
        # 1.0 sort answers by how well we predicted them - the fifth entry
        # is the correct answer index
        examples = sorted(examples, key=lambda example: example[5])
        for example_index, example in enumerate(examples):
            question, context_words, context_attention, answers, answers_attention, correct_answer_index = example
            # NOTE(review): the heading deliberately shows the correct
            # answer index (not the enumeration index) — confirm intended.
            out_file.write("<h2>%(example_index)s</h2>" % {'example_index': correct_answer_index})
            out_file.write("<p>")
            for word, attention in zip(context_words, context_attention):
                # Underline context words that are candidate answers.
                if word in answers:
                    out_file.write('<u>')
                out_file.write('<mark class="g%(class)s" cursor="pointer" title="%(pointer_text)s" style="background-color:%(rgb)s;">%(word)s </mark>' %
                               {'pointer_text': str(attention), 'rgb': attention_to_rgb(attention), 'word': word, 'class': str(example_index) + "-" + word.replace("@", "")})
                if word in answers:
                    out_file.write('</u>')
            out_file.write("</p>")
            out_file.write('<p>%(question)s</p>' % {'question': question})
            out_file.write("<p>")
            for i, (answer, attention) in enumerate(zip(answers, answers_attention)):
                # Strip angle brackets so answers don't break the markup.
                answer = answer.replace("<", "").replace(">", "")
                out_file.write('<mark onmouseleave=unhighlight() onmouseover=highlight("g%(class)s") cursor="pointer" title="%(pointer_text)s" style="background-color:%(rgb)s;">%(index)s ) %(word)s; </mark>' %
                               {'index': i, 'pointer_text': str(attention), 'rgb': attention_to_rgb(attention), 'word': answer, 'class': str(example_index) + "-" + answer.replace("@", "")})
            out_file.write("</p>")
        out_file.write('</body></html>')
if __name__ == "__main__":
    # Render a few random demo examples to output.html.
    examples = []
    for i in range(4):
        question = "Who's the best?"
        context = 'Ruda met Alice . Bob met Ruda . Alice met Ruda .'.split(' ')
        # Use list comprehensions instead of map(): on Python 3, map returns
        # a single-use iterator, which breaks if the data is read twice.
        context_attention = [random.uniform(0, 1) for _ in context]
        answers = '<NAME> Alice'.split(' ')
        answers_prob = [random.uniform(0, 1) for _ in answers]
        # NOTE(review): uniform() yields a float; a true answer index would
        # be an int, but it is only used for sorting/display here.
        correct_answer_index = random.uniform(0, 10)
        examples.append((question, context, context_attention, answers, answers_prob, correct_answer_index))
    make_html_file(examples, 'output.html')
import random
def make_html_file(examples, filename):
"""
Visualizes attention of a model in a HTML file.
:param examples:
:param filename:
:return:
"""
def attention_to_rgb(attention):
# red = int(attention * 255)
# green = int(255 - red)
red = 255
green = int(255 * (1-attention))
blue = int(255 * (1-attention))
return 'rgb(%s,%s,%s)' % (str(red), str(green), blue)
out_file = open(filename, 'w')
out_file.write('''<!DOCTYPE html>
<head>
<link rel="stylesheet" href="//code.jquery.com/ui/1.10.4/themes/smoothness/jquery-ui.css" />
<script src="http://ajax.googleapis.com/ajax/libs/jquery/2.1.3/jquery.min.js"></script>
<script src="http://code.jquery.com/ui/1.10.4/jquery-ui.js"></script>
</head><html>
<body><h1>Awesome Network Output</h1>
<script>
function highlight(x)
{
//alert(x)
$("." + x).addClass('foo')
//$(".foo").css({'font-weight': 600})
$(".foo").animate({
color: "blue"
}, {duration: 200} );
}
function unhighlight()
{
$(".foo").css({'font-weight': "normal"})
$(".foo").animate({
color: "black"
}, {duration: 200} );
$(".foo").removeClass('foo')
}
</script>
''')
#1.0 sort answers by how well we predicted them - the fifth entry is the correct answer index
examples = sorted(examples, key=lambda example: example[5])
for example_index, example in enumerate(examples):
question, context_words, context_attention, answers, answers_attention, correct_answer_idnex = example
out_file.write("<h2>%(example_index)s</h2>" % {'example_index' : correct_answer_idnex})
out_file.write("<p>")
for word, attention in zip(context_words, context_attention):
if(word in answers):
out_file.write('<u>')
out_file.write('<mark class="g%(class)s" cursor="pointer" title="%(pointer_text)s" style="background-color:%(rgb)s;">%(word)s </mark>' %
{'pointer_text' : str(attention), 'rgb' : attention_to_rgb(attention), 'word' : word, 'class' : str(example_index) + "-" + word.replace("@", "")})
#str(example_index) + "." + word
if(word in answers):
out_file.write('</u>')
out_file.write("</p>")
out_file.write('<p>%(question)s</p>' % {'question' : question})
out_file.write("<p>")
for i, (answer, attention) in enumerate(zip(answers, answers_attention)):
answer = answer.replace("<", "").replace(">", "")
out_file.write('<mark onmouseleave=unhighlight() onmouseover=highlight("g%(class)s") cursor="pointer" title="%(pointer_text)s" style="background-color:%(rgb)s;">%(index)s ) %(word)s; </mark>' %
{'index' : i, 'pointer_text' : str(attention), 'rgb' : attention_to_rgb(attention), 'word' : answer, 'class' : str(example_index) + "-" + answer.replace("@", "")})
out_file.write("</p>")
out_file.write('</body></html>')
out_file.close()
if __name__ == "__main__":
examples = []
for i in range(4):
question = "Who's the best?"
context = 'Ruda met Alice . Bob met Ruda . Alice met Ruda .'.split(' ')
context_attention = map(lambda x : random.uniform(0, 1), context)
answers = '<NAME> Alice'.split(' ')
answers_prob = map(lambda x : random.uniform(0, 1), answers)
correct_answer_idnex = random.uniform(0, 10)
examples.append((question, context, context_attention, answers, answers_prob, correct_answer_idnex))
make_html_file(examples, 'output.html') | 0.480235 | 0.181934 |
from enum import Enum
from collections import deque
import operator as ops
from helpers import read_input
class State(Enum):
    # Execution states of the Intcode machine (see Computer.run).
    Running = 1  # executing opcodes
    Waiting = 2  # blocked on an empty input queue (opcode 3)
    Halted = 3   # opcode 99 reached
class Computer:
    """An Intcode virtual machine (Advent of Code 2019 instruction set)."""

    def __init__(self, instructions, ram_size=4096):
        # RAM = the program followed by zero-filled scratch space.
        self.ram = self.initialize_memory(instructions, ram_size)
        self.ip = 0                  # instruction pointer
        self.rp = 0                  # relative base (adjusted by opcode 9)
        self.state = State.Running
        self.in_queue = deque([])    # pending input values (opcode 3)
        self.out_queue = deque([])   # emitted output values (opcode 4)

    def initialize_memory(self, instructions, ram_size):
        """Return zero-filled RAM of ram_size with the program copied in."""
        ram = [0 for _ in range(ram_size)]
        ram[:len(instructions)] = instructions[:]
        return ram

    def set_positions(self, values):
        """Write (position, value) pairs directly into RAM."""
        for (pos, val) in values:
            self.ram[pos] = val

    def read_ram(self, pos):
        """Return the value stored at RAM position *pos*."""
        return self.ram[pos]

    def set_ram(self, pos, val):
        """Store *val* at RAM position *pos*."""
        self.ram[pos] = val

    def read_param(self, param):
        """Return the RAM address that 1-based parameter *param* refers to.

        The mode digit is decoded from the current opcode:
        0 = position (parameter holds an address),
        1 = immediate (address of the parameter cell itself),
        2 = relative (parameter is an offset from the relative base rp).
        Callers index self.ram with the returned address.
        """
        digit = 10**(param+1)
        mode = (self.ram[self.ip] // digit) % 10
        pos = self.ip + param
        pos_val = self.ram[pos]
        return [pos_val, pos, self.rp+pos_val][mode]

    def receive(self, value):
        """Queue *value* as input for opcode 3."""
        self.in_queue.append(value)

    def binary_operation(self, op):
        """Opcodes 1 (add), 2 (mul), 7 (less-than), 8 (equals)."""
        noun = self.read_param(1)
        verb = self.read_param(2)
        dest = self.read_param(3)
        case = {1: lambda: int(self.ram[noun] + self.ram[verb]),
                2: lambda: int(self.ram[noun] * self.ram[verb]),
                7: lambda: int(self.ram[noun] < self.ram[verb]),
                8: lambda: int(self.ram[noun] == self.ram[verb])}
        self.ram[dest] = case[op]()
        self.ip += 4

    def conditional_jumps(self, op):
        """Opcodes 5 (jump-if-true) and 6 (jump-if-false)."""
        noun = self.read_param(1)
        verb = self.read_param(2)
        neq = ops.ne if op == 5 else ops.eq
        self.ip = self.ram[verb] if neq(self.ram[noun], 0) else self.ip+3

    def adjust_rp(self):
        """Opcode 9: move the relative base by the parameter's value."""
        noun = self.read_param(1)
        self.rp += self.ram[noun]
        self.ip += 2

    def read_from_in_queue(self):
        """Opcode 3: pop one input into RAM, or block in Waiting state."""
        noun = self.read_param(1)
        if self.in_queue:
            self.ram[noun] = self.in_queue.popleft()
            self.state = State.Running
            self.ip += 2
        else:
            # No input available: ip is NOT advanced, so the instruction
            # re-executes once input arrives and run() is called again.
            self.state = State.Waiting

    def put_to_out_queue(self):
        """Opcode 4: append one value to the output queue."""
        noun = self.read_param(1)
        self.out_queue.append(self.ram[noun])
        self.ip += 2

    def get_from_out_queue(self):
        """Pop and return the oldest output value."""
        return self.out_queue.popleft()

    def get_3_from_output(self):
        """Return a generator over the next three output values."""
        return (self.get_from_out_queue() for _ in range(3))

    def receive_run_output(self, value):
        """Feed one input, run until blocked/halted, return one output."""
        self.receive(value)
        self.run()
        return self.get_from_out_queue()

    def execute_opcode(self):
        """Decode and execute the instruction at ip (opcode = last 2 digits)."""
        op = self.ram[self.ip] % 100
        if op in {1, 2, 7, 8}:
            self.binary_operation(op)
        elif op == 3:
            self.read_from_in_queue()
        elif op == 4:
            self.put_to_out_queue()
        elif op in {5, 6}:
            self.conditional_jumps(op)
        elif op == 9:
            self.adjust_rp()
        elif op == 99:
            self.state = State.Halted
    def run(self):
        """Execute until the machine halts (99) or blocks on input (3)."""
        self.state = State.Running
        while self.state == State.Running:
            self.execute_opcode()
def read_intcode_input(filename):
    """Parse a comma-separated Intcode program file into a list of ints."""
    return read_input(filename, int, ',')
from collections import deque
import operator as ops
from helpers import read_input
class State(Enum):
Running = 1
Waiting = 2
Halted = 3
class Computer:
def __init__(self, instructions, ram_size=4096):
self.ram = self.initialize_memory(instructions, ram_size)
self.ip = 0
self.rp = 0
self.state = State.Running
self.in_queue = deque([])
self.out_queue = deque([])
def initialize_memory(self, instructions, ram_size):
ram = [0 for _ in range(ram_size)]
ram[:len(instructions)] = instructions[:]
return ram
def set_positions(self, values):
for (pos, val) in values:
self.ram[pos] = val
def read_ram(self, pos):
return self.ram[pos]
def set_ram(self, pos, val):
self.ram[pos] = val
def read_param(self, param):
digit = 10**(param+1)
mode = (self.ram[self.ip] // digit) % 10
pos = self.ip + param
pos_val = self.ram[pos]
return [pos_val, pos, self.rp+pos_val][mode]
def receive(self, value):
self.in_queue.append(value)
def binary_operation(self, op):
noun = self.read_param(1)
verb = self.read_param(2)
dest = self.read_param(3)
case = {1: lambda: int(self.ram[noun] + self.ram[verb]),
2: lambda: int(self.ram[noun] * self.ram[verb]),
7: lambda: int(self.ram[noun] < self.ram[verb]),
8: lambda: int(self.ram[noun] == self.ram[verb])}
self.ram[dest] = case[op]()
self.ip += 4
def conditional_jumps(self, op):
noun = self.read_param(1)
verb = self.read_param(2)
neq = ops.ne if op == 5 else ops.eq
self.ip = self.ram[verb] if neq(self.ram[noun], 0) else self.ip+3
def adjust_rp(self):
noun = self.read_param(1)
self.rp += self.ram[noun]
self.ip += 2
def read_from_in_queue(self):
noun = self.read_param(1)
if self.in_queue:
self.ram[noun] = self.in_queue.popleft()
self.state = State.Running
self.ip += 2
else:
self.state = State.Waiting
def put_to_out_queue(self):
noun = self.read_param(1)
self.out_queue.append(self.ram[noun])
self.ip += 2
def get_from_out_queue(self):
return self.out_queue.popleft()
def get_3_from_output(self):
return (self.get_from_out_queue() for _ in range(3))
def receive_run_output(self, value):
self.receive(value)
self.run()
return self.get_from_out_queue()
def execute_opcode(self):
op = self.ram[self.ip] % 100
if op in {1, 2, 7, 8}:
self.binary_operation(op)
elif op == 3:
self.read_from_in_queue()
elif op == 4:
self.put_to_out_queue()
elif op in {5, 6}:
self.conditional_jumps(op)
elif op == 9:
self.adjust_rp()
elif op == 99:
self.state = State.Halted
def run(self):
self.state = State.Running
while self.state == State.Running:
self.execute_opcode()
def read_intcode_input(filename):
return read_input(filename, int, ',') | 0.651244 | 0.356167 |
import logging
from logging.handlers import RotatingFileHandler
import os.path
from .utils import (
os_path_exists,
get_encoding,
check_path
)
class LogManager(object):
    """Simple log manager for youtube-dl.

    This class is mainly used to log the youtube-dl STDERR.

    Attributes:
        LOG_FILENAME (string): Filename of the log file.
        MAX_LOGSIZE (int): Maximum size(Bytes) of the log file.

    Args:
        config_path (string): Absolute path where LogManager should
            store the log file.

        add_time (boolean): If True LogManager will also log the time.
    """
    # NOTE: the original docstring also documented a TIME_TEMPLATE
    # attribute that no longer exists; that stale entry was removed.

    LOG_FILENAME = "log"
    MAX_LOGSIZE = 524288  # Bytes

    def __init__(self, config_path, add_time=False):
        self.config_path = config_path
        self.add_time = add_time
        self.log_file = os.path.join(config_path, self.LOG_FILENAME)
        self._encoding = get_encoding()
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.DEBUG)
        # Make sure the directory exists before the handler opens the file.
        check_path(self.config_path)
        self.handler = RotatingFileHandler(filename=self.log_file,
                                           maxBytes=LogManager.MAX_LOGSIZE,
                                           backupCount=5,
                                           encoding=self._encoding)
        fmt = "%(levelname)s-%(threadName)s-%(message)s"
        if self.add_time:
            fmt = "%(asctime)s-" + fmt
        self.handler.setFormatter(logging.Formatter(fmt=fmt))
        self.logger.addHandler(self.handler)

    def log_size(self):
        """Return log file size in Bytes."""
        if not os_path_exists(self.log_file):
            return 0
        return os.path.getsize(self.log_file)

    def clear(self):
        """Clear log file (the current file only, not rotated backups)."""
        with open(self.log_file, "w") as log:
            log.write("")

    def log(self, data):
        """Log data to the log file.

        Args:
            data (string): String to write to the log file.
        """
        self.logger.debug(str(data))
from logging.handlers import RotatingFileHandler
import os.path
from .utils import (
os_path_exists,
get_encoding,
check_path
)
class LogManager(object):
# noinspection PyUnresolvedReferences
"""Simple log manager for youtube-dl.
This class is mainly used to log the youtube-dl STDERR.
Attributes:
LOG_FILENAME (string): Filename of the log file.
TIME_TEMPLATE (string): Custom template to log the time.
MAX_LOGSIZE (int): Maximum size(Bytes) of the log file.
Args:
config_path (string): Absolute path where LogManager should
store the log file.
add_time (boolean): If True LogManager will also log the time.
"""
LOG_FILENAME = "log"
MAX_LOGSIZE = 524288 # Bytes
def __init__(self, config_path, add_time=False):
self.config_path = config_path
self.add_time = add_time
self.log_file = os.path.join(config_path, self.LOG_FILENAME)
self._encoding = get_encoding()
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.DEBUG)
check_path(self.config_path)
self.handler = RotatingFileHandler(filename=self.log_file,
maxBytes=LogManager.MAX_LOGSIZE,
backupCount=5,
encoding=self._encoding)
fmt = "%(levelname)s-%(threadName)s-%(message)s"
if self.add_time:
fmt = "%(asctime)s-" + fmt
self.handler.setFormatter(logging.Formatter(fmt=fmt))
self.logger.addHandler(self.handler)
def log_size(self):
"""Return log file size in Bytes. """
if not os_path_exists(self.log_file):
return 0
return os.path.getsize(self.log_file)
def clear(self):
"""Clear log file. """
with open(self.log_file, "w") as log:
log.write("")
def log(self, data):
"""Log data to the log file.
Args:
data (string): String to write to the log file.
"""
self.logger.debug(str(data)) | 0.672977 | 0.094177 |
from typing import List, Tuple
import json
import cv2 as cv
import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.preprocessing import LabelEncoder
from sklearn.random_projection import GaussianRandomProjection
from tornado.web import RequestHandler
from ..types import (
DataObject,
Label,
Status,
StatusType,
)
from .utils import get_image
def resize_LDA(imgs: np.ndarray,
               labels: np.ndarray,
               statuses: np.ndarray) -> Tuple[np.ndarray, List[str]]:
    """
    Extract features for each image by dimension reduction for the normalized image.
    The images are normalized by converting to grey image and resized to (8, 8).
    The dimension reduction is conducted with LDA.

    Args
    ----
    imgs : np.ndarray
        The images to extract features.
    labels : np.ndarray
        The partial labels.
    statuses : np.ndarray
        Per-image labeling status; only entries equal to
        StatusType.Labeled are used to fit the LDA.

    Returns
    -------
    X : np.ndarray
        The extracted feature values.
    feature_names : List[str]
        The names of features.

    Notes
    -----
    Variations:
    1. change the dimension reduction method (e.g., MDS, t-SNE, isomap)
    2. change the number of projected dimensions
    """
    # pylint: disable=invalid-name

    # normalized the images to gray scale 8 x 8
    h, w = 8, 8
    X_raw_normalized = []
    for img in imgs:
        # 2-D arrays are assumed to already be single-channel grayscale
        img_gray = img if len(img.shape) == 2 else cv.cvtColor(
            img, cv.COLOR_BGR2GRAY)
        img_resized = cv.resize(img_gray, (h, w), interpolation=cv.INTER_AREA)
        X_raw_normalized.append(img_resized)
    X_raw_normalized = np.array(X_raw_normalized)
    X_flatten = X_raw_normalized.reshape((-1, h * w))
    n_components = 5
    # Select only the samples that already have confirmed labels.
    mask_labeled = np.array([status == StatusType.Labeled
                             for status in statuses])
    X_labeled = X_flatten[mask_labeled]
    labels_labeled = labels[mask_labeled]
    categories = np.array([d for d in np.unique(labels_labeled)])
    labels_labeled = LabelEncoder().fit(categories).transform(labels_labeled)
    if len(labels_labeled) <= 1:
        # Too few labeled samples to fit LDA: fall back to an unsupervised
        # random projection of the raw pixels.
        n_components_actual = n_components
        reducer = GaussianRandomProjection(n_components=n_components_actual)
        X = reducer.fit_transform(X_flatten)
    else:
        # LDA yields at most (n_classes - 1) components, so cap accordingly.
        n_samples, n_features = X_flatten.shape
        n_categories = len(np.unique(labels_labeled))
        n_components_actual = min(n_samples, n_features,
                                  n_categories - 1, n_components)
        reducer = LinearDiscriminantAnalysis(n_components=n_components_actual)
        reducer.fit(X_labeled, labels_labeled)
        X = reducer.transform(X_flatten)
        if n_components > n_components_actual:
            # Zero-pad so the feature matrix always has n_components columns.
            zeros = np.zeros((n_samples, n_components -
                              n_components_actual), dtype=float)
            X = np.hstack((X, zeros))
    feature_names = [f'LDA[{i}]' for i in range(n_components)]
    return X, feature_names
def extract_features(data_objects: List[DataObject],
                     labels: np.ndarray,
                     statuses: np.ndarray,
                     ) -> Tuple[List[List[float]], List[str]]:
    """Load each data object's image and return (features, feature_names)
    computed by resize_LDA, with features as plain nested lists."""
    imgs = [get_image(data_object) for data_object in data_objects]
    X, feature_names = resize_LDA(imgs, labels, statuses)
    return X.tolist(), feature_names
class Handler(RequestHandler):
    """
    The handler for feature extraction - image LDA.
    """

    def post(self):
        """Compute LDA image features for the posted data objects."""
        self.set_header('Access-Control-Allow-Origin', '*')
        json_data = json.loads(self.request.body)

        # process input: (dataObjects, labels, statuses)
        data_objects: List[DataObject] = json_data['dataObjects']
        # dict.get() is the idiomatic spelling of "value if key present
        # else None" used by the original code.
        labels: List[Label] = json_data.get('labels')
        statuses: List[Status] = json_data.get('statuses')
        # NOTE(review): if 'labels'/'statuses' are absent, these
        # comprehensions raise TypeError on None — presumably clients
        # always send them; confirm before tightening validation.
        labels = np.array([d['category'] for d in labels], dtype=str)
        statuses = np.array([d['value'] for d in statuses], dtype=str)

        features, feature_names = extract_features(
            data_objects, labels, statuses)
        self.write({
            'features': features,
            'featureNames': feature_names,
        })
from typing import List, Tuple
import json
import cv2 as cv
import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.preprocessing import LabelEncoder
from sklearn.random_projection import GaussianRandomProjection
from tornado.web import RequestHandler
from ..types import (
DataObject,
Label,
Status,
StatusType,
)
from .utils import get_image
def resize_LDA(imgs: np.ndarray,
labels: np.ndarray,
statuses: np.ndarray) -> Tuple[np.ndarray, List[str]]:
"""
Extract features for each image by dimension reduction for the normalized image.
The images are normalized by converting to grey image and resized to (8, 8).
The dimension reduction is conducted with LDA.
Args
----
imgs : np.ndarray
The images to extract features.
labels : np.ndarray
The partial labels.
Returns
-------
X : np.ndarray
The extracted feature values.
feature_names : List[str]
The names of features.
Notes
-----
Variations:
1. change the dimension reduction method (e.g., MDS, t-SNE, isomap)
2. change the number of projected dimensions
"""
# pylint: disable=invalid-name
# normalized the images to gray scale 8 x 8
h, w = 8, 8
X_raw_normalized = []
for img in imgs:
img_gray = img if len(img.shape) == 2 else cv.cvtColor(
img, cv.COLOR_BGR2GRAY)
img_resized = cv.resize(img_gray, (h, w), interpolation=cv.INTER_AREA)
X_raw_normalized.append(img_resized)
X_raw_normalized = np.array(X_raw_normalized)
X_flatten = X_raw_normalized.reshape((-1, h * w))
n_components = 5
mask_labeled = np.array([status == StatusType.Labeled
for status in statuses])
X_labeled = X_flatten[mask_labeled]
labels_labeled = labels[mask_labeled]
categories = np.array([d for d in np.unique(labels_labeled)])
labels_labeled = LabelEncoder().fit(categories).transform(labels_labeled)
if len(labels_labeled) <= 1:
n_components_actual = n_components
reducer = GaussianRandomProjection(n_components=n_components_actual)
X = reducer.fit_transform(X_flatten)
else:
n_samples, n_features = X_flatten.shape
n_categories = len(np.unique(labels_labeled))
n_components_actual = min(n_samples, n_features,
n_categories - 1, n_components)
reducer = LinearDiscriminantAnalysis(n_components=n_components_actual)
reducer.fit(X_labeled, labels_labeled)
X = reducer.transform(X_flatten)
if n_components > n_components_actual:
zeros = np.zeros((n_samples, n_components -
n_components_actual), dtype=float)
X = np.hstack((X, zeros))
feature_names = [f'LDA[{i}]' for i in range(n_components)]
return X, feature_names
def extract_features(data_objects: List[DataObject],
labels: np.ndarray,
statuses: np.ndarray,
) -> Tuple[List[List[float]], List[str]]:
imgs = [get_image(data_object) for data_object in data_objects]
X, feature_names = resize_LDA(imgs, labels, statuses)
return X.tolist(), feature_names
class Handler(RequestHandler):
"""
The handler for feature extraction - image LDA.
"""
def post(self):
self.set_header('Access-Control-Allow-Origin', '*')
json_data = json.loads(self.request.body)
# process input: (dataObjects)
data_objects: List[DataObject] = json_data['dataObjects']
labels: List[Label] = json_data['labels'] if 'labels' in json_data else None
statuses: List[Status] = json_data['statuses'] if 'statuses' in json_data else None
labels = np.array([d['category'] for d in labels], dtype=str)
statuses = np.array([d['value'] for d in statuses], dtype=str)
features, feature_names = extract_features(
data_objects, labels, statuses)
self.write({
'features': features,
'featureNames': feature_names,
}) | 0.896778 | 0.581184 |
class Beautifier:
    """
    Class that creates HTML file (_filename) with simple w3-css style from
    input dict (_data). Example of input dict:
    { 'WI-FI keys':
        [
            {'ESSID': 'Burger-king', 'Pass': '<PASSWORD>'},
            {'ESSID': 'Neighboring Wi-Fi', 'Pass': '<PASSWORD>'},
            {'ESSID': 'MyWi-Fi', 'Pass': '<PASSWORD>!'},
        ],
      'Chrome passes':
        [
            {'URL': 'facebook.com', 'Login': 'destoyer228', 'Pass': '<PASSWORD>'},
            {'URL': 'github.com', 'Login': 'kovinevmv', 'Pass': '<PASSWORD>'},
        ]
    }
    """

    def __init__(self, _filename, _data):
        """ Create file with _filename and write not empty data """
        self.filename = _filename
        self.data = _data
        self.html_file = open(self.filename, 'wb')
        if self.data:
            self.write_data()
        else:
            # Nothing to write: still release the handle instead of
            # leaking it (the original left the file open forever).
            self.html_file.close()

    def create_head(self):
        """
        Write head of HTML. Added w3-css to avoid external
        dependency and work without Internet.
        """
        self.html_file.write(
            '<html><style type="text/css">.w3-table{table-layout: fixed;}.w3-third{width:33.33333%}.w3-table td,'
            '.w3-table th,.w3-table-all td,.w3-table-all th{padding:8px 8px;display:table-cell;text-align:left '
            ';vertical-align:top}.w3-table th:first-child,.w3-table td:first-child,.w3-table-all th:first-child'
            ',.w3-table-all td:first-child{padding-left:16px}h1,h2,h3,h4,h5,h6{font-family:"Segoe UI",Arial,san'
            's-serif;font-weight:400;margin:10px 0}html{box-sizing:border-box}*,*:before, *:after{box-sizing:in'
            'herit}html,body{font-family:Verdana,sans-serif;font-size:15px;line-height:1.5}html{overflow-x:hidd'
            'en}.w3-teal{color:#fff!important;background-color:#009688!important}.w3-container{padding:0.01em 1'
            '6px}.w3-center{text-align:center}.w3-margin-left{margin-left:16px!important}.w3-table{border-colla'
            'pse:collapse;border-spacing:0;width:100%;display:table}.w3-table td,.w3-table th.w3-table th:first'
            '-child,.w3-table td:first-child.w3-bordered tr,.w3-table-all tr{border-bottom:1px solid #ddd}.w3-b'
            'lue{color:#fff!important;background-color:#2196F3!important}.w3-container{padding:0.01em 16px}.w3-'
            'red{color:#fff!important;background-color:#f44336!important}</style>'.encode('utf-8'))

    def write_end(self):
        """ Write end of HTML """
        self.html_file.write('</html>'.encode('utf-8'))
        self.html_file.close()

    def add_head(self, text):
        """
        Add above table bar with description of table.
        In example it's a 'WI-FI keys' or 'Chrome passes'.
        """
        self.html_file.write(('<h1 class=\"w3-teal w3-container w3-center\">' + text + '</h1>\n').encode('utf-8'))

    def add_table(self, heads):
        """ Create table. Add heads of table. """
        tag = '<div class=\"w3-margin-left \"><table class=\"w3-table w3-bordered\">\n<tr class=\"w3-blue\">'
        for head in heads:
            # Close every <th>: the original closed only the last one,
            # producing malformed HTML.
            tag += '<th class=\"w3-third\">' + head + '</th>'
        tag += '</tr>\n'
        self.html_file.write(tag.encode('utf-8'))

    def add_table_end(self):
        """ Write end of table """
        self.html_file.write("</table></div>".encode('utf-8'))

    def get_heads(self, index):
        """
        Get keys of subdictionaries.
        In example it's ['ESSID', 'Pass'] or ['URL', 'Login', 'Pass']
        """
        return list(list(self.data.values())[index][0].keys())

    def add_data(self, data):
        """
        Write content of each subdictionary.
        In example it's a value of 'WI-FI keys' or 'Chrome passes'.
        """
        for row in data:
            self.html_file.write('<tr>'.encode('utf-8'))
            for key, value in row.items():
                # Close every <td>: the original emitted unbalanced tags.
                self.html_file.write(("<td class=\"w3-third\">" + value + "</td>").encode('utf-8'))
            self.html_file.write('</tr>\n'.encode('utf-8'))

    def write_data(self):
        """ Write main dictionary to file. For each subdictionary create new table. """
        self.create_head()
        for index, (header, value) in enumerate(self.data.items()):
            if value:
                self.add_head(header)
                self.add_table(self.get_heads(index))
                self.add_data(value)
                self.add_table_end()
        self.write_end()
if __name__ == '__main__':
    # Module is import-only; nothing to do when run directly.
    pass
"""
Class that creates HTML file (_filename) with simple w3-css style from
input dict (_data). Example of input dict:
{ 'WI-FI keys':
[
{'ESSID': 'Burger-king', 'Pass': '<PASSWORD>'},
{'ESSID': 'Neighboring Wi-Fi', 'Pass': '<PASSWORD>'},
{'ESSID': 'MyWi-Fi', 'Pass': '<PASSWORD>!'},
],
'Chrome passes':
[
{'URL': 'facebook.com', 'Login': 'destoyer228', 'Pass': '<PASSWORD>'},
{'URL': 'github.com', 'Login': 'kovinevmv', 'Pass': '<PASSWORD>'},
]
}
"""
def __init__(self, _filename, _data):
""" Create file with _filename and write not empty data """
self.filename = _filename
self.data = _data
self.html_file = open(self.filename, 'wb')
if self.data:
self.write_data()
def create_head(self):
"""
Write head of HTML. Added w3-css to avoid external
dependency and work without Internet.
"""
self.html_file.write(
'<html><style type="text/css">.w3-table{table-layout: fixed;}.w3-third{width:33.33333%}.w3-table td,'
'.w3-table th,.w3-table-all td,.w3-table-all th{padding:8px 8px;display:table-cell;text-align:left '
';vertical-align:top}.w3-table th:first-child,.w3-table td:first-child,.w3-table-all th:first-child'
',.w3-table-all td:first-child{padding-left:16px}h1,h2,h3,h4,h5,h6{font-family:"Segoe UI",Arial,san'
's-serif;font-weight:400;margin:10px 0}html{box-sizing:border-box}*,*:before, *:after{box-sizing:in'
'herit}html,body{font-family:Verdana,sans-serif;font-size:15px;line-height:1.5}html{overflow-x:hidd'
'en}.w3-teal{color:#fff!important;background-color:#009688!important}.w3-container{padding:0.01em 1'
'6px}.w3-center{text-align:center}.w3-margin-left{margin-left:16px!important}.w3-table{border-colla'
'pse:collapse;border-spacing:0;width:100%;display:table}.w3-table td,.w3-table th.w3-table th:first'
'-child,.w3-table td:first-child.w3-bordered tr,.w3-table-all tr{border-bottom:1px solid #ddd}.w3-b'
'lue{color:#fff!important;background-color:#2196F3!important}.w3-container{padding:0.01em 16px}.w3-'
'red{color:#fff!important;background-color:#f44336!important}</style>'.encode('utf-8'))
def write_end(self):
""" Write end of HTML """
self.html_file.write('</html>'.encode('utf-8'))
self.html_file.close()
def add_head(self, text):
"""
Add above table bar with description of table.
In example it's a 'WI-FI keys' or 'Chrome passes'.
"""
self.html_file.write(('<h1 class=\"w3-teal w3-container w3-center\">' + text + '</h1>\n').encode('utf-8'))
def add_table(self, heads):
""" Create table. Add heads of table. """
tag = '<div class=\"w3-margin-left \"><table class=\"w3-table w3-bordered\">\n<tr class=\"w3-blue\">'
for head in heads:
tag += '<th class=\"w3-third\">' + head
tag += '</th></tr>\n'
self.html_file.write(tag.encode('utf-8'))
def add_table_end(self):
""" Write end of table """
self.html_file.write("</table></div>".encode('utf-8'))
def get_heads(self, index):
"""
Get keys of subdictionaries.
In example it's ['ESSID', 'Pass'] or ['URL', 'Login', 'Pass']
"""
return list(list(self.data.values())[index][0].keys())
def add_data(self, data):
"""
Write content of each subdictionary.
In example it's a value of 'WI-FI keys' or 'Chrome passes'.
"""
for row in data:
self.html_file.write('<tr>'.encode('utf-8'))
for key, value in row.items():
self.html_file.write(("<td class=\"w3-third\">" + value).encode('utf-8'))
self.html_file.write('</tr>\n'.encode('utf-8'))
def write_data(self):
""" Write main dictionary to file. For each sudictionaty create new table. """
self.create_head()
for index, (header, value) in enumerate(self.data.items()):
if value:
self.add_head(header)
self.add_table(self.get_heads(index))
self.add_data(value)
self.add_table_end()
self.write_end()
if __name__ == '__main__':
pass | 0.631708 | 0.264996 |
import sphinx_rtd_theme
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.imgmath',
'sphinx.ext.viewcode',
]
project = u'pwkit'
version = 'master' # also edit /setup.py, /pwkit/__init__.py!
release = '1.1.0.dev0'
copyright = u'2015-2019, <NAME> and collaborators'
author = u'<NAME> and collaborators'
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
language = None
exclude_patterns = []
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
pygments_style = 'sphinx'
todo_include_todos = False
# Intersphinx
intersphinx_mapping = {
'https://docs.python.org/': None,
}
# HTML output settings
html_theme = 'sphinx_rtd_theme'
#html_theme_options = {}
html_theme_path = [sphinx_rtd_theme.get_html_theme_path ()]
html_static_path = ['_static']
htmlhelp_basename = 'pwkitdoc'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# Tomfoolery to fake modules that readthedocs.org doesn't know. We need to do
# a super-duper hack for `pwkit.sherpa` because of how multiple inheritance
# interacts with the Mock object system.
import sys
from mock import Mock as MagicMock
class Mock (MagicMock):
@classmethod
def __getattr__ (cls, name):
if name in ('ArithmeticModel', 'XSAdditiveModel'):
return dict
if name == 'CompositeModel':
return Mock
return Mock ()
sys.modules.update ((m, Mock ()) for m in [
'cairo',
'gi',
'gi.repository',
'glib',
'gtk',
'sherpa',
'sherpa.astro',
'sherpa.astro.ui',
'sherpa.astro.xspec',
'sherpa.astro.xspec._xspec',
'sherpa.models',
'sherpa.models.parameter',
]) | docs/source/conf.py |
import sphinx_rtd_theme
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.imgmath',
'sphinx.ext.viewcode',
]
project = u'pwkit'
version = 'master' # also edit /setup.py, /pwkit/__init__.py!
release = '1.1.0.dev0'
copyright = u'2015-2019, <NAME> and collaborators'
author = u'<NAME> and collaborators'
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
language = None
exclude_patterns = []
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
pygments_style = 'sphinx'
todo_include_todos = False
# Intersphinx
intersphinx_mapping = {
'https://docs.python.org/': None,
}
# HTML output settings
html_theme = 'sphinx_rtd_theme'
#html_theme_options = {}
html_theme_path = [sphinx_rtd_theme.get_html_theme_path ()]
html_static_path = ['_static']
htmlhelp_basename = 'pwkitdoc'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# Tomfoolery to fake modules that readthedocs.org doesn't know. We need to do
# a super-duper hack for `pwkit.sherpa` because of how multiple inheritance
# interacts with the Mock object system.
import sys
from mock import Mock as MagicMock
class Mock (MagicMock):
@classmethod
def __getattr__ (cls, name):
if name in ('ArithmeticModel', 'XSAdditiveModel'):
return dict
if name == 'CompositeModel':
return Mock
return Mock ()
sys.modules.update ((m, Mock ()) for m in [
'cairo',
'gi',
'gi.repository',
'glib',
'gtk',
'sherpa',
'sherpa.astro',
'sherpa.astro.ui',
'sherpa.astro.xspec',
'sherpa.astro.xspec._xspec',
'sherpa.models',
'sherpa.models.parameter',
]) | 0.483648 | 0.123577 |
import unittest
from edopi import Chroma
class TestChroma(unittest.TestCase):
def setUp(self):
self.e1 = Chroma(8, 12)
self.e2 = Chroma(5, 12)
def test_is_generator(self):
self.assertFalse(self.e1.is_generator)
self.assertTrue(self.e2.is_generator)
def test_eq(self):
e3 = Chroma(8, 12)
self.assertTrue(self.e1 == e3)
e4 = Chroma(20, 12)
self.assertTrue(self.e1 == e4)
self.assertFalse(self.e1 == self.e2)
self.assertFalse(self.e1 == 'uff')
def test_add(self):
result = self.e1 + self.e2
e3 = Chroma(1, 12)
self.assertEqual(e3, result)
def test_sub(self):
result = self.e1 - self.e2
e3 = Chroma(3, 12)
self.assertEqual(e3, result)
result2 = self.e2 - self.e1
self.assertEqual(Chroma(9, 12), result2)
def test_mul(self):
result = self.e1 * self.e2
e3 = Chroma(4, 12)
self.assertEqual(e3, result)
def test_div(self):
result = self.e1/self.e2
exp = Chroma(4, 12)
self.assertEqual(exp, result)
def test_lt(self):
e3 = Chroma(8, 12)
self.assertTrue(self.e2 < self.e1)
self.assertFalse(e3 < self.e1)
self.assertFalse(self.e1 < self.e2)
def test_gt(self):
e3 = Chroma(8, 12)
self.assertTrue(self.e1 > self.e2)
self.assertFalse(e3 > self.e1)
self.assertFalse(self.e2 > self.e1)
def test_le(self):
e3 = Chroma(8, 12)
self.assertTrue(self.e2 <= self.e1)
self.assertTrue(e3 <= self.e1)
self.assertFalse(self.e1 <= self.e2)
def test_ge(self):
e3 = Chroma(8, 12)
self.assertTrue(self.e1 >= self.e2)
self.assertTrue(e3 >= self.e1)
self.assertFalse(self.e2 >= e3)
def test_inverse(self):
self.assertEqual(None, self.e1.inverse())
self.assertEqual(self.e2, self.e2.inverse())
def test_midi(self):
self.assertEqual(5, self.e2.midi)
self.assertEqual(500, self.e2.cents)
def test_symmetrical(self):
self.assertEqual(Chroma(4, 12), self.e1.symmetrical())
def test_subgroup(self):
self.assertEqual(12, self.e2.subgroup())
self.assertEqual(3, self.e1.subgroup())
if __name__ == '__main__':
unittest.main() | tests/test_chroma.py | import unittest
from edopi import Chroma
class TestChroma(unittest.TestCase):
def setUp(self):
self.e1 = Chroma(8, 12)
self.e2 = Chroma(5, 12)
def test_is_generator(self):
self.assertFalse(self.e1.is_generator)
self.assertTrue(self.e2.is_generator)
def test_eq(self):
e3 = Chroma(8, 12)
self.assertTrue(self.e1 == e3)
e4 = Chroma(20, 12)
self.assertTrue(self.e1 == e4)
self.assertFalse(self.e1 == self.e2)
self.assertFalse(self.e1 == 'uff')
def test_add(self):
result = self.e1 + self.e2
e3 = Chroma(1, 12)
self.assertEqual(e3, result)
def test_sub(self):
result = self.e1 - self.e2
e3 = Chroma(3, 12)
self.assertEqual(e3, result)
result2 = self.e2 - self.e1
self.assertEqual(Chroma(9, 12), result2)
def test_mul(self):
result = self.e1 * self.e2
e3 = Chroma(4, 12)
self.assertEqual(e3, result)
def test_div(self):
result = self.e1/self.e2
exp = Chroma(4, 12)
self.assertEqual(exp, result)
def test_lt(self):
e3 = Chroma(8, 12)
self.assertTrue(self.e2 < self.e1)
self.assertFalse(e3 < self.e1)
self.assertFalse(self.e1 < self.e2)
def test_gt(self):
e3 = Chroma(8, 12)
self.assertTrue(self.e1 > self.e2)
self.assertFalse(e3 > self.e1)
self.assertFalse(self.e2 > self.e1)
def test_le(self):
e3 = Chroma(8, 12)
self.assertTrue(self.e2 <= self.e1)
self.assertTrue(e3 <= self.e1)
self.assertFalse(self.e1 <= self.e2)
def test_ge(self):
e3 = Chroma(8, 12)
self.assertTrue(self.e1 >= self.e2)
self.assertTrue(e3 >= self.e1)
self.assertFalse(self.e2 >= e3)
def test_inverse(self):
self.assertEqual(None, self.e1.inverse())
self.assertEqual(self.e2, self.e2.inverse())
def test_midi(self):
self.assertEqual(5, self.e2.midi)
self.assertEqual(500, self.e2.cents)
def test_symmetrical(self):
self.assertEqual(Chroma(4, 12), self.e1.symmetrical())
def test_subgroup(self):
self.assertEqual(12, self.e2.subgroup())
self.assertEqual(3, self.e1.subgroup())
if __name__ == '__main__':
unittest.main() | 0.665302 | 0.770378 |
from django.test import TestCase
from boardinghouse.schema import get_schema_model
from ..models import AwareModel, NaiveModel
Schema = get_schema_model()
class TestPartitioning(TestCase):
def test_aware_objects_are_created_in_active_schema(self):
first = Schema.objects.create(name='first', schema='first')
second = Schema.objects.create(name='second', schema='second')
first.activate()
AwareModel.objects.create(name="Foo object")
AwareModel.objects.create(name="Bar object")
self.assertEquals(2, AwareModel.objects.count())
second.activate()
self.assertEquals(0, AwareModel.objects.count())
AwareModel.objects.create(name="Baz object")
self.assertRaises(AwareModel.DoesNotExist, AwareModel.objects.get, name='Foo object')
# TODO: Make this work? Or do we just let the error propagate?
# second.deactivate()
# self.assertEquals(0, AwareModel.objects.count())
first.activate()
self.assertRaises(AwareModel.DoesNotExist, AwareModel.objects.get, name='Baz object')
def test_boardinghouse_manager(self):
first = Schema.objects.create(name='first', schema='first')
second = Schema.objects.create(name='second', schema='second')
first.activate()
AwareModel.objects.create(name="Foo object").name
AwareModel.objects.create(name="Bar object").name
second.activate()
baz = AwareModel.objects.create(name="Baz object").name
second.deactivate()
self.assertEquals(3, len(list(AwareModel.objects.from_schemata(Schema.objects.all()))))
self.assertEquals([baz], [x.name for x in AwareModel.objects.from_schemata(second)])
self.assertNotIn(baz, [x.name for x in AwareModel.objects.from_schemata(first)])
def test_naive_objects_are_created_in_public_schema(self):
first = Schema.objects.create(name='first', schema='first')
second = Schema.objects.create(name='second', schema='second')
NaiveModel.objects.create(name="Public")
first.activate()
self.assertEquals(1, NaiveModel.objects.count())
NaiveModel.objects.create(name="First")
second.activate()
self.assertEquals(2, NaiveModel.objects.count())
NaiveModel.objects.create(name="Second")
second.deactivate()
self.assertEquals(3, NaiveModel.objects.count()) | tests/tests/test_objects_are_partitioned.py | from django.test import TestCase
from boardinghouse.schema import get_schema_model
from ..models import AwareModel, NaiveModel
Schema = get_schema_model()
class TestPartitioning(TestCase):
def test_aware_objects_are_created_in_active_schema(self):
first = Schema.objects.create(name='first', schema='first')
second = Schema.objects.create(name='second', schema='second')
first.activate()
AwareModel.objects.create(name="Foo object")
AwareModel.objects.create(name="Bar object")
self.assertEquals(2, AwareModel.objects.count())
second.activate()
self.assertEquals(0, AwareModel.objects.count())
AwareModel.objects.create(name="Baz object")
self.assertRaises(AwareModel.DoesNotExist, AwareModel.objects.get, name='Foo object')
# TODO: Make this work? Or do we just let the error propagate?
# second.deactivate()
# self.assertEquals(0, AwareModel.objects.count())
first.activate()
self.assertRaises(AwareModel.DoesNotExist, AwareModel.objects.get, name='Baz object')
def test_boardinghouse_manager(self):
first = Schema.objects.create(name='first', schema='first')
second = Schema.objects.create(name='second', schema='second')
first.activate()
AwareModel.objects.create(name="Foo object").name
AwareModel.objects.create(name="Bar object").name
second.activate()
baz = AwareModel.objects.create(name="Baz object").name
second.deactivate()
self.assertEquals(3, len(list(AwareModel.objects.from_schemata(Schema.objects.all()))))
self.assertEquals([baz], [x.name for x in AwareModel.objects.from_schemata(second)])
self.assertNotIn(baz, [x.name for x in AwareModel.objects.from_schemata(first)])
def test_naive_objects_are_created_in_public_schema(self):
first = Schema.objects.create(name='first', schema='first')
second = Schema.objects.create(name='second', schema='second')
NaiveModel.objects.create(name="Public")
first.activate()
self.assertEquals(1, NaiveModel.objects.count())
NaiveModel.objects.create(name="First")
second.activate()
self.assertEquals(2, NaiveModel.objects.count())
NaiveModel.objects.create(name="Second")
second.deactivate()
self.assertEquals(3, NaiveModel.objects.count()) | 0.419053 | 0.394959 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from six.moves import range
import attention
import math
from loss import cross_entropy, uniform_label_smooth_regulerizer
import utils
from utils import get_seq_mask_by_shape, get_seq_mask
import pdb
class RNNDecoder(torch.nn.Module):
def __init__(self, config):
super(RNNDecoder, self).__init__()
self.config = config
self.embed_dim = config["embed_dim"]
self.dropout_rate = config["dropout_rate"]
self.vocab_size = config["vocab_size"]
self.hidden_size = config["hidden_size"]
self.num_layers = config["num_layers"]
self.enc_dim = config["enc_dim"]
self.att_inner_dim = config["att_inner_dim"]
self.emb = nn.Embedding(self.vocab_size, self.embed_dim)
self.dropout = nn.Dropout(self.dropout_rate)
rnns = [torch.nn.LSTM(self.embed_dim, self.hidden_size, 1, batch_first=True)]
for _ in range(self.num_layers-1):
rnns += [torch.nn.LSTM(self.hidden_size+self.enc_dim, self.hidden_size, 1, batch_first=True)]
self.rnns = torch.nn.ModuleList(rnns)
self.attentions = torch.nn.ModuleList(
[attention.DotProductAttention(self.enc_dim, self.hidden_size, self.att_inner_dim,
math.sqrt(self.att_inner_dim)) for _ in range(self.num_layers-1)])
self.output_affine = nn.Linear(self.hidden_size, self.vocab_size)
def forward(self, enc_outputs, enc_lengths, src_ids, tgt_ids, label_smooth=0):
bz = enc_outputs.shape[0]
if bz != src_ids.shape[0]:
raise ValueError("enc_outputs does not match src_ids.")
encout_max_length = enc_outputs.shape[1]
dec_max_length = src_ids.shape[1]
att_masks = (1-get_seq_mask_by_shape(encout_max_length, dec_max_length, enc_lengths).transpose(1,2)).byte()
rnn_in = self.emb(src_ids)
rnn_in = self.dropout(rnn_in)
rnn = self.rnns[0]
rnn_output, _ = rnn(rnn_in)
for l in range(1, self.num_layers):
att_scores, att = self.attentions[l-1](enc_outputs, rnn_output, enc_outputs, mask=att_masks)
rnn_in = torch.cat([rnn_output, att], dim=-1)
rnn_in = self.dropout(rnn_in)
rnn_output, _ = self.rnns[l](rnn_in)
rnn_output = self.dropout(rnn_output)
logits = self.output_affine(rnn_output)
ce = cross_entropy(logits.view(-1, logits.size(-1)), tgt_ids.view(-1))
if label_smooth > 0:
ls = uniform_label_smooth_regulerizer(logits.view(-1, logits.size(-1)), tgt_ids.view(-1))
loss = (1-label_smooth) * ce + label_smooth * ls
else:
loss = ce
return loss
def get_attention_scores(self, enc_outputs, enc_lengths, src_ids):
bz = enc_outputs.shape[0]
if bz != src_ids.shape[0]:
raise ValueError("enc_outputs does not match src_ids.")
encout_max_length = enc_outputs.shape[1]
dec_max_length = src_ids.shape[1]
att_masks = (1-get_seq_mask_by_shape(encout_max_length, dec_max_length, enc_lengths).transpose(1,2)).byte()
rnn_in = self.emb(src_ids)
rnn_in = self.dropout(rnn_in)
rnn = self.rnns[0]
rnn_output, _ = rnn(rnn_in)
att_score_list = []
for l in range(1, self.num_layers):
att_scores, att = self.attentions[l-1](enc_outputs, rnn_output, enc_outputs, mask=att_masks)
att_score_list.append(att_scores)
rnn_in = torch.cat([rnn_output, att], dim=-1)
rnn_in = self.dropout(rnn_in)
rnn_output, _ = self.rnns[l](rnn_in)
return att_score_list
def zero_states(self, batch_size):
states = []
for _ in range(len(self.rnns)):
states.append(None)
return states
def forward_step(self, enc_outputs, enc_lengths, decoder_states, src_ids):
'''
decoder_states
src_ids: batch_size x 1
'''
bz = enc_outputs.shape[0]
if bz != src_ids.shape[0]:
raise ValueError("enc_outputs does not match src_ids.")
encout_max_length = enc_outputs.shape[1]
if src_ids.shape[1] != 1:
raise ValueError('The src_ids is not for one step.')
att_masks = (1-get_seq_mask_by_shape(encout_max_length, 1, enc_lengths).transpose(1,2)).byte()
src_ids = src_ids.to(enc_outputs.device)
next_states = []
rnn_in = self.emb(src_ids)
rnn_in = self.dropout(rnn_in)
rnn = self.rnns[0]
rnn_output, states = rnn(rnn_in, decoder_states[0])
next_states.append(states)
for l in range(1, self.num_layers):
att_scores, att = self.attentions[l-1](enc_outputs, rnn_output, enc_outputs, mask=att_masks)
rnn_in = torch.cat([rnn_output, att], dim=-1)
rnn_in = self.dropout(rnn_in)
rnn_output, states = self.rnns[l](rnn_in, decoder_states[l])
next_states.append(states)
rnn_output = self.dropout(rnn_output)
logits = self.output_affine(rnn_output)
log_probs = F.log_softmax(logits, dim=-1)
return log_probs, next_states
if __name__ == "__main__":
# For debugging
config = {
"embed_dim": 8,
"vocab_size": 128,
"hidden_size": 64,
"num_layers": 2,
"enc_dim": 32,
"att_inner_dim": 32,
"dropout_rate": 0.5
}
decoder = RNNDecoder(config)
enc_outputs = torch.randn(2, 20, 32)
enc_lengths = torch.tensor([15, 16]).long()
src_ids = torch.tensor([[1,2,3,4,5],
[6,7,8,9,10]])
tgt_ids = torch.tensor([[2, 3, 4, 5, 6],
[7,8,9,10,-1]])
log_probs, loss = decoder(enc_outputs, enc_lengths, src_ids, tgt_ids)
states = decoder.zero_states(2)
log_probs2 = []
states2 = []
for i in range(1):
res, states = decoder.forward_step(enc_outputs, enc_lengths, states, src_ids[:, i][:, None])
log_probs2.append(res)
states2.append(states)
log_probs2 = torch.cat(log_probs2, dim=1) | src/decoder.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from six.moves import range
import attention
import math
from loss import cross_entropy, uniform_label_smooth_regulerizer
import utils
from utils import get_seq_mask_by_shape, get_seq_mask
import pdb
class RNNDecoder(torch.nn.Module):
def __init__(self, config):
super(RNNDecoder, self).__init__()
self.config = config
self.embed_dim = config["embed_dim"]
self.dropout_rate = config["dropout_rate"]
self.vocab_size = config["vocab_size"]
self.hidden_size = config["hidden_size"]
self.num_layers = config["num_layers"]
self.enc_dim = config["enc_dim"]
self.att_inner_dim = config["att_inner_dim"]
self.emb = nn.Embedding(self.vocab_size, self.embed_dim)
self.dropout = nn.Dropout(self.dropout_rate)
rnns = [torch.nn.LSTM(self.embed_dim, self.hidden_size, 1, batch_first=True)]
for _ in range(self.num_layers-1):
rnns += [torch.nn.LSTM(self.hidden_size+self.enc_dim, self.hidden_size, 1, batch_first=True)]
self.rnns = torch.nn.ModuleList(rnns)
self.attentions = torch.nn.ModuleList(
[attention.DotProductAttention(self.enc_dim, self.hidden_size, self.att_inner_dim,
math.sqrt(self.att_inner_dim)) for _ in range(self.num_layers-1)])
self.output_affine = nn.Linear(self.hidden_size, self.vocab_size)
def forward(self, enc_outputs, enc_lengths, src_ids, tgt_ids, label_smooth=0):
bz = enc_outputs.shape[0]
if bz != src_ids.shape[0]:
raise ValueError("enc_outputs does not match src_ids.")
encout_max_length = enc_outputs.shape[1]
dec_max_length = src_ids.shape[1]
att_masks = (1-get_seq_mask_by_shape(encout_max_length, dec_max_length, enc_lengths).transpose(1,2)).byte()
rnn_in = self.emb(src_ids)
rnn_in = self.dropout(rnn_in)
rnn = self.rnns[0]
rnn_output, _ = rnn(rnn_in)
for l in range(1, self.num_layers):
att_scores, att = self.attentions[l-1](enc_outputs, rnn_output, enc_outputs, mask=att_masks)
rnn_in = torch.cat([rnn_output, att], dim=-1)
rnn_in = self.dropout(rnn_in)
rnn_output, _ = self.rnns[l](rnn_in)
rnn_output = self.dropout(rnn_output)
logits = self.output_affine(rnn_output)
ce = cross_entropy(logits.view(-1, logits.size(-1)), tgt_ids.view(-1))
if label_smooth > 0:
ls = uniform_label_smooth_regulerizer(logits.view(-1, logits.size(-1)), tgt_ids.view(-1))
loss = (1-label_smooth) * ce + label_smooth * ls
else:
loss = ce
return loss
def get_attention_scores(self, enc_outputs, enc_lengths, src_ids):
bz = enc_outputs.shape[0]
if bz != src_ids.shape[0]:
raise ValueError("enc_outputs does not match src_ids.")
encout_max_length = enc_outputs.shape[1]
dec_max_length = src_ids.shape[1]
att_masks = (1-get_seq_mask_by_shape(encout_max_length, dec_max_length, enc_lengths).transpose(1,2)).byte()
rnn_in = self.emb(src_ids)
rnn_in = self.dropout(rnn_in)
rnn = self.rnns[0]
rnn_output, _ = rnn(rnn_in)
att_score_list = []
for l in range(1, self.num_layers):
att_scores, att = self.attentions[l-1](enc_outputs, rnn_output, enc_outputs, mask=att_masks)
att_score_list.append(att_scores)
rnn_in = torch.cat([rnn_output, att], dim=-1)
rnn_in = self.dropout(rnn_in)
rnn_output, _ = self.rnns[l](rnn_in)
return att_score_list
def zero_states(self, batch_size):
states = []
for _ in range(len(self.rnns)):
states.append(None)
return states
def forward_step(self, enc_outputs, enc_lengths, decoder_states, src_ids):
'''
decoder_states
src_ids: batch_size x 1
'''
bz = enc_outputs.shape[0]
if bz != src_ids.shape[0]:
raise ValueError("enc_outputs does not match src_ids.")
encout_max_length = enc_outputs.shape[1]
if src_ids.shape[1] != 1:
raise ValueError('The src_ids is not for one step.')
att_masks = (1-get_seq_mask_by_shape(encout_max_length, 1, enc_lengths).transpose(1,2)).byte()
src_ids = src_ids.to(enc_outputs.device)
next_states = []
rnn_in = self.emb(src_ids)
rnn_in = self.dropout(rnn_in)
rnn = self.rnns[0]
rnn_output, states = rnn(rnn_in, decoder_states[0])
next_states.append(states)
for l in range(1, self.num_layers):
att_scores, att = self.attentions[l-1](enc_outputs, rnn_output, enc_outputs, mask=att_masks)
rnn_in = torch.cat([rnn_output, att], dim=-1)
rnn_in = self.dropout(rnn_in)
rnn_output, states = self.rnns[l](rnn_in, decoder_states[l])
next_states.append(states)
rnn_output = self.dropout(rnn_output)
logits = self.output_affine(rnn_output)
log_probs = F.log_softmax(logits, dim=-1)
return log_probs, next_states
if __name__ == "__main__":
# For debugging
config = {
"embed_dim": 8,
"vocab_size": 128,
"hidden_size": 64,
"num_layers": 2,
"enc_dim": 32,
"att_inner_dim": 32,
"dropout_rate": 0.5
}
decoder = RNNDecoder(config)
enc_outputs = torch.randn(2, 20, 32)
enc_lengths = torch.tensor([15, 16]).long()
src_ids = torch.tensor([[1,2,3,4,5],
[6,7,8,9,10]])
tgt_ids = torch.tensor([[2, 3, 4, 5, 6],
[7,8,9,10,-1]])
log_probs, loss = decoder(enc_outputs, enc_lengths, src_ids, tgt_ids)
states = decoder.zero_states(2)
log_probs2 = []
states2 = []
for i in range(1):
res, states = decoder.forward_step(enc_outputs, enc_lengths, states, src_ids[:, i][:, None])
log_probs2.append(res)
states2.append(states)
log_probs2 = torch.cat(log_probs2, dim=1) | 0.810929 | 0.241121 |
from typing import Dict
from enums import MeterType, StressType
class Line:
'''Functionality to load, process and store information about specific lines of text.'''
# Available vowels - they signify syllables
VOWELS: str = "АаОоУуЕеИиІіЯяЄєЇїЮю"
# An ord of stress mark that signifies which syllable is stressed
STRESS_MARK_ORD: int = 769
STRESS_MARK: str = chr(STRESS_MARK_ORD)
STRESS_TYPES: Dict[str, int] = {
"unstressed"
}
def __init__(self, line: str) -> None:
'''Load and process the line, then store it in corresponding fields.'''
self.line = line
self._make_reduced_line()
self._generate_pattern()
self._recognise_meter()
def _make_reduced_line(self):
'''Reduce the loaded line to the list of syllables consisting of vowels.'''
reduced_line = self.line
allowed_letters = self.VOWELS + " " + self.STRESS_MARK
for letter in reduced_line:
if letter in allowed_letters:
continue
reduced_line = reduced_line.replace(letter, "")
self.reduced_line = reduced_line
def _generate_pattern(self):
'''Process reduced line to generate its pattern.'''
if self.reduced_line.strip() == "":
self.pattern = None
return
reversed_pattern = []
for syllables in self.reduced_line.split(" ")[::-1]:
syllables_count = self._get_letters_count(syllables)
skip_next_symbol = False
for idx, symbol in enumerate(syllables[::-1]):
if skip_next_symbol:
skip_next_symbol = False
continue
if ord(symbol) != self.STRESS_MARK_ORD and syllables_count > 1:
reversed_pattern.append(StressType.UNSTRESSED)
continue
skip_next_symbol = True
if syllables_count == 1:
reversed_pattern.append(StressType.ONE_SYLLABLE)
elif syllables_count == 2:
if idx == 0:
reversed_pattern.append(StressType.TWO_SYLLABLES_SECOND_STRESSED)
else:
reversed_pattern.append(StressType.TWO_SYLLABLES_FIRST_STRESSED)
elif syllables_count > 2:
reversed_pattern.append(StressType.MORE_THAN_TWO_SYLLABLES)
else:
reversed_pattern.append(StressType.UNKNOWN)
self.pattern = "".join(str(x) for x in reversed_pattern[::-1])
def _get_letters_count(self, word: str) -> int:
'''Get amount of letters in the string, knowing that stress mark is counted as a separate letter.'''
return len(word) - word.count(self.STRESS_MARK)
def _recognise_meter(self):
'''Make assumption on which metrical foot this line belongs to.'''
if self.pattern is None:
return
two_syllables_types = (StressType.UNSTRESSED, StressType.ONE_SYLLABLE)
# Checking for iamb
for i in range(0, len(self.pattern), 2):
if int(self.pattern[i]) not in two_syllables_types:
break
else:
self.meter = MeterType.IAMB
return
# Checking for choree
for i in range(1, len(self.pattern), 2):
if int(self.pattern[i]) not in two_syllables_types:
break
else:
self.meter = MeterType.CHOREE
return
three_syllables_types1 = (StressType.UNSTRESSED, StressType.ONE_SYLLABLE, StressType.TWO_SYLLABLES_FIRST_STRESSED)
three_syllables_types2 = (StressType.UNSTRESSED, StressType.ONE_SYLLABLE, StressType.TWO_SYLLABLES_SECOND_STRESSED)
# Checking for dactyl
for i in range(1, len(self.pattern), 3):
if int(self.pattern[i]) not in three_syllables_types1:
break
else:
for i in range(2, len(self.pattern), 3):
if int(self.pattern[i]) not in three_syllables_types2:
break
else:
self.meter = MeterType.DACTYL
return
# Checking for amphibrach
for i in range(0, len(self.pattern), 3):
if int(self.pattern[i]) not in three_syllables_types2:
break
else:
for i in range(2, len(self.pattern), 3):
if int(self.pattern[i]) not in three_syllables_types1:
break
else:
self.meter = MeterType.AMPHIBRACH
return
# Checking for anapest
for i in range(0, len(self.pattern), 3):
if int(self.pattern[i]) not in three_syllables_types1:
break
else:
for i in range(1, len(self.pattern), 3):
if int(self.pattern[i]) not in three_syllables_types2:
break
else:
self.meter = MeterType.ANAPEST
return
self.meter = MeterType.UNKNOWN
def __str__(self) -> str:
'''Override str method to show pattern on print().'''
return f"{self.pattern}: {self.meter}" | line.py | from typing import Dict
from enums import MeterType, StressType
class Line:
'''Functionality to load, process and store information about specific lines of text.'''
# Available vowels - they signify syllables
VOWELS: str = "АаОоУуЕеИиІіЯяЄєЇїЮю"
# An ord of stress mark that signifies which syllable is stressed
STRESS_MARK_ORD: int = 769
STRESS_MARK: str = chr(STRESS_MARK_ORD)
STRESS_TYPES: Dict[str, int] = {
"unstressed"
}
def __init__(self, line: str) -> None:
'''Load and process the line, then store it in corresponding fields.'''
self.line = line
self._make_reduced_line()
self._generate_pattern()
self._recognise_meter()
def _make_reduced_line(self):
'''Reduce the loaded line to the list of syllables consisting of vowels.'''
reduced_line = self.line
allowed_letters = self.VOWELS + " " + self.STRESS_MARK
for letter in reduced_line:
if letter in allowed_letters:
continue
reduced_line = reduced_line.replace(letter, "")
self.reduced_line = reduced_line
def _generate_pattern(self):
'''Process reduced line to generate its pattern.'''
if self.reduced_line.strip() == "":
self.pattern = None
return
reversed_pattern = []
for syllables in self.reduced_line.split(" ")[::-1]:
syllables_count = self._get_letters_count(syllables)
skip_next_symbol = False
for idx, symbol in enumerate(syllables[::-1]):
if skip_next_symbol:
skip_next_symbol = False
continue
if ord(symbol) != self.STRESS_MARK_ORD and syllables_count > 1:
reversed_pattern.append(StressType.UNSTRESSED)
continue
skip_next_symbol = True
if syllables_count == 1:
reversed_pattern.append(StressType.ONE_SYLLABLE)
elif syllables_count == 2:
if idx == 0:
reversed_pattern.append(StressType.TWO_SYLLABLES_SECOND_STRESSED)
else:
reversed_pattern.append(StressType.TWO_SYLLABLES_FIRST_STRESSED)
elif syllables_count > 2:
reversed_pattern.append(StressType.MORE_THAN_TWO_SYLLABLES)
else:
reversed_pattern.append(StressType.UNKNOWN)
self.pattern = "".join(str(x) for x in reversed_pattern[::-1])
def _get_letters_count(self, word: str) -> int:
'''Get amount of letters in the string, knowing that stress mark is counted as a separate letter.'''
return len(word) - word.count(self.STRESS_MARK)
def _recognise_meter(self):
'''Make assumption on which metrical foot this line belongs to.'''
if self.pattern is None:
return
two_syllables_types = (StressType.UNSTRESSED, StressType.ONE_SYLLABLE)
# Checking for iamb
for i in range(0, len(self.pattern), 2):
if int(self.pattern[i]) not in two_syllables_types:
break
else:
self.meter = MeterType.IAMB
return
# Checking for choree
for i in range(1, len(self.pattern), 2):
if int(self.pattern[i]) not in two_syllables_types:
break
else:
self.meter = MeterType.CHOREE
return
three_syllables_types1 = (StressType.UNSTRESSED, StressType.ONE_SYLLABLE, StressType.TWO_SYLLABLES_FIRST_STRESSED)
three_syllables_types2 = (StressType.UNSTRESSED, StressType.ONE_SYLLABLE, StressType.TWO_SYLLABLES_SECOND_STRESSED)
# Checking for dactyl
for i in range(1, len(self.pattern), 3):
if int(self.pattern[i]) not in three_syllables_types1:
break
else:
for i in range(2, len(self.pattern), 3):
if int(self.pattern[i]) not in three_syllables_types2:
break
else:
self.meter = MeterType.DACTYL
return
# Checking for amphibrach
for i in range(0, len(self.pattern), 3):
if int(self.pattern[i]) not in three_syllables_types2:
break
else:
for i in range(2, len(self.pattern), 3):
if int(self.pattern[i]) not in three_syllables_types1:
break
else:
self.meter = MeterType.AMPHIBRACH
return
# Checking for anapest
for i in range(0, len(self.pattern), 3):
if int(self.pattern[i]) not in three_syllables_types1:
break
else:
for i in range(1, len(self.pattern), 3):
if int(self.pattern[i]) not in three_syllables_types2:
break
else:
self.meter = MeterType.ANAPEST
return
self.meter = MeterType.UNKNOWN
def __str__(self) -> str:
'''Override str method to show pattern on print().'''
return f"{self.pattern}: {self.meter}" | 0.792062 | 0.263937 |
owner_users = {
'email': '<EMAIL>',
'password': '<PASSWORD>-'
}
provider_users = {
'<EMAIL>': '<PASSWORD>-',
'<EMAIL>': '<PASSWORD>-',
'<EMAIL>': 'Kapital-Ist',
'<EMAIL>': 'Kapital-Ist',
'<EMAIL>': 'Kapital-Ist'
}
broker = {'url': 'https://prozorro.kapital-ist.kiev.ua'}
# login
login_button = '#loginLink'
username_field = '#Email'
pass_field = <PASSWORD>'
submit_login_button = 'body > div.body-wrapper > div > div > form > div:nth-child(4) > div > input'
# create tender
create_tender_url = 'https://prozorro.kapital-ist.kiev.ua/draft/belowThreshold/createTender'
input_title = '#Title'
input_description = '#Description'
input_start_enquiry = '#EnquiryPeriod_StartDate_Local'
input_end_enquiry = '#EnquiryPeriod_EndDate_Local'
input_start_tender = '#TenderPeriod_StartDate_Local'
input_end_tender = '#TenderPeriod_EndDate_Local'
# 6/1/2017 13:00 AM format
save_draft = 'body > div.body-wrapper > div > div > form > div:nth-child(6) > div > input'
add_lot = '#draftTender > fieldset:nth-child(5) > a:nth-child(5)'
input_lot_title = '#Title'
input_lot_description = '#Description'
input_value_amount = 'body > div.body-wrapper > div > div > form > div:nth-child(5) > div.form-group > div > span.k-widget.k-numerictextbox.currency.text-box.single-line > span > input.k-formatted-value.currency.text-box.single-line.k-input'
input_min_step = '#MinimalStep_Amount'
save_draft2 = 'body > div.body-wrapper > div > div > form > div.col-md-offset-3.col-md-9 > input'
add_item = '#draftTender > fieldset:nth-child(6) > a:nth-child(5)'
input_item_description = '#Description'
select_cpv = '#ListCPVTitle'
select_cpv_1item = r'#\30 3000000-1_anchor'
cpv_selected = '#SelectedCPV'
select_unit = '#UnitId_chosen > a'
select_unit1 = '#UnitId_chosen > div > ul > li:nth-child(1)'
input_quantity = '#Quantity'
input_delivery_start_date = '#DeliveryDate_StartDate_Local'
input_delivery_end_date = '#DeliveryDate_EndDate_Local'
input_dropdown_region = 'body > div.body-wrapper > div > div > form > div:nth-child(11) > div:nth-child(5) > div > span.k-widget.k-combobox.k-header.form-control.text-box.single-line > span > input'
input_postal_code = '#DeliveryAddress_PostalCode'
input_locality = '#DeliveryAddress_Locality'
input_delivery_address = '#DeliveryAddress_Street'
save_draft3 = 'body > div.body-wrapper > div > div > form > div.col-md-offset-3.col-md-9 > input'
add_doc_button = '#draftTender > fieldset:nth-child(5) > a:nth-child(7)'
doc_title = '#Description'
doc_input = '#Document'
save_draft4 = 'body > div.body-wrapper > div > div > form > div.col-md-offset-3.col-md-9 > input'
submit_create_tender = '#submitPublish'
# search for tender
tender_get_id_locator = 'body > div.body-wrapper > div > div > h3 > a' # xpath UA-2017-05-30-000023
# go to create tender url
select_search_type = 'body > div.body-wrapper > div > div > div:nth-child(2) > a:nth-child(2)'
input_search_field = '#ProcurementNumber'
search_tender_button = '#search'
select_tender = '#tender-table > div > table > tbody > tr > td:nth-child(1) > a'
select_bids = '#tabstrip > li:nth-child(2) > a'
make_bid_button = '#bids > div > div > a'
select_lot = '#form0 > div.modal-body > div > div.lots > div.form-group > div > label > span.cr'
input_bid_amount = '//input[@class="k-formatted-value currency text-box single-line k-input"]'
input_bid_doc = '#files'
# doc - add_doc
submit_bid_button = '#form0 > div.modal-footer > input'
delete_bid_button = '#bids > div > fieldset:nth-child(1) > div > div.col-md-2 > a' | kapitalist_load/locators.py | owner_users = {
'email': '<EMAIL>',
'password': '<PASSWORD>-'
}
provider_users = {
'<EMAIL>': '<PASSWORD>-',
'<EMAIL>': '<PASSWORD>-',
'<EMAIL>': 'Kapital-Ist',
'<EMAIL>': 'Kapital-Ist',
'<EMAIL>': 'Kapital-Ist'
}
broker = {'url': 'https://prozorro.kapital-ist.kiev.ua'}
# login
login_button = '#loginLink'
username_field = '#Email'
pass_field = <PASSWORD>'
submit_login_button = 'body > div.body-wrapper > div > div > form > div:nth-child(4) > div > input'
# create tender
create_tender_url = 'https://prozorro.kapital-ist.kiev.ua/draft/belowThreshold/createTender'
input_title = '#Title'
input_description = '#Description'
input_start_enquiry = '#EnquiryPeriod_StartDate_Local'
input_end_enquiry = '#EnquiryPeriod_EndDate_Local'
input_start_tender = '#TenderPeriod_StartDate_Local'
input_end_tender = '#TenderPeriod_EndDate_Local'
# 6/1/2017 13:00 AM format
save_draft = 'body > div.body-wrapper > div > div > form > div:nth-child(6) > div > input'
add_lot = '#draftTender > fieldset:nth-child(5) > a:nth-child(5)'
input_lot_title = '#Title'
input_lot_description = '#Description'
input_value_amount = 'body > div.body-wrapper > div > div > form > div:nth-child(5) > div.form-group > div > span.k-widget.k-numerictextbox.currency.text-box.single-line > span > input.k-formatted-value.currency.text-box.single-line.k-input'
input_min_step = '#MinimalStep_Amount'
save_draft2 = 'body > div.body-wrapper > div > div > form > div.col-md-offset-3.col-md-9 > input'
add_item = '#draftTender > fieldset:nth-child(6) > a:nth-child(5)'
input_item_description = '#Description'
select_cpv = '#ListCPVTitle'
select_cpv_1item = r'#\30 3000000-1_anchor'
cpv_selected = '#SelectedCPV'
select_unit = '#UnitId_chosen > a'
select_unit1 = '#UnitId_chosen > div > ul > li:nth-child(1)'
input_quantity = '#Quantity'
input_delivery_start_date = '#DeliveryDate_StartDate_Local'
input_delivery_end_date = '#DeliveryDate_EndDate_Local'
input_dropdown_region = 'body > div.body-wrapper > div > div > form > div:nth-child(11) > div:nth-child(5) > div > span.k-widget.k-combobox.k-header.form-control.text-box.single-line > span > input'
input_postal_code = '#DeliveryAddress_PostalCode'
input_locality = '#DeliveryAddress_Locality'
input_delivery_address = '#DeliveryAddress_Street'
save_draft3 = 'body > div.body-wrapper > div > div > form > div.col-md-offset-3.col-md-9 > input'
add_doc_button = '#draftTender > fieldset:nth-child(5) > a:nth-child(7)'
doc_title = '#Description'
doc_input = '#Document'
save_draft4 = 'body > div.body-wrapper > div > div > form > div.col-md-offset-3.col-md-9 > input'
submit_create_tender = '#submitPublish'
# search for tender
tender_get_id_locator = 'body > div.body-wrapper > div > div > h3 > a' # xpath UA-2017-05-30-000023
# go to create tender url
select_search_type = 'body > div.body-wrapper > div > div > div:nth-child(2) > a:nth-child(2)'
input_search_field = '#ProcurementNumber'
search_tender_button = '#search'
select_tender = '#tender-table > div > table > tbody > tr > td:nth-child(1) > a'
select_bids = '#tabstrip > li:nth-child(2) > a'
make_bid_button = '#bids > div > div > a'
select_lot = '#form0 > div.modal-body > div > div.lots > div.form-group > div > label > span.cr'
input_bid_amount = '//input[@class="k-formatted-value currency text-box single-line k-input"]'
input_bid_doc = '#files'
# doc - add_doc
submit_bid_button = '#form0 > div.modal-footer > input'
delete_bid_button = '#bids > div > fieldset:nth-child(1) > div > div.col-md-2 > a' | 0.228845 | 0.065605 |
from abc import abstractmethod
from io import StringIO
from typing import *
import numpy as np
import torch
from torch.utils.data import DataLoader
# The Protocol type does not exist until Python 3.7.
# TODO: Remove the try-except when Python 3.6 support is dropped.
try:
from typing import Protocol
except ImportError:
from abc import ABC as Protocol
ColumnShape = Optional[Tuple[Optional[int], ...]]
RowShape = Optional[Tuple[ColumnShape, ...]]
class Dataset(Protocol):
'''The dataset protocol.
'''
@abstractmethod
def __len__(self):
raise NotImplementedError
@abstractmethod
def __getitem__(self, index):
raise NotImplementedError
def common_shape(shape1, shape2):
'''Compute the most immediate common shape of the inputs.
E.g.::
>>> common_shape(shape1=(1, (1, 1)), shape2=(2, (1, 1)))
(None, (1, 1))
>>> common_shape(shape1=(1, (1, 1)), shape2=(1, (2, 1)))
(1, (None, 1))
>>> common_shape(shape1=(1, (1, 1)), shape2=(1, (1, 1, 2)))
(1, None)
>>> common_shape(shape1=(1, (1, 1)), shape2=(1, (1, 1), 2))
None
Arguments:
shape1 (VariableShape):
The first shape.
shape2 (VariableShape):
The second shape.
Returns:
VariableShape:
The least general (i.e. most specialized) shape for which both
input shapes are instances.
'''
if shape1 == shape2:
return shape1
if np.isscalar(shape1) or np.isscalar(shape2):
return None
if len(shape1) != len(shape2):
return None
return tuple(common_shape(a, b) for a, b in zip(shape1, shape2))
def shape(dataset):
'''Infer the shape of the dataset.
This function will sample up to four rows from the dataset to identify
if any part of the shape is variable.
Arguments:
dataset (Dataset):
The dataset whose shape will be checked.
Returns:
RowShape:
A tuple of shapes, one for each column. If any part of the shape
is variable, it is replaced by :obj:`None`.
Example:
>>> from toys.datasets import SimulatedLinear
>>> a = SimulatedLinear(100, in_shape=(32,32,3), out_shape=10)
>>> toys.shape(a)
((32, 32, 3), (10,))
.. todo::
The example does not run.
'''
n = len(dataset)
if n == 0: return None
row1 = dataset[np.random.randint(n)]
row2 = dataset[np.random.randint(n)]
row3 = dataset[np.random.randint(n)]
row4 = dataset[np.random.randint(n)]
shape1 = tuple(np.shape(col) for col in row1)
shape2 = tuple(np.shape(col) for col in row2)
shape3 = tuple(np.shape(col) for col in row3)
shape4 = tuple(np.shape(col) for col in row4)
shape5 = common_shape(shape1, shape2)
shape6 = common_shape(shape3, shape4)
return common_shape(shape5, shape6)
class Subset(Dataset):
'''A non-empty subset of some other dataset.
Attributes:
dataset (Dataset):
The source dataset.
indices (Sequence[int]):
The indices of elements contained in this subset.
'''
def __init__(self, dataset, indices):
assert 0 <= max(indices) < len(dataset)
assert 0 <= min(indices) < len(dataset)
self.dataset = dataset
self.indices = indices
def __len__(self):
return len(self.indices)
def __getitem__(self, index):
i = self.indices[index]
cols = self.dataset[i]
return cols
def __repr__(self):
return f'Subset({repr(self.dataset)}, {repr(self.indices)})'
@property
def hints(self):
return getattr(self.dataset, 'hints', {})
def subset(dataset, indices):
'''Select a subset of some dataset by row indices.
Arguments:
dataset (Dataset):
The source dataset.
indices (Sequence[int]):
The indices of elements contained in this subset.
Returns:
Dataset:
A subset of the input.
Example:
>>> from toys.datasets import SimulatedLinear
>>> a = SimulatedLinear(100)
>>> len(a)
100
>>> b = toys.subset(a, np.arange(0, 50))
>>> len(b)
50
'''
return Subset(dataset, indices)
class Zip(Dataset):
'''Combines the columns of many datasets into one.
'''
def __init__(self, *datasets):
if len(datasets) == 0:
raise TypeError('Zip() requires at least 1 dataset.')
for d in datasets:
if len(d) != len(datasets[0]):
raise ValueError('Zip() requires all datasets to be the same length.')
self.datasets = datasets
def __len__(self):
return len(self.datasets[0])
def __getitem__(self, index):
columns = []
for dataset in self.datasets:
x = dataset[index]
columns.extend(x)
return tuple(columns)
def __repr__(self):
buf = StringIO()
buf.write('Zip(')
datasets = (repr(ds) for ds in self.datasets)
print(*datasets, sep=', ', end=')', file=buf)
return buf.getvalue()
@property
def hints(self):
ret = {}
for ds in reversed(self.datasets):
sub = getattr(ds, 'hints', {})
ret.update(sub)
return ret
# This is reexported as toys.zip.
# The underscore is used here to prevent overriding builtins.zip.
def zip_(*datasets):
'''Returns a dataset with all of the columns of the given datasets.
Arguments:
datasets (Dataset):
The datasets to combine.
Returns:
Dataset:
The combined dataset.
Example:
>>> from toys.datasets import SimulatedLinear
>>> a = SimulatedLinear(100, in_shape=4, out_shape=5)
>>> b = SimulatedLinear(100, in_shape=6, out_shape=7)
>>> c = toys.zip(a, b)
>>> len(a) == len(b)
True
>>> toys.shape(a)
((4,), (5,))
>>> toys.shape(b)
((6,), (7,))
>>> toys.shape(c)
((4,), (5,), (6,), (7,))
'''
if len(datasets) == 0:
raise TypeError('zip() requires at least 1 dataset.')
if len(datasets) == 1:
return datasets[0]
else:
return Zip(*datasets)
class Concat(Dataset):
'''Combines the rows of many datasets into one.
'''
def __init__(self, *datasets):
if len(datasets) == 0:
raise TypeError('Concat() requires at least 1 dataset.')
self.lens = tuple(len(d) for d in datasets)
self.datasets = datasets
def __len__(self):
return sum(self.lens)
def __getitem__(self, index):
for i, n in enumerate(self.lens):
if n <= index:
index -= n
else:
return self.datasets[i][index]
def __repr__(self):
buf = StringIO()
buf.write('Concat(')
datasets = (repr(ds) for ds in self.datasets)
print(*datasets, sep=', ', end=')', file=buf)
return buf.getvalue()
@property
def hints(self):
ret = {}
for ds in reversed(self.datasets):
sub = getattr(ds, 'hints', {})
ret.update(sub)
return ret
def concat(*datasets):
'''Returns a dataset with all of the rows of the given datasets.
Arguments:
datasets (Dataset):
The datasets to combine.
Returns:
Dataset:
The combined dataset.
Example:
>>> from toys.datasets import SimulatedLinear
>>> a = SimulatedLinear(100)
>>> b = SimulatedLinear(200)
>>> c = toys.concat(a, b)
>>> toys.shape(a) == toys.shape(b) == toys.shape(c)
True
>>> len(a)
100
>>> len(b)
200
>>> len(c)
300
'''
if len(datasets) == 0:
raise TypeError('concat() requires at least 1 dataset.')
if len(datasets) == 1:
return datasets[0]
else:
return Concat(*datasets)
class Flat(Dataset):
'''Flatten and concatenate the columns of a dataset.
If ``supervised=True``, then the rightmost column is flattened but not
concatenated to the others, e.g. treat that column as the targets.
'''
def __init__(self, base, supervised=True):
super().__init__()
self.base = base
self.supervised = supervised
def __len__(self):
return len(self.base)
def __getitem__(self, index):
row = self.base[index]
row = [x.reshape(-1) for x in row]
if self.supervised:
*features, target = row
features = np.concatenate(features)
return features, target
else:
features = np.concatenate(row)
return (features,)
def __repr__(self):
return f'Flat({repr(self.base)}, supervised={repr(self.supervised)})'
@property
def hints(self):
return self.base.hints
def flatten(dataset, supervised=True):
'''Returns a dataset whose columns are flattened and concatenated together.
In supervised mode, the rightmost column is flattened but is kept as a
separate column. This is for supervised estimators which expect a target
value in a separate column.
Arguments:
dataset (Dataset):
The dataset to flatten.
supervised (bool):
Operate in supervised mode.
Returns:
Dataset:
The combined dataset. If supervised is False, the dataset contains
a single column with a flat shape. If supervised is True, the
dataset contains two columns with flat shape.
Example:
>>> a = SimulatedLinear(100, in_shape=(32,32,3), out_shape=(32,32,15))
>>> toys.shape(a)
((32, 32, 3), (32, 32, 15))
>>> b = toys.flatten(a)
>>> toys.shape(b)
((3072,), (15360,))
.. todo::
The example does not run.
'''
cols = dataset[0]
if supervised:
assert 2 <= len(cols)
if 3 <= len(cols):
return Flat(dataset, supervised)
if 2 == len(cols) and not supervised:
return Flat(dataset, supervised)
for col in cols:
if len(col.shape) != 1:
return Flat(dataset, supervised)
# If we've got this far, the dataset is already flat
return dataset
def batches(dataset, batch_size=None, **kwargs):
'''Iterates over a dataset in batches.
If the dataset has an attribute :attr:`~Dataset.hints`, then it must be a
dictionary mapping argument names to recommended values.
.. seealso::
See the :doc:`/guides/datasets` user guide for information on batching
and argument hinting.
Arguments:
dataset (Dataset):
The dataset to iterate over.
batch_size (int):
The maximum size of the batches.
Keyword Arguments:
**kwargs:
Keyword arguments are forwarded to |DataLoader|.
Returns:
torch.utils.data.DataLoader:
An iteratable over batches of the dataset.
Example:
.. todo::
Add an example.
.. |DataLoader| replace::
:class:`~torch.utils.data.DataLoader`
'''
if batch_size is not None:
kwargs.setdefault('batch_size', batch_size)
hints = getattr(dataset, 'hints', {})
kwargs = {**hints, **kwargs}
kwargs.setdefault('pin_memory', torch.cuda.is_available())
kwargs.setdefault('batch_size', batch_size)
return DataLoader(dataset, **kwargs) | toys/data.py | from abc import abstractmethod
from io import StringIO
from typing import *
import numpy as np
import torch
from torch.utils.data import DataLoader
# The Protocol type does not exist until Python 3.7.
# TODO: Remove the try-except when Python 3.6 support is dropped.
try:
from typing import Protocol
except ImportError:
from abc import ABC as Protocol
ColumnShape = Optional[Tuple[Optional[int], ...]]
RowShape = Optional[Tuple[ColumnShape, ...]]
class Dataset(Protocol):
'''The dataset protocol.
'''
@abstractmethod
def __len__(self):
raise NotImplementedError
@abstractmethod
def __getitem__(self, index):
raise NotImplementedError
def common_shape(shape1, shape2):
'''Compute the most immediate common shape of the inputs.
E.g.::
>>> common_shape(shape1=(1, (1, 1)), shape2=(2, (1, 1)))
(None, (1, 1))
>>> common_shape(shape1=(1, (1, 1)), shape2=(1, (2, 1)))
(1, (None, 1))
>>> common_shape(shape1=(1, (1, 1)), shape2=(1, (1, 1, 2)))
(1, None)
>>> common_shape(shape1=(1, (1, 1)), shape2=(1, (1, 1), 2))
None
Arguments:
shape1 (VariableShape):
The first shape.
shape2 (VariableShape):
The second shape.
Returns:
VariableShape:
The least general (i.e. most specialized) shape for which both
input shapes are instances.
'''
if shape1 == shape2:
return shape1
if np.isscalar(shape1) or np.isscalar(shape2):
return None
if len(shape1) != len(shape2):
return None
return tuple(common_shape(a, b) for a, b in zip(shape1, shape2))
def shape(dataset):
'''Infer the shape of the dataset.
This function will sample up to four rows from the dataset to identify
if any part of the shape is variable.
Arguments:
dataset (Dataset):
The dataset whose shape will be checked.
Returns:
RowShape:
A tuple of shapes, one for each column. If any part of the shape
is variable, it is replaced by :obj:`None`.
Example:
>>> from toys.datasets import SimulatedLinear
>>> a = SimulatedLinear(100, in_shape=(32,32,3), out_shape=10)
>>> toys.shape(a)
((32, 32, 3), (10,))
.. todo::
The example does not run.
'''
n = len(dataset)
if n == 0: return None
row1 = dataset[np.random.randint(n)]
row2 = dataset[np.random.randint(n)]
row3 = dataset[np.random.randint(n)]
row4 = dataset[np.random.randint(n)]
shape1 = tuple(np.shape(col) for col in row1)
shape2 = tuple(np.shape(col) for col in row2)
shape3 = tuple(np.shape(col) for col in row3)
shape4 = tuple(np.shape(col) for col in row4)
shape5 = common_shape(shape1, shape2)
shape6 = common_shape(shape3, shape4)
return common_shape(shape5, shape6)
class Subset(Dataset):
'''A non-empty subset of some other dataset.
Attributes:
dataset (Dataset):
The source dataset.
indices (Sequence[int]):
The indices of elements contained in this subset.
'''
def __init__(self, dataset, indices):
assert 0 <= max(indices) < len(dataset)
assert 0 <= min(indices) < len(dataset)
self.dataset = dataset
self.indices = indices
def __len__(self):
return len(self.indices)
def __getitem__(self, index):
i = self.indices[index]
cols = self.dataset[i]
return cols
def __repr__(self):
return f'Subset({repr(self.dataset)}, {repr(self.indices)})'
@property
def hints(self):
return getattr(self.dataset, 'hints', {})
def subset(dataset, indices):
'''Select a subset of some dataset by row indices.
Arguments:
dataset (Dataset):
The source dataset.
indices (Sequence[int]):
The indices of elements contained in this subset.
Returns:
Dataset:
A subset of the input.
Example:
>>> from toys.datasets import SimulatedLinear
>>> a = SimulatedLinear(100)
>>> len(a)
100
>>> b = toys.subset(a, np.arange(0, 50))
>>> len(b)
50
'''
return Subset(dataset, indices)
class Zip(Dataset):
'''Combines the columns of many datasets into one.
'''
def __init__(self, *datasets):
if len(datasets) == 0:
raise TypeError('Zip() requires at least 1 dataset.')
for d in datasets:
if len(d) != len(datasets[0]):
raise ValueError('Zip() requires all datasets to be the same length.')
self.datasets = datasets
def __len__(self):
return len(self.datasets[0])
def __getitem__(self, index):
columns = []
for dataset in self.datasets:
x = dataset[index]
columns.extend(x)
return tuple(columns)
def __repr__(self):
buf = StringIO()
buf.write('Zip(')
datasets = (repr(ds) for ds in self.datasets)
print(*datasets, sep=', ', end=')', file=buf)
return buf.getvalue()
@property
def hints(self):
ret = {}
for ds in reversed(self.datasets):
sub = getattr(ds, 'hints', {})
ret.update(sub)
return ret
# This is reexported as toys.zip.
# The underscore is used here to prevent overriding builtins.zip.
def zip_(*datasets):
'''Returns a dataset with all of the columns of the given datasets.
Arguments:
datasets (Dataset):
The datasets to combine.
Returns:
Dataset:
The combined dataset.
Example:
>>> from toys.datasets import SimulatedLinear
>>> a = SimulatedLinear(100, in_shape=4, out_shape=5)
>>> b = SimulatedLinear(100, in_shape=6, out_shape=7)
>>> c = toys.zip(a, b)
>>> len(a) == len(b)
True
>>> toys.shape(a)
((4,), (5,))
>>> toys.shape(b)
((6,), (7,))
>>> toys.shape(c)
((4,), (5,), (6,), (7,))
'''
if len(datasets) == 0:
raise TypeError('zip() requires at least 1 dataset.')
if len(datasets) == 1:
return datasets[0]
else:
return Zip(*datasets)
class Concat(Dataset):
'''Combines the rows of many datasets into one.
'''
def __init__(self, *datasets):
if len(datasets) == 0:
raise TypeError('Concat() requires at least 1 dataset.')
self.lens = tuple(len(d) for d in datasets)
self.datasets = datasets
def __len__(self):
return sum(self.lens)
def __getitem__(self, index):
for i, n in enumerate(self.lens):
if n <= index:
index -= n
else:
return self.datasets[i][index]
def __repr__(self):
buf = StringIO()
buf.write('Concat(')
datasets = (repr(ds) for ds in self.datasets)
print(*datasets, sep=', ', end=')', file=buf)
return buf.getvalue()
@property
def hints(self):
ret = {}
for ds in reversed(self.datasets):
sub = getattr(ds, 'hints', {})
ret.update(sub)
return ret
def concat(*datasets):
'''Returns a dataset with all of the rows of the given datasets.
Arguments:
datasets (Dataset):
The datasets to combine.
Returns:
Dataset:
The combined dataset.
Example:
>>> from toys.datasets import SimulatedLinear
>>> a = SimulatedLinear(100)
>>> b = SimulatedLinear(200)
>>> c = toys.concat(a, b)
>>> toys.shape(a) == toys.shape(b) == toys.shape(c)
True
>>> len(a)
100
>>> len(b)
200
>>> len(c)
300
'''
if len(datasets) == 0:
raise TypeError('concat() requires at least 1 dataset.')
if len(datasets) == 1:
return datasets[0]
else:
return Concat(*datasets)
class Flat(Dataset):
'''Flatten and concatenate the columns of a dataset.
If ``supervised=True``, then the rightmost column is flattened but not
concatenated to the others, e.g. treat that column as the targets.
'''
def __init__(self, base, supervised=True):
super().__init__()
self.base = base
self.supervised = supervised
def __len__(self):
return len(self.base)
def __getitem__(self, index):
row = self.base[index]
row = [x.reshape(-1) for x in row]
if self.supervised:
*features, target = row
features = np.concatenate(features)
return features, target
else:
features = np.concatenate(row)
return (features,)
def __repr__(self):
return f'Flat({repr(self.base)}, supervised={repr(self.supervised)})'
@property
def hints(self):
return self.base.hints
def flatten(dataset, supervised=True):
'''Returns a dataset whose columns are flattened and concatenated together.
In supervised mode, the rightmost column is flattened but is kept as a
separate column. This is for supervised estimators which expect a target
value in a separate column.
Arguments:
dataset (Dataset):
The dataset to flatten.
supervised (bool):
Operate in supervised mode.
Returns:
Dataset:
The combined dataset. If supervised is False, the dataset contains
a single column with a flat shape. If supervised is True, the
dataset contains two columns with flat shape.
Example:
>>> a = SimulatedLinear(100, in_shape=(32,32,3), out_shape=(32,32,15))
>>> toys.shape(a)
((32, 32, 3), (32, 32, 15))
>>> b = toys.flatten(a)
>>> toys.shape(b)
((3072,), (15360,))
.. todo::
The example does not run.
'''
cols = dataset[0]
if supervised:
assert 2 <= len(cols)
if 3 <= len(cols):
return Flat(dataset, supervised)
if 2 == len(cols) and not supervised:
return Flat(dataset, supervised)
for col in cols:
if len(col.shape) != 1:
return Flat(dataset, supervised)
# If we've got this far, the dataset is already flat
return dataset
def batches(dataset, batch_size=None, **kwargs):
'''Iterates over a dataset in batches.
If the dataset has an attribute :attr:`~Dataset.hints`, then it must be a
dictionary mapping argument names to recommended values.
.. seealso::
See the :doc:`/guides/datasets` user guide for information on batching
and argument hinting.
Arguments:
dataset (Dataset):
The dataset to iterate over.
batch_size (int):
The maximum size of the batches.
Keyword Arguments:
**kwargs:
Keyword arguments are forwarded to |DataLoader|.
Returns:
torch.utils.data.DataLoader:
An iteratable over batches of the dataset.
Example:
.. todo::
Add an example.
.. |DataLoader| replace::
:class:`~torch.utils.data.DataLoader`
'''
if batch_size is not None:
kwargs.setdefault('batch_size', batch_size)
hints = getattr(dataset, 'hints', {})
kwargs = {**hints, **kwargs}
kwargs.setdefault('pin_memory', torch.cuda.is_available())
kwargs.setdefault('batch_size', batch_size)
return DataLoader(dataset, **kwargs) | 0.720663 | 0.677261 |
import datetime
import logging
import rx
from rx.core import AnonymousObservable
import xml.etree.ElementTree as ET
from phone_communication_backup_coalescer.files import dir_to_files_mapper
from phone_communication_backup_coalescer import __version__, __name__
def as_list(item):
if not hasattr(item, '__iter__'):
item = [item]
return item
class Coalescer:
def __init__(self, controller):
self._controller = controller
def coalesce(self, source_dirs, output_file_name):
def write_tree(tree):
with open(output_file_name, 'w') as f:
xml_declaration = ET.ProcessingInstruction('xml',
"version='1.0' encoding='UTF-8' standalone='yes'")
build_info = ET.Comment('Created by {} v{} on {}'.format(__name__, __version__, datetime.datetime.now()))
xsl_declaration = ET.ProcessingInstruction('xml-stylesheet',
"type='text/xsl' href='{}'".format(self._controller.xsl_file_name))
f.write(ET.tostring(xml_declaration))
f.write(ET.tostring(build_info))
f.write(ET.tostring(xsl_declaration))
tree.write(f)
def append_item_to_tree(root, item):
self._controller.tree_appender(root, item)
return root
meta = [[], 0]
def rememberFile(f):
meta[0].append(f)
def safely_parse(f):
def subscribe(observer):
try:
items = list(self._controller.parse_file(f))
except Exception as ex:
error = {'type': 'error', 'value': ex, 'file': f}
observer.on_next(error)
observer.on_completed()
return
for item in items:
observer.on_next(item)
observer.on_completed()
return AnonymousObservable(subscribe)
def increment_counter(_):
meta[1] += 1
def print_errors(e):
if e['type'] == 'error':
logging.error('Error: %s', e['value'])
# TODO print warnings
source = rx.Observable.from_iterable(as_list(source_dirs))\
.flat_map(dir_to_files_mapper(self._controller.filename_pattern))\
.distinct()\
.do_action(rememberFile)\
.do_action(lambda f: logging.info('processing %s', f))\
.flat_map(safely_parse)\
.do_action(print_errors)\
.where(lambda e: e['type'] == 'item')\
.map(lambda e: e['value'])\
.distinct()\
.do_action(increment_counter)\
.to_list()\
.flat_map(lambda l: self._controller.sort(l))\
.reduce(append_item_to_tree, self._controller.tree_seed())\
.do_action(lambda _: logging.info('writing %s', output_file_name))\
.subscribe(write_tree)
return tuple(meta) | phone_communication_backup_coalescer/coalesce.py | import datetime
import logging
import rx
from rx.core import AnonymousObservable
import xml.etree.ElementTree as ET
from phone_communication_backup_coalescer.files import dir_to_files_mapper
from phone_communication_backup_coalescer import __version__, __name__
def as_list(item):
if not hasattr(item, '__iter__'):
item = [item]
return item
class Coalescer:
def __init__(self, controller):
self._controller = controller
def coalesce(self, source_dirs, output_file_name):
def write_tree(tree):
with open(output_file_name, 'w') as f:
xml_declaration = ET.ProcessingInstruction('xml',
"version='1.0' encoding='UTF-8' standalone='yes'")
build_info = ET.Comment('Created by {} v{} on {}'.format(__name__, __version__, datetime.datetime.now()))
xsl_declaration = ET.ProcessingInstruction('xml-stylesheet',
"type='text/xsl' href='{}'".format(self._controller.xsl_file_name))
f.write(ET.tostring(xml_declaration))
f.write(ET.tostring(build_info))
f.write(ET.tostring(xsl_declaration))
tree.write(f)
def append_item_to_tree(root, item):
self._controller.tree_appender(root, item)
return root
meta = [[], 0]
def rememberFile(f):
meta[0].append(f)
def safely_parse(f):
def subscribe(observer):
try:
items = list(self._controller.parse_file(f))
except Exception as ex:
error = {'type': 'error', 'value': ex, 'file': f}
observer.on_next(error)
observer.on_completed()
return
for item in items:
observer.on_next(item)
observer.on_completed()
return AnonymousObservable(subscribe)
def increment_counter(_):
meta[1] += 1
def print_errors(e):
if e['type'] == 'error':
logging.error('Error: %s', e['value'])
# TODO print warnings
source = rx.Observable.from_iterable(as_list(source_dirs))\
.flat_map(dir_to_files_mapper(self._controller.filename_pattern))\
.distinct()\
.do_action(rememberFile)\
.do_action(lambda f: logging.info('processing %s', f))\
.flat_map(safely_parse)\
.do_action(print_errors)\
.where(lambda e: e['type'] == 'item')\
.map(lambda e: e['value'])\
.distinct()\
.do_action(increment_counter)\
.to_list()\
.flat_map(lambda l: self._controller.sort(l))\
.reduce(append_item_to_tree, self._controller.tree_seed())\
.do_action(lambda _: logging.info('writing %s', output_file_name))\
.subscribe(write_tree)
return tuple(meta) | 0.293506 | 0.090655 |
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from bottle import run
from sqlalchemy import exc
import warnings
from bottle import route, request, HTTPResponse
from model import User, Skill
from utilitiy import search, delete, create, set_ext
# create methods
@route("/user/create", method="PUT")
def user_create():
# we get the json from REST
json = request.json
entry = create(json, User, Skill, session, "skills", "skill_id")
try:
session.add(entry)
session.commit()
except exc.IntegrityError:
warnings.simplefilter("default", Warning)
warnings.warn("You tried to add an already existing item.")
session.rollback()
return HTTPResponse(status=201)
@route("/skill/create", method="PUT")
def skill_create():
# we get the json from REST
json = request.json
entry = create(json, Skill, User, session, "users", "user_id")
try:
session.add(entry)
session.commit()
except exc.IntegrityError:
warnings.simplefilter("default", Warning)
warnings.warn("You tried to add an already existing item.")
session.rollback()
return HTTPResponse(status=201)
# delete methods
@route("/user/delete", method="DELETE")
def user_delete():
json = request.json
delete(json, User, session)
return HTTPResponse(status=200)
@route("/skill/delete", method="DELETE")
def skill_delete():
json = request.json
delete(json, Skill, session)
return HTTPResponse(status=200)
# research methods
@route("/user/research")
def user_research():
json = request.json
results = search(json, User, session)
return HTTPResponse(status=200, body="<br>".join([str(ele) for ele in results]))
@route("/skill/research")
def skill_research():
json = request.json
results = search(json, Skill, session)
return HTTPResponse(status=200, body="<br>".join([str(ele) for ele in results]))
# update methods
@route("/user/update", method="POST")
def user_update():
json = request.json
my_class = User
user_id = json.get("user_id", None)
if not user_id:
warnings.simplefilter("error", Warning)
warnings.warn("You need to specify user_id")
# we use the id to search user
query = session.query(my_class).filter(
getattr(my_class, "user_id") == json["user_id"]
)
row = query.first()
row = set_ext(row, json, Skill, session, "skills", "skill_id")
for key, value in json.items():
setattr(row, key, value)
session.commit()
return HTTPResponse(stauts=200)
@route("/skill/update", method="POST")
def skill_update():
json = request.json
my_class = Skill
skill_id = json.get("skill_id", None)
if not skill_id:
warnings.simplefilter("error", Warning)
warnings.warn("You need to specify skill_id")
# we use the id to search skill
query = session.query(my_class).filter(
getattr(my_class, "skill_id") == json["skill_id"]
)
row = query.first()
row = set_ext(row, json, User, session, "users", "user_id")
for key, value in json.items():
setattr(row, key, value)
session.commit()
return HTTPResponse(stauts=200)
if __name__ == "__main__":
# setup the ORM part with sqlalchemy
engine = create_engine(f"sqlite:///../sqlite3.db")
Base = declarative_base()
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
run(host="localhost", port=8080, debug=True) | code/api.py | from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from bottle import run
from sqlalchemy import exc
import warnings
from bottle import route, request, HTTPResponse
from model import User, Skill
from utilitiy import search, delete, create, set_ext
# create methods
@route("/user/create", method="PUT")
def user_create():
# we get the json from REST
json = request.json
entry = create(json, User, Skill, session, "skills", "skill_id")
try:
session.add(entry)
session.commit()
except exc.IntegrityError:
warnings.simplefilter("default", Warning)
warnings.warn("You tried to add an already existing item.")
session.rollback()
return HTTPResponse(status=201)
@route("/skill/create", method="PUT")
def skill_create():
# we get the json from REST
json = request.json
entry = create(json, Skill, User, session, "users", "user_id")
try:
session.add(entry)
session.commit()
except exc.IntegrityError:
warnings.simplefilter("default", Warning)
warnings.warn("You tried to add an already existing item.")
session.rollback()
return HTTPResponse(status=201)
# delete methods
@route("/user/delete", method="DELETE")
def user_delete():
json = request.json
delete(json, User, session)
return HTTPResponse(status=200)
@route("/skill/delete", method="DELETE")
def skill_delete():
json = request.json
delete(json, Skill, session)
return HTTPResponse(status=200)
# research methods
@route("/user/research")
def user_research():
json = request.json
results = search(json, User, session)
return HTTPResponse(status=200, body="<br>".join([str(ele) for ele in results]))
@route("/skill/research")
def skill_research():
json = request.json
results = search(json, Skill, session)
return HTTPResponse(status=200, body="<br>".join([str(ele) for ele in results]))
# update methods
@route("/user/update", method="POST")
def user_update():
json = request.json
my_class = User
user_id = json.get("user_id", None)
if not user_id:
warnings.simplefilter("error", Warning)
warnings.warn("You need to specify user_id")
# we use the id to search user
query = session.query(my_class).filter(
getattr(my_class, "user_id") == json["user_id"]
)
row = query.first()
row = set_ext(row, json, Skill, session, "skills", "skill_id")
for key, value in json.items():
setattr(row, key, value)
session.commit()
return HTTPResponse(stauts=200)
@route("/skill/update", method="POST")
def skill_update():
json = request.json
my_class = Skill
skill_id = json.get("skill_id", None)
if not skill_id:
warnings.simplefilter("error", Warning)
warnings.warn("You need to specify skill_id")
# we use the id to search skill
query = session.query(my_class).filter(
getattr(my_class, "skill_id") == json["skill_id"]
)
row = query.first()
row = set_ext(row, json, User, session, "users", "user_id")
for key, value in json.items():
setattr(row, key, value)
session.commit()
return HTTPResponse(stauts=200)
if __name__ == "__main__":
# setup the ORM part with sqlalchemy
engine = create_engine(f"sqlite:///../sqlite3.db")
Base = declarative_base()
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
run(host="localhost", port=8080, debug=True) | 0.417509 | 0.104249 |
import time
from etcdb import WAIT_WAIT_TIMEOUT, InternalError
from etcdb.eval_expr import eval_expr
from etcdb.execute.dml.insert import get_table_columns
from etcdb.execute.dml.select import prepare_columns, list_table, \
get_row_by_primary_key, eval_row
from etcdb.resultset import ResultSet, Row
def execute_wait(etcd_client, tree, db):
"""Execute WAIT.
:param etcd_client: Etcd client.
:type etcd_client: Client
:param tree: Parsing tree.
:type tree: SQLTree
:param db: Current database.
:type db: str
"""
result_columns = prepare_columns(tree)
result_set = ResultSet(result_columns)
table_columns = get_table_columns(etcd_client, db, tree.table)
for primary_key in list_table(etcd_client, db, tree.table):
table_row = get_row_by_primary_key(etcd_client, db, tree.table,
primary_key)
etcd_index = table_row.etcd_index
if tree.where:
expr = tree.where
try:
wait_index = tree.options['after']
except KeyError:
wait_index = etcd_index + 1
if eval_expr((table_columns, table_row), expr)[1]:
start = time.time()
while True:
if time.time() > start + WAIT_WAIT_TIMEOUT:
raise InternalError('Wait timeout %d '
'seconds expired'
% WAIT_WAIT_TIMEOUT)
try:
new_row = get_row_by_primary_key(etcd_client,
db,
tree.table,
primary_key,
wait=True,
wait_index=wait_index)
break
except KeyError:
wait_index += 1
row = Row(eval_row(table_columns, new_row, tree),
etcd_index=new_row.etcd_index,
modified_index=new_row.modified_index)
result_set.add_row(row)
else:
row = Row(eval_row(table_columns, table_row, tree),
etcd_index=etcd_index,
modified_index=etcd_index)
result_set.add_row(row)
return result_set | etcdb/execute/dml/wait.py | import time
from etcdb import WAIT_WAIT_TIMEOUT, InternalError
from etcdb.eval_expr import eval_expr
from etcdb.execute.dml.insert import get_table_columns
from etcdb.execute.dml.select import prepare_columns, list_table, \
get_row_by_primary_key, eval_row
from etcdb.resultset import ResultSet, Row
def execute_wait(etcd_client, tree, db):
"""Execute WAIT.
:param etcd_client: Etcd client.
:type etcd_client: Client
:param tree: Parsing tree.
:type tree: SQLTree
:param db: Current database.
:type db: str
"""
result_columns = prepare_columns(tree)
result_set = ResultSet(result_columns)
table_columns = get_table_columns(etcd_client, db, tree.table)
for primary_key in list_table(etcd_client, db, tree.table):
table_row = get_row_by_primary_key(etcd_client, db, tree.table,
primary_key)
etcd_index = table_row.etcd_index
if tree.where:
expr = tree.where
try:
wait_index = tree.options['after']
except KeyError:
wait_index = etcd_index + 1
if eval_expr((table_columns, table_row), expr)[1]:
start = time.time()
while True:
if time.time() > start + WAIT_WAIT_TIMEOUT:
raise InternalError('Wait timeout %d '
'seconds expired'
% WAIT_WAIT_TIMEOUT)
try:
new_row = get_row_by_primary_key(etcd_client,
db,
tree.table,
primary_key,
wait=True,
wait_index=wait_index)
break
except KeyError:
wait_index += 1
row = Row(eval_row(table_columns, new_row, tree),
etcd_index=new_row.etcd_index,
modified_index=new_row.modified_index)
result_set.add_row(row)
else:
row = Row(eval_row(table_columns, table_row, tree),
etcd_index=etcd_index,
modified_index=etcd_index)
result_set.add_row(row)
return result_set | 0.253214 | 0.107883 |
import plotly.offline as plotly
import plotly.graph_objs as graph
import sys
import os
outputdir = 'D:/11p/TCC/workspace/tcc/plots'
auto_open_htmls = False
def plot_history(component_type, repoName):
add = []
rem = []
mod = []
dates = []
total = []
component_type = repoName + component_type
f = open(component_type + 'Driller.csv', 'r')
for line in f:
commit = line.split(',')
add.append(int(commit[0]))
rem.append(int(commit[1]))
mod.append(int(commit[2]))
total.append(int(commit[3]))
dates.append(commit[4])
f.close()
addBar = graph.Bar(x = dates, y = add, name = 'Added', marker = dict(color='green'), text = add, textposition = 'outside')
remBar = graph.Bar(x = dates, y = [-r for r in rem], name = 'Removed', marker = dict(color='red'), text = rem, textposition = 'outside')
modBar = graph.Bar(x = dates, y = mod, name = 'Modified', marker = dict(color='blue'), text = mod, textposition = 'outside')
totalLine = graph.Scatter(x = dates, y = total, name = 'Total', mode = 'lines+markers', line = dict(color='orange'))
data = [addBar, remBar, modBar, totalLine]
layout = graph.Layout(
title = component_type + ' Commit History',
barmode = 'relative',
)
figure = graph.Figure(data = data, layout = layout)
plotly.plot(figure, image = 'png', auto_open = auto_open_htmls,
filename = component_type + '_history.html',
image_filename = component_type.replace('\\','_') + '_history')
def plot_repo(repo):
repoName = repo + '\\'
plot_history('activity', repoName)
plot_history('service', repoName)
plot_history('broadcastReceiver', repoName)
plot_history('contentProvider', repoName)
plot_history('permission', repoName)
plot_history('usesPermission', repoName)
def get_dirs():
return [t[1] for t in os.walk(outputdir)][0]
def printAll():
repos = get_dirs()
for repo in repos:
plot_repo(repo)
def printOnly():
repo = raw_input()
while repo != '##':
if '#' not in repo:
plot_repo(repo)
repo = raw_input()
if __name__ == '__main__':
printAll()
#printOnly() | plots/componentHistoryPlot.py | import plotly.offline as plotly
import plotly.graph_objs as graph
import sys
import os
outputdir = 'D:/11p/TCC/workspace/tcc/plots'
auto_open_htmls = False
def plot_history(component_type, repoName):
add = []
rem = []
mod = []
dates = []
total = []
component_type = repoName + component_type
f = open(component_type + 'Driller.csv', 'r')
for line in f:
commit = line.split(',')
add.append(int(commit[0]))
rem.append(int(commit[1]))
mod.append(int(commit[2]))
total.append(int(commit[3]))
dates.append(commit[4])
f.close()
addBar = graph.Bar(x = dates, y = add, name = 'Added', marker = dict(color='green'), text = add, textposition = 'outside')
remBar = graph.Bar(x = dates, y = [-r for r in rem], name = 'Removed', marker = dict(color='red'), text = rem, textposition = 'outside')
modBar = graph.Bar(x = dates, y = mod, name = 'Modified', marker = dict(color='blue'), text = mod, textposition = 'outside')
totalLine = graph.Scatter(x = dates, y = total, name = 'Total', mode = 'lines+markers', line = dict(color='orange'))
data = [addBar, remBar, modBar, totalLine]
layout = graph.Layout(
title = component_type + ' Commit History',
barmode = 'relative',
)
figure = graph.Figure(data = data, layout = layout)
plotly.plot(figure, image = 'png', auto_open = auto_open_htmls,
filename = component_type + '_history.html',
image_filename = component_type.replace('\\','_') + '_history')
def plot_repo(repo):
repoName = repo + '\\'
plot_history('activity', repoName)
plot_history('service', repoName)
plot_history('broadcastReceiver', repoName)
plot_history('contentProvider', repoName)
plot_history('permission', repoName)
plot_history('usesPermission', repoName)
def get_dirs():
return [t[1] for t in os.walk(outputdir)][0]
def printAll():
repos = get_dirs()
for repo in repos:
plot_repo(repo)
def printOnly():
repo = raw_input()
while repo != '##':
if '#' not in repo:
plot_repo(repo)
repo = raw_input()
if __name__ == '__main__':
printAll()
#printOnly() | 0.119434 | 0.107555 |
import os
import tempfile
import tkFileDialog
from Tkinter import *
import tkMessageBox
import json
import requests
import subprocess
import maskgen.maskgen_loader
from maskgen.software_loader import getFileName
from tkinter import ttk
from hp.hp_data import orgs
key = os.path.join(os.path.expanduser("~"), "medifor_ingest.gpg")
hp_settings = os.path.join(os.path.expanduser("~"), ".hpsettings")
class Window(Frame):
def __init__(self, parent, errors):
Frame.__init__(self, parent)
self.parent = parent
self.parent.title("Settings")
self.loader = maskgen.maskgen_loader.MaskGenLoader()
self.setup_window()
maskgen.maskgen_loader.imageLoaded = False
if errors:
tkMessageBox.showerror("Error", "\n".join(errors))
self.info = {"username": ["Username Field", "Enter your project codename."],
"organization": ["Organization Field", "Enter the organization you are affiliated with."],
"apiurl": ["API URL Field", "Enter the API URL for the browser."],
"busername": ["Browser Username Field", "Enter your browser username."],
"bpassword": ["Browser Password Field", "Enter your browser password."],
"hporganization": ["HP Organization Field", "Enter your organization abbreviation for the HP Tool."],
"uploadfolder": ["Folder Field", "Enter the location you would like to upload the tar files to."
"\n\"s3://\" is not necessary."],
"s3-endpoint": ["AWS endpoint URL field", "Enter your endpoint url if you have one."],
"s3-profile": ["AWS profile name field", "Enter your aws profile name if you have multiple config profiles."],
"s3-region": ["AWS region field", "Enter your aws region if you have one."],
"help": ["Help", "For additional help contact <EMAIL>."]}
def setup_window(self):
r = 0
# Info heading
info_text = Label(text="Enter all of the following\ninformation in order to guarantee\nproper setup of"
" the Journaling Tool\nand High Provenance Tool.\nFields marked with an * are"
" mandatory")
info_text.grid(row=r, columnspan=2, pady=5)
r += 1
ufile = getFileName("ManipulatorCodeNames.txt")
if ufile:
with open(ufile, "r") as names:
self.valid_usernames = sorted(names.read().splitlines())
else:
self.valid_usernames = []
self.maskgen_button = Button(text="Select Maskgen Folder", command=self.get_maskgen)
self.maskgen_button.grid(row=r, column=0, columnspan=2)
r += 1
self.master.withdraw()
tkMessageBox.showerror("No Username File", "A username list file could not be found.")
self.master.deiconify()
# General Header
general_label = Label(text="General Setup")
general_label.grid(row=r, columnspan=2)
r += 1
# API URL
self.apiurl_label = Button(text="API URL*", command=lambda: self.get_info("apiurl"))
self.apiurl_label.grid(row=r, column=0, padx=10)
self.apiurl_field = Entry(self.parent)
self.apiurl_field.insert(0, self.loader.get_key('apiurl', ''))
self.apiurl_field.grid(row=r, column=1, padx=10)
r += 1
# Browser Username
self.busername_label = Button(text="Browser Username*", command=lambda: self.get_info("busername"))
self.busername_label.grid(row=r, column=0, padx=10)
self.busername_field = Entry(self.parent)
self.busername_field.grid(row=r, column=1, padx=10)
r += 1
# Browser Password
self.bpassword_label = Button(text="Browser Password*", command=lambda: self.get_info("bpassword"))
self.bpassword_label.grid(row=r, column=0, padx=10)
self.bpassword_field = Entry(self.parent, show="*")
self.bpassword_field.grid(row=r, column=1, padx=10)
r += 1
# Username
self.username_label = Button(text="Username*", command=lambda: self.get_info("username"))
self.username_label.grid(row=r, column=0, padx=10)
self.username_field = ttk.Combobox(values=self.valid_usernames)
self.username_field.insert(0, self.loader.get_key('username', ''))
self.username_field.grid(row=r, column=1, padx=10)
r += 1
# JT Setup
jt_setup = Label(text="Journaling Tool Setup")
jt_setup.grid(row=r, columnspan=2, pady=5)
r += 1
# Organization
self.organization_label = Button(text="Organization*", command=lambda: self.get_info("organization"))
self.organization_label.grid(row=r, column=0, padx=10)
self.organization_field = Entry(self.parent)
self.organization_field.insert(0, self.loader.get_key('organization', ''))
self.organization_field.grid(row=r, column=1, padx=10)
r += 1
# Journal Upload Folder
self.jt_uploadfolder_label = Button(text="Journal Upload Folder", command=lambda: self.get_info("uploadfolder"))
self.jt_uploadfolder_label.grid(row=r, column=0, padx=10)
self.jt_uploadfolder_field = Entry(self.parent)
self.jt_uploadfolder_field.insert(0, self.loader.get_key('s3info', ''))
self.jt_uploadfolder_field.grid(row=r, column=1, padx=10)
r += 1
# HP Tool Setup
jt_setup = Label(text="High Provenance Tool Setup")
jt_setup.grid(row=r, columnspan=2, pady=5)
r += 1
# HP Organization
hporg_button = Button(text="HP Organization*", command=lambda: self.get_info("hporganization"))
hporg_button.grid(row=r, column=0, padx=10)
self.hporganization = StringVar()
self.hporganization.set(self.loader.get_key('hporganization', ''))
hporg_optionmenu = OptionMenu(None, self.hporganization, *orgs.keys())
hporg_optionmenu.grid(row=r, column=1, padx=10)
r += 1
# High Provenance Upload Folder
self.hpupload_button = Button(text="HP Upload Folder", command=lambda: self.get_info("uploadfolder"))
self.hpupload_button.grid(row=r, column=0, padx=10)
self.hpupload_field = Entry(self.parent)
self.hpupload_field.insert(0, self.loader.get_key('aws-hp', ''))
self.hpupload_field.grid(row=r, column=1, padx=10)
r += 1
# PRNU Upload Folder
self.prnuupload_button = Button(text="PRNU Upload Folder", command=lambda: self.get_info("uploadfolder"))
self.prnuupload_button.grid(row=r, column=0, padx=10)
self.prnuupload_field = Entry(self.parent)
self.prnuupload_field.insert(0, self.loader.get_key('aws-prnu', ''))
self.prnuupload_field.grid(row=r, column=1, padx=10)
r += 1
# AWS Profile
self.profile_button = Button(text="AWS Profile Name", command=lambda: self.get_info("s3-profile"))
self.profile_button.grid(row=r, column=0, padx=10)
self.profile_field = Entry(self.parent)
self.profile_field.insert(0, self.loader.get_key('s3-profile', 'default'))
self.profile_field.grid(row=r, column=1, padx=10)
r+=1
# AWS Endpoint
self.endpoint_button = Button(text="AWS Endpoint URL", command=lambda: self.get_info("s3-endpoint"))
self.endpoint_button.grid(row=r, column=0, padx=10)
self.endpoint_field = Entry(self.parent)
self.endpoint_field.insert(0, self.loader.get_key('s3-endpoint', ''))
self.endpoint_field.grid(row=r, column=1, padx=10)
r += 1
# AWS Region
self.region_button = Button(text="AWS Region", command=lambda: self.get_info("s3-region"))
self.region_button.grid(row=r, column=0, padx=10)
self.region_field = Entry(self.parent)
self.region_field.insert(0, self.loader.get_key('s3-region', 'us-east-1'))
self.region_field.grid(row=r, column=1, padx=10)
r += 1
# Submit Button
submit = Button(text="Submit", command=lambda: self.submit_data())
submit.grid(row=r, column=0, padx=10, pady=5)
# Help Button
help = Button(text="Help", command=lambda: self.get_info("help"))
help.grid(row=r, column=1, padx=10, pady=5)
def get_info(self, item):
tkMessageBox.showinfo(*self.info[item])
def submit_data(self):
self.username = self.username_field.get()
self.organization = self.organization_field.get()
self.apiurl = self.apiurl_field.get()
self.busername = self.busername_field.get()
self.bpassword = self.bpassword_field.get()
self.jt_uploadfolder = self.jt_uploadfolder_field.get()
self.hpupload_folder = self.hpupload_field.get()
self.prnuupload_folder = self.prnuupload_field.get()
self.s3_profile = self.profile_field.get()
self.s3_endpoint = self.endpoint_field.get()
self.s3_region = self.region_field.get()
self.eemail = self.get_recipient()
self.full_org = self.hporganization.get() + " (" + orgs[self.hporganization.get()] + ")"
if not all([self.username, self.organization, self.apiurl, self.busername, self.bpassword,
self.hporganization.get()]):
tkMessageBox.showerror("Missing Fields", "One or more fields are missing required information.")
return
if self.username not in self.valid_usernames:
tkMessageBox.showerror("Invalid Username", "Username not in list of valid usernames.")
self.apitoken = self.get_token()
if self.apitoken:
self.create_json()
tkMessageBox.showinfo("Success!", "Configuration file for {0} has been successfully created!".format(
self.username))
if os.path.isfile(key):
os.remove(key)
self.parent.destroy()
def get_recipient(self):
if not os.path.isfile(key):
return None
try:
gpg_result = subprocess.Popen(["gpg", "--with-colons", key], stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
except Exception:
tkMessageBox.showerror("Error", "There has been an error retrieving the encryption key.")
return
for line in gpg_result[0].split("\n"):
if line.startswith("uid"):
email = line.split("<")[1].split(">")[0]
return email
return None
def get_token(self):
try:
url = self.apiurl[:-1] if self.apiurl.endswith('/') else self.apiurl
headers = {'Content-Type': 'application/json'}
url = url + '/login/'
data = '{"username": "' + self.busername + '","password":"' + self.bpassword + '"}'
response = requests.post(url, data=data, headers=headers)
if response.status_code != requests.codes.ok:
tkMessageBox.showerror("Invalid API Token", "Error calling external service {0} : {1}".format(
url, str(response.content)))
return None
else:
r = json.loads(response.content)
return r['key']
except Exception as e:
return "Error calling external service: {0} : {1}".format(url, str(e.message))
def create_json(self):
data = {"username": self.username, "apitoken": self.apitoken, "organization": self.organization,
"s3info": self.jt_uploadfolder, "apiurl": self.apiurl, "archive_recipient": self.eemail, "aws-hp":
self.hpupload_folder, "aws-prnu": self.prnuupload_folder, "autosave": "600", "fullorgname":
self.full_org, "hp-organization": orgs[self.hporganization.get()], "git.branch": branch,
"s3-endpoint": self.s3_endpoint, "s3-profile": self.s3_profile, "s3-region": self.s3_region}
self.loader.saveall(data.items())
def get_maskgen(self):
maskgen_dir = tkFileDialog.askdirectory()
if maskgen_dir:
namefile = os.path.join(maskgen_dir, "resources", "ManipulatorCodeNames.txt")
if not os.path.isfile(namefile):
tkMessageBox.showerror("Usernames Not Found", "Could not find username text file at {0}.".format(
namefile))
return
with open(namefile) as f:
self.valid_usernames = sorted(f.read().splitlines())
self.username_field['values'] = self.valid_usernames
def update_user_name():
import json
from maskgen.software_loader import getFileName
property_file = getFileName('project_properties.json')
if property_file is None:
return
with open(property_file, 'r') as f:
props = json.load(f)
for prop in props['properties']:
if prop['name'] == 'username':
prop['type'] = 'listfromfile:ManipulatorCodeNames.txt'
with open(property_file, 'w') as f:
json.dump(props, f, indent=2, encoding='utf-8')
def setup():
errors = []
if os.path.isfile(key):
try:
key_installed = subprocess.Popen(["gpg", "--list-keys", key], stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
key_installed.communicate()
if key_installed.returncode == 2:
subprocess.Popen(["gpg", "--import", key])
else:
os.remove(key)
except WindowsError as e:
errors.append("Error encountered while installing encryption key: " + str(e))
# Set autosave to 600s by default
maskgen.maskgen_loader.imageLoaded = False
settings = maskgen.maskgen_loader.MaskGenLoader()
autosave = settings.get_key("autosave")
if autosave is None:
settings.save("autosave", "600")
if sys.platform.startswith("win"):
# Will only run if .maskgen2 doesn't exist, so delete the old commands
if os.path.isfile(os.path.join(os.path.expanduser("~"), "Desktop", "JT.cmd")):
os.remove(os.path.join(os.path.expanduser("~"), "Desktop", "JT.cmd"))
with open(os.path.join(os.path.expanduser("~"), "Desktop", "JT.cmd"), "a+") as startjt:
startjt.writelines(["title Journaling Tool\n", "cd {0}\n".format(os.path.expanduser("~")), "cls\n",
"jtui"])
if os.path.isfile(os.path.join(os.path.expanduser("~"), "Desktop", "HP_Tool.cmd")):
os.remove(os.path.join(os.path.expanduser("~"), "Desktop", "HP_Tool.cmd"))
with open(os.path.join(os.path.expanduser("~"), "Desktop", "HP_Tool.cmd"), "a+") as starthp:
starthp.writelines(["title HP Tool\n", "cd {0}\n".format(os.path.expanduser("~")), "cls\n", "hpgui"])
update_user_name()
return errors
def combine_settings():
maskgen.maskgen_loader.imageLoaded = False
hp_loader = maskgen.maskgen_loader.MaskGenLoader(hp_settings)
hp_keys = {}
for hp_key in hp_loader.__iter__():
hp_keys[hp_key] = hp_loader.get_key(hp_key)
conversions = {"aws": "aws-hp", "aws-prnu": "aws-prnu", "archive_recipient":
"archive_recipient", "inputdir": "inputdir", "outputdir": "outputdir", "organization":
"hp-organization", "seq": "seq"}
maskgen.maskgen_loader.imageLoaded = False
jt_loader = maskgen.maskgen_loader.MaskGenLoader()
jt_keys = {}
for jt_key in jt_loader.__iter__():
jt_keys[jt_key] = jt_loader.get_key(jt_key)
for k, v in hp_keys.items():
if k in conversions.keys():
jt_keys[conversions[k]] = hp_keys[k]
if k == "metadata":
for mk, mv in v.items():
jt_keys[mk] = mv
jt_loader.saveall(jt_keys.items())
os.remove(hp_settings)
def main():
root = Tk()
if os.path.isfile(hp_settings):
combine_settings()
if os.path.isfile(os.path.join(os.path.expanduser("~"), ".maskgen2")):
# Get a maskgen loader to check if fields are defined
maskgen.maskgen_loader.imageLoaded = False
loader = maskgen.maskgen_loader.MaskGenLoader()
if "apitoken" in loader:
exit(0)
if "git.branch" in loader:
global branch
branch = loader.get_key("git.branch")
maskgen.maskgen_loader.imageLoaded = False
errs = setup()
Window(root, errs)
root.wm_resizable(width=FALSE, height=FALSE)
root.mainloop()
if __name__ == "__main__":
branch = "master"
main() | scripts/python/jtprefs.py | import os
import tempfile
import tkFileDialog
from Tkinter import *
import tkMessageBox
import json
import requests
import subprocess
import maskgen.maskgen_loader
from maskgen.software_loader import getFileName
from tkinter import ttk
from hp.hp_data import orgs
key = os.path.join(os.path.expanduser("~"), "medifor_ingest.gpg")
hp_settings = os.path.join(os.path.expanduser("~"), ".hpsettings")
class Window(Frame):
def __init__(self, parent, errors):
Frame.__init__(self, parent)
self.parent = parent
self.parent.title("Settings")
self.loader = maskgen.maskgen_loader.MaskGenLoader()
self.setup_window()
maskgen.maskgen_loader.imageLoaded = False
if errors:
tkMessageBox.showerror("Error", "\n".join(errors))
self.info = {"username": ["Username Field", "Enter your project codename."],
"organization": ["Organization Field", "Enter the organization you are affiliated with."],
"apiurl": ["API URL Field", "Enter the API URL for the browser."],
"busername": ["Browser Username Field", "Enter your browser username."],
"bpassword": ["Browser Password Field", "Enter your browser password."],
"hporganization": ["HP Organization Field", "Enter your organization abbreviation for the HP Tool."],
"uploadfolder": ["Folder Field", "Enter the location you would like to upload the tar files to."
"\n\"s3://\" is not necessary."],
"s3-endpoint": ["AWS endpoint URL field", "Enter your endpoint url if you have one."],
"s3-profile": ["AWS profile name field", "Enter your aws profile name if you have multiple config profiles."],
"s3-region": ["AWS region field", "Enter your aws region if you have one."],
"help": ["Help", "For additional help contact <EMAIL>."]}
def setup_window(self):
r = 0
# Info heading
info_text = Label(text="Enter all of the following\ninformation in order to guarantee\nproper setup of"
" the Journaling Tool\nand High Provenance Tool.\nFields marked with an * are"
" mandatory")
info_text.grid(row=r, columnspan=2, pady=5)
r += 1
ufile = getFileName("ManipulatorCodeNames.txt")
if ufile:
with open(ufile, "r") as names:
self.valid_usernames = sorted(names.read().splitlines())
else:
self.valid_usernames = []
self.maskgen_button = Button(text="Select Maskgen Folder", command=self.get_maskgen)
self.maskgen_button.grid(row=r, column=0, columnspan=2)
r += 1
self.master.withdraw()
tkMessageBox.showerror("No Username File", "A username list file could not be found.")
self.master.deiconify()
# General Header
general_label = Label(text="General Setup")
general_label.grid(row=r, columnspan=2)
r += 1
# API URL
self.apiurl_label = Button(text="API URL*", command=lambda: self.get_info("apiurl"))
self.apiurl_label.grid(row=r, column=0, padx=10)
self.apiurl_field = Entry(self.parent)
self.apiurl_field.insert(0, self.loader.get_key('apiurl', ''))
self.apiurl_field.grid(row=r, column=1, padx=10)
r += 1
# Browser Username
self.busername_label = Button(text="Browser Username*", command=lambda: self.get_info("busername"))
self.busername_label.grid(row=r, column=0, padx=10)
self.busername_field = Entry(self.parent)
self.busername_field.grid(row=r, column=1, padx=10)
r += 1
# Browser Password
self.bpassword_label = Button(text="Browser Password*", command=lambda: self.get_info("bpassword"))
self.bpassword_label.grid(row=r, column=0, padx=10)
self.bpassword_field = Entry(self.parent, show="*")
self.bpassword_field.grid(row=r, column=1, padx=10)
r += 1
# Username
self.username_label = Button(text="Username*", command=lambda: self.get_info("username"))
self.username_label.grid(row=r, column=0, padx=10)
self.username_field = ttk.Combobox(values=self.valid_usernames)
self.username_field.insert(0, self.loader.get_key('username', ''))
self.username_field.grid(row=r, column=1, padx=10)
r += 1
# JT Setup
jt_setup = Label(text="Journaling Tool Setup")
jt_setup.grid(row=r, columnspan=2, pady=5)
r += 1
# Organization
self.organization_label = Button(text="Organization*", command=lambda: self.get_info("organization"))
self.organization_label.grid(row=r, column=0, padx=10)
self.organization_field = Entry(self.parent)
self.organization_field.insert(0, self.loader.get_key('organization', ''))
self.organization_field.grid(row=r, column=1, padx=10)
r += 1
# Journal Upload Folder
self.jt_uploadfolder_label = Button(text="Journal Upload Folder", command=lambda: self.get_info("uploadfolder"))
self.jt_uploadfolder_label.grid(row=r, column=0, padx=10)
self.jt_uploadfolder_field = Entry(self.parent)
self.jt_uploadfolder_field.insert(0, self.loader.get_key('s3info', ''))
self.jt_uploadfolder_field.grid(row=r, column=1, padx=10)
r += 1
# HP Tool Setup
jt_setup = Label(text="High Provenance Tool Setup")
jt_setup.grid(row=r, columnspan=2, pady=5)
r += 1
# HP Organization
hporg_button = Button(text="HP Organization*", command=lambda: self.get_info("hporganization"))
hporg_button.grid(row=r, column=0, padx=10)
self.hporganization = StringVar()
self.hporganization.set(self.loader.get_key('hporganization', ''))
hporg_optionmenu = OptionMenu(None, self.hporganization, *orgs.keys())
hporg_optionmenu.grid(row=r, column=1, padx=10)
r += 1
# High Provenance Upload Folder
self.hpupload_button = Button(text="HP Upload Folder", command=lambda: self.get_info("uploadfolder"))
self.hpupload_button.grid(row=r, column=0, padx=10)
self.hpupload_field = Entry(self.parent)
self.hpupload_field.insert(0, self.loader.get_key('aws-hp', ''))
self.hpupload_field.grid(row=r, column=1, padx=10)
r += 1
# PRNU Upload Folder
self.prnuupload_button = Button(text="PRNU Upload Folder", command=lambda: self.get_info("uploadfolder"))
self.prnuupload_button.grid(row=r, column=0, padx=10)
self.prnuupload_field = Entry(self.parent)
self.prnuupload_field.insert(0, self.loader.get_key('aws-prnu', ''))
self.prnuupload_field.grid(row=r, column=1, padx=10)
r += 1
# AWS Profile
self.profile_button = Button(text="AWS Profile Name", command=lambda: self.get_info("s3-profile"))
self.profile_button.grid(row=r, column=0, padx=10)
self.profile_field = Entry(self.parent)
self.profile_field.insert(0, self.loader.get_key('s3-profile', 'default'))
self.profile_field.grid(row=r, column=1, padx=10)
r+=1
# AWS Endpoint
self.endpoint_button = Button(text="AWS Endpoint URL", command=lambda: self.get_info("s3-endpoint"))
self.endpoint_button.grid(row=r, column=0, padx=10)
self.endpoint_field = Entry(self.parent)
self.endpoint_field.insert(0, self.loader.get_key('s3-endpoint', ''))
self.endpoint_field.grid(row=r, column=1, padx=10)
r += 1
# AWS Region
self.region_button = Button(text="AWS Region", command=lambda: self.get_info("s3-region"))
self.region_button.grid(row=r, column=0, padx=10)
self.region_field = Entry(self.parent)
self.region_field.insert(0, self.loader.get_key('s3-region', 'us-east-1'))
self.region_field.grid(row=r, column=1, padx=10)
r += 1
# Submit Button
submit = Button(text="Submit", command=lambda: self.submit_data())
submit.grid(row=r, column=0, padx=10, pady=5)
# Help Button
help = Button(text="Help", command=lambda: self.get_info("help"))
help.grid(row=r, column=1, padx=10, pady=5)
def get_info(self, item):
    """Pop up the informational dialog registered for *item*.

    ``self.info`` maps item keys to argument tuples for
    ``tkMessageBox.showinfo`` (title, message).
    """
    popup_args = self.info[item]
    tkMessageBox.showinfo(*popup_args)
def submit_data(self):
    """Validate the form, fetch an API token, and write the config file.

    Collects every field into attributes, checks that the required
    fields are filled and the username is valid, then requests a token
    and writes the settings.  On success the window is destroyed.
    """
    self.username = self.username_field.get()
    self.organization = self.organization_field.get()
    self.apiurl = self.apiurl_field.get()
    self.busername = self.busername_field.get()
    self.bpassword = self.bpassword_field.get()
    self.jt_uploadfolder = self.jt_uploadfolder_field.get()
    self.hpupload_folder = self.hpupload_field.get()
    self.prnuupload_folder = self.prnuupload_field.get()
    self.s3_profile = self.profile_field.get()
    self.s3_endpoint = self.endpoint_field.get()
    self.s3_region = self.region_field.get()
    self.eemail = self.get_recipient()
    # `orgs` is a module-level mapping of org display names to codes.
    self.full_org = self.hporganization.get() + " (" + orgs[self.hporganization.get()] + ")"
    if not all([self.username, self.organization, self.apiurl, self.busername, self.bpassword,
                self.hporganization.get()]):
        tkMessageBox.showerror("Missing Fields", "One or more fields are missing required information.")
        return
    if self.username not in self.valid_usernames:
        tkMessageBox.showerror("Invalid Username", "Username not in list of valid usernames.")
        # Bug fix: previously execution fell through here and the config
        # was written despite the invalid username.
        return
    self.apitoken = self.get_token()
    if self.apitoken:
        self.create_json()
        tkMessageBox.showinfo("Success!", "Configuration file for {0} has been successfully created!".format(
            self.username))
        # The imported key file is no longer needed once recorded.
        if os.path.isfile(key):
            os.remove(key)
        self.parent.destroy()
def get_recipient(self):
    """Return the email address embedded in the archive encryption key.

    Runs ``gpg --with-colons`` on the module-level ``key`` file and
    extracts the ``<email>`` portion of the first ``uid`` line.
    Returns None when the key file is missing or holds no uid line.
    """
    if not os.path.isfile(key):
        return None
    try:
        proc = subprocess.Popen(["gpg", "--with-colons", key],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        stdout, _ = proc.communicate()
    except Exception:
        tkMessageBox.showerror("Error", "There has been an error retrieving the encryption key.")
        return
    for uid_line in (ln for ln in stdout.split("\n") if ln.startswith("uid")):
        # uid lines look like: uid:...:Name <user@host>:...
        return uid_line.split("<")[1].split(">")[0]
    return None
def get_token(self):
    """Log in to the browser API and return the session token, or None.

    POSTs the browser credentials to ``<apiurl>/login/`` and returns the
    ``key`` field of the JSON response.  On any failure an error dialog
    is shown and None is returned, so the caller's ``if self.apitoken:``
    check no longer mistakes an error message for a valid token.
    """
    url = self.apiurl[:-1] if self.apiurl.endswith('/') else self.apiurl
    url = url + '/login/'
    try:
        headers = {'Content-Type': 'application/json'}
        # json.dumps escapes quotes/backslashes in the credentials; the
        # old hand-concatenated string produced invalid JSON whenever the
        # password contained a quote character.
        data = json.dumps({"username": self.busername, "password": self.bpassword})
        response = requests.post(url, data=data, headers=headers)
        if response.status_code != requests.codes.ok:
            tkMessageBox.showerror("Invalid API Token", "Error calling external service {0} : {1}".format(
                url, str(response.content)))
            return None
        return json.loads(response.content)['key']
    except Exception as e:
        # Previously the formatted error string was *returned*; being
        # truthy, it was then written to the config as if it were a token.
        # Also, `e.message` is Python-2-only — use str(e).
        tkMessageBox.showerror(
            "Invalid API Token",
            "Error calling external service: {0} : {1}".format(url, str(e)))
        return None
def create_json(self):
    # Persist every collected setting via the maskgen loader.
    # `orgs` (display name -> org code) and `branch` are module globals.
    # NOTE(review): autosave is hard-coded to "600" seconds here as well
    # as in setup() — keep the two in sync.
    data = {"username": self.username, "apitoken": self.apitoken, "organization": self.organization,
            "s3info": self.jt_uploadfolder, "apiurl": self.apiurl, "archive_recipient": self.eemail, "aws-hp":
            self.hpupload_folder, "aws-prnu": self.prnuupload_folder, "autosave": "600", "fullorgname":
            self.full_org, "hp-organization": orgs[self.hporganization.get()], "git.branch": branch,
            "s3-endpoint": self.s3_endpoint, "s3-profile": self.s3_profile, "s3-region": self.s3_region}
    self.loader.saveall(data.items())
def get_maskgen(self):
    # Ask the user to locate their maskgen checkout; the valid
    # manipulator usernames live in resources/ManipulatorCodeNames.txt
    # inside that directory.
    maskgen_dir = tkFileDialog.askdirectory()
    if maskgen_dir:
        namefile = os.path.join(maskgen_dir, "resources", "ManipulatorCodeNames.txt")
        if not os.path.isfile(namefile):
            tkMessageBox.showerror("Usernames Not Found", "Could not find username text file at {0}.".format(
                namefile))
            return
        with open(namefile) as f:
            self.valid_usernames = sorted(f.read().splitlines())
        # Feed the sorted names into the username combobox.
        self.username_field['values'] = self.valid_usernames
def update_user_name():
    """Retype the journal 'username' project property so the UI offers
    the manipulator code-name list instead of free text."""
    import json
    from maskgen.software_loader import getFileName
    property_file = getFileName('project_properties.json')
    if property_file is None:
        return
    with open(property_file, 'r') as fp:
        props = json.load(fp)
    for prop in props['properties']:
        if prop['name'] != 'username':
            continue
        prop['type'] = 'listfromfile:ManipulatorCodeNames.txt'
    with open(property_file, 'w') as fp:
        json.dump(props, fp, indent=2, encoding='utf-8')
def setup():
    # One-time environment setup: install the archive encryption key,
    # default the autosave interval, and (on Windows) drop desktop
    # launcher .cmd files for the JT and HP tools.
    # Returns a list of error strings (empty on success).
    errors = []
    if os.path.isfile(key):
        try:
            # NOTE(review): shell=True combined with an argument list is
            # Windows-specific behavior (list2cmdline); on POSIX only
            # "gpg" itself would run — confirm this path is Windows-only.
            key_installed = subprocess.Popen(["gpg", "--list-keys", key], stdout=subprocess.PIPE,
                                             stderr=subprocess.PIPE, shell=True)
            key_installed.communicate()
            if key_installed.returncode == 2:
                # Exit code 2 is taken to mean "key not yet installed".
                subprocess.Popen(["gpg", "--import", key])
            else:
                os.remove(key)
        except WindowsError as e:
            # WindowsError only exists on Windows Python 2 — see note above.
            errors.append("Error encountered while installing encryption key: " + str(e))
    # Set autosave to 600s by default
    maskgen.maskgen_loader.imageLoaded = False
    settings = maskgen.maskgen_loader.MaskGenLoader()
    autosave = settings.get_key("autosave")
    if autosave is None:
        settings.save("autosave", "600")
    if sys.platform.startswith("win"):
        # Will only run if .maskgen2 doesn't exist, so delete the old commands
        if os.path.isfile(os.path.join(os.path.expanduser("~"), "Desktop", "JT.cmd")):
            os.remove(os.path.join(os.path.expanduser("~"), "Desktop", "JT.cmd"))
        with open(os.path.join(os.path.expanduser("~"), "Desktop", "JT.cmd"), "a+") as startjt:
            startjt.writelines(["title Journaling Tool\n", "cd {0}\n".format(os.path.expanduser("~")), "cls\n",
                                "jtui"])
        if os.path.isfile(os.path.join(os.path.expanduser("~"), "Desktop", "HP_Tool.cmd")):
            os.remove(os.path.join(os.path.expanduser("~"), "Desktop", "HP_Tool.cmd"))
        with open(os.path.join(os.path.expanduser("~"), "Desktop", "HP_Tool.cmd"), "a+") as starthp:
            starthp.writelines(["title HP Tool\n", "cd {0}\n".format(os.path.expanduser("~")), "cls\n", "hpgui"])
    update_user_name()
    return errors
def combine_settings():
    """Merge a legacy HP-tool settings file into the main JT settings.

    Reads every key from the old ``hp_settings`` file, renames the keys
    that moved (see ``conversions``), flattens the nested "metadata"
    dict into top-level keys, saves the merged result through the JT
    loader, and finally deletes the old file.
    """
    maskgen.maskgen_loader.imageLoaded = False
    hp_loader = maskgen.maskgen_loader.MaskGenLoader(hp_settings)
    # Loaders are iterable over their key names; no need to call
    # __iter__() explicitly (idiom fix).
    hp_keys = {k: hp_loader.get_key(k) for k in hp_loader}
    conversions = {"aws": "aws-hp", "aws-prnu": "aws-prnu", "archive_recipient":
                   "archive_recipient", "inputdir": "inputdir", "outputdir": "outputdir", "organization":
                   "hp-organization", "seq": "seq"}
    maskgen.maskgen_loader.imageLoaded = False
    jt_loader = maskgen.maskgen_loader.MaskGenLoader()
    jt_keys = {k: jt_loader.get_key(k) for k in jt_loader}
    for k, v in hp_keys.items():
        if k in conversions:
            jt_keys[conversions[k]] = v
        if k == "metadata":
            # The old format nested arbitrary metadata under one key;
            # flatten it into top-level settings.
            for mk, mv in v.items():
                jt_keys[mk] = mv
    jt_loader.saveall(jt_keys.items())
    os.remove(hp_settings)
def main():
    # Entry point: migrate legacy settings, bail out if already
    # configured, then run the setup wizard window.
    root = Tk()
    # Fold any legacy HP-tool settings into the unified settings file.
    if os.path.isfile(hp_settings):
        combine_settings()
    if os.path.isfile(os.path.join(os.path.expanduser("~"), ".maskgen2")):
        # Get a maskgen loader to check if fields are defined
        maskgen.maskgen_loader.imageLoaded = False
        loader = maskgen.maskgen_loader.MaskGenLoader()
        if "apitoken" in loader:
            # Already configured — nothing to do.
            exit(0)
        if "git.branch" in loader:
            # Reuse the branch recorded by a previous run.
            global branch
            branch = loader.get_key("git.branch")
    maskgen.maskgen_loader.imageLoaded = False
    errs = setup()
    Window(root, errs)
    root.wm_resizable(width=FALSE, height=FALSE)
    root.mainloop()
if __name__ == "__main__":
    # Default git branch recorded in the config; main() may override it
    # from an existing settings file.
    branch = "master"
main() | 0.345768 | 0.091301 |
import sqlite3
class User:
    """Row wrapper for the USER table (ID, NAME, AUTHSTRING)."""

    def __init__(self, id, name, authstring):
        self.id = id
        self.name = name
        self.authstring = authstring

    @staticmethod
    def get_user_by_name(name):
        """Return the first User whose NAME matches, or None."""
        connection = sqlite3.connect('./db.db')
        cursor = connection.cursor()
        sql = '''SELECT * FROM USER WHERE NAME=?'''
        cursor.execute(sql, (name,))
        row = cursor.fetchone()
        connection.close()
        if not row:
            return None
        return User(*row)

    @staticmethod
    def get_all(name=None):
        """Return a list of every User in the table.

        Bug fix: the original fetched only a single row despite its
        name.  The ``name`` parameter was never used; it is kept (now
        optional) only for backward compatibility with callers that
        passed it positionally.
        """
        connection = sqlite3.connect('./db.db')
        cursor = connection.cursor()
        cursor.execute('''SELECT * FROM USER''')
        rows = cursor.fetchall()
        connection.close()
        return [User(*row) for row in rows]

    @staticmethod
    def insert(elem):
        """Insert a new USER row; ID is assigned by sqlite."""
        sql = '''INSERT INTO USER (NAME, AUTHSTRING)
        VALUES (?,?)
        '''
        connection = sqlite3.connect('./db.db')
        cursor = connection.cursor()
        # A single row needs execute, not executemany.
        cursor.execute(sql, (elem.name, elem.authstring))
        connection.commit()
        connection.close()

    def update(self):
        """Write this object's NAME/AUTHSTRING back to its row."""
        connection = sqlite3.connect('./db.db')
        cursor = connection.cursor()
        sql = '''UPDATE USER SET NAME=?, AUTHSTRING=? WHERE ID=?'''
        cursor.execute(sql, (self.name, self.authstring, self.id))
        connection.commit()
        connection.close()

    def remove(self):
        """Delete this user's row.

        Bug fix: the original called ``self.get_elems()`` — copy-pasted
        from Elem — which does not exist on User and raised
        AttributeError.  Users have no child rows, so only the row
        itself is deleted.
        """
        connection = sqlite3.connect('./db.db')
        cursor = connection.cursor()
        sql = '''DELETE FROM USER WHERE ID=?'''
        cursor.execute(sql, (self.id,))
        connection.commit()
        connection.close()
# Element-type discriminator values (not enforced by the schema).
FOLDER = 1
LINK = 2
class Elem:
    """Row wrapper for the ELEM table (ID, NAME, DESCRIPTION, PARENT).

    Rows form a tree through the PARENT column; PARENT is None/0 for
    root elements.
    """

    def __init__(self, id, name, desc, parent=None):
        self.id = id
        self.name = name
        self.desc = desc
        self.parent = parent

    def get_parent(self):
        """Return the parent Elem, or None for a root element."""
        if self.parent:
            return Elem.get_by_id(self.parent)
        else:
            return None

    def update(self):
        """Persist NAME/DESCRIPTION changes back to this row."""
        connection = sqlite3.connect('./db.db')
        cursor = connection.cursor()
        sql = '''UPDATE ELEM SET NAME=?, DESCRIPTION=? WHERE ID=?'''
        cursor.execute(sql, (self.name, self.desc, self.id))
        connection.commit()
        connection.close()

    def get_elems(self):
        """Return this element's direct children as a list of Elem.

        Bug fix: the original returned from inside the query expression,
        making the trailing ``connection.close()`` (marked # fixme)
        unreachable and leaking the connection.  Rows are materialized
        first so the connection can be closed before returning.
        """
        connection = sqlite3.connect('./db.db')
        cursor = connection.cursor()
        sql = '''SELECT * FROM ELEM WHERE PARENT=?'''
        rows = cursor.execute(sql, (self.id,)).fetchall()
        connection.close()
        return [Elem(*row) for row in rows]

    def remove(self):
        """Delete this element and, recursively, all of its descendants."""
        for child in self.get_elems():
            child.remove()
        # removing itself
        connection = sqlite3.connect('./db.db')
        cursor = connection.cursor()
        sql = '''DELETE FROM ELEM WHERE ID=?'''
        cursor.execute(sql, (self.id,))
        connection.commit()
        connection.close()

    @staticmethod
    def insert(elem):
        """Insert a new ELEM row; ID is assigned by sqlite."""
        connection = sqlite3.connect('./db.db')
        cursor = connection.cursor()
        sql = u'''INSERT INTO ELEM (NAME, DESCRIPTION, PARENT)
        VALUES (?,?,?)
        '''
        # A single row needs execute, not executemany.
        cursor.execute(sql, (elem.name, elem.desc, elem.parent))
        connection.commit()
        connection.close()

    @staticmethod
    def get_by_id(id):
        """Return the Elem with this ID, or None."""
        connection = sqlite3.connect('./db.db')
        cursor = connection.cursor()
        sql = '''SELECT * FROM ELEM WHERE ID=?'''
        cursor.execute(sql, (id,))
        row = cursor.fetchone()
        connection.close()
        if not row:
            return None
        return Elem(*row)

    @staticmethod
    def search_by_name_description(name, description):
        """Return the first Elem matching both NAME and DESCRIPTION, or None."""
        connection = sqlite3.connect('./db.db')
        cursor = connection.cursor()
        sql = '''SELECT * FROM ELEM WHERE NAME=? AND DESCRIPTION=?'''
        cursor.execute(sql, (name, description))
        row = cursor.fetchone()
        connection.close()
        if not row:
            return None
        return Elem(*row)
def __repr__(self):
return u'({}) {} = {}'.format(
self.id, self.name, self.desc) | models.py | import sqlite3
class User:
    """Row wrapper for the USER table (ID, NAME, AUTHSTRING)."""

    def __init__(self, id, name, authstring):
        self.id = id
        self.name = name
        self.authstring = authstring

    @staticmethod
    def get_user_by_name(name):
        """Return the first User whose NAME matches, or None."""
        connection = sqlite3.connect('./db.db')
        cursor = connection.cursor()
        sql = '''SELECT * FROM USER WHERE NAME=?'''
        cursor.execute(sql, (name,))
        row = cursor.fetchone()
        connection.close()
        if not row:
            return None
        return User(*row)

    @staticmethod
    def get_all(name=None):
        """Return a list of every User in the table.

        Bug fix: the original fetched only a single row despite its
        name.  The ``name`` parameter was never used; it is kept (now
        optional) only for backward compatibility with callers that
        passed it positionally.
        """
        connection = sqlite3.connect('./db.db')
        cursor = connection.cursor()
        cursor.execute('''SELECT * FROM USER''')
        rows = cursor.fetchall()
        connection.close()
        return [User(*row) for row in rows]

    @staticmethod
    def insert(elem):
        """Insert a new USER row; ID is assigned by sqlite."""
        sql = '''INSERT INTO USER (NAME, AUTHSTRING)
        VALUES (?,?)
        '''
        connection = sqlite3.connect('./db.db')
        cursor = connection.cursor()
        # A single row needs execute, not executemany.
        cursor.execute(sql, (elem.name, elem.authstring))
        connection.commit()
        connection.close()

    def update(self):
        """Write this object's NAME/AUTHSTRING back to its row."""
        connection = sqlite3.connect('./db.db')
        cursor = connection.cursor()
        sql = '''UPDATE USER SET NAME=?, AUTHSTRING=? WHERE ID=?'''
        cursor.execute(sql, (self.name, self.authstring, self.id))
        connection.commit()
        connection.close()

    def remove(self):
        """Delete this user's row.

        Bug fix: the original called ``self.get_elems()`` — copy-pasted
        from Elem — which does not exist on User and raised
        AttributeError.  Users have no child rows, so only the row
        itself is deleted.
        """
        connection = sqlite3.connect('./db.db')
        cursor = connection.cursor()
        sql = '''DELETE FROM USER WHERE ID=?'''
        cursor.execute(sql, (self.id,))
        connection.commit()
        connection.close()
# Element-type discriminator values (not enforced by the schema).
FOLDER = 1
LINK = 2
class Elem:
    """Row wrapper for the ELEM table (ID, NAME, DESCRIPTION, PARENT).

    Rows form a tree through the PARENT column; PARENT is None/0 for
    root elements.
    """

    def __init__(self, id, name, desc, parent=None):
        self.id = id
        self.name = name
        self.desc = desc
        self.parent = parent

    def get_parent(self):
        """Return the parent Elem, or None for a root element."""
        if self.parent:
            return Elem.get_by_id(self.parent)
        else:
            return None

    def update(self):
        """Persist NAME/DESCRIPTION changes back to this row."""
        connection = sqlite3.connect('./db.db')
        cursor = connection.cursor()
        sql = '''UPDATE ELEM SET NAME=?, DESCRIPTION=? WHERE ID=?'''
        cursor.execute(sql, (self.name, self.desc, self.id))
        connection.commit()
        connection.close()

    def get_elems(self):
        """Return this element's direct children as a list of Elem.

        Bug fix: the original returned from inside the query expression,
        making the trailing ``connection.close()`` (marked # fixme)
        unreachable and leaking the connection.  Rows are materialized
        first so the connection can be closed before returning.
        """
        connection = sqlite3.connect('./db.db')
        cursor = connection.cursor()
        sql = '''SELECT * FROM ELEM WHERE PARENT=?'''
        rows = cursor.execute(sql, (self.id,)).fetchall()
        connection.close()
        return [Elem(*row) for row in rows]

    def remove(self):
        """Delete this element and, recursively, all of its descendants."""
        for child in self.get_elems():
            child.remove()
        # removing itself
        connection = sqlite3.connect('./db.db')
        cursor = connection.cursor()
        sql = '''DELETE FROM ELEM WHERE ID=?'''
        cursor.execute(sql, (self.id,))
        connection.commit()
        connection.close()

    @staticmethod
    def insert(elem):
        """Insert a new ELEM row; ID is assigned by sqlite."""
        connection = sqlite3.connect('./db.db')
        cursor = connection.cursor()
        sql = u'''INSERT INTO ELEM (NAME, DESCRIPTION, PARENT)
        VALUES (?,?,?)
        '''
        # A single row needs execute, not executemany.
        cursor.execute(sql, (elem.name, elem.desc, elem.parent))
        connection.commit()
        connection.close()

    @staticmethod
    def get_by_id(id):
        """Return the Elem with this ID, or None."""
        connection = sqlite3.connect('./db.db')
        cursor = connection.cursor()
        sql = '''SELECT * FROM ELEM WHERE ID=?'''
        cursor.execute(sql, (id,))
        row = cursor.fetchone()
        connection.close()
        if not row:
            return None
        return Elem(*row)

    @staticmethod
    def search_by_name_description(name, description):
        """Return the first Elem matching both NAME and DESCRIPTION, or None."""
        connection = sqlite3.connect('./db.db')
        cursor = connection.cursor()
        sql = '''SELECT * FROM ELEM WHERE NAME=? AND DESCRIPTION=?'''
        cursor.execute(sql, (name, description))
        row = cursor.fetchone()
        connection.close()
        if not row:
            return None
        return Elem(*row)
def __repr__(self):
return u'({}) {} = {}'.format(
self.id, self.name, self.desc) | 0.311008 | 0.076857 |
from pathlib import Path
import time
import traceback
from ._base_subcommand import BaseSubcommand
class Subcommand(BaseSubcommand):
    """Claim PENDING jobs from the DB and build their job dirs on disk."""

    help = "Build dirs for pending jobs."

    def add_arguments(self, parser):
        """Register CLI flags; defaults come from :meth:`_get_defaults`."""
        defaults = self._get_defaults()
        parser.add_argument(
            '--tag',
            help=("Only build jobs that have this tag. Can specify"
                  " this arg multiple times to filter for multiple tags."),
            dest='tags',
            action='append'
        )
        parser.add_argument(
            '--parent_request_tag',
            help=("Only build jobs which have a parent request with this tag."
                  " Can specify this arg multiple times to filter for multiple"
                  " tags."),
            dest='parent_request_tags',
            action='append'
        )
        parser.add_argument(
            '--output_dir',
            help=("Where to put created job dirs. If dir does not exist it"
                  " will be created."),
            default=defaults['output_dir']
        )
        parser.add_argument(
            '--dry_run',
            help=("Don't actually build jobs. Just return # of jobs that would"
                  " be built."),
            action='store_true'
        )
        parser.add_argument(
            '--limit',
            help="Maximum number of jobs to claim.",
            type=int,
            default=defaults['limit']
        )
        parser.add_argument(
            '--job_dir_tpl',
            help=("Template for naming job_dirs"),
            default=defaults['job_dir_tpl']
        )

    def _get_defaults(self):
        """Default values for every CLI argument."""
        return {
            'dry_run': False,
            'limit': 100,
            'job_dir_tpl': '{timestamp}.{key}',
            'output_dir': self.houston.utils.job_dirs['pending'],
            'parent_request_tags': [],
            'tags': [],
        }

    def _run(self):
        """Claim jobs, build their dirs, and commit (rollback on dry run).

        Returns a dict of build results plus the output dir and the
        number of jobs claimed.  Raises if any individual build failed.
        """
        self.session.begin_nested()
        output_dir = self._get_output_dir()
        claimed_jobs = self._get_and_claim_jobs()
        if self.parsed_args['dry_run']:
            build_results = {'DRY_RUN': '<nothing built>'}
        else:
            build_results = self._build_jobs(jobs=claimed_jobs,
                                             output_dir=output_dir)
        if self.parsed_args['dry_run']:
            self.session.rollback()
        else:
            self.session.commit()
        if build_results.get('errors'):
            raise Exception("\n".join(build_results['errors']))
        return {
            **build_results,
            'output_dir': output_dir,
            'num_claimed': len(claimed_jobs),
        }

    def _get_output_dir(self):
        """Return the output dir, creating it unless this is a dry run."""
        output_dir = self.parsed_args['output_dir']
        if not self.parsed_args['dry_run']:
            Path(output_dir).mkdir(parents=True, exist_ok=True)
        return output_dir

    def _get_and_claim_jobs(self):
        """Claim matching PENDING jobs by bumping status/modified.

        The claim only updates rows whose ``modified`` is still older
        than ``claim_time``; re-querying on ``modified == claim_time``
        then returns exactly the rows this process won, so concurrent
        claimers don't double-build.
        """
        claim_time = time.time()
        modified_clause = (self.Job.modified < claim_time)
        q = (
            self.session.query(self.Job)
            .filter(self.Job.status == 'PENDING')
            .filter(modified_clause)
        )
        for tag_name in self.parsed_args['tags']:
            q = q.filter(self.Job.tags_set.any(name=tag_name))
        if self.parsed_args['limit']:
            q = q.limit(self.parsed_args['limit'])
        if self.parsed_args['parent_request_tags']:
            q = (
                q.join(self.Job.parents, aliased=True)
                .join(self.Request, aliased=True, from_joinpoint=True)
                .filter(
                    self.Request.request_tag.in_(
                        self.parsed_args['parent_request_tags']
                    )
                )
                .reset_joinpoint()
            )
        jobs_to_claim = q.all()
        if not jobs_to_claim:
            return []
        keys_clause = self.Job.key.in_([j.key for j in jobs_to_claim])
        (
            self.session.query(self.Job)
            .filter(keys_clause)
            .filter(modified_clause)
            .update(
                {'status': 'BUILDING', 'modified': claim_time},
                synchronize_session='fetch'
            )
        )
        claimed_jobs = (
            self.session.query(self.Job)
            .filter(keys_clause)
            .filter(self.Job.modified == claim_time)
            .all()
        )
        return claimed_jobs

    @property
    def Job(self): return self.db.models.Job

    @property
    def Request(self): return self.db.models.Request

    def _build_jobs(self, jobs=None, output_dir=None):
        """Build a dir per job; collect tracebacks instead of aborting."""
        num_built = 0
        errors = []
        job_dir_builder = self._get_job_dir_builder()
        for job in jobs:
            try:
                self._build_job(job=job,
                                job_dir_builder=job_dir_builder,
                                output_dir=output_dir)
                num_built += 1
            except Exception:
                # Was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.
                errors.append(traceback.format_exc())
        with self.session.begin(subtransactions=True):
            self.session.add_all(jobs)
        return {'num_built': num_built, 'errors': errors}

    def _get_job_dir_builder(self):
        # Imported lazily so loading the subcommand stays cheap.
        from mc.utils.job_modules.job_dir_builder import JobDirBuilder
        return JobDirBuilder()

    def _build_job(self, job=None, job_dir_builder=None,
                   output_dir=None):
        """Build one job dir, recording BUILT/FAILED on the job row."""
        try:
            job_dir_builder.build_job_dir(
                job_dict=job.to_dict(),
                output_dir=self._get_job_output_dir(job=job,
                                                    parent_dir=output_dir)
            )
            job.status = 'BUILT'
        except Exception:
            # `as exc` binding was unused; the exception is re-raised.
            job.status = 'FAILED'
            raise
def _get_job_output_dir(self, job=None, parent_dir=None):
tpl = self.parsed_args.get('job_dir_tpl')
job_dir_name = tpl.format(timestamp=int(time.time()), key=job.key)
return Path(parent_dir, job_dir_name) | mc/houston/subcommands/build_job_dirs.py | from pathlib import Path
import time
import traceback
from ._base_subcommand import BaseSubcommand
class Subcommand(BaseSubcommand):
    """Claim PENDING jobs from the DB and build their job dirs on disk."""

    help = "Build dirs for pending jobs."

    def add_arguments(self, parser):
        """Register CLI flags; defaults come from :meth:`_get_defaults`."""
        defaults = self._get_defaults()
        parser.add_argument(
            '--tag',
            help=("Only build jobs that have this tag. Can specify"
                  " this arg multiple times to filter for multiple tags."),
            dest='tags',
            action='append'
        )
        parser.add_argument(
            '--parent_request_tag',
            help=("Only build jobs which have a parent request with this tag."
                  " Can specify this arg multiple times to filter for multiple"
                  " tags."),
            dest='parent_request_tags',
            action='append'
        )
        parser.add_argument(
            '--output_dir',
            help=("Where to put created job dirs. If dir does not exist it"
                  " will be created."),
            default=defaults['output_dir']
        )
        parser.add_argument(
            '--dry_run',
            help=("Don't actually build jobs. Just return # of jobs that would"
                  " be built."),
            action='store_true'
        )
        parser.add_argument(
            '--limit',
            help="Maximum number of jobs to claim.",
            type=int,
            default=defaults['limit']
        )
        parser.add_argument(
            '--job_dir_tpl',
            help=("Template for naming job_dirs"),
            default=defaults['job_dir_tpl']
        )

    def _get_defaults(self):
        """Default values for every CLI argument."""
        return {
            'dry_run': False,
            'limit': 100,
            'job_dir_tpl': '{timestamp}.{key}',
            'output_dir': self.houston.utils.job_dirs['pending'],
            'parent_request_tags': [],
            'tags': [],
        }

    def _run(self):
        """Claim jobs, build their dirs, and commit (rollback on dry run).

        Returns a dict of build results plus the output dir and the
        number of jobs claimed.  Raises if any individual build failed.
        """
        self.session.begin_nested()
        output_dir = self._get_output_dir()
        claimed_jobs = self._get_and_claim_jobs()
        if self.parsed_args['dry_run']:
            build_results = {'DRY_RUN': '<nothing built>'}
        else:
            build_results = self._build_jobs(jobs=claimed_jobs,
                                             output_dir=output_dir)
        if self.parsed_args['dry_run']:
            self.session.rollback()
        else:
            self.session.commit()
        if build_results.get('errors'):
            raise Exception("\n".join(build_results['errors']))
        return {
            **build_results,
            'output_dir': output_dir,
            'num_claimed': len(claimed_jobs),
        }

    def _get_output_dir(self):
        """Return the output dir, creating it unless this is a dry run."""
        output_dir = self.parsed_args['output_dir']
        if not self.parsed_args['dry_run']:
            Path(output_dir).mkdir(parents=True, exist_ok=True)
        return output_dir

    def _get_and_claim_jobs(self):
        """Claim matching PENDING jobs by bumping status/modified.

        The claim only updates rows whose ``modified`` is still older
        than ``claim_time``; re-querying on ``modified == claim_time``
        then returns exactly the rows this process won, so concurrent
        claimers don't double-build.
        """
        claim_time = time.time()
        modified_clause = (self.Job.modified < claim_time)
        q = (
            self.session.query(self.Job)
            .filter(self.Job.status == 'PENDING')
            .filter(modified_clause)
        )
        for tag_name in self.parsed_args['tags']:
            q = q.filter(self.Job.tags_set.any(name=tag_name))
        if self.parsed_args['limit']:
            q = q.limit(self.parsed_args['limit'])
        if self.parsed_args['parent_request_tags']:
            q = (
                q.join(self.Job.parents, aliased=True)
                .join(self.Request, aliased=True, from_joinpoint=True)
                .filter(
                    self.Request.request_tag.in_(
                        self.parsed_args['parent_request_tags']
                    )
                )
                .reset_joinpoint()
            )
        jobs_to_claim = q.all()
        if not jobs_to_claim:
            return []
        keys_clause = self.Job.key.in_([j.key for j in jobs_to_claim])
        (
            self.session.query(self.Job)
            .filter(keys_clause)
            .filter(modified_clause)
            .update(
                {'status': 'BUILDING', 'modified': claim_time},
                synchronize_session='fetch'
            )
        )
        claimed_jobs = (
            self.session.query(self.Job)
            .filter(keys_clause)
            .filter(self.Job.modified == claim_time)
            .all()
        )
        return claimed_jobs

    @property
    def Job(self): return self.db.models.Job

    @property
    def Request(self): return self.db.models.Request

    def _build_jobs(self, jobs=None, output_dir=None):
        """Build a dir per job; collect tracebacks instead of aborting."""
        num_built = 0
        errors = []
        job_dir_builder = self._get_job_dir_builder()
        for job in jobs:
            try:
                self._build_job(job=job,
                                job_dir_builder=job_dir_builder,
                                output_dir=output_dir)
                num_built += 1
            except Exception:
                # Was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.
                errors.append(traceback.format_exc())
        with self.session.begin(subtransactions=True):
            self.session.add_all(jobs)
        return {'num_built': num_built, 'errors': errors}

    def _get_job_dir_builder(self):
        # Imported lazily so loading the subcommand stays cheap.
        from mc.utils.job_modules.job_dir_builder import JobDirBuilder
        return JobDirBuilder()

    def _build_job(self, job=None, job_dir_builder=None,
                   output_dir=None):
        """Build one job dir, recording BUILT/FAILED on the job row."""
        try:
            job_dir_builder.build_job_dir(
                job_dict=job.to_dict(),
                output_dir=self._get_job_output_dir(job=job,
                                                    parent_dir=output_dir)
            )
            job.status = 'BUILT'
        except Exception:
            # `as exc` binding was unused; the exception is re-raised.
            job.status = 'FAILED'
            raise
def _get_job_output_dir(self, job=None, parent_dir=None):
tpl = self.parsed_args.get('job_dir_tpl')
job_dir_name = tpl.format(timestamp=int(time.time()), key=job.key)
return Path(parent_dir, job_dir_name) | 0.576542 | 0.085709 |
import io
import pathlib
from abc import ABC, abstractclassmethod
class SerpentFile(ABC):
    """Common base for readers of Serpent output files.

    Parameters
    ----------
    filename : str, optional
        Identifier for where the data came from

    Attributes
    ----------
    filename : str or None
        Identifier for the data source
    """

    def __init__(self, filename=None):
        self.filename = filename

    @classmethod
    def fromSerpent(
        cls, source, sourcename=None, postcheck=True, strict=True, **kwargs
    ):
        """
        Build an instance by reading a Serpent output file.

        Parameters
        ----------
        source : str or pathlib.Path or io.IOBase
            Where to read the data from.  Strings and
            :class:`pathlib.Path` objects are treated as file names;
            anything else must be a readable stream supporting
            ``read`` and ``readline``
        sourcename : str, optional
            Label for the data source.  Defaults to the file name when
            ``source`` is a string or :class:`pathlib.Path`
        postcheck : bool, optional
            Run simple sanity checks after processing (default True)
        strict : bool, optional
            When a post-check fails, raise an error if True (default)
            or issue a warning otherwise
        kwargs :
            Forwarded untouched to the subclass stream reader.

        Returns
        -------
        SerpentFile
            Concrete subclass instance for this file type

        Raises
        ------
        serpentTools.SerpentToolsException
            If ``postcheck``, a check fails, and ``strict``

        Warns
        -----
        UserWarning
            If ``postcheck``, a check fails, and not ``strict``
        """
        # Dispatch on source type: file name, path, binary stream, then
        # any other readable text stream.
        if isinstance(source, str):
            with open(source, mode="r") as stream:
                return cls._fromSerpentStream(
                    stream, sourcename or source, postcheck, strict, **kwargs
                )
        if isinstance(source, pathlib.Path):
            with source.open(mode="r") as stream:
                return cls._fromSerpentStream(
                    stream,
                    sourcename or str(source),
                    postcheck,
                    strict,
                    **kwargs,
                )
        if isinstance(source, io.BufferedIOBase):
            # Binary streams (e.g. zip members) get a text wrapper first
            return cls._fromSerpentStream(
                io.TextIOWrapper(source),
                sourcename,
                postcheck,
                strict,
                **kwargs,
            )
        if not isinstance(source, io.IOBase):
            raise TypeError(
                "Source must be string or pathlib.Path for file names, or a "
                "readable IO stream. Got {}".format(type(source))
            )
        return cls._fromSerpentStream(
            source, sourcename, postcheck, strict, **kwargs
        )
@abstractclassmethod
def _fromSerpentStream(
cls, source, sourcename, postcheck, strict, **kwargs
):
"""Process a stream of Serpent text data.
Source will be an :class:`io.TextIOBase`, which supports at
least ``source.read`` and ``source.readline``. Seeking
with ``source.seek`` might not be available in all
cases, but can be checked with ``source.seekable``
Other arguments correspond with their intent in
:meth:`fromSerpent`
""" | serpentTools/next/base.py | import io
import pathlib
from abc import ABC, abstractclassmethod
class SerpentFile(ABC):
    """Common base for readers of Serpent output files.

    Parameters
    ----------
    filename : str, optional
        Identifier for where the data came from

    Attributes
    ----------
    filename : str or None
        Identifier for the data source
    """

    def __init__(self, filename=None):
        self.filename = filename

    @classmethod
    def fromSerpent(
        cls, source, sourcename=None, postcheck=True, strict=True, **kwargs
    ):
        """
        Build an instance by reading a Serpent output file.

        Parameters
        ----------
        source : str or pathlib.Path or io.IOBase
            Where to read the data from.  Strings and
            :class:`pathlib.Path` objects are treated as file names;
            anything else must be a readable stream supporting
            ``read`` and ``readline``
        sourcename : str, optional
            Label for the data source.  Defaults to the file name when
            ``source`` is a string or :class:`pathlib.Path`
        postcheck : bool, optional
            Run simple sanity checks after processing (default True)
        strict : bool, optional
            When a post-check fails, raise an error if True (default)
            or issue a warning otherwise
        kwargs :
            Forwarded untouched to the subclass stream reader.

        Returns
        -------
        SerpentFile
            Concrete subclass instance for this file type

        Raises
        ------
        serpentTools.SerpentToolsException
            If ``postcheck``, a check fails, and ``strict``

        Warns
        -----
        UserWarning
            If ``postcheck``, a check fails, and not ``strict``
        """
        # Dispatch on source type: file name, path, binary stream, then
        # any other readable text stream.
        if isinstance(source, str):
            with open(source, mode="r") as stream:
                return cls._fromSerpentStream(
                    stream, sourcename or source, postcheck, strict, **kwargs
                )
        if isinstance(source, pathlib.Path):
            with source.open(mode="r") as stream:
                return cls._fromSerpentStream(
                    stream,
                    sourcename or str(source),
                    postcheck,
                    strict,
                    **kwargs,
                )
        if isinstance(source, io.BufferedIOBase):
            # Binary streams (e.g. zip members) get a text wrapper first
            return cls._fromSerpentStream(
                io.TextIOWrapper(source),
                sourcename,
                postcheck,
                strict,
                **kwargs,
            )
        if not isinstance(source, io.IOBase):
            raise TypeError(
                "Source must be string or pathlib.Path for file names, or a "
                "readable IO stream. Got {}".format(type(source))
            )
        return cls._fromSerpentStream(
            source, sourcename, postcheck, strict, **kwargs
        )
@abstractclassmethod
def _fromSerpentStream(
cls, source, sourcename, postcheck, strict, **kwargs
):
"""Process a stream of Serpent text data.
Source will be an :class:`io.TextIOBase`, which supports at
least ``source.read`` and ``source.readline``. Seeking
with ``source.seek`` might not be available in all
cases, but can be checked with ``source.seekable``
Other arguments correspond with their intent in
:meth:`fromSerpent`
""" | 0.78964 | 0.282132 |
# Commented out IPython magic to ensure Python compatibility.
try:
    # %tensorflow_version only exists in Colab.
    # %tensorflow_version 2.x
    # Bug fix: with the magic line commented out the try body was empty,
    # which is a SyntaxError.  `pass` keeps the guard syntactically valid.
    pass
except Exception:
    pass
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Flatten, Dense, Dropout, Lambda
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.python.keras.utils.vis_utils import plot_model
from tensorflow.keras import backend as K
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageFont, ImageDraw
import random
def create_pairs(x, digit_indices):
    '''Build alternating positive/negative sample pairs.

    For each class d, consecutive samples of d form a positive pair
    (label 1), immediately followed by a negative pair (label 0) made
    from a sample of d and a sample of a randomly offset other class.
    '''
    pairs = []
    labels = []
    # One fewer than the smallest per-class count so index i+1 is valid.
    n = min(len(digit_indices[d]) for d in range(10)) - 1
    for d in range(10):
        for i in range(n):
            first, second = digit_indices[d][i], digit_indices[d][i + 1]
            pairs.append([x[first], x[second]])
            offset = random.randrange(1, 10)
            other = (d + offset) % 10
            first, second = digit_indices[d][i], digit_indices[other][i]
            pairs.append([x[first], x[second]])
            labels.extend([1, 0])
    return np.array(pairs), np.array(labels)
def create_pairs_on_set(images, labels):
    """Group sample indices per digit class, then delegate pair
    construction to create_pairs; labels come back as float32."""
    per_digit = [np.where(labels == d)[0] for d in range(10)]
    pairs, y = create_pairs(images, per_digit)
    return pairs, y.astype('float32')
def show_image(image):
    # Display a single image in its own figure with a colorbar; grid off.
    plt.figure()
    plt.imshow(image)
    plt.colorbar()
    plt.grid(False)
    plt.show()
# load the dataset
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# prepare train and test sets (float cast so the division below yields reals)
train_images = train_images.astype('float32')
test_images = test_images.astype('float32')
# normalize values to [0, 1]
train_images = train_images / 255.0
test_images = test_images / 255.0
# create pairs on train and test sets
tr_pairs, tr_y = create_pairs_on_set(train_images, train_labels)
ts_pairs, ts_y = create_pairs_on_set(test_images, test_labels)
# array index
this_pair = 8
# show images at this index
show_image(ts_pairs[this_pair][0])
show_image(ts_pairs[this_pair][1])
# print the label for this pair
# NOTE(review): indexes tr_y while the images above come from ts_pairs —
# presumably ts_y was intended; confirm.
print(tr_y[this_pair])
# print other pairs
show_image(tr_pairs[:,0][0])
show_image(tr_pairs[:,0][1])
show_image(tr_pairs[:,1][0])
show_image(tr_pairs[:,1][1])
def initialize_base_network():
    """Build the shared embedding branch of the siamese network:
    Flatten -> Dense(128, relu) x3 with 10% dropout after the first
    two dense layers."""
    # Renamed local (was `input`) to avoid shadowing the builtin.
    base_input = Input(shape=(28,28,), name="base_input")
    net = Flatten(name="flatten_input")(base_input)
    net = Dense(128, activation='relu', name="first_base_dense")(net)
    net = Dropout(0.1, name="first_dropout")(net)
    net = Dense(128, activation='relu', name="second_base_dense")(net)
    net = Dropout(0.1, name="second_dropout")(net)
    net = Dense(128, activation='relu', name="third_base_dense")(net)
    return Model(inputs=base_input, outputs=net)
def euclidean_distance(vects):
    """Batched Euclidean distance between two embedding tensors,
    clamped by K.epsilon() so the sqrt gradient stays finite at 0."""
    left, right = vects
    squared = K.sum(K.square(left - right), axis=1, keepdims=True)
    return K.sqrt(K.maximum(squared, K.epsilon()))
def eucl_dist_output_shape(shapes):
    """Output-shape helper for the Lambda distance layer: the feature
    axis collapses, so (batch, d) -> (batch, 1)."""
    left_shape, _ = shapes
    return (left_shape[0], 1)
base_network = initialize_base_network()
plot_model(base_network, show_shapes=True, show_layer_names=True, to_file='base-model.png')
# create the left input and point to the base network
input_a = Input(shape=(28,28,), name="left_input")
vect_output_a = base_network(input_a)
# create the right input and point to the base network
# (same base_network instance, so both branches share weights)
input_b = Input(shape=(28,28,), name="right_input")
vect_output_b = base_network(input_b)
# measure the similarity of the two vector outputs
output = Lambda(euclidean_distance, name="output_layer", output_shape=eucl_dist_output_shape)([vect_output_a, vect_output_b])
# specify the inputs and output of the model
model = Model([input_a, input_b], output)
# plot model graph
plot_model(model, show_shapes=True, show_layer_names=True, to_file='outer-model.png')
def contrastive_loss_with_margin(margin):
    """Return a Keras-compatible contrastive loss closed over *margin*.

    After Hadsell et al. '06
    (http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf):
    similar pairs (y=1) are penalized by squared distance, dissimilar
    pairs (y=0) by a squared hinge on (margin - distance).
    """
    def contrastive_loss(y_true, y_pred):
        positive_term = K.square(y_pred)
        negative_term = K.square(K.maximum(margin - y_pred, 0))
        return K.mean(y_true * positive_term + (1 - y_true) * negative_term)
    return contrastive_loss
# Compile with contrastive loss (margin=1) and train for 20 epochs.
rms = RMSprop()
model.compile(loss=contrastive_loss_with_margin(margin=1), optimizer=rms)
history = model.fit([tr_pairs[:,0], tr_pairs[:,1]], tr_y, epochs=20, batch_size=128, validation_data=([ts_pairs[:,0], ts_pairs[:,1]], ts_y))
def compute_accuracy(y_true, y_pred):
    '''Compute classification accuracy with a fixed threshold on distances.

    Labels are 1 for similar pairs and 0 for dissimilar ones, while the
    model outputs a *distance*, so a pair is predicted similar when the
    distance falls below 0.5.  Bug fix: the original compared with
    ``>``, which inverted every prediction.
    '''
    pred = y_pred.ravel() < 0.5
    return np.mean(pred == y_true)
# Evaluate the contrastive loss on the test pairs, then score accuracy
# on both splits with the fixed 0.5 distance threshold.
loss = model.evaluate(x=[ts_pairs[:,0],ts_pairs[:,1]], y=ts_y)
y_pred_train = model.predict([tr_pairs[:,0], tr_pairs[:,1]])
train_accuracy = compute_accuracy(tr_y, y_pred_train)
y_pred_test = model.predict([ts_pairs[:,0], ts_pairs[:,1]])
test_accuracy = compute_accuracy(ts_y, y_pred_test)
print("Loss = {}, Train Accuracy = {} Test Accuracy = {}".format(loss, train_accuracy, test_accuracy))
def plot_metrics(metric_name, title, ylim=5):
plt.title(title)
plt.ylim(0,ylim)
plt.plot(history.history[metric_name],color='blue',label=metric_name)
plt.plot(history.history['val_' + metric_name],color='green',label='val_' + metric_name)
plot_metrics(metric_name='loss', title="Loss", ylim=0.2)
# Matplotlib config
def visualize_images():
plt.rc('image', cmap='gray_r')
plt.rc('grid', linewidth=0)
plt.rc('xtick', top=False, bottom=False, labelsize='large')
plt.rc('ytick', left=False, right=False, labelsize='large')
plt.rc('axes', facecolor='F8F8F8', titlesize="large", edgecolor='white')
plt.rc('text', color='a8151a')
plt.rc('figure', facecolor='F0F0F0')# Matplotlib fonts
# utility to display a row of digits with their predictions
def display_images(left, right, predictions, labels, title, n):
plt.figure(figsize=(17,3))
plt.title(title)
plt.yticks([])
plt.xticks([])
plt.grid(None)
left = np.reshape(left, [n, 28, 28])
left = np.swapaxes(left, 0, 1)
left = np.reshape(left, [28, 28*n])
plt.imshow(left)
plt.figure(figsize=(17,3))
plt.yticks([])
plt.xticks([28*x+14 for x in range(n)], predictions)
for i,t in enumerate(plt.gca().xaxis.get_ticklabels()):
if predictions[i] > 0.5: t.set_color('red') # bad predictions in red
plt.grid(None)
right = np.reshape(right, [n, 28, 28])
right = np.swapaxes(right, 0, 1)
right = np.reshape(right, [28, 28*n])
plt.imshow(right)
y_pred_train = np.squeeze(y_pred_train)
indexes = np.random.choice(len(y_pred_train), size=10)
display_images(tr_pairs[:, 0][indexes], tr_pairs[:, 1][indexes], y_pred_train[indexes], tr_y[indexes], "clothes and their dissimilarity", 10) | Main.py |
# Commented out IPython magic to ensure Python compatibility.
# Colab magic guard: the %tensorflow_version magic only exists in Colab; with
# the magics commented out the `try` body contained no statements (comments
# are not statements), which is a SyntaxError.  `pass` makes the guard valid.
try:
    # %tensorflow_version only exists in Colab.
    # %tensorflow_version 2.x
    pass
except Exception:
    pass
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Flatten, Dense, Dropout, Lambda
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.python.keras.utils.vis_utils import plot_model
from tensorflow.keras import backend as K
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageFont, ImageDraw
import random
def create_pairs(x, digit_indices):
    '''Positive and negative pair creation.
    Alternates between positive and negative pairs.
    '''
    pair_list, label_list = [], []
    # One less than the smallest class size, so index i + 1 is always valid.
    smallest = min(len(digit_indices[d]) for d in range(10)) - 1
    for cls in range(10):
        for i in range(smallest):
            # positive pair: two consecutive samples of the same class
            a, b = digit_indices[cls][i], digit_indices[cls][i + 1]
            pair_list.append([x[a], x[b]])
            # negative pair: same sample paired with one from a random other class
            other = (cls + random.randrange(1, 10)) % 10
            a, b = digit_indices[cls][i], digit_indices[other][i]
            pair_list.append([x[a], x[b]])
            label_list.extend([1, 0])  # 1 = similar, 0 = dissimilar
    return np.array(pair_list), np.array(label_list)
def create_pairs_on_set(images, labels):
    """Group sample indices by digit class, then build pos/neg pairs from them."""
    per_class_indices = [np.where(labels == digit)[0] for digit in range(10)]
    pairs, pair_labels = create_pairs(images, per_class_indices)
    return pairs, pair_labels.astype('float32')
def show_image(image):
    """Display a single image with a colorbar (matplotlib side effect only)."""
    plt.figure()      # new figure so repeated calls don't draw over each other
    plt.imshow(image)
    plt.colorbar()
    plt.grid(False)   # suppress the grid over the image
    plt.show()
# load the dataset
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# prepare train and test sets
train_images = train_images.astype('float32')
test_images = test_images.astype('float32')
# normalize values
train_images = train_images / 255.0
test_images = test_images / 255.0
# create pairs on train and test sets
tr_pairs, tr_y = create_pairs_on_set(train_images, train_labels)
ts_pairs, ts_y = create_pairs_on_set(test_images, test_labels)
# array index
this_pair = 8
# show images at this index
show_image(ts_pairs[this_pair][0])
show_image(ts_pairs[this_pair][1])
# print the label for this pair
print(tr_y[this_pair])
# print other pairs
show_image(tr_pairs[:,0][0])
show_image(tr_pairs[:,0][1])
show_image(tr_pairs[:,1][0])
show_image(tr_pairs[:,1][1])
def initialize_base_network():
    """Build the shared embedding network of the siamese model.

    Flatten -> 3 x Dense(128, relu) with dropout after the first two layers.
    Returns a Model mapping a (28, 28) image to a 128-d feature vector; the
    same instance is applied to both the left and right inputs.
    """
    # renamed from `input`, which shadowed the builtin of the same name
    base_input = Input(shape=(28,28,), name="base_input")
    x = Flatten(name="flatten_input")(base_input)
    x = Dense(128, activation='relu', name="first_base_dense")(x)
    x = Dropout(0.1, name="first_dropout")(x)
    x = Dense(128, activation='relu', name="second_base_dense")(x)
    x = Dropout(0.1, name="second_dropout")(x)
    x = Dense(128, activation='relu', name="third_base_dense")(x)
    return Model(inputs=base_input, outputs=x)
def euclidean_distance(vects):
    """Batchwise Euclidean distance between two equal-shape tensors.

    The squared distance is clamped below by K.epsilon() before the sqrt so
    the gradient stays finite when the two vectors coincide.
    """
    left, right = vects
    squared_distance = K.sum(K.square(left - right), axis=1, keepdims=True)
    return K.sqrt(K.maximum(squared_distance, K.epsilon()))
def eucl_dist_output_shape(shapes):
    """Output shape of the distance Lambda layer: one scalar per batch row."""
    shape_a, shape_b = shapes
    # distance is a scalar, so only the batch dimension survives
    return (shape_a[0], 1)
base_network = initialize_base_network()
plot_model(base_network, show_shapes=True, show_layer_names=True, to_file='base-model.png')
# create the left input and point to the base network
input_a = Input(shape=(28,28,), name="left_input")
vect_output_a = base_network(input_a)
# create the right input and point to the base network
input_b = Input(shape=(28,28,), name="right_input")
vect_output_b = base_network(input_b)
# measure the similarity of the two vector outputs
output = Lambda(euclidean_distance, name="output_layer", output_shape=eucl_dist_output_shape)([vect_output_a, vect_output_b])
# specify the inputs and output of the model
model = Model([input_a, input_b], output)
# plot model graph
plot_model(model, show_shapes=True, show_layer_names=True, to_file='outer-model.png')
def contrastive_loss_with_margin(margin):
    """Return a Keras-compatible contrastive loss closed over `margin`.

    Similar pairs (y_true == 1) are penalised by the squared distance;
    dissimilar pairs (y_true == 0) by max(margin - d, 0) squared.
    """
    def contrastive_loss(y_true, y_pred):
        '''Contrastive loss from Hadsell-et-al.'06
        http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
        '''
        positive_term = K.square(y_pred)
        negative_term = K.square(K.maximum(margin - y_pred, 0))
        return K.mean(y_true * positive_term + (1 - y_true) * negative_term)
    return contrastive_loss
rms = RMSprop()
model.compile(loss=contrastive_loss_with_margin(margin=1), optimizer=rms)
history = model.fit([tr_pairs[:,0], tr_pairs[:,1]], tr_y, epochs=20, batch_size=128, validation_data=([ts_pairs[:,0], ts_pairs[:,1]], ts_y))
def compute_accuracy(y_true, y_pred):
    '''Compute classification accuracy with a fixed threshold on distances.

    y_pred holds Euclidean distances between pair embeddings; pairs are
    labelled y_true == 1 when similar, and the contrastive loss drives
    similar pairs toward SMALL distances.  A pair is therefore predicted
    "similar" when its distance falls below the threshold.  The original
    `> 0.5` inverted every prediction and reported 1 - accuracy.
    '''
    pred = y_pred.ravel() < 0.5  # distance below threshold => predicted similar (label 1)
    return np.mean(pred == y_true)
loss = model.evaluate(x=[ts_pairs[:,0],ts_pairs[:,1]], y=ts_y)
y_pred_train = model.predict([tr_pairs[:,0], tr_pairs[:,1]])
train_accuracy = compute_accuracy(tr_y, y_pred_train)
y_pred_test = model.predict([ts_pairs[:,0], ts_pairs[:,1]])
test_accuracy = compute_accuracy(ts_y, y_pred_test)
print("Loss = {}, Train Accuracy = {} Test Accuracy = {}".format(loss, train_accuracy, test_accuracy))
def plot_metrics(metric_name, title, ylim=5):
    """Plot a training metric and its validation counterpart.

    Reads the module-level `history` returned by model.fit().  The curves
    were given `label=` values but plt.legend() was never called, so the
    labels never appeared; the legend is now rendered.
    """
    plt.title(title)
    plt.ylim(0, ylim)
    plt.plot(history.history[metric_name], color='blue', label=metric_name)
    plt.plot(history.history['val_' + metric_name], color='green', label='val_' + metric_name)
    plt.legend()  # fix: labels were passed above but never displayed
plot_metrics(metric_name='loss', title="Loss", ylim=0.2)
# Matplotlib config
def visualize_images():
    """Install the global matplotlib rc settings used by the digit displays.

    Only mutates rcParams -- it draws nothing.  NOTE(review): this function
    is defined but never called in this script; presumably it was meant to
    run before display_images -- confirm.
    """
    plt.rc('image', cmap='gray_r')   # digits rendered black-on-white
    plt.rc('grid', linewidth=0)      # effectively hides grid lines
    plt.rc('xtick', top=False, bottom=False, labelsize='large')
    plt.rc('ytick', left=False, right=False, labelsize='large')
    # hex colors without '#' are accepted by the rc color validator
    # (matplotlibrc-file style)
    plt.rc('axes', facecolor='F8F8F8', titlesize="large", edgecolor='white')
    plt.rc('text', color='a8151a')
    plt.rc('figure', facecolor='F0F0F0')  # light grey figure background
# utility to display a row of digits with their predictions
def display_images(left, right, predictions, labels, title, n):
    """Show the `n` left images as one strip, then the `n` right images as a
    second strip with each column's predicted distance as its x-tick label.

    NOTE(review): the `labels` parameter is accepted but never used in this
    body -- confirm whether it was meant to annotate the ticks.
    """
    plt.figure(figsize=(17,3))
    plt.title(title)
    plt.yticks([])
    plt.xticks([])
    plt.grid(None)
    # stitch the n 28x28 left images side by side into one 28 x (28*n) strip
    left = np.reshape(left, [n, 28, 28])
    left = np.swapaxes(left, 0, 1)
    left = np.reshape(left, [28, 28*n])
    plt.imshow(left)
    # second figure: the right images, tick labels centered under each column
    plt.figure(figsize=(17,3))
    plt.yticks([])
    plt.xticks([28*x+14 for x in range(n)], predictions)
    for i,t in enumerate(plt.gca().xaxis.get_ticklabels()):
        if predictions[i] > 0.5: t.set_color('red') # bad predictions in red
    plt.grid(None)
    right = np.reshape(right, [n, 28, 28])
    right = np.swapaxes(right, 0, 1)
    right = np.reshape(right, [28, 28*n])
    plt.imshow(right)
y_pred_train = np.squeeze(y_pred_train)
indexes = np.random.choice(len(y_pred_train), size=10)
display_images(tr_pairs[:, 0][indexes], tr_pairs[:, 1][indexes], y_pred_train[indexes], tr_y[indexes], "clothes and their dissimilarity", 10) | 0.749454 | 0.572185 |
from functools import lru_cache
from typing import List
from datetime import datetime
from fastapi import APIRouter, Depends, Query, Body
from api_v1.estimate.vbm import VBM
from api_v1.residual.residual import Residual
from api_v1.utils.s3_utils import S3
from api_v1.database.db import DatabaseAnalytic
import config
router = APIRouter()
@lru_cache()
def get_settings():
    """Construct the application Settings once and memoise it for all later calls."""
    settings = config.Settings()
    return settings
@router.get("/estimates")
async def estimate_sensor(date: str = 'date', sensors: List[str] = Query(None), actuals: List[float] = Query(None),
settings: config.Settings = Depends(get_settings)):
# load state matrix from s3
s3 = S3(date=date,
bucket_name=settings.AWS_S3_BUCKET_NAME,
access_key=settings.AWS_ACCESS_KEY_ID,
secret_key=settings.AWS_SECRET_ACCESS_KEY,
session_token=settings.AWS_SESSION_TOKEN,
region_name=settings.AWS_REGION)
if s3.check_if_file_exists():
state_matrix = s3.load_state_matrix()
else:
# load the previous state matrix
state_matrix = s3.load_previous_state_matrix()
# get actual thresholds from database
db = DatabaseAnalytic(database=settings.DB_DATABASE,
user=settings.DB_USER,
password=settings.DB_PASSWORD,
host=settings.DB_HOST,
port=settings.DB_PORT)
id_sensor, actual_low, actual_high = db.get_threshold_actual(sensors)
vbm = VBM(actual_low, actual_high)
estimates, state_matrix = vbm.estimate_sensors(actuals, state_matrix)
# update state matrix in s3
s3.upload_state_matrix(state_matrix)
# calculate residual values
residual_negative_treshold, residual_positive_treshold = db.get_residual_threshold(id_sensor)
residual_indication_positives = []
residual_indication_negatives = []
residuals = []
for i in range(len(actuals)):
resid = Residual(actuals[i], estimates[i], residual_positive_treshold[i], residual_negative_treshold[i])
residuals.append(resid.residual)
residual_indication_positives.append(resid.residual_indication_positive)
residual_indication_negatives.append(resid.residual_indication_negative)
# structuring the output
timestamp = datetime.strptime(date, "%Y-%m-%d %H:%M:%S")
values = []
for i in range(len(sensors)):
val = (date, id_sensor[i], actuals[i], None, estimates[i], residuals[i], None, residual_indication_positives[i], residual_indication_negatives[i])
values.append(val)
try:
# insert into table runtime
print(values)
db.insert_to_runtime(values)
return {"message": "success!",
"sensors": sensors, "actuals": actuals, "estimates": list(estimates), "residuals": residuals,
"residual_indication_positive": residual_indication_positives, "residual_indication_negative": residual_indication_negatives,}
except:
return {"message": "insert failed!"} | src/api_v1/api.py | from functools import lru_cache
from typing import List
from datetime import datetime
from fastapi import APIRouter, Depends, Query, Body
from api_v1.estimate.vbm import VBM
from api_v1.residual.residual import Residual
from api_v1.utils.s3_utils import S3
from api_v1.database.db import DatabaseAnalytic
import config
router = APIRouter()
@lru_cache()
def get_settings():
return config.Settings()
@router.get("/estimates")
async def estimate_sensor(date: str = 'date', sensors: List[str] = Query(None), actuals: List[float] = Query(None),
settings: config.Settings = Depends(get_settings)):
# load state matrix from s3
s3 = S3(date=date,
bucket_name=settings.AWS_S3_BUCKET_NAME,
access_key=settings.AWS_ACCESS_KEY_ID,
secret_key=settings.AWS_SECRET_ACCESS_KEY,
session_token=settings.AWS_SESSION_TOKEN,
region_name=settings.AWS_REGION)
if s3.check_if_file_exists():
state_matrix = s3.load_state_matrix()
else:
# load the previous state matrix
state_matrix = s3.load_previous_state_matrix()
# get actual thresholds from database
db = DatabaseAnalytic(database=settings.DB_DATABASE,
user=settings.DB_USER,
password=settings.DB_PASSWORD,
host=settings.DB_HOST,
port=settings.DB_PORT)
id_sensor, actual_low, actual_high = db.get_threshold_actual(sensors)
vbm = VBM(actual_low, actual_high)
estimates, state_matrix = vbm.estimate_sensors(actuals, state_matrix)
# update state matrix in s3
s3.upload_state_matrix(state_matrix)
# calculate residual values
residual_negative_treshold, residual_positive_treshold = db.get_residual_threshold(id_sensor)
residual_indication_positives = []
residual_indication_negatives = []
residuals = []
for i in range(len(actuals)):
resid = Residual(actuals[i], estimates[i], residual_positive_treshold[i], residual_negative_treshold[i])
residuals.append(resid.residual)
residual_indication_positives.append(resid.residual_indication_positive)
residual_indication_negatives.append(resid.residual_indication_negative)
# structuring the output
timestamp = datetime.strptime(date, "%Y-%m-%d %H:%M:%S")
values = []
for i in range(len(sensors)):
val = (date, id_sensor[i], actuals[i], None, estimates[i], residuals[i], None, residual_indication_positives[i], residual_indication_negatives[i])
values.append(val)
try:
# insert into table runtime
print(values)
db.insert_to_runtime(values)
return {"message": "success!",
"sensors": sensors, "actuals": actuals, "estimates": list(estimates), "residuals": residuals,
"residual_indication_positive": residual_indication_positives, "residual_indication_negative": residual_indication_negatives,}
except:
return {"message": "insert failed!"} | 0.629661 | 0.221193 |
import json, subprocess
from .... pyaz_utils import get_cli_name, get_params
def create(autoscale_name, condition, scale, resource_namespace=None, resource_parent=None, resource_type=None, resource_group=None, profile_name=None, cooldown=None, resource=None, timegrain=None):
    '''
    Add an autoscale scaling rule via `az monitor autoscale rule create`.

    Required: autoscale_name, condition, scale.  The original signature
    placed these required parameters AFTER defaulted ones, which is a
    SyntaxError in Python; required parameters now come first (pass keyword
    arguments to stay independent of positional order).

    Returns the parsed JSON output of the az CLI on success; raises
    Exception carrying the CLI's stderr on failure.  Unreachable print
    statements after return/raise were removed.
    '''
    params = get_params(locals())
    command = "az monitor autoscale rule create " + params
    print(command)
    # NOTE(review): shell=True with interpolated params -- relies on
    # get_params() producing trusted/escaped text; an argument list with
    # shell=False would be safer.
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        raise Exception(stderr)
def list(autoscale_name, resource_group, profile_name=None):
    '''
    List autoscale scaling rules via `az monitor autoscale rule list`.

    Note: the name mirrors the az sub-command and therefore shadows the
    builtin `list` inside this module -- kept for interface compatibility.

    Returns the parsed JSON output on success; raises Exception(stderr) on
    failure.  Unreachable print statements after return/raise were removed.
    '''
    params = get_params(locals())
    command = "az monitor autoscale rule list " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        raise Exception(stderr)
def delete(autoscale_name, resource_group, index, profile_name=None):
    '''
    Remove autoscale scaling rules via `az monitor autoscale rule delete`.

    Returns the parsed JSON output on success; raises Exception(stderr) on
    failure.  Unreachable print statements after return/raise were removed.
    '''
    params = get_params(locals())
    command = "az monitor autoscale rule delete " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        raise Exception(stderr)
def copy(autoscale_name, resource_group, dest_schedule, index, source_schedule=None):
params = get_params(locals())
command = "az monitor autoscale rule copy " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr) | test/pyaz/monitor/autoscale/rule/__init__.py | import json, subprocess
from .... pyaz_utils import get_cli_name, get_params
def create(autoscale_name, condition, scale, resource_namespace=None, resource_parent=None, resource_type=None, resource_group=None, profile_name=None, cooldown=None, resource=None, timegrain=None):
    '''
    Add an autoscale scaling rule via `az monitor autoscale rule create`.

    Required: autoscale_name, condition, scale.  The original signature
    placed these required parameters AFTER defaulted ones, which is a
    SyntaxError in Python; required parameters now come first (pass keyword
    arguments to stay independent of positional order).

    Returns the parsed JSON output of the az CLI on success; raises
    Exception carrying the CLI's stderr on failure.  Unreachable print
    statements after return/raise were removed.
    '''
    params = get_params(locals())
    command = "az monitor autoscale rule create " + params
    print(command)
    # NOTE(review): shell=True with interpolated params -- relies on
    # get_params() producing trusted/escaped text; an argument list with
    # shell=False would be safer.
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        raise Exception(stderr)
def list(autoscale_name, resource_group, profile_name=None):
params = get_params(locals())
command = "az monitor autoscale rule list " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def delete(autoscale_name, resource_group, index, profile_name=None):
params = get_params(locals())
command = "az monitor autoscale rule delete " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr)
def copy(autoscale_name, resource_group, dest_schedule, index, source_schedule=None):
params = get_params(locals())
command = "az monitor autoscale rule copy " + params
print(command)
output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout = output.stdout.decode("utf-8")
stderr = output.stderr.decode("utf-8")
if stdout:
return json.loads(stdout)
print(stdout)
else:
raise Exception(stderr)
print(stderr) | 0.226014 | 0.057652 |
import http
import requests
_HTTP_CODE_TO_EXCEPTION = {}
def _register(cls):
    # Class decorator: index the exception class by its HTTP status `code`
    # attribute so get_exception_from_response() can map a response to it.
    _HTTP_CODE_TO_EXCEPTION[cls.code] = cls
    return cls
class APIError(Exception):
    """Root of the API exception hierarchy.

    Carries the human-readable message and, optionally, the lower-level
    exception that triggered it.
    """

    def __init__(self, msg, orig_error=None):
        """Store `msg` and the optional underlying exception `orig_error`."""
        super().__init__(msg)
        self.msg = msg
        self.orig_error = orig_error
class ClientError(APIError):
''' Base exception for 4xx responses '''
pass
@_register
class BadRequest(ClientError):
''' 400 bad request exception '''
code = http.HTTPStatus.BAD_REQUEST
@_register
class Unauthorized(ClientError):
''' 401 unauthorized exception '''
code = http.HTTPStatus.UNAUTHORIZED
@_register
class Forbidden(ClientError):
''' 403 forbidden exception '''
code = http.HTTPStatus.FORBIDDEN
@_register
class NotFound(ClientError):
''' 404 not found exception '''
code = http.HTTPStatus.NOT_FOUND
@_register
class Conflict(ClientError):
''' 409 conflict exception '''
code = http.HTTPStatus.CONFLICT
@_register
class UnprocessableEntity(ClientError):
''' 422 unprocessable entity '''
code = http.HTTPStatus.UNPROCESSABLE_ENTITY
class ServerError(APIError):
''' Base exception for 5xx responses '''
pass
@_register
class InternalServerError(ServerError):
''' 500 internal server error '''
code = http.HTTPStatus.INTERNAL_SERVER_ERROR
@_register
class BadGateway(ServerError):
''' 502 bad gateway exception '''
code = http.HTTPStatus.BAD_GATEWAY
@_register
class GatewayTimeout(ServerError):
''' 504 gateway timeout exception '''
code = http.HTTPStatus.GATEWAY_TIMEOUT
def get_exception_from_response(response: requests.Response):
try:
payload = response.json()
except ValueError:
payload = response.text
msg = '{method} {url}\nstatus: {status} {reason}\ndetail: {detail}'.format(
method=response.request.method,
url=response.request.url,
status=response.status_code,
reason=response.reason,
detail=payload
)
_exception_class = _HTTP_CODE_TO_EXCEPTION.get(
response.status_code, APIError
)
exception = _exception_class(msg)
if not getattr(exception, 'code', None):
setattr(exception, 'code', response.status_code)
return exception | mydjango/libs/exceptions.py | import http
import requests
_HTTP_CODE_TO_EXCEPTION = {}
def _register(cls):
_HTTP_CODE_TO_EXCEPTION[cls.code] = cls
return cls
class APIError(Exception):
def __init__(self, msg, orig_error=None):
super().__init__(msg)
self.msg = msg
self.orig_error = orig_error
class ClientError(APIError):
''' Base exception for 4xx responses '''
pass
@_register
class BadRequest(ClientError):
''' 400 bad request exception '''
code = http.HTTPStatus.BAD_REQUEST
@_register
class Unauthorized(ClientError):
''' 401 unauthorized exception '''
code = http.HTTPStatus.UNAUTHORIZED
@_register
class Forbidden(ClientError):
''' 403 forbidden exception '''
code = http.HTTPStatus.FORBIDDEN
@_register
class NotFound(ClientError):
''' 404 not found exception '''
code = http.HTTPStatus.NOT_FOUND
@_register
class Conflict(ClientError):
''' 409 conflict exception '''
code = http.HTTPStatus.CONFLICT
@_register
class UnprocessableEntity(ClientError):
''' 422 unprocessable entity '''
code = http.HTTPStatus.UNPROCESSABLE_ENTITY
class ServerError(APIError):
''' Base exception for 5xx responses '''
pass
@_register
class InternalServerError(ServerError):
''' 500 internal server error '''
code = http.HTTPStatus.INTERNAL_SERVER_ERROR
@_register
class BadGateway(ServerError):
''' 502 bad gateway exception '''
code = http.HTTPStatus.BAD_GATEWAY
@_register
class GatewayTimeout(ServerError):
''' 504 gateway timeout exception '''
code = http.HTTPStatus.GATEWAY_TIMEOUT
def get_exception_from_response(response: requests.Response):
try:
payload = response.json()
except ValueError:
payload = response.text
msg = '{method} {url}\nstatus: {status} {reason}\ndetail: {detail}'.format(
method=response.request.method,
url=response.request.url,
status=response.status_code,
reason=response.reason,
detail=payload
)
_exception_class = _HTTP_CODE_TO_EXCEPTION.get(
response.status_code, APIError
)
exception = _exception_class(msg)
if not getattr(exception, 'code', None):
setattr(exception, 'code', response.status_code)
return exception | 0.387227 | 0.044101 |
# Imports
import logging
from requests.exceptions import HTTPError
from nltk.parse.corenlp import CoreNLPServer
from nltk.parse.corenlp import CoreNLPServerError
from nltk.parse.corenlp import CoreNLPDependencyParser
# Define type aliases for review summarization
Remark = tuple[str, str] # opinion-feature pair, example: ('nice', 'food')
Summary = list[Remark] # list of remarks, i.e. opinion-feature pairs
class Summarizer:
"""
Summarizer class implementing opinion-feature extraction. Uses Stanford CoreNLP dependency parser.
Attributes:
server (CoreNLPServer): CoreNLP server for accessing Stanford CoreNLP services.
parser (CoreNLPDependencyParser): CoreNLP dependency parser.
"""
def __init__(self, jar_path, models_jar_path):
"""
The constructor for Summarizer class.
Parameters:
jar_path (str): Filepath to Stanford CoreNLP .jar file.
models_jar_path (str): Filepath to Stanford CoreNLP models .jar file.
"""
logging.info('Starting CoreNLP server...')
self.server = CoreNLPServer(path_to_jar=jar_path, path_to_models_jar=models_jar_path)
try:
self.server.start()
logging.info('CoreNLP server started.')
# CoreNLPServerError is thrown when a server is already running
except CoreNLPServerError:
logging.warning('CoreNLP server is already running.')
self.parser = CoreNLPDependencyParser()
def summarize(self, text):
"""
Summarizes a review. Extracts opinion-feature pairs from it.
Parameters:
text (str): Review text.
Returns:
Summary: List of opinion-feature pairs extracted from the review text.
"""
try:
parse = next(self.parser.raw_parse(text))
# An HTTPError raised by the CoreNLP server is related to unrecognized characters in the review text
except HTTPError:
logging.warning(f'Review skipped: {text}')
return []
# Search dependency parsing result to find "nsubj" or "amod" tags
summary = list()
for governor, dep, dependent in parse.triples():
if dep == 'nsubj':
# Look if the nominal subject is noun and if it is modified by an adjective
if governor[1] == 'JJ' and dependent[1] in {'NN', 'NNS'}:
summary.append((governor[0].lower(), dependent[0].lower()))
elif dep == 'amod':
# Look if the adjective is linked to a noun
if dependent[1] == 'JJ' and governor[1] in {'NN', 'NNS'}:
summary.append((dependent[0].lower(), governor[0].lower()))
return summary
def stop(self):
"""
Stops the CoreNLP server of the summarizer object.
"""
self.server.stop()
logging.info('CoreNLP server stopped.') | summarizer/summarizer.py |
# Imports
import logging
from requests.exceptions import HTTPError
from nltk.parse.corenlp import CoreNLPServer
from nltk.parse.corenlp import CoreNLPServerError
from nltk.parse.corenlp import CoreNLPDependencyParser
# Define type aliases for review summarization
Remark = tuple[str, str] # opinion-feature pair, example: ('nice', 'food')
Summary = list[Remark] # list of remarks, i.e. opinion-feature pairs
class Summarizer:
"""
Summarizer class implementing opinion-feature extraction. Uses Stanford CoreNLP dependency parser.
Attributes:
server (CoreNLPServer): CoreNLP server for accessing Stanford CoreNLP services.
parser (CoreNLPDependencyParser): CoreNLP dependency parser.
"""
def __init__(self, jar_path, models_jar_path):
"""
The constructor for Summarizer class.
Parameters:
jar_path (str): Filepath to Stanford CoreNLP .jar file.
models_jar_path (str): Filepath to Stanford CoreNLP models .jar file.
"""
logging.info('Starting CoreNLP server...')
self.server = CoreNLPServer(path_to_jar=jar_path, path_to_models_jar=models_jar_path)
try:
self.server.start()
logging.info('CoreNLP server started.')
# CoreNLPServerError is thrown when a server is already running
except CoreNLPServerError:
logging.warning('CoreNLP server is already running.')
self.parser = CoreNLPDependencyParser()
def summarize(self, text):
"""
Summarizes a review. Extracts opinion-feature pairs from it.
Parameters:
text (str): Review text.
Returns:
Summary: List of opinion-feature pairs extracted from the review text.
"""
try:
parse = next(self.parser.raw_parse(text))
# An HTTPError raised by the CoreNLP server is related to unrecognized characters in the review text
except HTTPError:
logging.warning(f'Review skipped: {text}')
return []
# Search dependency parsing result to find "nsubj" or "amod" tags
summary = list()
for governor, dep, dependent in parse.triples():
if dep == 'nsubj':
# Look if the nominal subject is noun and if it is modified by an adjective
if governor[1] == 'JJ' and dependent[1] in {'NN', 'NNS'}:
summary.append((governor[0].lower(), dependent[0].lower()))
elif dep == 'amod':
# Look if the adjective is linked to a noun
if dependent[1] == 'JJ' and governor[1] in {'NN', 'NNS'}:
summary.append((dependent[0].lower(), governor[0].lower()))
return summary
def stop(self):
"""
Stops the CoreNLP server of the summarizer object.
"""
self.server.stop()
logging.info('CoreNLP server stopped.') | 0.821796 | 0.164785 |
from lib.web_data import WebData
def bitcore_claimer_line(n):
    """Render the claimer.py command line for one BitCore (BTX) nugget.

    The airdrop transaction details are unknown at this point, so the
    txid/txindex/satoshis fields are emitted as <...> placeholders for the
    user to fill in by hand.
    """
    addr = n['src_addr']
    placeholder_txid = "<%s-airdrop-txid>" % addr
    placeholder_txindex = "<%s-airdrop-txindex>" % addr
    placeholder_satoshis = "<%s-airdrop-satoshis>" % addr
    key_name = "%s-private-key" % n['src_addr']
    destination = "bitcore-destination-address"
    force_flag = "--force " if n.bfc_force else ""
    return ("python2.7 bitcoin_fork_claimer/claimer.py BTX %s %s %s %s"
            " %s--txindex %s --satoshis %s" %
            (placeholder_txid, key_name, addr, destination, force_flag,
             placeholder_txindex, placeholder_satoshis))
BITCORE_INSTRUCTIONS = """
BitCore has a separate blockchain that aidropped value on BTC addresses as new
transactions. To use the bitcoin_fork_claimer tool privately, the details of
the transactions must be manually found and provided here.
One must use a BitCore node or block explorer to find:
1) The transaction hash (a.k.a transaction ID) which credits the address
2) The transaction index of the specific output
3) The amount of BitCore satoshis credited
This has been automated to access the BitCore block explorer via the
direct-query-claim-prep.py script included in forkdrop_suite. This will gather
the balances and provide a more specific report tailored to claiming Bitcoin
Private.
WARNING: These quereis are less private than blockchain.info queries and may be
less reliable.
"""
UNSPENT_URL = "https://chainz.cryptoid.info/btx/api.dws?q=unspent&active=%s&key=<KEY>"
class BitcoreQuery(object):
    """Query the chainz.cryptoid.info explorer for BitCore (BTX) unspent
    outputs on a set of BTC addresses and record them as claimable 'nuggets'
    in the value database.  Performs network I/O at construction time.
    """
    def __init__(self, vdb, settings):
        # vdb      -- value database; must expose get_coin_info() and a
        #             'nuggets' entry with append_direct_query()
        # settings -- parsed CLI settings (addresses, not_tails, cache_requests)
        self.vdb = vdb
        self.coin = self.vdb.get_coin_info('bitcore')
        # Classify each address by prefix: '3...' => P2SH-P2WPKH,
        # 'bc1...' => bech32, anything else treated as legacy.
        self.addrs = [{'addr': a,
                       'p2sh_p2wpkh': a[:1] == "3",
                       'bech32': a[:3] == "bc1"}
                      for a in settings.addresses]
        self.tails = not settings.not_tails
        self.cache = settings.cache_requests
        self.wd = WebData(tails=self.tails, cache=self.cache)
        self._add_nuggets()  # network fetch happens here, in the constructor

    def _add_nuggets(self):
        # Fetch unspent outputs for every address and append them to the db.
        for a in self.addrs:
            addr = a['addr']
            url = UNSPENT_URL % addr
            unspent_info = self.wd.fetch_web_url_json_info(url)
            for u in unspent_info['unspent_outputs']:
                # NOTE(review): 'tx_ouput_n' (sic) -- if this spelling does
                # not match the explorer's actual JSON field this raises
                # KeyError; verify against a live API response before
                # "fixing" it, as some explorer APIs ship the typo.
                self.vdb['nuggets'].append_direct_query(a, self.coin,
                                                        u['tx_hash'],
                                                        u['tx_ouput_n'],
                                                        int(u['value']))
BITCORE = {
'id': 'bitcore',
'instructions': BITCORE_INSTRUCTIONS,
'claimer_line': bitcore_claimer_line,
'direct_query': BitcoreQuery,
} | lib/coin/bitcore.py |
from lib.web_data import WebData
def bitcore_claimer_line(n):
src_addr = n['src_addr']
txid = "<%s-airdrop-txid>" % src_addr
priv_key = "%s-private-key" % n['src_addr']
dst_addr = "bitcore-destination-address"
txindex = "<%s-airdrop-txindex>" % src_addr
satoshis = "<%s-airdrop-satoshis>" % src_addr
force = "--force " if n.bfc_force else ""
return ("python2.7 bitcoin_fork_claimer/claimer.py BTX %s %s %s %s"
" %s--txindex %s --satoshis %s" %
(txid, priv_key, src_addr, dst_addr, force, txindex, satoshis))
BITCORE_INSTRUCTIONS = """
BitCore has a separate blockchain that aidropped value on BTC addresses as new
transactions. To use the bitcoin_fork_claimer tool privately, the details of
the transactions must be manually found and provided here.
One must use a BitCore node or block explorer to find:
1) The transaction hash (a.k.a transaction ID) which credits the address
2) The transaction index of the specific output
3) The amount of BitCore satoshis credited
This has been automated to access the BitCore block explorer via the
direct-query-claim-prep.py script included in forkdrop_suite. This will gather
the balances and provide a more specific report tailored to claiming Bitcoin
Private.
WARNING: These quereis are less private than blockchain.info queries and may be
less reliable.
"""
UNSPENT_URL = "https://chainz.cryptoid.info/btx/api.dws?q=unspent&active=%s&key=<KEY>"
class BitcoreQuery(object):
def __init__(self, vdb, settings):
self.vdb = vdb
self.coin = self.vdb.get_coin_info('bitcore')
self.addrs = [{'addr': a,
'p2sh_p2wpkh': a[:1] == "3",
'bech32': a[:3] == "bc1"}
for a in settings.addresses]
self.tails = not settings.not_tails
self.cache = settings.cache_requests
self.wd = WebData(tails=self.tails, cache=self.cache)
self._add_nuggets()
def _add_nuggets(self):
for a in self.addrs:
addr = a['addr']
url = UNSPENT_URL % addr
unspent_info = self.wd.fetch_web_url_json_info(url)
for u in unspent_info['unspent_outputs']:
self.vdb['nuggets'].append_direct_query(a, self.coin,
u['tx_hash'],
u['tx_ouput_n'],
int(u['value']))
BITCORE = {
'id': 'bitcore',
'instructions': BITCORE_INSTRUCTIONS,
'claimer_line': bitcore_claimer_line,
'direct_query': BitcoreQuery,
} | 0.503662 | 0.15034 |
import os
import datetime
from pathlib import Path
import shutil
from pymediainfo import MediaInfo
from .core import *
class MediaObject(object):
def __init__(self, factory, json_data = None):
self._factory = factory
if json_data is None:
self.load_from_pvr()
else:
self.load_from_json(json_data)
def __eq__(self, other):
is_match = False
if type(other) is type(self):
is_match = self.media_id == other.media_id
elif isinstance(other, str):
## This is likely IMDB
if not is_match and self.imdb is not None:
is_match = self.imdb == other
elif isinstance(other, int):
# This is for TVDB or TheMovieDB
if not is_match and hasattr(self, "tv_db_id"):
is_match = self.tv_db_id == other
if not is_match and hasattr(self, "movie_db_id"):
is_match = self.movie_db_id == other
return is_match
def __getattr__(self, name):
media_stat_properties = ["files_720p", "files_1080p", "files_2160p", "webrip_files", "webdl_files", "sdr_4k_files", "remux_files"]
if any(attr in name for attr in media_stat_properties):
# print("calling to load the media stats for media object '" + self.title + "', trigger attribute: " + name)
self.load_media_stats()
return getattr(self, name)
# print ("AttributeError on: " + name)
raise AttributeError
@property
def imdb(self):
if hasattr(self, "_imdb"):
return self._imdb
return None
@property
def media_id(self):
if hasattr(self, "series_id"):
return self.series_id
elif hasattr(self, "movie_id"):
return self.movie_id
def rescan(self):
self._factory.rescan(self.media_id)
@property
def public_db_id(self):
if hasattr(self, "series_id"):
return self.tv_db_id
elif hasattr(self, "movie_id"):
return self.movie_db_id
@property
def pvr_is_available(self):
return self._pvr_is_available
@property
def path(self):
if self._path[-1] != "/":
return self._path + "/"
return self._path
@property
def title(self):
return self._title
@property
def ideal_folder_name(self):
return self.title.replace(":", "") + " (" + str(self.year) + ")"
@property
def json(self):
if self._json["path"] != self.path:
self._json["path"] = self.path
return self._json
@property
def media_files(self):
if hasattr(self, "movie_id"):
ignore_files = ["-behindthescenes", "-deleted", "-featurette", "-interview", "-scene", "-short", "-trailer", "-other"]
else:
ignore_files = []
return self._media_files_helper(ignore_files)
@property
def all_media_files(self):
return self._media_files_helper()
def best_rip_type(self):
if len(self.remux_files) > 0:
return RIPType.REMUX
elif len(self.webdl_files) > 0:
return RIPType.WEBDL
elif len(self.webrip_files) > 0:
return RIPType.WEBRIP
return None
@property
def resolution(self):
if len(self.files_2160p) > 0:
return RESOLUTION.UHD
elif len(self.files_1080p) > 0:
return RESOLUTION.FULLHD
elif len(self.files_720p) > 0:
return RESOLUTION.HD720
return None
def load_media_stats(self):
file_count = 0
media_files_stats = []
if self.media_files is None:
return
# print ("loading media stats for media '" + self.title + "'")
for media_file in self.media_files:
full_path_media_file = self.path + media_file
media_info = MediaInfo.parse(full_path_media_file)
file_size = os.path.getsize(full_path_media_file)
file_count = file_count + 1
track_count = 0
track_highest_resolution = 0
# print(" File " + str(file_count) + ": " + str(media_file))
# print(" file size: " + str(convert_size(file_size)))
for track in media_info.tracks:
track_count = track_count + 1
if track.track_type == 'Video':
video_track_resolution = int(track.height)
video_track_bit_rate = track.bit_rate
if video_track_resolution > track_highest_resolution:
track_highest_resolution = video_track_resolution
movie_rip_type = None
if media_file.lower().find("webrip") > 0:
movie_rip_type = RIPType.WEBRIP
elif media_file.lower().find("web-dl") > 0:
movie_rip_type = RIPType.WEBDL
elif media_file.lower().find("remux") or media_file.lower().find("bluray"):
movie_rip_type = RIPType.REMUX
# print ("file resolution: " + str(track_highest_resolution) + ", RIP Type: " + str(movie_rip_type))
media_files_stats.append([full_path_media_file, media_file, track_highest_resolution, movie_rip_type])
self.files_720p = [f for f in media_files_stats if f[2] > 480 and f[2] <= 720]
self.files_1080p = [f for f in media_files_stats if f[2] > 720 and f[2] <= 1080]
self.files_2160p = [f for f in media_files_stats if f[2] > 1080]
self.webrip_files = [f for f in media_files_stats if f[3] == RIPType.WEBRIP]
self.webdl_files = [f for f in media_files_stats if f[3] == RIPType.WEBDL]
self.remux_files = [f for f in media_files_stats if f[3] == RIPType.REMUX]
# self.sdr_4k_files = [f for f in media_files_stats if f[2] >= 2160 and str(f[1]).lower().find("sdr") > 0]
self.sdr_4k_files = []
for f in media_files_stats:
if f[1].lower().find(".sdr.") > 0:
self.sdr_4k_files.append(f)
def load_from_json(self, json_data):
self._json = json_data
self._title = json_data["title"]
self._path = json_data["path"]
if "isAvailable" in json_data:
self._pvr_is_available = json_data["isAvailable"]
self.pvr_size_on_disk = json_data["sizeOnDisk"]
self.year = json_data["year"]
self.pvr_media_files = []
if "movieFile" in json_data:
# for file in json_data["movieFile"]:
self.pvr_media_files.append(self.path + json_data["movieFile"]["relativePath"])
if "physical_release" in json_data:
self.physical_release = datetime.datetime.strptime(json_data["physicalRelease"], '%Y-%m-%dT%H:%M:%SZ')
else:
self.physical_release = "unknown"
if "imdbId" in json_data:
self._imdb = json_data["imdbId"]
def move_media_files(self, new_path):
print ()
print ("Attempting to move media files from " + self.path + " to " + new_path + ".")
if not os.path.isdir(new_path):
print (" could not move media files because the path does not exist.")
return
if len(self.all_media_files) == 0:
print (" no relavent media files to move.")
return
if hasattr(self, "season_folders"):
## Need to move the season folders first....
for season_folder in self.season_folders:
new_season_path = new_path + season_folder
if not os.path.isdir(new_season_path):
print (" creating Season folder: " + new_season_path)
os.mkdir(new_season_path)
else:
print (" skipping creating season folder because it already exists: " + new_season_path)
## Then need to move each of the files...
for media_file in self.all_media_files:
to_move_file_name = os.path.basename(media_file)
to_move_season_folder = ""
if hasattr(self, "season_folders"):
for season_folder in self.season_folders:
if media_file.find(season_folder) >= 0:
to_move_season_folder = season_folder
break
# We could not find the season folder for the media file.... strange. Log it.
if len(to_move_season_folder) == 0:
print(" Could not identify the Season for file: " + media_file)
continue
new_file_path = new_path + to_move_season_folder + to_move_file_name
print (" moving file " + media_file + " to " + new_file_path + ".")
os.replace(self.path + media_file, new_file_path)
if len(self.all_media_files) == 0:
print (" sucessfully moved all media files from " + self.path + " to " + new_path + ". Now deleting the empty directory")
### Need to do this recursively.
shutil.rmtree(self.path)
def update_path(self, new_path):
self._path = new_path
assert (new_path.find("Season") == -1)
self._factory.update_media(self.media_id, self.json) | media_manager/mediaobject.py | import os
import datetime
from pathlib import Path
import shutil
from pymediainfo import MediaInfo
from .core import *
class MediaObject(object):
def __init__(self, factory, json_data = None):
self._factory = factory
if json_data is None:
self.load_from_pvr()
else:
self.load_from_json(json_data)
def __eq__(self, other):
is_match = False
if type(other) is type(self):
is_match = self.media_id == other.media_id
elif isinstance(other, str):
## This is likely IMDB
if not is_match and self.imdb is not None:
is_match = self.imdb == other
elif isinstance(other, int):
# This is for TVDB or TheMovieDB
if not is_match and hasattr(self, "tv_db_id"):
is_match = self.tv_db_id == other
if not is_match and hasattr(self, "movie_db_id"):
is_match = self.movie_db_id == other
return is_match
def __getattr__(self, name):
media_stat_properties = ["files_720p", "files_1080p", "files_2160p", "webrip_files", "webdl_files", "sdr_4k_files", "remux_files"]
if any(attr in name for attr in media_stat_properties):
# print("calling to load the media stats for media object '" + self.title + "', trigger attribute: " + name)
self.load_media_stats()
return getattr(self, name)
# print ("AttributeError on: " + name)
raise AttributeError
@property
def imdb(self):
if hasattr(self, "_imdb"):
return self._imdb
return None
@property
def media_id(self):
if hasattr(self, "series_id"):
return self.series_id
elif hasattr(self, "movie_id"):
return self.movie_id
def rescan(self):
self._factory.rescan(self.media_id)
@property
def public_db_id(self):
if hasattr(self, "series_id"):
return self.tv_db_id
elif hasattr(self, "movie_id"):
return self.movie_db_id
@property
def pvr_is_available(self):
return self._pvr_is_available
@property
def path(self):
if self._path[-1] != "/":
return self._path + "/"
return self._path
@property
def title(self):
return self._title
@property
def ideal_folder_name(self):
return self.title.replace(":", "") + " (" + str(self.year) + ")"
@property
def json(self):
if self._json["path"] != self.path:
self._json["path"] = self.path
return self._json
@property
def media_files(self):
if hasattr(self, "movie_id"):
ignore_files = ["-behindthescenes", "-deleted", "-featurette", "-interview", "-scene", "-short", "-trailer", "-other"]
else:
ignore_files = []
return self._media_files_helper(ignore_files)
@property
def all_media_files(self):
return self._media_files_helper()
def best_rip_type(self):
if len(self.remux_files) > 0:
return RIPType.REMUX
elif len(self.webdl_files) > 0:
return RIPType.WEBDL
elif len(self.webrip_files) > 0:
return RIPType.WEBRIP
return None
@property
def resolution(self):
if len(self.files_2160p) > 0:
return RESOLUTION.UHD
elif len(self.files_1080p) > 0:
return RESOLUTION.FULLHD
elif len(self.files_720p) > 0:
return RESOLUTION.HD720
return None
def load_media_stats(self):
file_count = 0
media_files_stats = []
if self.media_files is None:
return
# print ("loading media stats for media '" + self.title + "'")
for media_file in self.media_files:
full_path_media_file = self.path + media_file
media_info = MediaInfo.parse(full_path_media_file)
file_size = os.path.getsize(full_path_media_file)
file_count = file_count + 1
track_count = 0
track_highest_resolution = 0
# print(" File " + str(file_count) + ": " + str(media_file))
# print(" file size: " + str(convert_size(file_size)))
for track in media_info.tracks:
track_count = track_count + 1
if track.track_type == 'Video':
video_track_resolution = int(track.height)
video_track_bit_rate = track.bit_rate
if video_track_resolution > track_highest_resolution:
track_highest_resolution = video_track_resolution
movie_rip_type = None
if media_file.lower().find("webrip") > 0:
movie_rip_type = RIPType.WEBRIP
elif media_file.lower().find("web-dl") > 0:
movie_rip_type = RIPType.WEBDL
elif media_file.lower().find("remux") or media_file.lower().find("bluray"):
movie_rip_type = RIPType.REMUX
# print ("file resolution: " + str(track_highest_resolution) + ", RIP Type: " + str(movie_rip_type))
media_files_stats.append([full_path_media_file, media_file, track_highest_resolution, movie_rip_type])
self.files_720p = [f for f in media_files_stats if f[2] > 480 and f[2] <= 720]
self.files_1080p = [f for f in media_files_stats if f[2] > 720 and f[2] <= 1080]
self.files_2160p = [f for f in media_files_stats if f[2] > 1080]
self.webrip_files = [f for f in media_files_stats if f[3] == RIPType.WEBRIP]
self.webdl_files = [f for f in media_files_stats if f[3] == RIPType.WEBDL]
self.remux_files = [f for f in media_files_stats if f[3] == RIPType.REMUX]
# self.sdr_4k_files = [f for f in media_files_stats if f[2] >= 2160 and str(f[1]).lower().find("sdr") > 0]
self.sdr_4k_files = []
for f in media_files_stats:
if f[1].lower().find(".sdr.") > 0:
self.sdr_4k_files.append(f)
def load_from_json(self, json_data):
self._json = json_data
self._title = json_data["title"]
self._path = json_data["path"]
if "isAvailable" in json_data:
self._pvr_is_available = json_data["isAvailable"]
self.pvr_size_on_disk = json_data["sizeOnDisk"]
self.year = json_data["year"]
self.pvr_media_files = []
if "movieFile" in json_data:
# for file in json_data["movieFile"]:
self.pvr_media_files.append(self.path + json_data["movieFile"]["relativePath"])
if "physical_release" in json_data:
self.physical_release = datetime.datetime.strptime(json_data["physicalRelease"], '%Y-%m-%dT%H:%M:%SZ')
else:
self.physical_release = "unknown"
if "imdbId" in json_data:
self._imdb = json_data["imdbId"]
def move_media_files(self, new_path):
print ()
print ("Attempting to move media files from " + self.path + " to " + new_path + ".")
if not os.path.isdir(new_path):
print (" could not move media files because the path does not exist.")
return
if len(self.all_media_files) == 0:
print (" no relavent media files to move.")
return
if hasattr(self, "season_folders"):
## Need to move the season folders first....
for season_folder in self.season_folders:
new_season_path = new_path + season_folder
if not os.path.isdir(new_season_path):
print (" creating Season folder: " + new_season_path)
os.mkdir(new_season_path)
else:
print (" skipping creating season folder because it already exists: " + new_season_path)
## Then need to move each of the files...
for media_file in self.all_media_files:
to_move_file_name = os.path.basename(media_file)
to_move_season_folder = ""
if hasattr(self, "season_folders"):
for season_folder in self.season_folders:
if media_file.find(season_folder) >= 0:
to_move_season_folder = season_folder
break
# We could not find the season folder for the media file.... strange. Log it.
if len(to_move_season_folder) == 0:
print(" Could not identify the Season for file: " + media_file)
continue
new_file_path = new_path + to_move_season_folder + to_move_file_name
print (" moving file " + media_file + " to " + new_file_path + ".")
os.replace(self.path + media_file, new_file_path)
if len(self.all_media_files) == 0:
print (" sucessfully moved all media files from " + self.path + " to " + new_path + ". Now deleting the empty directory")
### Need to do this recursively.
shutil.rmtree(self.path)
def update_path(self, new_path):
self._path = new_path
assert (new_path.find("Season") == -1)
self._factory.update_media(self.media_id, self.json) | 0.235548 | 0.095602 |
from collections import Counter, defaultdict
import csv
import requests
CSV_URL = 'https://raw.githubusercontent.com/pybites/SouthParkData/master/by-season/Season-{}.csv' # noqa E501
def get_season_csv_file(season):
"""Receives a season int, and downloads loads in its
corresponding CSV_URL"""
with requests.Session() as s:
download = s.get(CSV_URL.format(season))
return download.content.decode('utf-8')
def get_num_words_spoken_by_character_per_episode(content):
"""Receives loaded csv content (str) and returns a dict of
keys=characters and values=Counter object,
which is a mapping of episode=>words spoken"""
data = csv.DictReader(content.splitlines(), delimiter=',')
# count_character_words = defaultdict(lambda: Counter())
count_character_words = defaultdict(Counter)
for datum in data:
character = datum['Character']
episode = datum['Episode']
word_length = len(datum['Line'].split())
count_character_words[character][episode] += word_length
return count_character_words
# tests
import pytest
from southpark import (get_season_csv_file,
get_num_words_spoken_by_character_per_episode)
@pytest.fixture(scope="module")
def words_spoken_s1():
# module scope to not call requests for every test function
content = get_season_csv_file(season=1)
return get_num_words_spoken_by_character_per_episode(content)
@pytest.fixture(scope="module")
def words_spoken_s5():
content = get_season_csv_file(season=5)
return get_num_words_spoken_by_character_per_episode(content)
def test_get_words_spoken_season1_stan(words_spoken_s1):
expected = [('4', 615), ('6', 572), ('5', 514)]
assert words_spoken_s1['Stan'].most_common()[:3] == expected
def test_get_words_spoken_season1_cartman(words_spoken_s1):
expected = [('1', 735), ('10', 669), ('13', 621)]
alt_expected = [('1', 738), ('10', 669), ('13', 621)]
assert words_spoken_s1['Cartman'].most_common()[:3] in (expected,
alt_expected)
def test_get_words_spoken_season1_cartman_least_talkative(words_spoken_s1):
expected = [('11', 285), ('6', 264), ('4', 244)]
assert words_spoken_s1['Cartman'].most_common()[-3:] == expected
def get_words_spoken_non_existing_character(words_spoken_s1):
assert words_spoken_s1['bogus'].most_common() == []
# let's look at another season and other characters
def test_get_words_spoken_season5_sheila(words_spoken_s5):
expected = [('11', 295), ('6', 212), ('7', 52)]
assert words_spoken_s5['Sheila'].most_common()[:3] == expected
def test_get_words_spoken_season5_choksondik(words_spoken_s5):
expected = [('7', 749), ('10', 131), ('1', 129)]
assert words_spoken_s5['Ms. Choksondik'].most_common()[:3] == expected | bites/bite090.py | from collections import Counter, defaultdict
import csv
import requests
CSV_URL = 'https://raw.githubusercontent.com/pybites/SouthParkData/master/by-season/Season-{}.csv' # noqa E501
def get_season_csv_file(season):
"""Receives a season int, and downloads loads in its
corresponding CSV_URL"""
with requests.Session() as s:
download = s.get(CSV_URL.format(season))
return download.content.decode('utf-8')
def get_num_words_spoken_by_character_per_episode(content):
"""Receives loaded csv content (str) and returns a dict of
keys=characters and values=Counter object,
which is a mapping of episode=>words spoken"""
data = csv.DictReader(content.splitlines(), delimiter=',')
# count_character_words = defaultdict(lambda: Counter())
count_character_words = defaultdict(Counter)
for datum in data:
character = datum['Character']
episode = datum['Episode']
word_length = len(datum['Line'].split())
count_character_words[character][episode] += word_length
return count_character_words
# tests
import pytest
from southpark import (get_season_csv_file,
get_num_words_spoken_by_character_per_episode)
@pytest.fixture(scope="module")
def words_spoken_s1():
# module scope to not call requests for every test function
content = get_season_csv_file(season=1)
return get_num_words_spoken_by_character_per_episode(content)
@pytest.fixture(scope="module")
def words_spoken_s5():
content = get_season_csv_file(season=5)
return get_num_words_spoken_by_character_per_episode(content)
def test_get_words_spoken_season1_stan(words_spoken_s1):
expected = [('4', 615), ('6', 572), ('5', 514)]
assert words_spoken_s1['Stan'].most_common()[:3] == expected
def test_get_words_spoken_season1_cartman(words_spoken_s1):
expected = [('1', 735), ('10', 669), ('13', 621)]
alt_expected = [('1', 738), ('10', 669), ('13', 621)]
assert words_spoken_s1['Cartman'].most_common()[:3] in (expected,
alt_expected)
def test_get_words_spoken_season1_cartman_least_talkative(words_spoken_s1):
expected = [('11', 285), ('6', 264), ('4', 244)]
assert words_spoken_s1['Cartman'].most_common()[-3:] == expected
def get_words_spoken_non_existing_character(words_spoken_s1):
assert words_spoken_s1['bogus'].most_common() == []
# let's look at another season and other characters
def test_get_words_spoken_season5_sheila(words_spoken_s5):
expected = [('11', 295), ('6', 212), ('7', 52)]
assert words_spoken_s5['Sheila'].most_common()[:3] == expected
def test_get_words_spoken_season5_choksondik(words_spoken_s5):
expected = [('7', 749), ('10', 131), ('1', 129)]
assert words_spoken_s5['Ms. Choksondik'].most_common()[:3] == expected | 0.743075 | 0.355901 |