| code (string, lengths 22 to 1.05M) | apis (list, lengths 1 to 3.31k) | extract_api (string, lengths 75 to 3.25M) |
|---|---|---|
# Version 1.0 by <NAME>
# License The MIT License (MIT)
# Copyright 2019 <NAME>
# Work in progress
import digitalocean
import os
import time
# USER CONFIG
# Digital Ocean API Token
with open('api_key.txt', 'r') as key_file:
    my_token = key_file.readline().rstrip('\n')
# Region
region = "nyc3"
# Image
dist = "centos-7-x64"
# Droplet Size
size = "s-1vcpu-1gb"
# SCRIPT START
def clear_screen():
os.system("cls" if os.name == "nt" else "clear")
clear_screen()
manager = digitalocean.Manager(token=my_token)
print("Pick Server Name")
server_name = input("Server Name: ")
# Droplet Tag
my_tag = [server_name]
print("Pick ssh keys to use")
print(manager.get_all_sshkeys())
input_keys = input("SSH Key IDs: ").split(",")
my_keys = []
for ssh_key in input_keys:
the_key = manager.get_ssh_key(ssh_key)
my_keys.append(the_key)
# Droplet Creating
droplet = digitalocean.Droplet(token=my_token, name=server_name, region=region, image=dist, size=size, ssh_keys=my_keys, tags=my_tag)
droplet.create()
# Server Status Check
def server_status():
actions = droplet.get_actions()
for action in actions:
action.load()
# Once it shows complete, droplet is up and running
return action.status
# Get Server IP address
def get_droplet_ip():
cur_droplets = manager.get_all_droplets()
# Iterate through all existing droplets
for i in cur_droplets:
if i.name == server_name:
return i.ip_address
print("Creating Server {}".format(server_name))
# Wait for Server Created
# Poll until the create action completes instead of spinning on the API
while server_status() == "in-progress":
    time.sleep(5)
print("Your Droplet IP is {}".format(get_droplet_ip()))
print("Server {} Created, Bye Bye!".format(server_name))
|
[
"digitalocean.Droplet",
"digitalocean.Manager",
"os.system"
] |
[((445, 481), 'digitalocean.Manager', 'digitalocean.Manager', ([], {'token': 'my_token'}), '(token=my_token)\n', (465, 481), False, 'import digitalocean\n'), ((839, 967), 'digitalocean.Droplet', 'digitalocean.Droplet', ([], {'token': 'my_token', 'name': 'server_name', 'region': 'region', 'image': 'dist', 'size': 'size', 'ssh_keys': 'my_keys', 'tags': 'my_tag'}), '(token=my_token, name=server_name, region=region, image\n =dist, size=size, ssh_keys=my_keys, tags=my_tag)\n', (859, 967), False, 'import digitalocean\n'), ((368, 416), 'os.system', 'os.system', (["('cls' if os.name == 'nt' else 'clear')"], {}), "('cls' if os.name == 'nt' else 'clear')\n", (377, 416), False, 'import os\n')]
|
import pandas as pd
from datacode.psm.typing import DfDict, FloatOrNone, StrList, StrOrNone, StrListOrNone
import pyexlatex.table as lt
from datacode.psm.predict import explain_probability_of_treatment
from datacode.psm.summarize.latex import matching_latex_table_from_df_dict
from datacode.psm.summarize.predictions import summarize_predictions
from datacode.psm.summarize.stats import matching_summary_stats
from datacode.psm.names import get_prob_treated_varname
def create_and_output_matching_summary_latex_table(df: pd.DataFrame, matched_df: pd.DataFrame, predict_df: pd.DataFrame,
treated_var: str, xvars: StrList, entity_var: str, time_var: str,
fe: StrListOrNone = None,
prob_treated_var: StrOrNone = None, control_name: str = 'Control',
treated_name: str = 'Treated', below_text: StrOrNone = None,
outfolder: str = '.', caption: str = 'Propensity Score Matching',
min_matching_pct: FloatOrNone = None,
) -> lt.Table:
summary_df_dict = create_matching_summary_df_dict(
df,
matched_df,
predict_df,
treated_var,
xvars,
entity_var,
fe=fe,
prob_treated_var=prob_treated_var,
control_name=control_name,
treated_name=treated_name
)
table = matching_latex_table_from_df_dict(
summary_df_dict,
entity_var,
time_var,
below_text=below_text,
caption=caption,
min_matching_pct=min_matching_pct
)
table.to_pdf_and_move(
outfolder=outfolder,
outname=caption
)
return table
def create_matching_summary_df_dict(df: pd.DataFrame, matched_df: pd.DataFrame, predict_df: pd.DataFrame,
treated_var: str, xvars: StrList, entity_var: str, fe: StrListOrNone = None,
prob_treated_var: StrOrNone = None, control_name: str = 'Control',
treated_name: str = 'Treated') -> DfDict:
prob_treated_var = get_prob_treated_varname(treated_var, prob_treated_var_name=prob_treated_var)
_, model_summ = explain_probability_of_treatment(
df,
treated_var,
xvars,
fe=fe
)
predict_summ = summarize_predictions(
predict_df,
treated_var,
treated_name=treated_name,
control_name=control_name
)
describe_vars = [treated_var, prob_treated_var] + xvars
match_summ = matching_summary_stats(
df,
matched_df,
treated_var,
describe_vars,
entity_var,
control_name=control_name,
treated_name=treated_name
)
return {
'model': model_summ.tables[0],
'predict': predict_summ,
'match': match_summ
}
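# A hedged usage sketch (the dataframes and column names below are
# illustrative assumptions, not part of this module):
#
#   table = create_and_output_matching_summary_latex_table(
#       df, matched_df, predict_df,
#       treated_var='treated', xvars=['size', 'leverage'],
#       entity_var='firm_id', time_var='year',
#       outfolder='output',
#   )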
|
[
"datacode.psm.summarize.predictions.summarize_predictions",
"datacode.psm.summarize.stats.matching_summary_stats",
"datacode.psm.summarize.latex.matching_latex_table_from_df_dict",
"datacode.psm.predict.explain_probability_of_treatment",
"datacode.psm.names.get_prob_treated_varname"
] |
[((1624, 1775), 'datacode.psm.summarize.latex.matching_latex_table_from_df_dict', 'matching_latex_table_from_df_dict', (['summary_df_dict', 'entity_var', 'time_var'], {'below_text': 'below_text', 'caption': 'caption', 'min_matching_pct': 'min_matching_pct'}), '(summary_df_dict, entity_var, time_var,\n below_text=below_text, caption=caption, min_matching_pct=min_matching_pct)\n', (1657, 1775), False, 'from datacode.psm.summarize.latex import matching_latex_table_from_df_dict\n'), ((2356, 2433), 'datacode.psm.names.get_prob_treated_varname', 'get_prob_treated_varname', (['treated_var'], {'prob_treated_var_name': 'prob_treated_var'}), '(treated_var, prob_treated_var_name=prob_treated_var)\n', (2380, 2433), False, 'from datacode.psm.names import get_prob_treated_varname\n'), ((2454, 2517), 'datacode.psm.predict.explain_probability_of_treatment', 'explain_probability_of_treatment', (['df', 'treated_var', 'xvars'], {'fe': 'fe'}), '(df, treated_var, xvars, fe=fe)\n', (2486, 2517), False, 'from datacode.psm.predict import explain_probability_of_treatment\n'), ((2576, 2680), 'datacode.psm.summarize.predictions.summarize_predictions', 'summarize_predictions', (['predict_df', 'treated_var'], {'treated_name': 'treated_name', 'control_name': 'control_name'}), '(predict_df, treated_var, treated_name=treated_name,\n control_name=control_name)\n', (2597, 2680), False, 'from datacode.psm.summarize.predictions import summarize_predictions\n'), ((2793, 2929), 'datacode.psm.summarize.stats.matching_summary_stats', 'matching_summary_stats', (['df', 'matched_df', 'treated_var', 'describe_vars', 'entity_var'], {'control_name': 'control_name', 'treated_name': 'treated_name'}), '(df, matched_df, treated_var, describe_vars,\n entity_var, control_name=control_name, treated_name=treated_name)\n', (2815, 2929), False, 'from datacode.psm.summarize.stats import matching_summary_stats\n')]
|
# Generated by Django 3.1.7 on 2021-02-23 12:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bugtracker_app', '0004_auto_20210222_2058'),
]
operations = [
migrations.AlterField(
model_name='customusermodel',
name='role',
field=models.CharField(blank=True, choices=[('Admin', 'Admin'), ('Dev', 'Dev'), ('Boss Man', 'Boss Man')], max_length=60, null=True),
),
]
|
[
"django.db.models.CharField"
] |
[((350, 481), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'choices': "[('Admin', 'Admin'), ('Dev', 'Dev'), ('Boss Man', 'Boss Man')]", 'max_length': '(60)', 'null': '(True)'}), "(blank=True, choices=[('Admin', 'Admin'), ('Dev', 'Dev'), (\n 'Boss Man', 'Boss Man')], max_length=60, null=True)\n", (366, 481), False, 'from django.db import migrations, models\n')]
|
#!/usr/bin/env python
import glob
import cv2
import numpy as np
import torch
import architecture as arch
import argparse
import warnings
import time
import sys
try:
import tqdm
except ImportError:
pass
from pathlib import Path
from chunks import DataChunks
model_docs = {
"RRDB_ESRGAN_x4.pth": "Official perceptual upscaling model.",
"RRDB_PSNR_x4.pth": "Official PSNR upscaling model.",
"4x_interp_08.pth": "RRDB_PSNR_x4 interpolated with RRDB_ESRGAN_x4 with 0.8 strength.",
"4x_interp_09.pth": "RRDB_PSNR_x4 interpolated with RRDB_ESRGAN_x4 with 0.9 strength.",
"4x_Box.pth": "General purpose upscaling. Larger dataset than RRDB_ESRGAN_x4.",
"4x_NickelbackFS_72000_G.pth": "General purpose upscaling. Larger dataset than Box.",
"4x_Misc_220000.pth": "Surface upscaling. Works well as general/manga upscaler too.",
"4x_Faces_N_250000.pth": "Face upscaling.",
"4x_face_focus_275k.pth": "Face deblurring and upscaling.",
"4x_Fatality_01_265000_G.pth": "Upscales pixel art.",
"4x_rebout_325k.pth": "Upscales pixel art. Trained on KOF94 Rebout.",
"4x_rebout_interp.pth": "Upscales pixel art. Trained on KOF94 Rebout. Interped.",
"4x_falcoon300.pth": "Manga upscaling. Removes dithering.",
"4x_unholy03.pth": "Manga upscaling. Interpolation of many models.",
"4x_WaifuGAN_v3_30000.pth": "Manga upscaling.",
"4x_Manga109Attempt.pth": "Manga upscaling.",
"4x_ESRGAN_Skyrim_NonTiled_Alpha_NN_128_32_105000.pth": "Upscales greyscale maps. Trained on Skyrim textures.",
"4x_detoon_225k.pth": "Tries to make toon images realistic.",
"4x_detoon_alt.pth": "Tries to make toon images realistic. Softer version.",
"4x_Lady0101_208000.pth": "Upscaled pixel art to painting style. Original version.",
"4x_Lady0101_v3_340000.pth": "Upscaled pixel art to painting style. Moderate blendering, moderate dedithering.",
"4x_Lady0101_v3_blender.pth": "Upscaled pixel art to painting style. Heavy blending, low dedithering.",
"4x_scalenx_90k.pth": "Upscales pixel art in scalenx style.",
"4x_xbrz_90k.pth": "Upscales pixel art in xbr style. No dedithering.",
"4x_xbrdd_90k.pth": "Upscales pixel art in xbr style. Dedithering.",
"1x_JPEG_00-20.pth": "Cleans up JPEG compression. For images with 0-20%% compression ratio.",
"1x_JPEG_20-40.pth": "Cleans up JPEG compression. For images with 20-40%% compression ratio.",
"1x_JPEG_40-60.pth": "Cleans up JPEG compression. For images with 40-60%% compression ratio.",
"1x_JPEG_60-80.pth": "Cleans up JPEG compression. For images with 60-80%% compression ratio.",
"1x_JPEG_80-100.pth": "Cleans up JPEG compression. For images with 80-100%% compression ratio.",
"1x_DeJpeg_Fatality_PlusULTRA_200000_G.pth": "Cleans up JPEG compression. Any compression ratio. Increases contrast and sharpness.",
"1x_BC1_take2_260850.pth": "Cleans up BC1 compression. Restricted version (only works with low-noise images).",
"1x_BC1NoiseAgressiveTake3_400000_G.pth": "Cleans up BC1 compression. Free version (more aggressive than restricted).",
"1x_cinepak_200000.pth": "Cleans up Cinepak, msvideo1 and Roq compression.",
"1x_DeSharpen.pth": "Removes over-sharpening.",
"1x_normals_generator_general_215k.pth": "Attempts to generate a normal map from a texture.",
"1x_Alias_200000_G.pth": "Performs anti-aliasing on the image.",
"1x_SSAntiAlias9x.pth": "Performs anti-aliasing on the image. Newer.",
"1x_DEDITHER_32_512_126900_G.pth": "Tries to remove dithering patterns.",
"1x_BS_Debandizer_34000G.pth": "Tries to remove banding.",
}
aliases = {
"esrgan": "RRDB_ESRGAN_x4.pth",
"psnr": "RRDB_PSNR_x4.pth",
"0.8": "4x_interp_08.pth",
"0.9": "4x_interp_09.pth",
"desharpen": "1x_DeSharpen.pth",
"deblur": "1x_Fatality_DeBlur_275000_G.pth",
"jpeg20": "1x_JPEG_00-20.pth",
"jpeg40": "1x_JPEG_20-40.pth",
"jpeg60": "1x_JPEG_40-60.pth",
"jpeg80": "1x_JPEG_60-80.pth",
"jpeg100": "1x_JPEG_80-100.pth",
"jpegF": "1x_DeJpeg_Fatality_PlusULTRA_200000_G.pth",
"box": "4x_Box.pth",
"nickelback": "4x_NickelbackFS_72000_G.pth",
"misc": "4x_Misc_220000.pth",
"facefocus": "4x_face_focus_275k.pth",
"face": "4x_Faces_N_250000.pth",
"fatality": "4x_Fatality_01_265000_G.pth",
"unholy": "4x_unholy03.pth",
"waifugan": "4x_WaifuGAN_v3_30000.pth",
"manga109": "4x_Manga109Attempt.pth",
"falcoon": "4x_falcoon300.pth",
"rebout": "4x_rebout_325k.pth",
"rebouti": "4x_rebout_interp.pth",
"detoon": "4x_detoon_225k.pth",
"detoon_alt": "4x_detoon_alt.pth",
"bc1r": "1x_BC1_take2_260850.pth",
"bc1f": "1x_BC1NoiseAgressiveTake3_400000_G.pth",
"aa": "1x_Alias_200000_G.pth",
"aa2": "1x_SSAntiAlias9x.pth",
"dedither": "1x_DEDITHER_32_512_126900_G.pth",
"deband": "1x_BS_Debandizer_34000G.pth",
"alpha": "4x_ESRGAN_Skyrim_NonTiled_Alpha_NN_128_32_105000.pth",
"ladyold": "4x_Lady0101_208000.pth",
"ladyblend": "4x_Lady0101_v3_blender.pth",
"lady": "4x_Lady0101_v3_340000.pth",
"scalenx": "4x_scalenx_90k.pth",
"xbrz": "4x_xbrz_90k.pth",
"xbrzdd": "4x_xbrdd_90k.pth",
}
class SmartFormatter(argparse.HelpFormatter):
"""
    Custom help formatter that splits help text on embedded '\n'
    characters.
"""
def _split_lines(self, text, width):
r = []
for t in text.splitlines():
r.extend(argparse.HelpFormatter._split_lines(self, t, width))
r.append("")
return r
def enum_models(model_dir, aliases=aliases):
models = {model.name: model for model in model_dir.rglob("*.pth")}
for alias, original in aliases.items():
models[alias] = model_dir / original
return models
def get_models_help(models, aliases=aliases, model_docs=model_docs):
lines = []
for model, docs in model_docs.items():
if model not in models.keys():
continue
names = [model]
for alias, original in aliases.items():
if original == model:
names.append(alias)
quoted_names = (f'"{name}"' for name in names)
lines.append(f"{' | '.join(quoted_names)}: {docs}")
return "\n".join(lines)
def parse_args(models, models_help):
parser = argparse.ArgumentParser(
description="Upscale images with ESRGAN", formatter_class=SmartFormatter
)
parser.add_argument("images", nargs="+", type=Path, help="The images to process.")
parser.add_argument(
"-o",
"--out-dir",
type=Path,
required=False,
help="The directory to write output to. Defaults to source directory.",
)
parser.add_argument(
"-s",
"--scale",
type=int,
default=1,
help="The number of times to perform scaling. Defaults to 1.",
)
parser.add_argument(
"-m",
"--model",
choices=models.keys(),
default="0.8",
help=f'The model to use for upscaling. Defaults to "0.8" (RRDB_PSNR_x4 - RRDB_ESRGAN_x4 x 0.8 interp).\n{models_help}',
)
parser.add_argument(
"--cpu",
action="store_true",
help="Use CPU for upscaling, instead of attempting to use CUDA.",
)
parser.add_argument(
"-e",
"--end",
default="_scaled",
help="The suffix to append to scaled images. Defaults to `_scaled`.",
)
parser.add_argument(
"-a",
"--append-model",
action="store_true",
help="Append the model name to the filename, before any custom suffix.",
)
parser.add_argument(
"-x",
"--max-dimension",
help="Split image into chunks of max-dimension.",
type=int,
required=False,
default=0,
)
parser.add_argument(
"-p",
"--padding",
help="Pad image when splitting into quadrants.",
type=int,
required=False,
default=0,
)
parser.add_argument(
"-t",
"--threads",
help="Number of CPU threads to use.",
type=int,
required=False,
)
args = parser.parse_args()
if args.max_dimension != 0 or args.padding != 0:
assert (
args.padding >= 0 and args.max_dimension >= 0
), "padding and max-dimension must be positive"
assert (
args.padding < args.max_dimension
), "padding must be smaller than max-dimension"
if args.threads is not None:
assert args.threads > 0, "threads must be larger than 0"
return args
def main():
start = time.perf_counter_ns()
model_dir = Path(__file__).resolve().parent / "models"
models = enum_models(model_dir)
models_help = get_models_help(models)
args = parse_args(models, models_help)
model_path = model_dir / models[args.model]
state_dict = torch.load(model_path)
if "conv_first.weight" in state_dict:
print("Error: Attempted to load a new-format model")
return 1
# Extract model information
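    # The values below are inferred from the key layout of old-format ESRGAN
    # checkpoints: keys like "model.1.sub.22.RDB1.conv1.0.weight" carry the
    # residual block index, while each high-numbered "model.N.weight" entry
    # is an upsampling convolution, so counting them gives the scale (2 ** count).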
scale2 = 0
max_part = 0
in_nc = 3
out_nc = 3
nf = 64
nb = 23
for part in list(state_dict):
parts = part.split(".")
n_parts = len(parts)
if n_parts == 5 and parts[2] == "sub":
nb = int(parts[3])
elif n_parts == 3:
part_num = int(parts[1])
if part_num > 6 and parts[2] == "weight":
scale2 += 1
if part_num > max_part:
max_part = part_num
out_nc = state_dict[part].shape[0]
upscale = 2 ** scale2
in_nc = state_dict["model.0.weight"].shape[1]
nf = state_dict["model.0.weight"].shape[0]
if args.threads is not None and args.threads > 0:
torch.set_num_threads(args.threads)
torch.set_num_interop_threads(args.threads)
if torch.cuda.is_available() and not args.cpu:
device = torch.device("cuda")
else:
device = torch.device("cpu")
model = arch.RRDB_Net(
in_nc,
out_nc,
nf,
nb,
gc=32,
upscale=upscale,
norm_type=None,
act_type="leakyrelu",
mode="CNA",
res_scale=1,
upsample_mode="upconv",
)
model.load_state_dict(state_dict, strict=True)
del state_dict
model.eval()
for k, v in model.named_parameters():
v.requires_grad = False
model = model.to(device)
for i, path in enumerate(
Path(img_path)
for img_glob in args.images
for img_path in glob.glob(str(img_glob))
):
print(i + 1, path.name)
# read image
img = cv2.imread(str(path), cv2.IMREAD_COLOR)
img = img * 1.0 / 255
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for _ in range(args.scale):
img = torch.from_numpy(
np.transpose(img[:, :, [2, 1, 0]], (2, 0, 1))
).float()
img = img.unsqueeze(0)
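                # Tile oversized images into chunks no larger than
                # max-dimension; the padding overlap is there to hide seams
                # when the upscaled chunks are stitched back together.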
if args.max_dimension:
data_chunks = DataChunks(
img, args.max_dimension, args.padding, upscale
)
chunks = data_chunks.iter()
if "tqdm" in sys.modules.keys():
chunks_count = data_chunks.hlen * data_chunks.vlen
chunks = tqdm.tqdm(chunks, total=chunks_count, unit=" chunks")
for chunk in chunks:
input = chunk.to(device)
output = model(input)
data_chunks.gather(output)
output = data_chunks.concatenate()
else:
input = img.to(device)
output = model(input)
img = output.data.squeeze().float().cpu().clamp_(0, 1).numpy()
img = np.transpose(img[[2, 1, 0], :, :], (1, 2, 0))
img = (img * 255.0).round()
out_dir = args.out_dir if args.out_dir is not None else path.parent
suffix = f"_{args.model}{args.end}" if args.append_model else args.end
out_path = out_dir / (path.stem + suffix + ".png")
cv2.imwrite(str(out_path), img)
period = time.perf_counter_ns() - start
print("Done in {:,}s".format(period / 1_000_000_000.0))
return 0
if __name__ == "__main__":
sys.exit(main())
|
[
"tqdm.tqdm",
"argparse.ArgumentParser",
"warnings.simplefilter",
"chunks.DataChunks",
"sys.modules.keys",
"torch.load",
"architecture.RRDB_Net",
"numpy.transpose",
"torch.set_num_interop_threads",
"torch.set_num_threads",
"torch.cuda.is_available",
"pathlib.Path",
"warnings.catch_warnings",
"torch.device",
"time.perf_counter_ns",
"argparse.HelpFormatter._split_lines"
] |
[((6290, 6391), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Upscale images with ESRGAN"""', 'formatter_class': 'SmartFormatter'}), "(description='Upscale images with ESRGAN',\n formatter_class=SmartFormatter)\n", (6313, 6391), False, 'import argparse\n'), ((8599, 8621), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (8619, 8621), False, 'import time\n'), ((8868, 8890), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (8878, 8890), False, 'import torch\n'), ((9995, 10146), 'architecture.RRDB_Net', 'arch.RRDB_Net', (['in_nc', 'out_nc', 'nf', 'nb'], {'gc': '(32)', 'upscale': 'upscale', 'norm_type': 'None', 'act_type': '"""leakyrelu"""', 'mode': '"""CNA"""', 'res_scale': '(1)', 'upsample_mode': '"""upconv"""'}), "(in_nc, out_nc, nf, nb, gc=32, upscale=upscale, norm_type=None,\n act_type='leakyrelu', mode='CNA', res_scale=1, upsample_mode='upconv')\n", (10008, 10146), True, 'import architecture as arch\n'), ((9757, 9792), 'torch.set_num_threads', 'torch.set_num_threads', (['args.threads'], {}), '(args.threads)\n', (9778, 9792), False, 'import torch\n'), ((9801, 9844), 'torch.set_num_interop_threads', 'torch.set_num_interop_threads', (['args.threads'], {}), '(args.threads)\n', (9830, 9844), False, 'import torch\n'), ((9853, 9878), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9876, 9878), False, 'import torch\n'), ((9914, 9934), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (9926, 9934), False, 'import torch\n'), ((9962, 9981), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (9974, 9981), False, 'import torch\n'), ((12253, 12275), 'time.perf_counter_ns', 'time.perf_counter_ns', ([], {}), '()\n', (12273, 12275), False, 'import time\n'), ((10468, 10482), 'pathlib.Path', 'Path', (['img_path'], {}), '(img_path)\n', (10472, 10482), False, 'from pathlib import Path\n'), ((10726, 10751), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (10749, 10751), False, 'import warnings\n'), ((10765, 10796), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (10786, 10796), False, 'import warnings\n'), ((5444, 5495), 'argparse.HelpFormatter._split_lines', 'argparse.HelpFormatter._split_lines', (['self', 't', 'width'], {}), '(self, t, width)\n', (5479, 5495), False, 'import argparse\n'), ((11901, 11946), 'numpy.transpose', 'np.transpose', (['img[[2, 1, 0], :, :]', '(1, 2, 0)'], {}), '(img[[2, 1, 0], :, :], (1, 2, 0))\n', (11913, 11946), True, 'import numpy as np\n'), ((8638, 8652), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (8642, 8652), False, 'from pathlib import Path\n'), ((11082, 11140), 'chunks.DataChunks', 'DataChunks', (['img', 'args.max_dimension', 'args.padding', 'upscale'], {}), '(img, args.max_dimension, args.padding, upscale)\n', (11092, 11140), False, 'from chunks import DataChunks\n'), ((11268, 11286), 'sys.modules.keys', 'sys.modules.keys', ([], {}), '()\n', (11284, 11286), False, 'import sys\n'), ((11396, 11449), 'tqdm.tqdm', 'tqdm.tqdm', (['chunks'], {'total': 'chunks_count', 'unit': '""" chunks"""'}), "(chunks, total=chunks_count, unit=' chunks')\n", (11405, 11449), False, 'import tqdm\n'), ((10897, 10942), 'numpy.transpose', 'np.transpose', (['img[:, :, [2, 1, 0]]', '(2, 0, 1)'], {}), '(img[:, :, [2, 1, 0]], (2, 0, 1))\n', (10909, 10942), True, 'import numpy as np\n')]
|
from typing import Iterable, Sequence
from django.db import IntegrityError
from django.db.models import QuerySet
from treeckle.common.constants import (
ID,
CREATED_AT,
UPDATED_AT,
USER,
EVENT_ID,
STATUS,
)
from treeckle.common.parsers import parse_datetime_to_ms_timestamp
from organizations.models import Organization
from users.models import User
from users.logic import user_to_json, get_users
from events.models import Event, EventSignUp, SignUpStatus, SignUpAction
def event_sign_up_to_json(event_sign_up: EventSignUp) -> dict:
return {
ID: event_sign_up.id,
CREATED_AT: parse_datetime_to_ms_timestamp(event_sign_up.created_at),
UPDATED_AT: parse_datetime_to_ms_timestamp(event_sign_up.updated_at),
USER: user_to_json(event_sign_up.user),
EVENT_ID: event_sign_up.event_id,
STATUS: event_sign_up.status,
}
def get_event_sign_ups(*args, **kwargs) -> QuerySet[EventSignUp]:
return EventSignUp.objects.filter(*args, **kwargs)
def get_or_create_event_sign_up(
event: Event, user: User, status: SignUpStatus
) -> EventSignUp:
event_sign_up, _ = EventSignUp.objects.get_or_create(
event=event, user=user, status=status
)
return event_sign_up
def create_event_sign_up(event: Event, user: User) -> EventSignUp:
if not event.is_sign_up_allowed:
raise Exception("Event cannot be signed up.")
status = (
SignUpStatus.PENDING
if event.is_sign_up_approval_required
else SignUpStatus.CONFIRMED
)
try:
event_sign_up = get_or_create_event_sign_up(
event=event, user=user, status=status
)
    except IntegrityError:
event_sign_up = (
get_event_sign_ups(event=event, user=user)
.select_related("user__organization", "user__profile_image", "event")
.get()
)
return event_sign_up
def attend_event_sign_up(event: Event, user: User) -> EventSignUp:
try:
event_sign_up = (
get_event_sign_ups(event=event, user=user)
.select_related("user__organization", "user__profile_image", "event")
.get()
)
    except EventSignUp.DoesNotExist:
raise Exception("Event is not signed up.")
if event_sign_up.status == SignUpStatus.PENDING:
raise Exception("Cannot attend an event where sign up is not confirmed.")
if event_sign_up.status == SignUpStatus.ATTENDED:
raise Exception("You have already attended the event.")
event_sign_up.status = SignUpStatus.ATTENDED
event_sign_up.save()
return event_sign_up
def confirm_event_sign_up(event: Event, user: User) -> EventSignUp:
try:
event_sign_up = (
get_event_sign_ups(event=event, user=user)
.select_related("user__organization", "user__profile_image", "event")
.get()
)
    except EventSignUp.DoesNotExist:
        raise Exception("Event is not signed up.")
if event_sign_up.status != SignUpStatus.PENDING:
raise Exception("No approval required.")
event_sign_up.status = SignUpStatus.CONFIRMED
event_sign_up.save()
return event_sign_up
def delete_event_sign_up(event: Event, user: User) -> None:
get_event_sign_ups(event=event, user=user).delete()
def update_event_sign_ups(
actions: Iterable[dict], event: Event, organization: Organization
) -> Sequence[EventSignUp]:
same_organization_users = get_users(organization=organization)
updated_event_sign_ups = []
associated_updated_event_sign_ups_user_ids = set()
for data in actions:
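        # each action dict is expected to look like
        # {"action": SignUpAction.ATTEND, "user_id": <id>}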
action = data.get("action")
user_id = data.get("user_id")
## prevents multiple actions on same user
if user_id in associated_updated_event_sign_ups_user_ids:
continue
user = same_organization_users.get(id=user_id)
if action == SignUpAction.ATTEND:
updated_event_sign_up = attend_event_sign_up(event=event, user=user)
elif action == SignUpAction.CONFIRM:
updated_event_sign_up = confirm_event_sign_up(event=event, user=user)
elif action == SignUpAction.REJECT:
delete_event_sign_up(event=event, user=user)
continue
else:
continue
updated_event_sign_ups.append(updated_event_sign_up)
associated_updated_event_sign_ups_user_ids.add(user_id)
return updated_event_sign_ups
|
[
"users.logic.user_to_json",
"events.models.EventSignUp.objects.filter",
"events.models.EventSignUp.objects.get_or_create",
"treeckle.common.parsers.parse_datetime_to_ms_timestamp",
"users.logic.get_users"
] |
[((974, 1017), 'events.models.EventSignUp.objects.filter', 'EventSignUp.objects.filter', (['*args'], {}), '(*args, **kwargs)\n', (1000, 1017), False, 'from events.models import Event, EventSignUp, SignUpStatus, SignUpAction\n'), ((1145, 1217), 'events.models.EventSignUp.objects.get_or_create', 'EventSignUp.objects.get_or_create', ([], {'event': 'event', 'user': 'user', 'status': 'status'}), '(event=event, user=user, status=status)\n', (1178, 1217), False, 'from events.models import Event, EventSignUp, SignUpStatus, SignUpAction\n'), ((3482, 3518), 'users.logic.get_users', 'get_users', ([], {'organization': 'organization'}), '(organization=organization)\n', (3491, 3518), False, 'from users.logic import user_to_json, get_users\n'), ((625, 681), 'treeckle.common.parsers.parse_datetime_to_ms_timestamp', 'parse_datetime_to_ms_timestamp', (['event_sign_up.created_at'], {}), '(event_sign_up.created_at)\n', (655, 681), False, 'from treeckle.common.parsers import parse_datetime_to_ms_timestamp\n'), ((703, 759), 'treeckle.common.parsers.parse_datetime_to_ms_timestamp', 'parse_datetime_to_ms_timestamp', (['event_sign_up.updated_at'], {}), '(event_sign_up.updated_at)\n', (733, 759), False, 'from treeckle.common.parsers import parse_datetime_to_ms_timestamp\n'), ((775, 807), 'users.logic.user_to_json', 'user_to_json', (['event_sign_up.user'], {}), '(event_sign_up.user)\n', (787, 807), False, 'from users.logic import user_to_json, get_users\n')]
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""API for generating OAuth2 access tokens from service account
keys predeployed to Chrome Ops bots via Puppet.
"""
import contextlib
import os
import subprocess
import tempfile
@contextlib.contextmanager
def with_access_token(service_account_json):
"""Yields an access token for the service account.
Args:
service_account_json: The path to the service account JSON file.
"""
fd, path = tempfile.mkstemp(suffix='.json', prefix='tok')
try:
args = ['luci-auth', 'token']
if service_account_json:
args += ['-service-account-json', service_account_json]
subprocess.check_call(args, stdout=fd)
os.close(fd)
fd = None
yield path
finally:
if fd is not None:
os.close(fd)
os.remove(path)
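# A hedged usage sketch (the key path is illustrative): `luci-auth token`
# writes the access token into the yielded temporary file.
#
#   with with_access_token('/creds/service_account.json') as token_path:
#     with open(token_path) as f:
#       access_token = f.read().strip()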
|
[
"os.remove",
"subprocess.check_call",
"os.close",
"tempfile.mkstemp"
] |
[((566, 612), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".json"""', 'prefix': '"""tok"""'}), "(suffix='.json', prefix='tok')\n", (582, 612), False, 'import tempfile\n'), ((749, 787), 'subprocess.check_call', 'subprocess.check_call', (['args'], {'stdout': 'fd'}), '(args, stdout=fd)\n', (770, 787), False, 'import subprocess\n'), ((792, 804), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (800, 804), False, 'import os\n'), ((891, 906), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (900, 906), False, 'import os\n'), ((874, 886), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (882, 886), False, 'import os\n')]
|
import re
import time
import praw
from prawcore.exceptions import PrawcoreException
from database import TaskerNetDatabase
from utils import TASKERNET_RE, PRAW_SITE_NAME, MONITORED_SUBREDDITS
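# Stream new submissions from the monitored subreddits, record any TaskerNet
# share links they contain, and back off briefly after Reddit API errors.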
reddit = praw.Reddit(PRAW_SITE_NAME)
subreddit = reddit.subreddit(MONITORED_SUBREDDITS)
db = TaskerNetDatabase()
running = True
while running:
try:
for submission in subreddit.stream.submissions():
source_link = f'https://redd.it/{submission.id}'
taskernet_links = TASKERNET_RE.findall(f'{submission.url} {submission.selftext}')
for link in taskernet_links:
db.add_share(link, source_link)
except KeyboardInterrupt:
print('Ending now')
running = False
except PrawcoreException:
time.sleep(15)
|
[
"time.sleep",
"praw.Reddit",
"database.TaskerNetDatabase",
"utils.TASKERNET_RE.findall"
] |
[((204, 231), 'praw.Reddit', 'praw.Reddit', (['PRAW_SITE_NAME'], {}), '(PRAW_SITE_NAME)\n', (215, 231), False, 'import praw\n'), ((289, 308), 'database.TaskerNetDatabase', 'TaskerNetDatabase', ([], {}), '()\n', (306, 308), False, 'from database import TaskerNetDatabase\n'), ((480, 543), 'utils.TASKERNET_RE.findall', 'TASKERNET_RE.findall', (['f"""{submission.url} {submission.selftext}"""'], {}), "(f'{submission.url} {submission.selftext}')\n", (500, 543), False, 'from utils import TASKERNET_RE, PRAW_SITE_NAME, MONITORED_SUBREDDITS\n'), ((723, 737), 'time.sleep', 'time.sleep', (['(15)'], {}), '(15)\n', (733, 737), False, 'import time\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import math
# tip geometry
dia = 0.057 # m
r = dia/2
offset = 3 * dia
pen_rate_labels = ['1 m/min','2 m/min','3 m/min','4 m/min','5 m/min']
pen_rates = [1/60, 2/60, 3/60, 4/60, 5/60]  # m/sec
rpm = 25 * (2*math.pi) / 60  # rad/sec
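# Each penetration rate below traces the drill tip as a helix: (x, y) sweep a
# circle of radius r at the given rpm while z advances at the rate; each curve
# is offset along x so the helices sit side by side.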
X = []
Y = []
Z = []
k=0
for rate in pen_rates:
t = np.linspace(0, 1/rate, num=300) # secs
theta = t * rpm
X.append( r*np.sin(theta) + k*offset )
Y.append( r*np.cos(theta) )
Z.append( t*rate )
# needed to set 3D aspect ratio
if k == 0:
xs = X[-1].tolist()
ys = Y[-1].tolist()
zs = Z[-1].tolist()
else:
xs += X[-1].tolist()
ys += Y[-1].tolist()
zs += Z[-1].tolist()
k += 1
# ... aspect ratio
xs = np.array(xs)
ys = np.array(ys)
zs = np.array(zs)
max_range = np.array([xs.max()-xs.min(), ys.max()-ys.min(), zs.max()-zs.min()]).max() / 2.0
mid_x = (xs.max()+xs.min()) * 0.5
mid_y = (ys.max()+ys.min()) * 0.5
mid_z = (zs.max()+zs.min()) * 0.5
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
# plot each curve
for rate, x, y, z in zip( pen_rate_labels, X, Y, Z ):
ax.plot(x, y, z, label=rate)
leg = plt.legend(loc='best', fancybox=True)
# ...aspect ratio by setting limits
ax.set_xlim(mid_x - max_range, mid_x + max_range)
ax.set_ylim(mid_y - max_range, mid_y + max_range)
ax.set_zlim(mid_z - max_range, mid_z + max_range)
ax.invert_zaxis()
ax.set_ylabel('Distance y (m)', multialignment='center')
ax.set_xlabel('Distance x (m)', multialignment='center')
ax.set_zlabel('Depth (m)', multialignment='center')
plt.show()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.array",
"numpy.linspace",
"numpy.cos"
] |
[((769, 781), 'numpy.array', 'np.array', (['xs'], {}), '(xs)\n', (777, 781), True, 'import numpy as np\n'), ((787, 799), 'numpy.array', 'np.array', (['ys'], {}), '(ys)\n', (795, 799), True, 'import numpy as np\n'), ((805, 817), 'numpy.array', 'np.array', (['zs'], {}), '(zs)\n', (813, 817), True, 'import numpy as np\n'), ((1019, 1031), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1029, 1031), True, 'import matplotlib.pyplot as plt\n'), ((1174, 1211), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'fancybox': '(True)'}), "(loc='best', fancybox=True)\n", (1184, 1211), True, 'import matplotlib.pyplot as plt\n'), ((1584, 1594), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1592, 1594), True, 'import matplotlib.pyplot as plt\n'), ((347, 380), 'numpy.linspace', 'np.linspace', (['(0)', '(1 / rate)'], {'num': '(300)'}), '(0, 1 / rate, num=300)\n', (358, 380), True, 'import numpy as np\n'), ((465, 478), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (471, 478), True, 'import numpy as np\n'), ((422, 435), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (428, 435), True, 'import numpy as np\n')]
|
import os
import pickle
LOG_DIR = "logs"
class Logger:
def __init__(self, *metrics_):
self.metrics = dict()
for metric in metrics_:
self.metrics[metric] = []
if not os.path.exists(LOG_DIR):
os.mkdir(LOG_DIR)
def log(self, val, metric):
self.metrics[metric].append(val)
        with open(os.path.join(LOG_DIR, f"{metric}.pkl"), "wb") as log_file:
            pickle.dump(self.metrics[metric], log_file)
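# A minimal usage sketch (metric names are illustrative):
#
#   logger = Logger("loss", "accuracy")
#   logger.log(0.42, "loss")  # appends and re-pickles logs/loss.pkl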
|
[
"os.mkdir",
"os.path.exists",
"os.path.join"
] |
[((187, 210), 'os.path.exists', 'os.path.exists', (['LOG_DIR'], {}), '(LOG_DIR)\n', (201, 210), False, 'import os\n'), ((218, 235), 'os.mkdir', 'os.mkdir', (['LOG_DIR'], {}), '(LOG_DIR)\n', (226, 235), False, 'import os\n'), ((347, 385), 'os.path.join', 'os.path.join', (['LOG_DIR', 'f"""{metric}.pkl"""'], {}), "(LOG_DIR, f'{metric}.pkl')\n", (359, 385), False, 'import os\n')]
|
from __future__ import unicode_literals
from django.contrib.auth.models import User
#https://scottbarnham.com/blog/2008/08/21/extending-the-django-user-model-with-inheritance.1.html
from django.contrib.auth.models import UserManager
from django.db import models
class Categoria(models.Model):
nome = models.CharField(max_length=255, blank=True, null=True)
descricao = models.CharField(max_length=255, blank=True, null=True)
objetos = models.Manager()
def __str__(self):
return str(self.nome) +" | "+ str(self.descricao)
class Curso(models.Model):
nome=models.CharField(primary_key=True, max_length=255)
objetos = models.Manager()
def __str__(self):
return str(self.nome)
class Usuario(User):
#user_ptr_id = models.CharField(primary_key=True, max_length=14, null=False, blank=False, default=1 )
matricula = models.CharField(
max_length=14,
null=False,
blank=False
)
nome = models.CharField(
max_length=14,
null=False,
blank=False,
default=""
)
#password = models.CharField( max_length=8, null=False, blank=False )
#email = models.CharField( max_length=255, null=False, blank=False )
CATEGORIA = ( ('professor', 'professor'),
('laboratorista', 'laboratorista'),
)
categoria = models.CharField(
choices=CATEGORIA,
default='professor', null=False,
blank=False, max_length=255)
timezone = models.CharField(max_length=50, default='America/Recife')
objetos = UserManager()
"""
class Meta:
abstract =True
"""
def __str__(self):
return str(self.matricula) + " | " + str(self.nome) + " | "+ str(self.email)
class Foo(models.Model):
name = models.CharField(
max_length=255,
null=False,
blank=False
)
objetos = models.Manager()
class TipoLaboratorio(models.Model):
nome = models.CharField(max_length=255, blank=True, null=True)
descricao = models.CharField(max_length=255, blank=True, null=True)
objetos = models.Manager()
def __str__(self):
return str(self.nome) +" | "+ str(self.descricao)
class Recurso(models.Model):
LOCALIZACAO = ( ('terreo', 'terreo'),
('1 andar', '1 andar'),
)
localizacao = models.CharField(choices=LOCALIZACAO,default='sem localizacao', null=True, blank=True, max_length=255)
SALA = ( ('LAB 03', 'LAB 03'),
('LAB 04', 'LAB 04'),
('LAB 05', 'LAB 05'),
('LAB 06', 'LAB 06'),
('LAB 07', 'LAB 07'),
('LAB 08', 'LAB 08'),
('LAB 09', 'LAB 09'),
('LAB 10', 'LAB 10'),
('LAB 11', 'LAB 11'),
('LAB 13', 'LAB 13'),
('LAB 14', 'LAB 14'),
('LAB 15', 'LAB 15'),
('LAB 16', 'LAB 16'),
('LAB 17', 'LAB 17'),
('LAB 18', 'LAB 18'),
('LAB 19', 'LAB 19'),
('LAB 20', 'LAB 20'),
)
sala = models.CharField(choices=SALA,default='LAB XX', null=True, blank=True, max_length=255)
descricao = models.CharField(max_length=255, blank=True, null=True, default ="sem descricao")
    tipo_laboratorio = models.ForeignKey(TipoLaboratorio, on_delete=models.PROTECT, blank=True, null=True)  # does this need adjusting for null?
    #Projector
numero = models.IntegerField(blank=True, null=True)
tipo_recurso = models.CharField(max_length=255, blank=True, null=True)
objetos = models.Manager()
def __str__(self):
if(self.tipo_recurso == "laboratorio"):
return str(self.sala) +" | "+ str(self.tipo_laboratorio) + " | "+ str(self.localizacao) +" | " + str(self.descricao)
else:
return str(self.numero) + " | " + str(self.descricao)
class Situacao(models.Model):
nome = models.CharField(max_length=255, blank=True, null=True)
descricao = models.CharField(max_length=255, blank=True, null=True)
objetos = models.Manager()
def __str__(self):
return str(self.id) +" | "+ str(self.nome) +" | "+ str(self.descricao)
class Reserva(models.Model):
data_hora_chegada = models.DateTimeField(blank=True, null=True)
data_hora_saida = models.DateTimeField(blank=True, null=True)
situacao = models.ForeignKey(Situacao, on_delete=models.PROTECT)
justificativa = models.CharField(max_length=255, blank=True, null=True)
observacao = models.CharField(max_length=255, blank=True, null=True)
confirmacao = models.BooleanField(blank=True, null=True)
id_usuario = models.ForeignKey(Usuario, on_delete=models.PROTECT)
id_recurso = models.ForeignKey(Recurso, on_delete=models.PROTECT)
tipo_recurso = models.CharField(max_length=255, blank=True, null=True)
disciplina = models.CharField(max_length=255, blank=True, null=True)
    curso = models.ForeignKey(Curso, on_delete=models.PROTECT, default=None, blank=True, null=True)
nome_professor = models.CharField(max_length=255, blank=True, null=True)
turno = models.CharField(max_length=255, blank=True, null=True)
primeira_aula = models.BooleanField(blank=True, null=True)
segunda_aula = models.BooleanField(blank=True, null=True)
objetos = models.Manager()
def __str__(self):
return str(self.id) +" | "+ str(self.data_hora_saida) + " | "+ str(self.data_hora_chegada) +" | " + str(self.situacao.nome) +" | " + str(self.tipo_recurso)
|
[
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.Manager",
"django.db.models.BooleanField",
"django.db.models.IntegerField",
"django.db.models.DateTimeField",
"django.contrib.auth.models.UserManager"
] |
[((358, 413), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), '(max_length=255, blank=True, null=True)\n', (374, 413), False, 'from django.db import models\n'), ((430, 485), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), '(max_length=255, blank=True, null=True)\n', (446, 485), False, 'from django.db import models\n'), ((500, 516), 'django.db.models.Manager', 'models.Manager', ([], {}), '()\n', (514, 516), False, 'from django.db import models\n'), ((641, 691), 'django.db.models.CharField', 'models.CharField', ([], {'primary_key': '(True)', 'max_length': '(255)'}), '(primary_key=True, max_length=255)\n', (657, 691), False, 'from django.db import models\n'), ((706, 722), 'django.db.models.Manager', 'models.Manager', ([], {}), '()\n', (720, 722), False, 'from django.db import models\n'), ((944, 1000), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(14)', 'null': '(False)', 'blank': '(False)'}), '(max_length=14, null=False, blank=False)\n', (960, 1000), False, 'from django.db import models\n'), ((1035, 1103), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(14)', 'null': '(False)', 'blank': '(False)', 'default': '""""""'}), "(max_length=14, null=False, blank=False, default='')\n", (1051, 1103), False, 'from django.db import models\n'), ((1447, 1549), 'django.db.models.CharField', 'models.CharField', ([], {'choices': 'CATEGORIA', 'default': '"""professor"""', 'null': '(False)', 'blank': '(False)', 'max_length': '(255)'}), "(choices=CATEGORIA, default='professor', null=False, blank=\n False, max_length=255)\n", (1463, 1549), False, 'from django.db import models\n'), ((1644, 1701), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'default': '"""America/Recife"""'}), "(max_length=50, default='America/Recife')\n", (1660, 1701), False, 'from django.db import models\n'), ((1722, 1735), 'django.contrib.auth.models.UserManager', 'UserManager', ([], {}), '()\n', (1733, 1735), False, 'from django.contrib.auth.models import UserManager\n'), ((1964, 2021), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'null': '(False)', 'blank': '(False)'}), '(max_length=255, null=False, blank=False)\n', (1980, 2021), False, 'from django.db import models\n'), ((2056, 2072), 'django.db.models.Manager', 'models.Manager', ([], {}), '()\n', (2070, 2072), False, 'from django.db import models\n'), ((2127, 2182), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), '(max_length=255, blank=True, null=True)\n', (2143, 2182), False, 'from django.db import models\n'), ((2199, 2254), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), '(max_length=255, blank=True, null=True)\n', (2215, 2254), False, 'from django.db import models\n'), ((2269, 2285), 'django.db.models.Manager', 'models.Manager', ([], {}), '()\n', (2283, 2285), False, 'from django.db import models\n'), ((2562, 2669), 'django.db.models.CharField', 'models.CharField', ([], {'choices': 'LOCALIZACAO', 'default': '"""sem localizacao"""', 'null': '(True)', 'blank': '(True)', 'max_length': '(255)'}), "(choices=LOCALIZACAO, default='sem localizacao', null=True,\n blank=True, max_length=255)\n", (2578, 2669), False, 'from django.db import models\n'), ((3334, 3425), 'django.db.models.CharField', 'models.CharField', ([], {'choices': 'SALA', 'default': '"""LAB XX"""', 'null': '(True)', 'blank': '(True)', 'max_length': '(255)'}), "(choices=SALA, default='LAB XX', null=True, blank=True,\n max_length=255)\n", (3350, 3425), False, 'from django.db import models\n'), ((3441, 3526), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)', 'default': '"""sem descricao"""'}), "(max_length=255, blank=True, null=True, default='sem descricao'\n )\n", (3457, 3526), False, 'from django.db import models\n'), ((3546, 3633), 'django.db.models.ForeignKey', 'models.ForeignKey', (['TipoLaboratorio'], {'on_delete': 'models.PROTECT', 'blank': '(True)', 'null': '(True)'}), '(TipoLaboratorio, on_delete=models.PROTECT, blank=True,\n null=True)\n', (3563, 3633), False, 'from django.db import models\n'), ((3690, 3732), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (3709, 3732), False, 'from django.db import models\n'), ((3757, 3812), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), '(max_length=255, blank=True, null=True)\n', (3773, 3812), False, 'from django.db import models\n'), ((3832, 3848), 'django.db.models.Manager', 'models.Manager', ([], {}), '()\n', (3846, 3848), False, 'from django.db import models\n'), ((4183, 4238), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), '(max_length=255, blank=True, null=True)\n', (4199, 4238), False, 'from django.db import models\n'), ((4255, 4310), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), '(max_length=255, blank=True, null=True)\n', (4271, 4310), False, 'from django.db import models\n'), ((4325, 4341), 'django.db.models.Manager', 'models.Manager', ([], {}), '()\n', (4339, 4341), False, 'from django.db import models\n'), ((4505, 4548), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (4525, 4548), False, 'from django.db import models\n'), ((4571, 4614), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (4591, 4614), False, 'from django.db import models\n'), ((4630, 4683), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Situacao'], {'on_delete': 'models.PROTECT'}), '(Situacao, on_delete=models.PROTECT)\n', (4647, 4683), False, 'from django.db import models\n'), ((4704, 4759), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), '(max_length=255, blank=True, null=True)\n', (4720, 4759), False, 'from django.db import models\n'), ((4777, 4832), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), '(max_length=255, blank=True, null=True)\n', (4793, 4832), False, 'from django.db import models\n'), ((4851, 4893), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (4870, 4893), False, 'from django.db import models\n'), ((4916, 4968), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Usuario'], {'on_delete': 'models.PROTECT'}), '(Usuario, on_delete=models.PROTECT)\n', (4933, 4968), False, 'from django.db import models\n'), ((4986, 5038), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Recurso'], {'on_delete': 'models.PROTECT'}), '(Recurso, on_delete=models.PROTECT)\n', (5003, 5038), False, 'from django.db import models\n'), ((5059, 5114), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), '(max_length=255, blank=True, null=True)\n', (5075, 5114), False, 'from django.db import models\n'), ((5137, 5192), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), '(max_length=255, blank=True, null=True)\n', (5153, 5192), False, 'from django.db import models\n'), ((5205, 5296), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Curso'], {'on_delete': 'models.PROTECT', 'default': 'None', 'blank': '(True)', 'null': '(True)'}), '(Curso, on_delete=models.PROTECT, default=None, blank=True,\n null=True)\n', (5222, 5296), False, 'from django.db import models\n'), ((5317, 5372), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), '(max_length=255, blank=True, null=True)\n', (5333, 5372), False, 'from django.db import models\n'), ((5385, 5440), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)', 'null': '(True)'}), '(max_length=255, blank=True, null=True)\n', (5401, 5440), False, 'from django.db import models\n'), ((5461, 5503), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (5480, 5503), False, 'from django.db import models\n'), ((5523, 5565), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (5542, 5565), False, 'from django.db import models\n'), ((5586, 5602), 'django.db.models.Manager', 'models.Manager', ([], {}), '()\n', (5600, 5602), False, 'from django.db import models\n')]
|
#coding=utf-8
#Python 3.4
## Scrape all of a user's short book reviews from Douban
## URL pattern: http://book.douban.com/people/1000001/collect?sort=time&start=0&filter=all&mode=grid&tags_sort=count
##              http://book.douban.com/people/1000001/collect?sort=time&start=15&filter=all&mode=grid&tags_sort=count
from bs4 import BeautifulSoup
import time
import urllib.request,urllib.parse
from urllib.error import URLError,HTTPError
import os
import markdown
# line separator
lineSep='\n'
# set up an HTTP proxy if requested
ans=input('Do you want to use a HTTP Proxy (N/y)? ')
ans=ans.lower()
if ans=='y' or ans=='yes':
print('HTTP Proxy formart: IP:PORT \nExample: 127.0.0.1:80')
print('Do NOT contain any unnecessary characters.')
proxyInfo=input('Please type in your HTTP Proxy: ')
proxySupport=urllib.request.ProxyHandler({'http':proxyInfo})
opener=urllib.request.build_opener(proxySupport)
urllib.request.install_opener(opener)
else:
pass
# request headers
head= {
'Accept':'text/html, application/xhtml+xml, image/jxr, */*',
'Accept-Language': 'zh-Hans-CN, zh-Hans; q=0.5',
'Connection':'Keep-Alive',
'Cookie':'bid=lkpO8Id/Kbs; __utma=30149280.1824146216.1438612767.1440248573.1440319237.13; __utmz=30149280.1438612767.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); as=http://book.douban.com/people/133476248/; ll=108288; viewed=26274009_1051580; ap=1; ps=y; ct=y; __utmb=30149280.23.10.1440319237; __utmc=30149280; __utmt_douban=1; _pk_id.100001.3ac3=b288f385b4d73e38.1438657126.3.1440319394.1440248628.; __utma=81379588.142106303.1438657126.1440248573.1440319240.3; __utmz=81379588.1440319240.3.2.utmcsr=movie.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/; _pk_ses.100001.3ac3=*; __utmb=81379588.23.10.1440319240; __utmt=1; __utmc=81379588; _pk_ref.100001.3ac3=%5B%22%22%2C%22%22%2C1440319240%2C%22http%3A%2F%2Fmovie.douban.com%2F%22%5D',
'Host':'book.douban.com',
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.10240'}
#url=url_1+uId+url_2+Index+url_3
url_1='http://book.douban.com/people/'
url_2='/collect?sort=time&start='
url_3='&filter=all&mode=grid&tags_sort=count'
def is_chinese(uchar):
"""判断一个unicode是否是汉字
"""
if uchar >= u'\u4e00' and uchar<=u'\u9fa5':
return True
else:
return False
def isChineseBook(title):
"""判断书名是否为中文书名
"""
for c in title:
if(is_chinese(c)):
return True
return False
def getHtml(url):
"""返回指定的网页内容
"""
print('Loading: '+url+'......')
full_url=urllib.request.Request(url,headers=head)
TRY_TIMES=3
response=None
    while TRY_TIMES>0 and response is None:
        TRY_TIMES-=1
        try:
            response=urllib.request.urlopen(full_url)
except HTTPError as e:
print('HTTP Error:',e.code)
except URLError as e:
print('URL Error: ',e.reason)
    if response is None:
print('Error!')
os.system("pause")
exit()
html=response.read()
return html
def getBookComment(html):
"""解析网页并返回5个列表:
书名,出版信息,标记日期,标签,评论
"""
titleList=[] #书名
pubList=[] #出版信息
dateList=[] #标记日期
tagsList=[] #标签
commentList=[] #评论
soup=BeautifulSoup(html,'html.parser')
lis=soup.findAll('li','subject-item')
for li in lis:
infoDiv=li.find('div','info')
commentP=infoDiv.find('p','comment')
if commentP!=None:
a=infoDiv.a
            # book title
title1=a.get('title').strip()
title2Span=a.span
if title2Span!=None:
title2=a.span.text.strip()
else:
title2=''
title=title1+title2
            # wrap Chinese titles in Chinese title marks
            if isChineseBook(title):
                title=u'《'+title+u'》'
            else: # italicize English titles in Markdown
                title='*'+title+'*'
titleList.append(title)
            # publication info
pubDiv=infoDiv.find('div','pub')
pub=pubDiv.text.strip()
pubList.append(pub)
            # date marked
dataSpan=infoDiv.find('span','date')
words=dataSpan.text.split('\n')
date=words[0]+words[1]
dateList.append(date)
            # tags
tagsSpan=infoDiv.find('span','tags')
if tagsSpan!=None:
tags=tagsSpan.text.strip()
else:
tags=''
tagsList.append(tags)
            # comment
comment=commentP.text.strip()
commentList.append(comment)
return (titleList,pubList,dateList,tagsList,commentList)
def getHtmlTitle(html):
"""
获取网页标题
"""
soup=BeautifulSoup(html,'html.parser')
title=soup.head.title.text
return title
def clearOldFile(uId):
"""
清除之前已保存的文件
"""
fileName='booksComments_'+uId+'.md'
temp=open(fileName,'w',encoding='utf-8')
temp.close()
def saveBookComment(titleList,pubList,dateList,tagsList,commentList,uId):
"""保存书评至文件
"""
fileName='booksComments_'+uId+'.md'
wf=open(fileName,mode='a',encoding='utf-8')
size=len(titleList)
for i in range(size):
title=titleList[i]
pub=pubList[i]
date=dateList[i]
tags=tagsList[i]
comment=commentList[i]
wf.write('## '+title+lineSep)
wf.write(pub+' '+lineSep)
wf.write(date+' '+lineSep)
wf.write(tags+lineSep+lineSep)
wf.write(comment+lineSep+lineSep)
wf.close()
return fileName
def getPageNum(html):
"""解析第一页网页,返回该用户的书评页数
"""
soup=BeautifulSoup(html,'html.parser')
paginator=soup.find('div','paginator')
pas=paginator.findAll('a')
num=int(pas[-2].text)
return num
def convertMd2Html(mdName,title):
"""
将Markdown文件转换为Html格式文件
"""
htmlName=mdName.replace('.md','.html')
mdFile=open(mdName,'r',encoding='utf-8')
contents=mdFile.read()
mdFile.close()
md = markdown.markdown(contents)
html = '<html><meta charset="UTF-8">'
html+='<title>'+title+'</title>'
html += "<body>" + md + "</body></html>"
htmlFile=open(htmlName,'w',encoding='utf-8')
htmlFile.write(html)
htmlFile.close()
return htmlName
# prompt for the User-Id
print('\nYou can find User-Id in the url.')
print('E.g. Someone\'s homepage\'url is http://book.douban.com/people/1000001/ , the User-Id should be 1000001 .')
uId=input('User-Id: ')
while(uId==''):
uId=input('User-Id: ')
# comment counter
count=0
# fetch the first page
index=0
url=url_1+uId+url_2+str(index)+url_3
html=getHtml(url)
(titleList,pubList,dateList,tagsList,commentList)=getBookComment(html)
htmlTitle=getHtmlTitle(html)
clearOldFile(uId)
fileName=saveBookComment(titleList,pubList,dateList,tagsList,commentList,uId)
count+=len(titleList)
try:
    pageNum=getPageNum(html) # number of pages of books the user has read
except:
pageNum=1
index+=1
# fetch the remaining pages
for i in range(index*15,15*pageNum,15):
print('Sleep for 5 seconds.')
time.sleep(5)
print('%d/%d' %(i/15+1,pageNum))
    url=url_1+uId+url_2+str(i)+url_3
html=getHtml(url)
(titleList,pubList,dateList,tagsList,commentList)=getBookComment(html)
count+=len(titleList)
saveBookComment(titleList,pubList,dateList,tagsList,commentList,uId)
print('\nMission accomplished!')
print('%d comments have been saved to %s.' %(count,fileName))
ans=input('\nDo you want to convert Markdown file to html file(Y/n)?')
ans=ans.lower()
if ans!='n':
htmlName=convertMd2Html(fileName,htmlTitle)
print('Convert success: %s' %htmlName)
os.system("pause")
|
[
"bs4.BeautifulSoup",
"time.sleep",
"os.system",
"markdown.markdown"
] |
[((7533, 7551), 'os.system', 'os.system', (['"""pause"""'], {}), "('pause')\n", (7542, 7551), False, 'import os\n'), ((3246, 3280), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (3259, 3280), False, 'from bs4 import BeautifulSoup\n'), ((4713, 4747), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (4726, 4747), False, 'from bs4 import BeautifulSoup\n'), ((5610, 5644), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (5623, 5644), False, 'from bs4 import BeautifulSoup\n'), ((5980, 6007), 'markdown.markdown', 'markdown.markdown', (['contents'], {}), '(contents)\n', (5997, 6007), False, 'import markdown\n'), ((6959, 6972), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (6969, 6972), False, 'import time\n'), ((2959, 2977), 'os.system', 'os.system', (['"""pause"""'], {}), "('pause')\n", (2968, 2977), False, 'import os\n')]
|
import functools
import sys
def optionalarg(decorator):
'''
Transforms a decorator `decorator(func, args)` into a decorator that can be
executed both with @decorator and @decorator(args)
'''
@functools.wraps(decorator)
def __decorator(*args, **kwargs):
if len(args) == 1 and callable(args[0]) and not kwargs:
# Execute the decorator immediately
func = args[0]
return decorator(func, **kwargs)
else:
def __impl(func):
return decorator(func, args, **kwargs)
return __impl
return __decorator
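# A minimal usage sketch (the `repeat` decorator and its `times` parameter
# are illustrative assumptions, not part of this module):
#
#   @optionalarg
#   def repeat(func, args=(), times=2):
#       @functools.wraps(func)
#       def wrapper(*a, **kw):
#           result = None
#           for _ in range(times):
#               result = func(*a, **kw)
#           return result
#       return wrapper
#
#   @repeat            # bare form: calls repeat(greet) with the defaults
#   def greet(): print('hi')
#
#   @repeat(times=3)   # parameterized form: calls repeat(cheer, (), times=3)
#   def cheer(): print('hooray')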
|
[
"functools.wraps"
] |
[((214, 240), 'functools.wraps', 'functools.wraps', (['decorator'], {}), '(decorator)\n', (229, 240), False, 'import functools\n')]
|
import torch
import torch.nn as nn
import numpy as np
print('loading model and train data...')
# load model from file
net = torch.load('models/torch_rnn.model')
# load data from file
train_loader = torch.load('data/processed/torch_rnn_train.loader')
valid_loader = torch.load('data/processed/torch_rnn_validate.loader')
# loss and optimization functions
lr = 0.001
criterion = nn.BCELoss()
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
# training params
epochs = 8
counter = 0
print_every = 50
clip = 5 # gradient clipping
batch_size = 50
# check for gpu
print('checking if gpu available...')
train_on_gpu = torch.cuda.is_available()
print('training on gpu' if train_on_gpu else 'no gpu available, training on cpu')
# move model to gpu, if available
if train_on_gpu:
net.cuda()
print('training model...')
net.train()
# train for some number of epochs
for e in range(epochs):
# initialize hidden state
h = net.init_hidden(batch_size)
# batch loop
for inputs, labels in train_loader:
counter += 1
        if train_on_gpu:
inputs, labels = inputs.cuda(), labels.cuda()
# create new variables for hidden state to prevent
# backpropping through entire training history
h = tuple([each.data for each in h])
# zero accumulated gradients
net.zero_grad()
# get the output from the model
output, h = net(inputs, h)
# calculate the loss and perform backprop
loss = criterion(output.squeeze(), labels.float())
loss.backward()
# clip_grad_norm helps prevent the exploding gradient problem in RNNs
nn.utils.clip_grad_norm_(net.parameters(), clip)
optimizer.step()
# loss stats
if counter % print_every == 0:
# get validation loss
val_h = net.init_hidden(batch_size)
val_losses = []
net.eval()
for inputs, labels in valid_loader:
                # create new variables for the validation hidden state
                val_h = tuple([each.data for each in val_h])
if train_on_gpu:
inputs, labels = inputs.cuda(), labels.cuda()
output, val_h = net(inputs, val_h)
val_loss = criterion(output.squeeze(), labels.float())
val_losses.append(val_loss.item())
net.train()
print("Epoch: {}/{}...".format(e+1, epochs),
"Step: {}...".format(counter),
"Loss: {:.6f}...".format(loss.item()),
"Val Loss: {:.6f}".format(np.mean(val_losses)))
# save trained model
print('saving trained model...')
torch.save(net, 'models/torch_rnn_trained.model')
|
[
"torch.nn.BCELoss",
"torch.load",
"torch.save",
"numpy.mean",
"torch.cuda.is_available"
] |
[((125, 161), 'torch.load', 'torch.load', (['"""models/torch_rnn.model"""'], {}), "('models/torch_rnn.model')\n", (135, 161), False, 'import torch\n'), ((200, 251), 'torch.load', 'torch.load', (['"""data/processed/torch_rnn_train.loader"""'], {}), "('data/processed/torch_rnn_train.loader')\n", (210, 251), False, 'import torch\n'), ((267, 321), 'torch.load', 'torch.load', (['"""data/processed/torch_rnn_validate.loader"""'], {}), "('data/processed/torch_rnn_validate.loader')\n", (277, 321), False, 'import torch\n'), ((381, 393), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (391, 393), True, 'import torch.nn as nn\n'), ((624, 649), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (647, 649), False, 'import torch\n'), ((2677, 2726), 'torch.save', 'torch.save', (['net', '"""models/torch_rnn_trained.model"""'], {}), "(net, 'models/torch_rnn_trained.model')\n", (2687, 2726), False, 'import torch\n'), ((2599, 2618), 'numpy.mean', 'np.mean', (['val_losses'], {}), '(val_losses)\n', (2606, 2618), True, 'import numpy as np\n')]
|
import numpy as np
class RBFMMD2(object):
def __init__(self, sigma_list, num_bit, is_binary):
self.sigma_list = sigma_list
self.num_bit = num_bit
self.is_binary = is_binary
self.basis = np.arange(2**num_bit,dtype='int32')
self.K = mix_rbf_kernel(self.basis, self.basis, self.sigma_list, is_binary)
def __call__(self, px, py):
        '''
        Squared MMD loss between two probability distributions.
        Args:
            px (1darray): probability distribution over basis states for data set x.
            py (1darray): same as px, but for data set y.
        Returns:
            float, loss.
        '''
pxy = px-py
return self.kernel_expect(pxy, pxy)
def kernel_expect(self, px, py):
res = px.dot(self.K.dot(py))
return res
def mix_rbf_kernel(x, y, sigma_list, is_binary):
if is_binary:
dx2 = np.zeros([len(x)]*2, dtype='int64')
num_bit = int(np.round(np.log(len(x))/np.log(2)))
for i in range(num_bit):
dx2 += (x[:,None]>>i)&1 != (y>>i)&1
else:
dx2 = (x[:, None] - y)**2
return _mix_rbf_kernel_d(dx2, sigma_list)
def _mix_rbf_kernel_d(dx2, sigma_list):
K = 0.0
for sigma in sigma_list:
gamma = 1.0 / (2 * sigma)
K = K + np.exp(-gamma * dx2)
return K
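# Usage sketch (illustrative parameters, not from the original module):
if __name__ == '__main__':
    mmd = RBFMMD2(sigma_list=[0.25, 1.0, 4.0], num_bit=3, is_binary=True)
    px = np.full(8, 1.0 / 8)  # uniform distribution over 3-bit strings
    py = np.zeros(8)
    py[0] = 1.0  # point mass on the all-zeros string
    print(mmd(px, py))  # squared MMD between px and py, always >= 0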
|
[
"numpy.log",
"numpy.arange",
"numpy.exp"
] |
[((223, 261), 'numpy.arange', 'np.arange', (['(2 ** num_bit)'], {'dtype': '"""int32"""'}), "(2 ** num_bit, dtype='int32')\n", (232, 261), True, 'import numpy as np\n'), ((1270, 1290), 'numpy.exp', 'np.exp', (['(-gamma * dx2)'], {}), '(-gamma * dx2)\n', (1276, 1290), True, 'import numpy as np\n'), ((955, 964), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (961, 964), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
#coding=utf-8
"""
Description : Calculate the difference between two georeferenced rasters.
Author : <NAME>
"""
#Python libraries
import argparse
import gdal
#Personal libraries
import georaster as geor
## Dictionary to convert between resampling algo names and GDAL codes ##
GRA_codes = {'near':gdal.GRA_NearestNeighbour, 'bilinear':gdal.GRA_Bilinear, 'cubic':gdal.GRA_Cubic, 'cubicspline':gdal.GRA_CubicSpline, 'lanczos':gdal.GRA_Lanczos, 'average':gdal.GRA_Average, 'mode':gdal.GRA_Mode, 'max':gdal.GRA_Max, 'min':gdal.GRA_Min, 'med':gdal.GRA_Med, 'Q1':gdal.GRA_Q1, 'Q3':gdal.GRA_Q3}
def geodiff(im1,im2,imout,inverse=False,resampling=1):
"""
Calculate the difference between images im1 and im2. First, im1 is loaded in area of overlap with im2 and im2 is reprojected on im1's grid. Then the difference im1-im2 is calculated. If inverse is set to True, im2-im1 is calculated instead.
Inputs:
im1: str, path to the first image
im2: str, path to the second image
imout: str, path to the output image
inverse: optional - bool, set to True to calculate im2-im1 (Default is False)
resampling: optional - int, resampling algorithm. Use the gdal.GRA codes: 0 is nearest, 1 is bilinear etc. (Default is 1 - bilinear)
"""
# Read first image
img1 = geor.SingleBandRaster(im1, load_data=False)
overlap = img1.intersection(im2)
nodata1 = img1.ds.GetRasterBand(1).GetNoDataValue()
# Load second image
img2 = geor.SingleBandRaster(im2,load_data=False)
dtype2 = img2.ds.GetRasterBand(1).DataType
nodata2 = img2.ds.GetRasterBand(1).GetNoDataValue()
# Read first image in overlap area
img1 = geor.SingleBandRaster(im1, load_data=overlap,latlon=False)
xmin, xmax, ymin, ymax = img1.extent
gt = img1.ds.GetGeoTransform()
new_gt = (xmin, gt[1], gt[2], ymax, gt[4], gt[5])
# Reproject img2 if needed
if ((img1.nx != img2.nx) or (img1.ny != img2.ny)):
img2_proj = img2.reproject(img1.srs, nx=img1.nx, ny=img1.ny, xmin=xmin, ymax=ymax, xres=img1.xres, yres=img1.yres, dtype=dtype2, nodata=nodata2, interp_type=resampling, progress=True)
else:
img2_proj = geor.SingleBandRaster(im2)
# Calculate difference
if inverse==False:
diff = img1.r - img2_proj.r
elif inverse==True:
diff = img2_proj.r - img1.r
else:
raise ValueError("'inverse' must be True or False")
out_nodata = -32767 #np.finfo('float').min
diff[img1.r==nodata1] = out_nodata
diff[img2_proj.r==nodata2] = out_nodata
# Save to GTiff
return geor.simple_write_geotiff(imout, diff, new_gt, wkt=img1.srs.ExportToWkt(), dtype=6, nodata_value=out_nodata, options=None)
if __name__=='__main__':
#Set up arguments
parser = argparse.ArgumentParser(description="Calculate the difference between images im1 and im2. First, im1 is loaded in area of overlap with im2 and im2 is reprojected on im1's grid. Then the difference im1-im2 is calculated. If inverse is set to True, im2-im1 is calculated instead.")
#Positional arguments
parser.add_argument('im1', type=str, help='str, path to the first image')
parser.add_argument('im2', type=str, help='str, path to the second image')
parser.add_argument('imout', type=str, help='str, path to the output file')
# Optional arguments
parser.add_argument('-i', dest='inverse', action='store_true', help='If set, will calculate im2-im1.')
    parser.add_argument('-r', dest='resampling', type=str, default='bilinear', help="str, GDAL resampling algorithms used to reproject im2 onto im1's grid (Default is 'bilinear')")
args = parser.parse_args()
geodiff(args.im1,args.im2,args.imout,inverse=args.inverse, resampling=GRA_codes[args.resampling])
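# Example invocation (a sketch; the script and file names are hypothetical):
#   python geodiff.py dem_2015.tif dem_2010.tif dh.tif -r bilinear
#   python geodiff.py dem_2015.tif dem_2010.tif dh_inverse.tif -i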
|
[
"georaster.SingleBandRaster",
"argparse.ArgumentParser"
] |
[((1342, 1385), 'georaster.SingleBandRaster', 'geor.SingleBandRaster', (['im1'], {'load_data': '(False)'}), '(im1, load_data=False)\n', (1363, 1385), True, 'import georaster as geor\n'), ((1519, 1562), 'georaster.SingleBandRaster', 'geor.SingleBandRaster', (['im2'], {'load_data': '(False)'}), '(im2, load_data=False)\n', (1540, 1562), True, 'import georaster as geor\n'), ((1716, 1775), 'georaster.SingleBandRaster', 'geor.SingleBandRaster', (['im1'], {'load_data': 'overlap', 'latlon': '(False)'}), '(im1, load_data=overlap, latlon=False)\n', (1737, 1775), True, 'import georaster as geor\n'), ((2815, 3105), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Calculate the difference between images im1 and im2. First, im1 is loaded in area of overlap with im2 and im2 is reprojected on im1\'s grid. Then the difference im1-im2 is calculated. If inverse is set to True, im2-im1 is calculated instead."""'}), '(description=\n "Calculate the difference between images im1 and im2. First, im1 is loaded in area of overlap with im2 and im2 is reprojected on im1\'s grid. Then the difference im1-im2 is calculated. If inverse is set to True, im2-im1 is calculated instead."\n )\n', (2838, 3105), False, 'import argparse, os\n'), ((2214, 2240), 'georaster.SingleBandRaster', 'geor.SingleBandRaster', (['im2'], {}), '(im2)\n', (2235, 2240), True, 'import georaster as geor\n')]
|
import pygame
from settings import *
class UI:
def __init__(self):
self.display_surface = pygame.display.get_surface()
self.font = pygame.font.Font(UI_FONT,UI_FONT_SIZE)
self.health_bar_rect = pygame.Rect(10,10,HEALTH_BAR_WIDTH,BAR_HEIGHT)
self.energy_bar_rect = pygame.Rect(10,34,ENERGY_BAR_WIDTH,BAR_HEIGHT)
# convert weapon dict
self.weapon_graphics = []
for weapon in weapon_data.values():
path = weapon['graphic']
weapon = pygame.image.load(path).convert_alpha()
self.weapon_graphics.append(weapon)
# convert magic dict
self.magic_graphics = []
for magic in magic_data.values():
path = magic['graphic']
magic = pygame.image.load(path).convert_alpha()
self.magic_graphics.append(magic)
    def show_bar(self,current,max_amount,bg_rect,colour):
        # draw background surface
        pygame.draw.rect(self.display_surface,UI_BG_COLOUR,bg_rect)
        # stat to pixel conversion
        ratio = current / max_amount
current_width = bg_rect.width * ratio
current_rect = bg_rect.copy()
current_rect.width = current_width
# Draw stat bar stats
pygame.draw.rect(self.display_surface,colour,current_rect)
pygame.draw.rect(self.display_surface,UI_BORDER_COLOUR,bg_rect,3)
def show_exp(self,exp):
text_surf = self.font.render(str(int(exp)),False,TEXT_COLOUR)
x = self.display_surface.get_size()[0] / 2
y = self.display_surface.get_size()[1] - 20
text_rect = text_surf.get_rect(midbottom = (x,y))
pygame.draw.rect(self.display_surface,UI_BG_COLOUR,text_rect.inflate(20,20))
self.display_surface.blit(text_surf,text_rect)
pygame.draw.rect(self.display_surface,UI_BORDER_COLOUR,text_rect.inflate(20,20),3)
def selection_box(self,left,top,has_switched):
bg_rect = pygame.Rect(left,top,ITEM_BOX_SIZE,ITEM_BOX_SIZE)
pygame.draw.rect(self.display_surface,UI_BG_COLOUR,bg_rect)
if not has_switched:
pygame.draw.rect(self.display_surface,UI_BORDER_COLOUR_ACTIVE,bg_rect,3)
else:
pygame.draw.rect(self.display_surface,UI_BORDER_COLOUR,bg_rect,3)
return bg_rect
def weapon_overlay(self,weapon_index,active):
bg_rect = self.selection_box(10,630,active) # weapon box
weapon_surf = self.weapon_graphics[weapon_index]
weapon_rect = weapon_surf.get_rect(center = bg_rect.center)
self.display_surface.blit(weapon_surf,weapon_rect)
def magic_overlay(self,magic_index,active):
        bg_rect = self.selection_box(80,635,active) # magic box
magic_surf = self.magic_graphics[magic_index]
magic_rect = magic_surf.get_rect(center = bg_rect.center)
self.display_surface.blit(magic_surf,magic_rect)
def display(self,player):
# Display user interface information:
self.show_bar(player.health,player.stats['health'],self.health_bar_rect,HEALTH_COLOUR)
self.show_bar(player.energy,player.stats['energy'],self.energy_bar_rect,ENERGY_COLOUR)
self.show_exp(player.exp)
self.weapon_overlay(player.weapon_index,player.can_switch_weapon)
self.magic_overlay(player.magic_index,player.can_switch_magic)
|
[
"pygame.draw.rect",
"pygame.Rect",
"pygame.display.get_surface",
"pygame.font.Font",
"pygame.image.load"
] |
[((114, 142), 'pygame.display.get_surface', 'pygame.display.get_surface', ([], {}), '()\n', (140, 142), False, 'import pygame\n'), ((163, 202), 'pygame.font.Font', 'pygame.font.Font', (['UI_FONT', 'UI_FONT_SIZE'], {}), '(UI_FONT, UI_FONT_SIZE)\n', (179, 202), False, 'import pygame\n'), ((234, 283), 'pygame.Rect', 'pygame.Rect', (['(10)', '(10)', 'HEALTH_BAR_WIDTH', 'BAR_HEIGHT'], {}), '(10, 10, HEALTH_BAR_WIDTH, BAR_HEIGHT)\n', (245, 283), False, 'import pygame\n'), ((312, 361), 'pygame.Rect', 'pygame.Rect', (['(10)', '(34)', 'ENERGY_BAR_WIDTH', 'BAR_HEIGHT'], {}), '(10, 34, ENERGY_BAR_WIDTH, BAR_HEIGHT)\n', (323, 361), False, 'import pygame\n'), ((955, 1016), 'pygame.draw.rect', 'pygame.draw.rect', (['self.display_surface', 'UI_BG_COLOUR', 'bg_rect'], {}), '(self.display_surface, UI_BG_COLOUR, bg_rect)\n', (971, 1016), False, 'import pygame\n'), ((1247, 1307), 'pygame.draw.rect', 'pygame.draw.rect', (['self.display_surface', 'colour', 'current_rect'], {}), '(self.display_surface, colour, current_rect)\n', (1263, 1307), False, 'import pygame\n'), ((1314, 1382), 'pygame.draw.rect', 'pygame.draw.rect', (['self.display_surface', 'UI_BORDER_COLOUR', 'bg_rect', '(3)'], {}), '(self.display_surface, UI_BORDER_COLOUR, bg_rect, 3)\n', (1330, 1382), False, 'import pygame\n'), ((1942, 1994), 'pygame.Rect', 'pygame.Rect', (['left', 'top', 'ITEM_BOX_SIZE', 'ITEM_BOX_SIZE'], {}), '(left, top, ITEM_BOX_SIZE, ITEM_BOX_SIZE)\n', (1953, 1994), False, 'import pygame\n'), ((2000, 2061), 'pygame.draw.rect', 'pygame.draw.rect', (['self.display_surface', 'UI_BG_COLOUR', 'bg_rect'], {}), '(self.display_surface, UI_BG_COLOUR, bg_rect)\n', (2016, 2061), False, 'import pygame\n'), ((2101, 2176), 'pygame.draw.rect', 'pygame.draw.rect', (['self.display_surface', 'UI_BORDER_COLOUR_ACTIVE', 'bg_rect', '(3)'], {}), '(self.display_surface, UI_BORDER_COLOUR_ACTIVE, bg_rect, 3)\n', (2117, 2176), False, 'import pygame\n'), ((2200, 2268), 'pygame.draw.rect', 'pygame.draw.rect', (['self.display_surface', 'UI_BORDER_COLOUR', 'bg_rect', '(3)'], {}), '(self.display_surface, UI_BORDER_COLOUR, bg_rect, 3)\n', (2216, 2268), False, 'import pygame\n'), ((526, 549), 'pygame.image.load', 'pygame.image.load', (['path'], {}), '(path)\n', (543, 549), False, 'import pygame\n'), ((775, 798), 'pygame.image.load', 'pygame.image.load', (['path'], {}), '(path)\n', (792, 798), False, 'import pygame\n')]
|
import pygame
from pygame.sprite import Sprite
class Bullet(Sprite):
"""Bullet kontrolli klass"""
def __init__(self, game_setting, screen, ship):
"""Looge laeva asukohas kuulide objekt"""
super().__init__()
self.screen = screen
        # create the bullet rect
self.rect = pygame.Rect(0, 0, game_setting.bullet_width, game_setting.bullet_height)
self.rect.centerx = ship.rect.centerx
self.rect.top = ship.rect.top
        # the bullet's position
self.y = float(self.rect.y)
        # bullet settings
self.color = game_setting.bullet_color
self.speed_factor = game_setting.bullet_speed_factor
def update(self):
"""Kuuli positsiooni värskendamine"""
self.y -= self.speed_factor
self.rect.y = self.y
def draw_bullet(self):
"""Joonistage kuul ekraanil"""
pygame.draw.rect(self.screen, self.color, self.rect)
|
[
"pygame.draw.rect",
"pygame.Rect"
] |
[((307, 379), 'pygame.Rect', 'pygame.Rect', (['(0)', '(0)', 'game_setting.bullet_width', 'game_setting.bullet_height'], {}), '(0, 0, game_setting.bullet_width, game_setting.bullet_height)\n', (318, 379), False, 'import pygame\n'), ((866, 918), 'pygame.draw.rect', 'pygame.draw.rect', (['self.screen', 'self.color', 'self.rect'], {}), '(self.screen, self.color, self.rect)\n', (882, 918), False, 'import pygame\n')]
|
import sys
import yaml
from pathlib import Path
from optparse import OptionParser
from . import Loader
from .pretty import pretty_print_yaml
def parse_cmd_line():
parser = OptionParser()
parser.add_option(
"-e",
"--environ-vars",
dest="env_vars",
help="import values from environent variables",
default=None,
)
parser.add_option(
"-x",
"--extra-file",
dest="extra_file",
help="read extra file with yaml values",
metavar="FILE",
)
options, args = parser.parse_args()
if len(args) < 1:
print("Usage: {} file.yaml".format(sys.argv[0]))
exit(1)
return options, args
def main():
options, args = parse_cmd_line()
filename = Path(args[0])
loader = Loader(filename, extra_file=options.extra_file, env_vars=options.env_vars)
result = loader.resolve()
for result_item in result:
if len(result) > 1:
print("---")
output_yaml = yaml.safe_dump(result_item)
if sys.stdout.isatty():
pretty_print_yaml(output_yaml)
else:
print(output_yaml)
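# Example invocation (a sketch; the file names are hypothetical and the exact
# format of --environ-vars depends on Loader):
#   python -m <this_package> values.yaml -x extra.yaml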
|
[
"pathlib.Path",
"sys.stdout.isatty",
"optparse.OptionParser",
"yaml.safe_dump"
] |
[((178, 192), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (190, 192), False, 'from optparse import OptionParser\n'), ((758, 771), 'pathlib.Path', 'Path', (['args[0]'], {}), '(args[0])\n', (762, 771), False, 'from pathlib import Path\n'), ((996, 1023), 'yaml.safe_dump', 'yaml.safe_dump', (['result_item'], {}), '(result_item)\n', (1010, 1023), False, 'import yaml\n'), ((1035, 1054), 'sys.stdout.isatty', 'sys.stdout.isatty', ([], {}), '()\n', (1052, 1054), False, 'import sys\n')]
|
import argparse
import config
import train
import os
import util
import inference
parser = argparse.ArgumentParser()
parser.add_argument("input_dir", help="path to training fields / See ... for Houdini based data generator")
parser.add_argument("-od", "--output_dir", default='', help="path to training fields / See ... for Houdini based data generator")
parser.add_argument("-mg", "--meta graph_dir", default='', help="graph saving directory")
parser.add_argument("-t", "--train", help="train the network", action='store_true')
parser.add_argument("-lss", "--latent_state_size", type=int, help= "size of the latent state space. Should be 2^n", default=8, choices=[8, 16, 32, 64, 128])
parser.add_argument("-sb", "--small_blocks", help="number of small convolutional blocks in the network. ~1-4 should work well.", default=4, type=int, choices=[1,2,3,4,5,6,7])
parser.add_argument("-f", "--filters", help="number of filters in each convolution", default=128, type=int)
parser.add_argument("-ti", "--train_integrator", help="train the network", action='store_true')
parser.add_argument("-seq", "--sequence_length", help= "sequence length at inference time. How long a sequence should the networks generate ?", default=30, type=int)
parser.add_argument('-sg', '--graph_saving_freq', help= "save meta graph every n frame. no saves when set to zero", default=5000, type=int)
parser.add_argument('-tb', '--tensorboard_saving_freq', help= "save tensorboard plot every n frame. no saves when set to zero", default=5, type=int)
parser.add_argument('-pd', '--prediction_length', help = "Number of frames to predict", default=30, type=int)
parser.add_argument('-dp', '--deploy_path', default='', help="Alternative dir for inference data")
parser.add_argument('-lr_min', '--min_learn_rate', type=float, default=0.0000025, help="Minimum learning rate attained during cosine annealing")
parser.add_argument('-lr_max', '--max_learn_rate', type=float, default=0.0001, help="Maximum learning rate attained during cosine annealing")
parser.add_argument('-ep', '--period', default=2500, help="period of cosine annealing")
parser.add_argument('-tri', '--trilinear', action='store_true', help="use tri-linear interpolation for resampling and not nearest neighbour")
parser.add_argument('-mlp', '--encoder_mlp_layers', default = 1, type = int, help="MLP layers to use on each side of the latent state projection")
parser.add_argument('-sdf', '--sdf_state_size', default = 8, type = int, help="size of the boundary conditions encoding")
parser.add_argument('-gif', '--gif_saver_f', default = 5000, type = int, help="Frequency for saving gifs")
parser.add_argument('-b', '--batch_size', default = 1, type=int, help='Batch size for training')
parser.add_argument('-fem', '--use_differential_kernels', action='store_true', help="use fem layers")
parser.add_argument('-cv', '--convolution', action='store_true', help="use convolutions all the way through the autoencoder")
parser.add_argument('-fem_loss', '--fem_difference', action='store_true', help="use the fem differentials as loss metric")
parser.add_argument('-clear', '--clear', action='store_true', help="clear graphs and test fields in native dirs")
parser.add_argument('-inf', '--iframe', default=-1, help="display frame during inference")
parser.add_argument('-g', '--grid_path', default ='', help='Path to grid dictionary')
args = parser.parse_args()
config.data_path = args.input_dir
if os.path.isdir(config.data_path):
    util.create_dirs(args.clear)
else:
    print('Input dir is not valid')
    exit()
if not os.path.isdir(config.output_dir):
    print('WARNING - output dir is not valid. Meta graphs will not be saved')
    exit()
config.resample = args.trilinear
config.param_state_size = args.latent_state_size
config.n_filters = args.filters
config.output_dir = args.output_dir
config.save_freq = args.graph_saving_freq
config.f_tensorboard = args.tensorboard_saving_freq
config.sb_blocks = args.small_blocks
config.batch_size = args.batch_size
config.sequence_length = args.sequence_length
config.alt_dir = args.deploy_path
config.lr_max = args.max_learn_rate
config.lr_min = args.min_learn_rate
config.period = args.period
config.encoder_mlp_layers = args.encoder_mlp_layers
config.sdf_state = args.sdf_state_size
config.save_gif = args.gif_saver_f
if os.path.isdir(args.grid_path):
config.grid_dir = args.grid_path
config.use_fem = args.use_differential_kernels
config.fem_loss = args.fem_difference
config.conv = args.convolution
if args.train:
train.train_network()
elif args.train_integrator:
train.train_integrator()
else:
print('Inference AE with random field')
inference.restore_ae(data=config.data_path, graph_path=config.path_e, grid=config.grid_dir, frame=args.iframe)
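# Example invocation (a sketch; the script name, paths and flag values are illustrative):
#   python main.py /data/fields -od /data/output -t -lss 16 -f 64 -b 4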
|
[
"argparse.ArgumentParser",
"os.path.isdir",
"util.create_dirs",
"train.train_integrator",
"train.train_network",
"inference.restore_ae"
] |
[((93, 118), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (116, 118), False, 'import argparse\n'), ((3437, 3468), 'os.path.isdir', 'os.path.isdir', (['config.data_path'], {}), '(config.data_path)\n', (3450, 3468), False, 'import os\n'), ((4301, 4330), 'os.path.isdir', 'os.path.isdir', (['args.grid_path'], {}), '(args.grid_path)\n', (4314, 4330), False, 'import os\n'), ((3474, 3502), 'util.create_dirs', 'util.create_dirs', (['args.clear'], {}), '(args.clear)\n', (3490, 3502), False, 'import util\n'), ((3553, 3585), 'os.path.isdir', 'os.path.isdir', (['config.output_dir'], {}), '(config.output_dir)\n', (3566, 3585), False, 'import os\n'), ((4506, 4527), 'train.train_network', 'train.train_network', ([], {}), '()\n', (4525, 4527), False, 'import train\n'), ((4561, 4585), 'train.train_integrator', 'train.train_integrator', ([], {}), '()\n', (4583, 4585), False, 'import train\n'), ((4640, 4755), 'inference.restore_ae', 'inference.restore_ae', ([], {'data': 'config.data_path', 'graph_path': 'config.path_e', 'grid': 'config.grid_dir', 'frame': 'args.iframe'}), '(data=config.data_path, graph_path=config.path_e, grid=\n config.grid_dir, frame=args.iframe)\n', (4660, 4755), False, 'import inference\n')]
|
import argparse
import pandas as pd
parser = argparse.ArgumentParser()
parser.add_argument("input_file", help = "File containing list of map files to average (one per line)")
parser.add_argument("output_file")
args = parser.parse_args()
fptr = open(args.input_file)
firstfile = fptr.readline().rstrip('\n')
df = pd.read_csv(firstfile, index_col=None)
ndf = 1
for line in fptr:
    dftmp = pd.read_csv(line.rstrip('\n'), index_col=None)
    df = df + dftmp
    ndf += 1
fptr.close()
df = df / ndf
df.to_csv(args.output_file,index=False)
|
[
"pandas.read_csv",
"argparse.ArgumentParser"
] |
[((57, 82), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (80, 82), False, 'import argparse\n'), ((317, 355), 'pandas.read_csv', 'pd.read_csv', (['firstfile'], {'index_col': 'None'}), '(firstfile, index_col=None)\n', (328, 355), True, 'import pandas as pd\n'), ((391, 429), 'pandas.read_csv', 'pd.read_csv', (['line[:-1]'], {'index_col': 'None'}), '(line[:-1], index_col=None)\n', (402, 429), True, 'import pandas as pd\n')]
|
import os
import subprocess
import sys
from config import TOOLS_DIR
VERSION = 'aad2120'
SUPPORTED_PLATFORMS = {
    'cygwin': 'windows',
    'darwin': 'mac',
    'linux': 'linux',   # Python 3 reports 'linux'
    'linux2': 'linux',  # Python 2 reported 'linux2'
    'win32': 'windows',
}
def is_platform_supported(platform):
return platform in SUPPORTED_PLATFORMS
def get_binary_path():
platform = sys.platform
if not is_platform_supported(platform):
return None
platform_dir = SUPPORTED_PLATFORMS[platform]
path = os.path.join(TOOLS_DIR, 'sccache', VERSION, platform_dir, 'sccache')
if platform_dir == 'windows':
path += '.exe'
return path
def run(*args):
binary_path = get_binary_path()
if binary_path is None:
raise Exception('No sccache binary found for the current platform.')
call_args = [binary_path] + list(args)
return subprocess.call(call_args)
|
[
"subprocess.call",
"os.path.join"
] |
[((449, 517), 'os.path.join', 'os.path.join', (['TOOLS_DIR', '"""sccache"""', 'VERSION', 'platform_dir', '"""sccache"""'], {}), "(TOOLS_DIR, 'sccache', VERSION, platform_dir, 'sccache')\n", (461, 517), False, 'import os\n'), ((786, 812), 'subprocess.call', 'subprocess.call', (['call_args'], {}), '(call_args)\n', (801, 812), False, 'import subprocess\n')]
|
from __future__ import annotations
import atexit
from ctypes import pointer, c_int, c_void_p, c_char_p, c_bool, c_uint, CDLL
from functools import partial
from typing import (
Callable,
Any,
Optional,
Tuple,
TYPE_CHECKING,
cast,
Union,
Type,
TypeVar,
)
from .error_handling import raise_for_code
from .forms_api import dlls
if TYPE_CHECKING: # pragma: no cover
from ctypes import _FuncPointer
from .generic_object import BaseObject
from .forms_objects import Module
T = TypeVar("T")
Setter = Callable[["BaseObject", int, T], None]
Getter = Callable[["BaseObject", int], T]
CTypes = Union[Type[c_void_p], Type[c_bool], Type[c_int], Type["String"]]
class Context:
version: str
encoding: str
api: Optional[CDLL]
free: Optional[_FuncPointer]
def __init__(self) -> None:
self.version, self.encoding = "12c", "utf-8"
self.api, self.free = None, None
self._as_parameter_ = c_void_p(0)
def __bool__(self) -> bool:
return bool(self._as_parameter_)
def init(self, version: str, encoding: str) -> None:
if not self:
self.version, self.encoding = version, encoding
self.api, msvcrt = dlls(self.version)
self.free = msvcrt.free
self.create_context()
atexit.register(self.destroy_context)
def create_context(self) -> None:
# todo: maybe better way than just an assert?
assert self.api is not None
ctx = c_void_p()
func = self.api.d2fctxcr_Create
func.argtypes = (c_void_p, c_void_p)
error_code = func(pointer(ctx), pointer(c_int()))
if error_code: # pragma: no cover
raise_for_code(error_code)
self._as_parameter_ = ctx
def destroy_context(self) -> None:
if self._as_parameter_:
handled_api_function("d2fctxde_Destroy", tuple())(self)
self._as_parameter_ = c_void_p(0)
context: Context = Context()
class String(c_char_p):
def __init__(self) -> None:
# todo: maybe better way than just an assert?
assert context.free is not None
super().__init__()
self.free = context.free
def __del__(self) -> None:
self.free(self)
def api_function( # type: ignore
api_function_name: str, arguments: Tuple[Any, ...]
) -> Callable[..., int]:
api_func = getattr(context.api, api_function_name)
api_func.argtypes = (c_void_p,) + arguments
return partial(api_func, context)
def inject_return_value( # type: ignore
args: Tuple[Any, ...], return_value_index: Optional[int]
) -> Tuple[Any, ...]:
if return_value_index is not None:
func_args = list(args)
return_value = func_args[return_value_index]
func_args[return_value_index] = pointer(return_value)
injected_args = tuple(func_args)
else:
injected_args, return_value = args, None
return injected_args, return_value
def handled_api_function( # type: ignore
api_function_name: str,
arguments: Tuple[Any, ...],
return_value_index: Optional[int] = None,
) -> Callable[..., Any]:
def _handled_api_function(*args: Any) -> Any: # type: ignore
injected_args, return_value = inject_return_value(args, return_value_index)
error_code = api_function(api_function_name, arguments)(*injected_args)
if error_code:
raise_for_code(error_code)
return return_value
return _handled_api_function
def handle_return_value(result: int) -> bool:
if result in (2, 3): # YES, NO
return bool(result == 2)
raise_for_code(result)
def is_subclassed(generic_object: BaseObject) -> bool:
func = api_function("d2fobis_IsSubclassed", (c_void_p,))
result = func(generic_object)
return handle_return_value(result)
def has_property(generic_object: BaseObject, property_number: int) -> bool:
func = api_function("d2fobhp_HasProp", (c_void_p, c_int))
result = func(generic_object, property_number)
return handle_return_value(result)
def setter(function_name: str, setter_type: CTypes) -> Setter[T]:
return handled_api_function(function_name, (c_void_p, c_int, setter_type))
set_text: Setter[bytes] = setter("d2fobst_SetTextProp", c_void_p)
set_boolean: Setter[bool] = setter("d2fobsb_SetBoolProp", c_bool)
set_number: Setter[int] = setter("d2fobsn_SetNumProp", c_int)
set_object: Setter[BaseObject] = setter("d2fobso_SetObjProp", c_void_p)
def getter(function_name: str, return_type: CTypes) -> Getter[T]:
func = handled_api_function(function_name, (c_void_p, c_int, c_void_p), 2)
    def _getter(generic_object: BaseObject, property_number: int) -> T:
        return func(generic_object, property_number, return_type()).value
return cast(Getter[T], _getter)
get_boolean: Getter[bool] = getter("d2fobgb_GetBoolProp", c_bool)
get_number: Getter[int] = getter("d2fobgn_GetNumProp", c_int)
get_object: Getter[BaseObject] = getter("d2fobgo_GetObjProp", c_void_p)
get_text: Getter[bytes] = getter("d2fobgt_GetTextProp", String)
def load_library(library_path: str) -> c_void_p:
return handled_api_function(
"d2flibld_Load", (c_void_p, c_char_p), return_value_index=0
)(c_void_p(), library_path.encode(context.encoding))
def load_module(form_path: str) -> c_void_p:
return handled_api_function(
"d2ffmdld_Load", (c_void_p, c_char_p, c_bool), return_value_index=0
)(c_void_p(), form_path.encode(context.encoding), False)
def create_module(name: str) -> c_void_p:
return handled_api_function(
"d2ffmdcr_Create", (c_void_p, c_char_p), return_value_index=0
)(c_void_p(), name.encode(context.encoding))
def save_module(module: Module, path: str) -> None:
handled_api_function("d2ffmdsv_Save", (c_void_p, c_char_p, c_bool))(
module, path.encode(context.encoding), False
)
def create(owner: BaseObject, name: str, obj_number: int) -> c_void_p:
return handled_api_function(
"d2fobcr_Create", (c_void_p, c_void_p, c_char_p, c_int), return_value_index=1
)(owner, c_void_p(), name.encode(context.encoding), obj_number)
def destroy(generic_object: BaseObject) -> None:
handled_api_function("d2fobde_Destroy", (c_void_p,))(generic_object)
def move(generic_object: BaseObject, next_object: Optional[BaseObject]) -> None:
handled_api_function("d2fobmv_Move", (c_void_p, c_void_p))(
generic_object, next_object
)
def query_type(generic_object: Union[BaseObject, c_void_p]) -> int:
return int(
handled_api_function("d2fobqt_QueryType", (c_void_p,), return_value_index=1)(
generic_object, c_int()
).value
)
GetConstant = Callable[[int], str]
def get_constant(function_name: str) -> GetConstant:
def _get_constant(constant_property: int) -> str:
constant_value = handled_api_function(
function_name, (c_int, c_void_p), return_value_index=1
)(constant_property, c_char_p())
return (constant_value.value or b"").decode(context.encoding)
return _get_constant
object_name: GetConstant = get_constant("d2fobgcn_GetConstName")
property_constant_name: GetConstant = get_constant("d2fprgcn_GetConstName")
property_name: GetConstant = get_constant("d2fprgn_GetName")
def property_type(property_number: int) -> int:
return int(api_function("d2fprgt_GetType", (c_uint,))(property_number))
def property_constant_number(property_const_name: str) -> int:
return handled_api_function(
"d2fprgcv_GetConstValue", (c_char_p, c_void_p), return_value_index=1
)(property_const_name.encode(context.encoding), c_int()).value
def object_number(obj_name: str) -> int:
return int(
handled_api_function(
"d2fobgcv_GetConstValue", (c_char_p, c_void_p), return_value_index=1
)(obj_name.encode(context.encoding), c_int()).value
)
def set_subclass(
to_subclass: BaseObject, parent: BaseObject, keep_path: bool = False
) -> None:
handled_api_function("d2fobsc_SubClass", (c_void_p, c_void_p, c_bool))(
to_subclass, parent, keep_path
)
def remove_subclass(to_un_subclass: BaseObject) -> None:
handled_api_function("d2fobus_UnSubClass", (c_void_p,))(to_un_subclass)
|
[
"functools.partial",
"atexit.register",
"ctypes.c_char_p",
"ctypes.c_int",
"typing.cast",
"ctypes.pointer",
"ctypes.c_void_p",
"typing.TypeVar"
] |
[((523, 535), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (530, 535), False, 'from typing import Callable, Any, Optional, Tuple, TYPE_CHECKING, cast, Union, Type, TypeVar\n'), ((2493, 2519), 'functools.partial', 'partial', (['api_func', 'context'], {}), '(api_func, context)\n', (2500, 2519), False, 'from functools import partial\n'), ((4795, 4819), 'typing.cast', 'cast', (['Getter[T]', '_getter'], {}), '(Getter[T], _getter)\n', (4799, 4819), False, 'from typing import Callable, Any, Optional, Tuple, TYPE_CHECKING, cast, Union, Type, TypeVar\n'), ((967, 978), 'ctypes.c_void_p', 'c_void_p', (['(0)'], {}), '(0)\n', (975, 978), False, 'from ctypes import pointer, c_int, c_void_p, c_char_p, c_bool, c_uint, CDLL\n'), ((1506, 1516), 'ctypes.c_void_p', 'c_void_p', ([], {}), '()\n', (1514, 1516), False, 'from ctypes import pointer, c_int, c_void_p, c_char_p, c_bool, c_uint, CDLL\n'), ((2809, 2830), 'ctypes.pointer', 'pointer', (['return_value'], {}), '(return_value)\n', (2816, 2830), False, 'from ctypes import pointer, c_int, c_void_p, c_char_p, c_bool, c_uint, CDLL\n'), ((5244, 5254), 'ctypes.c_void_p', 'c_void_p', ([], {}), '()\n', (5252, 5254), False, 'from ctypes import pointer, c_int, c_void_p, c_char_p, c_bool, c_uint, CDLL\n'), ((5457, 5467), 'ctypes.c_void_p', 'c_void_p', ([], {}), '()\n', (5465, 5467), False, 'from ctypes import pointer, c_int, c_void_p, c_char_p, c_bool, c_uint, CDLL\n'), ((5665, 5675), 'ctypes.c_void_p', 'c_void_p', ([], {}), '()\n', (5673, 5675), False, 'from ctypes import pointer, c_int, c_void_p, c_char_p, c_bool, c_uint, CDLL\n'), ((6099, 6109), 'ctypes.c_void_p', 'c_void_p', ([], {}), '()\n', (6107, 6109), False, 'from ctypes import pointer, c_int, c_void_p, c_char_p, c_bool, c_uint, CDLL\n'), ((1324, 1361), 'atexit.register', 'atexit.register', (['self.destroy_context'], {}), '(self.destroy_context)\n', (1339, 1361), False, 'import atexit\n'), ((1629, 1641), 'ctypes.pointer', 'pointer', (['ctx'], {}), '(ctx)\n', (1636, 1641), False, 'from ctypes import pointer, c_int, c_void_p, c_char_p, c_bool, c_uint, CDLL\n'), ((1952, 1963), 'ctypes.c_void_p', 'c_void_p', (['(0)'], {}), '(0)\n', (1960, 1963), False, 'from ctypes import pointer, c_int, c_void_p, c_char_p, c_bool, c_uint, CDLL\n'), ((6986, 6996), 'ctypes.c_char_p', 'c_char_p', ([], {}), '()\n', (6994, 6996), False, 'from ctypes import pointer, c_int, c_void_p, c_char_p, c_bool, c_uint, CDLL\n'), ((7651, 7658), 'ctypes.c_int', 'c_int', ([], {}), '()\n', (7656, 7658), False, 'from ctypes import pointer, c_int, c_void_p, c_char_p, c_bool, c_uint, CDLL\n'), ((1651, 1658), 'ctypes.c_int', 'c_int', ([], {}), '()\n', (1656, 1658), False, 'from ctypes import pointer, c_int, c_void_p, c_char_p, c_bool, c_uint, CDLL\n'), ((6667, 6674), 'ctypes.c_int', 'c_int', ([], {}), '()\n', (6672, 6674), False, 'from ctypes import pointer, c_int, c_void_p, c_char_p, c_bool, c_uint, CDLL\n'), ((7881, 7888), 'ctypes.c_int', 'c_int', ([], {}), '()\n', (7886, 7888), False, 'from ctypes import pointer, c_int, c_void_p, c_char_p, c_bool, c_uint, CDLL\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 11 21:57:41 2020
@author: inderpreet
Calculate statistics for MWHS point estimates; the results are given in LaTeX
format, covering the scattering-index and Buehler et al. filtering approaches.
"""
import netCDF4
import os
import matplotlib.pyplot as plt
import numpy as np
from read_qrnn import read_qrnn
plt.rcParams.update({'font.size': 26})
from mwhs import mwhsData
from scipy.stats import skew
#%%
def get_SI_land(y_ob, y_fg, i89, i150):
"""
compute scattering index over land
"""
SI_ob = y_ob[i89, :] - y_ob[i150, :]
SI_fg = y_fg[i89, :] - y_fg[i150, :]
return (SI_ob + SI_fg)/2
def get_SI_ocean(y_ob, y_fg, y_cl, i89, i150):
"""
compute scattering index over ocean
"""
SI_ob = y_ob[i89, :] - y_ob[i150, :] -(y_cl[i89, :] - y_cl[i150, :])
SI_fg = y_fg[i89, :] - y_fg[i150, :] - (y_cl[i89, :] - y_cl[i150, :])
return (SI_ob + SI_fg)/2
def bias(y , y0):
return np.mean(y-y0)
def std(y , y0):
return np.std(y-y0)
def mae(y, y0):
return np.mean(np.abs(y-y0))
def filter_buehler_19(TB18, TB19):
"""
Filtering with buehler et al criteria
Parameters
----------
data : MWI dataset containing testing data
Returns
-------
im : logical array for the filtered data
"""
# x = data.add_noise(data.x, data.index)
im1 = TB18 < 240.0
dtb = TB19 - TB18
im2 = dtb < 0
im = np.logical_or(im1, im2)
print (np.sum(im1), np.sum(im2))
return im
if __name__ == "__main__":
#%% input parameters
depth = 3
width = 128
quantiles = np.array([0.002, 0.03, 0.16, 0.5, 0.84, 0.97, 0.998])
batchSize = 128
targets = [11, 12, 13, 14, 15]
targets = [15]
test_file = os.path.expanduser("~/Dendrite/Projects/AWS-325GHz/MWHS/data/TB_MWHS_test.nc")
iq = np.argwhere(quantiles == 0.5)[0,0]
qrnn_dir = "C89+150"
#qrnn_dir = "C150"
#qrnn_dir = "C150+118"
d = {"C89+150" : [1, 10],
"C89+150+118" : [1, 10, 6, 7 ],
"C150" : [10],
"C89+150+183" : [1, 10, 11, 12, 13, 14, 15]
}
Channels = [[1, 10], [1, 6, 7, 10]]
qrnn_dirs = ["C89+150"]
#qrnn_dirs = ["C89+150", "C89+150+118", "C150" ]
path = os.path.expanduser('~/Dendrite/Projects/AWS-325GHz/MWHS/data/')
allChannels = np.arange(1, 16, 1)
#%%
if __name__ == "__main__":
#%%
TB_ob = np.load(os.path.join(path, 'TB_obs.npy'))
TB_fg = np.load(os.path.join(path, 'TB_fg.npy'))
TB_cl = np.load(os.path.join(path, 'TB_cl.npy'))
i89, = np.argwhere(allChannels == 1)[0]
i150, = np.argwhere(allChannels == 10)[0]
for qrnn_dir, target in zip(qrnn_dirs, targets) :
qrnn_path = os.path.expanduser("~/Dendrite/Projects/AWS-325GHz/MWHS/qrnn_output/all_with_flag/%s/"%(qrnn_dir))
channels = np.array(d[qrnn_dir])
if target not in channels:
inChannels = np.concatenate([[target], channels])
else:
inChannels = channels
print(qrnn_dir, channels, inChannels)
qrnn_file = os.path.join(qrnn_path, "qrnn_mwhs_%s.nc"%(target))
i183, = np.argwhere(inChannels == target)[0]
y_pre, y_prior, y0, y, y_pos_mean = read_qrnn(qrnn_file, test_file, inChannels, target)
        im1 = np.abs(y_pre[:, 3] - y_prior[:, i183]) < 5
data = mwhsData(test_file,
inChannels, target, ocean = False, test_data = True)
#%% SI approach
SI_land = get_SI_land(TB_ob, TB_fg, i89, i150)
SI_ocean = get_SI_ocean(TB_ob, TB_fg, TB_cl, i89, i150)
SI_land = SI_land[data.im]
SI_ocean = SI_ocean[data.im]
iocean = np.squeeze(data.lsm[:] == 0)
iland = ~iocean
SI_land[iocean] = SI_ocean[iocean]
SI = SI_land.copy()
im = np.abs(SI) <= 5
y_fil = y_prior[im, i183]
y0_fil = y0[im]
#%% Buehler et al approach
test_file_noise = os.path.join(path, "TB_MWHS_test_noisy_allsky.nc")
file = netCDF4.Dataset(test_file_noise, mode = "r")
TB_var = file.variables["TB"]
TB_noise = TB_var[:]
i18, = np.where(allChannels == 11)[0]
i19, = np.where(allChannels == 13)[0]
TB18 = TB_noise[1, i18, data.im].data
TB19 = TB_noise[1, i19, data.im].data
im18 = np.isfinite(TB18)
im19 = np.isfinite(TB19)
        im18 = np.logical_and(im18, im19)  # combine the finite-value masks
im_183 = filter_buehler_19(TB18, TB19)
# im_183 = im_183[data.im]
#%%
print ("-----------------channel %s-------------------------"%str(target))
# print ("bias uncorr", bias(y_prior[:, i183], y0))
print ("bias SI", bias(y_fil, y0[im]))
print ("bias B183", bias(y_prior[~im_183, i183], y0[~im_183]))
# print ("bias QRNN", bias(y_prior[im1, i183], y0[im1]))
# print ("bias QRNN_corr", bias(y_pre[im1, 3], y0[im1]))
# print ("std uncorr", std(y_prior[:, i183], y0))
print ("std SI", std(y_fil, y0[im]))
print ("std B183", std(y_prior[~im_183, i183], y0[~im_183]))
# print ("std QRNN", std(y_prior[im1, i183], y0[im1]))
# print ("std QRNN_corr", std(y_pre[im1, 3], y0[im1]))
# print ("mae uncorr", mae(y_prior[:, i183], y0))
print ("mae SI", mae(y_fil, y0[im]))
print ("mae B183", mae(y_prior[~im_183, i183], y0[~im_183]))
# print ("mae QRNN", mae(y_prior[im1, i183], y0[im1]))
# print ("mae QRNN_corr", mae(y_pre[im1, 3], y0[im1]))
print ("skew SI", skew(y_fil-y0[im]))
print ("skew B183", skew(y_prior[~im_183, i183]- y0[~im_183]))
print ("skew all", skew(y_prior[:, i183]- y0[:]))
print ("% rejected SI", np.sum(~im)/im.shape)
print ("% rejected B183", np.sum(im_183)/im.shape)
#%%
bins = np.arange(-30, 20, 0.5)
hist = np.histogram(y_fil - y0_fil, bins)
fig, ax = plt.subplots(1, 1)
# ax.plot(bins[:-1], hist[0], 'k')
ax.set_yscale('log')
hist = np.histogram(y_prior[:, i183]- y0 , bins)
ax.plot(bins[:-1], hist[0], 'b')
y_pre_fil = y_prior[~im, i183]
hist = np.histogram(y_pre_fil - y0[~im] , bins)
ax.plot(bins[:-1], hist[0], 'r')
hist = np.histogram(y_pre[im1, 3]- y0[im1], bins)
ax.plot(bins[:-1], hist[0], 'g')
TB_15 = TB_ob[14, data.im]
hist = np.histogram(y_prior[im_183, i183] - y0[im_183], bins)
ax.plot(bins[:-1], hist[0], 'y')
|
[
"numpy.abs",
"numpy.sum",
"numpy.histogram",
"numpy.mean",
"numpy.arange",
"os.path.join",
"read_qrnn.read_qrnn",
"netCDF4.Dataset",
"numpy.std",
"numpy.isfinite",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.subplots",
"mwhs.mwhsData",
"numpy.argwhere",
"numpy.squeeze",
"numpy.concatenate",
"numpy.logical_and",
"scipy.stats.skew",
"numpy.where",
"numpy.array",
"numpy.logical_or",
"os.path.expanduser"
] |
[((377, 415), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 26}"], {}), "({'font.size': 26})\n", (396, 415), True, 'import matplotlib.pyplot as plt\n'), ((1027, 1042), 'numpy.mean', 'np.mean', (['(y - y0)'], {}), '(y - y0)\n', (1034, 1042), True, 'import numpy as np\n'), ((1070, 1084), 'numpy.std', 'np.std', (['(y - y0)'], {}), '(y - y0)\n', (1076, 1084), True, 'import numpy as np\n'), ((1502, 1525), 'numpy.logical_or', 'np.logical_or', (['im1', 'im2'], {}), '(im1, im2)\n', (1515, 1525), True, 'import numpy as np\n'), ((1680, 1733), 'numpy.array', 'np.array', (['[0.002, 0.03, 0.16, 0.5, 0.84, 0.97, 0.998]'], {}), '([0.002, 0.03, 0.16, 0.5, 0.84, 0.97, 0.998])\n', (1688, 1733), True, 'import numpy as np\n'), ((1829, 1907), 'os.path.expanduser', 'os.path.expanduser', (['"""~/Dendrite/Projects/AWS-325GHz/MWHS/data/TB_MWHS_test.nc"""'], {}), "('~/Dendrite/Projects/AWS-325GHz/MWHS/data/TB_MWHS_test.nc')\n", (1847, 1907), False, 'import os\n'), ((2372, 2435), 'os.path.expanduser', 'os.path.expanduser', (['"""~/Dendrite/Projects/AWS-325GHz/MWHS/data/"""'], {}), "('~/Dendrite/Projects/AWS-325GHz/MWHS/data/')\n", (2390, 2435), False, 'import os\n'), ((2459, 2478), 'numpy.arange', 'np.arange', (['(1)', '(16)', '(1)'], {}), '(1, 16, 1)\n', (2468, 2478), True, 'import numpy as np\n'), ((1119, 1133), 'numpy.abs', 'np.abs', (['(y - y0)'], {}), '(y - y0)\n', (1125, 1133), True, 'import numpy as np\n'), ((1537, 1548), 'numpy.sum', 'np.sum', (['im1'], {}), '(im1)\n', (1543, 1548), True, 'import numpy as np\n'), ((1550, 1561), 'numpy.sum', 'np.sum', (['im2'], {}), '(im2)\n', (1556, 1561), True, 'import numpy as np\n'), ((1922, 1951), 'numpy.argwhere', 'np.argwhere', (['(quantiles == 0.5)'], {}), '(quantiles == 0.5)\n', (1933, 1951), True, 'import numpy as np\n'), ((2560, 2592), 'os.path.join', 'os.path.join', (['path', '"""TB_obs.npy"""'], {}), "(path, 'TB_obs.npy')\n", (2572, 2592), False, 'import os\n'), ((2618, 2649), 'os.path.join', 'os.path.join', (['path', '"""TB_fg.npy"""'], {}), "(path, 'TB_fg.npy')\n", (2630, 2649), False, 'import os\n'), ((2675, 2706), 'os.path.join', 'os.path.join', (['path', '"""TB_cl.npy"""'], {}), "(path, 'TB_cl.npy')\n", (2687, 2706), False, 'import os\n'), ((2723, 2752), 'numpy.argwhere', 'np.argwhere', (['(allChannels == 1)'], {}), '(allChannels == 1)\n', (2734, 2752), True, 'import numpy as np\n'), ((2772, 2802), 'numpy.argwhere', 'np.argwhere', (['(allChannels == 10)'], {}), '(allChannels == 10)\n', (2783, 2802), True, 'import numpy as np\n'), ((2947, 3054), 'os.path.expanduser', 'os.path.expanduser', (["('~/Dendrite/Projects/AWS-325GHz/MWHS/qrnn_output/all_with_flag/%s/' % qrnn_dir\n    )"], {}), "(\n    '~/Dendrite/Projects/AWS-325GHz/MWHS/qrnn_output/all_with_flag/%s/' %\n    qrnn_dir)\n", (2965, 3054), False, 'import os\n'), ((3095, 3116), 'numpy.array', 'np.array', (['d[qrnn_dir]'], {}), '(d[qrnn_dir])\n', (3103, 3116), True, 'import numpy as np\n'), ((3408, 3459), 'os.path.join', 'os.path.join', (['qrnn_path', "('qrnn_mwhs_%s.nc' % target)"], {}), "(qrnn_path, 'qrnn_mwhs_%s.nc' % target)\n", (3420, 3459), False, 'import os\n'), ((3591, 3642), 'read_qrnn.read_qrnn', 'read_qrnn', (['qrnn_file', 'test_file', 'inChannels', 'target'], {}), '(qrnn_file, test_file, inChannels, target)\n', (3600, 3642), False, 'from read_qrnn import read_qrnn\n'), ((3739, 3807), 'mwhs.mwhsData', 'mwhsData', (['test_file', 'inChannels', 'target'], {'ocean': '(False)', 'test_data': '(True)'}), '(test_file, inChannels, target, ocean=False, test_data=True)\n', (3747, 3807), False, 'from mwhs import mwhsData\n'), ((4123, 4151), 'numpy.squeeze', 'np.squeeze', (['(data.lsm[:] == 0)'], {}), '(data.lsm[:] == 0)\n', (4133, 4151), True, 'import numpy as np\n'), ((4512, 4562), 'os.path.join', 'os.path.join', (['path', '"""TB_MWHS_test_noisy_allsky.nc"""'], {}), "(path, 'TB_MWHS_test_noisy_allsky.nc')\n", (4524, 4562), False, 'import os\n'), ((4591, 4633), 'netCDF4.Dataset', 'netCDF4.Dataset', (['test_file_noise'], {'mode': '"""r"""'}), "(test_file_noise, mode='r')\n", (4606, 4633), False, 'import netCDF4\n'), ((4943, 4960), 'numpy.isfinite', 'np.isfinite', (['TB18'], {}), '(TB18)\n', (4954, 4960), True, 'import numpy as np\n'), ((4980, 4997), 'numpy.isfinite', 'np.isfinite', (['TB19'], {}), '(TB19)\n', (4991, 4997), True, 'import numpy as np\n'), ((5017, 5043), 'numpy.logical_and', 'np.logical_and', (['TB18', 'TB19'], {}), '(TB18, TB19)\n', (5031, 5043), True, 'import numpy as np\n'), ((6737, 6760), 'numpy.arange', 'np.arange', (['(-30)', '(20)', '(0.5)'], {}), '(-30, 20, 0.5)\n', (6746, 6760), True, 'import numpy as np\n'), ((6780, 6814), 'numpy.histogram', 'np.histogram', (['(y_fil - y0_fil)', 'bins'], {}), '(y_fil - y0_fil, bins)\n', (6792, 6814), True, 'import numpy as np\n'), ((6837, 6855), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (6849, 6855), True, 'import matplotlib.pyplot as plt\n'), ((6967, 7008), 'numpy.histogram', 'np.histogram', (['(y_prior[:, i183] - y0)', 'bins'], {}), '(y_prior[:, i183] - y0, bins)\n', (6979, 7008), True, 'import numpy as np\n'), ((7132, 7171), 'numpy.histogram', 'np.histogram', (['(y_pre_fil - y0[~im])', 'bins'], {}), '(y_pre_fil - y0[~im], bins)\n', (7144, 7171), True, 'import numpy as np\n'), ((7263, 7306), 'numpy.histogram', 'np.histogram', (['(y_pre[im1, 3] - y0[im1])', 'bins'], {}), '(y_pre[im1, 3] - y0[im1], bins)\n', (7275, 7306), True, 'import numpy as np\n'), ((7435, 7489), 'numpy.histogram', 'np.histogram', (['(y_prior[im_183, i183] - y0[im_183])', 'bins'], {}), '(y_prior[im_183, i183] - y0[im_183], bins)\n', (7447, 7489), True, 'import numpy as np\n'), ((3211, 3247), 'numpy.concatenate', 'np.concatenate', (['[[target], channels]'], {}), '([[target], channels])\n', (3225, 3247), True, 'import numpy as np\n'), ((3493, 3526), 'numpy.argwhere', 'np.argwhere', (['(inChannels == target)'], {}), '(inChannels == target)\n', (3504, 3526), True, 'import numpy as np\n'), ((3662, 3700), 'numpy.abs', 'np.abs', (['(y_pre[:, 3] - y_prior[:, i183])'], {}), '(y_pre[:, 3] - y_prior[:, i183])\n', (3668, 3700), True, 'import numpy as np\n'), ((4302, 4312), 'numpy.abs', 'np.abs', (['SI'], {}), '(SI)\n', (4308, 4312), True, 'import numpy as np\n'), ((4730, 4757), 'numpy.where', 'np.where', (['(allChannels == 11)'], {}), '(allChannels == 11)\n', (4738, 4757), True, 'import numpy as np\n'), ((4780, 4807), 'numpy.where', 'np.where', (['(allChannels == 13)'], {}), '(allChannels == 13)\n', (4788, 4807), True, 'import numpy as np\n'), ((6398, 6418), 'scipy.stats.skew', 'skew', (['(y_fil - y0[im])'], {}), '(y_fil - y0[im])\n', (6402, 6418), False, 'from scipy.stats import skew\n'), ((6450, 6492), 'scipy.stats.skew', 'skew', (['(y_prior[~im_183, i183] - y0[~im_183])'], {}), '(y_prior[~im_183, i183] - y0[~im_183])\n', (6454, 6492), False, 'from scipy.stats import skew\n'), ((6524, 6554), 'scipy.stats.skew', 'skew', (['(y_prior[:, i183] - y0[:])'], {}), '(y_prior[:, i183] - y0[:])\n', (6528, 6554), False, 'from scipy.stats import skew\n'), ((6604, 6615), 'numpy.sum', 'np.sum', (['(~im)'], {}), '(~im)\n', (6610, 6615), True, 'import numpy as np\n'), ((6664, 6678), 'numpy.sum', 'np.sum', (['im_183'], {}), '(im_183)\n', (6670, 6678), True, 'import numpy as np\n')]
|
from __future__ import print_function
from django.shortcuts import render
from .models import CreateRecord
from tqdm import tqdm
import os
import codecs
import traceback
def IndexView(request):
output('s3log/')
return render(request, 'analyzer/index.html')
def read_file(filepath):  # renamed to avoid shadowing the built-in input()
    try:
        with codecs.open(filepath, mode='r', encoding='us-ascii') as file:
            return file.read()
except Exception:
traceback.print_exc()
print(filepath)
return ""
def parse(log, callback, constraint=None):
if constraint and not (constraint in log):
return
log_split, log_elem, paren_flag, quote_flag = [], "", False, False
for one_char in list(log):
if one_char == ' ':
if not paren_flag and not quote_flag:
log_split.append(log_elem)
log_elem = ""
else:
log_elem += one_char
elif one_char == '[':
paren_flag = True
elif one_char == ']':
paren_flag = False
elif one_char == '"':
quote_flag = not quote_flag
else:
log_elem += one_char
if log_elem != "":
log_split.append(log_elem)
try:
callback(log_split)
except Exception as e:
print(e)
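# Example (a sketch): bracketed and quoted fields survive as single tokens, so
#   parse('GET "some uri" [10/Feb/2020:00:00:00 +0000]', print)
# prints ['GET', 'some uri', '10/Feb/2020:00:00:00 +0000'].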
def output(dirpath):
for filename in tqdm(os.listdir(dirpath)):
        logs = read_file(os.path.join(dirpath, filename))
for log in logs.splitlines():
parse(log, CreateRecord)
|
[
"django.shortcuts.render",
"codecs.open",
"os.listdir",
"traceback.print_exc"
] |
[((228, 266), 'django.shortcuts.render', 'render', (['request', '"""analyzer/index.html"""'], {}), "(request, 'analyzer/index.html')\n", (234, 266), False, 'from django.shortcuts import render\n'), ((1340, 1359), 'os.listdir', 'os.listdir', (['dirpath'], {}), '(dirpath)\n', (1350, 1359), False, 'import os\n'), ((312, 364), 'codecs.open', 'codecs.open', (['filepath'], {'mode': '"""r"""', 'encoding': '"""us-ascii"""'}), "(filepath, mode='r', encoding='us-ascii')\n", (323, 364), False, 'import codecs\n'), ((436, 457), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (455, 457), False, 'import traceback\n')]
|
from nose.tools import assert_equal
import os
import sys
dir_path = os.path.dirname(os.path.abspath(__file__))
module_path = os.path.join(dir_path, "../src")
if module_path not in sys.path:
sys.path.append(module_path)
os.chdir(dir_path)
class TestLevenshtein:
def test_init(self):
from distance.levenshtein import Levenshtein
Levenshtein()
def test_measure(self):
from distance.levenshtein import Levenshtein
l = Levenshtein()
assert_equal(0, l.measure("あいう", "あいう"))
assert_equal(1, l.measure("あいう", "あい"))
assert_equal(1, l.measure("いう", "あいう"))
assert_equal(1, l.measure("あいう", "あえう"))
assert_equal(2, l.measure("あいう", "あ"))
assert_equal(2, l.measure("あいう", "あいうえお"))
assert_equal(2, l.measure("あいう", "あえいうお"))
assert_equal(2, l.measure("あ", "愛"))
assert_equal(2, l.measure("愛", "「"))
assert_equal(1, l.measure("あ", "「"))
|
[
"sys.path.append",
"os.path.abspath",
"distance.levenshtein.Levenshtein",
"os.path.join",
"os.chdir"
] |
[((179, 211), 'os.path.join', 'os.path.join', (['dir_path', '"""../src"""'], {}), "(dir_path, '../src')\n", (191, 211), False, 'import os\n'), ((278, 296), 'os.chdir', 'os.chdir', (['dir_path'], {}), '(dir_path)\n', (286, 296), False, 'import os\n'), ((138, 163), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (153, 163), False, 'import os\n'), ((248, 276), 'sys.path.append', 'sys.path.append', (['module_path'], {}), '(module_path)\n', (263, 276), False, 'import sys\n'), ((408, 421), 'distance.levenshtein.Levenshtein', 'Levenshtein', ([], {}), '()\n', (419, 421), False, 'from distance.levenshtein import Levenshtein\n'), ((516, 529), 'distance.levenshtein.Levenshtein', 'Levenshtein', ([], {}), '()\n', (527, 529), False, 'from distance.levenshtein import Levenshtein\n')]
|
import enum
import logging
from pathlib import Path
from typing import List
from goodsplit.interface import Event
from goodsplit.reactor import Reactor
from goodsplit.sources.inotify import INotifyEventSource
from goodsplit.sources.inotify import OpenFileEvent
from goodsplit.sources.inotify import CloseFileEvent
from goodsplit.time_base import MonotonicFloatSeconds
LOG = logging.getLogger("system_shock_2")
class RunState(enum.Enum):
"""
The current state of the run.
STOPPED = Not started
STOPPED_AWAITING_EARTH_MIS = Waiting for earth.mis to load
RUNNING = Running the game
FINISHED = Finished the game
"""
STOPPED = enum.auto()
STOPPED_AWAITING_EARTH_MIS = enum.auto()
RUNNING = enum.auto()
FINISHED = enum.auto()
class SystemShock2Reactor(Reactor):
"""A reactor for System Shock 2 runs."""
__slots__ = (
"_path_cs1_avi",
"_path_cs3_avi",
"_path_earth_mis",
"_path_ss2_exe",
"_root_dir",
"_run_state",
"_missions_entered",
)
def __init__(self, *, root_dir: Path) -> None:
# Set up paths
self._root_dir = root_dir.resolve()
self._path_cs1_avi = self._root_dir / "Data" / "cutscenes" / "cs1.avi"
self._path_cs3_avi = self._root_dir / "Data" / "cutscenes" / "Cs3.avi"
self._path_earth_mis = self._root_dir / "Data" / "earth.mis"
self._path_ss2_exe = self._root_dir / "ss2.exe"
self._run_state = RunState.STOPPED
super().__init__(
event_sources=[
INotifyEventSource(fpaths=[
# Start, stop, split
self._root_dir / "Data",
self._root_dir / "Data" / "cutscenes",
# Crash monitoring
self._root_dir / "ss2.exe",
]),
],
time_bases = [
MonotonicFloatSeconds(),
],
)
@classmethod
def get_game_title(cls) -> str:
return "System Shock 2"
@classmethod
def get_game_key(cls) -> str:
return "system_shock_2"
def on_event(self, ts: List[float], ev: Event) -> None:
time_str = self.convert_times_to_str(ts)
if isinstance(ev, OpenFileEvent):
if ev.fpath == self._path_cs1_avi:
# Open cs1.avi: Opening cutscene. We're about to start a run.
self.cancel_run()
self._run_state = RunState.STOPPED_AWAITING_EARTH_MIS
LOG.info(f"{time_str} Watching cs1.avi, previous run has been cancelled")
elif ev.fpath == self._path_cs3_avi:
# Open cs3.avi: Ending cutscene. Run is (probably) finished.
self.finish_run()
self._run_state = RunState.FINISHED
LOG.info(f"{time_str} Watching cs3.avi, run is over!")
elif ev.fpath.name.lower().endswith(".avi"):
# Some cutscene.
#LOG.info(f"{time_str} TODO: Cutscene open {ev}")
self.do_fuse_split(ts, f"cutscene:{ev.fpath.name.lower()}")
elif ev.fpath == self._path_earth_mis:
# earth.mis is special.
LOG.info(f"{time_str} Loading earth.mis... run starts when it gets closed")
elif ev.fpath.name.lower().endswith(".mis"):
# Some mission.
#LOG.info(f"{time_str} Splitting on {ev.fpath.name.lower()}")
self.do_fuse_split(ts, f"mission:{ev.fpath.name.lower()}")
self.start_loading(ts)
elif ev.fpath.name.lower() in ["shock2.gam", "allobjs.osm", "motiondb.bin"]:
# Some files we don't care about.
pass
else:
LOG.info(f"{time_str} TODO: {ev}")
elif isinstance(ev, CloseFileEvent):
if ev.fpath == self._path_earth_mis:
# Close earth.mis: If we're waiting for this, then start the run!
if self._run_state == RunState.STOPPED_AWAITING_EARTH_MIS:
self._run_state = RunState.RUNNING
self.start_run()
elif ev.fpath.name.lower().endswith(".mis"):
# Some mission.
#LOG.info(f"{time_str} TODO: Mission close {ev} (for load removal)")
self.stop_loading(ts)
elif ev.fpath.name.lower().endswith(".avi"):
# Some cutscene.
# We really don't care when these close.
pass
elif ev.fpath.name.lower() in ["shock2.gam", "allobjs.osm", "motiondb.bin"]:
# Some files we don't care about.
pass
else:
LOG.info(f"{time_str} TODO: {ev}")
|
[
"enum.auto",
"goodsplit.time_base.MonotonicFloatSeconds",
"logging.getLogger",
"goodsplit.sources.inotify.INotifyEventSource"
] |
[((405, 440), 'logging.getLogger', 'logging.getLogger', (['"""system_shock_2"""'], {}), "('system_shock_2')\n", (422, 440), False, 'import logging\n'), ((688, 699), 'enum.auto', 'enum.auto', ([], {}), '()\n', (697, 699), False, 'import enum\n'), ((733, 744), 'enum.auto', 'enum.auto', ([], {}), '()\n', (742, 744), False, 'import enum\n'), ((759, 770), 'enum.auto', 'enum.auto', ([], {}), '()\n', (768, 770), False, 'import enum\n'), ((786, 797), 'enum.auto', 'enum.auto', ([], {}), '()\n', (795, 797), False, 'import enum\n'), ((1596, 1719), 'goodsplit.sources.inotify.INotifyEventSource', 'INotifyEventSource', ([], {'fpaths': "[self._root_dir / 'Data', self._root_dir / 'Data' / 'cutscenes', self.\n _root_dir / 'ss2.exe']"}), "(fpaths=[self._root_dir / 'Data', self._root_dir / 'Data' /\n 'cutscenes', self._root_dir / 'ss2.exe'])\n", (1614, 1719), False, 'from goodsplit.sources.inotify import INotifyEventSource\n'), ((1935, 1958), 'goodsplit.time_base.MonotonicFloatSeconds', 'MonotonicFloatSeconds', ([], {}), '()\n', (1956, 1958), False, 'from goodsplit.time_base import MonotonicFloatSeconds\n')]
|
# -*- coding: utf-8 -*-
import copy
from datetime import datetime
from fiqs.i18n import _
class Field(object):
def __init__(self, type, key=None, verbose_name=None, storage_field=None,
unit=None, choices=None, data=None, parent=None, model=None):
verbose_name = verbose_name or key
storage_field = storage_field or key
choices = choices or ()
data = data or {}
self.key = key
self.type = type
self.verbose_name = verbose_name
self.storage_field = storage_field
self.unit = unit
self.choices = choices
self.data = data
self.parent = parent
if model:
self.model = model
def get_copy(self):
return self.__class__(
key=self.key, verbose_name=self.verbose_name,
storage_field=self.storage_field, unit=self.unit,
choices=self.choices, data=self.data, parent=self.parent,
)
def __repr__(self):
if hasattr(self, 'model'):
return '<{}: {}.{}>'.format(
self.__class__.__name__,
self.model.__name__,
self.key,
)
return '<{}: {}>'.format(
self.__class__.__name__,
self.key,
)
def _set_key(self, key):
self.key = key
if not self.verbose_name:
self.verbose_name = key
if not self.storage_field:
self.storage_field = key
def get_storage_field(self):
parent_field = self.get_parent_field()
if not parent_field:
return self.storage_field
return '{}.{}'.format(
parent_field.get_storage_field(),
self.storage_field,
)
def bucket_params(self):
d = {
'name': self.key,
'field': self.get_storage_field(),
'agg_type': 'terms',
}
if 'script' in self.data:
# should we remove field?
d['script'] = self.data['script'].format('_value')
if 'size' in self.data:
size = self.data['size']
if size == 0:
size = 2 ** 31 - 1
d['size'] = size
return d
def is_range(self):
return 'ranges' in self.data
def _has_pretty_choices(self):
return self.choices and isinstance(self.choices[0], tuple)
def _get_ranges_as_dict(self):
if 'ranges' not in self.data:
return None
if isinstance(self.data['ranges'][0], dict):
return self.data['ranges']
        if isinstance(self.data['ranges'][0], (tuple, list)):
ranges = []
for start, end in self.data['ranges']:
ranges.append({
'from': start,
'to': end,
'key': '{} - {}'.format(start, end),
})
return ranges
raise NotImplementedError()
def range_params(self):
params = self.bucket_params()
params['agg_type'] = 'range'
params['keyed'] = True
if 'ranges' in self.data:
params['ranges'] = self._get_ranges_as_dict()
else:
raise NotImplementedError()
return params
def choice_keys(self):
if self._has_pretty_choices():
return [choice[0] for choice in self.choices]
if self.choices:
return self.choices
if self.is_range():
return [r['key'] for r in self._get_ranges_as_dict()]
raise NotImplementedError()
def min_key(self):
if self.data and 'min' in self.data:
return self.data['min']
if self.choices:
return min(self.choice_keys())
def max_key(self):
if self.data and 'max' in self.data:
return self.data['max']
if self.choices:
return max(self.choice_keys())
def get_parent_field(self):
if not self.parent:
return None
return getattr(self.model, self.parent)
def get_casted_value(self, v):
return v
class TextField(Field):
def __init__(self, **kwargs):
super(TextField, self).__init__('text', **kwargs)
class KeywordField(Field):
def __init__(self, **kwargs):
super(KeywordField, self).__init__('keyword', **kwargs)
class DateField(Field):
def __init__(self, **kwargs):
super(DateField, self).__init__('date', **kwargs)
def get_casted_value(self, v):
# Careful, we lose the milliseconds here
return datetime.utcfromtimestamp(v / 1000)
class BaseIntegerField(Field):
def __init__(self, **kwargs):
super(BaseIntegerField, self).__init__(self.type, **kwargs)
def get_casted_value(self, v):
return int(v) if v is not None else v
class LongField(BaseIntegerField):
type = 'long'
class IntegerField(BaseIntegerField):
type = 'integer'
class ShortField(BaseIntegerField):
type = 'short'
class ByteField(BaseIntegerField):
type = 'byte'
class BaseFloatField(Field):
def __init__(self, **kwargs):
super(BaseFloatField, self).__init__(self.type, **kwargs)
def get_casted_value(self, v):
return float(v) if v is not None else v
class DoubleField(BaseFloatField):
type = 'double'
class FloatField(BaseFloatField):
type = 'float'
def get_weekdays():
return [
(0, _('Monday')),
(1, _('Tuesday')),
(2, _('Wednesday')),
(3, _('Thursday')),
(4, _('Friday')),
(5, _('Saturday')),
(6, _('Sunday')),
]
def get_iso_weekdays():
return [
(1, _('Monday')),
(2, _('Tuesday')),
(3, _('Wednesday')),
(4, _('Thursday')),
(5, _('Friday')),
(6, _('Saturday')),
(7, _('Sunday')),
]
class DayOfWeekField(ByteField):
def __init__(self, iso=True, **kwargs):
if iso:
choices = get_iso_weekdays()
data = {'min': 1, 'max': 7}
else:
choices = get_weekdays()
data = {'min': 0, 'max': 6}
kwargs['choices'] = choices
kwargs['data'] = data
super(DayOfWeekField, self).__init__(**kwargs)
class HourOfDayField(ByteField):
def __init__(self, **kwargs):
kwargs['choices'] = [
(i, _('{hour}h').format(hour=i)) for i in range(24)]
kwargs['data'] = {'min': 0, 'max': 23}
super(HourOfDayField, self).__init__(**kwargs)
class BooleanField(Field):
def __init__(self, **kwargs):
super(BooleanField, self).__init__('boolean', **kwargs)
class NestedField(Field):
def __init__(self, **kwargs):
super(NestedField, self).__init__('nested', **kwargs)
def nested_params(self):
params = {
'name': self.key,
'agg_type': 'nested',
'path': self.get_storage_field(),
}
return params
class FieldWithChoices(Field):
def __init__(self, field, choices=None):
choices = choices or ()
data = copy.deepcopy(field.data)
return super(FieldWithChoices, self).__init__(
field.type,
key=field.key,
verbose_name=field.verbose_name,
storage_field=field.storage_field,
unit=field.unit,
choices=choices,
data=data,
parent=field.parent,
model=field.model,
)
class FieldWithRanges(Field):
def __init__(self, field, ranges=None):
data = copy.deepcopy(field.data)
if ranges is not None:
data['ranges'] = ranges
return super(FieldWithRanges, self).__init__(
field.type,
key=field.key,
verbose_name=field.verbose_name,
storage_field=field.storage_field,
unit=field.unit,
choices=field.choices,
data=data,
parent=field.parent,
model=field.model,
)
class DataExtendedField(Field):
def __init__(self, field, **kwargs):
data = copy.deepcopy(field.data)
data.update(**kwargs)
return super(DataExtendedField, self).__init__(
field.type,
key=field.key,
verbose_name=field.verbose_name,
storage_field=field.storage_field,
unit=field.unit,
choices=field.choices,
data=data,
parent=field.parent,
model=field.model,
)
class GroupedField(Field):
def __init__(self, field, groups):
self.groups = groups
return super(GroupedField, self).__init__(
field.type,
key=field.key,
verbose_name=field.verbose_name,
storage_field=field.storage_field,
unit=field.unit,
choices=list(self.groups),
data=field.data,
parent=field.parent,
model=field.model,
)
def bucket_params(self):
filters = {
key: {
'terms': {
self.get_storage_field(): field_values,
}
}
for key, field_values in self.groups.items()
}
return {
'name': self.key,
'filters': filters,
'agg_type': 'filters',
}
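# Hedged usage sketch (not from the source): in fiqs, `key` and `model` are
# normally wired up by the Model metaclass; they are set by hand here only to
# show what range_params() produces. The field name is hypothetical.
def _demo_range_params():
    price = IntegerField(key='price')
    price.model = None  # placeholder for the owning Model class
    ranged = FieldWithRanges(price, ranges=[(0, 10), (10, 50)])
    return ranged.range_params()
    # -> {'name': 'price', 'field': 'price', 'agg_type': 'range', 'keyed': True,
    #     'ranges': [{'from': 0, 'to': 10, 'key': '0 - 10'},
    #                {'from': 10, 'to': 50, 'key': '10 - 50'}]}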
|
[
"datetime.datetime.utcfromtimestamp",
"copy.deepcopy",
"fiqs.i18n._"
] |
[((4641, 4676), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['(v / 1000)'], {}), '(v / 1000)\n', (4666, 4676), False, 'from datetime import datetime\n'), ((7139, 7164), 'copy.deepcopy', 'copy.deepcopy', (['field.data'], {}), '(field.data)\n', (7152, 7164), False, 'import copy\n'), ((7609, 7634), 'copy.deepcopy', 'copy.deepcopy', (['field.data'], {}), '(field.data)\n', (7622, 7634), False, 'import copy\n'), ((8152, 8177), 'copy.deepcopy', 'copy.deepcopy', (['field.data'], {}), '(field.data)\n', (8165, 8177), False, 'import copy\n'), ((5496, 5507), 'fiqs.i18n._', '_', (['"""Monday"""'], {}), "('Monday')\n", (5497, 5507), False, 'from fiqs.i18n import _\n'), ((5522, 5534), 'fiqs.i18n._', '_', (['"""Tuesday"""'], {}), "('Tuesday')\n", (5523, 5534), False, 'from fiqs.i18n import _\n'), ((5549, 5563), 'fiqs.i18n._', '_', (['"""Wednesday"""'], {}), "('Wednesday')\n", (5550, 5563), False, 'from fiqs.i18n import _\n'), ((5578, 5591), 'fiqs.i18n._', '_', (['"""Thursday"""'], {}), "('Thursday')\n", (5579, 5591), False, 'from fiqs.i18n import _\n'), ((5606, 5617), 'fiqs.i18n._', '_', (['"""Friday"""'], {}), "('Friday')\n", (5607, 5617), False, 'from fiqs.i18n import _\n'), ((5632, 5645), 'fiqs.i18n._', '_', (['"""Saturday"""'], {}), "('Saturday')\n", (5633, 5645), False, 'from fiqs.i18n import _\n'), ((5660, 5671), 'fiqs.i18n._', '_', (['"""Sunday"""'], {}), "('Sunday')\n", (5661, 5671), False, 'from fiqs.i18n import _\n'), ((5731, 5742), 'fiqs.i18n._', '_', (['"""Monday"""'], {}), "('Monday')\n", (5732, 5742), False, 'from fiqs.i18n import _\n'), ((5757, 5769), 'fiqs.i18n._', '_', (['"""Tuesday"""'], {}), "('Tuesday')\n", (5758, 5769), False, 'from fiqs.i18n import _\n'), ((5784, 5798), 'fiqs.i18n._', '_', (['"""Wednesday"""'], {}), "('Wednesday')\n", (5785, 5798), False, 'from fiqs.i18n import _\n'), ((5813, 5826), 'fiqs.i18n._', '_', (['"""Thursday"""'], {}), "('Thursday')\n", (5814, 5826), False, 'from fiqs.i18n import _\n'), ((5841, 5852), 'fiqs.i18n._', '_', (['"""Friday"""'], {}), "('Friday')\n", (5842, 5852), False, 'from fiqs.i18n import _\n'), ((5867, 5880), 'fiqs.i18n._', '_', (['"""Saturday"""'], {}), "('Saturday')\n", (5868, 5880), False, 'from fiqs.i18n import _\n'), ((5895, 5906), 'fiqs.i18n._', '_', (['"""Sunday"""'], {}), "('Sunday')\n", (5896, 5906), False, 'from fiqs.i18n import _\n'), ((6420, 6432), 'fiqs.i18n._', '_', (['"""{hour}h"""'], {}), "('{hour}h')\n", (6421, 6432), False, 'from fiqs.i18n import _\n')]
|
from geometry_msgs.msg import Pose, Point
from gazebo_msgs.srv import SpawnModel
from std_msgs.msg import Bool
from erdos.op import Op
from erdos.data_stream import DataStream
from erdos.timestamp import Timestamp
from erdos.message import Message
import rospkg
import rospy
class InsertTableOperator(Op):
"""
Inserts a Table into the currently running Gazebo environment with the
given coordinates.
"""
stream_name = "insert-table-stream"
def __init__(self, name, _x, _y, _z, ref_frame):
"""
Initialize the state of the operator.
_x: The x-coordinate of the table.
_y: The y-coordinate of the table.
_z: The z-coordinate of the table.
ref_frame: The frame in which to insert the table at the given coordinates.
"""
super(InsertTableOperator, self).__init__(name)
self.table_pose = Pose(position=Point(x=_x, y=_y, z=_z))
self.table_reference_frame = ref_frame
@staticmethod
def setup_streams(input_streams):
"""
Returns a single datastream which sends a boolean success value of the
insertion operation.
"""
return [
DataStream(
data_type=Bool,
name=InsertTableOperator.stream_name,
labels={"object": "table"})
]
def execute(self):
"""
Retrieves the model file of the table and inserts it into the current
Gazebo environment.
"""
model_path = rospkg.RosPack().get_path(
'sawyer_sim_examples') + "/models/"
with open(model_path + "cafe_table/model.sdf", "r") as table_file:
table_xml = table_file.read().replace('\n', '')
rospy.wait_for_service('/gazebo/spawn_sdf_model')
spawn_sdf = rospy.ServiceProxy('/gazebo/spawn_sdf_model', SpawnModel)
resp_sdf = spawn_sdf("cafe_table", table_xml, '/', self.table_pose,
self.table_reference_frame)
output_msg = Message(True, Timestamp(coordinates=[0]))
        self.get_output_stream(InsertTableOperator.stream_name).send(output_msg)
self.spin()
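# Note (added, illustrative): a downstream erdos operator would typically pick
# this stream out of the dataflow by its {"object": "table"} label; the exact
# subscription API is not shown in this excerpt.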
|
[
"rospkg.RosPack",
"rospy.ServiceProxy",
"geometry_msgs.msg.Point",
"erdos.timestamp.Timestamp",
"rospy.wait_for_service",
"erdos.data_stream.DataStream"
] |
[((1733, 1782), 'rospy.wait_for_service', 'rospy.wait_for_service', (['"""/gazebo/spawn_sdf_model"""'], {}), "('/gazebo/spawn_sdf_model')\n", (1755, 1782), False, 'import rospy\n'), ((1803, 1860), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""/gazebo/spawn_sdf_model"""', 'SpawnModel'], {}), "('/gazebo/spawn_sdf_model', SpawnModel)\n", (1821, 1860), False, 'import rospy\n'), ((1187, 1284), 'erdos.data_stream.DataStream', 'DataStream', ([], {'data_type': 'Bool', 'name': 'InsertTableOperator.stream_name', 'labels': "{'object': 'table'}"}), "(data_type=Bool, name=InsertTableOperator.stream_name, labels={\n 'object': 'table'})\n", (1197, 1284), False, 'from erdos.data_stream import DataStream\n'), ((2029, 2055), 'erdos.timestamp.Timestamp', 'Timestamp', ([], {'coordinates': '[0]'}), '(coordinates=[0])\n', (2038, 2055), False, 'from erdos.timestamp import Timestamp\n'), ((897, 920), 'geometry_msgs.msg.Point', 'Point', ([], {'x': '_x', 'y': '_y', 'z': '_z'}), '(x=_x, y=_y, z=_z)\n', (902, 920), False, 'from geometry_msgs.msg import Pose, Point\n'), ((1514, 1530), 'rospkg.RosPack', 'rospkg.RosPack', ([], {}), '()\n', (1528, 1530), False, 'import rospkg\n')]
|
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.contrib.auth.models import User
from django.test import TestCase
from quotes.models import Board, Quote
class TestQuotes(TestCase):
def setUp(self):
        self.board = Board.objects.create(slug='board', name='BOARD')
        self.silent_bob = User.objects.create_user(
            username='sbob',
            password='password')
def test_empty_quote(self):
with self.assertRaises(ValidationError):
self.create_quote('')
def test_single_word_text(self):
with self.assertRaises(ValidationError):
self.create_quote('*shrugs*')
def test_valid_text(self):
self.create_quote('Shut up.')
def test_edit_text(self):
quote = self.create_quote('Do something.')
quote.text = '*nods*'
with self.assertRaises(ValidationError):
quote.full_clean()
quote.text = ''
with self.assertRaises(ValidationError):
quote.full_clean()
def create_quote(self, text):
        quote = Quote.objects.create(board=self.board, author=self.silent_bob, text=text)
quote.full_clean()
return quote
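# Hedged sketch (not from the source): validation consistent with what the
# tests above assert -- quotes.models.Quote presumably rejects empty and
# single-word text with a check like this; the real implementation may differ.
def _validate_quote_text(text):
    if len(text.split()) < 2:
        raise ValidationError('A quote must contain at least two words.')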
|
[
"quotes.models.Quote.objects.create",
"django.contrib.auth.models.User.objects.create_user",
"quotes.models.Board.objects.create"
] |
[((278, 326), 'quotes.models.Board.objects.create', 'Board.objects.create', ([], {'slug': '"""board"""', 'name': '"""BOARD"""'}), "(slug='board', name='BOARD')\n", (298, 326), False, 'from quotes.models import Board, Quote\n'), ((357, 419), 'django.contrib.auth.models.User.objects.create_user', 'User.objects.create_user', ([], {'username': '"""sbob"""', 'password': '"""password"""'}), "(username='sbob', password='password')\n", (381, 419), False, 'from django.contrib.auth.models import User\n'), ((1113, 1186), 'quotes.models.Quote.objects.create', 'Quote.objects.create', ([], {'board': 'self.board', 'author': 'self.silent_bob', 'text': 'text'}), '(board=self.board, author=self.silent_bob, text=text)\n', (1133, 1186), False, 'from quotes.models import Board, Quote\n')]
|
import unittest
import json
import os
import sys
import sqlite3
sys.path.append("../../src")
from database import Database
class TestDatabase(unittest.TestCase):
@classmethod
def setUpClass(cls):
        with open(os.path.dirname(os.path.realpath(__file__)) + "/config.json") as json_raw:
            cls._config = json.loads(json_raw.read())
cls._config['database']['reset'] = True
cls.db = Database(cls._config['database'])
@classmethod
def tearDownClass(cls):
os.remove("test.db")
def testInit(self):
self.assertIsInstance(self.db, Database)
self.assertIsInstance(self.db._dbh, sqlite3.Connection)
self.assertTrue(self.db._tables_exist())
cfg2 = self._config['database']
cfg2['server'] = "test2.db"
cfg2['reset'] = False
with Database(cfg2) as db2:
self.assertIsInstance(db2, Database)
self.assertTrue(db2._tables_exist())
cfg2['type'] = 'badtype'
self.assertRaises(Exception, Database, cfg2)
os.remove('test2.db')
def testGetComicConfig(self):
if not self.db.comic_exists("test"):
self.db.insert_comic("test", "test", "test", "test", "test", 0, 0, "test", "test")
result = self.db.get_comic_config("test")
        self.assertEqual(result['name'], "test")
result2 = self.db.get_comic_config("bad")
self.assertIsNone(result2)
def testComicInsert(self):
self.db = Database(self._config['database'])
self.assertFalse(self.db.comic_exists("bad"))
self.db.insert_comic("test", "test", "test", "test", "test", 0, 0, "test", "test")
self.assertTrue(self.db.comic_exists("test"))
def testFileInsert(self):
if not self.db.comic_exists("test"):
self.db.insert_comic("test", "test", "test", "test", "test", 0, 0, "test", "test")
self.db.insert_file("test", "test", "test", "test")
result = self.db._dbh.execute("select * from files where comic='test'").fetchall()
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0][0], 'test')
def testSetLast(self):
if not self.db.comic_exists("test"):
self.db.insert_comic("test", "test", "test", "test", "test", 0, 0, "test", "test")
self.db.set_last("test", "lasturl")
result = self.db._dbh.execute("select * from comics").fetchall()
        self.assertEqual(result[0][9], "lasturl")
if __name__ == '__main__':
unittest.main()
|
[
"sys.path.append",
"unittest.main",
"os.remove",
"os.path.realpath",
"database.Database"
] |
[((65, 93), 'sys.path.append', 'sys.path.append', (['"""../../src"""'], {}), "('../../src')\n", (80, 93), False, 'import sys\n'), ((2492, 2507), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2505, 2507), False, 'import unittest\n'), ((408, 441), 'database.Database', 'Database', (["cls._config['database']"], {}), "(cls._config['database'])\n", (416, 441), False, 'from database import Database\n'), ((496, 516), 'os.remove', 'os.remove', (['"""test.db"""'], {}), "('test.db')\n", (505, 516), False, 'import os\n'), ((1042, 1063), 'os.remove', 'os.remove', (['"""test2.db"""'], {}), "('test2.db')\n", (1051, 1063), False, 'import os\n'), ((1475, 1509), 'database.Database', 'Database', (["self._config['database']"], {}), "(self._config['database'])\n", (1483, 1509), False, 'from database import Database\n'), ((825, 839), 'database.Database', 'Database', (['cfg2'], {}), '(cfg2)\n', (833, 839), False, 'from database import Database\n'), ((247, 273), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (263, 273), False, 'import os\n')]
|
#!/usr/bin/python
from Solution import Solution
obj = Solution()
A = 500
B = [1, 2, 5]
out = obj.change(A, B)
print(out)
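# Hedged sketch (not from the source): Solution.change presumably counts the
# coin combinations summing to the amount (LeetCode 518); a typical DP:
def change_sketch(amount, coins):
    dp = [1] + [0] * amount  # dp[a] = number of combinations forming amount a
    for coin in coins:
        for a in range(coin, amount + 1):
            dp[a] += dp[a - coin]
    return dp[amount]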
|
[
"Solution.Solution"
] |
[((55, 65), 'Solution.Solution', 'Solution', ([], {}), '()\n', (63, 65), False, 'from Solution import Solution\n')]
|
import encoding
def test_is_a_valid_number():
suite = range(1, 26)
assert encoding.is_valid(suite, number=26)
assert not encoding.is_valid(suite, number=50)
suite = range(2, 27)
assert encoding.is_valid(suite, number=49)
suite = list(range(3, 27)) + [49]
assert not encoding.is_valid(suite, number=100)
def test_find_from_file_non_valid_number():
assert 127 == encoding.find_first_non_valid_in_file('9/test_input.txt', preamble=5)
def test_find_contiguous_numbers_adds_up_to_invalid_number():
assert (127, [15, 25, 47, 40], 62) == encoding.find_contiguous_numbers_adds_up_to_invalid_number('9/test_input.txt', preamble=5)
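# Hedged sketch (not from the source): encoding.is_valid presumably checks
# whether `number` is the sum of two distinct entries of `suite` (Advent of
# Code 2020, day 9); a minimal version consistent with the asserts above:
from itertools import combinations

def is_valid_sketch(suite, number):
    return any(a + b == number for a, b in combinations(suite, 2))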
|
[
"encoding.find_first_non_valid_in_file",
"encoding.find_contiguous_numbers_adds_up_to_invalid_number",
"encoding.is_valid"
] |
[((84, 119), 'encoding.is_valid', 'encoding.is_valid', (['suite'], {'number': '(26)'}), '(suite, number=26)\n', (101, 119), False, 'import encoding\n'), ((207, 242), 'encoding.is_valid', 'encoding.is_valid', (['suite'], {'number': '(49)'}), '(suite, number=49)\n', (224, 242), False, 'import encoding\n'), ((135, 170), 'encoding.is_valid', 'encoding.is_valid', (['suite'], {'number': '(50)'}), '(suite, number=50)\n', (152, 170), False, 'import encoding\n'), ((296, 332), 'encoding.is_valid', 'encoding.is_valid', (['suite'], {'number': '(100)'}), '(suite, number=100)\n', (313, 332), False, 'import encoding\n'), ((397, 466), 'encoding.find_first_non_valid_in_file', 'encoding.find_first_non_valid_in_file', (['"""9/test_input.txt"""'], {'preamble': '(5)'}), "('9/test_input.txt', preamble=5)\n", (434, 466), False, 'import encoding\n'), ((573, 667), 'encoding.find_contiguous_numbers_adds_up_to_invalid_number', 'encoding.find_contiguous_numbers_adds_up_to_invalid_number', (['"""9/test_input.txt"""'], {'preamble': '(5)'}), "('9/test_input.txt',\n preamble=5)\n", (631, 667), False, 'import encoding\n')]
|
import threading
from cmg import widgets
from cmg.color import Color
from cmg.event import Event
from study_tool.card import Card
from study_tool.card_set import CardSet
from study_tool.config import Config
from study_tool.gui.generic_table_widget import GenericTableWidget
from study_tool.gui.card_set_browser_widget import CardSetBrowserWidget
from study_tool.gui.create_card_set_widget import CreateCardSetWidget
class AddCardToSetWidget(widgets.Widget):
"""
Widget for adding/removing a card from a card set.
"""
def __init__(self, card: Card, application):
super().__init__()
self.set_window_title("Card Sets Containing {}".format(card.get_russian().text))
self.__card = card
self.__application = application
self.__card_database = application.card_database
self.updated = Event(Card)
# Create widgets
self.__box_search = widgets.TextEdit()
self.__label_russian = widgets.Label("<russian>")
self.__label_english = widgets.Label("<english>")
self.__label_type = widgets.Label("<type>")
self.__button_apply = widgets.Button("Apply")
self.__button_create_set = widgets.Button("Create New Card Set")
self.__table_card_sets = GenericTableWidget()
self.__table_card_sets.add_text_column(lambda item: item.get_name(), stretch=1)
self.__table_card_sets.add_button_column("Remove", self.__on_card_set_clicked, stretch=0)
self.__table_search_results = GenericTableWidget()
self.__table_search_results.add_text_column(lambda item: item.get_name())
self.__table_search_results.add_text_column(lambda item: str(len(item.get_cards())), stretch=0)
self.__table_search_results.add_button_column("Add", self.__on_search_card_set_clicked)
self.__label_result_count = widgets.Label("<result-count>")
self.__card_set_browser = CardSetBrowserWidget(
application.card_database.get_root_package(),
close_on_select=False)
# Create layouts
layout_left = widgets.VBoxLayout()
layout_left.add(self.__label_russian)
layout_left.add(self.__label_english)
layout_left.add(self.__label_type)
layout_left.add(widgets.AbstractScrollArea(self.__table_card_sets))
layout_left.add(self.__button_apply)
layout_right = widgets.VBoxLayout()
layout_search_box = widgets.HBoxLayout()
layout_search_box.add(widgets.Label("Search:"), stretch=0)
layout_search_box.add(self.__box_search, stretch=1)
layout_right.add(layout_search_box)
layout_right.add(self.__label_result_count)
layout_right.add(widgets.AbstractScrollArea(self.__table_search_results))
layout_right.add(self.__card_set_browser)
layout_right.add(self.__button_create_set)
layout = widgets.VBoxLayout()
layout.add(widgets.HBoxLayout(layout_left, layout_right))
self.set_layout(layout)
# Connect signals
self.__box_search.text_edited.connect(self.__on_search_text_changed)
self.__box_search.return_pressed.connect(self.__on_search_return_pressed)
self.__button_apply.clicked.connect(self.apply)
self.__button_create_set.clicked.connect(self.__on_click_create_set)
self.__card_set_browser.card_set_selected.connect(self.__on_select_card_set)
self.select_card(card)
self.__box_search.focus()
def get_card(self) -> Card:
"""Get the card being edited."""
return self.__card
def select_card(self, card: Card):
"""Sets the card that is being edited."""
# Initialize with card data
self.__card = card
if self.__card.get_word_type() is not None:
self.__label_type.set_text(
"Type: " + self.__card.get_word_type().name)
else:
self.__label_type.set_text("Type:")
self.__label_russian.set_text(
"Russian: " + repr(self.__card.get_russian()))
self.__label_english.set_text(
"English: " + repr(self.__card.get_english()))
self.__table_card_sets.clear()
for card_set in self.__card_database.iter_card_sets():
if card_set.has_card(self.__card):
self.add_card_set(card_set, save=False)
self.__refresh_search_results()
    def apply(self):
        """Applies changes to the card."""
new_card_sets = self.__table_card_sets.get_items()
changed = False
# Add/remove the card from card sets
for card_set in self.__card_database.iter_card_sets():
if card_set in new_card_sets and not card_set.has_card(self.__card):
self.__card_database.add_card_to_set(self.__card, card_set)
changed = True
elif card_set not in new_card_sets and card_set.has_card(self.__card):
self.__card_database.remove_card_from_set(self.__card, card_set)
changed = True
if changed:
self.updated.emit(self.__card)
def on_close(self):
"""Called when the widget is closed."""
self.apply()
thread = threading.Thread(target=self.__card_database.save_all_changes)
thread.start()
def add_card_set(self, card_set: CardSet, save=False):
"""Add a card set to the list of card sets."""
self.__table_card_sets.add(card_set, enabled=not card_set.is_fixed_card_set())
self.__refresh_search_results()
if save:
self.apply()
def remove_card_set(self, card_set: CardSet):
"""Remove a card set from the list of card sets."""
self.__table_card_sets.remove(card_set)
self.__refresh_search_results()
self.apply()
def __on_click_create_set(self):
"""Called when Create New Card Set is clicked."""
widget = CreateCardSetWidget(
card_set_package=self.__card_set_browser.get_package(),
name=self.__box_search.get_text())
Config.app.push_gui_state(widget)
widget.card_set_created.connect(self.__on_create_card_set)
def __on_create_card_set(self, card_set: CardSet):
"""Called when a new Card Set is created."""
self.__card_set_browser.select_package(card_set.get_package())
self.add_card_set(card_set)
def __on_search_card_set_clicked(self, card_set: CardSet):
"""Called when a card set in the search results is clicked."""
self.add_card_set(card_set, save=True)
def __on_card_set_clicked(self, card_set: CardSet):
"""Called when a card set in the card set list is clicked."""
self.remove_card_set(card_set)
def __on_select_card_set(self, card_set: CardSet):
"""Called when a card set in the card set browser is clicked."""
if self.__table_card_sets.contains(card_set):
self.remove_card_set(card_set)
else:
self.add_card_set(card_set)
def __on_search_return_pressed(self):
"""Called when pressing enter in the search box."""
if not self.__table_search_results.get_items():
return
        card_set = self.__table_search_results.get_items()[0]
        self.add_card_set(card_set, save=True)
self.__box_search.select_all()
def __on_search_text_changed(self):
"""Called when the search box text changes."""
self.__refresh_search_results()
def __refresh_search_results(self):
"""Refresh the list of card search results."""
text = self.__box_search.get_text()
self.__table_search_results.clear()
result_count = 0
if text.strip():
matching_card_sets = []
for index, card_set in enumerate(self.__card_database.iter_card_sets()):
match_score = self.__matches(card_set=card_set, text=text)
if (match_score is not None and
card_set not in self.__table_card_sets.get_items()):
matching_card_sets.append((card_set, match_score))
matching_card_sets.sort(key=lambda x: x[1], reverse=True)
result_count = len(matching_card_sets)
for index, (card_set, _) in enumerate(matching_card_sets[:20]):
enabled = not card_set.is_fixed_card_set()
row = self.__table_search_results.add(card_set, enabled=enabled)
#if index == 0:
# row.set_color(Color(255, 255, 200))
self.__label_result_count.set_text("{} results".format(result_count))
def __matches(self, card_set: CardSet, text: str):
"""Check if a card set matches the given text."""
text = text.lower().replace("ё", "е")
name = card_set.get_name().text.lower().replace("ё", "е")
if text in name:
return -(len(name) - len(text))
return None
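# Example (added): __matches returns a higher (less negative) score the closer
# the query length is to the set name: text "verb" scores -1 against a set
# named "Verbs" but -11 against "Verbs of Motion" (hypothetical names), so the
# reverse sort in __refresh_search_results lists "Verbs" first.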
|
[
"cmg.widgets.Button",
"threading.Thread",
"cmg.widgets.Label",
"cmg.event.Event",
"study_tool.gui.generic_table_widget.GenericTableWidget",
"cmg.widgets.HBoxLayout",
"cmg.widgets.AbstractScrollArea",
"study_tool.config.Config.app.push_gui_state",
"cmg.widgets.VBoxLayout",
"cmg.widgets.TextEdit"
] |
[((845, 856), 'cmg.event.Event', 'Event', (['Card'], {}), '(Card)\n', (850, 856), False, 'from cmg.event import Event\n'), ((911, 929), 'cmg.widgets.TextEdit', 'widgets.TextEdit', ([], {}), '()\n', (927, 929), False, 'from cmg import widgets\n'), ((961, 987), 'cmg.widgets.Label', 'widgets.Label', (['"""<russian>"""'], {}), "('<russian>')\n", (974, 987), False, 'from cmg import widgets\n'), ((1019, 1045), 'cmg.widgets.Label', 'widgets.Label', (['"""<english>"""'], {}), "('<english>')\n", (1032, 1045), False, 'from cmg import widgets\n'), ((1074, 1097), 'cmg.widgets.Label', 'widgets.Label', (['"""<type>"""'], {}), "('<type>')\n", (1087, 1097), False, 'from cmg import widgets\n'), ((1128, 1151), 'cmg.widgets.Button', 'widgets.Button', (['"""Apply"""'], {}), "('Apply')\n", (1142, 1151), False, 'from cmg import widgets\n'), ((1187, 1224), 'cmg.widgets.Button', 'widgets.Button', (['"""Create New Card Set"""'], {}), "('Create New Card Set')\n", (1201, 1224), False, 'from cmg import widgets\n'), ((1258, 1278), 'study_tool.gui.generic_table_widget.GenericTableWidget', 'GenericTableWidget', ([], {}), '()\n', (1276, 1278), False, 'from study_tool.gui.generic_table_widget import GenericTableWidget\n'), ((1503, 1523), 'study_tool.gui.generic_table_widget.GenericTableWidget', 'GenericTableWidget', ([], {}), '()\n', (1521, 1523), False, 'from study_tool.gui.generic_table_widget import GenericTableWidget\n'), ((1842, 1873), 'cmg.widgets.Label', 'widgets.Label', (['"""<result-count>"""'], {}), "('<result-count>')\n", (1855, 1873), False, 'from cmg import widgets\n'), ((2071, 2091), 'cmg.widgets.VBoxLayout', 'widgets.VBoxLayout', ([], {}), '()\n', (2089, 2091), False, 'from cmg import widgets\n'), ((2371, 2391), 'cmg.widgets.VBoxLayout', 'widgets.VBoxLayout', ([], {}), '()\n', (2389, 2391), False, 'from cmg import widgets\n'), ((2420, 2440), 'cmg.widgets.HBoxLayout', 'widgets.HBoxLayout', ([], {}), '()\n', (2438, 2440), False, 'from cmg import widgets\n'), ((2864, 2884), 'cmg.widgets.VBoxLayout', 'widgets.VBoxLayout', ([], {}), '()\n', (2882, 2884), False, 'from cmg import widgets\n'), ((5219, 5281), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.__card_database.save_all_changes'}), '(target=self.__card_database.save_all_changes)\n', (5235, 5281), False, 'import threading\n'), ((6086, 6119), 'study_tool.config.Config.app.push_gui_state', 'Config.app.push_gui_state', (['widget'], {}), '(widget)\n', (6111, 6119), False, 'from study_tool.config import Config\n'), ((2251, 2301), 'cmg.widgets.AbstractScrollArea', 'widgets.AbstractScrollArea', (['self.__table_card_sets'], {}), '(self.__table_card_sets)\n', (2277, 2301), False, 'from cmg import widgets\n'), ((2471, 2495), 'cmg.widgets.Label', 'widgets.Label', (['"""Search:"""'], {}), "('Search:')\n", (2484, 2495), False, 'from cmg import widgets\n'), ((2689, 2744), 'cmg.widgets.AbstractScrollArea', 'widgets.AbstractScrollArea', (['self.__table_search_results'], {}), '(self.__table_search_results)\n', (2715, 2744), False, 'from cmg import widgets\n'), ((2904, 2949), 'cmg.widgets.HBoxLayout', 'widgets.HBoxLayout', (['layout_left', 'layout_right'], {}), '(layout_left, layout_right)\n', (2922, 2949), False, 'from cmg import widgets\n')]
|
from sklearn.datasets import load_iris
iris = load_iris()
from sklearn.cluster import DBSCAN
dbscan = DBSCAN(eps=0.2, metric='euclidean', min_samples=5)
import numpy
# DBSCAN(eps=0.5, metric='euclidean', min_samples=5,random_state=111)
iris = load_iris()
print(iris.feature_names)
X, y = load_iris(return_X_y=True)
X = numpy.delete(X, 1, 0)  # note: axis=0 drops a sample (row), not a feature column
# (A long run of alternative commented-out numpy.delete experiments was
# collapsed here for readability.)
dbscan.fit(X)
# from sklearn.decomposition import PCA
# import matplotlib.pyplot as pl
# pca = PCA(n_components=2).fit(X)
# pca_2d = pca.transform(X)
# for i in range(0, pca_2d.shape[0]):
# if dbscan.labels_[i] == 0:
# c1 = pl.scatter(pca_2d[i,0],pca_2d[i,1],c='r',marker='+')
# elif dbscan.labels_[i] == 1:
# c2 = pl.scatter(pca_2d[i,0],pca_2d[i,1],c='g', marker='o')
# elif dbscan.labels_[i] == -1:
# c3 = pl.scatter(pca_2d[i,0],pca_2d[i,1],c='b', marker='*')
# pl.legend([c1, c2, c3], ['Cluster 1', 'Cluster 2', 'Noise'])
# pl.title('DBSCAN finds 2 clusters and noise')
# pl.show()
from sklearn.metrics import silhouette_score
res_sil = silhouette_score(X, dbscan.labels_)
print(res_sil)
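# Added sketch: DBSCAN labels noise points -1, and silhouette_score counts
# that label as one more cluster; a common variant scores clustered points only.
mask = dbscan.labels_ != -1
if mask.sum() and len(set(dbscan.labels_[mask])) > 1:
    print(silhouette_score(X[mask], dbscan.labels_[mask]))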
|
[
"sklearn.metrics.silhouette_score",
"sklearn.datasets.load_iris",
"numpy.delete",
"sklearn.cluster.DBSCAN"
] |
[((46, 57), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (55, 57), False, 'from sklearn.datasets import load_iris\n'), ((103, 153), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {'eps': '(0.2)', 'metric': '"""euclidean"""', 'min_samples': '(5)'}), "(eps=0.2, metric='euclidean', min_samples=5)\n", (109, 153), False, 'from sklearn.cluster import DBSCAN\n'), ((248, 259), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (257, 259), False, 'from sklearn.datasets import load_iris\n'), ((295, 321), 'sklearn.datasets.load_iris', 'load_iris', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (304, 321), False, 'from sklearn.datasets import load_iris\n'), ((356, 377), 'numpy.delete', 'numpy.delete', (['X', '(1)', '(0)'], {}), '(X, 1, 0)\n', (368, 377), False, 'import numpy\n'), ((1833, 1868), 'sklearn.metrics.silhouette_score', 'silhouette_score', (['X', 'dbscan.labels_'], {}), '(X, dbscan.labels_)\n', (1849, 1868), False, 'from sklearn.metrics import silhouette_score\n')]
|
"""
aionanomsg socket library.
This is probably what you're looking for.
"""
import asyncio
from . import _nanomsg, symbols
class NNSocket(_nanomsg.NNSocket):
""" Public interface for nanomsg operations. """
def __init__(self, nn_type, domain=_nanomsg.AF_SP, loop=None):
        if loop is None:
            loop = asyncio.get_event_loop()
        self._loop = loop
self._sending = False
self._receiving = False
self._send_waiter = None
self._recv_waiter = None
self._eids = []
try:
self._create_future = self._loop.create_future
except AttributeError:
self._create_future = lambda: asyncio.Future(loop=self._loop)
super().__init__(domain, nn_type)
@property
def recv_poll_fd(self):
try:
return self._recv_poll_fd
except AttributeError:
self._recv_poll_fd = self.getsockopt(symbols.NN_SOL_SOCKET,
symbols.NN_RCVFD)
return self._recv_poll_fd
@property
def send_poll_fd(self):
try:
return self._send_poll_fd
except AttributeError:
self._send_poll_fd = self.getsockopt(symbols.NN_SOL_SOCKET,
symbols.NN_SNDFD)
return self._send_poll_fd
def bind(self, addr):
eid = self._nn_bind(addr)
self._eids.append(eid)
def connect(self, addr):
eid = self._nn_connect(addr)
self._eids.append(eid)
return eid
def getsockopt(self, level, option):
assert isinstance(level, symbols.NNSymbol), 'level must be NNSymbol'
assert isinstance(option, symbols.NNSymbol), 'option must be NNSymbol'
return self._nn_getsockopt(level, option, option.type)
def setsockopt(self, level, option, value):
assert isinstance(level, symbols.NNSymbol), 'level must be NNSymbol'
assert isinstance(option, symbols.NNSymbol), 'option must be NNSymbol'
return self._nn_setsockopt(level, option, option.type, value)
async def send(self, data):
assert not self._sending, 'send() is already running'
self._sending = True
try:
self._send(data)
except BlockingIOError:
await self._send_on_ready(data)
finally:
self._sending = False
async def recv(self):
assert not self._receiving, 'recv() is already running'
self._receiving = True
try:
return self._recv()
except BlockingIOError:
return await self._recv_on_ready()
finally:
self._receiving = False
def shutdown(self, eid):
""" Shutdown an endpoint which is sort of like a connection. """
self._nn_shutdown(eid)
def close(self):
self._nn_close()
def _recvable_event(self):
if self._recv_waiter is None:
# Under load remove_reader can fall behind another event.
return
waiter = self._recv_waiter
self._recv_waiter = None
if not waiter.cancelled():
try:
waiter.set_result(self._recv())
except Exception as e:
waiter.set_exception(e)
def _sendable_event(self):
if self._send_waiter is None:
# Under load remove_reader can fall behind another event.
return
waiter = self._send_waiter
self._send_waiter = None
if not waiter.cancelled():
try:
waiter.set_result(self._send(waiter.data))
except Exception as e:
waiter.set_exception(e)
def _send(self, data, _flags=_nanomsg.NN_DONTWAIT):
return self._nn_send(data, _flags)
def _recv(self, _flags=_nanomsg.NN_DONTWAIT):
return self._nn_recv(_flags)
def _send_on_ready(self, data):
""" Wait for socket to be writable before sending. """
assert self._send_waiter is None or self._send_waiter.cancelled(), \
'send_waiter already exists'
self._send_waiter = f = self._create_future()
f.data = data
# Even the send fd notifies via read events.
self._loop.add_reader(self.send_poll_fd, self._sendable_event)
f.add_done_callback(self._remove_send_handler)
return f
def _recv_on_ready(self):
assert self._recv_waiter is None or self._recv_waiter.cancelled(), \
'recv_waiter already exists'
self._recv_waiter = f = self._create_future()
self._loop.add_reader(self.recv_poll_fd, self._recvable_event)
f.add_done_callback(self._remove_recv_handler)
return f
def _remove_send_handler(self, f):
self._loop.remove_reader(self.send_poll_fd)
def _remove_recv_handler(self, f):
self._loop.remove_reader(self.recv_poll_fd)
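# Hedged usage sketch (not from the source): a minimal PAIR-socket round trip.
# Assumes this binding exposes symbols.NN_PAIR, which is an assumption here.
# Run with: asyncio.get_event_loop().run_until_complete(_demo_pair())
async def _demo_pair():
    a = NNSocket(symbols.NN_PAIR)
    b = NNSocket(symbols.NN_PAIR)
    a.bind('inproc://demo')
    b.connect('inproc://demo')
    await b.send(b'ping')
    print(await a.recv())
    a.close()
    b.close()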
|
[
"asyncio.get_event_loop",
"asyncio.Future"
] |
[((334, 358), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (356, 358), False, 'import asyncio\n'), ((656, 687), 'asyncio.Future', 'asyncio.Future', ([], {'loop': 'self._loop'}), '(loop=self._loop)\n', (670, 687), False, 'import asyncio\n')]
|
"""
Script to evaluate the activation functions for the selected network + grid.
"""
import sys
import os
sys.path.insert(0, os.getcwd())
import numpy as np
import subprocess
import itertools
import imageio
import json
import torch
import io
import shutil
import matplotlib.pyplot as plt
import matplotlib.ticker
from collections import defaultdict
from typing import Tuple
# From eval_VolumetricFeatures.py / Section 5.2
BEST_NETWORK = (32,4)
GRID_RESOLUTION = 32
GRID_CHANNELS = 16
activationX = ["ReLU", "Sine", "Snake:2", "SnakeAlt:1"]
BASE_PATH = 'volnet/results/eval_ActivationFunctions'
configX = [
("plume100", "config-files/plume100-v2-dvr.json", "Plume"),
("ejecta70", "config-files/ejecta70-v6-dvr.json", "Ejecta"),
("RM60", "config-files/RichtmyerMeshkov-t60-v1-dvr.json", "RM"),
#("Skull5", "neuraltextures/config-files/skull-v5-dvr.json"),
]
def main():
train()
statistics_file = eval()
make_plots(statistics_file)
def get_args_and_hdf5_file(activation, config: Tuple[str, str, str]):
"""
Assembles the command line arguments for training and the filename for the hdf5-file
with the results
:param activation: the activation function name
:param network: the network combination (channels, layers)
:return: args, filename
"""
config_name, config_settings, human_name = config
output_name = "run_%s_%s"%(config_name, activation.replace(':','-'))
parameters = [
sys.executable, "volnet/train_volnet.py",
config_settings,
"--train:mode", "world",
"--train:samples", "256**3",
"--train:batchsize", "64*64*128",
"--train:sampler_importance", "0.01",
"--val:copy_and_split",
"--outputmode", "density:direct",
"--lossmode", "density",
"-l1", "1",
"--lr_step", "50",
"-i", "200",
'--fouriercount', str((BEST_NETWORK[0]-4)//2), '--fourierstd', '1.0',
"--activation", activation,
"--layers", ':'.join([str(BEST_NETWORK[0])]*(BEST_NETWORK[1]-1)),
"--volumetric_features_resolution", str(GRID_RESOLUTION),
"--volumetric_features_channels", str(GRID_CHANNELS),
"--logdir", BASE_PATH+'/log',
"--modeldir", BASE_PATH+'/model',
"--hdf5dir", BASE_PATH+'/hdf5',
'--name', output_name,
'--save_frequency', '50'
]
hdf5_file = BASE_PATH+'/hdf5/' + output_name + ".hdf5"
return parameters, hdf5_file, output_name
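# Example (added): get_args_and_hdf5_file("Snake:2", configX[0]) yields
# output_name "run_plume100_Snake-2" and hdf5_file
# "volnet/results/eval_ActivationFunctions/hdf5/run_plume100_Snake-2.hdf5".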
def train():
print("Configurations:", len(activationX) * len(configX))
for config in configX:
for activation in activationX:
args, filename, _ = get_args_and_hdf5_file(activation, config)
if os.path.exists(filename):
print("Skipping test", filename)
else:
print("\n=====================================\nRun", filename)
subprocess.run(args, check=True)
print("\n===========================================\nDONE!")
def eval():
print("Evaluate")
statistics_file = os.path.join(BASE_PATH, 'stats.json')
if os.path.exists(statistics_file):
print("Statistics file already exists!")
return statistics_file
import common.utils as utils
import pyrenderer
from volnet.inference import LoadedModel
from losses.lossbuilder import LossBuilder
num_cameras = 64
width = 512
height = 512
STEPSIZE = 1 / 512
timer = pyrenderer.GPUTimer()
if os.name != 'nt':
rendering_mode = LoadedModel.EvaluationMode.PYTORCH16
else:
rendering_mode = LoadedModel.EvaluationMode.TENSORCORES_MIXED
enable_preintegration = True
device = torch.device('cuda')
ssim_loss = LossBuilder(device).ssim_loss(4)
lpips_loss = LossBuilder(device).lpips_loss(4, 0.0, 1.0)
def compute_stats(ln: LoadedModel, mode, reference_images, stepsize, filename_template=None,
do_ssim=False, do_lpips=False):
timingsX = []
ssimX = []
lpipsX = []
for i in range(num_cameras):
if enable_preintegration:
ln.enable_preintegration(True, convert_to_texture=True)
else:
ln.enable_preintegration(False)
current_image = ln.render_network(
cameras[i], width, height, mode,
stepsize, timer=timer)
if i > 0:
timingsX.append(timer.elapsed_milliseconds())
if filename_template is not None:
imageio.imwrite(
filename_template % i,
LoadedModel.convert_image(current_image))
if do_ssim:
ssimX.append(ssim_loss(current_image, reference_images[i]).item())
if do_lpips:
lpipsX.append(lpips_loss(current_image, reference_images[i]).item())
return \
(np.mean(timingsX), np.std(timingsX)), \
(np.mean(ssimX), np.std(ssimX)) if do_ssim else (np.NaN, np.NaN), \
(np.mean(lpipsX), np.std(lpipsX)) if do_lpips else (np.NaN, np.NaN)
# load networks
def load_and_save(activation, config):
_, filename, output_name = get_args_and_hdf5_file(activation, config)
filename = os.path.abspath(filename)
if not os.path.exists(filename):
print("File not found:", filename, file=sys.stderr)
return None, None
try:
ln = LoadedModel(filename)
# if enable_preintegration:
# ln.enable_preintegration(True)
ln.save_compiled_network(filename.replace('.hdf5', '.volnet'))
return ln, output_name
except Exception as e:
print("Unable to load '%s':" % filename, e)
return None, None
output_stats = {}
for cfg_index, config in enumerate(configX):
image_folder = os.path.join(BASE_PATH, "images_" + config[0])
local_stats = {
'cfg_index': cfg_index,
'cfg': config[1]}
reference_images = None
# collect models
lns = dict()
base_ln = None
for activation in activationX:
ln, name = load_and_save(activation, config)
lns[activation] = (ln, name)
if base_ln is None: base_ln = ln
# render reference
image_folder_reference = os.path.join(image_folder, "reference")
os.makedirs(image_folder_reference, exist_ok=True)
print("\n===================================== Render reference", cfg_index)
cameras = base_ln.get_rotation_cameras(num_cameras)
reference_images = [None] * num_cameras
for i in range(num_cameras):
reference_images[i] = base_ln.render_reference(cameras[i], width, height)
imageio.imwrite(
os.path.join(image_folder_reference, 'reference%03d.png' % i),
LoadedModel.convert_image(reference_images[i]))
# render networks
for activation in activationX:
ln, name = lns[activation]
if ln is None:
print("Skip", name, ", network is None")
continue
print("Render", name)
image_folder_screen = os.path.join(image_folder, "%s" % name)
os.makedirs(image_folder_screen, exist_ok=True)
time, ssim, lpips = compute_stats(
ln, rendering_mode, reference_images, STEPSIZE,
os.path.join(image_folder_screen, 'img%03d.png'),
True, True)
local_stats[name] = {
'time': time,
'ssim': ssim,
'lpips': lpips,
}
output_stats[config[0]] = local_stats
# save statistics
print("\n===================================== Done, save statistics")
with open(statistics_file, "w") as f:
json.dump(output_stats, f)
return statistics_file
def make_plots(statistics_file):
print("\n===================================== Make Plots")
with open(statistics_file, "r") as f:
stats = json.load(f)
output_folder = os.path.split(statistics_file)[0]
statNames = ['SSIM $\\uparrow$', 'LPIPS $\\downarrow$']
statTags = ["ssim", "lpips"]
statAggregation = [max, min]
latex = io.StringIO()
# latex header
latex.write("\\begin{tabular}{r%s}\n" % ("cc" * (len(configX))))
latex.write("\\toprule\n")
latex.write("\\multirow{2}{*}{Activation}")
for config in configX:
latex.write(" & \\multicolumn{2}{c}{%s}"%config[2])
latex.write("\\\\\n")
for config in configX:
latex.write(" & %s & %s" % tuple(statNames))
latex.write("\\\\\n")
latex.write("\n\\midrule\n")
best_per_dataset = dict()
for config in configX:
cfg_index = stats[config[0]]['cfg_index']
for tag, aggr in zip(statTags, statAggregation):
values = []
for activation in activationX:
_, _, n = get_args_and_hdf5_file(activation, configX[cfg_index])
v = "%.4f" % stats[config[0]][n][tag][0]
values.append(v)
best_per_dataset[(cfg_index, tag)] = aggr(values)
# main content
for activation in activationX:
latex.write(activation.split(':')[0])
for config in configX:
cfg_index = stats[config[0]]['cfg_index']
_, _, n = get_args_and_hdf5_file(activation, configX[cfg_index])
for tag in statTags:
v = "%.4f"%stats[config[0]][n][tag][0]
if v == best_per_dataset[(cfg_index, tag)]:
latex.write(" & $\\bm{%s}$"%v)
else:
latex.write(" & $%s$" % v)
latex.write("\\\\\n")
#footer
latex.write("\n\\bottomrule\n")
latex.write("\\end{tabular}\n")
latex = latex.getvalue()
with open(os.path.join(output_folder, "ActivationFunctions.tex"), 'w') as f:
f.write(latex)
print(latex)
print("Done")
if __name__ == '__main__':
main()
|
[
"pyrenderer.GPUTimer",
"io.StringIO",
"os.path.abspath",
"json.dump",
"os.makedirs",
"json.load",
"volnet.inference.LoadedModel",
"os.getcwd",
"subprocess.run",
"numpy.std",
"os.path.exists",
"losses.lossbuilder.LossBuilder",
"volnet.inference.LoadedModel.convert_image",
"numpy.mean",
"torch.device",
"os.path.split",
"os.path.join"
] |
[((134, 145), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (143, 145), False, 'import os\n'), ((3188, 3225), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""stats.json"""'], {}), "(BASE_PATH, 'stats.json')\n", (3200, 3225), False, 'import os\n'), ((3234, 3265), 'os.path.exists', 'os.path.exists', (['statistics_file'], {}), '(statistics_file)\n', (3248, 3265), False, 'import os\n'), ((3598, 3619), 'pyrenderer.GPUTimer', 'pyrenderer.GPUTimer', ([], {}), '()\n', (3617, 3619), False, 'import pyrenderer\n'), ((3842, 3862), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (3854, 3862), False, 'import torch\n'), ((8557, 8570), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (8568, 8570), False, 'import io\n'), ((5445, 5470), 'os.path.abspath', 'os.path.abspath', (['filename'], {}), '(filename)\n', (5460, 5470), False, 'import os\n'), ((6086, 6132), 'os.path.join', 'os.path.join', (['BASE_PATH', "('images_' + config[0])"], {}), "(BASE_PATH, 'images_' + config[0])\n", (6098, 6132), False, 'import os\n'), ((6583, 6622), 'os.path.join', 'os.path.join', (['image_folder', '"""reference"""'], {}), "(image_folder, 'reference')\n", (6595, 6622), False, 'import os\n'), ((6632, 6682), 'os.makedirs', 'os.makedirs', (['image_folder_reference'], {'exist_ok': '(True)'}), '(image_folder_reference, exist_ok=True)\n', (6643, 6682), False, 'import os\n'), ((8127, 8153), 'json.dump', 'json.dump', (['output_stats', 'f'], {}), '(output_stats, f)\n', (8136, 8153), False, 'import json\n'), ((8343, 8355), 'json.load', 'json.load', (['f'], {}), '(f)\n', (8352, 8355), False, 'import json\n'), ((8377, 8407), 'os.path.split', 'os.path.split', (['statistics_file'], {}), '(statistics_file)\n', (8390, 8407), False, 'import os\n'), ((2834, 2858), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (2848, 2858), False, 'import os\n'), ((3880, 3899), 'losses.lossbuilder.LossBuilder', 'LossBuilder', (['device'], {}), '(device)\n', (3891, 3899), False, 'from losses.lossbuilder import LossBuilder\n'), ((3931, 3950), 'losses.lossbuilder.LossBuilder', 'LossBuilder', (['device'], {}), '(device)\n', (3942, 3950), False, 'from losses.lossbuilder import LossBuilder\n'), ((5487, 5511), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (5501, 5511), False, 'import os\n'), ((5641, 5662), 'volnet.inference.LoadedModel', 'LoadedModel', (['filename'], {}), '(filename)\n', (5652, 5662), False, 'from volnet.inference import LoadedModel\n'), ((7470, 7509), 'os.path.join', 'os.path.join', (['image_folder', "('%s' % name)"], {}), "(image_folder, '%s' % name)\n", (7482, 7509), False, 'import os\n'), ((7523, 7570), 'os.makedirs', 'os.makedirs', (['image_folder_screen'], {'exist_ok': '(True)'}), '(image_folder_screen, exist_ok=True)\n', (7534, 7570), False, 'import os\n'), ((10190, 10244), 'os.path.join', 'os.path.join', (['output_folder', '"""ActivationFunctions.tex"""'], {}), "(output_folder, 'ActivationFunctions.tex')\n", (10202, 10244), False, 'import os\n'), ((3027, 3059), 'subprocess.run', 'subprocess.run', (['args'], {'check': '(True)'}), '(args, check=True)\n', (3041, 3059), False, 'import subprocess\n'), ((5077, 5094), 'numpy.mean', 'np.mean', (['timingsX'], {}), '(timingsX)\n', (5084, 5094), True, 'import numpy as np\n'), ((5096, 5112), 'numpy.std', 'np.std', (['timingsX'], {}), '(timingsX)\n', (5102, 5112), True, 'import numpy as np\n'), ((7051, 7112), 'os.path.join', 'os.path.join', (['image_folder_reference', "('reference%03d.png' % i)"], {}), "(image_folder_reference, 
'reference%03d.png' % i)\n", (7063, 7112), False, 'import os\n'), ((7131, 7177), 'volnet.inference.LoadedModel.convert_image', 'LoadedModel.convert_image', (['reference_images[i]'], {}), '(reference_images[i])\n', (7156, 7177), False, 'from volnet.inference import LoadedModel\n'), ((7701, 7749), 'os.path.join', 'os.path.join', (['image_folder_screen', '"""img%03d.png"""'], {}), "(image_folder_screen, 'img%03d.png')\n", (7713, 7749), False, 'import os\n'), ((4782, 4822), 'volnet.inference.LoadedModel.convert_image', 'LoadedModel.convert_image', (['current_image'], {}), '(current_image)\n', (4807, 4822), False, 'from volnet.inference import LoadedModel\n'), ((5131, 5145), 'numpy.mean', 'np.mean', (['ssimX'], {}), '(ssimX)\n', (5138, 5145), True, 'import numpy as np\n'), ((5147, 5160), 'numpy.std', 'np.std', (['ssimX'], {}), '(ssimX)\n', (5153, 5160), True, 'import numpy as np\n'), ((5212, 5227), 'numpy.mean', 'np.mean', (['lpipsX'], {}), '(lpipsX)\n', (5219, 5227), True, 'import numpy as np\n'), ((5229, 5243), 'numpy.std', 'np.std', (['lpipsX'], {}), '(lpipsX)\n', (5235, 5243), True, 'import numpy as np\n')]
|
"""Module for controlling Qmotion blinds through a Qsync controller."""
__version__ = "0.1.0"
import socket
from socket import timeout
import logging
from .position import Position
from .const import DEFAULT_TIMEOUT
from .const import TCP_PORT
from .const import BROADCAST_ADDRESS
from .const import UDP_PORT
from .exceptions import InputError, QmotionConnectionError, Timeout, UnexpectedDataError
class ShadeGroup:
"""Class representing a shade group, previously created through the qsync application"""
    def __init__(self, channel:int, name:str="", code:str="", mac_address:str=""):
self.channel = channel
self.name = name
self.code = code
self.mac_address = mac_address
class Scene:
    """Class representing a scene, previously created through the qsync application
    """
def __init__(self, name: str, command_list: list, mac_address:str=""):
self.name = name
self.command_list = command_list
self.mac_address = mac_address
class ShadeGroupCommand:
"""Class representing a command for a shade group - which shade and which position
group: ShadeGroup to change position
percentage: 0-100 percentage to close the group (0 = full open, 100 = full closed)
position_code: internal position code string, specify either percentage or position_code
"""
def __init__(self, group:ShadeGroup, percentage:int=-1, position_code:str=""):
self.group = group
self.percentage = percentage
self.position_code = position_code
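# Hedged usage sketch (not from the source): close the channel-3 group halfway.
# The host address and group name are hypothetical; set_group_position opens a
# TCP connection, so the function is defined here but never called.
def _demo_half_close():
    qsync = Qsync('192.168.1.50')
    group = ShadeGroup(channel=3, name='Living Room')
    qsync.set_group_position([ShadeGroupCommand(group, percentage=50)])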
class Qsync:
"""Class representing an Qsync controller
host: hostname or ip address
socket_timeout: optional socket timeout that will overwrite default
group_list: list of ShadeGroup objects (only in fully populated Qsync object)
scene_list: list of Scene objects (only in fully populated Qsync object)
"""
def __init__(self, host: str, socket_timeout:int=DEFAULT_TIMEOUT, set_groups_and_scenes:bool=False):
self.host = host
self.socket_timeout = socket_timeout
self.group_list = []
self.scene_list = []
# Will be defined during discovery, not used otherwise
self.name = ""
self.mac_address = ""
if set_groups_and_scenes:
self.set_groups_and_scenes()
    def set_group_position(self, group_command: list) -> list:
        """Set position of a list of shade groups.
        group_command: List of ShadeGroupCommand objects to set. Note: you may only specify
        from 1 to 8 different shade group command objects in a list
        Returns a list of ShadeGroupCommand objects echoing the positions actually set.
        """
if len(group_command) > 8:
raise InputError("Cannot specify more than eight groups to control")
# returned values can vary from input, so update for return
response_list = []
command_body = ''
for command in group_command:
channel_code = int_to_hex(command.group.channel)
if command.position_code:
position = Position.get_position_code(command.position_code)
else:
position = Position.get_position(command.percentage)
command_code = position.command_code
command_body += '000000' + channel_code + command_code
response_list.append(
                ShadeGroupCommand(command.group, position.position_times_ten / 10))
socket_tcp = None
try:
socket_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_tcp.settimeout(self.socket_timeout)
logging.debug("Qsync: connect host [%s], port [%d]", self.host, TCP_PORT)
socket_tcp.connect((self.host, TCP_PORT))
command_body_length = int(len(command_body)/2) # number of bytes
# Example: '1b050000000901'
command = '1b' + int_to_hex(command_body_length) + command_body
socket_tcp.send(bytes.fromhex(command))
logging.debug('Qsync: send [%s]', command)
data = socket_tcp.recv(2048)
logging.debug('Qsync: receive [%s]', bytes_to_hex(data))
return response_list
        except socket.error as err:
            error_message = "Could not connect to qsync host [{host}], port [{tcp_port}]".format(
                host=self.host, tcp_port=TCP_PORT)
            logging.debug(error_message)
            raise QmotionConnectionError(error_message) from err
finally:
if socket_tcp is not None:
socket_tcp.close()
    def set_scene(self, name: str) -> None:
"""
Set a number of blinds into a previous-defined scene.
name: Plain language name of the scene to set.
"""
        if not self.scene_list:
            self.set_groups_and_scenes()
for scene in self.scene_list:
if name == scene.name:
self.set_group_position(scene.command_list)
def set_groups_and_scenes(self) -> None:
"""
Get the list of groups and scenes defined within the qsync device and set into qsync.
"""
socket_tcp = None
try:
socket_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
logging.debug("Qsync: connect host [%s], port [%d]", self.host, TCP_PORT)
socket_tcp.connect((self.host, TCP_PORT))
socket_tcp.settimeout(self.socket_timeout)
try:
data_in_hex = send_header(socket_tcp)
except UnexpectedDataError:
# Qsync gets into a bad state, clear it out and try again
clear_socket(socket_tcp)
data_in_hex = send_header(socket_tcp)
body = data_in_hex[4:]
number_of_groups = int(body[2:4], 16)
logging.debug('Qsync: number of groups [%s]', number_of_groups)
number_of_scenes = int(body[6:8], 16)
logging.debug('Qsync: number of scenes [%s]', number_of_scenes)
# Parse groups and scenes
groups = []
scenes = []
for _ in range(number_of_groups + number_of_scenes):
# Qsync sometimes appends the first group/scene onto the header
if is_header(data_in_hex) and len(data_in_hex) > 12:
data_in_hex = body[12:]
else:
data = socket_tcp.recv(2048)
data_in_hex = bytes_to_hex(data)
logging.debug('Qsync: receive [%s]', data_in_hex)
if is_group(data_in_hex):
group = parse_group(data_in_hex)
groups.append(group)
if not self.mac_address:
                        # The mac address is really that of the qsync; set it as a convenience
self.mac_address = group.mac_address
if is_scene(data_in_hex):
scene = parse_scene(data_in_hex)
scenes.append(scene)
if not self.mac_address:
                        # The mac address is really that of the qsync; set it as a convenience
self.mac_address = scene.mac_address
group_dict = build_group_dict(groups)
for scene in scenes:
hydrate_scene(scene=scene, groups=group_dict)
self.group_list = groups
self.scene_list = scenes
        except socket.error as err:
            error_message = "Could not connect to qsync host [{host}], port [{tcp_port}]".format(
                host=self.host, tcp_port=TCP_PORT)
            logging.debug(error_message)
            raise QmotionConnectionError(error_message) from err
finally:
if socket_tcp is not None:
socket_tcp.close()
def discover_qsync(socket_timeout:int = DEFAULT_TIMEOUT) -> Qsync:
"""
Search for Qsync device on the local network.
Note: uses UDP
    Returns a Qsync object populated with the groups and scenes associated with this qsync device
"""
# Single 00 byte
message = bytes(1)
address = (BROADCAST_ADDRESS, UDP_PORT)
socket_udp = None
try:
socket_udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
socket_udp.settimeout(socket_timeout)
socket_udp.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
socket_udp.sendto(message, address)
(data, (host, _port)) = socket_udp.recvfrom(1024)
data_in_hex = bytes_to_hex(data)
name_in_hex = data_in_hex[:30]
name = bytes.fromhex(name_in_hex).decode().rstrip('\x00').strip()
mac_address = data_in_hex[32:44]
logging.debug('Qsync: found qsync at [%s], name [%s], mac [%s]', host, name,
mac_address)
retval = Qsync(host)
retval.name = name
retval.mac_address = mac_address
retval.set_groups_and_scenes()
return retval
    except Exception as err:
        error_message = "Could not connect to qsync"
        logging.debug(error_message)
        raise QmotionConnectionError(error_message) from err
finally:
if socket_udp is not None:
socket_udp.close()
def int_to_hex(input_int:int) -> str:
""" Convert integer to hex string"""
return '{:02x}'.format(input_int)
def bytes_to_hex(input_bytes:bytes) -> str:
""" Convert bytes to hex string"""
return ''.join('{:02x}'.format(x) for x in input_bytes)
def is_header(data_in_hex:str) -> bool:
"""
Check if input is a header line.
A header is a control character string from Qsync. This is important to know if you are
correctly at the start of the conversation or if you're picking it up midway.
"""
return data_in_hex[:4] == '1604'
def is_group(data_in_hex:str) -> bool:
"""
Check if input is a group line.
A group is one or more blinds that will all simultaneously react to group commands. Note that
group membership is not stored on Qsync. Instead, each blind is manually programmed to groups
and only the blinds themselves know which groups they belong to.
"""
return data_in_hex[:4] == '162c'
def is_scene(data_in_hex:str) -> bool:
"""
Check if input is a scene line.
    A scene is between one and eight groups, each group with a defined position. Note that scenes
    are not implemented like groups - the blinds do not understand scenes. Instead, the Qsync stores
    the scene <-> group relationship and then calls for each group to move in a single command.
    Therefore, there is nothing a scene can do that could not be accomplished by directly
    sending a list of groups and positions.
"""
return data_in_hex[:4] == '163b'
def parse_group(data_in_hex:str) -> ShadeGroup:
"""Parse a group entity from the group list returned by Qsync"""
name_in_hex = data_in_hex[52:]
name = bytes.fromhex(name_in_hex).decode().rstrip('\x00')
code = data_in_hex[48:52]
channel_in_hex = data_in_hex[6:8]
channel = int(channel_in_hex, 16)
mac_address = data_in_hex[22:34]
logging.debug('Qsync: Group name [%s], channel [%d], code [%s]', name, channel, code)
return ShadeGroup(channel=channel, name=name, code=code, mac_address=mac_address)
def parse_scene(data_in_hex:str) -> Scene:
"""Parse a scene entity from the group list returned by Qsync"""
name_in_hex = data_in_hex[82:]
name = bytes.fromhex(name_in_hex).decode().rstrip('\x00')
groups_in_hex = data_in_hex[6:54]
mac_address = data_in_hex[54:66]
logging.debug('Qsync: Scene name [%s]', name)
command_list = []
groups_in_hex_list = [groups_in_hex[i:i+6] for i in range(0, len(groups_in_hex), 6)]
for group_in_hex in groups_in_hex_list:
if group_in_hex == '000000':
break
code = group_in_hex[:4]
position_code = group_in_hex[4:]
command = ShadeGroupCommand(ShadeGroup(channel=0, code=code), position_code=position_code)
logging.debug('Qsync: ShadeGroupCommand scene [%s], code [%s], position_code [%s]', name,
code, position_code)
command_list.append(command)
return Scene(name=name, command_list=command_list, mac_address=mac_address)
def build_group_dict(groups:list) -> dict:
"""Build a dict of groups, code -> group."""
group_dict = {}
for group in groups:
group_dict[group.code] = group
return group_dict
def hydrate_scene(scene:Scene, groups:list) -> None:
"""Expand scene entities with referenced groups"""
for command in scene.command_list:
command.group = groups[command.group.code]
def clear_socket(socket_tcp: socket) -> None:
"""
Read all data in the socket
    Qsync does not appear to honor closed tcp connections - the connection continues at the same
    location as the prior conversation. This can cause havoc for the next call. This method will
    read and discard all unexpected data, leaving the socket ready to start over again.
"""
# We're going to time out here, it's expected. Might as well make it a short timeout then.
socket_tcp.settimeout(1)
try:
while True:
data = socket_tcp.recv(2048)
logging.debug('Qsync: clear socket [%s]', bytes_to_hex(data))
    except socket.timeout:
# Expected - we can't know where we were in the data so read until timeout
logging.debug("Caught expected timeout after clearing socket")
return
def send_header(socket_tcp: socket) -> str:
""" Send a header requst to Qsync"""
command = '1600'
socket_tcp.send(bytes.fromhex(command))
logging.debug('Qsync: send [%s]', command)
data = socket_tcp.recv(2048)
data_in_hex = bytes_to_hex(data)
logging.debug('Qsync: receive [%s]', data_in_hex)
if not is_header(data_in_hex):
raise UnexpectedDataError("Header not received as expected")
    # Not sure what happens here; sometimes qsync 'freaks out' and returns this
# instead of the real data. Trying again seems to clear it out.
if data_in_hex == '1604ffffffff':
raise UnexpectedDataError("Header not received as expected")
return data_in_hex
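# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of driving the API above; it assumes a Qsync device is
# reachable on the local network and that discovery finds groups/scenes.
def _example_usage():
    qsync = discover_qsync()
    if qsync.group_list:
        # Move the first discovered group to 50% closed.
        command = ShadeGroupCommand(qsync.group_list[0], percentage=50)
        qsync.set_group_position([command])
    if qsync.scene_list:
        # Equivalent to sending the scene's stored group/position commands directly.
        qsync.set_scene(qsync.scene_list[0].name)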
|
[
"socket.socket",
"logging.debug"
] |
[((11075, 11164), 'logging.debug', 'logging.debug', (['"""Qsync: Group name [%s], channel [%d], code [%s]"""', 'name', 'channel', 'code'], {}), "('Qsync: Group name [%s], channel [%d], code [%s]', name,\n channel, code)\n", (11088, 11164), False, 'import logging\n'), ((11538, 11583), 'logging.debug', 'logging.debug', (['"""Qsync: Scene name [%s]"""', 'name'], {}), "('Qsync: Scene name [%s]', name)\n", (11551, 11583), False, 'import logging\n'), ((13611, 13653), 'logging.debug', 'logging.debug', (['"""Qsync: send [%s]"""', 'command'], {}), "('Qsync: send [%s]', command)\n", (13624, 13653), False, 'import logging\n'), ((13729, 13778), 'logging.debug', 'logging.debug', (['"""Qsync: receive [%s]"""', 'data_in_hex'], {}), "('Qsync: receive [%s]', data_in_hex)\n", (13742, 13778), False, 'import logging\n'), ((8171, 8219), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (8184, 8219), False, 'import socket\n'), ((8646, 8739), 'logging.debug', 'logging.debug', (['"""Qsync: found qsync at [%s], name [%s], mac [%s]"""', 'host', 'name', 'mac_address'], {}), "('Qsync: found qsync at [%s], name [%s], mac [%s]', host, name,\n mac_address)\n", (8659, 8739), False, 'import logging\n'), ((11976, 12095), 'logging.debug', 'logging.debug', (['"""Qsync: ShadeGroupCommand scene [%s], code [%s], position_code [%s]"""', 'name', 'code', 'position_code'], {}), "(\n 'Qsync: ShadeGroupCommand scene [%s], code [%s], position_code [%s]',\n name, code, position_code)\n", (11989, 12095), False, 'import logging\n'), ((3459, 3508), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (3472, 3508), False, 'import socket\n'), ((3576, 3649), 'logging.debug', 'logging.debug', (['"""Qsync: connect host [%s], port [%d]"""', 'self.host', 'TCP_PORT'], {}), "('Qsync: connect host [%s], port [%d]', self.host, TCP_PORT)\n", (3589, 3649), False, 'import logging\n'), ((3965, 4007), 'logging.debug', 'logging.debug', (['"""Qsync: send [%s]"""', 'command'], {}), "('Qsync: send [%s]', command)\n", (3978, 4007), False, 'import logging\n'), ((5161, 5210), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (5174, 5210), False, 'import socket\n'), ((5223, 5296), 'logging.debug', 'logging.debug', (['"""Qsync: connect host [%s], port [%d]"""', 'self.host', 'TCP_PORT'], {}), "('Qsync: connect host [%s], port [%d]', self.host, TCP_PORT)\n", (5236, 5296), False, 'import logging\n'), ((5785, 5848), 'logging.debug', 'logging.debug', (['"""Qsync: number of groups [%s]"""', 'number_of_groups'], {}), "('Qsync: number of groups [%s]', number_of_groups)\n", (5798, 5848), False, 'import logging\n'), ((5911, 5974), 'logging.debug', 'logging.debug', (['"""Qsync: number of scenes [%s]"""', 'number_of_scenes'], {}), "('Qsync: number of scenes [%s]', number_of_scenes)\n", (5924, 5974), False, 'import logging\n'), ((9004, 9032), 'logging.debug', 'logging.debug', (['error_message'], {}), '(error_message)\n', (9017, 9032), False, 'import logging\n'), ((13378, 13440), 'logging.debug', 'logging.debug', (['"""Caught expected timeout after clearing socket"""'], {}), "('Caught expected timeout after clearing socket')\n", (13391, 13440), False, 'import logging\n'), ((4343, 4371), 'logging.debug', 'logging.debug', (['error_message'], {}), '(error_message)\n', (4356, 4371), False, 'import logging\n'), ((6461, 6510), 'logging.debug', 'logging.debug', 
(['"""Qsync: receive [%s]"""', 'data_in_hex'], {}), "('Qsync: receive [%s]', data_in_hex)\n", (6474, 6510), False, 'import logging\n'), ((7589, 7617), 'logging.debug', 'logging.debug', (['error_message'], {}), '(error_message)\n', (7602, 7617), False, 'import logging\n')]
|
from bs4 import BeautifulSoup
from .get_html import get_html
from src.sqlite_writer import sql_writer
from urllib.parse import urlparse, parse_qs
async def process_index_page(session, url):
html = await get_html(session, url)
soup = BeautifulSoup(html, 'html.parser')
items = soup.find_all('td', height='20')
trs = [item.find_parent('tr').find_all('td') for item in items]
sql_writer([
tuple([
parse_qs(urlparse(tds[0].find('a').get('href')).query)['creature_id'][0],
tds[0].get_text(),
tds[1].get_text(),
tds[2].get_text(),
tds[3].get_text(),
tds[4].get_text(),
tds[5].get_text(),
tds[6].get_text(),
tds[7].get_text(),
]) for tds in trs
])
return soup.find('font', color='#FF0000').parent.find_next('a')
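# Illustrative driver for the coroutine above. A sketch only: it assumes
# get_html accepts an aiohttp.ClientSession as its session argument.
import aiohttp
async def crawl(start_url):
    async with aiohttp.ClientSession() as session:
        # process_index_page persists rows via sql_writer and returns the
        # <a> tag pointing at the next index page, e.g. for a crawl loop.
        return await process_index_page(session, start_url)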
|
[
"bs4.BeautifulSoup"
] |
[((239, 273), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (252, 273), False, 'from bs4 import BeautifulSoup\n')]
|
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import traceback
from devops.helpers.helpers import wait
from proboscis import asserts
from proboscis import test
from fuelweb_test.helpers.decorators import log_snapshot_on_error
from fuelweb_test.helpers import os_actions
from fuelweb_test.helpers import checkers
from fuelweb_test import logger
from fuelweb_test.settings import DEPLOYMENT_MODE_SIMPLE
from fuelweb_test.settings import LBAAS_PLUGIN_PATH
from fuelweb_test.settings import NEUTRON_SEGMENT_TYPE
from fuelweb_test.tests.base_test_case import SetupEnvironment
from fuelweb_test.tests.base_test_case import TestBasic
@test(enabled=False, groups=["plugins"])
class LbaasPlugin(TestBasic):
@classmethod
def check_neutron_agents_statuses(cls, os_conn):
agents_list = os_conn.list_agents()
for a in agents_list['agents']:
asserts.assert_equal(
a['alive'], True,
'Neutron agent {0} is not alive'. format(a['binary']))
asserts.assert_true(
a['admin_state_up'],
"Admin state is down for agent {0}".format(a['binary']))
lb_agent = [a for a in agents_list["agents"]
if a['binary'] == 'neutron-lbaas-agent']
logger.debug("LbaaS agent list is {0}".format(lb_agent))
asserts.assert_equal(
len(lb_agent), 1,
'There is not LbaaS agent in neutron agent list output')
@classmethod
def check_lbass_work(cls, os_conn):
# create pool
pool = os_conn.create_pool(pool_name='lbaas_pool')
        logger.debug('pool is {0}'.format(pool))
# create vip
vip = os_conn.create_vip(name='lbaas_vip',
protocol='HTTP',
port=80,
pool=pool)
logger.debug('vip is {0}'.format(vip))
# get list of vips
lb_vip_list = os_conn.get_vips()
logger.debug(
'Initial state of vip is {0}'.format(
os_conn.get_vip(lb_vip_list['vips'][0]['id'])))
# wait for active status
try:
wait(lambda: os_conn.get_vip(
lb_vip_list['vips'][0]['id'])['vip']['status'] == 'ACTIVE',
timeout=120 * 60)
        except Exception:
logger.error(traceback.format_exc())
vip_state = os_conn.get_vip(
lb_vip_list['vips'][0]['id'])['vip']['status']
asserts.assert_equal(
'ACTIVE', vip_state,
"Vip is not active, current state is {0}".format(vip_state))
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["deploy_neutron_lbaas_simple"])
@log_snapshot_on_error
def deploy_neutron_lbaas_simple(self):
"""Deploy cluster in simple mode with LbaaS plugin
Scenario:
1. Upload plugin to the master node
2. Install plugin
3. Create cluster
4. Add 1 node with controller role
5. Add 2 nodes with compute role
6. Deploy the cluster
7. Run network verification
8. Check health of lbaas agent on the node
9. Create pool and vip
10. Run OSTF
Duration 35m
Snapshot deploy_neutron_vlan_lbaas_simple
"""
self.env.revert_snapshot("ready_with_3_slaves")
# copy plugin to the master node
checkers.upload_tarball(
self.env.get_admin_remote(), LBAAS_PLUGIN_PATH, '/var')
# install plugin
checkers.install_plugin_check_code(
self.env.get_admin_remote(),
plugin=os.path.basename(LBAAS_PLUGIN_PATH))
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE_SIMPLE,
settings={
"net_provider": 'neutron',
"net_segment_type": NEUTRON_SEGMENT_TYPE,
}
)
attr = self.fuel_web.client.get_cluster_attributes(cluster_id)
if 'lbaas' in attr['editable']:
logger.debug('we have lbaas element')
plugin_data = attr['editable']['lbaas']['metadata']
plugin_data['enabled'] = True
self.fuel_web.client.update_cluster_attributes(cluster_id, attr)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute'],
'slave-03': ['compute']
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
cluster = self.fuel_web.client.get_cluster(cluster_id)
asserts.assert_equal(str(cluster['net_provider']), 'neutron')
self.fuel_web.verify_network(cluster_id)
controller = self.fuel_web.get_nailgun_node_by_name('slave-01')
os_conn = os_actions.OpenStackActions(controller['ip'])
self.check_neutron_agents_statuses(os_conn)
self.check_lbass_work(os_conn)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("deploy_neutron_vlan_lbaas_simple")
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
groups=["deploy_neutron_lbaas_simple_reset_ready"])
@log_snapshot_on_error
def deploy_neutron_lbaas_simple_reset_ready(self):
"""Deploy and re-deploy cluster in simple mode with LbaaS plugin
Scenario:
1. Upload plugin to the master node
2. Install plugin
3. Create cluster
4. Add 1 node with controller role
5. Add 1 nodes with compute role
6. Deploy the cluster
7. Run network verification
8. Check health of lbaas agent on the node
9. Create pool and vip
10. Reset cluster
11. Add 1 compute
12. Re-deploy cluster
13. Check health of lbaas agent on the node
14. Create pool and vip
15. Run OSTF
Duration 65m
Snapshot deploy_neutron_lbaas_simple_reset_ready
"""
self.env.revert_snapshot("ready_with_3_slaves")
# copy plugin to the master node
checkers.upload_tarball(
self.env.get_admin_remote(), LBAAS_PLUGIN_PATH, '/var')
# install plugin
checkers.install_plugin_check_code(
self.env.get_admin_remote(),
plugin=os.path.basename(LBAAS_PLUGIN_PATH))
cluster_id = self.fuel_web.create_cluster(
name=self.__class__.__name__,
mode=DEPLOYMENT_MODE_SIMPLE,
settings={
"net_provider": 'neutron',
"net_segment_type": NEUTRON_SEGMENT_TYPE,
}
)
attr = self.fuel_web.client.get_cluster_attributes(cluster_id)
if 'lbaas' in attr['editable']:
logger.debug('we have lbaas element')
plugin_data = attr['editable']['lbaas']['metadata']
plugin_data['enabled'] = True
self.fuel_web.client.update_cluster_attributes(cluster_id, attr)
self.fuel_web.update_nodes(
cluster_id,
{
'slave-01': ['controller'],
'slave-02': ['compute'],
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
cluster = self.fuel_web.client.get_cluster(cluster_id)
asserts.assert_equal(str(cluster['net_provider']), 'neutron')
self.fuel_web.verify_network(cluster_id)
controller = self.fuel_web.get_nailgun_node_by_name('slave-01')
os_conn = os_actions.OpenStackActions(controller['ip'])
self.check_neutron_agents_statuses(os_conn)
self.check_lbass_work(os_conn)
self.fuel_web.stop_reset_env_wait(cluster_id)
self.fuel_web.wait_nodes_get_online_state(self.env.nodes().slaves[:2])
self.fuel_web.update_nodes(
cluster_id,
{
'slave-03': ['compute'],
}
)
self.fuel_web.deploy_cluster_wait(cluster_id)
self.check_neutron_agents_statuses(os_conn)
self.check_lbass_work(os_conn)
self.fuel_web.run_ostf(
cluster_id=cluster_id)
self.env.make_snapshot("deploy_neutron_lbaas_simple_reset_ready")
|
[
"os.path.basename",
"fuelweb_test.logger.debug",
"proboscis.test",
"traceback.format_exc",
"fuelweb_test.helpers.os_actions.OpenStackActions"
] |
[((1205, 1244), 'proboscis.test', 'test', ([], {'enabled': '(False)', 'groups': "['plugins']"}), "(enabled=False, groups=['plugins'])\n", (1209, 1244), False, 'from proboscis import test\n'), ((3197, 3294), 'proboscis.test', 'test', ([], {'depends_on': '[SetupEnvironment.prepare_slaves_3]', 'groups': "['deploy_neutron_lbaas_simple']"}), "(depends_on=[SetupEnvironment.prepare_slaves_3], groups=[\n 'deploy_neutron_lbaas_simple'])\n", (3201, 3294), False, 'from proboscis import test\n'), ((5749, 5858), 'proboscis.test', 'test', ([], {'depends_on': '[SetupEnvironment.prepare_slaves_3]', 'groups': "['deploy_neutron_lbaas_simple_reset_ready']"}), "(depends_on=[SetupEnvironment.prepare_slaves_3], groups=[\n 'deploy_neutron_lbaas_simple_reset_ready'])\n", (5753, 5858), False, 'from proboscis import test\n'), ((5468, 5513), 'fuelweb_test.helpers.os_actions.OpenStackActions', 'os_actions.OpenStackActions', (["controller['ip']"], {}), "(controller['ip'])\n", (5495, 5513), False, 'from fuelweb_test.helpers import os_actions\n'), ((8211, 8256), 'fuelweb_test.helpers.os_actions.OpenStackActions', 'os_actions.OpenStackActions', (["controller['ip']"], {}), "(controller['ip'])\n", (8238, 8256), False, 'from fuelweb_test.helpers import os_actions\n'), ((4697, 4734), 'fuelweb_test.logger.debug', 'logger.debug', (['"""we have lbaas element"""'], {}), "('we have lbaas element')\n", (4709, 4734), False, 'from fuelweb_test import logger\n'), ((7480, 7517), 'fuelweb_test.logger.debug', 'logger.debug', (['"""we have lbaas element"""'], {}), "('we have lbaas element')\n", (7492, 7517), False, 'from fuelweb_test import logger\n'), ((4253, 4288), 'os.path.basename', 'os.path.basename', (['LBAAS_PLUGIN_PATH'], {}), '(LBAAS_PLUGIN_PATH)\n', (4269, 4288), False, 'import os\n'), ((7036, 7071), 'os.path.basename', 'os.path.basename', (['LBAAS_PLUGIN_PATH'], {}), '(LBAAS_PLUGIN_PATH)\n', (7052, 7071), False, 'import os\n'), ((2915, 2937), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (2935, 2937), False, 'import traceback\n')]
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# -*- coding: utf-8 -*-
from google_translate import GoogleTranslate
google = GoogleTranslate()
google.read_file("/Users/litaofan/github_code/AudioRecorder/app/src/main/res/values/strings.xml")
google.start('ja')
|
[
"google_translate.GoogleTranslate"
] |
[((127, 144), 'google_translate.GoogleTranslate', 'GoogleTranslate', ([], {}), '()\n', (142, 144), False, 'from google_translate import GoogleTranslate\n')]
|
import requests
from bs4 import BeautifulSoup
def scrap():
url="http://esp.uem.es/digitalAED/laboratorios.php?lab=C306"
mainurl="http://esp.uem.es/digitalAED/"
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
labs = soup.find_all('option')
print(labs)
cont = 0
links = []
for lab in labs:
num = lab.text
print(cont," - ",num)
cont = cont+1
link = lab.attrs['value']
links.append(link)
inp = int(input("Introduce el laboratorio: "))
if(inp>(cont-1)):
print("Valor introducido no váido. Ejecución terminada")
exit(0)
page = requests.get(mainurl+links[inp])
soupLab = BeautifulSoup(page.content, 'html.parser')
cent = soupLab.find('center')
cent = cent.find('h2')
sib = cent.next_sibling
bol=True
progs=[]
while bol:
try:
sib = sib.next_sibling
progs.append(sib)
        except AttributeError:
            print("End of list")
            bol=False
cont=1
print("\nProgramas del laboratorio seleccionado:\n")
programas = []
for prog in progs:
#print(prog)
if((cont%2)==0 and prog.strip()!=''):
print(prog.strip())
programas.append(prog.strip())
cont=cont+1
# print(programas)
|
[
"bs4.BeautifulSoup",
"requests.get"
] |
[((186, 203), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (198, 203), False, 'import requests\n'), ((216, 258), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page.content', '"""html.parser"""'], {}), "(page.content, 'html.parser')\n", (229, 258), False, 'from bs4 import BeautifulSoup\n'), ((675, 709), 'requests.get', 'requests.get', (['(mainurl + links[inp])'], {}), '(mainurl + links[inp])\n', (687, 709), False, 'import requests\n'), ((723, 765), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page.content', '"""html.parser"""'], {}), "(page.content, 'html.parser')\n", (736, 765), False, 'from bs4 import BeautifulSoup\n')]
|
#!/usr/bin/env python
from __future__ import print_function
import sys
import time
import threading
import Queue
import redis
def usage():
sys.stdout = sys.stderr
    print('Usage: spam_redis.py [channel|list] name count host [port 6379]')
sys.exit(2)
if len(sys.argv) < 5:
usage()
sys.exit()
redis_type = sys.argv[1]
redis_name = sys.argv[2]
count = int(eval(sys.argv[3]))
host = sys.argv[4]
port = int(sys.argv[5]) if len(sys.argv) == 6 else 6379
class Worker(threading.Thread):
def __init__(self, queue):
threading.Thread.__init__(self)
self.daemon = True
self.queue = queue
self.connected = False
def run(self):
self.connected = False
try:
self.client = redis.Redis(host=host, port=port)
self.connected = True
except:
etype, evalue, etb = sys.exc_info()
            print('Could not connect to redis server at %s:%s. Exception: %s, Error: %s.' % (host, port, etype, evalue))
while self.connected:
try:
message = self.queue.get(timeout=.5)
if redis_type == 'channel':
self.client.publish(redis_name, message)
else:
self.client.rpush(redis_name, message)
except Queue.Empty:
break
class RedisLoadTester():
def __init__(self, num_workers=10):
self.lock = threading.Lock()
self.counter = 0
self.queue = Queue.Queue(10)
self.num_workers = num_workers
def start(self, message):
total_item_count = count
workers = set()
for i in xrange(0, self.num_workers):
worker = Worker(self.queue)
worker.start()
workers.add(worker)
time.sleep(.1)
if not worker.connected:
sys.exit()
print("Start load test.")
start = time.time()
for counter in xrange(0, total_item_count):
if counter % 1000 == 0:
sys.stdout.write("Message already sent: %s. Mean req/s: %s\r" % (counter+1, counter / (time.time()-start)))
sys.stdout.flush()
now = "%f" % time.time()
self.queue.put(message % now)
for worker in workers:
worker.join()
stop = time.time()
print("Message sent: %s. Took %s. Mean req/s: %s" % (counter+1, stop-start, total_item_count / (stop-start)))
if __name__ == '__main__':
lt = RedisLoadTester()
lt.start("<13>192.168.127.12 - - [28/Jul/2006:10:27:10 -0300] \"GET /cgi-bin/try/9153/?param1=Test¶m2=%s HTTP/1.0\" 200 3395 \"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:32.0) Gecko/20100101 Firefox/32.0\"\n")
|
[
"redis.Redis",
"threading.Thread.__init__",
"Queue.Queue",
"time.time",
"threading.Lock",
"time.sleep",
"sys.stdout.flush",
"sys.exc_info",
"sys.exit"
] |
[((251, 262), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (259, 262), False, 'import sys\n'), ((302, 312), 'sys.exit', 'sys.exit', ([], {}), '()\n', (310, 312), False, 'import sys\n'), ((543, 574), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (568, 574), False, 'import threading\n'), ((1429, 1445), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1443, 1445), False, 'import threading\n'), ((1492, 1507), 'Queue.Queue', 'Queue.Queue', (['(10)'], {}), '(10)\n', (1503, 1507), False, 'import Queue\n'), ((1921, 1932), 'time.time', 'time.time', ([], {}), '()\n', (1930, 1932), False, 'import time\n'), ((2331, 2342), 'time.time', 'time.time', ([], {}), '()\n', (2340, 2342), False, 'import time\n'), ((750, 783), 'redis.Redis', 'redis.Redis', ([], {'host': 'host', 'port': 'port'}), '(host=host, port=port)\n', (761, 783), False, 'import redis\n'), ((1792, 1807), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1802, 1807), False, 'import time\n'), ((867, 881), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (879, 881), False, 'import sys\n'), ((1860, 1870), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1868, 1870), False, 'import sys\n'), ((2161, 2179), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2177, 2179), False, 'import sys\n'), ((2205, 2216), 'time.time', 'time.time', ([], {}), '()\n', (2214, 2216), False, 'import time\n'), ((2124, 2135), 'time.time', 'time.time', ([], {}), '()\n', (2133, 2135), False, 'import time\n')]
|
import pytest
from loguru import logger
from tiktokpy import TikTokPy
from tiktokpy.models.feed import FeedItem
@pytest.mark.asyncio()
async def test_user_feed(bot: TikTokPy):
feed = await bot.user_feed(username="@mileycyrus")
logger.info(feed)
assert len(feed) == 50
assert isinstance(feed[0], FeedItem)
|
[
"loguru.logger.info",
"pytest.mark.asyncio"
] |
[((116, 137), 'pytest.mark.asyncio', 'pytest.mark.asyncio', ([], {}), '()\n', (135, 137), False, 'import pytest\n'), ((238, 255), 'loguru.logger.info', 'logger.info', (['feed'], {}), '(feed)\n', (249, 255), False, 'from loguru import logger\n')]
|
from django.contrib import admin
from django.urls import path, include
from rest_framework import routers
from .views import *
router = routers.DefaultRouter()
router.register('tv', TVViewSet)
urlpatterns = [
path('', include(router.urls)),
path('me/', UserDetailView.as_view()),
path('ocena/', OcenaView.as_view()),
path('ocena/<str:ean>', OcenaView.as_view()),
path('porudzbina/', PorudzbineView.as_view())
]
|
[
"rest_framework.routers.DefaultRouter",
"django.urls.include"
] |
[((137, 160), 'rest_framework.routers.DefaultRouter', 'routers.DefaultRouter', ([], {}), '()\n', (158, 160), False, 'from rest_framework import routers\n'), ((224, 244), 'django.urls.include', 'include', (['router.urls'], {}), '(router.urls)\n', (231, 244), False, 'from django.urls import path, include\n')]
|
#! /usr/bin/env python3
""" Script to download public logs """
import os
import glob
import argparse
import json
import datetime
import sys
import time
import requests
from plot_app.config_tables import *
def get_arguments():
"""Get parsed CLI arguments"""
parser = argparse.ArgumentParser(
description="Python script for downloading public logs "
"from the PX4/flight_review database.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--print",
action="store_true",
dest="print_entries",
help="Whether to only print (not download) the database entries.",
)
parser.add_argument(
"-d",
"--directory",
type=str,
default="./",
help="The directory to look for ulog file.",
)
parser.add_argument(
"--server",
type=str,
default="http://localhost:5006",
help="The url of the server",
)
parser.add_argument(
"--name",
default=None,
type=str,
help="Name of the show",
)
parser.add_argument(
"--show_id",
default=None,
type=str,
help="Id of the show",
)
return parser.parse_args()
def main():
"""main script entry point"""
args = get_arguments()
try:
# the db_info_api sends a json file with a list of all public database entries
db_entries_list = requests.get(url=args.server + "/dbinfo").json()
if not args.show_id:
values = {"name": args.name, "place": "jn", "datetime": "12345567789"}
url = args.server + "/show"
r = requests.post(url=url, data=values).json()
show_id = r["id"]
else:
show_id = args.show_id
except Exception as e:
print("Server request failed.")
print(e)
raise
if args.print_entries:
# only print the json output without downloading logs
print(json.dumps(db_entries_list, indent=4, sort_keys=True))
return
# find already existing logs in download folder
logfile_pattern = os.path.join(os.path.abspath(args.directory), "*.ulg")
logfiles = glob.glob(os.path.join(os.getcwd(), logfile_pattern))
logids = [os.path.basename(f) for f in logfiles]
# sort list order to first download the newest log files
db_entries_list = sorted(
db_entries_list,
key=lambda x: datetime.datetime.strptime(x["log_date"], "%Y-%m-%d"),
reverse=True,
)
already_uploaded_file = [f["original_filename"] for f in db_entries_list]
for filename, logfile in zip(logids, logfiles):
if filename in already_uploaded_file:
print(f"[skip] Already uploaded : {filename}")
continue
file = {"filearg": (filename, open(logfile, "rb"))}
values = {
"shows_id": show_id,
"source": "uploader",
"type": "simple_log",
"description": "",
"email": "",
"allowForAnalysis": "true",
}
r = requests.post(url=args.server + "upload", files=file, data=values)
if r.status_code == 200:
data = json.loads(r.text)
url = data["url"]
print(f"Uploaded : {logfile}, url: {url}")
if __name__ == "__main__":
main()
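# Example invocation (illustrative; the script's file name is hypothetical):
#   python upload_logs.py --server http://localhost:5006 --name my_show -d ./logs
# Pass --show_id to add logs to an existing show instead of creating a new one.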
|
[
"os.path.abspath",
"argparse.ArgumentParser",
"json.loads",
"os.path.basename",
"os.getcwd",
"json.dumps",
"datetime.datetime.strptime",
"requests.get",
"requests.post"
] |
[((279, 462), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Python script for downloading public logs from the PX4/flight_review database."""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description=\n 'Python script for downloading public logs from the PX4/flight_review database.'\n , formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (302, 462), False, 'import argparse\n'), ((2155, 2186), 'os.path.abspath', 'os.path.abspath', (['args.directory'], {}), '(args.directory)\n', (2170, 2186), False, 'import os\n'), ((2280, 2299), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (2296, 2299), False, 'import os\n'), ((3099, 3165), 'requests.post', 'requests.post', ([], {'url': "(args.server + 'upload')", 'files': 'file', 'data': 'values'}), "(url=args.server + 'upload', files=file, data=values)\n", (3112, 3165), False, 'import requests\n'), ((1997, 2050), 'json.dumps', 'json.dumps', (['db_entries_list'], {'indent': '(4)', 'sort_keys': '(True)'}), '(db_entries_list, indent=4, sort_keys=True)\n', (2007, 2050), False, 'import json\n'), ((2235, 2246), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2244, 2246), False, 'import os\n'), ((3219, 3237), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (3229, 3237), False, 'import json\n'), ((1454, 1495), 'requests.get', 'requests.get', ([], {'url': "(args.server + '/dbinfo')"}), "(url=args.server + '/dbinfo')\n", (1466, 1495), False, 'import requests\n'), ((2458, 2511), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["x['log_date']", '"""%Y-%m-%d"""'], {}), "(x['log_date'], '%Y-%m-%d')\n", (2484, 2511), False, 'import datetime\n'), ((1672, 1707), 'requests.post', 'requests.post', ([], {'url': 'url', 'data': 'values'}), '(url=url, data=values)\n', (1685, 1707), False, 'import requests\n')]
|
#!/usr/bin/env python
from __future__ import division
from vector_illustration_processing_simplified import pi_point, pi_line, pi_arithmetic
import math
import rospy
import tf2_ros
from sensor_msgs.msg import LaserScan
from geometry_msgs.msg import TransformStamped
from tf.transformations import euler_from_quaternion
class Transform(object):
def __init__(self, position = None, rotation = None):
self.position = position
self.rotation = rotation
if self.position is None:
self.position = pi_point.Point()
if self.rotation is None:
self.rotation = 0.0
def __repr__(self):
return self.__str__()
def __str__(self):
return str(self.get_as_dictionary())
def get_as_dictionary(self):
params = {
"position": self.position,
"rotation": self.rotation
}
return params
def get_as_deep_dictionary(self):
params = {
"position": self.position.get_as_deep_dictionary(),
"rotation": self.rotation
}
return params
def set_from_ros_transform(self, ros_stamped_transform):
x = ros_stamped_transform.transform.translation.x
y = ros_stamped_transform.transform.translation.y
self.rotation = euler_from_quaternion((
ros_stamped_transform.transform.rotation.x,
ros_stamped_transform.transform.rotation.y,
ros_stamped_transform.transform.rotation.z,
ros_stamped_transform.transform.rotation.w
))[-1]
self.position = pi_point.Point(x, y)
def set_position(self, position):
self.position = position
def set_rotation(self, rotation):
self.rotation = rotation
class RangeSensor(object):
def __init__(self, parent_frame_id, relative_frame_id, tf_buffer_object):
self.relative_frame_id = relative_frame_id
self.parent_frame_id = parent_frame_id
self.tf_buffer_object = tf_buffer_object
self.transform_stamped = None
self.relative_transform = Transform()
self.distance_measured = 0
def __log(self, msg):
rospy.loginfo("[INFO] {}".format(msg))
def update(self, distance_measured):
self.distance_measured = distance_measured
def get_relative_measured_point(self):
x = self.relative_transform.position.x + self.distance_measured * math.sin(self.relative_transform.rotation)
y = self.relative_transform.position.y + self.distance_measured * math.cos(self.relative_transform.rotation)
return pi_point.Point(x, y)
def update_transform(self):
try:
self.transform_stamped = self.tf_buffer_object.lookup_transform(self.parent_frame_id, self.relative_frame_id, rospy.Time())
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
rospy.logerr("[ERROR] could not lookup transform between {0} and {1}".format(self.parent_frame_id, self.relative_frame_id))
return False
if self.transform_stamped is None:
return False
self.relative_transform.set_from_ros_transform(self.transform_stamped)
return True
class LaserScannerUsingRangeSensors(object):
def __init__(self, parent_frame_id, relative_frame_id, tf_buffer_object):
self.relative_frame_id = relative_frame_id
self.parent_frame_id = parent_frame_id
self.tf_buffer_object = tf_buffer_object
self.transform_stamped = None
self.relative_transform = Transform()
self.set_number_of_active_sensors(1)
def __log(self, msg):
rospy.loginfo("[INFO] {}".format(msg))
def set_number_of_active_sensors(self, howmany):
self.number_of_active_sensors = howmany
self.number_of_data_points = self.number_of_active_sensors * 1
self.compute()
def compute(self):
self.ranges = [0] * self.number_of_active_sensors
self.point_cloud = [pi_point.Point()] * self.number_of_active_sensors
self.simulated_origins = [pi_point.Point()] * self.number_of_active_sensors
self.populate_simulated_origins()
# pre-populate the LaserScan message
self.scan_msg = LaserScan()
self.scan_msg.header.frame_id = self.relative_frame_id
self.scan_msg.angle_min = -math.pi
self.scan_msg.angle_max = math.pi
self.scan_msg.angle_increment = (2.0 * math.pi) / self.number_of_data_points
self.scan_msg.time_increment = 0
self.scan_msg.scan_time = 0
self.scan_msg.range_min = 0.01
self.scan_msg.range_max = 0.50
def publish_message(self, publisher):
self.scan_msg.header.stamp = rospy.Time.now()
self.scan_msg.ranges = self.ranges
publisher.publish(self.scan_msg)
def populate_simulated_origins(self):
sim_radius = 0.3
angle_increment = 2.0 * math.pi / self.number_of_data_points
for i in range(self.number_of_data_points):
angle = i * angle_increment
x_position = self.relative_transform.position.x + sim_radius * math.sin(self.relative_transform.rotation + angle)
y_position = self.relative_transform.position.y + sim_radius * math.cos(self.relative_transform.rotation + angle)
self.simulated_origins[i] = pi_point.Point(x_position, y_position)
def update(self, range_sensors):
'''note the order of the sensors determines the order of the data cloud generated'''
for i in range(self.number_of_active_sensors):
point = range_sensors[i].get_relative_measured_point()
self.point_cloud[i] = point
measured_distance_line = pi_line.Line(self.relative_transform.position, point)
simulated_origin_line = pi_line.Line(self.relative_transform.position, self.simulated_origins[i])
r = measured_distance_line.get_length()
angle = measured_distance_line.get_angle_from_line(simulated_origin_line)
self.ranges[i] = r * math.cos(angle)
# self.__log("simulated_origins : {}".format(self.simulated_origins))
# self.__log("point_cloud : {}".format(self.point_cloud))
def update_transform(self):
try:
self.transform_stamped = self.tf_buffer_object.lookup_transform(self.parent_frame_id, self.relative_frame_id, rospy.Time())
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
rospy.logerr("[ERROR] could not lookup transform between {0} and {1}".format(self.parent_frame_id, self.relative_frame_id))
return False
if self.transform_stamped is None:
return False
self.relative_transform.set_from_ros_transform(self.transform_stamped)
self.compute()
return True
|
[
"rospy.Time.now",
"vector_illustration_processing_simplified.pi_point.Point",
"rospy.Time",
"math.sin",
"vector_illustration_processing_simplified.pi_line.Line",
"math.cos",
"tf.transformations.euler_from_quaternion",
"sensor_msgs.msg.LaserScan"
] |
[((1628, 1648), 'vector_illustration_processing_simplified.pi_point.Point', 'pi_point.Point', (['x', 'y'], {}), '(x, y)\n', (1642, 1648), False, 'from vector_illustration_processing_simplified import pi_point, pi_line, pi_arithmetic\n'), ((2653, 2673), 'vector_illustration_processing_simplified.pi_point.Point', 'pi_point.Point', (['x', 'y'], {}), '(x, y)\n', (2667, 2673), False, 'from vector_illustration_processing_simplified import pi_point, pi_line, pi_arithmetic\n'), ((4358, 4369), 'sensor_msgs.msg.LaserScan', 'LaserScan', ([], {}), '()\n', (4367, 4369), False, 'from sensor_msgs.msg import LaserScan\n'), ((4843, 4859), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (4857, 4859), False, 'import rospy\n'), ((534, 550), 'vector_illustration_processing_simplified.pi_point.Point', 'pi_point.Point', ([], {}), '()\n', (548, 550), False, 'from vector_illustration_processing_simplified import pi_point, pi_line, pi_arithmetic\n'), ((1329, 1537), 'tf.transformations.euler_from_quaternion', 'euler_from_quaternion', (['(ros_stamped_transform.transform.rotation.x, ros_stamped_transform.\n transform.rotation.y, ros_stamped_transform.transform.rotation.z,\n ros_stamped_transform.transform.rotation.w)'], {}), '((ros_stamped_transform.transform.rotation.x,\n ros_stamped_transform.transform.rotation.y, ros_stamped_transform.\n transform.rotation.z, ros_stamped_transform.transform.rotation.w))\n', (1350, 1537), False, 'from tf.transformations import euler_from_quaternion\n'), ((5473, 5511), 'vector_illustration_processing_simplified.pi_point.Point', 'pi_point.Point', (['x_position', 'y_position'], {}), '(x_position, y_position)\n', (5487, 5511), False, 'from vector_illustration_processing_simplified import pi_point, pi_line, pi_arithmetic\n'), ((5848, 5901), 'vector_illustration_processing_simplified.pi_line.Line', 'pi_line.Line', (['self.relative_transform.position', 'point'], {}), '(self.relative_transform.position, point)\n', (5860, 5901), False, 'from vector_illustration_processing_simplified import pi_point, pi_line, pi_arithmetic\n'), ((5939, 6012), 'vector_illustration_processing_simplified.pi_line.Line', 'pi_line.Line', (['self.relative_transform.position', 'self.simulated_origins[i]'], {}), '(self.relative_transform.position, self.simulated_origins[i])\n', (5951, 6012), False, 'from vector_illustration_processing_simplified import pi_point, pi_line, pi_arithmetic\n'), ((2477, 2519), 'math.sin', 'math.sin', (['self.relative_transform.rotation'], {}), '(self.relative_transform.rotation)\n', (2485, 2519), False, 'import math\n'), ((2594, 2636), 'math.cos', 'math.cos', (['self.relative_transform.rotation'], {}), '(self.relative_transform.rotation)\n', (2602, 2636), False, 'import math\n'), ((2846, 2858), 'rospy.Time', 'rospy.Time', ([], {}), '()\n', (2856, 2858), False, 'import rospy\n'), ((4111, 4127), 'vector_illustration_processing_simplified.pi_point.Point', 'pi_point.Point', ([], {}), '()\n', (4125, 4127), False, 'from vector_illustration_processing_simplified import pi_point, pi_line, pi_arithmetic\n'), ((4195, 4211), 'vector_illustration_processing_simplified.pi_point.Point', 'pi_point.Point', ([], {}), '()\n', (4209, 4211), False, 'from vector_illustration_processing_simplified import pi_point, pi_line, pi_arithmetic\n'), ((6186, 6201), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (6194, 6201), False, 'import math\n'), ((6527, 6539), 'rospy.Time', 'rospy.Time', ([], {}), '()\n', (6537, 6539), False, 'import rospy\n'), ((5255, 5305), 'math.sin', 'math.sin', 
(['(self.relative_transform.rotation + angle)'], {}), '(self.relative_transform.rotation + angle)\n', (5263, 5305), False, 'import math\n'), ((5381, 5431), 'math.cos', 'math.cos', (['(self.relative_transform.rotation + angle)'], {}), '(self.relative_transform.rotation + angle)\n', (5389, 5431), False, 'import math\n')]
|
# Generated by Django 3.0.5 on 2020-05-04 11:38
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("timer", "0005_timestamp_project"),
]
operations = [
migrations.DeleteModel(
name="TimerState",
),
]
|
[
"django.db.migrations.DeleteModel"
] |
[((224, 265), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""TimerState"""'}), "(name='TimerState')\n", (246, 265), False, 'from django.db import migrations\n')]
|
from typing import List, Optional
from river import synth
from river.drift import ADWIN
from river.tree import HoeffdingTreeClassifier
from streamselect.adaptive_learning import BaseAdaptiveLearner
from streamselect.adaptive_learning.reidentification_schedulers import (
DriftDetectionCheck,
DriftInfo,
DriftType,
PeriodicCheck,
)
from streamselect.concept_representations import ErrorRateRepresentation
from streamselect.repository import AbsoluteValueComparer
from streamselect.states import State
from streamselect.utils import Observation
# pylint: disable=too-many-statements, duplicate-code, R0801
def test_init() -> None:
"""Test initialization of the base class."""
al_classifier = BaseAdaptiveLearner(
classifier_constructor=HoeffdingTreeClassifier,
representation_constructor=ErrorRateRepresentation,
representation_comparer=AbsoluteValueComparer(),
drift_detector_constructor=ADWIN,
background_state_mode="drift_reset",
)
# Check initial state has been constructed
assert len(al_classifier.repository.states) == 1
assert al_classifier.active_state_id in al_classifier.repository.states
assert al_classifier.active_state_id in al_classifier.active_window_state_representations
# Assert background state was constructed
assert al_classifier.background_state
assert al_classifier.background_state_active_representation
assert al_classifier.background_state_detector
al_classifier = BaseAdaptiveLearner(
classifier_constructor=HoeffdingTreeClassifier,
representation_constructor=ErrorRateRepresentation,
representation_comparer=AbsoluteValueComparer(),
drift_detector_constructor=ADWIN,
background_state_mode="transition_reset",
)
# Assert background state was constructed
assert al_classifier.background_state
assert al_classifier.background_state_active_representation
assert not al_classifier.background_state_detector
al_classifier = BaseAdaptiveLearner(
classifier_constructor=HoeffdingTreeClassifier,
representation_constructor=ErrorRateRepresentation,
representation_comparer=AbsoluteValueComparer(),
drift_detector_constructor=ADWIN,
background_state_mode=None,
)
# Assert background state was constructed
assert not al_classifier.background_state
assert not al_classifier.background_state_active_representation
assert not al_classifier.background_state_detector
# Test that states get the correct properties
window_size = 50
update_period = 50
al_classifier = BaseAdaptiveLearner(
classifier_constructor=HoeffdingTreeClassifier,
representation_constructor=ErrorRateRepresentation,
representation_comparer=AbsoluteValueComparer(),
drift_detector_constructor=ADWIN,
representation_window_size=window_size,
representation_update_period=update_period,
)
# Assert background state was constructed
assert isinstance(al_classifier.get_active_state().classifier, HoeffdingTreeClassifier)
assert isinstance(al_classifier.get_active_state().get_self_representation(), ErrorRateRepresentation)
assert al_classifier.get_active_state().get_self_representation().window_size == window_size
assert al_classifier.get_active_state().get_self_representation().update_period == update_period
# Check that states are correctly made as the concept mode
assert al_classifier.get_active_state().get_self_representation().mode == "concept"
def test_base_predictions() -> None:
"""Test predictions are the same as made by a base classifier."""
al_classifier = BaseAdaptiveLearner(
classifier_constructor=HoeffdingTreeClassifier,
representation_constructor=ErrorRateRepresentation,
representation_comparer=AbsoluteValueComparer(),
drift_detector_constructor=ADWIN,
background_state_mode="drift_reset",
)
baseline_state = State(
HoeffdingTreeClassifier(),
lambda state_id: ErrorRateRepresentation(al_classifier.representation_window_size, state_id),
state_id=-1,
)
baseline_active_representation = ErrorRateRepresentation(
al_classifier.representation_window_size, baseline_state.state_id
)
baseline_comparer = AbsoluteValueComparer()
baseline_detector = ADWIN()
dataset = synth.STAGGER()
for t, (x, y) in enumerate(dataset.take(50)):
p = al_classifier.predict_one(x, t)
ob = Observation(x=x, y=y, seen_at=t, active_state_id=baseline_state.state_id)
p_b = baseline_state.predict_one(ob)
baseline_active_representation.predict_one(ob)
assert p == p_b
al_classifier.learn_one(x, y, timestep=t)
baseline_state.learn_one(ob)
p_b = baseline_state.predict_one(ob)
baseline_active_representation.learn_one(ob)
in_drift, _ = baseline_detector.update(
baseline_comparer.get_state_rep_similarity(baseline_state, baseline_active_representation) # type: ignore
)
if in_drift:
break
assert not al_classifier.performance_monitor.in_drift
assert not al_classifier.performance_monitor.made_transition
def test_drift_detection() -> None:
"""Test predictions are the same as made by a base classifier, and drift detection capabilities are as well."""
al_classifier = BaseAdaptiveLearner(
classifier_constructor=HoeffdingTreeClassifier,
representation_constructor=ErrorRateRepresentation,
representation_comparer=AbsoluteValueComparer(),
drift_detector_constructor=ADWIN,
background_state_mode="drift_reset",
)
baseline_state = State(
HoeffdingTreeClassifier(),
lambda state_id: ErrorRateRepresentation(al_classifier.representation_window_size, state_id, mode="concept"),
state_id=-1,
)
baseline_active_representation = ErrorRateRepresentation(
al_classifier.representation_window_size, baseline_state.state_id
)
baseline_comparer = AbsoluteValueComparer()
baseline_detector = ADWIN()
dataset_0 = synth.STAGGER(classification_function=0, seed=0)
dataset_1 = synth.STAGGER(classification_function=1, seed=0)
found_drift = False
for t, (x, y) in enumerate(dataset_0.take(500)):
# Ensure predictions are equal
ob = Observation(x=x, y=y, seen_at=t, active_state_id=baseline_state.state_id)
p = al_classifier.predict_one(x)
p_b = baseline_state.predict_one(ob, force_train_own_representation=True)
baseline_active_representation.predict_one(ob)
# Ensure background predictions are equal, since we are using drift_reset and no drift will occur.
p_background = al_classifier.background_state.predict_one(ob) # type: ignore
assert al_classifier.background_state
assert al_classifier.background_state_active_representation
assert al_classifier.background_state_detector
assert p_b == p_background
assert p == p_b
assert (
baseline_active_representation.meta_feature_values[0]
== al_classifier.active_window_state_representations[al_classifier.active_state_id].meta_feature_values[0]
)
assert (
baseline_active_representation.meta_feature_values[0]
== al_classifier.background_state_active_representation.meta_feature_values[0]
)
# Assert learning and relevance checks are equal.
# Note: we have to use the second prediction from the baseline, as for the very
# first prediction in the stream the first prediction is None as classes haven't been
# learned. We do this automatically in the adaptive_learning class.
al_classifier.learn_one(x, y)
baseline_state.learn_one(ob, force_train_classifier=True)
baseline_active_representation.learn_one(ob)
baseline_relevance = baseline_comparer.get_state_rep_similarity(baseline_state, baseline_active_representation)
assert (
baseline_state.get_self_representation().meta_feature_values[0]
== al_classifier.get_active_state().get_self_representation().meta_feature_values[0]
)
assert (
baseline_state.get_self_representation().meta_feature_values[0]
== al_classifier.background_state.get_self_representation().meta_feature_values[0]
)
assert (
baseline_active_representation.meta_feature_values[0]
== al_classifier.active_window_state_representations[al_classifier.active_state_id].meta_feature_values[0]
)
assert (
baseline_active_representation.meta_feature_values[0]
== al_classifier.background_state_active_representation.meta_feature_values[0]
)
assert baseline_relevance == al_classifier.performance_monitor.active_state_relevance
assert baseline_relevance == al_classifier.performance_monitor.background_state_relevance
in_drift, _ = baseline_detector.update(baseline_relevance) # type: ignore
assert baseline_detector.total == al_classifier.drift_detector.total # type: ignore
# We shouldn't find a drift in stable data
assert not found_drift
assert not al_classifier.performance_monitor.in_drift
assert not al_classifier.performance_monitor.made_transition
if not found_drift:
for t, (x, y) in enumerate(dataset_1.take(500), start=500):
ob = Observation(x=x, y=y, seen_at=t, active_state_id=baseline_state.state_id)
p = al_classifier.predict_one(x)
p_b = baseline_state.predict_one(ob, force_train_own_representation=True)
baseline_active_representation.predict_one(ob)
assert p == p_b
al_classifier.learn_one(x, y)
baseline_state.learn_one(ob, force_train_classifier=True)
baseline_active_representation.learn_one(ob)
baseline_relevance = baseline_comparer.get_state_rep_similarity(
baseline_state, baseline_active_representation
)
assert baseline_relevance == al_classifier.performance_monitor.active_state_relevance
in_drift, _ = baseline_detector.update(baseline_relevance) # type: ignore
if in_drift:
found_drift = True
break
assert not al_classifier.performance_monitor.in_drift
assert not al_classifier.performance_monitor.made_transition
# We should have found a drift when the concept changed
assert al_classifier.performance_monitor.in_drift
# background should have been reset since we are using "drift_reset"
assert al_classifier.background_state is not None
assert al_classifier.background_state.seen_weight == 0.0
assert al_classifier.get_active_state().seen_weight == 0.0
assert len(al_classifier.repository.states) == 2
assert al_classifier.active_state_id == 1
def test_drift_transition() -> None:
"""Test data after a drift is handled correctly."""
al_classifier = BaseAdaptiveLearner(
classifier_constructor=HoeffdingTreeClassifier,
representation_constructor=ErrorRateRepresentation,
representation_comparer=AbsoluteValueComparer(),
drift_detector_constructor=ADWIN,
background_state_mode="drift_reset",
)
baseline_c1_state = State(
HoeffdingTreeClassifier(),
lambda state_id: ErrorRateRepresentation(al_classifier.representation_window_size, state_id, mode="concept"),
state_id=-1,
)
baseline_c1_active_representation = ErrorRateRepresentation(
al_classifier.representation_window_size, baseline_c1_state.state_id
)
baseline_c1_comparer = AbsoluteValueComparer()
baseline_c1_detector = ADWIN()
dataset_1 = synth.STAGGER(classification_function=0, seed=0)
dataset_2 = synth.STAGGER(classification_function=1, seed=0)
found_drift = False
drift_point = None
# Concept 1
for t, (x, y) in enumerate(dataset_1.take(500)):
ob = Observation(x=x, y=y, seen_at=t, active_state_id=baseline_c1_state.state_id)
al_classifier.predict_one(x)
baseline_c1_state.predict_one(ob, force_train_own_representation=True)
baseline_c1_active_representation.predict_one(ob)
al_classifier.learn_one(x, y)
baseline_c1_state.learn_one(ob, force_train_classifier=True)
baseline_c1_active_representation.learn_one(ob)
baseline_c1_relevance = baseline_c1_comparer.get_state_rep_similarity(
baseline_c1_state, baseline_c1_active_representation
)
in_drift, _ = baseline_c1_detector.update(baseline_c1_relevance) # type: ignore
assert not found_drift
assert not al_classifier.performance_monitor.in_drift
assert not al_classifier.performance_monitor.made_transition
# Concept 2
for t, (x, y) in enumerate(dataset_2.take(500), start=500):
ob = Observation(x=x, y=y, seen_at=t, active_state_id=baseline_c1_state.state_id)
al_classifier.predict_one(x)
baseline_c1_state.predict_one(ob, force_train_own_representation=True)
baseline_c1_active_representation.predict_one(ob)
al_classifier.learn_one(x, y)
baseline_c1_state.learn_one(ob, force_train_classifier=True)
baseline_c1_active_representation.learn_one(ob)
baseline_c1_relevance = baseline_c1_comparer.get_state_rep_similarity(
baseline_c1_state, baseline_c1_active_representation
)
assert baseline_c1_relevance == al_classifier.performance_monitor.active_state_relevance
in_drift, _ = baseline_c1_detector.update(baseline_c1_relevance) # type: ignore
if in_drift:
found_drift = True
drift_point = t
break
assert not al_classifier.performance_monitor.in_drift
assert not al_classifier.performance_monitor.made_transition
# We should have found a drift when the concept changed
assert al_classifier.performance_monitor.in_drift
# background should have been reset since we are using "drift_reset"
assert al_classifier.background_state is not None
assert al_classifier.background_state.seen_weight == 0.0
assert al_classifier.get_active_state().seen_weight == 0.0
assert len(al_classifier.repository.states) == 2
assert al_classifier.active_state_id == 1
assert drift_point
# Test that after the transition, we are properly using the new state not the old state.
baseline_c2_state = State(
HoeffdingTreeClassifier(),
lambda state_id: ErrorRateRepresentation(al_classifier.representation_window_size, state_id, mode="concept"),
state_id=-2,
)
baseline_c2_active_representation = ErrorRateRepresentation(
al_classifier.representation_window_size, baseline_c2_state.state_id
)
baseline_c2_comparer = AbsoluteValueComparer()
baseline_c2_detector = ADWIN()
# Concept 2
for t, (x, y) in enumerate(dataset_2.take(500), start=500 + drift_point):
ob = Observation(x=x, y=y, seen_at=t, active_state_id=baseline_c2_state.state_id)
assert al_classifier.active_state_id == 1
p_c2 = al_classifier.predict_one(x)
bp_c2 = baseline_c2_state.predict_one(ob, force_train_own_representation=True)
# the adaptive learner should give the same results as a new classifier trained on the new concept.
assert p_c2 == bp_c2
# The original concept 1 state should be stored, and give the same predictions as the baseline trained
# only on that data.
p_c1 = al_classifier.repository.states[0].predict_one(ob)
bp_c1 = baseline_c1_state.predict_one(ob)
assert p_c1 == bp_c1
baseline_c2_active_representation.predict_one(ob)
al_classifier.learn_one(x, y)
baseline_c2_state.learn_one(ob, force_train_classifier=True)
baseline_c2_active_representation.learn_one(ob)
baseline_c2_relevance = baseline_c2_comparer.get_state_rep_similarity(
baseline_c2_state, baseline_c2_active_representation
)
assert baseline_c2_relevance == al_classifier.performance_monitor.active_state_relevance
in_drift, _ = baseline_c2_detector.update(baseline_c2_relevance) # type: ignore
if in_drift:
found_drift = True
drift_point = t
break
assert not al_classifier.performance_monitor.in_drift
assert not al_classifier.performance_monitor.made_transition
def test_reidentification_schedule_detection() -> None:
"""Test that drifts are scheduled at the correct times using the DriftDetectionScheduler."""
# In this case, we want to see a reidentification check performed 50 timesteps after every drift.
check_delay = 50
classifier = BaseAdaptiveLearner(
classifier_constructor=HoeffdingTreeClassifier,
representation_constructor=ErrorRateRepresentation,
representation_comparer=AbsoluteValueComparer(),
drift_detector_constructor=ADWIN,
background_state_mode="drift_reset",
reidentification_check_schedulers=[DriftDetectionCheck(check_delay)],
representation_window_size=50,
)
dataset_0 = synth.STAGGER(classification_function=0, seed=0)
dataset_1 = synth.STAGGER(classification_function=1, seed=0)
dataset_2 = synth.STAGGER(classification_function=2, seed=0)
active_state_segments: List[Optional[int]] = [None]
drift_checks: List[Optional[DriftInfo]] = [None]
t = 0
for dataset in [dataset_0, dataset_1, dataset_2] * 3:
for x, y in dataset.take(500):
_ = classifier.predict_one(x, t)
classifier.learn_one(x, y, timestep=t)
current_id = classifier.performance_monitor.final_active_state_id
current_drift = classifier.performance_monitor.last_drift
if current_id != active_state_segments[-1]:
active_state_segments.append(current_id)
if current_drift != drift_checks[-1]:
drift_checks.append(current_drift)
t += 1
for i, drift in enumerate(drift_checks):
if drift is None:
continue
if drift.drift_type == DriftType.ScheduledOne:
prev_drift = drift_checks[i - 1]
assert prev_drift is not None
assert prev_drift.drift_type == DriftType.DriftDetectorTriggered or prev_drift.triggered_transition
assert prev_drift.drift_timestep == drift.drift_timestep - check_delay - 1
def test_reidentification_schedule_periodic() -> None:
"""Test that drifts are scheduled at the correct times using the PeriodicCheck."""
# In this case, we want to see a reidentification check performed every 50.
check_period = 100
classifier = BaseAdaptiveLearner(
classifier_constructor=HoeffdingTreeClassifier,
representation_constructor=ErrorRateRepresentation,
representation_comparer=AbsoluteValueComparer(),
drift_detector_constructor=ADWIN,
background_state_mode="drift_reset",
reidentification_check_schedulers=[PeriodicCheck(check_period)],
representation_window_size=50,
)
dataset_0 = synth.STAGGER(classification_function=0, seed=0)
dataset_1 = synth.STAGGER(classification_function=1, seed=0)
dataset_2 = synth.STAGGER(classification_function=2, seed=0)
active_state_segments: List[Optional[int]] = [None]
drift_checks: List[Optional[DriftInfo]] = [None]
t = 0
for dataset in [dataset_0, dataset_1, dataset_2] * 3:
for x, y in dataset.take(500):
_ = classifier.predict_one(x, t)
classifier.learn_one(x, y, timestep=t)
current_id = classifier.performance_monitor.final_active_state_id
current_drift = classifier.performance_monitor.last_drift
if current_id != active_state_segments[-1]:
active_state_segments.append(current_id)
if current_drift != drift_checks[-1]:
drift_checks.append(current_drift)
t += 1
for i, drift in enumerate(drift_checks):
if drift is None:
continue
if drift.drift_type == DriftType.ScheduledOne:
prev_drift = drift_checks[i - 1]
print(drift, prev_drift)
assert prev_drift is not None
if prev_drift.triggered_transition:
assert prev_drift.drift_timestep == drift.drift_timestep - check_period - 1
else:
assert prev_drift.drift_timestep == drift.drift_timestep - check_period
# %%
|
[
"river.drift.ADWIN",
"streamselect.concept_representations.ErrorRateRepresentation",
"streamselect.adaptive_learning.reidentification_schedulers.PeriodicCheck",
"river.synth.STAGGER",
"river.tree.HoeffdingTreeClassifier",
"streamselect.adaptive_learning.reidentification_schedulers.DriftDetectionCheck",
"streamselect.repository.AbsoluteValueComparer",
"streamselect.utils.Observation"
] |
[((4735, 4829), 'streamselect.concept_representations.ErrorRateRepresentation', 'ErrorRateRepresentation', (['al_classifier.representation_window_size', 'baseline_state.state_id'], {}), '(al_classifier.representation_window_size,\n baseline_state.state_id)\n', (4758, 4829), False, 'from streamselect.concept_representations import ErrorRateRepresentation\n'), ((4864, 4887), 'streamselect.repository.AbsoluteValueComparer', 'AbsoluteValueComparer', ([], {}), '()\n', (4885, 4887), False, 'from streamselect.repository import AbsoluteValueComparer\n'), ((4912, 4919), 'river.drift.ADWIN', 'ADWIN', ([], {}), '()\n', (4917, 4919), False, 'from river.drift import ADWIN\n'), ((4935, 4950), 'river.synth.STAGGER', 'synth.STAGGER', ([], {}), '()\n', (4948, 4950), False, 'from river import synth\n'), ((6498, 6592), 'streamselect.concept_representations.ErrorRateRepresentation', 'ErrorRateRepresentation', (['al_classifier.representation_window_size', 'baseline_state.state_id'], {}), '(al_classifier.representation_window_size,\n baseline_state.state_id)\n', (6521, 6592), False, 'from streamselect.concept_representations import ErrorRateRepresentation\n'), ((6627, 6650), 'streamselect.repository.AbsoluteValueComparer', 'AbsoluteValueComparer', ([], {}), '()\n', (6648, 6650), False, 'from streamselect.repository import AbsoluteValueComparer\n'), ((6675, 6682), 'river.drift.ADWIN', 'ADWIN', ([], {}), '()\n', (6680, 6682), False, 'from river.drift import ADWIN\n'), ((6700, 6748), 'river.synth.STAGGER', 'synth.STAGGER', ([], {'classification_function': '(0)', 'seed': '(0)'}), '(classification_function=0, seed=0)\n', (6713, 6748), False, 'from river import synth\n'), ((6765, 6813), 'river.synth.STAGGER', 'synth.STAGGER', ([], {'classification_function': '(1)', 'seed': '(0)'}), '(classification_function=1, seed=0)\n', (6778, 6813), False, 'from river import synth\n'), ((12236, 12333), 'streamselect.concept_representations.ErrorRateRepresentation', 'ErrorRateRepresentation', (['al_classifier.representation_window_size', 'baseline_c1_state.state_id'], {}), '(al_classifier.representation_window_size,\n baseline_c1_state.state_id)\n', (12259, 12333), False, 'from streamselect.concept_representations import ErrorRateRepresentation\n'), ((12371, 12394), 'streamselect.repository.AbsoluteValueComparer', 'AbsoluteValueComparer', ([], {}), '()\n', (12392, 12394), False, 'from streamselect.repository import AbsoluteValueComparer\n'), ((12422, 12429), 'river.drift.ADWIN', 'ADWIN', ([], {}), '()\n', (12427, 12429), False, 'from river.drift import ADWIN\n'), ((12447, 12495), 'river.synth.STAGGER', 'synth.STAGGER', ([], {'classification_function': '(0)', 'seed': '(0)'}), '(classification_function=0, seed=0)\n', (12460, 12495), False, 'from river import synth\n'), ((12512, 12560), 'river.synth.STAGGER', 'synth.STAGGER', ([], {'classification_function': '(1)', 'seed': '(0)'}), '(classification_function=1, seed=0)\n', (12525, 12560), False, 'from river import synth\n'), ((15424, 15521), 'streamselect.concept_representations.ErrorRateRepresentation', 'ErrorRateRepresentation', (['al_classifier.representation_window_size', 'baseline_c2_state.state_id'], {}), '(al_classifier.representation_window_size,\n baseline_c2_state.state_id)\n', (15447, 15521), False, 'from streamselect.concept_representations import ErrorRateRepresentation\n'), ((15559, 15582), 'streamselect.repository.AbsoluteValueComparer', 'AbsoluteValueComparer', ([], {}), '()\n', (15580, 15582), False, 'from streamselect.repository import AbsoluteValueComparer\n'), ((15610, 15617), 'river.drift.ADWIN', 'ADWIN', ([], {}), '()\n', (15615, 15617), False, 'from river.drift import ADWIN\n'), ((17915, 17963), 'river.synth.STAGGER', 'synth.STAGGER', ([], {'classification_function': '(0)', 'seed': '(0)'}), '(classification_function=0, seed=0)\n', (17928, 17963), False, 'from river import synth\n'), ((17980, 18028), 'river.synth.STAGGER', 'synth.STAGGER', ([], {'classification_function': '(1)', 'seed': '(0)'}), '(classification_function=1, seed=0)\n', (17993, 18028), False, 'from river import synth\n'), ((18045, 18093), 'river.synth.STAGGER', 'synth.STAGGER', ([], {'classification_function': '(2)', 'seed': '(0)'}), '(classification_function=2, seed=0)\n', (18058, 18093), False, 'from river import synth\n'), ((19901, 19949), 'river.synth.STAGGER', 'synth.STAGGER', ([], {'classification_function': '(0)', 'seed': '(0)'}), '(classification_function=0, seed=0)\n', (19914, 19949), False, 'from river import synth\n'), ((19966, 20014), 'river.synth.STAGGER', 'synth.STAGGER', ([], {'classification_function': '(1)', 'seed': '(0)'}), '(classification_function=1, seed=0)\n', (19979, 20014), False, 'from river import synth\n'), ((20031, 20079), 'river.synth.STAGGER', 'synth.STAGGER', ([], {'classification_function': '(2)', 'seed': '(0)'}), '(classification_function=2, seed=0)\n', (20044, 20079), False, 'from river import synth\n'), ((4542, 4567), 'river.tree.HoeffdingTreeClassifier', 'HoeffdingTreeClassifier', ([], {}), '()\n', (4565, 4567), False, 'from river.tree import HoeffdingTreeClassifier\n'), ((5058, 5131), 'streamselect.utils.Observation', 'Observation', ([], {'x': 'x', 'y': 'y', 'seen_at': 't', 'active_state_id': 'baseline_state.state_id'}), '(x=x, y=y, seen_at=t, active_state_id=baseline_state.state_id)\n', (5069, 5131), False, 'from streamselect.utils import Observation\n'), ((6289, 6314), 'river.tree.HoeffdingTreeClassifier', 'HoeffdingTreeClassifier', ([], {}), '()\n', (6312, 6314), False, 'from river.tree import HoeffdingTreeClassifier\n'), ((6943, 7016), 'streamselect.utils.Observation', 'Observation', ([], {'x': 'x', 'y': 'y', 'seen_at': 't', 'active_state_id': 'baseline_state.state_id'}), '(x=x, y=y, seen_at=t, active_state_id=baseline_state.state_id)\n', (6954, 7016), False, 'from streamselect.utils import Observation\n'), ((12024, 12049), 'river.tree.HoeffdingTreeClassifier', 'HoeffdingTreeClassifier', ([], {}), '()\n', (12047, 12049), False, 'from river.tree import HoeffdingTreeClassifier\n'), ((12690, 12766), 'streamselect.utils.Observation', 'Observation', ([], {'x': 'x', 'y': 'y', 'seen_at': 't', 'active_state_id': 'baseline_c1_state.state_id'}), '(x=x, y=y, seen_at=t, active_state_id=baseline_c1_state.state_id)\n', (12701, 12766), False, 'from streamselect.utils import Observation\n'), ((13604, 13680), 'streamselect.utils.Observation', 'Observation', ([], {'x': 'x', 'y': 'y', 'seen_at': 't', 'active_state_id': 'baseline_c1_state.state_id'}), '(x=x, y=y, seen_at=t, active_state_id=baseline_c1_state.state_id)\n', (13615, 13680), False, 'from streamselect.utils import Observation\n'), ((15212, 15237), 'river.tree.HoeffdingTreeClassifier', 'HoeffdingTreeClassifier', ([], {}), '()\n', (15235, 15237), False, 'from river.tree import HoeffdingTreeClassifier\n'), ((15725, 15801), 'streamselect.utils.Observation', 'Observation', ([], {'x': 'x', 'y': 'y', 'seen_at': 't', 'active_state_id': 'baseline_c2_state.state_id'}), '(x=x, y=y, seen_at=t, active_state_id=baseline_c2_state.state_id)\n', (15736, 15801), False, 'from streamselect.utils import Observation\n'), ((887, 910), 'streamselect.repository.AbsoluteValueComparer', 'AbsoluteValueComparer', ([], {}), '()\n', (908, 910), False, 'from streamselect.repository import AbsoluteValueComparer\n'), ((1670, 1693), 'streamselect.repository.AbsoluteValueComparer', 'AbsoluteValueComparer', ([], {}), '()\n', (1691, 1693), False, 'from streamselect.repository import AbsoluteValueComparer\n'), ((2191, 2214), 'streamselect.repository.AbsoluteValueComparer', 'AbsoluteValueComparer', ([], {}), '()\n', (2212, 2214), False, 'from streamselect.repository import AbsoluteValueComparer\n'), ((2712, 2735), 'streamselect.repository.AbsoluteValueComparer', 'AbsoluteValueComparer', ([], {}), '()\n', (2733, 2735), False, 'from streamselect.repository import AbsoluteValueComparer\n'), ((3321, 3344), 'streamselect.repository.AbsoluteValueComparer', 'AbsoluteValueComparer', ([], {}), '()\n', (3342, 3344), False, 'from streamselect.repository import AbsoluteValueComparer\n'), ((4387, 4410), 'streamselect.repository.AbsoluteValueComparer', 'AbsoluteValueComparer', ([], {}), '()\n', (4408, 4410), False, 'from streamselect.repository import AbsoluteValueComparer\n'), ((4594, 4669), 'streamselect.concept_representations.ErrorRateRepresentation', 'ErrorRateRepresentation', (['al_classifier.representation_window_size', 'state_id'], {}), '(al_classifier.representation_window_size, state_id)\n', (4617, 4669), False, 'from streamselect.concept_representations import ErrorRateRepresentation\n'), ((6134, 6157), 'streamselect.repository.AbsoluteValueComparer', 'AbsoluteValueComparer', ([], {}), '()\n', (6155, 6157), False, 'from streamselect.repository import AbsoluteValueComparer\n'), ((6341, 6436), 'streamselect.concept_representations.ErrorRateRepresentation', 'ErrorRateRepresentation', (['al_classifier.representation_window_size', 'state_id'], {'mode': '"""concept"""'}), "(al_classifier.representation_window_size, state_id,\n mode='concept')\n", (6364, 6436), False, 'from streamselect.concept_representations import ErrorRateRepresentation\n'), ((10093, 10166), 'streamselect.utils.Observation', 'Observation', ([], {'x': 'x', 'y': 'y', 'seen_at': 't', 'active_state_id': 'baseline_state.state_id'}), '(x=x, y=y, seen_at=t, active_state_id=baseline_state.state_id)\n', (10104, 10166), False, 'from streamselect.utils import Observation\n'), ((11866, 11889), 'streamselect.repository.AbsoluteValueComparer', 'AbsoluteValueComparer', ([], {}), '()\n', (11887, 11889), False, 'from streamselect.repository import AbsoluteValueComparer\n'), ((12076, 12171), 'streamselect.concept_representations.ErrorRateRepresentation', 'ErrorRateRepresentation', (['al_classifier.representation_window_size', 'state_id'], {'mode': '"""concept"""'}), "(al_classifier.representation_window_size, state_id,\n mode='concept')\n", (12099, 12171), False, 'from streamselect.concept_representations import ErrorRateRepresentation\n'), ((15264, 15359), 'streamselect.concept_representations.ErrorRateRepresentation', 'ErrorRateRepresentation', (['al_classifier.representation_window_size', 'state_id'], {'mode': '"""concept"""'}), "(al_classifier.representation_window_size, state_id,\n mode='concept')\n", (15287, 15359), False, 'from streamselect.concept_representations import ErrorRateRepresentation\n'), ((17663, 17686), 'streamselect.repository.AbsoluteValueComparer', 'AbsoluteValueComparer', ([], {}), '()\n', (17684, 17686), False, 'from streamselect.repository import AbsoluteValueComparer\n'), ((19654, 19677), 'streamselect.repository.AbsoluteValueComparer', 'AbsoluteValueComparer', ([], {}), '()\n', (19675, 19677), False, 'from streamselect.repository import AbsoluteValueComparer\n'), ((17818, 17850), 'streamselect.adaptive_learning.reidentification_schedulers.DriftDetectionCheck', 'DriftDetectionCheck', (['check_delay'], {}), '(check_delay)\n', (17837, 17850), False, 'from streamselect.adaptive_learning.reidentification_schedulers import DriftDetectionCheck, DriftInfo, DriftType, PeriodicCheck\n'), ((19809, 19836), 'streamselect.adaptive_learning.reidentification_schedulers.PeriodicCheck', 'PeriodicCheck', (['check_period'], {}), '(check_period)\n', (19822, 19836), False, 'from streamselect.adaptive_learning.reidentification_schedulers import DriftDetectionCheck, DriftInfo, DriftType, PeriodicCheck\n')]
|
import pickle
import time
import networkx
import torch
import torch.nn as nn
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
"""
Experimental : simple deep learning model to infer protein present in a tissue from a
vector of expression.
"""
class PPGCN(nn.Module):
def __init__(self, accepted_link_value=0.0):
super(PPGCN, self).__init__()
self.complete_graph = networkx.read_gpickle("../data/pp_interactions_undirected.gpickle")
self.tissue_num_mapping = pickle.load(open("../data/tissue_num_mapping.pck", "rb"))
self.accepted_link_value = accepted_link_value
self.restrain_graph_by_values()
self.current_values = {}
self.new_values = {}
self.all_layers = None
self.construct_layers()
def restrain_graph_by_values(self):
edges = [
(x, y)
for x, y, z in self.complete_graph.edges(data=True)
if z["score"] > self.accepted_link_value
]
self.complete_graph = self.complete_graph.edge_subgraph(edges)
def construct_layers(self):
params = {}
num_parameters = 0
for node, neighbors in self.complete_graph.adjacency():
num_links = len(neighbors.keys())
if num_links == 0:
import pdb
pdb.set_trace()
params[node] = nn.Sequential(
nn.Linear(num_links, num_links // 2 + 1, bias=False),
nn.ReLU(),
nn.Linear(num_links // 2 + 1, 1, bias=False),
nn.Sigmoid(),
)
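            # The assignment below replaces the two-layer MLP above, so only the single
            # linear + sigmoid head is actually used (num_parameters still counts the MLP).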
params[node] = nn.Sequential(nn.Linear(num_links, 1, bias=False), nn.Sigmoid())
num_parameters += (num_links * (num_links // 2 + 1)) + num_links // 2 + 1
# num_parameters += num_links
print("Number of paramters :", num_parameters)
self.all_layers = nn.ModuleDict(params).to(device)
def forward_n_times(self, n_times, requires_grad, init_values=None, tissue=None):
if tissue is not None:
self.init_values_from_tissue(tissue)
if init_values is not None:
self.init_values_from_data(init_values)
for _ in range(n_times):
d = time.time()
if requires_grad:
self.forward_once()
else:
with torch.no_grad():
self.forward_once()
print(time.time() - d)
data_out = [self.new_values[node] for node in self.new_values.keys()]
assert len(data_out) == len(list(self.complete_graph.nodes()))
data_out = torch.as_tensor(data_out, dtype=torch.float32, device=device)
return data_out
def forward_once(self):
for node, neighbors in self.complete_graph.adjacency():
vals = []
for neighbor in neighbors.keys():
vals.append(self.current_values[neighbor])
vals = torch.as_tensor(vals, dtype=torch.float32, device=device)
self.new_values[node] = self.all_layers[node](vals)
for node in self.new_values.keys():
self.current_values[node] = self.new_values[node]
def init_values_from_tissue(self, tissue):
self.new_values = {}
num = self.tissue_num_mapping[tissue]
for node in self.complete_graph.nodes():
self.new_values[node] = torch.as_tensor([0.0], dtype=torch.float32, device=device)
self.current_values[node] = torch.as_tensor(
[self.complete_graph.nodes[node]["expression_data"][num]],
dtype=torch.float32,
device=device,
)
def init_values_from_data(self, init_values):
raise NotImplementedError
def propagate_node(self, node):
for pred in self.complete_graph.predecessors(node):
actions = []
for edge in self.complete_graph[pred][node]:
link_type = self.complete_graph[pred][node][edge]["link"]
link_score = self.complete_graph[pred][node][edge]["score"]
if link_score > self.accepted_link_value and link_type in self.accepted_links:
actions.append(link_type)
# self.__getattribute__(link_type + "_propagate")(pred, node)
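        # NOTE: likely unfinished; an undirected graph has no predecessors(), and
        # self.accepted_links is never defined on this class.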
|
[
"torch.nn.ReLU",
"networkx.read_gpickle",
"time.time",
"torch.nn.ModuleDict",
"torch.cuda.is_available",
"pdb.set_trace",
"torch.nn.Linear",
"torch.as_tensor",
"torch.no_grad",
"torch.nn.Sigmoid"
] |
[((113, 138), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (136, 138), False, 'import torch\n'), ((411, 478), 'networkx.read_gpickle', 'networkx.read_gpickle', (['"""../data/pp_interactions_undirected.gpickle"""'], {}), "('../data/pp_interactions_undirected.gpickle')\n", (432, 478), False, 'import networkx\n'), ((2610, 2671), 'torch.as_tensor', 'torch.as_tensor', (['data_out'], {'dtype': 'torch.float32', 'device': 'device'}), '(data_out, dtype=torch.float32, device=device)\n', (2625, 2671), False, 'import torch\n'), ((2232, 2243), 'time.time', 'time.time', ([], {}), '()\n', (2241, 2243), False, 'import time\n'), ((2935, 2992), 'torch.as_tensor', 'torch.as_tensor', (['vals'], {'dtype': 'torch.float32', 'device': 'device'}), '(vals, dtype=torch.float32, device=device)\n', (2950, 2992), False, 'import torch\n'), ((3371, 3429), 'torch.as_tensor', 'torch.as_tensor', (['[0.0]'], {'dtype': 'torch.float32', 'device': 'device'}), '([0.0], dtype=torch.float32, device=device)\n', (3386, 3429), False, 'import torch\n'), ((3470, 3584), 'torch.as_tensor', 'torch.as_tensor', (["[self.complete_graph.nodes[node]['expression_data'][num]]"], {'dtype': 'torch.float32', 'device': 'device'}), "([self.complete_graph.nodes[node]['expression_data'][num]],\n dtype=torch.float32, device=device)\n", (3485, 3584), False, 'import torch\n'), ((1332, 1347), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (1345, 1347), False, 'import pdb\n'), ((1406, 1458), 'torch.nn.Linear', 'nn.Linear', (['num_links', '(num_links // 2 + 1)'], {'bias': '(False)'}), '(num_links, num_links // 2 + 1, bias=False)\n', (1415, 1458), True, 'import torch.nn as nn\n'), ((1476, 1485), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1483, 1485), True, 'import torch.nn as nn\n'), ((1503, 1547), 'torch.nn.Linear', 'nn.Linear', (['(num_links // 2 + 1)', '(1)'], {'bias': '(False)'}), '(num_links // 2 + 1, 1, bias=False)\n', (1512, 1547), True, 'import torch.nn as nn\n'), ((1565, 1577), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1575, 1577), True, 'import torch.nn as nn\n'), ((1634, 1669), 'torch.nn.Linear', 'nn.Linear', (['num_links', '(1)'], {'bias': '(False)'}), '(num_links, 1, bias=False)\n', (1643, 1669), True, 'import torch.nn as nn\n'), ((1671, 1683), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1681, 1683), True, 'import torch.nn as nn\n'), ((1894, 1915), 'torch.nn.ModuleDict', 'nn.ModuleDict', (['params'], {}), '(params)\n', (1907, 1915), True, 'import torch.nn as nn\n'), ((2349, 2364), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2362, 2364), False, 'import torch\n'), ((2424, 2435), 'time.time', 'time.time', ([], {}), '()\n', (2433, 2435), False, 'import time\n')]
|
import urllib2
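# Python 2 only: urllib2 was split into urllib.request and urllib.error in Python 3.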
class Request:
@staticmethod
def update():
base = 'https://iitbbs.herokuapp.com/'
news = urllib2.urlopen(base + 'news').read()
notices = urllib2.urlopen(base + 'notices').read()
events = urllib2.urlopen(base + 'events').read()
nNews = open('json/new/news.json', 'w')
nNews.write(news)
nNews.close()
nNotices = open('json/new/notices.json', 'w')
nNotices.write(notices)
nNotices.close()
nEvents = open('json/new/events.json', 'w')
nEvents.write(events)
nEvents.close()
|
[
"urllib2.urlopen"
] |
[((113, 143), 'urllib2.urlopen', 'urllib2.urlopen', (["(base + 'news')"], {}), "(base + 'news')\n", (128, 143), False, 'import urllib2\n'), ((163, 196), 'urllib2.urlopen', 'urllib2.urlopen', (["(base + 'notices')"], {}), "(base + 'notices')\n", (178, 196), False, 'import urllib2\n'), ((215, 247), 'urllib2.urlopen', 'urllib2.urlopen', (["(base + 'events')"], {}), "(base + 'events')\n", (230, 247), False, 'import urllib2\n')]
|
from bitstring import BitArray
import collections
import functools
import itertools
import json
import math
import time
class Tree:
def __init__(self, left=None, right=None):
self.left = left
self.right = right
def construct_frequency_tree(freqs):
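    # freqs must be sorted by count descending; each pass merges the two least
    # frequent nodes (standard Huffman tree construction).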
nodes = freqs
while len(nodes) > 1:
key1, val1 = nodes[-1]
key2, val2 = nodes[-2]
node = Tree(key1, key2)
nodes = nodes[:-2]
nodes.append((node, val1 + val2))
nodes = sorted(nodes, key=lambda x: x[1], reverse=True)
return nodes
def generate_huffman_code(node, binary, is_left_node=True):
if isinstance(node, str) or isinstance(node, int):
return {node: binary}
d = {}
d.update(generate_huffman_code(node.left, binary + [0], True))
d.update(generate_huffman_code(node.right, binary + [1], False))
return d
def count_frequencies(string):
freq = collections.defaultdict(int)
for char in string:
freq[char] += 1
return sorted(freq.items(), key=lambda x: x[1], reverse=True)
def max_len_table(tables):
return max([len(huff) for huff in tables])
def bits_needed_to_represent(num):
    # Equivalent to int.bit_length() for values below 2**33.
    mask = 0b1 << 32
    for i in range(33):  # scan bits 32 down to 0
        if num & mask:
            return 33 - i
        mask >>= 1
    return 0
def leaves_at_depth(trie, target_level, level=0):
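    # Collect the keys of every trie node exactly target_level levels below the root.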
if level == target_level:
return trie.keys()
keys = []
for v in trie.values():
keys += leaves_at_depth(v, target_level, level+1)
return keys
def count_children_trie(trie, children):
children.append(len(trie.keys()))
for k, v in trie.items():
count_children_trie(v, children)
def max_children_trie(trie):
max_len = 0
for k, v in trie.items():
v_len = len(v.keys())
max_len = max(max_len, v_len)
max_len = max(max_len, max_children_trie(v))
return max_len
def convert_trie_to_bits(trie, bit_trie, tables, depth=0, smart=False):
for k, v in trie.items():
if not smart or v.keys():
bit_trie.append(tables[-1][len(v.keys())])
bit_trie.append(tables[depth][k])
convert_trie_to_bits(v, bit_trie, tables, depth+1, smart=smart)
class BitStream:
def __init__(self, bit_array, char_map=None):
self.bits = bit_array
self.bin = self.bits.bin
self.i = 0
self.char_map = char_map
def write(self, key, num_bits):
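        # Emit num_bits bits of the value MSB-first; string keys are mapped through char_map.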
bits = []
val = self.char_map[key] if isinstance(key, str) else key
mask = 0b1 << num_bits - 1
for i in range(1, num_bits + 1):
bits.append((val & mask) >> num_bits - i)
mask >>= 1
self.bits.append(bits)
def append(self, data):
self.bits.append(data)
def read(self, num_bits):
bits = self.bin[self.i:self.i+num_bits]
self.i += num_bits
return bits
def read_int(self, num_bits):
return int(self.read(num_bits), 2)
def read_varint(self, table):
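        # Consume bits one at a time until the accumulated prefix matches a code
        # in table (prefix-free Huffman decode).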
buf = ''
i = 0
while True:
buf += str(self.read(1))
if buf in table.keys():
return table[buf]
i += 1
#if i > max(map(len, table.keys())):
# print(i, buf, table.keys())
# sys.exit()
def __len__(self):
return len(self.bits)
def read_payload_trie(bits, symbols, tables, depth=0, prefix=''):
num_children = 0
if depth < len(tables) - 2:
num_children = bits.read_varint(tables[-1])
symbol = bits.read_varint(tables[depth])
char = symbols[symbol]
if depth == len(tables) - 2:
return [prefix + char]
words = []
for i in range(num_children):
words += read_payload_trie(bits, symbols, tables, depth+1, prefix + char)
return words
def read_payload(bits, symbols, tables):
word = ''
for i in range(len(tables)):
symbol = bits.read_varint(tables[i])
word += symbols[symbol]
return word
def decode(bits, symbols, use_trie):
bits = BitStream(bits)
# Header.
table_size = bits.read_int(8)
word_size = bits.read_int(8)
num_tables = bits.read_int(8)
num_symbols = bits.read_int(16)
print("Table Size Bits:", table_size)
print("Huffman Table Word Bits:", word_size)
print("Num Tables:", num_tables)
print("Num Symbols:", num_symbols)
tables = []
for i, table_id in enumerate(range(num_tables)):
num_items = bits.read_int(table_size)
print("Table {}:".format(i), num_items)
table = {}
for j in range(num_items):
char = bits.read_int(word_size)
encoding_size = bits.read_int(8)
encoding = bits.read(encoding_size)
table[encoding] = char
tables.append(table)
words = []
for alpha in range(num_symbols):
if use_trie:
words += read_payload_trie(bits, symbols, tables)
else:
words.append(read_payload(bits, symbols, tables))
return words
if __name__ == "__main__":
with open('wordle.json', 'r') as fp:
words = json.load(fp)
def gram_1(x):
return x
def gram_2(x):
return [x[0], x[1], x[2], x[3:]]
func = gram_1
use_trie = True
if func == gram_1:
INT_MAP = dict(zip(
list('abcdefghijklmnopqrstuvwxyz'),
range(26)
))
elif func == gram_2:
INT_MAP = dict(zip(
list('abcdefghijklmnopqrstuvwxyz') +
[x[0]+x[1] for x in itertools.product(*['abcdefghijklmnopqrstuvwxyz'] * 2)],
range(702)
))
if use_trie:
trie = {}
for word in words:
node = trie
for letter in func(word):
if letter not in node:
node[letter] = {}
node = node[letter]
tables = []
for i in range(len(func(words[0]))):
if use_trie:
string = leaves_at_depth(trie, i)
else:
string = [func(x)[i] for x in words]
freqs = count_frequencies(string)
tree = construct_frequency_tree(freqs)
huff = generate_huffman_code(tree[0][0], [])
total = sum([x[1] for x in freqs])
smaller = 0
bigger = 0
for k, v in sorted(freqs, key=lambda x: x[1], reverse=True):
if len(huff[k]) < 5:
smaller += v
if len(huff[k]) > 5:
bigger += v
print("{} | {} | {} | {:0.1f}%".format(k, "".join(map(str, huff[k])), v,
v/total*100))
print("Smaller: {:0.1f}%".format(smaller / total * 100))
print("Bigger: {:0.1f}%".format(bigger / total * 100))
print("")
tables.append(huff)
if use_trie:
string = []
count_children_trie(trie, string)
freqs = count_frequencies(string)
tree = construct_frequency_tree(freqs[1:])
huff = generate_huffman_code(tree[0][0], [])
total = sum([x[1] for x in freqs[1:]])
smaller = 0
bigger = 0
print(huff, freqs)
for k, v in sorted(freqs[1:], key=lambda x: x[1], reverse=True):
if len(huff[k]) < 5:
smaller += v
if len(huff[k]) > 5:
bigger += v
print("{} | {} | {} | {:0.1f}%".format(k, "".join(map(str, huff[k])), v,
v/total*100))
print("Smaller: {:0.1f}%".format(smaller / total * 100))
print("Bigger: {:0.1f}%".format(bigger / total * 100))
print("")
tables.append(huff)
bits = BitStream(BitArray(), char_map=INT_MAP)
table_size = bits_needed_to_represent(max_len_table(tables))
word_size = bits_needed_to_represent(len(INT_MAP))
num_tables = len(tables)
if use_trie:
num_symbols = len(tables[0])
else:
num_symbols = len(words)
# Header.
bits.write(table_size, 8)
bits.write(word_size, 8)
bits.write(num_tables, 8)
bits.write(num_symbols, 16)
# Encode the Huffman tables.
header_size = len(bits)
for table in tables:
bits.write(len(table), table_size)
for char, binary in table.items():
bits.write(char, word_size)
bits.write(len(binary), 8)
bits.append(binary)
huff_size = len(bits) - header_size
# Encode the payload.
if use_trie:
max_children = max_children_trie(trie)
convert_trie_to_bits(trie, bits.bits, tables, smart=True)
else:
for word in words:
for i, char in enumerate(func(word)):
binary = tables[i][char]
bits.append(binary)
payload_size = len(bits) - huff_size
print("#")
print("# Encoding")
print("#")
print("")
print("Table Size Bits:", table_size)
print("Huffman Table Word Bits:", word_size)
print("Num Tables:", num_tables)
print("Num Symbols", num_symbols)
for i, table in enumerate(tables):
print("Table {}:".format(i), len(table))
print("")
print("Header (Bytes):", math.ceil(header_size / 8))
print("Tables (Bytes):", math.ceil(huff_size / 8))
print("Payload (Bytes):", math.ceil(payload_size / 8))
print("Filesize (Bytes):", math.ceil(len(bits) / 8))
with open("wordle_{}.bin".format(func.__name__), "wb") as fp:
fp.write(bits.bits.tobytes())
print("")
print("#")
print("# Decoding")
print("#")
print("")
s = time.monotonic()
words = decode(bits.bits, list(INT_MAP.keys()), use_trie)
print("")
print("Num Words Decoded:", len(words))
print("First Word:", words[0])
print("Last Word:", words[-1])
print("Decode Time: {:0.3f}s".format(time.monotonic() - s))
|
[
"json.load",
"math.ceil",
"bitstring.BitArray",
"collections.defaultdict",
"time.monotonic",
"itertools.product"
] |
[((851, 879), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (874, 879), False, 'import collections\n'), ((8537, 8553), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (8551, 8553), False, 'import time\n'), ((4665, 4678), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (4674, 4678), False, 'import json\n'), ((6828, 6838), 'bitstring.BitArray', 'BitArray', ([], {}), '()\n', (6836, 6838), False, 'from bitstring import BitArray\n'), ((8165, 8191), 'math.ceil', 'math.ceil', (['(header_size / 8)'], {}), '(header_size / 8)\n', (8174, 8191), False, 'import math\n'), ((8220, 8244), 'math.ceil', 'math.ceil', (['(huff_size / 8)'], {}), '(huff_size / 8)\n', (8229, 8244), False, 'import math\n'), ((8274, 8301), 'math.ceil', 'math.ceil', (['(payload_size / 8)'], {}), '(payload_size / 8)\n', (8283, 8301), False, 'import math\n'), ((8773, 8789), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (8787, 8789), False, 'import time\n'), ((5029, 5085), 'itertools.product', 'itertools.product', (["*(['abcdefghijklmnopqrstuvwxyz'] * 2)"], {}), "(*(['abcdefghijklmnopqrstuvwxyz'] * 2))\n", (5046, 5085), False, 'import itertools\n')]
|
from py_hcl.firrtl_ir.expr.prim_ops import Add
from py_hcl.firrtl_ir.shortcuts import uw, sw, u, w, s
from py_hcl.firrtl_ir.type import UIntType, SIntType
from tests.test_firrtl_ir.utils import serialize_equal
from .helper import OpCase, basis_tester, \
encounter_error_tester, type_wrong_cases_2_args_gen, max_width
def args(*arg_types):
class C:
@staticmethod
def tpe(res_type):
return OpCase(Add).arg_types(*arg_types).res_type(res_type)
return C
add_basis_cases = [
args(UIntType, UIntType).tpe(lambda x, y: uw(max_width(x, y) + 1)),
args(SIntType, SIntType).tpe(lambda x, y: sw(max_width(x, y) + 1)),
]
add_type_wrong_cases = type_wrong_cases_2_args_gen(Add)
add_width_wrong_cases = [
args(UIntType, UIntType).tpe(lambda x, y: uw(max_width(x, y))),
args(SIntType, SIntType).tpe(lambda x, y: sw(max_width(x, y))),
args(UIntType, UIntType).tpe(lambda x, y: uw(max_width(x, y) + 2)),
args(SIntType, SIntType).tpe(lambda x, y: sw(max_width(x, y) + 2)),
args(UIntType, UIntType).tpe(lambda x, y: uw(max_width(x, y) - 1)),
args(SIntType, SIntType).tpe(lambda x, y: sw(max_width(x, y) - 1)),
args(UIntType, UIntType).tpe(lambda x, y: uw(1)),
args(SIntType, SIntType).tpe(lambda x, y: sw(1)),
]
def test_add():
basis_tester(add_basis_cases)
encounter_error_tester(add_type_wrong_cases)
encounter_error_tester(add_width_wrong_cases)
serialize_equal(Add([u(20, w(5)), u(15, w(4))], uw(6)),
'add(UInt<5>("h14"), UInt<4>("hf"))')
serialize_equal(Add([s(-20, w(6)), s(-15, w(5))], sw(6)),
'add(SInt<6>("h-14"), SInt<5>("h-f"))')
|
[
"py_hcl.firrtl_ir.shortcuts.uw",
"py_hcl.firrtl_ir.shortcuts.w",
"py_hcl.firrtl_ir.shortcuts.sw"
] |
[((1215, 1220), 'py_hcl.firrtl_ir.shortcuts.uw', 'uw', (['(1)'], {}), '(1)\n', (1217, 1220), False, 'from py_hcl.firrtl_ir.shortcuts import uw, sw, u, w, s\n'), ((1269, 1274), 'py_hcl.firrtl_ir.shortcuts.sw', 'sw', (['(1)'], {}), '(1)\n', (1271, 1274), False, 'from py_hcl.firrtl_ir.shortcuts import uw, sw, u, w, s\n'), ((1482, 1487), 'py_hcl.firrtl_ir.shortcuts.uw', 'uw', (['(6)'], {}), '(6)\n', (1484, 1487), False, 'from py_hcl.firrtl_ir.shortcuts import uw, sw, u, w, s\n'), ((1602, 1607), 'py_hcl.firrtl_ir.shortcuts.sw', 'sw', (['(6)'], {}), '(6)\n', (1604, 1607), False, 'from py_hcl.firrtl_ir.shortcuts import uw, sw, u, w, s\n'), ((1461, 1465), 'py_hcl.firrtl_ir.shortcuts.w', 'w', (['(5)'], {}), '(5)\n', (1462, 1465), False, 'from py_hcl.firrtl_ir.shortcuts import uw, sw, u, w, s\n'), ((1474, 1478), 'py_hcl.firrtl_ir.shortcuts.w', 'w', (['(4)'], {}), '(4)\n', (1475, 1478), False, 'from py_hcl.firrtl_ir.shortcuts import uw, sw, u, w, s\n'), ((1580, 1584), 'py_hcl.firrtl_ir.shortcuts.w', 'w', (['(6)'], {}), '(6)\n', (1581, 1584), False, 'from py_hcl.firrtl_ir.shortcuts import uw, sw, u, w, s\n'), ((1594, 1598), 'py_hcl.firrtl_ir.shortcuts.w', 'w', (['(5)'], {}), '(5)\n', (1595, 1598), False, 'from py_hcl.firrtl_ir.shortcuts import uw, sw, u, w, s\n')]
|
from pathlib import Path
from typing import TypeVar, Generic
from pydantic import BaseModel, Field
from flexlate.config import AppliedTemplateWithSource
from flexlate.template.base import Template
from flexlate.template_data import TemplateData
T = TypeVar("T", bound=Template)
class Renderable(BaseModel, Generic[T]):
template: T
data: TemplateData = Field(default_factory=dict)
out_root: Path = Path(".")
skip_prompts: bool = False
@classmethod
def from_applied_template_with_source(
cls, applied_template_with_source: AppliedTemplateWithSource
) -> "Renderable":
template, data = applied_template_with_source.to_template_and_data()
return cls(
template=template,
data=data,
out_root=applied_template_with_source.applied_template.root,
)
class Config:
arbitrary_types_allowed = True
def __eq__(self, other):
try:
return all(
[
# Assumed that templates have unique names by this point
# Comparing only name to avoid issues with comparing between temp repo and main repo, etc.
self.template.name == other.template.name,
self.data == other.data,
self.skip_prompts == other.skip_prompts,
# Resolve out root again for issues comparing between temp repo and main repo, etc.
# Put this last as it is the most expensive check
self.out_root.resolve() == other.out_root.resolve(),
]
)
except AttributeError:
return False
|
[
"typing.TypeVar",
"pathlib.Path",
"pydantic.Field"
] |
[((252, 280), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {'bound': 'Template'}), "('T', bound=Template)\n", (259, 280), False, 'from typing import TypeVar, Generic\n'), ((365, 392), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (370, 392), False, 'from pydantic import BaseModel, Field\n'), ((414, 423), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (418, 423), False, 'from pathlib import Path\n')]
|
from django.contrib import admin
from history.family_history.models import FamilyHistory
class FamilyHistoryAdmin(admin.ModelAdmin):
pass
admin.site.register(FamilyHistory, FamilyHistoryAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((145, 199), 'django.contrib.admin.site.register', 'admin.site.register', (['FamilyHistory', 'FamilyHistoryAdmin'], {}), '(FamilyHistory, FamilyHistoryAdmin)\n', (164, 199), False, 'from django.contrib import admin\n')]
|
# Generated by Selenium IDE
import pytest
import time
import json
import os
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
class TestCreateitemtypemapping():
def setup_method(self, method):
# use headless mode
options = Options()
# options.add_argument('--headless')
self.driver = webdriver.Chrome(options=options)
self.vars = {}
def teardown_method(self, method):
self.driver.quit()
def test_createitemtypemapping(self):
# Test name: create itemtype mapping
# Step # | name | target | value | comment
# 1 | open | / | |
self.driver.get(os.environ['WEKO3_HOST'])
# 2 | setWindowSize | 1646x976 | |
self.driver.set_window_size(1646, 976)
# 3 | click | linkText=Log in | |
self.driver.find_element(By.LINK_TEXT, "Log in").click()
# 4 | type | id=email | <EMAIL> |
self.driver.find_element(By.ID, "email").send_keys("<EMAIL>")
# 5 | type | id=password | <PASSWORD> |
self.driver.find_element(By.ID, "password").send_keys("<PASSWORD>")
# 6 | sendKeys | id=password | ${KEY_ENTER} |
self.driver.find_element(By.ID, "password").send_keys(Keys.ENTER)
# 7 | click | linkText=<EMAIL> | |
self.driver.find_element(By.LINK_TEXT, "<EMAIL>").click()
# 8 | click | linkText=Administration | |
self.driver.find_element(By.LINK_TEXT, "Administration").click()
# 9 | click | linkText=Item Types | |
self.driver.find_element(By.LINK_TEXT, "Item Types").click()
# 10 | click | linkText=Mapping | |
self.driver.find_element(By.LINK_TEXT, "Mapping").click()
# 11 | click | css=.panel > .panel-body | |
self.driver.find_element(By.CSS_SELECTOR, ".panel > .panel-body").click()
# 12 | click | id=item-type-lists | |
self.driver.find_element(By.ID, "item-type-lists").click()
# 13 | select | id=item-type-lists | label=data2(1) |
dropdown = self.driver.find_element(By.ID, "item-type-lists")
dropdown.find_element(By.XPATH, "//option[. = 'data2(1)']").click()
# 14 | click | css=#ul_pubdate > .list_jpcoar_mapping input | |
self.driver.find_element(By.CSS_SELECTOR, "#ul_pubdate > .list_jpcoar_mapping input").click()
# 15 | click | css=#ul_pubdate > .list_jpcoar_mapping .form-control | |
self.driver.find_element(By.CSS_SELECTOR, "#ul_pubdate > .list_jpcoar_mapping .form-control").click()
# 16 | select | css=#ul_pubdate > .list_jpcoar_mapping .form-control | label=date |
dropdown = self.driver.find_element(By.CSS_SELECTOR, "#ul_pubdate > .list_jpcoar_mapping .form-control")
dropdown.find_element(By.XPATH, "//option[. = 'date']").click()
# 17 | click | css=#ul_item_1570959927393 > .list_jpcoar_mapping input | |
self.driver.find_element(By.CSS_SELECTOR, "#ul_item_1570959927393 > .list_jpcoar_mapping input").click()
# 18 | click | css=#ul_item_1570959927393 > .list_jpcoar_mapping .form-control | |
self.driver.find_element(By.CSS_SELECTOR, "#ul_item_1570959927393 > .list_jpcoar_mapping .form-control").click()
# 19 | select | css=#ul_item_1570959927393 > .list_jpcoar_mapping .form-control | label=title |
dropdown = self.driver.find_element(By.CSS_SELECTOR, "#ul_item_1570959927393 > .list_jpcoar_mapping .form-control")
dropdown.find_element(By.XPATH, "//option[. = 'title']").click()
# 20 | click | css=#sub_mapping-add > .glyphicon | |
self.driver.find_element(By.CSS_SELECTOR, "#sub_mapping-add > .glyphicon").click()
# 21 | mouseOver | css=#sub_mapping-add > .glyphicon | |
element = self.driver.find_element(By.CSS_SELECTOR, "#sub_mapping-add > .glyphicon")
        actions = ActionChains(self.driver)
actions.move_to_element(element).perform()
# 22 | mouseOut | css=#sub_mapping-add > .glyphicon | |
element = self.driver.find_element(By.CSS_SELECTOR, "body")
        actions = ActionChains(self.driver)
actions.move_to_element(element, 0, 0).perform()
# 23 | click | css=.row:nth-child(3) .col-sm-8 .form-control | |
self.driver.find_element(By.CSS_SELECTOR, ".row:nth-child(3) .col-sm-8 .form-control").click()
# 24 | select | css=.row:nth-child(3) .col-sm-8 .form-control | label=Language |
dropdown = self.driver.find_element(By.CSS_SELECTOR, ".row:nth-child(3) .col-sm-8 .form-control")
dropdown.find_element(By.XPATH, "//option[. = 'Language']").click()
# 25 | click | css=#ul_item_1570959939365 > .list_jpcoar_mapping | |
self.driver.find_element(By.CSS_SELECTOR, "#ul_item_1570959939365 > .list_jpcoar_mapping").click()
# 26 | click | css=#ul_item_1570959939365 > .list_jpcoar_mapping input | |
self.driver.find_element(By.CSS_SELECTOR, "#ul_item_1570959939365 > .list_jpcoar_mapping input").click()
# 27 | click | css=#ul_item_1570959939365 > .list_jpcoar_mapping .form-control | |
self.driver.find_element(By.CSS_SELECTOR, "#ul_item_1570959939365 > .list_jpcoar_mapping .form-control").click()
# 28 | select | css=#ul_item_1570959939365 > .list_jpcoar_mapping .form-control | label=file |
dropdown = self.driver.find_element(By.CSS_SELECTOR, "#ul_item_1570959939365 > .list_jpcoar_mapping .form-control")
dropdown.find_element(By.XPATH, "//option[. = 'file']").click()
# 29 | click | css=.sub_child_list .col-sm-7 .form-control | |
self.driver.find_element(By.CSS_SELECTOR, ".sub_child_list .col-sm-7 .form-control").click()
# 30 | select | css=.sub_child_list .col-sm-7 .form-control | label=file.date |
dropdown = self.driver.find_element(By.CSS_SELECTOR, ".sub_child_list .col-sm-7 .form-control")
dropdown.find_element(By.XPATH, "//option[. = 'file.date']").click()
# 31 | click | css=.row:nth-child(2) .col-sm-8 .form-control | |
self.driver.find_element(By.CSS_SELECTOR, ".row:nth-child(2) .col-sm-8 .form-control").click()
# 32 | click | id=mapping-submit | |
self.driver.find_element(By.ID, "mapping-submit").click()
# 33 | click | css=.caret | |
self.driver.find_element(By.CSS_SELECTOR, ".caret").click()
# 34 | click | linkText=Logout | |
self.driver.find_element(By.LINK_TEXT, "Logout").click()
|
[
"selenium.webdriver.chrome.options.Options",
"selenium.webdriver.Chrome",
"selenium.webdriver.common.action_chains.ActionChains"
] |
[((543, 552), 'selenium.webdriver.chrome.options.Options', 'Options', ([], {}), '()\n', (550, 552), False, 'from selenium.webdriver.chrome.options import Options\n'), ((612, 645), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'options': 'options'}), '(options=options)\n', (628, 645), False, 'from selenium import webdriver\n'), ((3939, 3959), 'selenium.webdriver.common.action_chains.ActionChains', 'ActionChains', (['driver'], {}), '(driver)\n', (3951, 3959), False, 'from selenium.webdriver.common.action_chains import ActionChains\n'), ((4147, 4167), 'selenium.webdriver.common.action_chains.ActionChains', 'ActionChains', (['driver'], {}), '(driver)\n', (4159, 4167), False, 'from selenium.webdriver.common.action_chains import ActionChains\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Import libraries
import cv2
import numpy as np
# Load the image
image = cv2.imread("original.jpg")
# Draw a bounding box
# cv2.rectangle(source image, top-left corner position, bottom-right corner position, color, line thickness)
# Positions (x, y) are specified from the origin (0, 0) at the top-left of the image
edited_image = cv2.rectangle(image, (100, 150), (200, 300), (0, 0, 255), 2)
# Display the image
cv2.imshow("edited_image", edited_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.imread",
"cv2.rectangle",
"cv2.imshow"
] |
[((111, 137), 'cv2.imread', 'cv2.imread', (['"""original.jpg"""'], {}), "('original.jpg')\n", (121, 137), False, 'import cv2\n'), ((258, 318), 'cv2.rectangle', 'cv2.rectangle', (['image', '(100, 150)', '(200, 300)', '(0, 0, 255)', '(2)'], {}), '(image, (100, 150), (200, 300), (0, 0, 255), 2)\n', (271, 318), False, 'import cv2\n'), ((328, 368), 'cv2.imshow', 'cv2.imshow', (['"""edited_image"""', 'edited_image'], {}), "('edited_image', edited_image)\n", (338, 368), False, 'import cv2\n'), ((369, 383), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (380, 383), False, 'import cv2\n'), ((384, 407), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (405, 407), False, 'import cv2\n')]
|
"""
Module with functions to generate PQR input (PQR)
Author: <NAME>
Email: <EMAIL>
"""
import os
kcalToKj = 4.184
def writePQR(molecule, pqrFile):
"""Generate PQR file
Parameters
----------
molecule : molecule class
Molecule class
pqrFile : str
PQR file name
"""
sigmaFactor = 2.0**(1.0/6.0) # 2^(1/6)
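    # Lennard-Jones minimum: Rmin = 2^(1/6) * sigma, so the PQR radius column
    # below stores Rmin/2 = (sigma/2) * 2^(1/6).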
with open(pqrFile, 'w') as ofile:
atomsToWrite = sorted([atom for atom in molecule.atoms[molecule.numberOfStructuralDummyAtoms:]], key = lambda x: x.serialOriginal)
        for i, atom in enumerate(atomsToWrite, start=1):
ofile.write('ATOM %5d %4s %3s %4d %8.3f%8.3f%8.3f%8.4f%7.4f\n' % (i, atom.nameOriginal, molecule.residueName, 1,atom.x + molecule.shiftX,
atom.y + molecule.shiftY, atom.z + molecule.shiftZ, atom.charge, (atom.sigma/2.0)*sigmaFactor))
def getFileNames(molname, workdir):
"""Return output file names
Parameters
----------
molname : str
Molecule name
workdir : str
Working folder path
Returns
-------
pqrFile : str
PQR file name
"""
    return os.path.join(workdir, molname + '.pqr')
def write(molecule, molName, workdir):
pqrFile = getFileNames(molName, workdir)
writePQR(molecule, pqrFile)
|
[
"os.path.join"
] |
[((1136, 1175), 'os.path.join', 'os.path.join', (['workdir', "(molname + '.pqr')"], {}), "(workdir, molname + '.pqr')\n", (1148, 1175), False, 'import os\n')]
|
#!/usr/bin/python
#
# BSD 3-Clause License.
# Copyright (C) 2017 <NAME>.
#
# Simple Cross Platform Installer Script
import sys
import os
import requests
from shutil import rmtree
# Packages to install
QArchive = {
"username" : "antony-jr",
"repo" : "QArchive",
"mkdir" : {
"QArchive/src",
"QArchive/include"
},
"install" : {
"CMakeLists.txt" : "QArchive/CMakeLists.txt",
"QArchive.pro" : "QArchive/QArchive.pro",
"QArchive" : "QArchive/QArchive",
"include/QArchive.hpp" : "QArchive/include/QArchive.hpp",
"src/QArchive.cc" : "QArchive/src/QArchive.cc",
"LICENSE" : "QArchive/LICENSE"
}
}
def installPackage(config):
print("Installing " + config["repo"])
# Make parent directory first.
if os.path.isfile(config["repo"]):
print("Deleting duplicate file(s)... ")
os.remove(config["repo"])
os.mkdir(config["repo"])
    else:
        if os.path.exists("QArchive"):
            rmtree("QArchive")
        os.mkdir("QArchive")  # always recreate a clean package directory
for i in config["mkdir"]:
print("Creating Directory " + i)
if os.path.exists(i):
rmtree(i)
os.mkdir(i) # Make the directory!
print("Downloading the latest release from github... ")
# Write files from the repo
for i in config["install"]:
resp = requests.get("https://raw.githubusercontent.com/"+config["username"]+"/"+config["repo"]+"/master/" + i)
        fp = open(config["install"][i], "wb")
for it in resp:
fp.write(it)
fp.close()
print("Installed "+config["repo"]+".")
return True
if __name__ == "__main__":
installPackage(QArchive)
sys.exit(0)
|
[
"os.mkdir",
"os.remove",
"os.path.exists",
"os.path.isfile",
"requests.get",
"shutil.rmtree",
"sys.exit"
] |
[((882, 912), 'os.path.isfile', 'os.path.isfile', (["config['repo']"], {}), "(config['repo'])\n", (896, 912), False, 'import os\n'), ((1778, 1789), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1786, 1789), False, 'import sys\n'), ((970, 995), 'os.remove', 'os.remove', (["config['repo']"], {}), "(config['repo'])\n", (979, 995), False, 'import os\n'), ((1004, 1028), 'os.mkdir', 'os.mkdir', (["config['repo']"], {}), "(config['repo'])\n", (1012, 1028), False, 'import os\n'), ((1047, 1073), 'os.path.exists', 'os.path.exists', (['"""QArchive"""'], {}), "('QArchive')\n", (1061, 1073), False, 'import os\n'), ((1221, 1238), 'os.path.exists', 'os.path.exists', (['i'], {}), '(i)\n', (1235, 1238), False, 'import os\n'), ((1264, 1275), 'os.mkdir', 'os.mkdir', (['i'], {}), '(i)\n', (1272, 1275), False, 'import os\n'), ((1438, 1553), 'requests.get', 'requests.get', (["('https://raw.githubusercontent.com/' + config['username'] + '/' + config[\n 'repo'] + '/master/' + i)"], {}), "('https://raw.githubusercontent.com/' + config['username'] +\n '/' + config['repo'] + '/master/' + i)\n", (1450, 1553), False, 'import requests\n'), ((1084, 1102), 'shutil.rmtree', 'rmtree', (['"""QArchive"""'], {}), "('QArchive')\n", (1090, 1102), False, 'from shutil import rmtree\n'), ((1123, 1143), 'os.mkdir', 'os.mkdir', (['"""QArchive"""'], {}), "('QArchive')\n", (1131, 1143), False, 'import os\n'), ((1249, 1258), 'shutil.rmtree', 'rmtree', (['i'], {}), '(i)\n', (1255, 1258), False, 'from shutil import rmtree\n')]
|
from django.contrib import admin
from .models import Car
admin.site.register(Car)
|
[
"django.contrib.admin.site.register"
] |
[((59, 83), 'django.contrib.admin.site.register', 'admin.site.register', (['Car'], {}), '(Car)\n', (78, 83), False, 'from django.contrib import admin\n')]
|
import numpy as np
import pickle
from IPython import embed
class RunningMeanStd(object):
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
def __init__(self, shape=()):
self.mean = np.zeros(shape, np.float32)
self.var = np.ones(shape, np.float32)
self.count = 1e-4
self.epsilon = 1e-8
self.clip = 10
# update mean and var with current input
def update(self, x):
# embed()
# exit(0)
if np.any(np.isnan(x)):
return
x_ = x.reshape(-1, len(self.mean))
batch_mean = np.mean(x_, axis=0)
batch_var = np.var(x_, axis=0)
batch_count = x_.shape[0]
# embed()
# exit(0)
self.update_from_moments(batch_mean, batch_var, batch_count)
# get value from normalized output
def apply(self, x):
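        # Update the running statistics with x, then return x standardized and
        # clipped to [-clip, clip].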
# return x
# embed()
# exit(0)
self.update(x)
x = np.clip((x - self.mean) / np.sqrt(self.var + self.epsilon), -self.clip, self.clip)
return x
def applyOnly(self, x):
# return x
# embed()
# exit(0)
x = np.clip((x - self.mean) / np.sqrt(self.var + self.epsilon), -self.clip, self.clip)
return x
# rms_x = np.zeros(shape=x.shape(), np.float32)
# for i in range(len(rms_x)):
# rms_x[i] = np.clip((x[i] - self.mean) / np.sqrt(self.var + self.epsilon), -self.clip, self.clip)
def update_from_moments(self, batch_mean, batch_var, batch_count):
self.mean, self.var, self.count = update_mean_var_count_from_moments(
self.mean, self.var, self.count, batch_mean, batch_var, batch_count)
def save(self, path):
data = {'mean':self.mean, 'var':self.var, 'count':self.count}
with open(path, 'wb') as f:
pickle.dump(data, f)
def load(self, path):
with open(path, 'rb') as f:
data = pickle.load(f)
self.mean = data['mean']
self.var = data['var']
self.count = data['count']
def update_mean_var_count_from_moments(mean, var, count, batch_mean, batch_var, batch_count):
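    # Chan et al.'s parallel algorithm: merge the running (mean, var, count)
    # with the moments of a new batch in a single pass.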
delta = batch_mean - mean
tot_count = count + batch_count
new_mean = mean + delta * batch_count / tot_count
m_a = var * count
m_b = batch_var * batch_count
M2 = m_a + m_b + np.square(delta) * count * batch_count / tot_count
new_var = M2 / tot_count
new_count = tot_count
return new_mean, new_var, new_count
|
[
"pickle.dump",
"numpy.square",
"numpy.zeros",
"numpy.ones",
"numpy.isnan",
"numpy.mean",
"pickle.load",
"numpy.var",
"numpy.sqrt"
] |
[((235, 262), 'numpy.zeros', 'np.zeros', (['shape', 'np.float32'], {}), '(shape, np.float32)\n', (243, 262), True, 'import numpy as np\n'), ((282, 308), 'numpy.ones', 'np.ones', (['shape', 'np.float32'], {}), '(shape, np.float32)\n', (289, 308), True, 'import numpy as np\n'), ((609, 628), 'numpy.mean', 'np.mean', (['x_'], {'axis': '(0)'}), '(x_, axis=0)\n', (616, 628), True, 'import numpy as np\n'), ((649, 667), 'numpy.var', 'np.var', (['x_'], {'axis': '(0)'}), '(x_, axis=0)\n', (655, 667), True, 'import numpy as np\n'), ((511, 522), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (519, 522), True, 'import numpy as np\n'), ((1840, 1860), 'pickle.dump', 'pickle.dump', (['data', 'f'], {}), '(data, f)\n', (1851, 1860), False, 'import pickle\n'), ((1947, 1961), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1958, 1961), False, 'import pickle\n'), ((987, 1019), 'numpy.sqrt', 'np.sqrt', (['(self.var + self.epsilon)'], {}), '(self.var + self.epsilon)\n', (994, 1019), True, 'import numpy as np\n'), ((1183, 1215), 'numpy.sqrt', 'np.sqrt', (['(self.var + self.epsilon)'], {}), '(self.var + self.epsilon)\n', (1190, 1215), True, 'import numpy as np\n'), ((2378, 2394), 'numpy.square', 'np.square', (['delta'], {}), '(delta)\n', (2387, 2394), True, 'import numpy as np\n')]
|
import pytest
from eth_abi.abi import (
decode,
)
from eth_abi.exceptions import (
InsufficientDataBytes,
)
from eth_abi.grammar import (
parse,
)
from ..common.unit import (
CORRECT_DYNAMIC_ENCODINGS,
CORRECT_STATIC_ENCODINGS,
CORRECT_TUPLE_ENCODINGS,
words,
)
@pytest.mark.parametrize(
'type_str,expected,abi_encoding,_',
CORRECT_TUPLE_ENCODINGS,
)
def test_decode_abi_for_multiple_types_as_list(type_str, expected, abi_encoding, _):
abi_type = parse(type_str)
if abi_type.arrlist is not None:
pytest.skip('ABI coding functions do not support array types')
types = [t.to_type_str() for t in abi_type.components]
actual = decode(types, abi_encoding)
assert actual == expected
@pytest.mark.parametrize(
'type_str,expected,abi_encoding,_',
CORRECT_STATIC_ENCODINGS,
)
def test_abi_decode_for_single_static_types(type_str, expected, abi_encoding, _):
(actual,) = decode([type_str], abi_encoding)
assert actual == expected
@pytest.mark.parametrize(
'type_str,expected,abi_encoding,_',
CORRECT_DYNAMIC_ENCODINGS,
)
def test_abi_decode_for_single_dynamic_types(type_str, expected, abi_encoding, _):
# Tests set up list values but encoders return sequences as tuples.
# i.e. [b'\xde\xad\xbe\xef'] vs encoder return type (b'\xde\xad\xbe\xef',)
expected = tuple(expected) if isinstance(expected, list) else expected
abi_encoding = (
# 32 bytes offset for dynamic types
b''.join([words('20'), abi_encoding])
)
(actual,) = decode([type_str], abi_encoding)
assert actual == expected
@pytest.mark.parametrize('data', (b'', bytearray()))
def test_decode_abi_empty_data_raises(data):
with pytest.raises(InsufficientDataBytes):
decode(['uint'], data)
@pytest.mark.parametrize('data', ('', 123, 0x123, [b'\x01'], (b'\x01',), {b'\x01'}))
def test_decode_abi_wrong_data_param_type_raises(data):
with pytest.raises(
TypeError,
match=f"The `data` value must be of bytes type. Got {type(data)}"
):
decode(['uint32', 'uint32'], data)
@pytest.mark.parametrize(
'types',
('', 123, b'', b'\xff', b'david attenborough', bytearray(b'\x01\xff'), {'key': 'val'}, {1, 2})
)
def test_decode_abi_wrong_types_param_type_raises(types):
with pytest.raises(
TypeError,
match=f"The `types` value type must be one of list or tuple. Got {type(types)}"
):
decode(types, b'\x00' * 32)
|
[
"pytest.skip",
"eth_abi.abi.decode",
"pytest.raises",
"pytest.mark.parametrize",
"eth_abi.grammar.parse"
] |
[((295, 383), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""type_str,expected,abi_encoding,_"""', 'CORRECT_TUPLE_ENCODINGS'], {}), "('type_str,expected,abi_encoding,_',\n CORRECT_TUPLE_ENCODINGS)\n", (318, 383), False, 'import pytest\n'), ((750, 839), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""type_str,expected,abi_encoding,_"""', 'CORRECT_STATIC_ENCODINGS'], {}), "('type_str,expected,abi_encoding,_',\n CORRECT_STATIC_ENCODINGS)\n", (773, 839), False, 'import pytest\n'), ((1011, 1101), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""type_str,expected,abi_encoding,_"""', 'CORRECT_DYNAMIC_ENCODINGS'], {}), "('type_str,expected,abi_encoding,_',\n CORRECT_DYNAMIC_ENCODINGS)\n", (1034, 1101), False, 'import pytest\n'), ((1798, 1884), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data"""', "('', 123, 291, [b'\\x01'], (b'\\x01',), {b'\\x01'})"], {}), "('data', ('', 123, 291, [b'\\x01'], (b'\\x01',), {\n b'\\x01'}))\n", (1821, 1884), False, 'import pytest\n'), ((491, 506), 'eth_abi.grammar.parse', 'parse', (['type_str'], {}), '(type_str)\n', (496, 506), False, 'from eth_abi.grammar import parse\n'), ((689, 716), 'eth_abi.abi.decode', 'decode', (['types', 'abi_encoding'], {}), '(types, abi_encoding)\n', (695, 716), False, 'from eth_abi.abi import decode\n'), ((945, 977), 'eth_abi.abi.decode', 'decode', (['[type_str]', 'abi_encoding'], {}), '([type_str], abi_encoding)\n', (951, 977), False, 'from eth_abi.abi import decode\n'), ((1553, 1585), 'eth_abi.abi.decode', 'decode', (['[type_str]', 'abi_encoding'], {}), '([type_str], abi_encoding)\n', (1559, 1585), False, 'from eth_abi.abi import decode\n'), ((552, 614), 'pytest.skip', 'pytest.skip', (['"""ABI coding functions do not support array types"""'], {}), "('ABI coding functions do not support array types')\n", (563, 614), False, 'import pytest\n'), ((1726, 1762), 'pytest.raises', 'pytest.raises', (['InsufficientDataBytes'], {}), '(InsufficientDataBytes)\n', (1739, 1762), False, 'import pytest\n'), ((1772, 1794), 'eth_abi.abi.decode', 'decode', (["['uint']", 'data'], {}), "(['uint'], data)\n", (1778, 1794), False, 'from eth_abi.abi import decode\n'), ((2070, 2104), 'eth_abi.abi.decode', 'decode', (["['uint32', 'uint32']", 'data'], {}), "(['uint32', 'uint32'], data)\n", (2076, 2104), False, 'from eth_abi.abi import decode\n'), ((2451, 2478), 'eth_abi.abi.decode', 'decode', (['types', "(b'\\x00' * 32)"], {}), "(types, b'\\x00' * 32)\n", (2457, 2478), False, 'from eth_abi.abi import decode\n')]
|
#!/usr/bin/env python2.7
# vim: fileencoding=utf-8
from distutils.core import setup
from docushare.__init__ import \
__author__, __copyright__, __license__, __version__, __email__
setup(
name="docushare",
version=__version__,
author=__author__,
author_email=__email__,
url="http://launchpad.net/docushare",
description="A DocuShare Client Library.",
long_description="""`docushare' is a client library for DocuShare by Xerox Corporation.""",
license=__license__,
platforms=["win32",],
packages=["docushare",],
package_data={"docushare": ["*.csv",]},
zipfile="docushare.zip",
)
|
[
"distutils.core.setup"
] |
[((192, 599), 'distutils.core.setup', 'setup', ([], {'name': '"""docushare"""', 'version': '__version__', 'author': '__author__', 'author_email': '__email__', 'url': '"""http://launchpad.net/docushare"""', 'description': '"""A DocuShare Client Library."""', 'long_description': '"""`docushare\' is a client library for DocuShare by Xerox Corporation."""', 'license': '__license__', 'platforms': "['win32']", 'packages': "['docushare']", 'package_data': "{'docushare': ['*.csv']}", 'zipfile': '"""docushare.zip"""'}), '(name=\'docushare\', version=__version__, author=__author__,\n author_email=__email__, url=\'http://launchpad.net/docushare\',\n description=\'A DocuShare Client Library.\', long_description=\n "`docushare\' is a client library for DocuShare by Xerox Corporation.",\n license=__license__, platforms=[\'win32\'], packages=[\'docushare\'],\n package_data={\'docushare\': [\'*.csv\']}, zipfile=\'docushare.zip\')\n', (197, 599), False, 'from distutils.core import setup\n')]
|
'''
Schema of extracellular information.
'''
import re
import os
import sys
from datetime import datetime
import numpy as np
import scipy.io as sio
import datajoint as dj
import tqdm
from . import reference, utilities, acquisition, analysis
schema = dj.schema(dj.config['custom']['database.prefix'] + 'extracellular')
@schema
class ProbeInsertion(dj.Manual):
definition = """ # Description of probe insertion details during extracellular recording
-> acquisition.Session
-> reference.Probe
"""
class InsertLocation(dj.Part):
definition = """
-> master
-> reference.Probe.Shank
-> reference.ActionLocation
"""
@schema
class Voltage(dj.Imported):
definition = """
-> ProbeInsertion
---
voltage: longblob # (mV)
voltage_start_time: float # (second) first timepoint of voltage recording
voltage_sampling_rate: float # (Hz) sampling rate of voltage recording
"""
def make(self, key):
        # Ingestion of raw extracellular data into the pipeline is not yet implemented
        raise NotImplementedError
@schema
class UnitSpikeTimes(dj.Manual):
definition = """
-> ProbeInsertion
unit_id : smallint
---
spike_times: longblob # (s) time of each spike, with respect to the start of session
cell_desc='N/A': varchar(32) # e.g. description of this unit (e.g. cell type)
"""
class UnitChannel(dj.Part):
definition = """
-> master
-> reference.Probe.Channel
"""
class SpikeWaveform(dj.Part):
definition = """
-> master
-> UnitSpikeTimes.UnitChannel
---
spike_waveform: longblob # waveform(s) of each spike at each spike time (waveform_timestamps x spike_times)
"""
@schema
class TrialSegmentedUnitSpikeTimes(dj.Imported):
definition = """
-> UnitSpikeTimes
-> acquisition.TrialSet.Trial
-> analysis.TrialSegmentationSetting
---
segmented_spike_times: longblob # (s) with respect to the start of the trial
"""
def make(self, key):
        raise NotImplementedError
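
# A hypothetical usage sketch (not part of the original schema module): once the
# upstream tables are populated, segmentation would typically be triggered through
# DataJoint's populate mechanism, e.g.
#   TrialSegmentedUnitSpikeTimes.populate(display_progress=True)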
|
[
"datajoint.schema"
] |
[((253, 320), 'datajoint.schema', 'dj.schema', (["(dj.config['custom']['database.prefix'] + 'extracellular')"], {}), "(dj.config['custom']['database.prefix'] + 'extracellular')\n", (262, 320), True, 'import datajoint as dj\n')]
|
# -*- coding: UTF-8 -*-
import tkinter as tk
from tkinter import ttk
import tkinter.messagebox
from gui_class_login import UC_Login
from gui_class_show import UC_Show
root = tk.Tk()
form_login = UC_Login(root)
root.mainloop()
root.destroy()
root = tk.Tk()
form_show = UC_Show(root)
root.mainloop()
|
[
"gui_class_show.UC_Show",
"tkinter.Tk",
"gui_class_login.UC_Login"
] |
[((177, 184), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (182, 184), True, 'import tkinter as tk\n'), ((198, 212), 'gui_class_login.UC_Login', 'UC_Login', (['root'], {}), '(root)\n', (206, 212), False, 'from gui_class_login import UC_Login\n'), ((252, 259), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (257, 259), True, 'import tkinter as tk\n'), ((272, 285), 'gui_class_show.UC_Show', 'UC_Show', (['root'], {}), '(root)\n', (279, 285), False, 'from gui_class_show import UC_Show\n')]
|
"""Get temperature and humidity from a DHT22 sensor"""
# If we're not running on a Raspberry, use a GPIO stub so we can still get
# some coding done
import os
if os.uname()[1] == 'raspberrypi':
from Adafruit_DHT import read_retry, DHT22 # pylint: disable=import-error
else:
from lib.Adafruit_DHT_stub import read_retry, DHT22
class DHT(object):
"""DHT22 temperature and humidity sensor"""
def __init__(self, name, pin):
"""Set initial values and initialize pin"""
self.name = name
self.sensor = DHT22
self.pin = pin
def poll_sensor(self):
"""Poll the sensor to get the temp and humidity"""
# Try to grab a sensor reading. Use the read_retry method which will retry up to 15 times
# to get a sensor reading (waiting 2 seconds between each retry).
humidity, temperature = read_retry(self.sensor, self.pin)
# Note that sometimes you won't get a reading and the results will be null (because Linux
# can't guarantee the timing of calls to read the sensor). If this happens try again!
if humidity is not None and temperature is not None:
temphumid = (humidity, temperature)
return temphumid
else:
raise EnvironmentError('Failed to get a reading from DHT22') # pragma: no cover
def get_temp(self):
"""Call poll_sensor and return temp"""
temp = self.poll_sensor()[1]
return temp
def get_humidity(self):
"""Call poll_sensor and return humidity"""
humidity = self.poll_sensor()[0]
return humidity
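
# A minimal usage sketch (the name and pin number are illustrative, not part of
# the original module):
#   sensor = DHT(name='greenhouse', pin=4)
#   print(sensor.get_temp(), sensor.get_humidity())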
|
[
"lib.Adafruit_DHT_stub.read_retry",
"os.uname"
] |
[((163, 173), 'os.uname', 'os.uname', ([], {}), '()\n', (171, 173), False, 'import os\n'), ((864, 897), 'lib.Adafruit_DHT_stub.read_retry', 'read_retry', (['self.sensor', 'self.pin'], {}), '(self.sensor, self.pin)\n', (874, 897), False, 'from lib.Adafruit_DHT_stub import read_retry, DHT22\n')]
|
import boto3, botocore
from os import environ
import logging
import jsonschema
class AwsRdsProvisionError(Exception):
pass
class AwsRdsManager:
log = logging.getLogger(__name__)
db_schema = dict(
type="object",
properties=dict(
database=dict(type="string"),
password=dict(type="string"),
username=dict(type="string"),
),
required=["database", "password", "username"],
)
config_schema = dict(
type="object",
properties=dict(
AllocatedStorage=dict(type="integer"),
DBInstanceClass=dict(type="string"),
Engine=dict(type="string"),
BackupRetentionPeriod=dict(type="integer"),
MultiAZ=dict(type="boolean"),
EngineVersion=dict(type="string"),
PubliclyAccessible=dict(type="boolean"),
VpcSecurityGroupIds=dict(type="array", items=dict(type="string")),
DBSubnetGroupName=dict(type="string"),
),
required=[
"AllocatedStorage",
"DBInstanceClass",
"Engine",
"BackupRetentionPeriod",
"MultiAZ",
"EngineVersion",
"PubliclyAccessible",
"VpcSecurityGroupIds",
"DBSubnetGroupName",
],
)
@classmethod
def set_config(cls, config: dict):
jsonschema.validate(config, cls.config_schema)
cls.RDS_CONFIG = config
def __init__(self, db: dict):
jsonschema.validate(db, self.db_schema)
self.db = db
@property
def boto_client(self):
return boto3.client("rds")
def database_description(self):
rds = self.boto_client
try:
instances = rds.describe_db_instances(
DBInstanceIdentifier=self.db["database"]
)
return instances["DBInstances"][0]
except (rds.exceptions.DBInstanceNotFoundFault, KeyError, IndexError) as ex:
            raise AwsRdsProvisionError(f"No Database Provisioned for {self.db}") from ex
def provision(self, silent=True, wait=None):
assert hasattr(
self.__class__, "RDS_CONFIG"
), f"No RDS Configuration Given. See {self.__class__.__name__}.set_config"
db_vars = self.RDS_CONFIG.copy()
db_vars.update(
{
"DBName": self.db["database"],
"DBInstanceIdentifier": self.db["database"],
"MasterUsername": self.db["username"],
"MasterUserPassword": self.db["password"],
}
)
try:
self.boto_client.create_db_instance(**db_vars)
except botocore.exceptions.ClientError as ex:
if silent and "DBInstanceAlreadyExists" in ex.__str__():
self.log.info(f"{self.db} instance already exists. Skipping.")
return
else:
raise ex
if wait:
self.log.info(
f"Waiting for {self.db} instance to spawn... This could take a while..."
)
waiter = self.boto_client.get_waiter(wait)
waiter.wait(DBInstanceIdentifier=db_vars["DBInstanceIdentifier"])
self.log.info(f"{self.db} instance available.")
else:
            self.log.warning(
f"Warning: did not wait for {self.db} instance to spawn. Instance may not be available."
)
def deprovision(self, wait=False, silent=True):
try:
self.boto_client.delete_db_instance(
DBInstanceIdentifier=self.db["database"],
SkipFinalSnapshot=True,
DeleteAutomatedBackups=True,
)
except botocore.exceptions.ClientError as ex:
if silent and "DBInstanceNotFound" in ex.__str__():
self.log.info(f"{self.db} instance already terminated. Skipping.")
return
else:
raise ex
if wait:
self.log.info(f"Waiting for {self.db} instance to terminate...")
waiter = self.boto_client.get_waiter("db_instance_deleted")
waiter.wait(DBInstanceIdentifier=self.db["database"])
self.log.info(f"{self.db} instance terminated.")
else:
            self.log.warning(f"Warning: did not wait for {self.db} instance to terminate.")
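
# A hypothetical usage sketch (all values illustrative; 'db_instance_available'
# is the standard boto3 RDS waiter name):
#   AwsRdsManager.set_config(dict(
#       AllocatedStorage=20, DBInstanceClass='db.t3.micro', Engine='postgres',
#       BackupRetentionPeriod=7, MultiAZ=False, EngineVersion='13.4',
#       PubliclyAccessible=False, VpcSecurityGroupIds=['sg-0123456789abcdef0'],
#       DBSubnetGroupName='default'))
#   manager = AwsRdsManager(dict(database='appdb', username='admin', password='secret'))
#   manager.provision(wait='db_instance_available')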
|
[
"jsonschema.validate",
"logging.getLogger",
"boto3.client"
] |
[((162, 189), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (179, 189), False, 'import logging\n'), ((1388, 1434), 'jsonschema.validate', 'jsonschema.validate', (['config', 'cls.config_schema'], {}), '(config, cls.config_schema)\n', (1407, 1434), False, 'import jsonschema\n'), ((1510, 1549), 'jsonschema.validate', 'jsonschema.validate', (['db', 'self.db_schema'], {}), '(db, self.db_schema)\n', (1529, 1549), False, 'import jsonschema\n'), ((1628, 1647), 'boto3.client', 'boto3.client', (['"""rds"""'], {}), "('rds')\n", (1640, 1647), False, 'import boto3, botocore\n')]
|
import inspect
import wrapt
from experimental_config import ENABLED_EXPERIMENTS
EXPERIMENTS_IGNORED = False
class DisabledExperiment(Exception):
pass
class MismatchingArguments(TypeError):
pass
def volatile(experiment, safe=False, refactor=False):
if not ENABLED_EXPERIMENTS:
raise Exception("ENABLED EXPERIMENTS NOT DEFINED")
@wrapt.decorator
def wrapper(subject, instance, args, kwargs):
if EXPERIMENTS_IGNORED:
return subject(*args, **kwargs)
subject_arguments_count = len(inspect.getargspec(subject).args)
experiment_arguments_count = len(inspect.getargspec(experiment).args)
if subject_arguments_count != experiment_arguments_count:
raise MismatchingArguments(
"Subjects and experiments must have the same number of arguments. '{}' has {} arguments while '{}' has {}.".format(
subject.__name__,
subject_arguments_count,
experiment.__name__,
experiment_arguments_count
)
            )
if "*" not in ENABLED_EXPERIMENTS and experiment.__name__ not in ENABLED_EXPERIMENTS:
return subject(*args, **kwargs)
else:
#print("Running '"+ subject.__name__ + "' as experimental function '" + experiment.__name__ +"'.")
try:
experiment_result = experiment(*args, **kwargs)
subject_result = subject(*args, **kwargs)
if refactor and experiment_result != subject_result:
return subject_result
return experiment_result
            except Exception:
if safe:
                    #print("WARNING: There was an error while executing '"+ experiment.__name__ + "' falling back to '" + subject.__name__ +"'.")
return subject(*args, **kwargs)
else:
raise
return wrapper
def experiment(identifier=None):
@wrapt.decorator
def wrapper(wrapped, instance, args, kwargs):
if EXPERIMENTS_IGNORED:
return wrapped(*args, **kwargs)
_identifier = identifier or wrapped.__name__
if '*' not in ENABLED_EXPERIMENTS and _identifier not in ENABLED_EXPERIMENTS:
raise DisabledExperiment("'"+wrapped.__name__ + "' is a experimental feature and it has not been enabled")
return wrapped(*args, **kwargs)
return wrapper
class experimental_block():
def __init__(self, identifier):
self.identifier = identifier
def __enter__(self):
if '*' not in ENABLED_EXPERIMENTS and self.identifier not in ENABLED_EXPERIMENTS:
raise DisabledExperiment
def __exit__(self, type, value, traceback):
pass
class experiments_ignored():
def __init__(self):
pass
def __enter__(self):
global EXPERIMENTS_IGNORED
EXPERIMENTS_IGNORED = True
def __exit__(self, type, value, traceback):
global EXPERIMENTS_IGNORED
EXPERIMENTS_IGNORED = False
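
# A hypothetical usage sketch (function names illustrative, not part of this
# module): the decorated function is the stable subject; the argument is the
# experimental implementation run alongside it.
#   def experimental_sum(xs):
#       ...  # new implementation under test
#
#   @volatile(experimental_sum, safe=True)  # falls back to the subject on errors
#   def stable_sum(xs):
#       return sum(xs)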
|
[
"inspect.getargspec"
] |
[((555, 582), 'inspect.getargspec', 'inspect.getargspec', (['subject'], {}), '(subject)\n', (573, 582), False, 'import inspect\n'), ((630, 660), 'inspect.getargspec', 'inspect.getargspec', (['experiment'], {}), '(experiment)\n', (648, 660), False, 'import inspect\n')]
|
from typing import Optional
from uuid import uuid4
from aiogram import types, Bot
from aiogram.dispatcher import FSMContext
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import sessionmaker
from bot.menu.keyboards import menu_keyboard
from bot.menu.states import States
from bot.user.models import User, Invite
from bot.user.services.invite import create_invite
from bot.user.states import CreateInvite
from bot.user.text import CHOICE_USER_IDENTIFIER, INVITE_INTEGRITY_ERROR
async def choice_user_identifier_(msg: types.Message, state: FSMContext) -> None:
await state.set_state(CreateInvite.choice_user_identifier)
await msg.answer(CHOICE_USER_IDENTIFIER)
async def create_invite_(msg: types.Message, user: User, session: sessionmaker, state: FSMContext) -> None:
user_identifier = msg.text
    invite_hash = str(uuid4())  # renamed to avoid shadowing the built-in hash()
    bot_username = (await Bot.get_current().me).username
    link = f"https://t.me/{bot_username}?start={invite_hash}"
    invite: Optional[Invite]
    try:
        async with session.begin() as async_session:
            invite = await create_invite(async_session, user.id, invite_hash, user_identifier)
except IntegrityError:
invite = None
await state.set_state(States.menu)
if invite is not None:
await msg.answer(link, reply_markup=menu_keyboard)
else:
await msg.answer(INVITE_INTEGRITY_ERROR, reply_markup=menu_keyboard)
|
[
"bot.user.services.invite.create_invite",
"uuid.uuid4",
"aiogram.Bot.get_current"
] |
[((842, 849), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (847, 849), False, 'from uuid import uuid4\n'), ((877, 894), 'aiogram.Bot.get_current', 'Bot.get_current', ([], {}), '()\n', (892, 894), False, 'from aiogram import types, Bot\n'), ((1082, 1142), 'bot.user.services.invite.create_invite', 'create_invite', (['async_session', 'user.id', 'hash', 'user_identifier'], {}), '(async_session, user.id, hash, user_identifier)\n', (1095, 1142), False, 'from bot.user.services.invite import create_invite\n')]
|
#
# Story Time App
# Exposes functions that connect to and query the storytime DB
#
from typing import List
from sqlalchemy.orm.exc import NoResultFound
from werkzeug.datastructures import FileStorage
from storytime import file_storage_service
from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session
SQL_GET_STORY_RANDOM = 'SELECT id FROM story ORDER BY random() LIMIT 1'
# User functions
def create_user(user: User):
"""
Creates the given user in the DB.
:param user: the user to create
:return: an integer representing the primary key of the object created
"""
db_session.add(user)
db_session.commit()
return user.id
def get_user_info(user_id: int):
"""
Gets user info by user id.
:param user_id: the primary key of the user
:return: the user object
"""
try:
return db_session.query(User).filter_by(id=user_id).one()
except NoResultFound:
return None
def get_user_id_by_email(email: str):
"""
Gets the user_id of the user for the given email address.
:param email: the email address to use in the lookup
:return: an integer representing the primary key of the user object retrieved
"""
try:
user = db_session.query(User).filter_by(email=email).one()
return user.id
except NoResultFound:
return None
def get_user_by_email(email: str):
"""
Gets the User object for the given email address.
:param email: the email address to use in the lookup
:return: the user object
"""
try:
return db_session.query(User).filter_by(email=email).one()
except NoResultFound:
return None
# Story functions
def create_story(story: Story, image_file: FileStorage = None):
"""
Creates the given story in the DB.
:param story: the story to create
:param image_file: the image file to save (or None)
:return: an integer representing the primary key of the object created
"""
try:
if image_file:
upload_file = file_storage_service.save_file(file=image_file)
create_upload_file(upload_file)
story.upload_file_id = upload_file.id
db_session.add(story)
db_session.commit()
return story.id
except Exception as exc:
db_session.rollback()
raise exc
def update_story(story: Story, remove_existing_image: bool, new_image_file):
"""
Updates a story and its image.
:param story: the story to update
:param remove_existing_image: a flag indicating whether or not to remove the existing image from the story
:param new_image_file: a new image file to associate with the story
"""
# Save the old upload file for deletion later (if instructed to remove it)
old_upload_file_to_delete = story.upload_file if remove_existing_image and story.upload_file else None
try:
# Removing existing image from story
if remove_existing_image:
story.upload_file = None
# Save new file and add new image to story
if new_image_file:
story.upload_file = file_storage_service.save_file(file=new_image_file)
# Save story to DB
db_session.add(story)
db_session.execute("UPDATE story SET date_last_modified = TIMEZONE('utc', CURRENT_TIMESTAMP) WHERE id = :id",
{'id': story.id})
# Remove old file from DB
if old_upload_file_to_delete:
db_session.delete(old_upload_file_to_delete)
db_session.commit()
except Exception as exc:
db_session.rollback()
raise exc
# Finally, delete the old image from the file system (do this last so we only delete when we know everything
# else has succeeded)
if old_upload_file_to_delete:
file_storage_service.delete_file(file=old_upload_file_to_delete)
def delete_story(story_id: int):
"""
Permanently deletes the story for the given story_id
:param story_id: the primary key of the story to delete
"""
try:
story = db_session.query(Story).filter_by(id=story_id).one()
upload_file = story.upload_file
story.categories = []
db_session.delete(story)
if upload_file:
file_storage_service.delete_file(file=upload_file)
db_session.delete(upload_file)
db_session.commit()
except Exception as exc:
db_session.rollback()
raise exc
def get_published_stories_count():
"""
Gets the count of all published stories.
:return: the number of published stories
"""
return db_session.query(Story).filter_by(published=True).count()
def get_published_stories(count: int = None):
"""
Gets all published stories.
:param count: the number of stories to retrieve
:return: a list of stories
"""
query = db_session.query(Story).filter_by(published=True).order_by(Story.date_created.desc())
if count:
query = query.limit(count)
return query.all()
def get_published_stories_by_category_id(category_id: int):
"""
Gets all published stories for the given category_id.
:param category_id: the primary key for the category to search on
:return: a list of stories
"""
return db_session.query(Story).filter_by(published=True).filter(
Story.categories.any(Category.id == category_id)).all()
def get_stories_by_user_id(user_id: int):
"""
Gets all stories for the given user id.
:param user_id: the primary key for the user to search on
:return: a list of stories ordered by date last modified descending
"""
return db_session.query(Story).filter_by(user_id=user_id).order_by(Story.date_last_modified.desc()).all()
def get_story_by_id(story_id: int):
"""
Gets a story by id
:param story_id: the primary key for the story to search for
:return: the story or None
"""
try:
return db_session.query(Story).filter_by(id=story_id).one()
except NoResultFound:
return None
def get_story_random():
"""
Gets a random story
:return: the story or None if none exist
"""
with db_engine.connect() as con:
rs = con.execute(SQL_GET_STORY_RANDOM)
row = rs.fetchone()
return get_story_by_id(story_id=row[0]) if row else None
# Category functions
def create_category(category: Category):
"""
Creates the given category in the DB.
:param category: the category to create
:return: an integer representing the primary key of the object created
"""
db_session.add(category)
db_session.commit()
return category.id
def get_categories():
"""
    Gets all categories, ordered by label.
:return: a list of categories
"""
return db_session.query(Category).order_by(Category.label.asc()).all()
def get_category_by_id(category_id: int):
"""
Gets a category by id
:param category_id: the primary key for the category to search for
:return: the category or None
"""
try:
return db_session.query(Category).filter_by(id=category_id).one()
except NoResultFound:
return None
def get_categories_by_ids(category_ids: List):
"""
Gets a list of categories by their ids
:param category_ids: the list of category ids
:return: a list of categories
"""
try:
return db_session.query(Category).filter(Category.id.in_(category_ids)).all()
except NoResultFound:
return None
def get_category_by_label(category_label: str):
"""
Gets a category by label
    :param category_label: the unique label for the category to search for
:return: the category or None
"""
try:
return db_session.query(Category).filter_by(label=category_label).one()
except NoResultFound:
return None
# Upload File functions
def create_upload_file(file: UploadFile):
"""
Creates the given upload file in the DB.
:param file: the upload file
:return: an integer representing the primary key of the object created
"""
db_session.add(file)
db_session.commit()
return file.id
def get_upload_file_by_id(upload_file_id: int):
"""
Gets an upload file by id
:param upload_file_id: the primary key for the upload file to search for
:return: the upload file or None
"""
try:
return db_session.query(UploadFile).filter_by(id=upload_file_id).one()
except NoResultFound:
return None
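
# A hypothetical usage sketch (constructor arguments illustrative, not part of
# this module):
#   create_category(Category(label='Adventure'))
#   for story in get_published_stories(count=10):
#       print(story.id)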
|
[
"storytime.story_time_db_init.db_session.execute",
"storytime.story_time_db_init.Story.categories.any",
"storytime.story_time_db_init.Category.label.asc",
"storytime.story_time_db_init.db_session.rollback",
"storytime.story_time_db_init.Story.date_last_modified.desc",
"storytime.story_time_db_init.db_session.commit",
"storytime.story_time_db_init.Category.id.in_",
"storytime.story_time_db_init.db_session.add",
"storytime.story_time_db_init.Story.date_created.desc",
"storytime.file_storage_service.save_file",
"storytime.story_time_db_init.db_engine.connect",
"storytime.story_time_db_init.db_session.query",
"storytime.file_storage_service.delete_file",
"storytime.story_time_db_init.db_session.delete"
] |
[((635, 655), 'storytime.story_time_db_init.db_session.add', 'db_session.add', (['user'], {}), '(user)\n', (649, 655), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((660, 679), 'storytime.story_time_db_init.db_session.commit', 'db_session.commit', ([], {}), '()\n', (677, 679), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((6585, 6609), 'storytime.story_time_db_init.db_session.add', 'db_session.add', (['category'], {}), '(category)\n', (6599, 6609), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((6614, 6633), 'storytime.story_time_db_init.db_session.commit', 'db_session.commit', ([], {}), '()\n', (6631, 6633), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((8068, 8088), 'storytime.story_time_db_init.db_session.add', 'db_session.add', (['file'], {}), '(file)\n', (8082, 8088), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((8093, 8112), 'storytime.story_time_db_init.db_session.commit', 'db_session.commit', ([], {}), '()\n', (8110, 8112), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((2212, 2233), 'storytime.story_time_db_init.db_session.add', 'db_session.add', (['story'], {}), '(story)\n', (2226, 2233), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((2242, 2261), 'storytime.story_time_db_init.db_session.commit', 'db_session.commit', ([], {}), '()\n', (2259, 2261), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((3225, 3246), 'storytime.story_time_db_init.db_session.add', 'db_session.add', (['story'], {}), '(story)\n', (3239, 3246), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((3255, 3392), 'storytime.story_time_db_init.db_session.execute', 'db_session.execute', (['"""UPDATE story SET date_last_modified = TIMEZONE(\'utc\', CURRENT_TIMESTAMP) WHERE id = :id"""', "{'id': story.id}"], {}), '(\n "UPDATE story SET date_last_modified = TIMEZONE(\'utc\', CURRENT_TIMESTAMP) WHERE id = :id"\n , {\'id\': story.id})\n', (3273, 3392), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((3549, 3568), 'storytime.story_time_db_init.db_session.commit', 'db_session.commit', ([], {}), '()\n', (3566, 3568), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((3828, 3892), 'storytime.file_storage_service.delete_file', 'file_storage_service.delete_file', ([], {'file': 'old_upload_file_to_delete'}), '(file=old_upload_file_to_delete)\n', (3860, 3892), False, 'from storytime import file_storage_service\n'), ((4217, 4241), 'storytime.story_time_db_init.db_session.delete', 'db_session.delete', (['story'], {}), '(story)\n', (4234, 4241), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((4380, 4399), 'storytime.story_time_db_init.db_session.commit', 'db_session.commit', ([], {}), '()\n', (4397, 4399), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((4939, 4964), 'storytime.story_time_db_init.Story.date_created.desc', 'Story.date_created.desc', ([], {}), '()\n', (4962, 4964), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((6172, 6191), 'storytime.story_time_db_init.db_engine.connect', 'db_engine.connect', ([], {}), '()\n', (6189, 6191), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((2062, 2109), 'storytime.file_storage_service.save_file', 'file_storage_service.save_file', ([], {'file': 'image_file'}), '(file=image_file)\n', (2092, 2109), False, 'from storytime import file_storage_service\n'), ((2323, 2344), 'storytime.story_time_db_init.db_session.rollback', 'db_session.rollback', ([], {}), '()\n', (2342, 2344), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((3137, 3188), 'storytime.file_storage_service.save_file', 'file_storage_service.save_file', ([], {'file': 'new_image_file'}), '(file=new_image_file)\n', (3167, 3188), False, 'from storytime import file_storage_service\n'), ((3495, 3539), 'storytime.story_time_db_init.db_session.delete', 'db_session.delete', (['old_upload_file_to_delete'], {}), '(old_upload_file_to_delete)\n', (3512, 3539), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((3606, 3627), 'storytime.story_time_db_init.db_session.rollback', 'db_session.rollback', ([], {}), '()\n', (3625, 3627), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((4278, 4328), 'storytime.file_storage_service.delete_file', 'file_storage_service.delete_file', ([], {'file': 'upload_file'}), '(file=upload_file)\n', (4310, 4328), False, 'from storytime import file_storage_service\n'), ((4341, 4371), 'storytime.story_time_db_init.db_session.delete', 'db_session.delete', (['upload_file'], {}), '(upload_file)\n', (4358, 4371), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((4437, 4458), 'storytime.story_time_db_init.db_session.rollback', 'db_session.rollback', ([], {}), '()\n', (4456, 4458), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((5352, 5400), 'storytime.story_time_db_init.Story.categories.any', 'Story.categories.any', (['(Category.id == category_id)'], {}), '(Category.id == category_id)\n', (5372, 5400), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((5717, 5748), 'storytime.story_time_db_init.Story.date_last_modified.desc', 'Story.date_last_modified.desc', ([], {}), '()\n', (5746, 5748), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((6810, 6830), 'storytime.story_time_db_init.Category.label.asc', 'Category.label.asc', ([], {}), '()\n', (6828, 6830), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((4631, 4654), 'storytime.story_time_db_init.db_session.query', 'db_session.query', (['Story'], {}), '(Story)\n', (4647, 4654), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((4880, 4903), 'storytime.story_time_db_init.db_session.query', 'db_session.query', (['Story'], {}), '(Story)\n', (4896, 4903), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((6774, 6800), 'storytime.story_time_db_init.db_session.query', 'db_session.query', (['Category'], {}), '(Category)\n', (6790, 6800), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((7408, 7437), 'storytime.story_time_db_init.Category.id.in_', 'Category.id.in_', (['category_ids'], {}), '(category_ids)\n', (7423, 7437), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((882, 904), 'storytime.story_time_db_init.db_session.query', 'db_session.query', (['User'], {}), '(User)\n', (898, 904), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((1260, 1282), 'storytime.story_time_db_init.db_session.query', 'db_session.query', (['User'], {}), '(User)\n', (1276, 1282), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((1598, 1620), 'storytime.story_time_db_init.db_session.query', 'db_session.query', (['User'], {}), '(User)\n', (1614, 1620), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((4086, 4109), 'storytime.story_time_db_init.db_session.query', 'db_session.query', (['Story'], {}), '(Story)\n', (4102, 4109), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((5953, 5976), 'storytime.story_time_db_init.db_session.query', 'db_session.query', (['Story'], {}), '(Story)\n', (5969, 5976), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((7053, 7079), 'storytime.story_time_db_init.db_session.query', 'db_session.query', (['Category'], {}), '(Category)\n', (7069, 7079), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((7374, 7400), 'storytime.story_time_db_init.db_session.query', 'db_session.query', (['Category'], {}), '(Category)\n', (7390, 7400), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((7716, 7742), 'storytime.story_time_db_init.db_session.query', 'db_session.query', (['Category'], {}), '(Category)\n', (7732, 7742), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((8366, 8394), 'storytime.story_time_db_init.db_session.query', 'db_session.query', (['UploadFile'], {}), '(UploadFile)\n', (8382, 8394), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((5286, 5309), 'storytime.story_time_db_init.db_session.query', 'db_session.query', (['Story'], {}), '(Story)\n', (5302, 5309), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n'), ((5657, 5680), 'storytime.story_time_db_init.db_session.query', 'db_session.query', (['Story'], {}), '(Story)\n', (5673, 5680), False, 'from storytime.story_time_db_init import Category, Story, UploadFile, User, db_engine, db_session\n')]
|
## The PSICT-UIF top-level interface class
## In the course of normal scripting, this should be the only object
## that the user directly interfaces with in the external script.
import os
import sys
import importlib.util
from pathlib import Path
import logging
from datetime import datetime
from PSICT_UIF._include36.FileManager import FileManager
from PSICT_UIF._include36.PulseSeqManager import PulseSeqManager
from PSICT_UIF._include36.LabberExporter import LabberExporter
import PSICT_UIF._include36._LogLevels as LogLevels
class psictUIFInterface:
'''
The main PSICT pulse-sequence generator class
This class handles all interfaces with the Labber reference files and Labber measurements, so that (in principle) everything can be controlled from a self-contained external script. This includes control over input and output database/reference files, creation of the pulse sequence, and control over parameters for the Single-Qubit Pulse Generator (SQPG) as well as any other instruments on the Labber instrument server in all of point-value, iteration, and inter-channel relation modes.
The Labber ScriptTools MeasurementObject instance can be accessed directly in the external script once both input and output files have been set for the psictUIFInterface object by accessing the "MeasurementObject" attribute.
Some settings and attributes which may change with time are stored in the PSICT_config.py file. This must be loaded into the PSICT interface object using the load_config_file method. Prior to version 1.0.7.1, these parameters were stored in the script-rcfile.
Since version 1.0.7.2, it is possible to specify the is_worker parameter on initialisation. This indicates to the PSICT interface that it is running as part of a more complex automation procedure, but more generally that the 'measurement' script invoking the PSICT interface directly is *not* the main script being executed. The intention is to alter some of the default behaviours of the PSICT interface object that are no longer appropriate in this context. At present, this setting effectively turns off the script copying mechanism (as it does not play well when what is executed as __main__ is not the 'measurement' script mentioned previously), which shifts the burden of copying the script onto the 'master' automation/controller script. As the default of the worker setting is False, this is fully backwards-compatible with scripts from previous versions.
'''
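    # A minimal external-script usage sketch (paths and names are illustrative,
    # not part of the original module):
    #   psict = psictUIFInterface('/path/to/PSICT_config.py')
    #   psict.set_template_file('<template_dir>', '<template_file>')
    #   psict.set_output_file('<output_dir>', '<output_file>')
    #   psict.set_point_values({'SQPG': {...}})
    #   psict.perform_measurement(dry_run=True)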
def __init__(self, config_path, *, is_worker = False, parent_logger_name = None):
## NB declare all attributes explicitly for __del__ to work correctly
## Load config (log after logger initialized)
self.load_config_file(config_path)
## Logging
self.init_logging(parent_logger_name)
## Log config loading for debugging
self.logger.log(LogLevels.VERBOSE, 'Config file loaded from path: {}'.format(self.script_rcpath))
## Save original working directory from which external script was invoked from
self._original_wd = os.getcwd()
self._script_inv = sys.argv[0]
## Add attributes
self.is_SQPG_used = False
## Add constituent objects
self.fileManager = FileManager(parent_logger_name = self.logger.name)
self.pulseSeqManager = PulseSeqManager(parent_logger_name = self.logger.name)
self.labberExporter = LabberExporter(parent_logger_name = self.logger.name)
## Add attributes for constituent objects
self.fileManager.set_original_wd(self._original_wd, self._script_inv)
## Assign config to delegates
self.assign_config_to_delegates()
## Set worker status as standalone script by default
self.set_worker_status(is_worker)
## Status message
self.logger.log(LogLevels.TRACE, 'psictUIFInterface instance initialized.')
##
## Direct access to MeasurementObject as attribute
@property
def MeasurementObject(self):
return self.labberExporter.MeasurementObject
def __del__(self):
## Delete object attributes
del self.fileManager # FileManager destructor deletes temp files
del self.pulseSeqManager
## Change working directory back to original
os.chdir(self._original_wd)
## Status message
self.logger.log(LogLevels.TRACE, 'psictUIFInterface instance deleted.')
##
##########################################################################
## Logging
def init_logging(self, parent_logger_name = None):
'''
Initialize logging for the psictUIFInterface.
'''
## Add extra logging levels
logging.addLevelName(LogLevels.ALL, 'ALL')
logging.addLevelName(LogLevels.TRACE, 'TRACE')
logging.addLevelName(LogLevels.VERBOSE, 'VERBOSE')
logging.addLevelName(LogLevels.SPECIAL, 'SPECIAL')
## Init logger
if parent_logger_name is not None:
logger_name = '.'.join([parent_logger_name, 'psictUIFInterface'])
else:
logger_name = 'psictUIFInterface'
self.logger = logging.getLogger(logger_name)
self.logger.setLevel(LogLevels.ALL) # Log all possible events
## If there is no parent logger, carry out additional configurations
if parent_logger_name is None:
## Add handlers if there are none already added
if len(self.logger.handlers) == 0:
## Console stream handler
if self._script_rc.logging_config['console_log_enabled']:
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(self._script_rc.logging_config['console_log_level'])
console_fmt = logging.Formatter(self._script_rc.logging_config['console_fmt'], \
datefmt = self._script_rc.logging_config['console_datefmt'])
console_handler.setFormatter(console_fmt)
## Add handler to logger
self.logger.addHandler(console_handler)
## File handler
if self._script_rc.logging_config['file_log_enabled']:
log_dir = self._script_rc.logging_config['log_dir']
if not os.path.exists(log_dir):
os.makedirs(log_dir)
log_file = self._script_rc.logging_config['log_file'].format(datetime.now())+'.log'
log_path = os.path.join(log_dir, log_file)
file_handler = logging.FileHandler(log_path)
file_handler.setLevel(self._script_rc.logging_config['file_log_level'])
file_fmt = logging.Formatter(self._script_rc.logging_config['file_fmt'], \
datefmt = self._script_rc.logging_config['file_datefmt'])
file_handler.setFormatter(file_fmt)
## Add handler to logger
self.logger.addHandler(file_handler)
## Log log handler additions
self.logger.debug('Logging handlers added')
## Add NullHandler if no other handlers are configured
if len(self.logger.handlers) == 0:
self.logger.addHandler(logging.NullHandler())
else:
## Log the presence of a parent logger
self.logger.debug('Parent logger {} specified; no further config carried out.'.format(parent_logger_name))
## Status message
self.logger.debug('Logging initialization complete.')
def log(self, msg, loglevel = 'special', *args, **kwargs):
'''
Log a message to the logger at the specified level.
This method should be used instead of bare `print` functions in scripts (both worker-level and master-level). This method should NOT be used internally within PSICT.
Log levels can be specified as an integer (the usual way), but can also be string corresponding to the name of the level. Available options are: TRACE, DEBUG, VERBOSE, INFO, SPECIAL, WARNING, ERROR, CRITICAL. Specifying an unsupported string will result in a logged ERROR-level message, but no execution error.
'''
if isinstance(loglevel, str):
## Convert to lowercase
loglevel = loglevel.lower()
## Convert string to appropriate level
if loglevel == 'trace':
lvl = LogLevels.TRACE
elif loglevel == 'debug':
lvl = LogLevels.DEBUG
elif loglevel == 'verbose':
lvl = LogLevels.VERBOSE
elif loglevel == 'info':
lvl = LogLevels.INFO
elif loglevel == 'special':
lvl = LogLevels.SPECIAL
elif loglevel == 'warning':
lvl = LogLevels.WARNING
elif loglevel == 'error':
lvl = LogLevels.ERROR
            elif loglevel == 'critical':
lvl = LogLevels.CRITICAL
else:
self.logger.error('Invalid loglevel string specified in call to log(): {}'.format(loglevel))
return
else: # loglevel is assumed to be numeric
lvl = loglevel
## Log message
self.logger.log(lvl, msg, *args, **kwargs)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
## File and path management
def set_worker_status(self, is_worker):
'''
Passes worker status to the FileManager object.
'''
self.fileManager.set_worker_status(is_worker)
def load_config_file(self, config_path):
'''
Load the PSICT config file from the specified path.
Prior to version 1.1.3.1, this method needed to be invoked explicitly in the external script.
This is now changed in order to enable logging config to be imported from the config file,
and log as much of the psictUIFInterface object creation as possible. Logging of the config
file load process is now confirmed post-fact.
The config_path provided should be an absolute path (behaviour is not guaranteed if it is relative).
Prior to version 1.0.7.1, these configuration settings were stored in the script rc-file.
'''
## Set config path
self.script_rcpath = os.path.normpath(config_path)
## Import config file as module - preserve old names from rc-file
config_spec = importlib.util.spec_from_file_location("", self.script_rcpath)
self._script_rc = importlib.util.module_from_spec(config_spec)
config_spec.loader.exec_module(self._script_rc)
def assign_config_to_delegates(self):
'''
Assign the config path/module to the FileManager and PulseSeqManager objects
Prior to version 1.1.3.1, this was done implicitly in the load_config_file method.
'''
self.fileManager.assign_script_rcmodule(self._script_rc, self.script_rcpath)
self.pulseSeqManager.assign_script_rcmodule(self._script_rc, self.script_rcpath)
self.logger.debug('Config path/module assigned to delegates.')
def set_labber_exe_path(self, new_labber_exe_path):
'''
Change the stored (system default) Labber executable path to a custom path.
Wraps the FileManager.set_labber_exe_path method.
'''
self.fileManager.set_labber_exe_path(new_labber_exe_path)
def set_template_file(self, template_dir, template_file):
'''
Set the template hdf5 file.
The "template" file is so named to differentiate it from the "reference" file, which can be modified by PSICT. The template file is guaranteed to be unmodified.
Wraps the FileManager.set_template_file method.
'''
self.fileManager.set_template_file(template_dir, template_file)
def set_output_file(self, output_dir, output_file):
'''
Set the output hdf5 file.
Wraps the FileManager.set_output_file method.
'''
self.fileManager.set_output_file(output_dir, output_file)
def set_script_copy_target_dir(self, script_copy_target_dir):
'''
Sets the target directory to which the script will be copied.
Prior to version 1.0.7.1, this was specified in the script-rcfile.
'''
self.fileManager.set_script_copy_target_dir(script_copy_target_dir)
def pre_measurement_copy(self):
'''
Copy all specified files (eg external script, script-rcfile) for reproducability.
Wraps the FileManager.pre_measurement_copy method (the copy used to be carried out after the measurement; however, this interfered with editing of external scripts while the measurement was running).
'''
self.fileManager.pre_measurement_copy()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
## Labber MeasurementObject methods
def init_MeasurementObject(self, *, auto_init = False):
'''
Explicitly initialise the Labber MeasurementObject, so that it can be interacted with directly in the external script.
If this is not called, the MeasurementObject will be initialised during pre-measurement processing.
'''
## Ensure that the appropriate paths are set
self.logger.debug('Asserting reference and output paths exist...')
assert self.fileManager.reference_path
assert self.fileManager.output_path
## Initialise MeasurementObject
self.labberExporter.init_MeasurementObject(self.fileManager.reference_path, self.fileManager.output_path, auto_init = auto_init)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
## Instrument parameter setting methods
def set_point_values(self, point_values_dict):
'''
Set instrument parameters as point (single) values.
'''
## Status message
self.logger.log(LogLevels.VERBOSE, 'Adding point values for instrument parameters...')
## Iterate through instrument specifications in the input dict, and divert the SQPG spec to the PulseSeqManager.
for instrument_name, instrument_params in point_values_dict.items():
if instrument_name == "SQPG":
self.is_SQPG_used = True
self.pulseSeqManager.set_input_pulse_seq(instrument_params)
else:
self.labberExporter.add_point_value_spec(instrument_name, instrument_params)
## Status message
self.logger.debug("Instrument parameter point values added.")
def set_api_client_values(self, api_client_values_dict, hardware_names, server_name = 'localhost'):
'''
Set values through the Labber API InstrumentClient object.
In principle, this can be used for all point values, but is only strictly necessary for setting values as lists/arrays (presently, this is possible neither through the direct scripting API nor by direct HDF5 editing).
'''
## Status message
self.logger.debug('Adding API client values for instrument parameters...')
## Set server name in LabberExporter
self.labberExporter.set_server_name(server_name)
## Iterate through instrument specifications, and pass to the LabberExporter
for instrument_name in api_client_values_dict.keys():
instrument_params = api_client_values_dict[instrument_name]
hardware_name = hardware_names[instrument_name]
self.labberExporter.add_client_value_spec(instrument_name, \
instrument_params, hardware_name)
## Status message
self.logger.debug('Instrument parameter API client values added.')
def set_instr_config_values(self, instr_config_values_dict, hardware_names, server_name = 'localhost'):
'''
Set values by directly editing the reference hdf5 file's 'Instrument config' attributes.
Note that this requires the 'hardware name' of the instrument (ie the full name of the driver), as well as the server name ('localhost' by default).
'''
## Status message
self.logger.debug('Adding HDF5 InstrumentConfig values for instrument parameters...')
## Set server name in LabberExporter
self.labberExporter.set_server_name(server_name)
## Iterate through instruments and pass to LabberExporter
for instrument_name in instr_config_values_dict.keys():
instrument_params = instr_config_values_dict[instrument_name]
hardware_name = hardware_names[instrument_name]
self.labberExporter.add_instr_config_spec(instrument_name, instrument_params, \
hardware_name)
## Status message
self.logger.debug('Instrument parameter HDF5 InstrumentConfig values added.')
def set_iteration_values(self, iteration_values_dict, iteration_order_list):
'''
Set instrument parameters as (independent) iteration values.
Iteration values are set as custom IterationSpec objects. They live within the same structure as the point values, and so overwrite any point values that were previously specified using the set_point_values method. As there is no simple way to implement general relationships amongst variables, all inter-pulse calculations carried out using IterationSpec objects will always take the maximal values in the iteration range.
'''
## Status message
self.logger.log(LogLevels.VERBOSE, "Adding iteration values for instrument parameters...")
## Iterate through instrument specifications in the input dict, and divert the SQPG spec to the PulseSeqManager
for instrument_name, instrument_params in iteration_values_dict.items():
if instrument_name == "SQPG":
self.is_SQPG_used = True
self.pulseSeqManager.set_iteration_spec(instrument_params)
else:
self.labberExporter.add_iteration_spec(instrument_name, instrument_params)
## Set iteration order
self.labberExporter.set_iteration_order(iteration_order_list)
## Status message
self.logger.debug("Instrument parameter iteration values added.")
def set_channel_relations(self, channel_defs_dict, channel_relations_dict):
'''
Set the channel relations.
Pulse names should be used to specify pulse parameters. In addition, the pulse parameters should be specified using their full names and *not* their shortcodes! (This may be changed/fixed in future versions).
channel_defs_dict specifies the available channels, and their algebraic symbols used in the channel relation strings. channel_relations_dict specifies the actual relations.
'''
## Status message
self.logger.log(LogLevels.VERBOSE, "Adding channel relations...")
## Peel off SQPG specifications
if "SQPG" in channel_defs_dict:
self.is_SQPG_used = True
SQPG_defs = channel_defs_dict["SQPG"]
del channel_defs_dict["SQPG"]
## Set definitions
self.pulseSeqManager.add_channel_defs(SQPG_defs)
if "SQPG" in channel_relations_dict:
self.is_SQPG_used = True
SQPG_relations = channel_relations_dict["SQPG"]
del channel_relations_dict["SQPG"]
## Set relations
self.pulseSeqManager.add_channel_relations(SQPG_relations)
## Set channel definitions for generic instruments
self.labberExporter.add_channel_defs(channel_defs_dict)
## Set channel relations for generic instruments
self.labberExporter.set_channel_relations(channel_relations_dict)
## Status message
self.logger.debug("Channel relations added.")
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
## Measurement
def perform_measurement(self, *, dry_run = False):
'''
Calls Labber to perform the measurement.
Note that a final few pre-processing actions are taken before Labber is actually called:
- The last-set user-specified Labber executable path is applied to the system
- The Labber MeasurementObject is initialised (if this has not already occurred explicitly)
- The pulse sequence is processed
- All stored parameter values are actually applied to the Labber reference database file
- When not in worker mode, the measurement script is copied to its target destination
Following the pre-processing, Labber is called to carry out the measurement using the Labber MeasurementObject's performMeasurement() method.
There are currently no post-measurement operations (beyond changing the working directory back to the original one, which is probably redundant anyway...)
'''
## Status message
self.logger.log(LogLevels.VERBOSE, "Carrying out measurement pre-processing...")
##### Measurement pre-processing
## Set ScriptTools executable path
self.fileManager.apply_labber_exe_path()
## Initialise Labber MeasurementObject if not already done
self.init_MeasurementObject(auto_init = True)
## Check if SPQG is being used
if self.is_SQPG_used:
## Convert stored input pulse sequence to output pulse sequence
self.pulseSeqManager.convert_seq()
## Transfer output pulse sequence and main SQPG params to LabberExporter
self.labberExporter.add_point_value_spec("SQPG", self.pulseSeqManager.get_main_params())
self.labberExporter.receive_pulse_sequence(self.pulseSeqManager.export_output())
## Transfer pulse sequence relations to LabberExporter
self.labberExporter.receive_pulse_rels(*self.pulseSeqManager.export_relations())
else:
self.labberExporter.process_iteration_order()
## Apply all parameters stored in LabberExporter
self.labberExporter.apply_all()
## Copy script - carried out before measurement to allow editing the script file while the measurement is running in Labber
self.pre_measurement_copy()
## Status message
self.logger.debug("Measurement pre-processing completed.")
#### End measurement pre-processing
## Status message
self.logger.log(LogLevels.SPECIAL, "Calling Labber to perform measurement...")
## Call Labber to perform measurement
if self.MeasurementObject is not None:
if dry_run: # allows debugging w/o a Labber license
self.logger.warning("Measurement dry run; skipping actual measurement...")
else: # actually perform measurement
self.MeasurementObject.performMeasurement()
else:
raise RuntimeError("MeasurementObject has not been set!")
## Status message
self.logger.log(LogLevels.SPECIAL, "Measurement completed.")
## Change working directory back to original - this is here so Dany will be happy (it also exists in the destructor, but that is not run until ipython exits!)
os.chdir(self._original_wd)
## Final status message - this indicates the user can continue with other things!
self.logger.info("Labber-PSICT execution finished.")
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
|
[
"PSICT_UIF._include36.PulseSeqManager.PulseSeqManager",
"logging.FileHandler",
"os.makedirs",
"os.getcwd",
"PSICT_UIF._include36.LabberExporter.LabberExporter",
"logging.StreamHandler",
"logging.addLevelName",
"os.path.exists",
"datetime.datetime.now",
"logging.Formatter",
"os.path.normpath",
"logging.NullHandler",
"PSICT_UIF._include36.FileManager.FileManager",
"os.path.join",
"os.chdir",
"logging.getLogger"
] |
[((3074, 3085), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3083, 3085), False, 'import os\n'), ((3247, 3295), 'PSICT_UIF._include36.FileManager.FileManager', 'FileManager', ([], {'parent_logger_name': 'self.logger.name'}), '(parent_logger_name=self.logger.name)\n', (3258, 3295), False, 'from PSICT_UIF._include36.FileManager import FileManager\n'), ((3329, 3381), 'PSICT_UIF._include36.PulseSeqManager.PulseSeqManager', 'PulseSeqManager', ([], {'parent_logger_name': 'self.logger.name'}), '(parent_logger_name=self.logger.name)\n', (3344, 3381), False, 'from PSICT_UIF._include36.PulseSeqManager import PulseSeqManager\n'), ((3414, 3465), 'PSICT_UIF._include36.LabberExporter.LabberExporter', 'LabberExporter', ([], {'parent_logger_name': 'self.logger.name'}), '(parent_logger_name=self.logger.name)\n', (3428, 3465), False, 'from PSICT_UIF._include36.LabberExporter import LabberExporter\n'), ((4288, 4315), 'os.chdir', 'os.chdir', (['self._original_wd'], {}), '(self._original_wd)\n', (4296, 4315), False, 'import os\n'), ((4706, 4748), 'logging.addLevelName', 'logging.addLevelName', (['LogLevels.ALL', '"""ALL"""'], {}), "(LogLevels.ALL, 'ALL')\n", (4726, 4748), False, 'import logging\n'), ((4757, 4803), 'logging.addLevelName', 'logging.addLevelName', (['LogLevels.TRACE', '"""TRACE"""'], {}), "(LogLevels.TRACE, 'TRACE')\n", (4777, 4803), False, 'import logging\n'), ((4812, 4862), 'logging.addLevelName', 'logging.addLevelName', (['LogLevels.VERBOSE', '"""VERBOSE"""'], {}), "(LogLevels.VERBOSE, 'VERBOSE')\n", (4832, 4862), False, 'import logging\n'), ((4871, 4921), 'logging.addLevelName', 'logging.addLevelName', (['LogLevels.SPECIAL', '"""SPECIAL"""'], {}), "(LogLevels.SPECIAL, 'SPECIAL')\n", (4891, 4921), False, 'import logging\n'), ((5148, 5178), 'logging.getLogger', 'logging.getLogger', (['logger_name'], {}), '(logger_name)\n', (5165, 5178), False, 'import logging\n'), ((10425, 10454), 'os.path.normpath', 'os.path.normpath', (['config_path'], {}), '(config_path)\n', (10441, 10454), False, 'import os\n'), ((23296, 23323), 'os.chdir', 'os.chdir', (['self._original_wd'], {}), '(self._original_wd)\n', (23304, 23323), False, 'import os\n'), ((5626, 5659), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (5647, 5659), False, 'import logging\n'), ((5792, 5920), 'logging.Formatter', 'logging.Formatter', (["self._script_rc.logging_config['console_fmt']"], {'datefmt': "self._script_rc.logging_config['console_datefmt']"}), "(self._script_rc.logging_config['console_fmt'], datefmt=\n self._script_rc.logging_config['console_datefmt'])\n", (5809, 5920), False, 'import logging\n'), ((6546, 6577), 'os.path.join', 'os.path.join', (['log_dir', 'log_file'], {}), '(log_dir, log_file)\n', (6558, 6577), False, 'import os\n'), ((6613, 6642), 'logging.FileHandler', 'logging.FileHandler', (['log_path'], {}), '(log_path)\n', (6632, 6642), False, 'import logging\n'), ((6766, 6888), 'logging.Formatter', 'logging.Formatter', (["self._script_rc.logging_config['file_fmt']"], {'datefmt': "self._script_rc.logging_config['file_datefmt']"}), "(self._script_rc.logging_config['file_fmt'], datefmt=self.\n _script_rc.logging_config['file_datefmt'])\n", (6783, 6888), False, 'import logging\n'), ((7356, 7377), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (7375, 7377), False, 'import logging\n'), ((6341, 6364), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (6355, 6364), False, 'import os\n'), ((6390, 6410), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (6401, 6410), False, 'import os\n'), ((6492, 6506), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6504, 6506), False, 'from datetime import datetime\n')]
|
#! /usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import signal
import serial
PORT_SERIE = "/dev/ttyAMA0"
# Initialize the serial port
ser = serial.Serial(
port = PORT_SERIE,
baudrate = 9600,
parity = serial.PARITY_NONE,
stopbits = serial.STOPBITS_ONE,
bytesize = serial.EIGHTBITS,
timeout = 3
)
# Handle Ctrl+C
def signal_handler(sig, frame):
    print("Exiting program via Ctrl+C!")
    sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
# Read the hex data file
with open("data", "rb") as f:
    # Read the first byte
    byte = f.read(1)
    # Loop until we reach the end of the file
    while byte != b"":
        print(byte)
        # Send the byte over the serial port
        ser.write(byte)
        # Read the next byte (+ possible modification)
        byte = f.read(1)
|
[
"serial.Serial",
"sys.exit"
] |
[((163, 307), 'serial.Serial', 'serial.Serial', ([], {'port': 'PORT_SERIE', 'baudrate': '(9600)', 'parity': 'serial.PARITY_NONE', 'stopbits': 'serial.STOPBITS_ONE', 'bytesize': 'serial.EIGHTBITS', 'timeout': '(3)'}), '(port=PORT_SERIE, baudrate=9600, parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS, timeout=3)\n', (176, 307), False, 'import serial\n'), ((456, 467), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (464, 467), False, 'import sys\n')]
|
"""Module to setup celery client.
.. moduleauthor:: <NAME> <<EMAIL>>
.. note::
If CELERY_CONFIG_MODULE is set in environment, load celery config from
the filename declared in CELERY_CONFIG_MODULE.
"""
import os
from celery import Celery
celery = Celery(__name__)
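# Prefer a config module named by the environment; otherwise fall back to the
# packaged wrapper config.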
if 'CELERY_CONFIG_MODULE' in os.environ:
celery.config_from_envvar('CELERY_CONFIG_MODULE')
else:
from compass.utils import celeryconfig_wrapper as celeryconfig
celery.config_from_object(celeryconfig)
|
[
"celery.Celery"
] |
[((269, 285), 'celery.Celery', 'Celery', (['__name__'], {}), '(__name__)\n', (275, 285), False, 'from celery import Celery\n')]
|
#!/usr/bin/python
from tkinter import Tk, Frame, Label, BOTH
import time
class MirrorV(Frame):
def __init__(self, parent):
Frame.__init__(self, parent, background="white")
self.parent = parent
self.initUI()
def initUI(self):
self.parent.title("MirrorV")
self.makeFullscreen()
#make clock into its own frame and add it to the overall frame
#possibly break clock out into its own module/class
#maybe make it text on canvas rather than a label
#figure out how to make it a nice looking font
self.displayClock()
def makeFullscreen(self):
        self.parent.attributes("-fullscreen", True)
def displayClock(self):
self.clock = Label(self.parent, font=('Helvetica', 45), bg='black')
self.clock.pack(fill=BOTH, expand=1)
self.time1 = ''
self.tick()
def tick(self):
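        # Update the label only when the displayed time string changes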
time2 = time.strftime('%I:%M')
if time2 != self.time1:
self.time1 = time2
self.clock.config(text=time2)
self.clock.after(2000, self.tick)
def main():
root = Tk()
root.geometry("250x150+300+300")
app = MirrorV(root)
root.mainloop()
if __name__ == '__main__':
main()
|
[
"time.strftime",
"tkinter.Label",
"tkinter.Tk",
"tkinter.Frame.__init__"
] |
[((1113, 1117), 'tkinter.Tk', 'Tk', ([], {}), '()\n', (1115, 1117), False, 'from tkinter import Tk, Frame, Label, BOTH\n'), ((138, 186), 'tkinter.Frame.__init__', 'Frame.__init__', (['self', 'parent'], {'background': '"""white"""'}), "(self, parent, background='white')\n", (152, 186), False, 'from tkinter import Tk, Frame, Label, BOTH\n'), ((738, 792), 'tkinter.Label', 'Label', (['self.parent'], {'font': "('Helvetica', 45)", 'bg': '"""black"""'}), "(self.parent, font=('Helvetica', 45), bg='black')\n", (743, 792), False, 'from tkinter import Tk, Frame, Label, BOTH\n'), ((919, 941), 'time.strftime', 'time.strftime', (['"""%I:%M"""'], {}), "('%I:%M')\n", (932, 941), False, 'import time\n')]
|
from dataiku.customrecipe import get_recipe_config
import logging
from utils import get_input_output, text_extraction_parameters
from tesseractocr.extract_text import text_extraction
import pandas as pd
from constants import Constants
logger = logging.getLogger(__name__)
input_folder, output_dataset = get_input_output('folder', 'dataset')
params = text_extraction_parameters(get_recipe_config())
input_filenames = input_folder.list_paths_in_partition()
total_images = len(input_filenames)
df = pd.DataFrame(columns=['file', 'text'])
for i, sample_file in enumerate(input_filenames):
if sample_file.split('.')[-1] != "jpg":
logger.info("OCR - Rejecting {} because it is not a JPG file.".format(sample_file))
logger.info("OCR - Rejected {}/{} images".format(i+1, total_images))
continue
with input_folder.get_download_stream(sample_file) as stream:
img_bytes = stream.read()
img_text = text_extraction(img_bytes, params)
logger.info("OCR - Extracted text from {}/{} images".format(i+1, total_images))
df = df.append({'file': sample_file.split('/')[-1].split('.')[0], 'text': img_text}, ignore_index=True)
if params['recombine_pdf']:
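    # Pages of multi-page PDFs arrive as separate images tagged with
    # PDF_MULTI_SUFFIX; sort them by page number and join the extracted text
    # back into one row per source file.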
df['page_nb'] = df.apply(lambda row: int(row['file'].split(Constants.PDF_MULTI_SUFFIX)[1]) if Constants.PDF_MULTI_SUFFIX in row['file'] else 1, axis=1)
df['file'] = df.apply(lambda row: row['file'].split(Constants.PDF_MULTI_SUFFIX)[0] if Constants.PDF_MULTI_SUFFIX in row['file'] else row['file'], axis=1)
df = df.sort_values(['file', 'page_nb'], ascending=True)
df = df.groupby('file').agg({'text': lambda x: '\n\n'.join(map(str, list(x)))}).reset_index()
df = df[['file', 'text']]
output_dataset.write_with_schema(df)
|
[
"pandas.DataFrame",
"dataiku.customrecipe.get_recipe_config",
"tesseractocr.extract_text.text_extraction",
"utils.get_input_output",
"logging.getLogger"
] |
[((245, 272), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (262, 272), False, 'import logging\n'), ((305, 342), 'utils.get_input_output', 'get_input_output', (['"""folder"""', '"""dataset"""'], {}), "('folder', 'dataset')\n", (321, 342), False, 'from utils import get_input_output, text_extraction_parameters\n'), ((501, 539), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['file', 'text']"}), "(columns=['file', 'text'])\n", (513, 539), True, 'import pandas as pd\n'), ((380, 399), 'dataiku.customrecipe.get_recipe_config', 'get_recipe_config', ([], {}), '()\n', (397, 399), False, 'from dataiku.customrecipe import get_recipe_config\n'), ((938, 972), 'tesseractocr.extract_text.text_extraction', 'text_extraction', (['img_bytes', 'params'], {}), '(img_bytes, params)\n', (953, 972), False, 'from tesseractocr.extract_text import text_extraction\n')]
|
import os
import click
import pandas as pd
from datetime import datetime
from .transcoding.names.worldometers import col, val
worldometer_covid_url = (
"https://web.archive.org/web/{}/" "https://www.worldometers.info/coronavirus/{}"
)
figures = dict(
age_sex_demographics="coronavirus-age-sex-demographics", countries="#countries",
)
def prepare_worldometers_table(df, figure):
df = df.rename(columns=col[figure])
val_replacements = val[figure]
for column, old, new in val_replacements:
if column in df.columns:
df[column] = df[column].str.replace(old, new)
# we handle rel freqs properly
if figure == "age_sex_demographics":
        # Use a distinct loop variable: assigning to "col" here would shadow the
        # imported "col" mapping and make the rename above fail at runtime.
        for freq_col in [c for c in df.columns if "_rel_freq" in c]:
            try:
                df[freq_col] = df[freq_col].astype(float) / 100
            except ValueError:
                print(f" +++ error for {freq_col}")
return df
def parse_worldometers_stats(
figure="countries", snapshot_date=f"{datetime.now():%Y%m%d%H}"
):
tables = pd.read_html(
worldometer_covid_url.format(snapshot_date, figures[figure]), header=0
)[2:5] # this to exclude web.archive.org info tables
tables = [prepare_worldometers_table(df, figure) for df in tables]
for table in tables:
print(table)
return tables
# --- CLI ---
@click.command(name="from-web", help="""Datasets creation from `worldometers.info`.
Choose figure between {}""".format(list(figures.keys())))
@click.argument("figure")
@click.option("--snapshot-date", default=f"{datetime.now():%Y%m%d%H}")
@click.option("--out-dir", default="./data/interim/worldometers/",
help="if not specified, defaults to '<figure>.csv'",)
def cli(figure, snapshot_date, out_dir):
os.makedirs(out_dir, exist_ok=True)
print(f"writing: {out_dir}{figure}_*.csv")
dfs = parse_worldometers_stats(figure, snapshot_date=snapshot_date)
for n, df in enumerate(dfs):
df.to_csv(os.path.join(out_dir, f'{figure}_{n}.csv'), index=False)
if __name__ == "__main__":
cli()
|
[
"os.makedirs",
"click.argument",
"os.path.join",
"click.option",
"datetime.datetime.now"
] |
[((1513, 1537), 'click.argument', 'click.argument', (['"""figure"""'], {}), "('figure')\n", (1527, 1537), False, 'import click\n'), ((1610, 1733), 'click.option', 'click.option', (['"""--out-dir"""'], {'default': '"""./data/interim/worldometers/"""', 'help': '"""if not specified, defaults to \'<figure>.csv\'"""'}), '(\'--out-dir\', default=\'./data/interim/worldometers/\', help=\n "if not specified, defaults to \'<figure>.csv\'")\n', (1622, 1733), False, 'import click\n'), ((1794, 1829), 'os.makedirs', 'os.makedirs', (['out_dir'], {'exist_ok': '(True)'}), '(out_dir, exist_ok=True)\n', (1805, 1829), False, 'import os\n'), ((1011, 1025), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1023, 1025), False, 'from datetime import datetime\n'), ((2014, 2056), 'os.path.join', 'os.path.join', (['out_dir', 'f"""{figure}_{n}.csv"""'], {}), "(out_dir, f'{figure}_{n}.csv')\n", (2026, 2056), False, 'import os\n'), ((1582, 1596), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1594, 1596), False, 'from datetime import datetime\n')]
|
import gym
import custom_gym_env  # imported for its side effect: registering the custom env
def test_reset_and_step():
env = gym.make('MountainCarContinuous-v1')
env.reset()
action = [0.5]
next_state, reward, done, info = env.step(action)
|
[
"gym.make"
] |
[((71, 107), 'gym.make', 'gym.make', (['"""MountainCarContinuous-v1"""'], {}), "('MountainCarContinuous-v1')\n", (79, 107), False, 'import gym\n')]
|
# -*- coding: utf-8 -*-
import pandas as pd
import os
from common import globals as glob
from common import utils
#from matplotlib import style
#style.use("ggplot")
from pandas.tools.plotting import scatter_matrix
from . import clustering
from . import assoc_rule_mining
from . import regression
from . import classification
import matplotlib.pyplot as plt
import scipy.stats as stats
T_TEST_ALPHA = 0.20
NAME_FOR_LOGGER_ANALYSIS_MODULE = 'SBS_ANALYSIS'
POPULATION_MULTIPLIER_FOR_STORE_DENSITY = 100000
#QUANTILES_FOR_BINNING = [0.05, 0.15, 0.20, 0.5, 0.75, 0.9]
#note this is one more than quantiles
#BIN_CATEGORY_NAMES = ['VVL', 'VL', 'L', 'M', 'H', 'VH', 'VVH']
QUANTILES_FOR_BINNING = [0.1, 0.20, 0.6, 0.80]
#QUANTILES_FOR_BINNING_SB_STORES = [0.3, 0.8]
#BIN_CATEGORY_NAMES_SB_STORES = ['L', 'M', 'H']
#note this is one more than quantiles
BIN_CATEGORY_NAMES =['VL', 'L', 'M', 'H', 'VH']
def get_bins(data, quantiles):
#set up bins using quanties 0.05, 0.15, 0.25, 0.5, 0.75, 0.9
bins = []
for q in quantiles:
bins.append(data.quantile(q))
return bins
def get_bin_category(value, bins, bins_categorical):
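    # Assumes bins are sorted in ascending order; returns the first category
    # whose upper bound covers the value.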
for i in range(len(bins)):
if value <= bins[i]:
return bins_categorical[i]
return bins_categorical[-1] #did not fall into any bin so it is the last one since bins are in ascending order
def add_derived_features(df, df_SB):
#we want to add the following features
#number of starbucks stores
#categorization of number of starbucks stores VL, L, M, H, VH
#number of starbucks stores on airports
#Exists in multiple cities
#Ownership model
num_sb_total = []
num_sb_on_airports = []
exists_in_multiple_cities= []
ownership_type_mixed = []
multiple_brands = []
continent = []
#df is the combined df, already only contains countries with starbucks
for i in range(len(df)):
country = df.iloc[i]
cc = country['country_code']
#number of starbucks
store_count = df_SB['country'].value_counts()[cc]
num_sb_total.append(store_count)
#how many on airports
num_sb_on_airports.append(sum(df_SB[df_SB['country'] == cc]['on_airport']))
#all of these follow the same format, basically check if more than 1 unique value
exists_in_multiple_cities.append(str(len(df_SB[df_SB['country'] == cc]['city'].unique()) > 1))
multiple_brands.append(str(len(df_SB[df_SB['country'] == cc]['brand'].unique()) > 1))
ownership_type_mixed.append(str(len(df_SB[df_SB['country'] == cc]['ownership_type'].unique()) > 1))
#continent
continent.append(df_SB[df_SB['country'] == cc]['continent'].iloc[0])
#all set to add the new columns
#add categorical fields for international tourist arrival
bins = get_bins(df['ST.INT.ARVL'], QUANTILES_FOR_BINNING)
glob.log.info('bins for ST.INT.ARVL.Categorical ->')
glob.log.info(bins)
df['ST.INT.ARVL.Categorical'] = [get_bin_category(n, bins, BIN_CATEGORY_NAMES) for n in df['ST.INT.ARVL']]
#add a categorical field for population
bins = get_bins(df['SP.POP.TOTL'], QUANTILES_FOR_BINNING)
glob.log.info('bins for SP.POP.TOTL.Categorical ->')
glob.log.info(bins)
df['SP.POP.TOTL.Categorical'] = [get_bin_category(n, bins, BIN_CATEGORY_NAMES) for n in df['SP.POP.TOTL']]
df['Num.Starbucks.Stores'] = num_sb_total
bins = get_bins(df['Num.Starbucks.Stores'], QUANTILES_FOR_BINNING)
glob.log.info('bins for Num.Starbucks.Stores.Categorical ->')
glob.log.info(bins)
df['Num.Starbucks.Stores.Categorical'] = [get_bin_category(n, bins, BIN_CATEGORY_NAMES) for n in df['Num.Starbucks.Stores']]
#SBSD -> Starbucks store density i.e. number of Starbucks store per 100,000 people
df['Starbucks.Store.Density'] = (df['Num.Starbucks.Stores']*POPULATION_MULTIPLIER_FOR_STORE_DENSITY)/df['SP.POP.TOTL']
bins = get_bins(df['Starbucks.Store.Density'], QUANTILES_FOR_BINNING)
glob.log.info('bins for Starbucks.Store.Density.Categorical ->')
glob.log.info(bins)
df['Starbucks.Store.Density.Categorical'] = [get_bin_category(n, bins, BIN_CATEGORY_NAMES) for n in df['Starbucks.Store.Density']]
df['Num.Starbucks.Stores.On.Airports'] = num_sb_on_airports
df['Exists.In.Multiple.Cities'] = exists_in_multiple_cities
df['Ownership.Type.Mixed'] = ownership_type_mixed
df['continent'] = continent
df['MultipleBrands'] = multiple_brands
return df
def combine_datasets():
glob.log.info('combining datasets...')
#read WB final
fname = os.path.join(glob.OUTPUT_DIR_NAME, glob.WB_CSV_FILE_W_FEATURES)
df_WB = pd.read_csv(fname)
fname = os.path.join(glob.OUTPUT_DIR_NAME, glob.SB_CSV_FILE_W_FEATURES)
df_SB = pd.read_csv(fname)
#create a new dataset, by only keeping those countries that have a starbucks store
countries_w_sb = df_SB['country'].unique()
df = df_WB[df_WB['country_code'].isin(countries_w_sb)]
#now is a good time to check data quality score
dqs = utils.calc_dqs(df)
return df, dqs, df_WB, df_SB
def clean_combined_dataset(df):
###############################################################
#CLEANING STRATEGY 1: remove features which are less than 90% full
###############################################################
#now remove the columns that are less than FEATURE_DENSITY_THRESHOLD (90%) full
for col in df.columns:
density = (df[col].count())/(len(df))
if density < glob.FEATURE_DENSITY_THRESHOLD:
glob.log.info('%s is only %f full, dropping it...' %(col, density*100))
df = df.drop(col, 1)
###############################################################
#CLEANING STRATEGY 2: replace empty cells with mean of the column
# this is ok because we visually inspected the
# data and everything seemed to look like either
# a uniform of normal distribution
###############################################################
df = df.fillna(df.mean())
#check dqs again to see improvement
dqs = utils.calc_dqs(df)
return df, dqs
def run_t_test(df):
df_vh = df[(df['ST.INT.ARVL.Categorical'] == 'VH')]['Num.Starbucks.Stores']
df_row = df[(df['ST.INT.ARVL.Categorical'] == 'H') | (df['ST.INT.ARVL.Categorical'] == 'M') | (df['ST.INT.ARVL.Categorical'] == 'VL') | (df['ST.INT.ARVL.Categorical'] == 'L')]['Num.Starbucks.Stores']
result = stats.ttest_ind(a = df_vh, b = df_row, equal_var = False)
glob.log.info(result)
if result.pvalue < T_TEST_ALPHA:
glob.log.info('Null hypothesis for %s rejected because p value is of %f is less than the t-test alpha value of %f' %('ST.INT.ARVL.Categorical', result.pvalue, T_TEST_ALPHA))
else:
glob.log.info('Null hypothesis for %s accepted because p value is of %f is less than the t-test alpha value of %f' %('ST.INT.ARVL.Categorical', result.pvalue, T_TEST_ALPHA))
#store t-test results in a file
fname = os.path.join(glob.OUTPUT_DIR_NAME, glob.REGRESSION_DIR, glob.T_TEST_RESULT)
f = open(fname, 'w')
f.write('\"Hypothesis\",\"No difference between average number of Starbucks stores in countries with Very high and high number of international tourist Vs Rest of the world\"\n')
f.write('\"T-statistic, p-value\",\"%f,%f\"\n' %(result.statistic, result.pvalue))
f.close()
def do_eda(df):
feature_list = ['Num.Starbucks.Stores', 'IT.NET.USER.P2', 'ST.INT.ARVL', 'SP.POP.TOTL']
#calc Pearson's r for some important features to see if there is a relationship
fname = os.path.join(glob.OUTPUT_DIR_NAME, glob.EDA_DIR, glob.COMBINED_R)
utils.calc_r('combined', fname, df, feature_list)
#make a scatter matrix for the combined dataset just to see histograms and relationship
#between some important features
scatter_matrix(df[feature_list], diagonal='kde')
fname = os.path.join(glob.OUTPUT_DIR_NAME, glob.EDA_DIR, glob.COMBINED_SCATTER_MATRIX)
plt.savefig(fname)
plt.clf()
#also a histogram would be nice, since we used kernel density estimator in the scatter matrix
df[feature_list].hist()
fname = os.path.join(glob.OUTPUT_DIR_NAME, glob.EDA_DIR, glob.COMBINED_HISTOGRAM)
plt.savefig(fname)
plt.clf()
#make a scatter plot for everything
numeric_features = []
for col in df.columns:
try:
x=df[col].iloc[0]
float(x)#typecast the data to float to test if it is numeric
except:
glob.log.info('%s is not a numeric feature, ignoring' %(col))
else:
numeric_features.append(col)
glob.log.info('making scatter plots for Num.Starbucks.Stores w.r.t to all WDI indicators...(could take a minute)')
#df = df[df['country_code'] != 'US']
for col in numeric_features:
plt.figure()
fname = os.path.join(glob.OUTPUT_DIR_NAME, 'scatter', col + '.png')
df.plot.scatter(col, 'Num.Starbucks.Stores')
plt.savefig(fname)
plt.close('all')
def run():
glob.log.info('Begin SB + WB data analysis..')
glob.log.info('run analysis...')
df,dqs1,df_WB,df_SB = combine_datasets()
df,dqs2 = clean_combined_dataset(df)
#also clean the WB dataset, would be used for predictions
#df_WB,dqs_WB = clean_combined_dataset(df_WB)
fname = os.path.join(glob.OUTPUT_DIR_NAME, 'WDI_cleaned_dataset.csv')
df_WB.to_csv(fname)
#add derived features from SB dataset
df = add_derived_features(df, df_SB)
#all done , store it in a csv
fname = os.path.join(glob.OUTPUT_DIR_NAME, glob.COMBINED_DATASET_CSV)
glob.log.info('writing the combined WB and SB dataset to a file')
glob.log.info(df.head())
df.to_csv(fname, index=False)
#log the dqs to a file for easy access later on if needed
fname = os.path.join(glob.OUTPUT_DIR_NAME, glob.DQS_DIR, 'dqs_combined_dataset.csv')
f = open(fname, 'w')
f.write('dataset,before_cleaing, after_cleaning\n')
f.write('WDI+SB,%f,%f' %(dqs1,dqs2))
f.close()
#EDA on the combined dataset
do_eda(df)
#Clustering
clustering.run(df)
#association rule mining
assoc_rule_mining.run(df)
#t-test
run_t_test(df)
#run linear and polynomial regression, multivariate as well
regression.run(df)
#classification and done...
classification.run(df, df_WB)
|
[
"common.globals.log.info",
"pandas.tools.plotting.scatter_matrix",
"pandas.read_csv",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.close",
"scipy.stats.ttest_ind",
"matplotlib.pyplot.figure",
"common.utils.calc_dqs",
"common.utils.calc_r",
"os.path.join",
"matplotlib.pyplot.savefig"
] |
[((2983, 3035), 'common.globals.log.info', 'glob.log.info', (['"""bins for ST.INT.ARVL.Categorical ->"""'], {}), "('bins for ST.INT.ARVL.Categorical ->')\n", (2996, 3035), True, 'from common import globals as glob\n'), ((3040, 3059), 'common.globals.log.info', 'glob.log.info', (['bins'], {}), '(bins)\n', (3053, 3059), True, 'from common import globals as glob\n'), ((3299, 3351), 'common.globals.log.info', 'glob.log.info', (['"""bins for SP.POP.TOTL.Categorical ->"""'], {}), "('bins for SP.POP.TOTL.Categorical ->')\n", (3312, 3351), True, 'from common import globals as glob\n'), ((3356, 3375), 'common.globals.log.info', 'glob.log.info', (['bins'], {}), '(bins)\n', (3369, 3375), True, 'from common import globals as glob\n'), ((3629, 3690), 'common.globals.log.info', 'glob.log.info', (['"""bins for Num.Starbucks.Stores.Categorical ->"""'], {}), "('bins for Num.Starbucks.Stores.Categorical ->')\n", (3642, 3690), True, 'from common import globals as glob\n'), ((3695, 3714), 'common.globals.log.info', 'glob.log.info', (['bins'], {}), '(bins)\n', (3708, 3714), True, 'from common import globals as glob\n'), ((4145, 4209), 'common.globals.log.info', 'glob.log.info', (['"""bins for Starbucks.Store.Density.Categorical ->"""'], {}), "('bins for Starbucks.Store.Density.Categorical ->')\n", (4158, 4209), True, 'from common import globals as glob\n'), ((4214, 4233), 'common.globals.log.info', 'glob.log.info', (['bins'], {}), '(bins)\n', (4227, 4233), True, 'from common import globals as glob\n'), ((4772, 4810), 'common.globals.log.info', 'glob.log.info', (['"""combining datasets..."""'], {}), "('combining datasets...')\n", (4785, 4810), True, 'from common import globals as glob\n'), ((4842, 4905), 'os.path.join', 'os.path.join', (['glob.OUTPUT_DIR_NAME', 'glob.WB_CSV_FILE_W_FEATURES'], {}), '(glob.OUTPUT_DIR_NAME, glob.WB_CSV_FILE_W_FEATURES)\n', (4854, 4905), False, 'import os\n'), ((4918, 4936), 'pandas.read_csv', 'pd.read_csv', (['fname'], {}), '(fname)\n', (4929, 4936), True, 'import pandas as pd\n'), ((4950, 5013), 'os.path.join', 'os.path.join', (['glob.OUTPUT_DIR_NAME', 'glob.SB_CSV_FILE_W_FEATURES'], {}), '(glob.OUTPUT_DIR_NAME, glob.SB_CSV_FILE_W_FEATURES)\n', (4962, 5013), False, 'import os\n'), ((5026, 5044), 'pandas.read_csv', 'pd.read_csv', (['fname'], {}), '(fname)\n', (5037, 5044), True, 'import pandas as pd\n'), ((5310, 5328), 'common.utils.calc_dqs', 'utils.calc_dqs', (['df'], {}), '(df)\n', (5324, 5328), False, 'from common import utils\n'), ((6449, 6467), 'common.utils.calc_dqs', 'utils.calc_dqs', (['df'], {}), '(df)\n', (6463, 6467), False, 'from common import utils\n'), ((6834, 6885), 'scipy.stats.ttest_ind', 'stats.ttest_ind', ([], {'a': 'df_vh', 'b': 'df_row', 'equal_var': '(False)'}), '(a=df_vh, b=df_row, equal_var=False)\n', (6849, 6885), True, 'import scipy.stats as stats\n'), ((6896, 6917), 'common.globals.log.info', 'glob.log.info', (['result'], {}), '(result)\n', (6909, 6917), True, 'from common import globals as glob\n'), ((7377, 7452), 'os.path.join', 'os.path.join', (['glob.OUTPUT_DIR_NAME', 'glob.REGRESSION_DIR', 'glob.T_TEST_RESULT'], {}), '(glob.OUTPUT_DIR_NAME, glob.REGRESSION_DIR, glob.T_TEST_RESULT)\n', (7389, 7452), False, 'import os\n'), ((7977, 8042), 'os.path.join', 'os.path.join', (['glob.OUTPUT_DIR_NAME', 'glob.EDA_DIR', 'glob.COMBINED_R'], {}), '(glob.OUTPUT_DIR_NAME, glob.EDA_DIR, glob.COMBINED_R)\n', (7989, 8042), False, 'import os\n'), ((8047, 8096), 'common.utils.calc_r', 'utils.calc_r', (['"""combined"""', 'fname', 'df', 'feature_list'], {}), "('combined', fname, df, feature_list)\n", (8059, 8096), False, 'from common import utils\n'), ((8236, 8284), 'pandas.tools.plotting.scatter_matrix', 'scatter_matrix', (['df[feature_list]'], {'diagonal': '"""kde"""'}), "(df[feature_list], diagonal='kde')\n", (8250, 8284), False, 'from pandas.tools.plotting import scatter_matrix\n'), ((8297, 8375), 'os.path.join', 'os.path.join', (['glob.OUTPUT_DIR_NAME', 'glob.EDA_DIR', 'glob.COMBINED_SCATTER_MATRIX'], {}), '(glob.OUTPUT_DIR_NAME, glob.EDA_DIR, glob.COMBINED_SCATTER_MATRIX)\n', (8309, 8375), False, 'import os\n'), ((8380, 8398), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (8391, 8398), True, 'import matplotlib.pyplot as plt\n'), ((8403, 8412), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8410, 8412), True, 'import matplotlib.pyplot as plt\n'), ((8556, 8629), 'os.path.join', 'os.path.join', (['glob.OUTPUT_DIR_NAME', 'glob.EDA_DIR', 'glob.COMBINED_HISTOGRAM'], {}), '(glob.OUTPUT_DIR_NAME, glob.EDA_DIR, glob.COMBINED_HISTOGRAM)\n', (8568, 8629), False, 'import os\n'), ((8634, 8652), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (8645, 8652), True, 'import matplotlib.pyplot as plt\n'), ((8657, 8666), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8664, 8666), True, 'import matplotlib.pyplot as plt\n'), ((9031, 9155), 'common.globals.log.info', 'glob.log.info', (['"""making scatter plots for Num.Starbucks.Stores w.r.t to all WDI indicators...(could take a minute)"""'], {}), "(\n 'making scatter plots for Num.Starbucks.Stores w.r.t to all WDI indicators...(could take a minute)'\n )\n", (9044, 9155), True, 'from common import globals as glob\n'), ((9459, 9505), 'common.globals.log.info', 'glob.log.info', (['"""Begin SB + WB data analysis.."""'], {}), "('Begin SB + WB data analysis..')\n", (9472, 9505), True, 'from common import globals as glob\n'), ((9510, 9542), 'common.globals.log.info', 'glob.log.info', (['"""run analysis..."""'], {}), "('run analysis...')\n", (9523, 9542), True, 'from common import globals as glob\n'), ((9763, 9824), 'os.path.join', 'os.path.join', (['glob.OUTPUT_DIR_NAME', '"""WDI_cleaned_dataset.csv"""'], {}), "(glob.OUTPUT_DIR_NAME, 'WDI_cleaned_dataset.csv')\n", (9775, 9824), False, 'import os\n'), ((9988, 10049), 'os.path.join', 'os.path.join', (['glob.OUTPUT_DIR_NAME', 'glob.COMBINED_DATASET_CSV'], {}), '(glob.OUTPUT_DIR_NAME, glob.COMBINED_DATASET_CSV)\n', (10000, 10049), False, 'import os\n'), ((10054, 10119), 'common.globals.log.info', 'glob.log.info', (['"""writing the combined WB and SB dataset to a file"""'], {}), "('writing the combined WB and SB dataset to a file')\n", (10067, 10119), True, 'from common import globals as glob\n'), ((10266, 10342), 'os.path.join', 'os.path.join', (['glob.OUTPUT_DIR_NAME', 'glob.DQS_DIR', '"""dqs_combined_dataset.csv"""'], {}), "(glob.OUTPUT_DIR_NAME, glob.DQS_DIR, 'dqs_combined_dataset.csv')\n", (10278, 10342), False, 'import os\n'), ((6963, 7147), 'common.globals.log.info', 'glob.log.info', (["('Null hypothesis for %s rejected because p value is of %f is less than the t-test alpha value of %f'\n % ('ST.INT.ARVL.Categorical', result.pvalue, T_TEST_ALPHA))"], {}), "(\n 'Null hypothesis for %s rejected because p value is of %f is less than the t-test alpha value of %f'\n % ('ST.INT.ARVL.Categorical', result.pvalue, T_TEST_ALPHA))\n", (6976, 7147), True, 'from common import globals as glob\n'), ((7155, 7339), 'common.globals.log.info', 'glob.log.info', (["('Null hypothesis for %s accepted because p value is of %f is less than the t-test alpha value of %f'\n % ('ST.INT.ARVL.Categorical', result.pvalue, T_TEST_ALPHA))"], {}), "(\n 'Null hypothesis for %s accepted because p value is of %f is less than the t-test alpha value of %f'\n % ('ST.INT.ARVL.Categorical', result.pvalue, T_TEST_ALPHA))\n", (7168, 7339), True, 'from common import globals as glob\n'), ((9241, 9253), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9251, 9253), True, 'import matplotlib.pyplot as plt\n'), ((9270, 9329), 'os.path.join', 'os.path.join', (['glob.OUTPUT_DIR_NAME', '"""scatter"""', "(col + '.png')"], {}), "(glob.OUTPUT_DIR_NAME, 'scatter', col + '.png')\n", (9282, 9329), False, 'import os\n'), ((9391, 9409), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (9402, 9409), True, 'import matplotlib.pyplot as plt\n'), ((9418, 9434), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (9427, 9434), True, 'import matplotlib.pyplot as plt\n'), ((5834, 5908), 'common.globals.log.info', 'glob.log.info', (["('%s is only %f full, dropping it...' % (col, density * 100))"], {}), "('%s is only %f full, dropping it...' % (col, density * 100))\n", (5847, 5908), True, 'from common import globals as glob\n'), ((8910, 8970), 'common.globals.log.info', 'glob.log.info', (["('%s is not a numeric feature, ignoring' % col)"], {}), "('%s is not a numeric feature, ignoring' % col)\n", (8923, 8970), True, 'from common import globals as glob\n')]
|
import os
import pyAesCrypt
bufferSize = 64 * 1024
key_file = open("keyfile.txt", "r")
key = key_file.readline()[1:-2]
key_file.close()
dir_file = open("dirfile.txt", "r")
dir_name = dir_file.readline()[1:-2]
dir_file.close()
if(key == "abrakadabra"):
os.remove("keyfile.txt")
os.remove("dirfile.txt")
os.makedirs(dir_name)
|
[
"os.remove",
"os.makedirs"
] |
[((272, 296), 'os.remove', 'os.remove', (['"""keyfile.txt"""'], {}), "('keyfile.txt')\n", (281, 296), False, 'import os\n'), ((302, 326), 'os.remove', 'os.remove', (['"""dirfile.txt"""'], {}), "('dirfile.txt')\n", (311, 326), False, 'import os\n'), ((332, 353), 'os.makedirs', 'os.makedirs', (['dir_name'], {}), '(dir_name)\n', (343, 353), False, 'import os\n')]
|
import os
from .base_atari_env import BaseAtariEnv, base_env_wrapper_fn, parallel_wrapper_fn
def raw_env(**kwargs):
mode = 33
num_players = 4
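    # env_name below is derived from this module's filename (".py" stripped)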
return BaseAtariEnv(
game="pong",
num_players=num_players,
mode_num=mode,
env_name=os.path.basename(__file__)[:-3],
**kwargs
)
env = base_env_wrapper_fn(raw_env)
parallel_env = parallel_wrapper_fn(env)
|
[
"os.path.basename"
] |
[((272, 298), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (288, 298), False, 'import os\n')]
|
# Workaround ("gambiarra"): force the ANGLE GL backend before any kivy import
import os
os.environ['KIVY_GL_BACKEND'] = 'angle_sdl2'
from kivy.app import App
from kivy.uix.label import Label
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.clock import Clock
class Screen1(Screen):
def my_callback(dt):
print ('#########################################################My callback is called !')
Clock.schedule_once(my_callback, 1)
class relogio():
def my_callback(dt):
print ('My callback is called !')
Clock.schedule_once(my_callback, 1)
class MyApp(App):
def build(self):
return Screen1()
MyApp().run()
|
[
"kivy.clock.Clock.schedule_once"
] |
[((416, 451), 'kivy.clock.Clock.schedule_once', 'Clock.schedule_once', (['my_callback', '(1)'], {}), '(my_callback, 1)\n', (435, 451), False, 'from kivy.clock import Clock\n'), ((548, 583), 'kivy.clock.Clock.schedule_once', 'Clock.schedule_once', (['my_callback', '(1)'], {}), '(my_callback, 1)\n', (567, 583), False, 'from kivy.clock import Clock\n')]
|
def create_service(name, **kwargs):
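    # Lazy imports keep each service's dependencies isolated: only the module
    # for the requested service is loaded.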
if name == 'Regulation':
from services.reg_service.reg_service import RegService
return RegService()
elif name == 'ArtificialInertia':
from services.artificial_inertia_service.artificial_inertia_service import ArtificialInertiaService
return ArtificialInertiaService()
elif name == 'Reserve':
from services.reserve_service.reserve_service import ReserveService
return ReserveService()
elif name == 'DistributionVoltageService':
from services.distribution_voltage_regulation.distribution_regulation_service import DistributionVoltageService
return DistributionVoltageService()
elif name == 'EnergyMarketService':
from services.energy_market_service.energy_market_service import EnergyMarketService
energy_market = EnergyMarketService()
return energy_market
elif name == 'PeakManagementService':
from services.peak_managment_service.peak_management_service import PeakManagementService
return PeakManagementService(sim_step=kwargs['sim_step'])
    # NOTE: duplicate of the branch above, so this branch is unreachable as written
    elif name == 'PeakManagementService':
from datetime import timedelta
from services.peak_managment_service.peak_management_service import PeakManagementService
return PeakManagementService(sim_step=timedelta(minutes=60))
raise "There is no service with name: " + name
|
[
"services.peak_managment_service.peak_management_service.PeakManagementService",
"services.energy_market_service.energy_market_service.EnergyMarketService",
"services.reg_service.reg_service.RegService",
"datetime.timedelta",
"services.artificial_inertia_service.artificial_inertia_service.ArtificialInertiaService",
"services.distribution_voltage_regulation.distribution_regulation_service.DistributionVoltageService",
"services.reserve_service.reserve_service.ReserveService"
] |
[((145, 157), 'services.reg_service.reg_service.RegService', 'RegService', ([], {}), '()\n', (155, 157), False, 'from services.reg_service.reg_service import RegService\n'), ((320, 346), 'services.artificial_inertia_service.artificial_inertia_service.ArtificialInertiaService', 'ArtificialInertiaService', ([], {}), '()\n', (344, 346), False, 'from services.artificial_inertia_service.artificial_inertia_service import ArtificialInertiaService\n'), ((468, 484), 'services.reserve_service.reserve_service.ReserveService', 'ReserveService', ([], {}), '()\n', (482, 484), False, 'from services.reserve_service.reserve_service import ReserveService\n'), ((677, 705), 'services.distribution_voltage_regulation.distribution_regulation_service.DistributionVoltageService', 'DistributionVoltageService', ([], {}), '()\n', (703, 705), False, 'from services.distribution_voltage_regulation.distribution_regulation_service import DistributionVoltageService\n'), ((865, 886), 'services.energy_market_service.energy_market_service.EnergyMarketService', 'EnergyMarketService', ([], {}), '()\n', (884, 886), False, 'from services.energy_market_service.energy_market_service import EnergyMarketService\n'), ((1077, 1127), 'services.peak_managment_service.peak_management_service.PeakManagementService', 'PeakManagementService', ([], {'sim_step': "kwargs['sim_step']"}), "(sim_step=kwargs['sim_step'])\n", (1098, 1127), False, 'from services.peak_managment_service.peak_management_service import PeakManagementService\n'), ((1355, 1376), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(60)'}), '(minutes=60)\n', (1364, 1376), False, 'from datetime import timedelta\n')]
|
#!/usr/bin/env python
import rospy
from nav_msgs.msg import Path
import rosbag
rospy.init_node('replay', anonymous=True)
pub = rospy.Publisher('/sPath', Path, queue_size=10, latch=True)
bag = rosbag.Bag('/home/kai/map-11-paths.bag')
for topic, msg, t in bag.read_messages(topics=['/sPath']):
# print(msg)
print('-----------------------------------')
print(msg.header)
msg.header.stamp.secs = 0
msg.header.stamp.nsecs = 0
print(msg.header)
pub.publish(msg)
bag.close()
print('I will spin!')
# Keep the node alive so late subscribers can still receive the latched message
rospy.spin()
|
[
"rosbag.Bag",
"rospy.Publisher",
"rospy.init_node"
] |
[((421, 462), 'rospy.init_node', 'rospy.init_node', (['"""replay"""'], {'anonymous': '(True)'}), "('replay', anonymous=True)\n", (436, 462), False, 'import rospy\n'), ((469, 527), 'rospy.Publisher', 'rospy.Publisher', (['"""/sPath"""', 'Path'], {'queue_size': '(10)', 'latch': '(True)'}), "('/sPath', Path, queue_size=10, latch=True)\n", (484, 527), False, 'import rospy\n'), ((534, 574), 'rosbag.Bag', 'rosbag.Bag', (['"""/home/kai/map-11-paths.bag"""'], {}), "('/home/kai/map-11-paths.bag')\n", (544, 574), False, 'import rosbag\n')]
|
from pathlib import Path
from utils.installer import Installer
from utils.chalk import print_header
from utils.system import install_pkg, run_command
from utils.utils import file_exists, move, link_files
import utils.platform as platform
SCRIPT_DIR = Path(__file__).parent
class Main(Installer):
def run(self):
if not platform.is_arch:
return
print_header("Setting up VNC")
install_pkg("tigervnc", "lxde-gtk3")
if file_exists("/etc/pam.d/tigervnc.pacnew"):
move("/etc/pam.d/tigervnc.pacnew", "/etc/pam.d/tigervnc")
link_files(
[
[
SCRIPT_DIR.joinpath("vncserver.users"),
Path("/etc/tigervnc/vncserver.users"),
True,
],
[
SCRIPT_DIR.joinpath("config"),
Path.home().joinpath(".vnc", "config"),
True,
],
]
)
run_command("sudo systemctl enable vncserver@:1")
run_command("sudo systemctl start vncserver@:1")
|
[
"pathlib.Path.home",
"utils.chalk.print_header",
"pathlib.Path",
"utils.system.install_pkg",
"utils.utils.file_exists",
"utils.utils.move",
"utils.system.run_command"
] |
[((282, 296), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (286, 296), False, 'from pathlib import Path\n'), ((409, 439), 'utils.chalk.print_header', 'print_header', (['"""Setting up VNC"""'], {}), "('Setting up VNC')\n", (421, 439), False, 'from utils.chalk import print_header\n'), ((449, 485), 'utils.system.install_pkg', 'install_pkg', (['"""tigervnc"""', '"""lxde-gtk3"""'], {}), "('tigervnc', 'lxde-gtk3')\n", (460, 485), False, 'from utils.system import install_pkg, run_command\n'), ((498, 539), 'utils.utils.file_exists', 'file_exists', (['"""/etc/pam.d/tigervnc.pacnew"""'], {}), "('/etc/pam.d/tigervnc.pacnew')\n", (509, 539), False, 'from utils.utils import remove, file_exists, move, link_file, link_files\n'), ((1035, 1084), 'utils.system.run_command', 'run_command', (['"""sudo systemctl enable vncserver@:1"""'], {}), "('sudo systemctl enable vncserver@:1')\n", (1046, 1084), False, 'from utils.system import install_pkg, run_command\n'), ((1093, 1141), 'utils.system.run_command', 'run_command', (['"""sudo systemctl start vncserver@:1"""'], {}), "('sudo systemctl start vncserver@:1')\n", (1104, 1141), False, 'from utils.system import install_pkg, run_command\n'), ((553, 610), 'utils.utils.move', 'move', (['"""/etc/pam.d/tigervnc.pacnew"""', '"""/etc/pam.d/tigervnc"""'], {}), "('/etc/pam.d/tigervnc.pacnew', '/etc/pam.d/tigervnc')\n", (557, 610), False, 'from utils.utils import remove, file_exists, move, link_file, link_files\n'), ((744, 781), 'pathlib.Path', 'Path', (['"""/etc/tigervnc/vncserver.users"""'], {}), "('/etc/tigervnc/vncserver.users')\n", (748, 781), False, 'from pathlib import Path\n'), ((917, 928), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (926, 928), False, 'from pathlib import Path\n')]
|
from setuptools import setup, find_packages
setup(name='m_gym',
version='0.0.1',
install_requires=['gym','numpy','pathlib', 'uuid']
)
|
[
"setuptools.setup"
] |
[((46, 140), 'setuptools.setup', 'setup', ([], {'name': '"""m_gym"""', 'version': '"""0.0.1"""', 'install_requires': "['gym', 'numpy', 'pathlib', 'uuid']"}), "(name='m_gym', version='0.0.1', install_requires=['gym', 'numpy',\n 'pathlib', 'uuid'])\n", (51, 140), False, 'from setuptools import setup, find_packages\n')]
|
#!/usr/bin/python
import os
import os.path
with open("../file_walker.cmake", 'w') as file_walker:
file_walker.write("set(SOURCES\n")
for root, sub_dirs, files in os.walk("../src"):
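        # root begins with "../", so root[3:] makes the written path repo-relative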
for file in files:
if file.endswith(".cpp"):
file_walker.write(" " + root[3:] + "/" + file + "\n")
file_walker.write(")\n")
file_walker.write("set(HEADERS\n")
for root, sub_dirs, files in os.walk("../include"):
for file in files:
if file.endswith(".hpp"):
file_walker.write(" " + root[3:] + "/" + file + "\n")
file_walker.write(")\n")
|
[
"os.walk"
] |
[((172, 189), 'os.walk', 'os.walk', (['"""../src"""'], {}), "('../src')\n", (179, 189), False, 'import os\n'), ((430, 451), 'os.walk', 'os.walk', (['"""../include"""'], {}), "('../include')\n", (437, 451), False, 'import os\n')]
|
#!/usr/bin/env python3
import argparse
import logging
import sys
from ..reporting.report_result import report_artifact_stats_result
from ..util.constants import LOG, PERFORMANCE_STORAGE_SERVICE_API
from .base_artifact_stats_collector import BaseArtifactStatsCollector
from .collectors import * # Import necessary for __subclasses__ enumeration.
def collect_artifact_stats(collectors, is_debug=False):
"""
Executes and combines the results of all the provided collectors.
Parameters
----------
    collectors : [BaseArtifactStatsCollector]
    is_debug : bool
        Whether to run each collector with debug output enabled.
Returns
-------
exit_code : int
The exit code of the collection task. 0 on success.
metrics : dict
The combined metrics from all of the collectors.
Meaningless if the exit code is not 0.
"""
exit_code, aggregated_metrics = 0, {}
try:
for collector_class in collectors:
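            # Each collector runs inside its context manager so setup and
            # teardown always execute, even on failure.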
            with collector_class(is_debug=is_debug) as collector:
cname = collector.__class__.__name__
LOG.info(f'Starting {cname} collection.')
exit_code = collector.run_collector()
results = collector.metrics
duplicate_keys = set(aggregated_metrics).intersection(results)
if not duplicate_keys:
aggregated_metrics.update(results)
LOG.info(f'{cname} finished successfully.')
else:
exit_code = 1
LOG.error(f'Collector key conflict on {duplicate_keys}.')
LOG.error(f'{cname} failed. Stopping all collection.')
break
except Exception as err:
exit_code = 1 if exit_code == 0 else exit_code
LOG.error(err)
return exit_code, aggregated_metrics
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--debug",
action="store_true",
dest="debug",
default=False,
help="Enable debug output")
parser.add_argument("--publish-results",
default="none",
type=str,
choices=PERFORMANCE_STORAGE_SERVICE_API.keys(),
help="Environment in which to store performance results")
parser.add_argument("--publish-username",
type=str,
help="Performance Storage Service Username")
parser.add_argument("--publish-password",
type=str,
help="Performance Storage Service password")
args = parser.parse_args()
if args.debug:
LOG.setLevel(logging.DEBUG)
# Get the BaseBinaryMetricsCollector subclasses imported from binary_metrics.binary_metrics_collectors
# Effectively this adds each binary metric collector class into an array to be instantiated later.
collectors = [obj for obj in BaseArtifactStatsCollector.__subclasses__()]
    exit_code, aggregated_metrics = collect_artifact_stats(collectors, is_debug=args.debug)
if not exit_code:
LOG.info(f'Artifact stats: {aggregated_metrics}')
if args.publish_results != 'none':
report_artifact_stats_result(args.publish_results, aggregated_metrics,
args.publish_username, args.publish_password)
logging.shutdown()
sys.exit(exit_code)
|
[
"argparse.ArgumentParser",
"sys.exit",
"logging.shutdown"
] |
[((1817, 1842), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1840, 1842), False, 'import argparse\n'), ((3375, 3393), 'logging.shutdown', 'logging.shutdown', ([], {}), '()\n', (3391, 3393), False, 'import logging\n'), ((3398, 3417), 'sys.exit', 'sys.exit', (['exit_code'], {}), '(exit_code)\n', (3406, 3417), False, 'import sys\n')]
|
import pygame
from setup import Setup
class Textures:
"""Загружает из файлов и хранит изображения."""
def __init__(self):
self.setup = Setup()
        # Cell
self.cell = pygame.image.load('png/cell.png')
        # Selected cell
self.cell_select = pygame.image.load('png/cell_select.png')
        # Cell where placement is forbidden
self.cell_district = pygame.image.load('png/cell_district.png')
        # Highlight cell for the winning combination
self.cell_win = pygame.image.load('png/cell_win.png')
        # Cross and nought (X and O)
self.__figure = [
[pygame.image.load('png/cross.png'), pygame.image.load('png/zero.png')],
[pygame.image.load('png/star01.png'), pygame.image.load('png/star02.png')],
[pygame.image.load('png/cross-diamond.png'), pygame.image.load('png/zero-diamond.png')],
[pygame.image.load('png/colors01.png'), pygame.image.load('png/colors02.png')],
[pygame.image.load('png/apple8bit.png'), pygame.image.load('png/pear8bit.png')],
[pygame.image.load('png/smile01.png'), pygame.image.load('png/smile02.png')],
[pygame.image.load('png/tile01.png'), pygame.image.load('png/tile02.png')]
]
@property
def figure(self):
return self.__figure[self.setup.skin_number]
|
[
"pygame.image.load",
"setup.Setup"
] |
[((154, 161), 'setup.Setup', 'Setup', ([], {}), '()\n', (159, 161), False, 'from setup import Setup\n'), ((200, 233), 'pygame.image.load', 'pygame.image.load', (['"""png/cell.png"""'], {}), "('png/cell.png')\n", (217, 233), False, 'import pygame\n'), ((290, 330), 'pygame.image.load', 'pygame.image.load', (['"""png/cell_select.png"""'], {}), "('png/cell_select.png')\n", (307, 330), False, 'import pygame\n'), ((404, 446), 'pygame.image.load', 'pygame.image.load', (['"""png/cell_district.png"""'], {}), "('png/cell_district.png')\n", (421, 446), False, 'import pygame\n'), ((521, 558), 'pygame.image.load', 'pygame.image.load', (['"""png/cell_win.png"""'], {}), "('png/cell_win.png')\n", (538, 558), False, 'import pygame\n'), ((625, 659), 'pygame.image.load', 'pygame.image.load', (['"""png/cross.png"""'], {}), "('png/cross.png')\n", (642, 659), False, 'import pygame\n'), ((661, 694), 'pygame.image.load', 'pygame.image.load', (['"""png/zero.png"""'], {}), "('png/zero.png')\n", (678, 694), False, 'import pygame\n'), ((710, 745), 'pygame.image.load', 'pygame.image.load', (['"""png/star01.png"""'], {}), "('png/star01.png')\n", (727, 745), False, 'import pygame\n'), ((747, 782), 'pygame.image.load', 'pygame.image.load', (['"""png/star02.png"""'], {}), "('png/star02.png')\n", (764, 782), False, 'import pygame\n'), ((798, 840), 'pygame.image.load', 'pygame.image.load', (['"""png/cross-diamond.png"""'], {}), "('png/cross-diamond.png')\n", (815, 840), False, 'import pygame\n'), ((842, 883), 'pygame.image.load', 'pygame.image.load', (['"""png/zero-diamond.png"""'], {}), "('png/zero-diamond.png')\n", (859, 883), False, 'import pygame\n'), ((899, 936), 'pygame.image.load', 'pygame.image.load', (['"""png/colors01.png"""'], {}), "('png/colors01.png')\n", (916, 936), False, 'import pygame\n'), ((938, 975), 'pygame.image.load', 'pygame.image.load', (['"""png/colors02.png"""'], {}), "('png/colors02.png')\n", (955, 975), False, 'import pygame\n'), ((991, 1029), 'pygame.image.load', 'pygame.image.load', (['"""png/apple8bit.png"""'], {}), "('png/apple8bit.png')\n", (1008, 1029), False, 'import pygame\n'), ((1031, 1068), 'pygame.image.load', 'pygame.image.load', (['"""png/pear8bit.png"""'], {}), "('png/pear8bit.png')\n", (1048, 1068), False, 'import pygame\n'), ((1084, 1120), 'pygame.image.load', 'pygame.image.load', (['"""png/smile01.png"""'], {}), "('png/smile01.png')\n", (1101, 1120), False, 'import pygame\n'), ((1122, 1158), 'pygame.image.load', 'pygame.image.load', (['"""png/smile02.png"""'], {}), "('png/smile02.png')\n", (1139, 1158), False, 'import pygame\n'), ((1174, 1209), 'pygame.image.load', 'pygame.image.load', (['"""png/tile01.png"""'], {}), "('png/tile01.png')\n", (1191, 1209), False, 'import pygame\n'), ((1211, 1246), 'pygame.image.load', 'pygame.image.load', (['"""png/tile02.png"""'], {}), "('png/tile02.png')\n", (1228, 1246), False, 'import pygame\n')]
|
"""Contains the test suite for the context utils."""
import unittest
from typing import Dict, Any
from pyengy.error import PyEngyError
from pyengy.util.context_utils import Context
def get_test_data() -> Dict[str, Any]:
"""
Auxiliary method to return test data args.
:return: Test data.
"""
return {
"root": {
"branch1": {"number": 23, "string": "value"},
"branch2": {"object": object(), "boolean": True}
}
}
class ContextUtilsTestSuite(unittest.TestCase):
"""Test cases for the context class."""
def test_context_init_saves_given_data(self):
"""
- Given: Test data
- When: Initializing context with given data.
- Then: Should return a context with given data.
"""
data = get_test_data()
context = Context(data)
self.assertIs(data, context.data)
def test_context_get_returns_value_at_path(self):
"""
- Given: Context with test data
- When: Calling ``get`` with an existing key path.
- Then: Should return the item at given key path.
"""
data = get_test_data()
context = Context(data)
test_dict = context.get("root.branch1")
self.assertIs(test_dict, data["root"]["branch1"])
def test_context_get_raises_error_if_path_contains_item(self):
"""
- Given: Context with test data
- When: Calling ``get`` with an illegal key path.
- Then: Should raise an error.
"""
data = get_test_data()
context = Context(data)
self.assertRaises(PyEngyError, lambda: context.get("root.branch2.object.bad_key"))
def test_context_get_raises_error_if_missing_key_and_raise_if_missing_is_set(self):
"""
- Given: Context with test data
- When: Calling ``get`` with a missing key path and raise if missing set.
- Then: Should raise an error.
"""
data = get_test_data()
context = Context(data)
self.assertRaises(PyEngyError, lambda: context.get("root.branch2.missing", raise_if_missing=True))
def test_context_get_returns_none_if_missing_key_and_raise_if_missing_is_not_set(self):
"""
- Given: Context with test data
- When: Calling ``get`` with a missing key path and raise if missing not set.
- Then: Should return None.
"""
data = get_test_data()
context = Context(data)
self.assertEqual(None, context.get("root.branch2.missing", raise_if_missing=False))
def test_context_get_returns_value_if_correct_type(self):
"""
- Given: Context with test data
- When: Calling ``get`` with a matching item type.
- Then: Should return expected item.
"""
data = get_test_data()
context = Context(data)
self.assertEqual(data["root"]["branch2"]["object"], context.get("root.branch2.object", item_type=object))
def test_context_get_raises_error_if_mismatched_type(self):
"""
- Given: Context with test data
- When: Calling ``get`` with a mismatched item type.
- Then: Should raise an error.
"""
data = get_test_data()
context = Context(data)
self.assertRaises(PyEngyError, lambda: context.get("root.branch2.boolean", item_type=str))
def test_context_set_changes_value_at_path(self):
"""
- Given: Context with test data
- When: Calling ``set`` with an existing key path.
- Then: Should set the item at given key path.
"""
data = get_test_data()
context = Context(data)
context.set("root.branch1.number", 532)
self.assertEqual(532, data["root"]["branch1"]["number"])
def test_context_set_raises_error_if_path_contains_item(self):
"""
- Given: Context with test data
- When: Calling ``set`` with an illegal key path.
- Then: Should raise an error.
"""
data = get_test_data()
context = Context(data)
self.assertRaises(PyEngyError, lambda: context.get("root.branch2.boolean.bad_key"))
def test_context_set_creates_internal_dicts_if_missing_key(self):
"""
- Given: Context with test data
- When: Calling ``set`` with a missing key path.
- Then: Should set the item at given key path creating any necessary intermediate dicts.
"""
data = get_test_data()
context = Context(data)
context.set("root.branch3.new_key", "new_value")
self.assertEqual("new_value", data["root"]["branch3"]["new_key"])
def test_context_remove_deletes_value_at_path(self):
"""
- Given: Context with test data
- When: Calling ``remove`` with an existing key path.
- Then: Should remove and return the item at given key path.
"""
data = get_test_data()
context = Context(data)
removed = context.remove("root.branch1.string")
self.assertEqual("value", removed)
self.assertDictEqual({"number": 23}, data["root"]["branch1"])
def test_context_remove_raises_error_if_path_contains_item(self):
"""
- Given: Context with test data
- When: Calling ``remove`` with an illegal key path.
- Then: Should raise an error.
"""
data = get_test_data()
context = Context(data)
self.assertRaises(PyEngyError, lambda: context.remove("root.branch2.object.bad_key"))
def test_context_remove_raises_error_if_missing_key_and_raise_if_missing_is_set(self):
"""
- Given: Context with test data
- When: Calling ``remove`` with a missing key path and raise if missing set.
- Then: Should raise an error.
"""
data = get_test_data()
context = Context(data)
self.assertRaises(PyEngyError, lambda: context.remove("root.branch2.missing", raise_if_missing=True))
def test_context_remove_returns_none_if_missing_key_and_raise_if_missing_is_not_set(self):
"""
- Given: Context with test data
- When: Calling ``remove`` with a missing key path and raise if missing not set.
- Then: Should return None.
"""
data = get_test_data()
context = Context(data)
self.assertEqual(None, context.remove("root.branch2.missing", raise_if_missing=False))
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"pyengy.util.context_utils.Context"
] |
[((6455, 6470), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6468, 6470), False, 'import unittest\n'), ((834, 847), 'pyengy.util.context_utils.Context', 'Context', (['data'], {}), '(data)\n', (841, 847), False, 'from pyengy.util.context_utils import Context\n'), ((1176, 1189), 'pyengy.util.context_utils.Context', 'Context', (['data'], {}), '(data)\n', (1183, 1189), False, 'from pyengy.util.context_utils import Context\n'), ((1576, 1589), 'pyengy.util.context_utils.Context', 'Context', (['data'], {}), '(data)\n', (1583, 1589), False, 'from pyengy.util.context_utils import Context\n'), ((2005, 2018), 'pyengy.util.context_utils.Context', 'Context', (['data'], {}), '(data)\n', (2012, 2018), False, 'from pyengy.util.context_utils import Context\n'), ((2455, 2468), 'pyengy.util.context_utils.Context', 'Context', (['data'], {}), '(data)\n', (2462, 2468), False, 'from pyengy.util.context_utils import Context\n'), ((2842, 2855), 'pyengy.util.context_utils.Context', 'Context', (['data'], {}), '(data)\n', (2849, 2855), False, 'from pyengy.util.context_utils import Context\n'), ((3249, 3262), 'pyengy.util.context_utils.Context', 'Context', (['data'], {}), '(data)\n', (3256, 3262), False, 'from pyengy.util.context_utils import Context\n'), ((3645, 3658), 'pyengy.util.context_utils.Context', 'Context', (['data'], {}), '(data)\n', (3652, 3658), False, 'from pyengy.util.context_utils import Context\n'), ((4052, 4065), 'pyengy.util.context_utils.Context', 'Context', (['data'], {}), '(data)\n', (4059, 4065), False, 'from pyengy.util.context_utils import Context\n'), ((4497, 4510), 'pyengy.util.context_utils.Context', 'Context', (['data'], {}), '(data)\n', (4504, 4510), False, 'from pyengy.util.context_utils import Context\n'), ((4946, 4959), 'pyengy.util.context_utils.Context', 'Context', (['data'], {}), '(data)\n', (4953, 4959), False, 'from pyengy.util.context_utils import Context\n'), ((5415, 5428), 'pyengy.util.context_utils.Context', 'Context', (['data'], {}), '(data)\n', (5422, 5428), False, 'from pyengy.util.context_utils import Context\n'), ((5853, 5866), 'pyengy.util.context_utils.Context', 'Context', (['data'], {}), '(data)\n', (5860, 5866), False, 'from pyengy.util.context_utils import Context\n'), ((6312, 6325), 'pyengy.util.context_utils.Context', 'Context', (['data'], {}), '(data)\n', (6319, 6325), False, 'from pyengy.util.context_utils import Context\n')]
|
#!/usr/bin/env python3
import requests, os
url = "http://localhost/upload/"
user = os.getenv('USER')
image_directory = '/home/{}/supplier-data/images/'.format(user)
files = os.listdir(image_directory)
for image_name in files:
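    # Skip hidden files and anything that is not a JPEG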
if not image_name.startswith('.') and 'jpeg' in image_name:
image_path = os.path.join(image_directory, image_name)
with open(image_path, 'rb') as file:
response = requests.post(url, files={'file': file})
|
[
"os.listdir",
"requests.post",
"os.path.join",
"os.getenv"
] |
[((85, 102), 'os.getenv', 'os.getenv', (['"""USER"""'], {}), "('USER')\n", (94, 102), False, 'import requests, os\n'), ((175, 202), 'os.listdir', 'os.listdir', (['image_directory'], {}), '(image_directory)\n', (185, 202), False, 'import requests, os\n'), ((314, 355), 'os.path.join', 'os.path.join', (['image_directory', 'image_name'], {}), '(image_directory, image_name)\n', (326, 355), False, 'import requests, os\n'), ((424, 464), 'requests.post', 'requests.post', (['url'], {'files': "{'file': file}"}), "(url, files={'file': file})\n", (437, 464), False, 'import requests, os\n')]
|
import requests
from bs4 import BeautifulSoup
import stanza
import re
import csv
from tqdm import tqdm
from OSGridConverter import grid2latlong
from nuts import NutsFinder
import pandas as pd
# import json
# import plotly.express as px
# from geojson_rewind import rewind
stanza.download("en") # download English model
nlp = stanza.Pipeline("en") # initialize English neural pipeline
# This finds the details paragraph in a webpage processed with beautiful soup
def details(tag):
return (
type(tag.get("class")) is list
and "nhle-list-entry__outer-container" in tag.get("class")
and "Details" in [item.contents[0] for item in tag.find_all("h2")]
)
# Given a text, this will return a list of the nouns in the text
def extract_nouns(text):
doc = nlp(text)
nouns = []
# For every sentence and word
for sentence in doc.sentences:
# Remove the last sentence with the listing details
if sentence.text.startswith("Listing NGR"):
continue
for word in sentence.words:
# If a non-proper noun or plural noun
if word.xpos == "NN" or word.xpos == "NNS":
# Add to list
nouns.append(word)
count = 0
output = []
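    # Join "compound" dependents onto their head noun, and skip tokens matching
    # "c<digits>" (century/circa markers in the listing text)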
while count < len(nouns):
if nouns[count].deprel != "compound":
if not re.match(r"c\d+", nouns[count].lemma):
output.append(nouns[count].lemma)
else:
if not re.match(r"c\d+", nouns[count].lemma):
if count + 1 < len(nouns):
if nouns[count].head == nouns[count + 1].id:
output.append(nouns[count].lemma + " " + nouns[count + 1].lemma)
count += 1
count += 1
# The return looks like this to remove duplicates
return list(dict.fromkeys(output))
def proccess_url(soup, details):
mydivs = soup.find(details)
details = ""
for item in mydivs.children:
if item.name == "h2" and item.contents[0] == "Details":
details = item.findNext("p").get_text()
break
return extract_nouns(details)
# This finds the grid reference on the page processed with beautiful soup
def find_gr(tag):
return tag.name == "dl" and "National Grid Reference:" in tag.find("dt")
# Given a webpage, this will return the nuts region the castle is in
def nuts(find_gr, soup, nf):
mydivs = soup.find_all(find_gr)
gr = mydivs[0].dd.contents[0].split(", ")[0]
l = grid2latlong(gr)
location = nf.find(lat=l.latitude, lon=l.longitude)
if location == []:
return False
return location[1]["NUTS_ID"]
# These are the names of the NUTS1 regions in England
nuts1_names = ["UKC", "UKD", "UKE", "UKF", "UKG", "UKH", "UKI", "UKJ", "UKK"]
# Used to set up the dataframe
out = {}
out["castle"] = {key: 0 for key in nuts1_names}
# Used to keep track of how many castles are in each area
area_count = {key: 0 for key in nuts1_names}
df = pd.DataFrame.from_dict(out)
# There is an error with maps later than 2013 in that the actual geojson file isn't returned
nf = NutsFinder(year=2013, scale=60)
# The file was generated from a search on Historic England's website
with open("./search_results/NHLEExport.csv", "r") as file:
reader = csv.DictReader(file)
for row in tqdm(reader):
req = requests.get(dict(row)["Link"])
soup = BeautifulSoup(req.content, "html.parser")
nouns = proccess_url(soup, details)
nuts1 = nuts(find_gr, soup, nf)
if nuts1:
area_count[nuts1] += 1
for item in nouns:
# This happens if the word hasn't been seen before
if item not in df:
df[item] = 0
df.loc[nuts1, item] += 1
# Remove column if sum is less than 5, from stackoverflow: https://stackoverflow.com/questions/33990495/delete-a-column-in-a-pandas-dataframe-if-its-sum-is-less-than-x
df.drop([col for col, val in df.sum().items() if val < 5], axis=1, inplace=True)
df2 = pd.DataFrame(area_count, index=[0])
df = df.div(df2.iloc[0], axis="rows")
out_data = df.to_json()
with open("Output.json", "w") as json_file:
json_file.write(out_data)
# Code sourced from https://focaalvarez.medium.com/mapping-the-uk-and-navigating-the-post-code-maze-4898e758b82f
# Load map of the UK - file size optimized
# with open('super-generalised.geojson') as shapefile:
# counties = json.load(shapefile)
# entries=df.columns
# df.index.name = 'nuts118cd'
# df.reset_index(inplace=True)
# #Make the rings clockwise (to make it compatible with plotly)
# counties_corrected=rewind(counties,rfc7946=False)
# for item in tqdm(entries):
# fig = px.choropleth(df, geojson=counties_corrected, locations='nuts118cd', featureidkey="properties.nuts118cd", color=item,
# color_continuous_scale="PurPor", labels={'label name':'label name'}, title=item.capitalize(),
# scope="europe")
# fig.update_geos(fitbounds="locations", visible=False)
# fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0})
# fig.write_image("aio/"+item+".png")
|
[
"pandas.DataFrame",
"tqdm.tqdm",
"pandas.DataFrame.from_dict",
"nuts.NutsFinder",
"csv.DictReader",
"re.match",
"stanza.Pipeline",
"OSGridConverter.grid2latlong",
"stanza.download",
"bs4.BeautifulSoup"
] |
[((274, 295), 'stanza.download', 'stanza.download', (['"""en"""'], {}), "('en')\n", (289, 295), False, 'import stanza\n'), ((328, 349), 'stanza.Pipeline', 'stanza.Pipeline', (['"""en"""'], {}), "('en')\n", (343, 349), False, 'import stanza\n'), ((2986, 3013), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['out'], {}), '(out)\n', (3008, 3013), True, 'import pandas as pd\n'), ((3112, 3143), 'nuts.NutsFinder', 'NutsFinder', ([], {'year': '(2013)', 'scale': '(60)'}), '(year=2013, scale=60)\n', (3122, 3143), False, 'from nuts import NutsFinder\n'), ((4045, 4080), 'pandas.DataFrame', 'pd.DataFrame', (['area_count'], {'index': '[0]'}), '(area_count, index=[0])\n', (4057, 4080), True, 'import pandas as pd\n'), ((2502, 2518), 'OSGridConverter.grid2latlong', 'grid2latlong', (['gr'], {}), '(gr)\n', (2514, 2518), False, 'from OSGridConverter import grid2latlong\n'), ((3286, 3306), 'csv.DictReader', 'csv.DictReader', (['file'], {}), '(file)\n', (3300, 3306), False, 'import csv\n'), ((3322, 3334), 'tqdm.tqdm', 'tqdm', (['reader'], {}), '(reader)\n', (3326, 3334), False, 'from tqdm import tqdm\n'), ((3397, 3438), 'bs4.BeautifulSoup', 'BeautifulSoup', (['req.content', '"""html.parser"""'], {}), "(req.content, 'html.parser')\n", (3410, 3438), False, 'from bs4 import BeautifulSoup\n'), ((1348, 1385), 're.match', 're.match', (['"""c\\\\d+"""', 'nouns[count].lemma'], {}), "('c\\\\d+', nouns[count].lemma)\n", (1356, 1385), False, 'import re\n'), ((1470, 1507), 're.match', 're.match', (['"""c\\\\d+"""', 'nouns[count].lemma'], {}), "('c\\\\d+', nouns[count].lemma)\n", (1478, 1507), False, 'import re\n')]
|
from workflow.WF_9_send_fastas.WF_9_helpers import WorkflowObj9
import time
def run_script_9(day):
print("\n================================\nCompile Qualifying FASTAs\n================================\n\n")
# import relevant data from json file
data_obj = WorkflowObj9()
data_obj.get_json()
# get paths to files
data_obj.get_lst_fasta_files(day)
# build fasta file
data_obj.build_fasta()
print("\n================================\nSUCCESS - END OF SCRIPT\n================================\n\n")
time.sleep(2)
|
[
"workflow.WF_9_send_fastas.WF_9_helpers.WorkflowObj9",
"time.sleep"
] |
[((273, 287), 'workflow.WF_9_send_fastas.WF_9_helpers.WorkflowObj9', 'WorkflowObj9', ([], {}), '()\n', (285, 287), False, 'from workflow.WF_9_send_fastas.WF_9_helpers import WorkflowObj9\n'), ((543, 556), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (553, 556), False, 'import time\n')]
|
from swagger.tests.common import SwaggerSpecsTestCase
import json
class SchemaTest(SwaggerSpecsTestCase):
def _swagger_bodies(self):
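        # Yield (file_path, parsed JSON body) for every spec that declares a 'swagger' version, skipping example files.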
for file_path in self.get_swagger_file_paths(lambda x: 'example' not in x.lower()):
with open(file_path, 'r', encoding='utf-8') as f:
body = json.load(f)
if 'swagger' not in body:
continue
yield file_path, body
def test_Swagger(self):
from swagger.model.schema.swagger import Swagger
parsed = 0
for file_path, body in self._swagger_bodies():
try:
Swagger(body)
except Exception as err:
print(file_path)
raise err
parsed += 1
print(f"Parsed: {parsed}")
def _infoes(self):
for file_path, body in self._swagger_bodies():
if 'info' in body:
yield file_path, body['info']
def test_Info(self):
from swagger.model.schema.info import Info
parsed = 0
for file_path, body in self._infoes():
try:
Info(body)
except Exception as err:
print(file_path)
raise err
parsed += 1
print(f"Parsed: {parsed}")
def _pathItems(self):
for file_path, body in self._swagger_bodies():
for v in body['paths'].values():
yield file_path, v
def test_PathItem(self):
from swagger.model.schema.path_item import PathItem
parsed = 0
for file_path, body in self._pathItems():
try:
PathItem(body)
except Exception as err:
print(file_path)
raise err
parsed += 1
print(f"Parsed: {parsed}")
def _definitions(self):
for file_path, body in self._swagger_bodies():
for v in body.get('definitions', {}).values():
yield file_path, v
def test_Schema_by_definitions(self):
from swagger.model.schema.schema import Schema
parsed = 0
for file_path, body in self._definitions():
try:
Schema(body)
except Exception as err:
print(file_path)
raise err
parsed += 1
print(f"Parsed: {parsed}")
def _parameters(self):
for file_path, body in self._swagger_bodies():
for v in body.get('parameters', {}).values():
yield file_path, v
def test_ParameterType(self):
from swagger.model.schema.parameter import ParameterField
parsed = 0
for file_path, body in self._parameters():
try:
ParameterField(support_reference=True)(body)
except Exception as err:
print(file_path)
raise err
parsed += 1
print(f"Parsed: {parsed}")
def _responses(self):
for file_path, body in self._swagger_bodies():
for v in body.get('responses', {}).values():
yield file_path, v
def test_Response(self):
from swagger.model.schema.response import Response
parsed = 0
for file_path, body in self._responses():
try:
Response(body)
except Exception as err:
print(file_path)
raise err
parsed += 1
print(f"Parsed: {parsed}")
def _response_headers(self):
for file_path, body in self._responses():
for v in body.get('headers', {}).values():
yield file_path, v
def test_Header(self):
from swagger.model.schema.header import Header
parsed = 0
for file_path, body in self._response_headers():
try:
Header(body)
except Exception as err:
print(file_path)
raise err
parsed += 1
print(f"Parsed: {parsed}")
def _response_schema(self):
for file_path, body in self._responses():
if 'schema' in body:
yield file_path, body['schema']
def test_Schema_by_response(self):
from swagger.model.schema.schema import Schema
parsed = 0
for file_path, body in self._response_schema():
try:
Schema(body)
except Exception as err:
print(file_path)
raise err
parsed += 1
print(f"Parsed: {parsed}")
def _securityDefinitions(self):
for file_path, body in self._swagger_bodies():
for v in body.get('securityDefinitions', {}).values():
yield file_path, v
def test_SecuritySchemeType(self):
from swagger.model.schema.security_scheme import SecuritySchemeField
parsed = 0
for file_path, body in self._securityDefinitions():
try:
SecuritySchemeField()(body)
except Exception as err:
print(file_path)
raise err
parsed += 1
print(f"Parsed: {parsed}")
def _tags(self):
for file_path, body in self._swagger_bodies():
for v in body.get('tags', []):
yield file_path, v
def test_Tag(self):
from swagger.model.schema.tag import Tag
parsed = 0
for file_path, body in self._tags():
try:
Tag(body)
except Exception as err:
print(file_path)
raise err
parsed += 1
print(f"Parsed: {parsed}")
def _externalDocs(self):
for file_path, body in self._swagger_bodies():
if 'externalDocs' in body:
yield file_path, body['externalDocs']
def test_ExternalDocumentation(self):
from swagger.model.schema.external_documentation import ExternalDocumentation
parsed = 0
for file_path, body in self._externalDocs():
try:
ExternalDocumentation(body)
except Exception as err:
print(file_path)
raise err
parsed += 1
print(f"Parsed: {parsed}")
def _x_ms_paths(self):
for file_path, body in self._swagger_bodies():
for v in body.get('x-ms-paths', {}).values():
yield file_path, v
def test_PathItem_by_xmsPaths(self):
from swagger.model.schema.path_item import PathItem
parsed = 0
for file_path, body in self._x_ms_paths():
try:
PathItem(body)
except Exception as err:
print(file_path)
raise err
parsed += 1
print(f"Parsed: {parsed}")
def _x_ms_parameterized_host(self):
for file_path, body in self._swagger_bodies():
if 'x-ms-parameterized-host' in body:
yield file_path, body['x-ms-parameterized-host']
def test_XmsParameterizedHost(self):
from swagger.model.schema.x_ms_parameterized_host import XmsParameterizedHost
parsed = 0
for file_path, body in self._x_ms_parameterized_host():
try:
XmsParameterizedHost(body)
except Exception as err:
print(file_path)
raise err
parsed += 1
print(f"Parsed: {parsed}")
|
[
"swagger.model.schema.swagger.Swagger",
"json.load",
"swagger.model.schema.external_documentation.ExternalDocumentation",
"swagger.model.schema.schema.Schema",
"swagger.model.schema.tag.Tag",
"swagger.model.schema.header.Header",
"swagger.model.schema.security_scheme.SecuritySchemeField",
"swagger.model.schema.x_ms_parameterized_host.XmsParameterizedHost",
"swagger.model.schema.response.Response",
"swagger.model.schema.parameter.ParameterField",
"swagger.model.schema.info.Info",
"swagger.model.schema.path_item.PathItem"
] |
[((317, 329), 'json.load', 'json.load', (['f'], {}), '(f)\n', (326, 329), False, 'import json\n'), ((620, 633), 'swagger.model.schema.swagger.Swagger', 'Swagger', (['body'], {}), '(body)\n', (627, 633), False, 'from swagger.model.schema.swagger import Swagger\n'), ((1121, 1131), 'swagger.model.schema.info.Info', 'Info', (['body'], {}), '(body)\n', (1125, 1131), False, 'from swagger.model.schema.info import Info\n'), ((1641, 1655), 'swagger.model.schema.path_item.PathItem', 'PathItem', (['body'], {}), '(body)\n', (1649, 1655), False, 'from swagger.model.schema.path_item import PathItem\n'), ((2191, 2203), 'swagger.model.schema.schema.Schema', 'Schema', (['body'], {}), '(body)\n', (2197, 2203), False, 'from swagger.model.schema.schema import Schema\n'), ((3304, 3318), 'swagger.model.schema.response.Response', 'Response', (['body'], {}), '(body)\n', (3312, 3318), False, 'from swagger.model.schema.response import Response\n'), ((3840, 3852), 'swagger.model.schema.header.Header', 'Header', (['body'], {}), '(body)\n', (3846, 3852), False, 'from swagger.model.schema.header import Header\n'), ((4375, 4387), 'swagger.model.schema.schema.Schema', 'Schema', (['body'], {}), '(body)\n', (4381, 4387), False, 'from swagger.model.schema.schema import Schema\n'), ((5475, 5484), 'swagger.model.schema.tag.Tag', 'Tag', (['body'], {}), '(body)\n', (5478, 5484), False, 'from swagger.model.schema.tag import Tag\n'), ((6052, 6079), 'swagger.model.schema.external_documentation.ExternalDocumentation', 'ExternalDocumentation', (['body'], {}), '(body)\n', (6073, 6079), False, 'from swagger.model.schema.external_documentation import ExternalDocumentation\n'), ((6616, 6630), 'swagger.model.schema.path_item.PathItem', 'PathItem', (['body'], {}), '(body)\n', (6624, 6630), False, 'from swagger.model.schema.path_item import PathItem\n'), ((7241, 7267), 'swagger.model.schema.x_ms_parameterized_host.XmsParameterizedHost', 'XmsParameterizedHost', (['body'], {}), '(body)\n', (7261, 7267), False, 'from swagger.model.schema.x_ms_parameterized_host import XmsParameterizedHost\n'), ((2739, 2777), 'swagger.model.schema.parameter.ParameterField', 'ParameterField', ([], {'support_reference': '(True)'}), '(support_reference=True)\n', (2753, 2777), False, 'from swagger.model.schema.parameter import ParameterField\n'), ((4966, 4987), 'swagger.model.schema.security_scheme.SecuritySchemeField', 'SecuritySchemeField', ([], {}), '()\n', (4985, 4987), False, 'from swagger.model.schema.security_scheme import SecuritySchemeField\n')]
|
import sys
import os
from contextlib import closing
from minerva.metadata.db import MetadataConnection
from minerva.metadata.json import save
def main(uri, path):
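    # Dump every dataset's metadata from the database into one <name>.json file per dataset under path.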
try:
os.makedirs(path)
except OSError:
pass
with closing(MetadataConnection(uri)) as conn:
for ds in conn.load_all():
save(ds, os.path.join(path, ds['name'] + '.json'))
# usage: python -m minerva.tools.db_to_json postgresql://minerva:1@localhost/minerva _meta2
main(*sys.argv[1:])
|
[
"minerva.metadata.db.MetadataConnection",
"os.path.join",
"os.makedirs"
] |
[((183, 200), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (194, 200), False, 'import os\n'), ((252, 275), 'minerva.metadata.db.MetadataConnection', 'MetadataConnection', (['uri'], {}), '(uri)\n', (270, 275), False, 'from minerva.metadata.db import MetadataConnection\n'), ((342, 382), 'os.path.join', 'os.path.join', (['path', "(ds['name'] + '.json')"], {}), "(path, ds['name'] + '.json')\n", (354, 382), False, 'import os\n')]
|
#!/usr/bin/env python3
from itertools import tee, groupby
from typing import NamedTuple, Optional, Sequence, Iterator, List
from pathlib import Path
import json
from datetime import datetime
from .exporthelpers import dal_helper
from .exporthelpers.dal_helper import PathIsh, Res, the, Json
Url = str
# TODO instead, use raw json + add @property?
class Highlight(NamedTuple):
created: datetime
title: str
url: Url
hid: str
hyp_link: Url
    highlight: Optional[str] # might be None if, for instance, we just marked the page with tags; not sure if we want to handle that separately
annotation: Optional[str] # user's comment
tags: Sequence[str]
class Page(NamedTuple):
"""
Represents annotated page along with the highlights
"""
highlights: Sequence[Highlight]
@property
def url(self) -> str:
return the(h.url for h in self.highlights)
@property
def title(self) -> str:
return the(h.title for h in self.highlights)
@property
def created(self) -> datetime:
return min(h.created for h in self.highlights)
class DAL:
def __init__(self, sources: Sequence[PathIsh]) -> None:
pathify = lambda s: s if isinstance(s, Path) else Path(s)
self.sources = list(map(pathify, sources))
def _iter_raw(self):
# TODO FIXME merge all of them carefully
last = max(self.sources)
j = json.loads(last.read_text())
if isinstance(j, list):
# old export format
annotations = j
else:
annotations = j['annotations']
yield from annotations
def highlights(self) -> Iterator[Res[Highlight]]:
for i in self._iter_raw():
try:
yield self._parse_highlight(i)
except Exception as e:
err = RuntimeError(i)
err.__cause__ = e
yield err
def pages(self) -> Iterator[Res[Page]]:
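        # tee the stream so parse errors pass through while valid highlights are grouped by URL into pages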
vit, eit = tee(self.highlights())
values = (r for r in vit if not isinstance(r, Exception))
errors = (r for r in eit if isinstance(r, Exception))
key = lambda h: h.url
for link, git in groupby(sorted(values, key=key), key=key):
group = list(sorted(git, key=lambda h: h.created))
yield Page(group)
yield from errors
def _parse_highlight(self, i: Json) -> Highlight:
[tg] = i['target'] # hopefully it's always single element?
selectors = tg.get('selector', None)
if selectors is None:
# TODO warn?...
selectors = []
highlights = [s['exact'] for s in selectors if 'exact' in s]
        # TODO warn? never happened though
assert len(highlights) <= 1
if len(highlights) == 0:
highlight = None
else:
[highlight] = highlights
content: Optional[str] = None
for s in selectors:
if 'exact' in s:
content = s['exact']
break
page_link = i['uri']
title = i['document'].get('title')
if title is None:
            # sometimes happens, e.g. if it's a plaintext file
page_title = page_link
else:
page_title = ' '.join(title)
hid = i['id']
# TODO check that UTC?
dts = i['created']
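        # drop the colon from the UTC offset ('+00:00' -> '+0000') so strptime's %z can parse it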
created = datetime.strptime(dts[:-3] + dts[-2:], '%Y-%m-%dT%H:%M:%S.%f%z')
txt = i['text']
annotation = None if len(txt.strip()) == 0 else txt
context = i['links']['incontext']
return Highlight(
created=created,
url=page_link,
title=page_title,
hid=hid,
hyp_link=context,
highlight=highlight,
annotation=annotation,
tags=tuple(i['tags']),
)
# TODO would be nice to use some fake data instead? this is only gonna work under an editable install
def _testfile() -> Path:
testdata = Path(__file__).absolute().parent.parent.parent / 'testdata'
[jfile] = testdata.rglob('data/annotations.json')
return jfile
def test() -> None:
dal = DAL([_testfile()])
# at least check it doesn't crash
for p in dal.pages():
assert not isinstance(p, Exception)
p.title
p.url
p.created
len(list(p.highlights))
def demo(dal: DAL) -> None:
# TODO split errors properly? move it to dal_helper?
# highlights = list(w for w in dao.highlights() if not isinstance(w, Exception))
# TODO logger?
vit, eit = tee(dal.pages())
values = (r for r in vit if not isinstance(r, Exception))
errors = (r for r in eit if isinstance(r, Exception))
for e in errors:
print("ERROR! ", e)
pages = list(values)
print(f"Parsed {len(pages)} pages")
from collections import Counter
from pprint import pprint
common = Counter({(x.url, x.title): len(x.highlights) for x in pages}).most_common(10)
print("10 most highlighed pages:")
for (url, title), count in common:
print(f'{count:4d} {url} "{title}"')
if __name__ == '__main__':
dal_helper.main(DAL=DAL, demo=demo)
|
[
"datetime.datetime.strptime",
"pathlib.Path"
] |
[((3366, 3430), 'datetime.datetime.strptime', 'datetime.strptime', (['(dts[:-3] + dts[-2:])', '"""%Y-%m-%dT%H:%M:%S.%f%z"""'], {}), "(dts[:-3] + dts[-2:], '%Y-%m-%dT%H:%M:%S.%f%z')\n", (3383, 3430), False, 'from datetime import datetime\n'), ((1235, 1242), 'pathlib.Path', 'Path', (['s'], {}), '(s)\n', (1239, 1242), False, 'from pathlib import Path\n'), ((3973, 3987), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (3977, 3987), False, 'from pathlib import Path\n')]
|
from google.cloud import bigquery
with open('country.sql', 'r') as myfile:
sql = myfile.read()
# Construct a BigQuery client object.
client = bigquery.Client()
query_job = client.query(sql) # Make an API request.
print("Country update job: {}".format(query_job.job_id))
|
[
"google.cloud.bigquery.Client"
] |
[((149, 166), 'google.cloud.bigquery.Client', 'bigquery.Client', ([], {}), '()\n', (164, 166), False, 'from google.cloud import bigquery\n')]
|
OntCversion = '2.0.0'
from ontology.libont import split
from ontology.builtins import print
def VaasAssert(expr):
if not expr:
raise Exception("AssertError")
def Main():
s = "@abcd@efg@hijklmn@op@q@rst@uvwxy@z@"
res = split(s, '@')
for i in res:
print(i)
VaasAssert(len(res) == 8)
VaasAssert(res[0] == 'abcd')
VaasAssert(res[1] == 'efg')
VaasAssert(res[2] == 'hijklmn')
VaasAssert(res[3] == 'op')
VaasAssert(res[4] == 'q')
VaasAssert(res[5] == 'rst')
VaasAssert(res[6] == 'uvwxy')
VaasAssert(res[7] == 'z')
s = "111@222j#@38u23@sfsdka@jjasdj@"
res = split(s, '@')
for i in res:
print(i)
VaasAssert(len(res) == 5)
VaasAssert(res[0] == '111')
VaasAssert(res[1] == '222j#')
VaasAssert(res[2] == '38u23')
VaasAssert(res[3] == 'sfsdka')
    VaasAssert(res[4] == 'jjasdj')
|
[
"ontology.libont.split",
"ontology.builtins.print"
] |
[((240, 253), 'ontology.libont.split', 'split', (['s', '"""@"""'], {}), "(s, '@')\n", (245, 253), False, 'from ontology.libont import split\n'), ((630, 643), 'ontology.libont.split', 'split', (['s', '"""@"""'], {}), "(s, '@')\n", (635, 643), False, 'from ontology.libont import split\n'), ((280, 288), 'ontology.builtins.print', 'print', (['i'], {}), '(i)\n', (285, 288), False, 'from ontology.builtins import print\n'), ((670, 678), 'ontology.builtins.print', 'print', (['i'], {}), '(i)\n', (675, 678), False, 'from ontology.builtins import print\n')]
|
''' This module contains class WeightFrame that is used as a tab in
    the parameters frame to edit and display weights.
'''
from tkinter.ttk import Button, Frame, LabelFrame
from tkinter import N, W, S, E
from pyDEA.core.utils.dea_utils import create_bounds
from pyDEA.core.gui_modules.text_for_weights_gui import TextForWeights
from pyDEA.core.gui_modules.scrollable_frame_gui import VerticalScrolledFrame
class WeightFrame(Frame):
    ''' This class represents the weights editor. It allows the user to display,
        modify and validate weights.
Attributes:
parent (Tk object): parent of this widget.
current_categories (list of str): list of current categories,
this list is not modified by this class.
params (Parameters): Parameters object with all parameter values.
Note:
This class does not change values in Parameters object
until the weight
restrictions are validated, see on_validate_weights().
weights_status_str (StringVar): StringVar object that is used
to show an error if it occurred after validating weights.
abs_weights (TextForWeights): text widget for displaying and editing
absolute weight restrictions.
virtual_weights (TextForWeights): text widget for displaying and
editing virtual weight restrictions.
price_ratio_weights (TextForWeights): text widget for displaying and
editing price-ratio weight restrictions.
Args:
parent (Tk object): parent of this widget.
current_categories (list of str): list of current categories,
this list is not modified by this class.
params (Parameters): Parameters object with all parameter values
weights_status_str (StringVar): StringVar object that is used to
show an error if it occurred after validating weights.
'''
def __init__(self, parent, current_categories, params, weights_status_str,
*args, **kw):
super().__init__(parent, *args, **kw)
self.parent = parent
self.current_categories = current_categories
self.params = params
self.weights_status_str = weights_status_str
self.abs_weights = None
self.virtual_weights = None
self.price_ratio_weights = None
self.create_widgets()
def create_widgets(self):
''' Creates all widgets.
'''
self.columnconfigure(0, weight=1)
self.rowconfigure(1, weight=1)
validate_btn = Button(self, text='Validate weight restrictions',
command=self.on_validate_weights)
validate_btn.grid(row=0, column=0, padx=10, pady=5, sticky=N+W)
panel = LabelFrame(self, text='Weight restrictions')
panel.columnconfigure(0, weight=1)
panel.rowconfigure(0, weight=1)
panel.grid(row=1, column=0,
padx=5, pady=5, sticky=E+W+S+N)
weight_tab_main = VerticalScrolledFrame(panel)
weight_tab_main.grid(row=0, column=0, sticky=E+W+S+N)
weights_tab = weight_tab_main.interior
self.abs_weights = TextForWeights(weights_tab, 'absolute',
'Input1 <= 0.02',
self.current_categories, self.params,
'ABS_WEIGHT_RESTRICTIONS')
self.abs_weights.grid(row=0, column=0, padx=10, pady=5, sticky=N+W)
self.virtual_weights = TextForWeights(weights_tab, 'virtual',
'Input1 >= 0.34',
self.current_categories,
self.params,
'VIRTUAL_WEIGHT_RESTRICTIONS')
self.virtual_weights.grid(row=1, column=0, padx=10, pady=5, sticky=N+W)
self.price_ratio_weights = TextForWeights(weights_tab, 'price ratio',
'Input1/Input2 <= 5',
self.current_categories,
self.params,
'PRICE_RATIO_RESTRICTIONS',
True)
self.price_ratio_weights.grid(row=2, column=0, padx=10, pady=5,
sticky=N+W)
def add_weights(self):
''' Adds weight restrictions stored in params to the appropriate
text widgets if such weight restrictions can be parsed and
contain categories that are also present in current_categories.
'''
self.remove_all_weights()
self._add_given_weights(self.abs_weights, 'ABS_WEIGHT_RESTRICTIONS')
self._add_given_weights(self.virtual_weights,
'VIRTUAL_WEIGHT_RESTRICTIONS')
self._add_given_weights(self.price_ratio_weights,
'PRICE_RATIO_RESTRICTIONS')
def _add_given_weights(self, text_widget, weights_name):
''' Adds given weight restrictions to a given text widget.
Args:
text_widget (TextForWeights): text widget where weight
restrictions will be added.
weights_name (str): name of the parameter that stores weight
restrictions in params.
Example:
If currently parameters contain absolute weight
restrictions "I1 >= 0.1", then we can add this constraint to the
text widget as follows:
>>> self._add_given_weights(self.abs_weights,
'ABS_WEIGHT_RESTRICTIONS')
'''
weights = self.params.get_set_of_parameters(weights_name)
weights_were_added = False
if weights:
for weight in weights:
weight_as_list = []
weight_as_list.append(weight)
try:
create_bounds(weight_as_list, self.current_categories)
except ValueError:
pass # everything that we cannot parse is ignored
else:
text_widget.insert_weight(weight)
weights_were_added = True
if weights_were_added:
self.parent.change_weight_tab_name('Weights editor*')
def remove_all_weights(self):
''' Removes all weight restrictions from all text widgets.
Note:
Parameters object is not affected by this. All weight
restrictions will still be there. Parameters are updated
only when they are validated,
i.e. when the user presses validate weights button or run
button.
'''
self.parent.change_weight_tab_name('Weights editor')
self.weights_status_str.set('')
self.abs_weights.delete_weights()
self.virtual_weights.delete_weights()
self.price_ratio_weights.delete_weights()
def on_validate_weights(self):
''' Validates weights of all text widgets and displays an error if
some of the weights are invalid.
Note:
Parameters object is modified by this method.
'''
abs_weights_present = self.abs_weights.validate_weights()
virtual_weights_present = self.virtual_weights.validate_weights()
price_ratio_weights_present = self.price_ratio_weights.validate_weights()
if (self.abs_weights.error_tag_exists or
self.virtual_weights.error_tag_exists or
self.price_ratio_weights.error_tag_exists):
self.weights_status_str.set(
'Some of the weight restrictions cannot be parsed. \n'
'For error details, see Weights editor tab.')
else:
self.weights_status_str.set('')
if (abs_weights_present or virtual_weights_present or
price_ratio_weights_present):
self.parent.change_weight_tab_name('Weights editor*')
else:
self.parent.change_weight_tab_name('Weights editor')
|
[
"pyDEA.core.utils.dea_utils.create_bounds",
"pyDEA.core.gui_modules.scrollable_frame_gui.VerticalScrolledFrame",
"tkinter.ttk.Button",
"pyDEA.core.gui_modules.text_for_weights_gui.TextForWeights",
"tkinter.ttk.LabelFrame"
] |
[((2652, 2740), 'tkinter.ttk.Button', 'Button', (['self'], {'text': '"""Validate weight restrictions"""', 'command': 'self.on_validate_weights'}), "(self, text='Validate weight restrictions', command=self.\n on_validate_weights)\n", (2658, 2740), False, 'from tkinter.ttk import Button, Frame, LabelFrame\n'), ((2855, 2899), 'tkinter.ttk.LabelFrame', 'LabelFrame', (['self'], {'text': '"""Weight restrictions"""'}), "(self, text='Weight restrictions')\n", (2865, 2899), False, 'from tkinter.ttk import Button, Frame, LabelFrame\n'), ((3097, 3125), 'pyDEA.core.gui_modules.scrollable_frame_gui.VerticalScrolledFrame', 'VerticalScrolledFrame', (['panel'], {}), '(panel)\n', (3118, 3125), False, 'from pyDEA.core.gui_modules.scrollable_frame_gui import VerticalScrolledFrame\n'), ((3263, 3390), 'pyDEA.core.gui_modules.text_for_weights_gui.TextForWeights', 'TextForWeights', (['weights_tab', '"""absolute"""', '"""Input1 <= 0.02"""', 'self.current_categories', 'self.params', '"""ABS_WEIGHT_RESTRICTIONS"""'], {}), "(weights_tab, 'absolute', 'Input1 <= 0.02', self.\n current_categories, self.params, 'ABS_WEIGHT_RESTRICTIONS')\n", (3277, 3390), False, 'from pyDEA.core.gui_modules.text_for_weights_gui import TextForWeights\n'), ((3619, 3749), 'pyDEA.core.gui_modules.text_for_weights_gui.TextForWeights', 'TextForWeights', (['weights_tab', '"""virtual"""', '"""Input1 >= 0.34"""', 'self.current_categories', 'self.params', '"""VIRTUAL_WEIGHT_RESTRICTIONS"""'], {}), "(weights_tab, 'virtual', 'Input1 >= 0.34', self.\n current_categories, self.params, 'VIRTUAL_WEIGHT_RESTRICTIONS')\n", (3633, 3749), False, 'from pyDEA.core.gui_modules.text_for_weights_gui import TextForWeights\n'), ((4044, 4185), 'pyDEA.core.gui_modules.text_for_weights_gui.TextForWeights', 'TextForWeights', (['weights_tab', '"""price ratio"""', '"""Input1/Input2 <= 5"""', 'self.current_categories', 'self.params', '"""PRICE_RATIO_RESTRICTIONS"""', '(True)'], {}), "(weights_tab, 'price ratio', 'Input1/Input2 <= 5', self.\n current_categories, self.params, 'PRICE_RATIO_RESTRICTIONS', True)\n", (4058, 4185), False, 'from pyDEA.core.gui_modules.text_for_weights_gui import TextForWeights\n'), ((6154, 6208), 'pyDEA.core.utils.dea_utils.create_bounds', 'create_bounds', (['weight_as_list', 'self.current_categories'], {}), '(weight_as_list, self.current_categories)\n', (6167, 6208), False, 'from pyDEA.core.utils.dea_utils import create_bounds\n')]
|
# imports - module imports
from pipupgrade.exception import (
PipupgradeError
)
# imports - test imports
import pytest
def test_pipupgrade_error():
with pytest.raises(PipupgradeError):
raise PipupgradeError
|
[
"pytest.raises"
] |
[((165, 195), 'pytest.raises', 'pytest.raises', (['PipupgradeError'], {}), '(PipupgradeError)\n', (178, 195), False, 'import pytest\n')]
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(
r'^orders/(?P<slug>\w{8})/',
views.OrderDetailView.as_view(), name='order_detail'
),
url(r'^error/$', views.error, name='error')
]
|
[
"django.conf.urls.url"
] |
[((247, 289), 'django.conf.urls.url', 'url', (['"""^error/$"""', 'views.error'], {'name': '"""error"""'}), "('^error/$', views.error, name='error')\n", (250, 289), False, 'from django.conf.urls import url\n')]
|
from __future__ import (absolute_import, division, print_function, unicode_literals)
from builtins import *
import abc
import collections
import errno
import logging
import os
import os.path
import signal
import subprocess
import tempfile
import threading
import time
subprocess_lock = threading.RLock()
def run_cmd(cmdline, env = None, shell = False):
"""
Executes a command, returns the return code and the merged stdout/stderr contents.
"""
global subprocess_lock
try:
fp = tempfile.TemporaryFile()
with subprocess_lock:
child = subprocess.Popen(cmdline,
env = env,
shell = shell,
bufsize = 2,
stdout = fp,
stderr = fp,
)
return_code = child.wait()
fp.seek(0, 0)
output = fp.read()
return return_code, output
except OSError as e:
if e.errno == errno.ENOENT:
e.msg += '\n' + ' '.join(cmdline)
raise
def run_daemon(cmdline, env = None, shell = False):
"""
    Executes a command, returns the subprocess object and the log file name
"""
global subprocess_lock
try:
fp = tempfile.NamedTemporaryFile(delete = False)
with subprocess_lock:
child = subprocess.Popen(cmdline,
env = env,
shell = shell,
bufsize = 2,
stdout = fp,
stderr = fp,
)
return child, fp.name
except OSError as e:
if e.errno == errno.ENOENT:
e.msg += '\n' + ' '.join(cmdline)
raise
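# Usage sketch (assumed, not from the original module):
#   rc, out = run_cmd(['ls', '-l'])              # blocking; returns merged stdout/stderr
#   child, log_path = run_daemon(['sleep', '5'])  # non-blocking; tail log_path later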
def log_file_updates(log_func, filename, mtime = None, offset = None):
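    # Feed any lines appended to filename since offset to log_func; returns the updated (mtime, offset).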
if not log_func or not filename or not os.path.exists(filename):
return mtime, offset
new_mtime = os.stat(filename).st_mtime
    if mtime is None or new_mtime > mtime:
mtime = new_mtime
with open(filename, 'r') as fp:
            fp.seek(offset or 0)
for line in fp:
log_func(line[:-1])
offset = fp.tell()
return mtime, offset
class AsyncFileTailer(threading.Thread):
"""
    Sets up a thread which polls a file for new content and directs the output to a log function.
This is particularly useful when multiprocessing.
Note that wait_for calls start() for you.
"""
daemon = True
    def __init__(self, filename, log_func):
        super(AsyncFileTailer, self).__init__()
        self.filename = filename
        self.log_func = log_func
        # run() reads these before the first update, so they must be initialised here
        self.mtime = None
        self.offset = 0
def run(self):
while True:
self.mtime, self.offset = log_file_updates(
self.log_func,
self.filename,
self.mtime,
self.offset
)
time.sleep(0.1)
class Service(object):
global_log = True
    def __init__(self, name, *args, **kwargs):
        self.name = name
        self.args = args
        self.kwargs = kwargs
        self.child = None
        self.log_file = None
        self.log_offset = 0
        self.log_mtime = None
        self.shutdown = None
def poll(self):
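        # Forward any new log output, then raise if the child exited while we were not shutting down.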
if not self.child:
return
logger = logging.getLogger(self.name)
self.log_mtime, self.log_offset = log_file_updates(
logger.info,
self.log_file,
self.log_mtime,
self.log_offset,
)
self.update_log()
exit_code = self.child.poll()
if exit_code and not self.shutdown:
logging.critical('Service failed: %d', exit_code)
raise ServiceFailureError(self.name)
def update_log(self):
if not self.global_log:
return
def setup_service(self):
pass
@abc.abstractmethod
def cmd(self):
pass
def teardown_service(self):
pass
def validate_startup(self):
pass
class Cluster(threading.Thread):
def __init__(self, *args, **kwargs):
super(Cluster, self).__init__()
self.args = args
self.kwargs = kwargs
self.services = {}
self.running = True
self.kill = True
self.setup_cluster()
def run(self):
while self.running:
self.check_services()
time.sleep(0.1)
if self.kill:
self.kill_all()
else:
self.stop_all()
    def join(self, timeout = None, kill = True):
        # timeout is assumed to be wall-clock seconds
        end_time = time.time() + (timeout or float("inf"))
        self.kill = kill
        self.running = False
        while len(self.services) > 0 and time.time() < end_time:
            time.sleep(0.1)
def check_services(self):
for name, service in self.services.items():
service.poll()
def setup_cluster(self):
pass
def add_service(self, service, timeout = None):
self.run_service(service)
self.services[service.name] = service
self.validate_services([service], timeout)
def add_services(self, services, timeout = None):
for service in services:
self.run_service(service)
self.services[service.name] = service
self.validate_services(services, timeout)
def run_service(self, service):
service.setup_service()
cmd, env = service.cmd()
service.child, service.log_file = run_daemon(cmd, env)
    def validate_services(self, services, timeout):
        # timeout is assumed to be wall-clock seconds (None is treated as 0)
        end_time = time.time() + (timeout or 0)
services = collections.deque(services)
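        # round-robin: re-queue services that have not reported a successful startup until the deadline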
        while services and time.time() < end_time:
service = services.popleft()
if not service.validate_startup():
services.append(service)
return len(services) == 0
def kill_all(self):
self.signal_all(signal.SIGKILL)
def stop_all(self):
self.signal_all(signal.SIGTERM)
def signal_all(self, signal):
for name in self.services:
self.signal(name, signal)
def signal(self, name, signal):
if self.services[name].child:
self.services[name].child.send_signal(signal)
|
[
"tempfile.NamedTemporaryFile",
"subprocess.Popen",
"os.stat",
"threading.RLock",
"collections.deque",
"os.path.exists",
"time.sleep",
"tempfile.TemporaryFile",
"logging.critical",
"logging.getLogger"
] |
[((288, 305), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (303, 305), False, 'import threading\n'), ((508, 532), 'tempfile.TemporaryFile', 'tempfile.TemporaryFile', ([], {}), '()\n', (530, 532), False, 'import tempfile\n'), ((1208, 1249), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (1235, 1249), False, 'import tempfile\n'), ((1835, 1852), 'os.stat', 'os.stat', (['filename'], {}), '(filename)\n', (1842, 1852), False, 'import os\n'), ((3170, 3198), 'logging.getLogger', 'logging.getLogger', (['self.name'], {}), '(self.name)\n', (3187, 3198), False, 'import logging\n'), ((5452, 5479), 'collections.deque', 'collections.deque', (['services'], {}), '(services)\n', (5469, 5479), False, 'import collections\n'), ((583, 668), 'subprocess.Popen', 'subprocess.Popen', (['cmdline'], {'env': 'env', 'shell': 'shell', 'bufsize': '(2)', 'stdout': 'fp', 'stderr': 'fp'}), '(cmdline, env=env, shell=shell, bufsize=2, stdout=fp, stderr=fp\n )\n', (599, 668), False, 'import subprocess\n'), ((1302, 1387), 'subprocess.Popen', 'subprocess.Popen', (['cmdline'], {'env': 'env', 'shell': 'shell', 'bufsize': '(2)', 'stdout': 'fp', 'stderr': 'fp'}), '(cmdline, env=env, shell=shell, bufsize=2, stdout=fp, stderr=fp\n )\n', (1318, 1387), False, 'import subprocess\n'), ((1763, 1787), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (1777, 1787), False, 'import os\n'), ((2779, 2794), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2789, 2794), False, 'import time\n'), ((3500, 3549), 'logging.critical', 'logging.critical', (['"""Service failed: %d"""', 'exit_code'], {}), "('Service failed: %d', exit_code)\n", (3516, 3549), False, 'import logging\n'), ((4244, 4259), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (4254, 4259), False, 'import time\n'), ((4590, 4605), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (4600, 4605), False, 'import time\n')]
|