code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
from Dialogflow_Api import rispondimi
from collegamentoSito import inserisci_utente
from Nutrition import get_food, traduzione
import re
# Food-category keys probed by controllo_tipo_cibo when unpacking the
# nested "Cibo" entity returned by Dialogflow.
tipo_cibo = ["frutta", "carne", "verdure", "ortaggi", "primi_piatti", "legumi"]
"""
controllo_intent(query_result, utente)--> text_respose
prende il risultato della query e lo confronta con i possibili intenti,
se l'intento risulta essere "cibo", si andrà nella sezione di rilevazione cibo,
altrimenti si passa al controllo sulla modifica dei dati dell'utente.
in caso non ci sia una intento adatto oppure alcun intento, si riceverà
la risposta negativa.
"""
def controllo_intent(query_result, utente):
    """Route a Dialogflow query result to the matching intent handler.

    Food-related intents are handled for everyone; greeting, hunger and
    profile-modification intents require a registered user (``utente``
    truthy).  Falls back to the fulfillment text, or a fixed negative
    reply when Dialogflow produced no text at all.
    """
    intent = query_result.intent.display_name
    text = query_result.fulfillment_text
    if intent == "Cibo":
        return rilevazione_cibo(utente, query_result)
    if utente:
        if intent == "Saluto":
            return text + " " + utente.get_nome()
        if intent == "Fame":
            return controllo_fame(utente, text)
        # Profile-modification intents share the (utente, query_result)
        # signature, so dispatch them through a table.
        modifiche = {
            "Modifica_nome": modifica_nome,
            "Modifica_peso": modifica_peso,
            "Modifica_altezza": modifica_altezza,
            "Modifica_sesso": modifica_sesso,
            "Modifica_data": modifica_data,
            "Modifica_attività": modifica_attività,
        }
        handler = modifiche.get(intent)
        if handler is not None:
            return handler(utente, query_result)
    if text == "":
        return "Al momento non sono in grado di risponderti."
    return text
"""
controllo_fame(utente, text)--> text_respose
prende un utente e le sue patologie, ed eleabora la risposta
da dare all'utente in merito alla possibilità di mangiare oppure
no un determinato cibo.
"""
def controllo_fame(utente, text):
    """Answer a "Fame" (hunger) intent, adapting the snack suggestion to
    the user's pathologies.

    Pathologies are checked in priority order; the first one present wins
    and its dedicated Dialogflow query supplies the answer.  With no
    pathology, the default fulfillment text is returned unchanged.
    """
    patologie = (
        (utente.get_anemia_sideropenica, "spuntino ferro"),
        (utente.get_iper_tens, "spuntino ipertensione"),
        (utente.get_nefropatia, "spuntino nefropatia"),
    )
    for presente, query in patologie:
        if presente():
            return rispondimi(query).fulfillment_text
    return text
def modifica_nome(utente, result):
    """Update the user's first name from a "Modifica_nome" query result.

    Extracts the Dialogflow ``given-name`` parameter; a missing or empty
    name yields an error message, otherwise the user record is updated
    and persisted.
    """
    try:
        nuovo_nome = result.parameters.fields["given-name"].string_value
    except KeyError:
        nuovo_nome = ""
    if not nuovo_nome:
        return "Inserisci correttamente il tuo primo nome."
    utente.set_nome(nuovo_nome)
    inserisci_utente(utente)
    return result.fulfillment_text
def modifica_peso(utente, result):
    """Update the user's weight from a "Modifica_peso" query result.

    Only accepts weights expressed in kg within the plausible range
    (40..200); out-of-range or differently-united values produce an
    error message instead of mutating the user.
    """
    try:
        campi = result.parameters.fields["unit-weight"].struct_value.fields
        unit = campi["unit"].string_value
        amount = campi["amount"].number_value
    except KeyError:
        return "Inserisci correttamente il peso, ad esempio '70kg'."
    if unit != "kg":
        return "Ti preghiamo di inserire il peso in kg."
    if not 39 < amount < 201:
        return "Inserisci il tuo peso corretto."
    utente.set_peso(amount)
    inserisci_utente(utente)
    return result.fulfillment_text
def modifica_altezza(utente, result):
    """Update the user's height from a "Modifica_altezza" query result.

    Accepts heights in cm or m.  Heights given in metres are converted to
    centimetres before being stored, so ``utente`` always holds the height
    in a single unit (cm).  The original code stored the raw metre value
    (e.g. 1.8) in the same field that the cm branch filled with e.g. 180,
    leaving the stored unit ambiguous for downstream consumers.

    Returns the fulfillment text on success, or an Italian error message
    when the parameter is missing, uses an unsupported unit, or falls
    outside the plausible range (110..230 cm).
    """
    try:
        campi = result.parameters.fields["unit-length"].struct_value.fields
        unit = campi["unit"].string_value
        amount = campi["amount"].number_value
    except KeyError:
        return "Inserisci correttamente la tua altezza, ad esempio '180cm'."
    if unit == "m":
        # Normalise metres to centimetres so only one unit is ever stored.
        amount *= 100
        unit = "cm"
    if unit != "cm":
        return "Inserisci la tua altezza in cm o in m."
    # 1.09 m < h < 2.31 m maps to exactly the same bounds after conversion.
    if not 109 < amount < 231:
        return "Inserisci la tua altezza corretta."
    utente.set_altezza(amount)
    inserisci_utente(utente)
    return result.fulfillment_text
def modifica_sesso(utente, result):
    """Update the user's sex from a "Modifica_sesso" query result.

    A missing ``sesso`` parameter produces an error message; otherwise
    the user record is updated and persisted.
    """
    try:
        sesso = result.parameters.fields["sesso"].string_value
    except KeyError:
        return "Inserisci correttamente il tuo sesso, ad esempio 'maschio'."
    utente.set_sesso(sesso)
    inserisci_utente(utente)
    return result.fulfillment_text
def modifica_data(utente, result):
    """Update the user's birth date from a "Modifica_data" query result.

    Dialogflow returns ISO timestamps ("YYYY-MM-DDTHH:MM:SS..."); only
    the date portion before the 'T' separator is kept.  On success the
    reply also reports the user's computed age.
    """
    errore = "Inserire correttamente la data, ad esempio: '01/01/90'"
    try:
        data = result.parameters.fields["date"].string_value
    except KeyError:
        return errore
    if not data:
        return errore
    # Keep only the calendar-date part of the ISO timestamp.
    utente.set_data(re.split("T", data)[0])
    inserisci_utente(utente)
    return result.fulfillment_text + " " + str(utente.get_eta()) + " anni."
def modifica_attività(utente, result):
    """Update the user's activity level from a "Modifica_attività" result.

    A missing parameter yields a short error; an empty one lists the
    accepted activity levels.  Otherwise the user record is updated and
    persisted.
    """
    try:
        attivita = result.parameters.fields["attivita"].string_value
    except KeyError:
        return "Inserisci correttamente l'attività."
    if not attivita:
        return (
            "Inserisci correttamente l'attività tra queste: 'Sedentaria', 'Leggera', 'Moderata', 'Attiva' "
            "o 'Molto attiva'."
        )
    utente.set_attivita(attivita)
    inserisci_utente(utente)
    return result.fulfillment_text
"""
rilevazione_cibo(utente, result)--> respose
identifica se l'intento dell'utente è parlare di cibo, se è così restituisce il risultato,
altrimenti restituisce stringhe di risposta negative sulla scorretta rilevazione del cibo.
"""
def rilevazione_cibo(utente, result):
    """Handle a "Cibo" intent: look up nutrition info for the detected food.

    The food name is extracted from the query result; the nutrition lookup
    is retried with a translated name when the first attempt fails.
    Registered users also receive dietary advice via ``utente.can_eat``.
    """
    cibo = controllo_tipo_cibo(result)
    if not cibo:
        return "Spiacente, non abbiamo informazione relative a questo cibo."
    # Retry with the translated name when the direct lookup fails.
    food = get_food(cibo) or get_food(traduzione(cibo))
    if not food:
        return "Il cibo non è stato riconosciuto correttamente."
    if not utente:
        return (
            "Non ti sei ancora registrato, non posso darti consigli alimentari.\nPer registrarti utilizza il"
            " comando /new.\nInformazioni su: " + str(food)
        )
    return str(food) + "\n\n" + utente.can_eat(food)
def controllo_tipo_cibo(result):
    """Extract the detected food name from a Dialogflow query result.

    First tries the plain string form of the "Cibo" entity; when that is
    empty, probes each known food category (``tipo_cibo``) inside the
    nested struct form.  Returns the food name, or False when nothing
    was recognised.

    Robustness fix: the original only caught IndexError, so a result
    whose parameters lack the "Cibo" key (or the nested category key)
    raised an uncaught KeyError; both are now handled.
    """
    food = ""
    try:
        food = result.parameters.fields["Cibo"].list_value.values[0].string_value
    except (IndexError, KeyError):
        print("Problema riconoscimento tipo di cibo!")
    if food != "":
        return food
    for cibo in tipo_cibo:
        try:
            food = result.parameters.fields["Cibo"].list_value.values[0].struct_value.fields[cibo].string_value
        except (IndexError, KeyError):
            continue
        if food != "":
            return food
    return False
| [
"re.split",
"collegamentoSito.inserisci_utente",
"Nutrition.traduzione",
"Nutrition.get_food",
"Dialogflow_Api.rispondimi"
] | [((2545, 2569), 'collegamentoSito.inserisci_utente', 'inserisci_utente', (['utente'], {}), '(utente)\n', (2561, 2569), False, 'from collegamentoSito import inserisci_utente\n'), ((4345, 4369), 'collegamentoSito.inserisci_utente', 'inserisci_utente', (['utente'], {}), '(utente)\n', (4361, 4369), False, 'from collegamentoSito import inserisci_utente\n'), ((5791, 5805), 'Nutrition.get_food', 'get_food', (['cibo'], {}), '(cibo)\n', (5799, 5805), False, 'from Nutrition import get_food, traduzione\n'), ((3086, 3110), 'collegamentoSito.inserisci_utente', 'inserisci_utente', (['utente'], {}), '(utente)\n', (3102, 3110), False, 'from collegamentoSito import inserisci_utente\n'), ((4782, 4806), 'collegamentoSito.inserisci_utente', 'inserisci_utente', (['utente'], {}), '(utente)\n', (4798, 4806), False, 'from collegamentoSito import inserisci_utente\n'), ((5307, 5331), 'collegamentoSito.inserisci_utente', 'inserisci_utente', (['utente'], {}), '(utente)\n', (5323, 5331), False, 'from collegamentoSito import inserisci_utente\n'), ((1968, 1996), 'Dialogflow_Api.rispondimi', 'rispondimi', (['"""spuntino ferro"""'], {}), "('spuntino ferro')\n", (1978, 1996), False, 'from Dialogflow_Api import rispondimi\n'), ((2060, 2095), 'Dialogflow_Api.rispondimi', 'rispondimi', (['"""spuntino ipertensione"""'], {}), "('spuntino ipertensione')\n", (2070, 2095), False, 'from Dialogflow_Api import rispondimi\n'), ((2160, 2193), 'Dialogflow_Api.rispondimi', 'rispondimi', (['"""spuntino nefropatia"""'], {}), "('spuntino nefropatia')\n", (2170, 2193), False, 'from Dialogflow_Api import rispondimi\n'), ((3655, 3679), 'collegamentoSito.inserisci_utente', 'inserisci_utente', (['utente'], {}), '(utente)\n', (3671, 3679), False, 'from collegamentoSito import inserisci_utente\n'), ((4721, 4740), 're.split', 're.split', (['"""T"""', 'data'], {}), "('T', data)\n", (4729, 4740), False, 'import re\n'), ((5847, 5863), 'Nutrition.traduzione', 'traduzione', (['cibo'], {}), '(cibo)\n', (5857, 5863), False, 
'from Nutrition import get_food, traduzione\n'), ((3900, 3924), 'collegamentoSito.inserisci_utente', 'inserisci_utente', (['utente'], {}), '(utente)\n', (3916, 3924), False, 'from collegamentoSito import inserisci_utente\n')] |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import numpy as np
import os
import torch
from torch.autograd import Variable
import torch.optim as optim
from torch.utils.data import DataLoader
import sys
from tqdm import tqdm
from architectures import Generator
import kaolin as kal
"""
Commandline arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument('-expid', type=str, default='GAN', help='Unique experiment identifier.')
parser.add_argument('--device', type=str, default='cuda', help='Device to use')
parser.add_argument('-batchsize', type=int, default=50, help='Batch size.')
args = parser.parse_args()
gen = Generator().to(args.device)
gen.load_state_dict(torch.load('log/{0}/gen.pth'.format(args.expid)))
gen.eval()
z = torch.normal(torch.zeros(args.batchsize, 200), torch.ones(args.batchsize, 200)*.33).to(args.device)
fake_voxels = gen(z)[:,0]
for i,model in enumerate(fake_voxels):
model = model[:-2,:-2,:-2]
model = kal.rep.voxel.max_connected(model, .5)
verts, faces = kal.conversion.voxel.to_mesh_quad(model)
mesh = kal.rep.QuadMesh.from_tensors( verts, faces)
mesh.laplacian_smoothing(iterations = 3)
mesh.show() | [
"architectures.Generator",
"argparse.ArgumentParser",
"kaolin.rep.voxel.max_connected",
"kaolin.rep.QuadMesh.from_tensors",
"kaolin.conversion.voxel.to_mesh_quad",
"torch.zeros",
"torch.ones"
] | [((914, 939), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (937, 939), False, 'import argparse\n'), ((1542, 1581), 'kaolin.rep.voxel.max_connected', 'kal.rep.voxel.max_connected', (['model', '(0.5)'], {}), '(model, 0.5)\n', (1569, 1581), True, 'import kaolin as kal\n'), ((1597, 1637), 'kaolin.conversion.voxel.to_mesh_quad', 'kal.conversion.voxel.to_mesh_quad', (['model'], {}), '(model)\n', (1630, 1637), True, 'import kaolin as kal\n'), ((1646, 1689), 'kaolin.rep.QuadMesh.from_tensors', 'kal.rep.QuadMesh.from_tensors', (['verts', 'faces'], {}), '(verts, faces)\n', (1675, 1689), True, 'import kaolin as kal\n'), ((1224, 1235), 'architectures.Generator', 'Generator', ([], {}), '()\n', (1233, 1235), False, 'from architectures import Generator\n'), ((1351, 1383), 'torch.zeros', 'torch.zeros', (['args.batchsize', '(200)'], {}), '(args.batchsize, 200)\n', (1362, 1383), False, 'import torch\n'), ((1385, 1416), 'torch.ones', 'torch.ones', (['args.batchsize', '(200)'], {}), '(args.batchsize, 200)\n', (1395, 1416), False, 'import torch\n')] |
"""Defines procedures for training, and evaluation automatic camfi annotation models,
and for using them for making automatic annotations (inference). Depends on camfi.util,
camfi.datamodel.autoannotation, camfi.datamodel.geometry, camfi.datamode.via, as well
as ._torchutils and ._models."""
from datetime import datetime
import itertools
from math import pi
from pathlib import Path
from typing import Any, Callable, Optional, Union
from sys import stderr
import numpy as np
from pydantic import (
BaseModel,
DirectoryPath,
NonNegativeInt,
NonNegativeFloat,
PositiveFloat,
PositiveInt,
ValidationError,
validator,
)
from scipy import sparse
import torch
from torch.utils.data import DataLoader
from torchvision.models.detection.mask_rcnn import MaskRCNN
from tqdm import tqdm, trange
from camfi.datamodel.autoannotation import CamfiDataset, Prediction
from camfi.datamodel.geometry import (
BoundingBox,
CircleShapeAttributes,
PolylineShapeAttributes,
)
from camfi.datamodel.via import (
ViaFileAttributes,
ViaMetadata,
ViaProject,
ViaRegion,
ViaRegionAttributes,
)
from camfi.models import model_urls
from camfi.util import (
endpoint_truncate,
smallest_enclosing_circle,
weighted_intersection_over_minimum,
Field,
)
from ._torchutils import collate_fn, get_model_instance_segmentation, train_one_epoch
def load_annotation_model(model_path_or_url: Union[Path, str]) -> MaskRCNN:
    """Loads a camfi annotation model.

    Accepts a Path to a local .pth file, a model name defined in
    camfi.models.model_urls, or a URL str pointing to a downloadable
    model.

    Parameters
    ----------
    model_path_or_url : Union[Path, str]
        Path to .pth file specifying model parameters, model name defined in
        camfi.models.model_urls, or url to model to download from the internet.

    Returns
    -------
    model : MaskRCNN
        Instance segmentation model used for automatic annotation.
    """
    print(f"Loading model: {model_path_or_url}", file=stderr)
    model = get_model_instance_segmentation(2, pretrained=False)
    if isinstance(model_path_or_url, Path):
        state_dict = torch.load(model_path_or_url)
    else:
        # Resolve known model names to their download URLs; anything else
        # is treated as a URL directly.
        if model_path_or_url in model_urls:
            model_path_or_url = model_urls[model_path_or_url]
        state_dict = torch.hub.load_state_dict_from_url(model_path_or_url)
    model.load_state_dict(state_dict)
    return model
def copy_annotation_model(model: MaskRCNN) -> MaskRCNN:
    """Copies a camfi annotation model.

    Builds a fresh model with the same architecture and loads the source
    model's parameters into it.

    Parameters
    ----------
    model : MaskRCNN
        Model to copy.

    Returns
    -------
    model_copy : MaskRCNN
        Copy of model.
    """
    duplicate = get_model_instance_segmentation(2, pretrained=False)
    duplicate.load_state_dict(model.state_dict())
    return duplicate
def train_model(
    dataset: CamfiDataset,
    load_pretrained_model: Optional[Union[Path, str]] = None,
    device: Union[str, torch.device] = "cpu",
    batch_size: int = 5,
    num_workers: int = 2,
    num_epochs: int = 10,
    outdir: DirectoryPath = Path(),
    model_name: Optional[str] = None,
    save_intermediate: bool = False,
) -> Path:
    """Trains a camfi instance segmentation annotation model on specified dataset,
    saving to trained model to outdir.

    Parameters
    ----------
    dataset : CamfiDataset
        Dataset on which to train the model.
    load_pretrained_model : Optional[Union[Path, str]]
        Path or url to model parameters file. If set, will load the pretrained
        parameters. By default, will start with a model pre-trained on the Microsoft
        COCO dataset.
    device : Union[str, torch.device]
        E.g. "cpu" or "cuda". Training is typically much faster on a GPU. Use "cuda" for
        Nvidia GPUs.
    batch_size : int
        Number of images to load at once.
    num_workers : int
        Number of worker processes for data loader to spawn.
    num_epochs : int
        Number of epochs to train.
    outdir : DirectoryPath
        Path to directory where to save model(s).
    model_name : Optional[str]
        Identifier to include in model save file. By default the current date in
        YYYYmmdd format.
    save_intermediate : bool
        If True, model is saved after each epoch, not just after all epoch are complete.
        This is recommended, especially if training on a service which could terminate
        unpredictably (e.g. Google Colab).

    Returns
    -------
    model_path : Path
        Path to saved model.
    """
    # Parameter setting
    device = torch.device(device)
    if model_name is None:
        model_name = datetime.now().strftime("%Y%m%d")
    # Initialise data_loader
    data_loader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        collate_fn=collate_fn,
    )
    # Initialise model: either resume from a pretrained checkpoint or start
    # from the default COCO-pretrained backbone.
    if load_pretrained_model is not None:
        model = load_annotation_model(load_pretrained_model)
    else:
        model = get_model_instance_segmentation(2)
    model.to(device)
    # Initialise optimiser and lr_scheduler (SGD with momentum; LR decays
    # by 10x every 3 epochs).
    params = [p for p in model.parameters() if p.requires_grad]
    optimiser = torch.optim.SGD(params, lr=0.005, momentum=0.9, weight_decay=0.0005)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimiser, step_size=3, gamma=0.1)
    # Train the model
    for epoch in range(num_epochs):
        # train for one epoch, printing every 10 iterations
        train_one_epoch(model, optimiser, data_loader, device, epoch, print_freq=10)
        # update the learning rate
        lr_scheduler.step()
        if save_intermediate or epoch == num_epochs - 1:
            save_path = outdir / f"{model_name}_{epoch}_model.pth"
            torch.save(model.state_dict(), save_path)
    # NOTE(review): if num_epochs < 1 the loop never runs, so save_path is
    # never bound and the lines below raise NameError — confirm callers
    # always pass num_epochs >= 1.
    print(f"Training complete. Model saved at {save_path}")
    return save_path
class Annotator(BaseModel):
    """Provides methods for automatically annotating images of flying insects using a
    pre-trained instance segmentation model.

    Parameters
    ----------
    dataset : CamfiDataset
        Dataset to annotate.
    model : Union[str, Path, MaskRCNN]
        Either a path to state dict file which defines the segmentation model, or a url
        pointing to a model to download, or one of the model names defined in
        camfi.models.model_urls.
        Alternatively, a MaskRCNN instance can be given directly.
    device : Union[str, torch.device]
        Specifies device to run inference on. E.g. set to "cuda" to use an Nvidia GPU.
    backup_device : Optional[Union[str, torch.device]]
        Specifies device to run inference on when a runtime error occurs while using
        device. Probably only makes sense to set this to "cpu" if device="cuda". This
        option enables the annotator to leverage a GPU with limited memory capacity
        without crashing if a difficult image is encountered.
    backup_model: Optional[MaskRCNN]
        Defines the backup model. Will be automatically generated if backup_device is
        set. Should not be set manually.
    split_angle : PositiveFloat
        Approximate maximum angle between polyline segments in degrees. Note that this
        will immediately be converted to radians upon instantiation of Annotator.
    poly_order : PositiveInt
        Order of polynomial used for fitting motion blur paths.
    endpoint_method : Callable[[np.ndarray, ...], tuple[NonNegativeInt, NonNegativeInt]]
        Method to find endpoints of motion blurs. The first argument to this method
        should be a cropped mask np.ndarray.
    endpoint_extra_args : list[Any]
        Extra arguments to pass to endpoint_method.
    score_thresh : float
        Score threshold between 0.0 and 1.0 for automatic annotations to be kept.
    overlap_thresh : float
        Minimum proportion of overlap (weighted intersection over minimum) between two
        instance segmentation masks to infer that one of the masks should be discarded.
    edge_thresh : NonNegativeInt
        Minimum distance an annotation has to be from the edge of the image before it is
        converted from a polyline annotation to a circle annotation.
    """

    dataset: CamfiDataset
    # "release" is resolved to a MaskRCNN by the get_model validator below.
    model: MaskRCNN = "release"
    device: Union[str, torch.device] = "cpu"
    backup_device: Optional[Union[str, torch.device]] = None
    backup_model: Optional[MaskRCNN] = None
    split_angle: PositiveFloat = 15.0  # degrees here; radians after validation
    poly_order: PositiveInt = 2
    endpoint_method: Callable[
        ..., tuple[NonNegativeInt, NonNegativeInt]
    ] = endpoint_truncate
    endpoint_extra_args: list[Any] = [10]
    score_thresh: float = 0.4
    overlap_thresh: float = 0.4
    edge_thresh: NonNegativeInt = 20
    # Counter of how many times inference fell back to the backup device.
    backup_model_used: int = 0

    class Config:
        # MaskRCNN / torch.device are not pydantic types.
        arbitrary_types_allowed = True

    @validator("model", pre=True, always=True)
    def get_model(cls, v):
        # Accept a ready MaskRCNN instance, or resolve a path/url/name.
        if isinstance(v, MaskRCNN):
            return v
        else:
            return load_annotation_model(v)

    @validator("device", always=True)
    def put_model_on_device_and_set_to_eval(cls, v, values):
        # Side effect: moves the (already validated) model and sets eval mode.
        print(f"Putting model on device: {v}", file=stderr)
        v = torch.device(v)
        values["model"].to(v)
        values["model"].eval()
        return v

    @validator("backup_model", pre=True, always=True)
    def copy_model_to_backup_device(cls, v, values):
        # Derived field: built from model + backup_device, never user-supplied.
        assert v is None, "Should not set 'backup_model'. It will be set automatically"
        if "backup_device" in values and values["backup_device"] is not None:
            v = copy_annotation_model(values["model"])
            v.to(values["backup_device"])
            v.eval()
        return v

    @validator("split_angle", always=True)
    def convert_split_angle_to_radians(cls, v):
        # User supplies degrees; internally radians are used.
        return v * pi / 180.0

    def get_prediction(self, img_idx: NonNegativeInt) -> Prediction:
        """Run prediction on a single image. First tries to use the model on self.device,
        and falls back to the model on self.backup_device if a RuntimeError is caught
        (if set).

        Parameters
        ----------
        img_idx: int
            Index of image in via project.

        Returns
        -------
        prediction: Prediction
            Output of model prediction.
        """
        try:
            img, _ = self.dataset[img_idx]
        except (OSError, RuntimeError) as e:
            # Unreadable image: report and return an empty prediction rather
            # than aborting the whole annotation run.
            print(
                f"Error loading {self.dataset.metadata(img_idx).filename}. {e!r}. Skipping.",
                file=stderr,
            )
            return Prediction.empty()
        with torch.no_grad():
            try:
                prediction = self.model([img.to(self.device)])[0]
            except RuntimeError:
                # Typically out-of-memory on the primary device; retry on the
                # backup model if one was configured, else re-raise.
                if self.backup_model:
                    prediction = self.backup_model([img.to(self.backup_device)])[0]
                    self.backup_model_used += 1
                else:
                    raise
        del img
        return Prediction.from_tensor_dict(prediction)

    def filter_annotations(self, prediction: Prediction) -> Prediction:
        """Applies self.score_thresh and self.overlap_thresh to filter out poor quality
        annotations.

        Parameters
        ----------
        prediction : Prediction
            Output of model prediction.

        Returns
        -------
        filtered_prediction : Prediction
            Filtered prediction.
        """
        # Remove predictions with below-threshold score
        prediction = prediction.filter_by_score(self.score_thresh)
        n_predictions = len(prediction)
        if n_predictions == 0:
            return prediction
        # Calculate mask overlaps for all pairs of predicted instances
        # (symmetric matrix; only computed when bounding boxes overlap).
        mask_overlaps = np.zeros((n_predictions, n_predictions), dtype="f4")
        for i, j in itertools.combinations(range(n_predictions), 2):
            if prediction.boxes[i].overlaps(prediction.boxes[j]):
                mask_overlaps[i, j] = weighted_intersection_over_minimum(
                    prediction.masks[i], prediction.masks[j]
                )
                mask_overlaps[j, i] = mask_overlaps[i, j]
        # Remove worst overlapping instances until there are no above-threshold overlaps
        keep = set(range(n_predictions))
        overlap_mask = mask_overlaps.max(axis=1) >= self.overlap_thresh
        while np.any(overlap_mask):
            # Figure out which overlapping annotation has the worst score
            overlap_annotations = np.where(overlap_mask)[0]
            to_discard = overlap_annotations[
                np.argmin(np.array(prediction.scores)[overlap_annotations])
            ]
            # Remove the annotation (zero its row/column so it no longer
            # contributes to any overlap).
            keep.remove(to_discard)
            mask_overlaps[to_discard, :] = 0.0
            mask_overlaps[:, to_discard] = 0.0
            overlap_mask = mask_overlaps.max(axis=1) >= self.overlap_thresh
        return prediction.get_subset_from_index(list(keep))

    def fit_poly(
        self,
        box: BoundingBox,
        mask: torch.Tensor,
    ) -> Union[PolylineShapeAttributes, CircleShapeAttributes, None]:
        """Uses polynomial regression to fit a polyline annotation to the provided
        segmentation mask.

        Parameters
        ----------
        box : BoundingBox
            Fully contains the object to be annotated.
        mask : tensor or array
            Segmentation mask of instance with shape (image_width, image_height).

        Returns
        -------
        shape_attributes : Union[PolylineShapeAttributes, CircleShapeAttributes, None]
            Geometry of automatic annotation.
        """
        portrait = box.is_portrait()
        crop_mask = box.crop_image(mask).cpu().numpy().reshape(box.shape)
        y, x = np.where(crop_mask > 0.0)
        weights = np.array(crop_mask[y, x]).flatten()
        # Set longest axis as independent variable and fit polynomial.
        # The (a, b)[portrait] idiom selects between the two axes by bool.
        ind = (x, y)[portrait]
        dep = (y, x)[portrait]
        poly_fit = np.polynomial.Polynomial.fit(ind, dep, self.poly_order, w=weights)
        # Find endpoints
        ind_vals = np.arange(crop_mask.shape[not portrait])
        dep_vals = poly_fit(ind_vals)
        # Keep only points where the fitted curve stays inside the crop.
        val_mask = np.logical_and(dep_vals < crop_mask.shape[portrait], dep_vals >= 0)
        y_vals = (dep_vals, ind_vals)[portrait][val_mask]
        x_vals = (ind_vals, dep_vals)[portrait][val_mask]
        fit_mask_vals = crop_mask[y_vals.astype("i4"), x_vals.astype("i4")]
        endpoints = ind_vals[
            list(self.endpoint_method(fit_mask_vals, *self.endpoint_extra_args))
        ]
        # Approximate polynomial segment with polyline; the number of
        # vertices grows with the angle swept between the two endpoints.
        end_gradients = poly_fit.deriv()(endpoints)
        end_angles = np.arctan(end_gradients)
        angle_diff = abs(end_angles[1] - end_angles[0])
        all_points_ind, all_points_dep = poly_fit.linspace(
            n=int(np.ceil(angle_diff / self.split_angle) + 2), domain=endpoints
        )
        # Translate crop-local coordinates back to full-image coordinates.
        all_points_x = list((all_points_ind, all_points_dep)[portrait] + box.x0)
        all_points_y = list((all_points_dep, all_points_ind)[portrait] + box.y0)
        shape_attributes: Union[PolylineShapeAttributes, CircleShapeAttributes, None]
        try:
            shape_attributes = PolylineShapeAttributes(
                all_points_x=all_points_x, all_points_y=all_points_y
            )
        except ValidationError:
            # Degenerate polyline: fall back to the smallest enclosing
            # circle, and to None if even that fails validation.
            try:
                cx, cy, r = smallest_enclosing_circle(zip(all_points_x, all_points_y))
                shape_attributes = CircleShapeAttributes(cx=cx, cy=cy, r=r)
            except ValidationError:
                shape_attributes = None
        return shape_attributes

    def convert_to_circle(
        self,
        polyline: PolylineShapeAttributes,
        img_shape: tuple[PositiveInt, PositiveInt],
    ) -> Union[PolylineShapeAttributes, CircleShapeAttributes]:
        """Checks if a polyline annotation is close to the edge of an image, and if so,
        converts it to a circle annotation by computing the smallest enclosing circle of
        all points in the polyline.

        Parameters
        ----------
        polyline : PolylineShapeAttributes
            Shape to convert if too close to edge.
        img_shape: tuple[int, int]
            Height and width of image.

        Returns
        -------
        shape_attributes : Union[PolylineShapeAttributes, CircleShapeAttributes]
            Geometry of annotation after (possible) conversion. If polyline does not
            go too close to the edge of the image, then polyline is returned unchanged.
            Else, a circle annotation is returned.
        """
        # The accepted region is the image shrunk by edge_thresh on all sides.
        polyline_accepted_region = BoundingBox.from_shape(
            img_shape, border=self.edge_thresh
        )
        if polyline.in_box(polyline_accepted_region):
            return polyline
        return polyline.as_circle()

    def annotate_img(self, img_idx: int) -> list[ViaRegion]:
        """Calls self.get_prediction, self.filter_annotations, and self.fit_poly to
        produce annotations for an image specified with img_idx.

        Parameters
        ----------
        img_idx: int
            Index of image in via project.

        Returns
        -------
        regions : list[ViaRegion]
            list of annotations for image.
        """
        prediction = self.get_prediction(img_idx)
        prediction = self.filter_annotations(prediction)
        regions = []
        for i in range(len(prediction)):
            box = prediction.boxes[i]
            mask = prediction.masks[i]
            score = prediction.scores[i]
            shape_attributes = self.fit_poly(box, mask)
            if shape_attributes is None:
                # fit_poly could not produce a valid geometry for this instance.
                continue
            if shape_attributes.name == "polyline":
                assert isinstance(shape_attributes, PolylineShapeAttributes)
                shape_attributes = self.convert_to_circle(
                    shape_attributes, (mask.shape[-2], mask.shape[-1])
                )
            region_attributes = ViaRegionAttributes(score=score)
            regions.append(
                ViaRegion(
                    region_attributes=region_attributes,
                    shape_attributes=shape_attributes,
                )
            )
        return regions

    def annotate(self, disable_progress_bar: Optional[bool] = True) -> ViaProject:
        """Calls self.annotate_img on all images and returns a ViaProject instance.
        Copies the `via_attributes` and `via_settings` fields from
        `self.dataset.via_project`, and just replaces the `via_img_metadata` field.

        Parameters
        ----------
        disable_progress_bar : Optional[bool]
            If True (default), progress bar is disabled.
            If set to None, disable on non-TTY.

        Returns
        -------
        project : ViaProject
            With automatic annotations made.
        """
        via_img_metadata: dict[str, ViaMetadata] = {}
        # Live counters shown in the progress bar postfix.
        postfix = {"tot_annotations": 0}
        if self.backup_device:
            postfix["backup_device_used"] = self.backup_model_used
        pb = trange(
            len(self.dataset),
            disable=disable_progress_bar,
            desc="Annotating images",
            unit="img",
            dynamic_ncols=True,
            ascii=True,
            postfix=postfix,
        )
        for img_idx in pb:
            img_key = self.dataset.keys[img_idx]
            regions = self.annotate_img(img_idx)
            in_metadata = self.dataset.metadata(img_idx)
            # .construct skips pydantic validation (fields are already valid).
            out_metadata = ViaMetadata.construct(
                file_attributes=in_metadata.file_attributes.copy(),
                filename=in_metadata.filename,
                regions=regions,
                size=in_metadata.size,
            )
            via_img_metadata[img_key] = out_metadata
            postfix["tot_annotations"] += len(regions)
            if self.backup_device:
                postfix["backup_device_used"] = self.backup_model_used
            pb.set_postfix(postfix, refresh=False)
        print(f"Annotation complete.", file=stderr)
        return ViaProject.construct(
            via_attributes=self.dataset.via_project.via_attributes,
            via_img_metadata=via_img_metadata,
            via_settings=self.dataset.via_project.via_settings,
        )
class AnnotationValidationResult(BaseModel):
    """Contains various metrics for assessing the quality of a set of automatically
    obtained annotations of flying insects.

    Parameters
    ----------
    ious : list[tuple[NonNegativeFloat, NonNegativeFloat]]
        list of (iou, score) pairs.
        iou is the Intersection over Union of the bounding boxes of true positives
        to their matched ground truth annotation. All matched annotations are
        included.
    polyline_hausdorff_distances : list[tuple[NonNegativeFloat, NonNegativeFloat]]
        list of (h_dist, score) pairs.
        h_dist is the hausdorff distance of a true positive polyline annotation,
        where the annotation is matched to a polyline ground truth annotation. Only
        polyline annotations which matched to a polyline ground truth annotation are
        included.
    length_differences : list[tuple[float, NonNegativeFloat]]
        list of (l_diff, score) pairs.
        l_diff is calculated as the length of a true positive polyline annotation
        minus the length of it's matched ground truth annotation. Only polyline
        annotations which matched to a polyline ground truth annotation are
        included.
    true_positives : list[NonNegativeFloat]
        list of scores.
    false_positives : list[NonNegativeFloat]
        list of scores. Score is the prediction score of the automatic annotation.
    false_negatives : int
        Number of false negative annotations.
    """

    # NOTE: pydantic deep-copies mutable field defaults per instance, so
    # these shared-looking [] defaults are safe here.
    ious: list[tuple[NonNegativeFloat, NonNegativeFloat]] = []
    polyline_hausdorff_distances: list[tuple[NonNegativeFloat, NonNegativeFloat]] = []
    length_differences: list[tuple[float, NonNegativeFloat]] = []
    true_positives: list[NonNegativeFloat] = []
    false_positives: list[NonNegativeFloat] = []
    false_negatives: NonNegativeInt = 0
def validate_annotations(
    auto_annotations: ViaProject,
    ground_truth: ViaProject,
    iou_thresh: float = 0.5,
    subset_functions: Optional[dict[str, Callable[[ViaMetadata], bool]]] = None,
    disable_progress_bar: Optional[bool] = True,
) -> list[AnnotationValidationResult]:
    """Compares automatic annotations against a ground-truth annotations for validation
    puposes. Validation data is stored in an AnnotationValidationResult object.

    Parameters
    ----------
    auto_annotations : ViaProject
        Automatically obtained annotations to assess.
    ground_truth : ViaProject
        Manually created ground-truth annotations.
    iou_thresh : float
        Threshold of intersection-over-union of bounding boxes to be considered a
        match. Typically, this is 0.5.
    subset_functions : Optional[dict[str, Callable[[ViaMetadata], bool]]]
        Mapping from subset name to subset function. If set, validation will be repeated
        multiple times with different subsets, once for each element.
    disable_progress_bar : Optional[bool]
        If True (default), progress bar is disabled.
        If set to None, disable on non-TTY.

    Returns
    -------
    validation_results : list[AnnotationValidationResult]
        list containing instances of AnnotationValidationResult. If subset_functions is
        set, then validation_results will have len(subset_functions) elements. By
        default it will just contain one element.
    """
    if subset_functions is None:
        subset_functions = {"all": lambda x: True}

    results: list[AnnotationValidationResult] = []
    for name, subset_function in subset_functions.items():
        gt_annotations = ground_truth.filtered_copy(subset_function)
        result = AnnotationValidationResult()
        # Only images present in BOTH projects are compared.
        for img_key in tqdm(
            gt_annotations.via_img_metadata.keys()
            & auto_annotations.via_img_metadata.keys(),
            disable=disable_progress_bar,
            desc=f"Validating {name} annotations",
            unit="img",
            dynamic_ncols=True,
            ascii=True,
        ):
            gt_metadata = gt_annotations.via_img_metadata[img_key]
            metadata = auto_annotations.via_img_metadata[img_key]
            # Sparse matrix of above-threshold IoUs between every
            # (prediction, ground-truth) region pair in this image.
            ious = sparse.dok_matrix(
                (len(metadata.regions), len(gt_metadata.regions)), dtype="f8"
            )
            for i, j in itertools.product(
                range(len(metadata.regions)), range(len(gt_metadata.regions))
            ):
                iou = metadata.regions[i].shape_attributes.intersection_over_union(
                    gt_metadata.regions[j].shape_attributes
                )
                if iou >= iou_thresh:
                    ious[i, j] = iou
            ious = ious.tocsr()
            # One-to-one pairing of predictions to ground truths; unmatched
            # predictions get -1 in `matches`.
            matches = sparse.csgraph.maximum_bipartite_matching(ious, "column")
            # Ground truths left unmatched are false negatives.
            result.false_negatives += len(gt_metadata.regions) - np.count_nonzero(
                matches >= 0
            )
            for i, match in enumerate(matches):
                score = metadata.regions[i].region_attributes.score
                if score is None:
                    raise ValueError(
                        "Invalid automatically obtained annotation. "
                        "Ensure that auto_annotations were obtained automatically "
                        f"(region {i} of {img_key} missing 'score' region_attribute)."
                    )
                elif match >= 0:
                    # Matched prediction: a true positive. Polyline-vs-polyline
                    # matches additionally contribute shape-quality metrics.
                    result.true_positives.append(score)
                    result.ious.append((ious[i, match], score))
                    shape = metadata.regions[i].shape_attributes
                    gt_shape = gt_metadata.regions[match].shape_attributes
                    if shape.name == gt_shape.name == "polyline":
                        assert isinstance(shape, PolylineShapeAttributes)
                        h_dist = shape.hausdorff_distance(gt_shape)
                        result.polyline_hausdorff_distances.append((h_dist, score))
                        l_diff = shape.length() - gt_shape.length()
                        result.length_differences.append((l_diff, score))
                else:
                    result.false_positives.append(score)
        results.append(result)
    return results
| [
"numpy.count_nonzero",
"numpy.array",
"camfi.datamodel.via.ViaRegionAttributes",
"numpy.arange",
"camfi.datamodel.geometry.CircleShapeAttributes",
"pathlib.Path",
"numpy.where",
"camfi.datamodel.autoannotation.Prediction.from_tensor_dict",
"torch.hub.load_state_dict_from_url",
"scipy.sparse.csgrap... | [((3085, 3091), 'pathlib.Path', 'Path', ([], {}), '()\n', (3089, 3091), False, 'from pathlib import Path\n'), ((4578, 4598), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (4590, 4598), False, 'import torch\n'), ((4729, 4838), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': 'num_workers', 'collate_fn': 'collate_fn'}), '(dataset, batch_size=batch_size, shuffle=True, num_workers=\n num_workers, collate_fn=collate_fn)\n', (4739, 4838), False, 'from torch.utils.data import DataLoader\n'), ((5215, 5283), 'torch.optim.SGD', 'torch.optim.SGD', (['params'], {'lr': '(0.005)', 'momentum': '(0.9)', 'weight_decay': '(0.0005)'}), '(params, lr=0.005, momentum=0.9, weight_decay=0.0005)\n', (5230, 5283), False, 'import torch\n'), ((5303, 5369), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimiser'], {'step_size': '(3)', 'gamma': '(0.1)'}), '(optimiser, step_size=3, gamma=0.1)\n', (5334, 5369), False, 'import torch\n'), ((8842, 8883), 'pydantic.validator', 'validator', (['"""model"""'], {'pre': '(True)', 'always': '(True)'}), "('model', pre=True, always=True)\n", (8851, 8883), False, 'from pydantic import BaseModel, DirectoryPath, NonNegativeInt, NonNegativeFloat, PositiveFloat, PositiveInt, ValidationError, validator\n'), ((9032, 9064), 'pydantic.validator', 'validator', (['"""device"""'], {'always': '(True)'}), "('device', always=True)\n", (9041, 9064), False, 'from pydantic import BaseModel, DirectoryPath, NonNegativeInt, NonNegativeFloat, PositiveFloat, PositiveInt, ValidationError, validator\n'), ((9298, 9346), 'pydantic.validator', 'validator', (['"""backup_model"""'], {'pre': '(True)', 'always': '(True)'}), "('backup_model', pre=True, always=True)\n", (9307, 9346), False, 'from pydantic import BaseModel, DirectoryPath, NonNegativeInt, NonNegativeFloat, PositiveFloat, PositiveInt, ValidationError, validator\n'), ((9708, 9745), 
'pydantic.validator', 'validator', (['"""split_angle"""'], {'always': '(True)'}), "('split_angle', always=True)\n", (9717, 9745), False, 'from pydantic import BaseModel, DirectoryPath, NonNegativeInt, NonNegativeFloat, PositiveFloat, PositiveInt, ValidationError, validator\n'), ((2129, 2158), 'torch.load', 'torch.load', (['model_path_or_url'], {}), '(model_path_or_url)\n', (2139, 2158), False, 'import torch\n'), ((9198, 9213), 'torch.device', 'torch.device', (['v'], {}), '(v)\n', (9210, 9213), False, 'import torch\n'), ((10999, 11038), 'camfi.datamodel.autoannotation.Prediction.from_tensor_dict', 'Prediction.from_tensor_dict', (['prediction'], {}), '(prediction)\n', (11026, 11038), False, 'from camfi.datamodel.autoannotation import CamfiDataset, Prediction\n'), ((11772, 11824), 'numpy.zeros', 'np.zeros', (['(n_predictions, n_predictions)'], {'dtype': '"""f4"""'}), "((n_predictions, n_predictions), dtype='f4')\n", (11780, 11824), True, 'import numpy as np\n'), ((12390, 12410), 'numpy.any', 'np.any', (['overlap_mask'], {}), '(overlap_mask)\n', (12396, 12410), True, 'import numpy as np\n'), ((13791, 13816), 'numpy.where', 'np.where', (['(crop_mask > 0.0)'], {}), '(crop_mask > 0.0)\n', (13799, 13816), True, 'import numpy as np\n'), ((14023, 14089), 'numpy.polynomial.Polynomial.fit', 'np.polynomial.Polynomial.fit', (['ind', 'dep', 'self.poly_order'], {'w': 'weights'}), '(ind, dep, self.poly_order, w=weights)\n', (14051, 14089), True, 'import numpy as np\n'), ((14135, 14175), 'numpy.arange', 'np.arange', (['crop_mask.shape[not portrait]'], {}), '(crop_mask.shape[not portrait])\n', (14144, 14175), True, 'import numpy as np\n'), ((14233, 14300), 'numpy.logical_and', 'np.logical_and', (['(dep_vals < crop_mask.shape[portrait])', '(dep_vals >= 0)'], {}), '(dep_vals < crop_mask.shape[portrait], dep_vals >= 0)\n', (14247, 14300), True, 'import numpy as np\n'), ((14744, 14768), 'numpy.arctan', 'np.arctan', (['end_gradients'], {}), '(end_gradients)\n', (14753, 14768), True, 
'import numpy as np\n'), ((16702, 16760), 'camfi.datamodel.geometry.BoundingBox.from_shape', 'BoundingBox.from_shape', (['img_shape'], {'border': 'self.edge_thresh'}), '(img_shape, border=self.edge_thresh)\n', (16724, 16760), False, 'from camfi.datamodel.geometry import BoundingBox, CircleShapeAttributes, PolylineShapeAttributes\n'), ((20153, 20325), 'camfi.datamodel.via.ViaProject.construct', 'ViaProject.construct', ([], {'via_attributes': 'self.dataset.via_project.via_attributes', 'via_img_metadata': 'via_img_metadata', 'via_settings': 'self.dataset.via_project.via_settings'}), '(via_attributes=self.dataset.via_project.via_attributes,\n via_img_metadata=via_img_metadata, via_settings=self.dataset.\n via_project.via_settings)\n', (20173, 20325), False, 'from camfi.datamodel.via import ViaFileAttributes, ViaMetadata, ViaProject, ViaRegion, ViaRegionAttributes\n'), ((2222, 2287), 'torch.hub.load_state_dict_from_url', 'torch.hub.load_state_dict_from_url', (['model_urls[model_path_or_url]'], {}), '(model_urls[model_path_or_url])\n', (2256, 2287), False, 'import torch\n'), ((2319, 2372), 'torch.hub.load_state_dict_from_url', 'torch.hub.load_state_dict_from_url', (['model_path_or_url'], {}), '(model_path_or_url)\n', (2353, 2372), False, 'import torch\n'), ((10615, 10630), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10628, 10630), False, 'import torch\n'), ((15267, 15344), 'camfi.datamodel.geometry.PolylineShapeAttributes', 'PolylineShapeAttributes', ([], {'all_points_x': 'all_points_x', 'all_points_y': 'all_points_y'}), '(all_points_x=all_points_x, all_points_y=all_points_y)\n', (15290, 15344), False, 'from camfi.datamodel.geometry import BoundingBox, CircleShapeAttributes, PolylineShapeAttributes\n'), ((18058, 18090), 'camfi.datamodel.via.ViaRegionAttributes', 'ViaRegionAttributes', ([], {'score': 'score'}), '(score=score)\n', (18077, 18090), False, 'from camfi.datamodel.via import ViaFileAttributes, ViaMetadata, ViaProject, ViaRegion, ViaRegionAttributes\n'), 
((25031, 25088), 'scipy.sparse.csgraph.maximum_bipartite_matching', 'sparse.csgraph.maximum_bipartite_matching', (['ious', '"""column"""'], {}), "(ious, 'column')\n", (25072, 25088), False, 'from scipy import sparse\n'), ((4647, 4661), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4659, 4661), False, 'from datetime import datetime\n'), ((10582, 10600), 'camfi.datamodel.autoannotation.Prediction.empty', 'Prediction.empty', ([], {}), '()\n', (10598, 10600), False, 'from camfi.datamodel.autoannotation import CamfiDataset, Prediction\n'), ((11999, 12075), 'camfi.util.weighted_intersection_over_minimum', 'weighted_intersection_over_minimum', (['prediction.masks[i]', 'prediction.masks[j]'], {}), '(prediction.masks[i], prediction.masks[j])\n', (12033, 12075), False, 'from camfi.util import endpoint_truncate, smallest_enclosing_circle, weighted_intersection_over_minimum, Field\n'), ((12520, 12542), 'numpy.where', 'np.where', (['overlap_mask'], {}), '(overlap_mask)\n', (12528, 12542), True, 'import numpy as np\n'), ((13835, 13860), 'numpy.array', 'np.array', (['crop_mask[y, x]'], {}), '(crop_mask[y, x])\n', (13843, 13860), True, 'import numpy as np\n'), ((18135, 18221), 'camfi.datamodel.via.ViaRegion', 'ViaRegion', ([], {'region_attributes': 'region_attributes', 'shape_attributes': 'shape_attributes'}), '(region_attributes=region_attributes, shape_attributes=\n shape_attributes)\n', (18144, 18221), False, 'from camfi.datamodel.via import ViaFileAttributes, ViaMetadata, ViaProject, ViaRegion, ViaRegionAttributes\n'), ((25154, 25184), 'numpy.count_nonzero', 'np.count_nonzero', (['(matches >= 0)'], {}), '(matches >= 0)\n', (25170, 25184), True, 'import numpy as np\n'), ((15546, 15586), 'camfi.datamodel.geometry.CircleShapeAttributes', 'CircleShapeAttributes', ([], {'cx': 'cx', 'cy': 'cy', 'r': 'r'}), '(cx=cx, cy=cy, r=r)\n', (15567, 15586), False, 'from camfi.datamodel.geometry import BoundingBox, CircleShapeAttributes, PolylineShapeAttributes\n'), ((12618, 
12645), 'numpy.array', 'np.array', (['prediction.scores'], {}), '(prediction.scores)\n', (12626, 12645), True, 'import numpy as np\n'), ((14903, 14941), 'numpy.ceil', 'np.ceil', (['(angle_diff / self.split_angle)'], {}), '(angle_diff / self.split_angle)\n', (14910, 14941), True, 'import numpy as np\n')] |
"""
Wrapper for running amplicon_analysis_inputs report for LAAgc (with an extra
input file).
"""
import logging
import sys
from pbcommand.models import FileTypes
from pbcommand.cli import pbparser_runner
from pbcommand.utils import setup_log
from pbreports.report.amplicon_analysis_input import _get_parser, make_report
log = logging.getLogger(__name__)
class Constants(object):
    """Tool-contract identifiers for the LAAgc input report task."""
    # Task id registered with the pbcommand tool-contract system.
    TOOL_ID = "pbreports.tasks.laagc_input"
    # Command used to re-invoke this module in resolved-tool-contract mode.
    DRIVER_EXE = "python -m pbreports.report.laagc_input --resolved-tool-contract"
def _args_runner(args):
    """Run the report from parsed command-line arguments.

    Delegates to the shared amplicon_analysis_input make_report with the
    two extra LAAgc inputs (locus CSV and barcoded subreads).
    """
    report_csv = args.report_csv
    report_json = args.report_json
    locus_csv = args.locus_csv
    subreads = args.barcoded_subreads
    return make_report(report_csv, report_json, locus_csv, subreads)
def _rtc_runner(rtc):
    """Run the report from a resolved tool contract.

    Input file order: report CSV, locus CSV, barcoded SubreadSet;
    single output: the report JSON.
    """
    inputs = rtc.task.input_files
    outputs = rtc.task.output_files
    return make_report(inputs[0], outputs[0], inputs[1], inputs[2])
def _get_laagc_parser():
    """Build the LAAgc parser: the base amplicon_analysis_input parser
    plus the two extra input files this wrapper requires."""
    parser = _get_parser(Constants.TOOL_ID, Constants.DRIVER_EXE)
    # (file type, file id, display name, description) for each extra input,
    # registered in order so input-file indices stay stable for _rtc_runner.
    extra_inputs = (
        (FileTypes.CSV, "locus_csv", "Mapped Subreads CSV",
         "CSV of mapped subreads per sample per locus"),
        (FileTypes.DS_SUBREADS, "barcoded_subreads", "Barcoded Subreads",
         "Barcoded SubreadSet XML"),
    )
    for file_type, file_id, name, description in extra_inputs:
        parser.add_input_file_type(
            file_type,
            file_id=file_id,
            name=name,
            description=description)
    return parser
def main(argv=sys.argv):
    """Console entry point: build the parser and dispatch to the
    argparse or resolved-tool-contract runner."""
    parser = _get_laagc_parser()
    return pbparser_runner(
        argv[1:],
        parser,
        _args_runner,
        _rtc_runner,
        log,
        setup_log)
# Script entry point: 'python -m pbreports.report.laagc_input ...'
if __name__ == "__main__":
    sys.exit(main())
| [
"logging.getLogger",
"pbreports.report.amplicon_analysis_input._get_parser",
"pbreports.report.amplicon_analysis_input.make_report"
] | [((332, 359), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (349, 359), False, 'import logging\n'), ((551, 642), 'pbreports.report.amplicon_analysis_input.make_report', 'make_report', (['args.report_csv', 'args.report_json', 'args.locus_csv', 'args.barcoded_subreads'], {}), '(args.report_csv, args.report_json, args.locus_csv, args.\n barcoded_subreads)\n', (562, 642), False, 'from pbreports.report.amplicon_analysis_input import _get_parser, make_report\n'), ((673, 790), 'pbreports.report.amplicon_analysis_input.make_report', 'make_report', (['rtc.task.input_files[0]', 'rtc.task.output_files[0]', 'rtc.task.input_files[1]', 'rtc.task.input_files[2]'], {}), '(rtc.task.input_files[0], rtc.task.output_files[0], rtc.task.\n input_files[1], rtc.task.input_files[2])\n', (684, 790), False, 'from pbreports.report.amplicon_analysis_input import _get_parser, make_report\n'), ((844, 896), 'pbreports.report.amplicon_analysis_input._get_parser', '_get_parser', (['Constants.TOOL_ID', 'Constants.DRIVER_EXE'], {}), '(Constants.TOOL_ID, Constants.DRIVER_EXE)\n', (855, 896), False, 'from pbreports.report.amplicon_analysis_input import _get_parser, make_report\n')] |
# The MIT License (MIT)
# Copyright © 2021 Opentensor.ai
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import argparse
import json
import os
import re
import stat
from munch import Munch
from loguru import logger
import bittensor
from bittensor.crypto import is_encrypted, decrypt_data
from bittensor.crypto import decrypt_keypair
from bittensor.crypto.keyfiles import KeyFileError, load_keypair_from_data
class Wallet():
"""
Bittensor wallet maintenance class. Each wallet contains a coldkey and a hotkey.
The coldkey is the user's primary key for holding their stake in their wallet
and is the only way that users can access their Tao. Coldkeys can hold tokens and should be encrypted on your device.
The coldkey must be used to stake and unstake funds from a running node. The hotkey, on the other hand, is only used
for suscribing and setting weights from running code. Hotkeys are linked to coldkeys through the metagraph.
"""
    def __init__(self, config: Munch = None):
        """Load the hot and cold keys described by *config*.

        When no config is supplied, one is built from CLI arguments /
        defaults via Wallet.build_config(). On a wrong password or a
        corrupt keyfile the error is logged and the process exits.
        """
        if config == None:
            config = Wallet.build_config()
        self.config = config
        try:
            self.load_hotkeypair()
            self.load_cold_key()
        except (KeyError):
            # Raised by the decryption path when the password is wrong.
            logger.error("Invalid password")
            quit()
        except KeyFileError:
            logger.error("Keyfile corrupt")
            quit()
def load_cold_key(self):
path = self.config.wallet.coldkeyfile
path = os.path.expanduser(path)
with open(path, "r") as file:
self.coldkey = file.readline().strip()
logger.info("Loaded coldkey: {}", self.coldkey)
    def load_hotkeypair(self):
        """Load the hotkey keypair file into self.keypair.

        Reads the raw keyfile bytes and, when the file is encrypted,
        prompts interactively for the password to decrypt it first.
        """
        keyfile = os.path.expanduser(self.config.wallet.hotkeyfile)
        with open(keyfile, 'rb') as file:
            data = file.read()
            # Encrypted keyfiles require an interactive password prompt;
            # a wrong password surfaces as KeyError in the caller.
            if is_encrypted(data):
                password = bittensor.utils.Cli.ask_password()
                data = decrypt_data(password, data)
            hotkey = load_keypair_from_data(data)
        self.keypair = hotkey
        logger.info("Loaded hotkey: {}", self.keypair.public_key)
@staticmethod
def build_config() -> Munch:
# Parses and returns a config Munch for this object.
parser = argparse.ArgumentParser();
Wallet.add_args(parser)
config = bittensor.config.Config.to_config(parser);
Wallet.check_config(config)
return config
    @staticmethod
    def add_args(parser: argparse.ArgumentParser):
        """Register the wallet key-file arguments on *parser*.

        The try/except swallows argparse's "conflicting option" error so
        the arguments can be registered more than once harmlessly.
        """
        try:
            parser.add_argument('--wallet.hotkeyfile', required=False, default='~/.bittensor/wallets/default/hotkeys/default', 
                                    help='''The path to your bittensor hot key file,
                                        Hotkeys should not hold tokens and are only used
                                        for suscribing and setting weights from running code.
                                        Hotkeys are linked to coldkeys through the metagraph''')
            parser.add_argument('--wallet.coldkeyfile', required=False, default='~/.bittensor/wallets/default/coldkeypub.txt', 
                                    help='''The path to your bittensor cold publickey text file.
                                        Coldkeys can hold tokens and should be encrypted on your device.
                                        The coldkey must be used to stake and unstake funds from a running node.
                                        On subscribe this coldkey account is linked to the associated hotkey on the subtensor chain.
                                        Only this key is capable of making staking and unstaking requests for this neuron.''')
        except:
            pass
    @staticmethod
    def check_config(config: Munch):
        """Validate both key-file paths; raises KeyFileError on any problem."""
        Wallet.__check_hot_key_path(config.wallet.hotkeyfile)
        Wallet.__check_cold_key_path(config.wallet.coldkeyfile)
@staticmethod
def __check_hot_key_path(path):
path = os.path.expanduser(path)
if not os.path.isfile(path):
logger.error("--wallet.hotkeyfile {} is not a file", path)
logger.error("You can create keys with: bittensor-cli new_wallet")
raise KeyFileError
if not os.access(path, os.R_OK):
logger.error("--wallet.hotkeyfile {} is not readable", path)
logger.error("Ensure you have proper privileges to read the file {}", path)
raise KeyFileError
if Wallet.__is_world_readable(path):
logger.error("--wallet.hotkeyfile {} is world readable.", path)
logger.error("Ensure you have proper privileges to read the file {}", path)
raise KeyFileError
@staticmethod
def __is_world_readable(path):
st = os.stat(path)
return st.st_mode & stat.S_IROTH
@staticmethod
def __check_cold_key_path(path):
path = os.path.expanduser(path)
if not os.path.isfile(path):
logger.error("--wallet.coldkeyfile {} does not exist", path)
raise KeyFileError
if not os.path.isfile(path):
logger.error("--wallet.coldkeyfile {} is not a file", path)
raise KeyFileError
if not os.access(path, os.R_OK):
logger.error("--wallet.coldkeyfile {} is not readable", path)
raise KeyFileError
with open(path, "r") as file:
key = file.readline().strip()
if not re.match("^0x[a-z0-9]{64}$", key):
logger.error("Cold key file corrupt")
raise KeyFileError
    @staticmethod
    def __create_keypair() -> bittensor.subtensor.interface.Keypair:
        # Generate a brand-new keypair from a freshly generated mnemonic.
        return bittensor.subtensor.interface.Keypair.create_from_mnemonic(bittensor.subtensor.interface.Keypair.generate_mnemonic())
@staticmethod
def __save_keypair(keypair : bittensor.subtensor.interface.Keypair, path : str):
path = os.path.expanduser(path)
with open(path, 'w') as file:
json.dump(keypair.toDict(), file)
file.close()
os.chmod(path, stat.S_IWUSR | stat.S_IRUSR)
    @staticmethod
    def __has_keypair(path):
        # True when a keyfile already exists at *path* (after ~ expansion).
        path = os.path.expanduser(path)
        return os.path.exists(path) | [
"os.path.exists",
"loguru.logger.info",
"argparse.ArgumentParser",
"bittensor.crypto.decrypt_data",
"bittensor.crypto.is_encrypted",
"os.access",
"re.match",
"os.chmod",
"os.path.isfile",
"bittensor.crypto.keyfiles.load_keypair_from_data",
"loguru.logger.error",
"bittensor.utils.Cli.ask_passwo... | [((2475, 2499), 'os.path.expanduser', 'os.path.expanduser', (['path'], {}), '(path)\n', (2493, 2499), False, 'import os\n'), ((2597, 2644), 'loguru.logger.info', 'logger.info', (['"""Loaded coldkey: {}"""', 'self.coldkey'], {}), "('Loaded coldkey: {}', self.coldkey)\n", (2608, 2644), False, 'from loguru import logger\n'), ((2695, 2744), 'os.path.expanduser', 'os.path.expanduser', (['self.config.wallet.hotkeyfile'], {}), '(self.config.wallet.hotkeyfile)\n', (2713, 2744), False, 'import os\n'), ((3262, 3287), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3285, 3287), False, 'import argparse\n'), ((3340, 3381), 'bittensor.config.Config.to_config', 'bittensor.config.Config.to_config', (['parser'], {}), '(parser)\n', (3373, 3381), False, 'import bittensor\n'), ((5040, 5064), 'os.path.expanduser', 'os.path.expanduser', (['path'], {}), '(path)\n', (5058, 5064), False, 'import os\n'), ((5826, 5839), 'os.stat', 'os.stat', (['path'], {}), '(path)\n', (5833, 5839), False, 'import os\n'), ((5952, 5976), 'os.path.expanduser', 'os.path.expanduser', (['path'], {}), '(path)\n', (5970, 5976), False, 'import os\n'), ((6971, 6995), 'os.path.expanduser', 'os.path.expanduser', (['path'], {}), '(path)\n', (6989, 6995), False, 'import os\n'), ((7114, 7157), 'os.chmod', 'os.chmod', (['path', '(stat.S_IWUSR | stat.S_IRUSR)'], {}), '(path, stat.S_IWUSR | stat.S_IRUSR)\n', (7122, 7157), False, 'import os\n'), ((7221, 7245), 'os.path.expanduser', 'os.path.expanduser', (['path'], {}), '(path)\n', (7239, 7245), False, 'import os\n'), ((7261, 7281), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (7275, 7281), False, 'import os\n'), ((2833, 2851), 'bittensor.crypto.is_encrypted', 'is_encrypted', (['data'], {}), '(data)\n', (2845, 2851), False, 'from bittensor.crypto import is_encrypted, decrypt_data\n'), ((2988, 3016), 'bittensor.crypto.keyfiles.load_keypair_from_data', 'load_keypair_from_data', (['data'], {}), 
'(data)\n', (3010, 3016), False, 'from bittensor.crypto.keyfiles import KeyFileError, load_keypair_from_data\n'), ((3063, 3120), 'loguru.logger.info', 'logger.info', (['"""Loaded hotkey: {}"""', 'self.keypair.public_key'], {}), "('Loaded hotkey: {}', self.keypair.public_key)\n", (3074, 3120), False, 'from loguru import logger\n'), ((5081, 5101), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (5095, 5101), False, 'import os\n'), ((5115, 5173), 'loguru.logger.error', 'logger.error', (['"""--wallet.hotkeyfile {} is not a file"""', 'path'], {}), "('--wallet.hotkeyfile {} is not a file', path)\n", (5127, 5173), False, 'from loguru import logger\n'), ((5186, 5252), 'loguru.logger.error', 'logger.error', (['"""You can create keys with: bittensor-cli new_wallet"""'], {}), "('You can create keys with: bittensor-cli new_wallet')\n", (5198, 5252), False, 'from loguru import logger\n'), ((5300, 5324), 'os.access', 'os.access', (['path', 'os.R_OK'], {}), '(path, os.R_OK)\n', (5309, 5324), False, 'import os\n'), ((5338, 5398), 'loguru.logger.error', 'logger.error', (['"""--wallet.hotkeyfile {} is not readable"""', 'path'], {}), "('--wallet.hotkeyfile {} is not readable', path)\n", (5350, 5398), False, 'from loguru import logger\n'), ((5411, 5486), 'loguru.logger.error', 'logger.error', (['"""Ensure you have proper privileges to read the file {}"""', 'path'], {}), "('Ensure you have proper privileges to read the file {}', path)\n", (5423, 5486), False, 'from loguru import logger\n'), ((5576, 5639), 'loguru.logger.error', 'logger.error', (['"""--wallet.hotkeyfile {} is world readable."""', 'path'], {}), "('--wallet.hotkeyfile {} is world readable.', path)\n", (5588, 5639), False, 'from loguru import logger\n'), ((5652, 5727), 'loguru.logger.error', 'logger.error', (['"""Ensure you have proper privileges to read the file {}"""', 'path'], {}), "('Ensure you have proper privileges to read the file {}', path)\n", (5664, 5727), False, 'from loguru import logger\n'), 
((5993, 6013), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (6007, 6013), False, 'import os\n'), ((6027, 6087), 'loguru.logger.error', 'logger.error', (['"""--wallet.coldkeyfile {} does not exist"""', 'path'], {}), "('--wallet.coldkeyfile {} does not exist', path)\n", (6039, 6087), False, 'from loguru import logger\n'), ((6135, 6155), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (6149, 6155), False, 'import os\n'), ((6169, 6228), 'loguru.logger.error', 'logger.error', (['"""--wallet.coldkeyfile {} is not a file"""', 'path'], {}), "('--wallet.coldkeyfile {} is not a file', path)\n", (6181, 6228), False, 'from loguru import logger\n'), ((6276, 6300), 'os.access', 'os.access', (['path', 'os.R_OK'], {}), '(path, os.R_OK)\n', (6285, 6300), False, 'import os\n'), ((6314, 6375), 'loguru.logger.error', 'logger.error', (['"""--wallet.coldkeyfile {} is not readable"""', 'path'], {}), "('--wallet.coldkeyfile {} is not readable', path)\n", (6326, 6375), False, 'from loguru import logger\n'), ((6793, 6850), 'bittensor.subtensor.interface.Keypair.generate_mnemonic', 'bittensor.subtensor.interface.Keypair.generate_mnemonic', ([], {}), '()\n', (6848, 6850), False, 'import bittensor\n'), ((2240, 2272), 'loguru.logger.error', 'logger.error', (['"""Invalid password"""'], {}), "('Invalid password')\n", (2252, 2272), False, 'from loguru import logger\n'), ((2333, 2364), 'loguru.logger.error', 'logger.error', (['"""Keyfile corrupt"""'], {}), "('Keyfile corrupt')\n", (2345, 2364), False, 'from loguru import logger\n'), ((2880, 2914), 'bittensor.utils.Cli.ask_password', 'bittensor.utils.Cli.ask_password', ([], {}), '()\n', (2912, 2914), False, 'import bittensor\n'), ((2938, 2966), 'bittensor.crypto.decrypt_data', 'decrypt_data', (['password', 'data'], {}), '(password, data)\n', (2950, 2966), False, 'from bittensor.crypto import is_encrypted, decrypt_data\n'), ((6507, 6540), 're.match', 're.match', (['"""^0x[a-z0-9]{64}$"""', 'key'], {}), 
"('^0x[a-z0-9]{64}$', key)\n", (6515, 6540), False, 'import re\n'), ((6558, 6595), 'loguru.logger.error', 'logger.error', (['"""Cold key file corrupt"""'], {}), "('Cold key file corrupt')\n", (6570, 6595), False, 'from loguru import logger\n')] |
import unittest
from datetime import datetime, timezone
from pandas import DatetimeTZDtype
from parameterized import parameterized
import pandas as pd
from aiokraken.utils.timeindexeddataframe import TimeindexedDataframe
"""
Test module.
This is intended for extensive testing, using parameterized, hypothesis or similar generation methods
For simple usecase examples, we should rely on doctests.
"""
class TestTimeindexedDataframe(unittest.TestCase):
@parameterized.expand(
[
[
pd.DataFrame( # One with "datetime" column (like internal model)
# TODO: proper currencies...
[
[
datetime.fromtimestamp(1567039620, tz=timezone.utc),
8746.4,
8751.5,
8745.7,
8745.7,
8749.3,
0.09663298,
8,
],
[
datetime.fromtimestamp(1567039680, tz=timezone.utc),
8745.7,
8747.3,
8745.7,
8747.3,
8747.3,
0.00929540,
1,
],
],
# grab that from kraken documentation
columns=[
"datetime",
"open",
"high",
"low",
"close",
"vwap",
"volume",
"count",
],
) # there is no datetime index a priori.
], [
pd.DataFrame( # One with "datetime" column (like internal model)
# TODO: proper currencies...
[
[
datetime.fromtimestamp(1567039620, tz=timezone.utc),
8746.4,
8751.5,
8745.7,
8745.7,
8749.3,
0.09663298,
8,
],
[
datetime.fromtimestamp(1567039680, tz=timezone.utc),
8745.7,
8747.3,
8745.7,
8747.3,
8747.3,
0.00929540,
1,
],
],
# grab that from kraken documentation
columns=[
"datetime",
"open",
"high",
"low",
"close",
"vwap",
"volume",
"count",
],
).set_index("datetime") # we already have an index
],
]
)
    def test_load_ok(self, df):
        """A frame loads with numeric OHLCV columns and a tz-aware datetime index.

        Parameterized over two fixtures: one where "datetime" is a plain
        column and one where it is already the index, so
        index="datetime" must handle both.
        """
        tidf = TimeindexedDataframe(data=df, index="datetime")
        import pandas.api.types as ptypes
        num_cols = ["open", "high", "low", "close", "vwap", "volume", "count"]
        # Every value column must have parsed as a numeric dtype.
        assert all(ptypes.is_numeric_dtype(tidf.dataframe[col]) for col in num_cols)
        assert tidf.dataframe.index.name == "datetime"
        # Verify we have a timezone aware, ns precision datetime index.
        assert ptypes.is_datetime64tz_dtype(tidf.dataframe.index.dtype)
        assert ptypes.is_datetime64_ns_dtype(tidf.dataframe.index.dtype)
# TODO : property test instead (move this example test to doc...)
@parameterized.expand(
[
[
pd.DataFrame(
# TODO: proper Time, proper currencies...
[
[
datetime.fromtimestamp(1567039620, tz=timezone.utc),
8746.4,
8751.5,
8745.7,
8745.7,
8749.3,
0.09663298,
8,
],
[
datetime.fromtimestamp(1567039680, tz=timezone.utc),
8745.7,
8747.3,
8745.7,
8747.3,
8747.3,
0.00929540,
1,
],
],
# grab that from kraken documentation
columns=[
"datetime",
"open",
"high",
"low",
"close",
"vwap",
"volume",
"count",
],
),
pd.DataFrame(
# TODO: proper Time, proper currencies...
[
[
datetime.fromtimestamp(1567039680, tz=timezone.utc),
8745.8,
8747.3,
8745.7,
8747.3,
8747.3,
0.00929540,
1,
], # Not the value is a bit modified to trigger stitching...
[
datetime.fromtimestamp(1567039720, tz=timezone.utc),
8746.6,
8751.4,
8745.3,
8745.4,
8748.1,
0.09663297,
3,
],
],
# grab that from kraken documentation
columns=[
"datetime",
"open",
"high",
"low",
"close",
"vwap",
"volume",
"count",
],
),
],
]
)
    def test_stitch_ok(
        self, df1, df2
    ):  # TODO : there are MANY cases to test for stitch
        """Merging two overlapping frames stitches them into one.

        df1 and df2 share their middle timestamp (with slightly different
        values there), so the merge must deduplicate it: 2 + 2 rows in,
        3 rows out.
        """
        tidf1 = TimeindexedDataframe(data=df1)
        tidf2 = TimeindexedDataframe(data=df2)
        stitched1 = tidf1.merge(tidf2)
        import pandas.api.types as ptypes
        num_cols = ["open", "high", "low", "close", "vwap", "volume", "count"]
        assert all(
            ptypes.is_numeric_dtype(stitched1.dataframe[col]) for col in num_cols
        )
        assert stitched1.dataframe.index.name == "datetime"
        # Verify we have a timezone aware, ns precision datetime index.
        assert ptypes.is_datetime64tz_dtype(stitched1.dataframe.index.dtype)
        assert ptypes.is_datetime64_ns_dtype(stitched1.dataframe.index.dtype)
        # verifying stitches: first row from tidf1, last row from tidf2
        assert (stitched1.dataframe.iloc[0] == tidf1.dataframe.iloc[0]).all()
        assert (stitched1.dataframe.iloc[-1] == tidf2.dataframe.iloc[-1]).all()
        assert len(stitched1) == 3
        # Note : careful with default merging strategy, ORDER MATTERS !
        # To make it not matter, we need mode semantics...
@parameterized.expand(
[
[
pd.DataFrame( # One with "datetime" column (like internal model)
# TODO: proper Time, proper currencies...
[
[
datetime.fromtimestamp(1567039620, tz=timezone.utc),
8746.4,
8751.5,
8745.7,
8745.7,
8749.3,
0.09663298,
8,
],
[
datetime.fromtimestamp(1567039680, tz=timezone.utc),
8745.7,
8747.3,
8745.7,
8747.3,
8747.3,
0.00929540,
1,
],
],
# grab that from kraken documentation
columns=[
"datetime",
"open",
"high",
"low",
"close",
"vwap",
"volume",
"count",
],
).set_index("datetime")
],
]
)
    def test_getitem_ok(self, df):
        """Every supported __getitem__ access path returns the expected data.

        Covers positional access (iloc), lookup by datetime key, datetime
        slicing, and column-list selection.
        """
        tidf = TimeindexedDataframe(data=df)
        import pandas.api.types as ptypes
        num_cols = ["open", "high", "low", "close", "vwap", "volume", "count"]
        assert all(ptypes.is_numeric_dtype(tidf.dataframe[col]) for col in num_cols)
        assert ptypes.is_datetime64_any_dtype(tidf.dataframe.index)
        assert tidf.dataframe.index.name == "datetime"
        assert tidf.dataframe.index.dtype == DatetimeTZDtype(tz=timezone.utc)
        # verifying all ways to access data
        # get the first element by integer position
        assert isinstance(tidf.iloc[0], pd.Series)
        assert tidf.iloc[0]["open"] == 8746.4
        assert tidf.iloc[0]["high"] == 8751.5
        assert tidf.iloc[0]["low"] == 8745.7
        assert tidf.iloc[0]["close"] == 8745.7
        assert tidf.iloc[0]["vwap"] == 8749.3
        assert tidf.iloc[0]["volume"] == 0.09663298
        assert tidf.iloc[0]["count"] == 8
        # NOT WORKING
        # get based on timeindex
        # assert isinstance(tidf.tloc[1567039620], pd.Series)
        # assert tidf.tloc[1567039620]["open"] == 8746.4
        # assert tidf.tloc[1567039620]["high"] == 8751.5
        # assert tidf.tloc[1567039620]["low"] == 8745.7
        # assert tidf.tloc[1567039620]["close"] == 8745.7
        # assert tidf.tloc[1567039620]["vwap"] == 8749.3
        # assert tidf.tloc[1567039620]["volume"] == 0.09663298
        # assert tidf.tloc[1567039620]["count"] == 8
        # get from datetime (1567039620 == 2019-08-29 00:47:00 UTC)
        firstdatetime = datetime(
            year=2019, month=8, day=29, hour=0, minute=47, second=0, tzinfo=timezone.utc
        )
        assert isinstance(tidf[firstdatetime], pd.Series)
        assert tidf[firstdatetime]["open"] == 8746.4
        assert tidf[firstdatetime]["high"] == 8751.5
        assert tidf[firstdatetime]["low"] == 8745.7
        assert tidf[firstdatetime]["close"] == 8745.7
        assert tidf[firstdatetime]["vwap"] == 8749.3
        assert tidf[firstdatetime]["volume"] == 0.09663298
        assert tidf[firstdatetime]["count"] == 8
        scnddatetime = datetime(
            year=2019, month=8, day=29, hour=0, minute=48, second=0, tzinfo=timezone.utc
        )
        # get slice and verify equality (this slice spans the whole frame)
        assert isinstance(tidf[firstdatetime:scnddatetime], TimeindexedDataframe)
        assert tidf[firstdatetime:scnddatetime] == tidf
        # get list of columns only
        assert isinstance(tidf[["open", "high", "low", "close"]], TimeindexedDataframe)
        assert tidf[["open", "high", "low", "close"]][firstdatetime]["open"] == tidf[firstdatetime]["open"]
        assert tidf[["open", "high", "low", "close"]][firstdatetime]["high"] == tidf[firstdatetime]["high"]
        assert tidf[["open", "high", "low", "close"]][firstdatetime]["low"] == tidf[firstdatetime]["low"]
        assert tidf[["open", "high", "low", "close"]][firstdatetime]["close"] == tidf[firstdatetime]["close"]
    @parameterized.expand(
        [
            [
                pd.DataFrame( # One with "datetime" column (like internal model)
                    # TODO: proper Time, proper currencies...
                    [
                        [
                            datetime.fromtimestamp(1567039620, tz=timezone.utc),
                            8746.4,
                            8751.5,
                            8745.7,
                            8745.7,
                            8749.3,
                            0.09663298,
                            8,
                        ],
                        [
                            datetime.fromtimestamp(1567039680, tz=timezone.utc),
                            8745.7,
                            8747.3,
                            8745.7,
                            8747.3,
                            8747.3,
                            0.00929540,
                            1,
                        ],
                    ],
                    # Column layout grabbed from the Kraken OHLC documentation.
                    columns=[
                        "datetime",
                        "open",
                        "high",
                        "low",
                        "close",
                        "vwap",
                        "volume",
                        "count",
                    ],
                ).set_index("datetime")
            ],
        ]
    )
    def test_iter_ok(self, df):
        """Iterating a TimeindexedDataframe yields (timestamp, row Series) pairs.

        Checks dtypes/index invariants first, then that ``iter()`` returns the
        two fixture rows with the expected values.
        """
        tidf = TimeindexedDataframe(data=df)
        import pandas.api.types as ptypes
        # All OHLCV columns must have been coerced to numeric dtypes.
        num_cols = ["open", "high", "low", "close", "vwap", "volume", "count"]
        assert all(ptypes.is_numeric_dtype(tidf.dataframe[col]) for col in num_cols)
        assert ptypes.is_datetime64_any_dtype(tidf.dataframe.index)
        assert tidf.dataframe.index.name == "datetime"
        assert tidf.dataframe.index.dtype == DatetimeTZDtype(tz=timezone.utc)
        it = iter(tidf)
        # NOTE(review): the iterator yields the minute-48 row BEFORE the
        # minute-47 row, i.e. apparently newest-first — confirm this ordering
        # is intended by TimeindexedDataframe.__iter__.
        ts, s = next(it)
        assert ts == datetime(
            year=2019, month=8, day=29, hour=0, minute=48, second=0, tzinfo=timezone.utc
        )
        assert (s == pd.Series(data={
            "open":8745.7,
            "high":8747.3,
            "low":8745.7,
            "close":8747.3,
            "vwap":8747.3,
            "volume":0.00929540,
            "count":1,
        })).all()
        ts2, s2 = next(it)
        assert ts2 == datetime(
            year=2019, month=8, day=29, hour=0, minute=47, second=0, tzinfo=timezone.utc
        )
        assert (s2 == pd.Series(data={
            "open": 8746.4,
            "high": 8751.5,
            "low": 8745.7,
            "close": 8745.7,
            "vwap": 8749.3,
            "volume": 0.09663298,
            "count": 8,
        })).all()
# @parameterized.expand(
# [
# [
# pd.DataFrame( # One with "datetime" column (like internal model)
# # TODO: proper Time, proper currencies...
# [
# [
# datetime.fromtimestamp(1567039620, tz=timezone.utc),
# 8746.4,
# 8751.5,
# 8745.7,
# 8745.7,
# 8749.3,
# 0.09663298,
# 8,
# ],
# [
# datetime.fromtimestamp(1567039680, tz=timezone.utc),
# 8745.7,
# 8747.3,
# 8745.7,
# 8747.3,
# 8747.3,
# 0.00929540,
# 1,
# ],
# ],
# # grab that from kraken documentation
# columns=[
# "datetime",
# "open",
# "high",
# "low",
# "close",
# "vwap",
# "volume",
# "count",
# ],
# ).set_index("datetime")
# ],
# ]
# )
# def test_aiter_ok(self, df):
# import asyncio
#
# clock = [1567039690,1567039750,1567039810,1567039870]
# countcall = iter(clock)
# def timer():
# return datetime.fromtimestamp(next(countcall), tz=timezone.utc)
#
# slept = 0
# async def sleeper(secs):
# slept = secs
#
# """ Verifying that expected data iterates properly asynchronously """
# tidf = TimeindexedDataframe(data=df, timer=timer, sleeper=sleeper)
#
# import pandas.api.types as ptypes
#
# num_cols = ["open", "high", "low", "close", "vwap", "volume", "count"]
# assert all(ptypes.is_numeric_dtype(tidf.dataframe[col]) for col in num_cols)
#
# assert ptypes.is_datetime64_any_dtype(tidf.dataframe.index)
# assert tidf.dataframe.index.name == "datetime"
# assert tidf.dataframe.index.dtype == DatetimeTZDtype(tz=timezone.utc)
#
# sync=asyncio.Lock()
#
# async def testrunner():
# idx = 0
#
# asyncio.get_running_loop().create_task(provider())
#
# async for m in tidf:
# async with sync:
# if idx == 0:
# assert m[0] == datetime.fromtimestamp(1567039740, tz=timezone.utc)
# assert slept == 50
# elif idx == 1:
# assert m[0]== datetime.fromtimestamp(1567039800, tz=timezone.utc)
# assert slept == 50
# elif idx == 2:
# assert m[0]== datetime.fromtimestamp(1567039860, tz=timezone.utc)
# assert slept == 50
# idx += 1
# if idx >= 3:
# break
#
# async def provider():
# idx=len(df)
# async with sync:
# # TODO : better way to append data (using __call__ ??)
# tidf.dataframe[idx] = [
# datetime.fromtimestamp(1567039740, tz=timezone.utc),
# 8745.7,
# 8747.2,
# 8745.8,
# 8747.3,
# 8747.3,
# 0.00929540,
# 1,
# ]
#
# idx = idx + 1
# async with sync:
# tidf.dataframe[idx] = [
# datetime.fromtimestamp(1567039800, tz=timezone.utc),
# 8745.7,
# 8747.3,
# 8745.7,
# 8747.3,
# 8747.3,
# 0.00929540,
# 1,
# ]
#
# idx = idx + 1
# async with sync:
# tidf.dataframe[idx] = [
# datetime.fromtimestamp(1567039860, tz=timezone.utc),
# 8745.7,
# 8747.3,
# 8745.7,
# 8747.3,
# 8747.3,
# 0.00929540,
# 1,
# ]
#
    # # Note: even if we use asyncio here for apparent "parallelism" of control flow,
    # # the timer and sleeper are test stubs to control synchronicity...
# asyncio.run(testrunner())
# Allow running this test module directly with the unittest runner.
if __name__ == "__main__":
    unittest.main()
| [
"datetime.datetime",
"pandas.Series",
"datetime.datetime.fromtimestamp",
"pandas.api.types.is_datetime64_ns_dtype",
"pandas.api.types.is_numeric_dtype",
"aiokraken.utils.timeindexeddataframe.TimeindexedDataframe",
"pandas.api.types.is_datetime64tz_dtype",
"unittest.main",
"pandas.DatetimeTZDtype",
... | [((20171, 20186), 'unittest.main', 'unittest.main', ([], {}), '()\n', (20184, 20186), False, 'import unittest\n'), ((3430, 3477), 'aiokraken.utils.timeindexeddataframe.TimeindexedDataframe', 'TimeindexedDataframe', ([], {'data': 'df', 'index': '"""datetime"""'}), "(data=df, index='datetime')\n", (3450, 3477), False, 'from aiokraken.utils.timeindexeddataframe import TimeindexedDataframe\n'), ((3823, 3879), 'pandas.api.types.is_datetime64tz_dtype', 'ptypes.is_datetime64tz_dtype', (['tidf.dataframe.index.dtype'], {}), '(tidf.dataframe.index.dtype)\n', (3851, 3879), True, 'import pandas.api.types as ptypes\n'), ((3895, 3952), 'pandas.api.types.is_datetime64_ns_dtype', 'ptypes.is_datetime64_ns_dtype', (['tidf.dataframe.index.dtype'], {}), '(tidf.dataframe.index.dtype)\n', (3924, 3952), True, 'import pandas.api.types as ptypes\n'), ((6951, 6981), 'aiokraken.utils.timeindexeddataframe.TimeindexedDataframe', 'TimeindexedDataframe', ([], {'data': 'df1'}), '(data=df1)\n', (6971, 6981), False, 'from aiokraken.utils.timeindexeddataframe import TimeindexedDataframe\n'), ((6998, 7028), 'aiokraken.utils.timeindexeddataframe.TimeindexedDataframe', 'TimeindexedDataframe', ([], {'data': 'df2'}), '(data=df2)\n', (7018, 7028), False, 'from aiokraken.utils.timeindexeddataframe import TimeindexedDataframe\n'), ((7446, 7507), 'pandas.api.types.is_datetime64tz_dtype', 'ptypes.is_datetime64tz_dtype', (['stitched1.dataframe.index.dtype'], {}), '(stitched1.dataframe.index.dtype)\n', (7474, 7507), True, 'import pandas.api.types as ptypes\n'), ((7523, 7585), 'pandas.api.types.is_datetime64_ns_dtype', 'ptypes.is_datetime64_ns_dtype', (['stitched1.dataframe.index.dtype'], {}), '(stitched1.dataframe.index.dtype)\n', (7552, 7585), True, 'import pandas.api.types as ptypes\n'), ((9511, 9540), 'aiokraken.utils.timeindexeddataframe.TimeindexedDataframe', 'TimeindexedDataframe', ([], {'data': 'df'}), '(data=df)\n', (9531, 9540), False, 'from aiokraken.utils.timeindexeddataframe import 
TimeindexedDataframe\n'), ((9765, 9817), 'pandas.api.types.is_datetime64_any_dtype', 'ptypes.is_datetime64_any_dtype', (['tidf.dataframe.index'], {}), '(tidf.dataframe.index)\n', (9795, 9817), True, 'import pandas.api.types as ptypes\n'), ((10976, 11067), 'datetime.datetime', 'datetime', ([], {'year': '(2019)', 'month': '(8)', 'day': '(29)', 'hour': '(0)', 'minute': '(47)', 'second': '(0)', 'tzinfo': 'timezone.utc'}), '(year=2019, month=8, day=29, hour=0, minute=47, second=0, tzinfo=\n timezone.utc)\n', (10984, 11067), False, 'from datetime import datetime, timezone\n'), ((11540, 11631), 'datetime.datetime', 'datetime', ([], {'year': '(2019)', 'month': '(8)', 'day': '(29)', 'hour': '(0)', 'minute': '(48)', 'second': '(0)', 'tzinfo': 'timezone.utc'}), '(year=2019, month=8, day=29, hour=0, minute=48, second=0, tzinfo=\n timezone.utc)\n', (11548, 11631), False, 'from datetime import datetime, timezone\n'), ((13952, 13981), 'aiokraken.utils.timeindexeddataframe.TimeindexedDataframe', 'TimeindexedDataframe', ([], {'data': 'df'}), '(data=df)\n', (13972, 13981), False, 'from aiokraken.utils.timeindexeddataframe import TimeindexedDataframe\n'), ((14206, 14258), 'pandas.api.types.is_datetime64_any_dtype', 'ptypes.is_datetime64_any_dtype', (['tidf.dataframe.index'], {}), '(tidf.dataframe.index)\n', (14236, 14258), True, 'import pandas.api.types as ptypes\n'), ((9918, 9950), 'pandas.DatetimeTZDtype', 'DatetimeTZDtype', ([], {'tz': 'timezone.utc'}), '(tz=timezone.utc)\n', (9933, 9950), False, 'from pandas import DatetimeTZDtype\n'), ((14359, 14391), 'pandas.DatetimeTZDtype', 'DatetimeTZDtype', ([], {'tz': 'timezone.utc'}), '(tz=timezone.utc)\n', (14374, 14391), False, 'from pandas import DatetimeTZDtype\n'), ((14463, 14554), 'datetime.datetime', 'datetime', ([], {'year': '(2019)', 'month': '(8)', 'day': '(29)', 'hour': '(0)', 'minute': '(48)', 'second': '(0)', 'tzinfo': 'timezone.utc'}), '(year=2019, month=8, day=29, hour=0, minute=48, second=0, tzinfo=\n timezone.utc)\n', 
(14471, 14554), False, 'from datetime import datetime, timezone\n'), ((14869, 14960), 'datetime.datetime', 'datetime', ([], {'year': '(2019)', 'month': '(8)', 'day': '(29)', 'hour': '(0)', 'minute': '(47)', 'second': '(0)', 'tzinfo': 'timezone.utc'}), '(year=2019, month=8, day=29, hour=0, minute=47, second=0, tzinfo=\n timezone.utc)\n', (14877, 14960), False, 'from datetime import datetime, timezone\n'), ((3620, 3664), 'pandas.api.types.is_numeric_dtype', 'ptypes.is_numeric_dtype', (['tidf.dataframe[col]'], {}), '(tidf.dataframe[col])\n', (3643, 3664), True, 'import pandas.api.types as ptypes\n'), ((7224, 7273), 'pandas.api.types.is_numeric_dtype', 'ptypes.is_numeric_dtype', (['stitched1.dataframe[col]'], {}), '(stitched1.dataframe[col])\n', (7247, 7273), True, 'import pandas.api.types as ptypes\n'), ((9683, 9727), 'pandas.api.types.is_numeric_dtype', 'ptypes.is_numeric_dtype', (['tidf.dataframe[col]'], {}), '(tidf.dataframe[col])\n', (9706, 9727), True, 'import pandas.api.types as ptypes\n'), ((14124, 14168), 'pandas.api.types.is_numeric_dtype', 'ptypes.is_numeric_dtype', (['tidf.dataframe[col]'], {}), '(tidf.dataframe[col])\n', (14147, 14168), True, 'import pandas.api.types as ptypes\n'), ((14593, 14727), 'pandas.Series', 'pd.Series', ([], {'data': "{'open': 8745.7, 'high': 8747.3, 'low': 8745.7, 'close': 8747.3, 'vwap': \n 8747.3, 'volume': 0.0092954, 'count': 1}"}), "(data={'open': 8745.7, 'high': 8747.3, 'low': 8745.7, 'close': \n 8747.3, 'vwap': 8747.3, 'volume': 0.0092954, 'count': 1})\n", (14602, 14727), True, 'import pandas as pd\n'), ((15000, 15135), 'pandas.Series', 'pd.Series', ([], {'data': "{'open': 8746.4, 'high': 8751.5, 'low': 8745.7, 'close': 8745.7, 'vwap': \n 8749.3, 'volume': 0.09663298, 'count': 8}"}), "(data={'open': 8746.4, 'high': 8751.5, 'low': 8745.7, 'close': \n 8745.7, 'vwap': 8749.3, 'volume': 0.09663298, 'count': 8})\n", (15009, 15135), True, 'import pandas as pd\n'), ((715, 766), 'datetime.datetime.fromtimestamp', 
'datetime.fromtimestamp', (['(1567039620)'], {'tz': 'timezone.utc'}), '(1567039620, tz=timezone.utc)\n', (737, 766), False, 'from datetime import datetime, timezone\n'), ((1100, 1151), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['(1567039680)'], {'tz': 'timezone.utc'}), '(1567039680, tz=timezone.utc)\n', (1122, 1151), False, 'from datetime import datetime, timezone\n'), ((4246, 4297), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['(1567039620)'], {'tz': 'timezone.utc'}), '(1567039620, tz=timezone.utc)\n', (4268, 4297), False, 'from datetime import datetime, timezone\n'), ((4631, 4682), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['(1567039680)'], {'tz': 'timezone.utc'}), '(1567039680, tz=timezone.utc)\n', (4653, 4682), False, 'from datetime import datetime, timezone\n'), ((5548, 5599), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['(1567039680)'], {'tz': 'timezone.utc'}), '(1567039680, tz=timezone.utc)\n', (5570, 5599), False, 'from datetime import datetime, timezone\n'), ((5992, 6043), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['(1567039720)'], {'tz': 'timezone.utc'}), '(1567039720, tz=timezone.utc)\n', (6014, 6043), False, 'from datetime import datetime, timezone\n'), ((2110, 2161), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['(1567039620)'], {'tz': 'timezone.utc'}), '(1567039620, tz=timezone.utc)\n', (2132, 2161), False, 'from datetime import datetime, timezone\n'), ((2495, 2546), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['(1567039680)'], {'tz': 'timezone.utc'}), '(1567039680, tz=timezone.utc)\n', (2517, 2546), False, 'from datetime import datetime, timezone\n'), ((8216, 8267), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['(1567039620)'], {'tz': 'timezone.utc'}), '(1567039620, tz=timezone.utc)\n', (8238, 8267), False, 'from datetime import datetime, timezone\n'), ((8601, 8652), 'datetime.datetime.fromtimestamp', 
'datetime.fromtimestamp', (['(1567039680)'], {'tz': 'timezone.utc'}), '(1567039680, tz=timezone.utc)\n', (8623, 8652), False, 'from datetime import datetime, timezone\n'), ((12658, 12709), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['(1567039620)'], {'tz': 'timezone.utc'}), '(1567039620, tz=timezone.utc)\n', (12680, 12709), False, 'from datetime import datetime, timezone\n'), ((13043, 13094), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['(1567039680)'], {'tz': 'timezone.utc'}), '(1567039680, tz=timezone.utc)\n', (13065, 13094), False, 'from datetime import datetime, timezone\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
from bs4 import BeautifulSoup
import re
import json
import numpy as np
import sys
# Raise CPython's default recursion cap (1000) tenfold — presumably because
# deeply nested BeautifulSoup parse trees recurse past the default; TODO confirm.
sys.setrecursionlimit(10000)
# Write a function that parses HTML pages into BeautifulSoup objects
# In[2]:
def soupify(html, parser='lxml'):
    """Parse an HTML document into a BeautifulSoup tree.

    Args:
        html: Raw HTML markup (str or bytes).
        parser: Parser backend handed to BeautifulSoup. Defaults to 'lxml',
            keeping the original hard-coded behavior backward compatible.

    Returns:
        BeautifulSoup: The parsed document.
    """
    soup = BeautifulSoup(html, parser)
    return soup
# Film id (we need to download this again to perform a join later on)
# In[3]:
def scrape_id(soup):
    """Extract the numeric film id from a parsed film page.

    The id is read from the ``data-film-id`` attribute of the
    ``really-lazy-load`` container; it is downloaded again here so the
    scraped tables can be joined later on.

    Args:
        soup: BeautifulSoup tree of the film page.

    Returns:
        int: The film id.
    """
    # Renamed from ``id`` to avoid shadowing the builtin of the same name.
    film_id = int(soup.find("div", class_="really-lazy-load").get("data-film-id"))
    return film_id
# Film title
# In[4]:
def scrape_title(soup):
    """Return the film title from the page's embedded JSON-LD metadata.

    Args:
        soup: BeautifulSoup tree of the film page.

    Returns:
        str: Value of the ``name`` field of the JSON-LD block.
    """
    raw = soup.find("script", {"type": "application/ld+json"}).string
    # The JSON payload is wrapped in a CDATA comment pair; strip both markers.
    for marker in ('\n/* <![CDATA[ */\n', '\n/* ]]> */\n'):
        raw = raw.replace(marker, '')
    metadata = json.loads(raw)
    return metadata['name']
# Year
# In[5]:
def scrape_year(soup):
    """Return the release year from the page's JSON-LD metadata.

    Args:
        soup: BeautifulSoup tree of the film page.

    Returns:
        int: Release year, or ``np.nan`` when the metadata block is missing
        or malformed.
    """
    try:
        s = soup.find("script", {"type": "application/ld+json"}).string
        s = s.replace('\n/* <![CDATA[ */\n', '').replace('\n/* ]]> */\n', '')
        d = json.loads(s)
        year = int(d['releasedEvent'][0]['startDate'])
    # Was a bare ``except:``, which also swallowed SystemExit and
    # KeyboardInterrupt; catch only real errors (missing tag -> AttributeError,
    # bad JSON -> ValueError, absent key/index -> KeyError/IndexError).
    except Exception:
        return np.nan
    else:
        return year
# Director
# In[6]:
def scrape_director(soup):
    """Return the director name(s) from the page's JSON-LD metadata.

    Args:
        soup: BeautifulSoup tree of the film page.

    Returns:
        str: Semicolon-joined director names, or ``np.nan`` when the
        metadata block is missing or malformed.
    """
    try:
        s = soup.find("script", {"type": "application/ld+json"}).string
        s = s.replace('\n/* <![CDATA[ */\n', '').replace('\n/* ]]> */\n', '')
        d = json.loads(s)
        names = [director['name'] for director in d['director']]
        names = ';'.join(names)
    # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
    # are no longer swallowed.
    except Exception:
        return np.nan
    else:
        return names
# Cast
# In[7]:
def scrape_cast(soup):
    """Return the cast list from the page's JSON-LD metadata.

    Args:
        soup: BeautifulSoup tree of the film page.

    Returns:
        str: Semicolon-joined actor names, or ``np.nan`` when the
        metadata block is missing or malformed.
    """
    try:
        s = soup.find("script", {"type": "application/ld+json"}).string
        s = s.replace('\n/* <![CDATA[ */\n', '').replace('\n/* ]]> */\n', '')
        d = json.loads(s)
        actors = [actor['name'] for actor in d['actors']]
        actors = ';'.join(actors)
    # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
    # are no longer swallowed.
    except Exception:
        return np.nan
    else:
        return actors
# Country
# In[8]:
def scrape_country(soup):
    """Return the country/countries of origin from the JSON-LD metadata.

    Args:
        soup: BeautifulSoup tree of the film page.

    Returns:
        str: Semicolon-joined country names, or ``np.nan`` when the
        metadata block is missing or malformed.
    """
    try:
        s = soup.find("script", {"type": "application/ld+json"}).string
        s = s.replace('\n/* <![CDATA[ */\n', '').replace('\n/* ]]> */\n', '')
        d = json.loads(s)
        countries_of_origin = [country['name'] for country in d['countryOfOrigin']]
        countries_of_origin = ';'.join(countries_of_origin)
    # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
    # are no longer swallowed.
    except Exception:
        return np.nan
    else:
        return countries_of_origin
# Genres
# In[9]:
def scrape_genre(soup):
    """Return the genre list from the page's JSON-LD metadata.

    Args:
        soup: BeautifulSoup tree of the film page.

    Returns:
        str: Semicolon-joined genre names (``genre`` is already a list of
        strings in the JSON-LD), or ``np.nan`` when the metadata block is
        missing or malformed.
    """
    try:
        s = soup.find("script", {"type": "application/ld+json"}).string
        s = s.replace('\n/* <![CDATA[ */\n', '').replace('\n/* ]]> */\n', '')
        d = json.loads(s)
        genre_names = ';'.join(d['genre'])
    # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
    # are no longer swallowed.
    except Exception:
        return np.nan
    else:
        return genre_names
# Production company
# In[10]:
def scrape_production_company(soup):
    """Return the production company name(s) from the JSON-LD metadata.

    Args:
        soup: BeautifulSoup tree of the film page.

    Returns:
        str: Semicolon-joined company names, or ``np.nan`` when the
        metadata block is missing or malformed.
    """
    try:
        s = soup.find("script", {"type": "application/ld+json"}).string
        s = s.replace('\n/* <![CDATA[ */\n', '').replace('\n/* ]]> */\n', '')
        d = json.loads(s)
        company_names = [company['name'] for company in d['productionCompany']]
        company_names = ';'.join(company_names)
    # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
    # are no longer swallowed.
    except Exception:
        return np.nan
    else:
        return company_names
# Runtime
# In[11]:
def scrape_runtime(soup):
    """Return the runtime in minutes from the page footer text.

    Args:
        soup: BeautifulSoup tree of the film page.

    Returns:
        int: First integer found in the footer paragraph (the runtime),
        or ``np.nan`` when the paragraph is missing or holds no digits.
    """
    try:
        string = soup.find("p", class_="text-link text-footer").text
        # re.search for the first number is equivalent to re.findall(...)[0]
        # without building the full match list; no match -> AttributeError,
        # which lands in the handler just like the old IndexError did.
        runtime = int(re.search(r"\d+", string).group())
    # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
    # are no longer swallowed.
    except Exception:
        return np.nan
    else:
        return runtime
# Languages
# In[12]:
def scrape_languages(soup):
    """Return the spoken language(s) scraped from the page's language links.

    Args:
        soup: BeautifulSoup tree of the film page.

    Returns:
        str: Semicolon-joined language names (text of every anchor whose
        href contains "language"), or ``np.nan`` on any scraping error.
    """
    try:
        languages = [language.text for language in soup.find_all("a", href = re.compile("language"))]
        languages = ';'.join(languages)
    # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
    # are no longer swallowed.
    except Exception:
        return np.nan
    else:
        return languages
# Alternative titles
# In[13]:
def scrape_alt_titles(soup):
    """Return the alternative titles string from the page.

    Args:
        soup: BeautifulSoup tree of the film page.

    Returns:
        str: Text of the first paragraph inside the indented-list block,
        with newlines/tabs stripped, or ``np.nan`` when the block is absent.
    """
    try:
        alt_titles = soup.find("div", class_ = "text-indentedlist").find("p").text
        alt_titles = alt_titles.replace("\n", "").replace("\t", "")
    # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
    # are no longer swallowed.
    except Exception:
        return np.nan
    else:
        return alt_titles
# People
# In[14]:
def scrape_people(soup, role):
    """Return the names of crew members for a given role.

    Args:
        soup: BeautifulSoup tree of the film page.
        role: Role keyword matched against the anchors' href (e.g. a crew
            category slug).

    Returns:
        str: Semicolon-joined names, or ``np.nan`` on any scraping error.
    """
    try:
        people = [person.text for person in soup.find_all("a", class_="text-slug", href = re.compile(role))]
        people = ';'.join(people)
    # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
    # are no longer swallowed.
    except Exception:
        return np.nan
    else:
        return people
| [
"sys.setrecursionlimit",
"json.loads",
"re.compile",
"bs4.BeautifulSoup",
"re.findall"
] | [((132, 160), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10000)'], {}), '(10000)\n', (153, 160), False, 'import sys\n'), ((275, 302), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""lxml"""'], {}), "(html, 'lxml')\n", (288, 302), False, 'from bs4 import BeautifulSoup\n'), ((717, 730), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (727, 730), False, 'import json\n'), ((990, 1003), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (1000, 1003), False, 'import json\n'), ((1346, 1359), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (1356, 1359), False, 'import json\n'), ((1737, 1750), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (1747, 1750), False, 'import json\n'), ((2130, 2143), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (2140, 2143), False, 'import json\n'), ((2589, 2602), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (2599, 2602), False, 'import json\n'), ((2961, 2974), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (2971, 2974), False, 'import json\n'), ((3352, 3379), 're.findall', 're.findall', (['pattern', 'string'], {}), '(pattern, string)\n', (3362, 3379), False, 'import re\n'), ((3592, 3614), 're.compile', 're.compile', (['"""language"""'], {}), "('language')\n", (3602, 3614), False, 'import re\n'), ((4175, 4191), 're.compile', 're.compile', (['role'], {}), '(role)\n', (4185, 4191), False, 'import re\n')] |
# coding: utf-8
"""
Engine api
Engine APIs # noqa: E501
The version of the OpenAPI document: 1.0.4
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class EngineTask(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.

    Model of an engine task: a unit of work addressed to an engine machine,
    carrying source/destination endpoints, zone/line settings, a config list
    and an expiry (``time_to_live`` / ``is_expired``).
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> OpenAPI type name (drives to_dict()).
    openapi_types = {
        'id': 'str',
        'capbilities_type': 'Capability',
        'event_type': 'EventType',
        'engine_machine_id': 'str',
        'is_expired': 'bool',
        'time_to_live': 'int',
        'source': 'SourceEndPoint',
        'destination': 'DestinationEndPoint',
        'zone_setting': 'EngineTaskZoneSetting',
        'line_setting': 'EngineTaskLineSetting',
        'config': 'list[Config]',
        'updated': 'datetime',
        'created': 'datetime',
        'etag': 'str',
        'links': 'Links'
    }
    # Python attribute name -> JSON key on the wire.
    # NOTE(review): 'capbilities_type' (sic) is a generated name that is part
    # of the public API and JSON contract — do not "fix" the spelling here.
    attribute_map = {
        'id': '_id',
        'capbilities_type': 'capbilitiesType',
        'event_type': 'eventType',
        'engine_machine_id': 'engineMachineId',
        'is_expired': 'isExpired',
        'time_to_live': 'timeToLive',
        'source': 'source',
        'destination': 'destination',
        'zone_setting': 'zoneSetting',
        'line_setting': 'lineSetting',
        'config': 'config',
        'updated': 'updated',
        'created': 'created',
        'etag': 'etag',
        'links': 'links'
    }
    def __init__(self, id=None, capbilities_type=None, event_type=None, engine_machine_id=None, is_expired=False, time_to_live=-1, source=None, destination=None, zone_setting=None, line_setting=None, config=None, updated=None, created=None, etag=None, links=None):  # noqa: E501
        """EngineTask - a model defined in OpenAPI.

        All arguments are optional; only explicitly supplied values are
        stored (the ``is not None`` guards below route them through the
        property setters). Note the non-None defaults: ``is_expired=False``
        and ``time_to_live=-1`` (never expires) are always assigned.
        """  # noqa: E501
        # Backing fields for the properties below; start unset.
        self._id = None
        self._capbilities_type = None
        self._event_type = None
        self._engine_machine_id = None
        self._is_expired = None
        self._time_to_live = None
        self._source = None
        self._destination = None
        self._zone_setting = None
        self._line_setting = None
        self._config = None
        self._updated = None
        self._created = None
        self._etag = None
        self._links = None
        # Set to None here: no polymorphic discriminator is used by this model.
        self.discriminator = None
        if id is not None:
            self.id = id
        if capbilities_type is not None:
            self.capbilities_type = capbilities_type
        if event_type is not None:
            self.event_type = event_type
        if engine_machine_id is not None:
            self.engine_machine_id = engine_machine_id
        if is_expired is not None:
            self.is_expired = is_expired
        if time_to_live is not None:
            self.time_to_live = time_to_live
        if source is not None:
            self.source = source
        if destination is not None:
            self.destination = destination
        if zone_setting is not None:
            self.zone_setting = zone_setting
        if line_setting is not None:
            self.line_setting = line_setting
        if config is not None:
            self.config = config
        if updated is not None:
            self.updated = updated
        if created is not None:
            self.created = created
        if etag is not None:
            self.etag = etag
        if links is not None:
            self.links = links
    @property
    def id(self):
        """str: Task identifier (JSON key ``_id``)."""
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this EngineTask."""
        self._id = id
    @property
    def capbilities_type(self):
        """Capability: Capability type of this task (JSON key ``capbilitiesType``)."""
        return self._capbilities_type
    @capbilities_type.setter
    def capbilities_type(self, capbilities_type):
        """Sets the capbilities_type of this EngineTask."""
        self._capbilities_type = capbilities_type
    @property
    def event_type(self):
        """EventType: Event type of this task."""
        return self._event_type
    @event_type.setter
    def event_type(self, event_type):
        """Sets the event_type of this EngineTask."""
        self._event_type = event_type
    @property
    def engine_machine_id(self):
        """str: Id of the engine machine this task targets (JSON key ``engineMachineId``)."""
        return self._engine_machine_id
    @engine_machine_id.setter
    def engine_machine_id(self, engine_machine_id):
        """Sets the engine_machine_id of this EngineTask."""
        self._engine_machine_id = engine_machine_id
    @property
    def is_expired(self):
        """bool: Expiry flag.

        * true = Engines will NEVER execute this task
        * false = Engines will execute this task
        """
        return self._is_expired
    @is_expired.setter
    def is_expired(self, is_expired):
        """Sets the is_expired of this EngineTask.

        * true = Engines will NEVER execute this task
        * false = Engines will execute this task
        """
        self._is_expired = is_expired
    @property
    def time_to_live(self):
        """int: Time in milliseconds until the task expires.

        Engines will not execute an expired task.
        * -1 = Never expires
        * -2 = Expired
        * 0  = Will expire in 0 milliseconds
        * >0 = milliseconds till expiry
        """
        return self._time_to_live
    @time_to_live.setter
    def time_to_live(self, time_to_live):
        """Sets the time_to_live of this EngineTask (see property docs for
        the -1/-2/0/>0 encoding)."""
        self._time_to_live = time_to_live
    @property
    def source(self):
        """SourceEndPoint: Source endpoint of this task."""
        return self._source
    @source.setter
    def source(self, source):
        """Sets the source of this EngineTask."""
        self._source = source
    @property
    def destination(self):
        """DestinationEndPoint: Destination endpoint of this task."""
        return self._destination
    @destination.setter
    def destination(self, destination):
        """Sets the destination of this EngineTask."""
        self._destination = destination
    @property
    def zone_setting(self):
        """EngineTaskZoneSetting: Zone setting of this task."""
        return self._zone_setting
    @zone_setting.setter
    def zone_setting(self, zone_setting):
        """Sets the zone_setting of this EngineTask."""
        self._zone_setting = zone_setting
    @property
    def line_setting(self):
        """EngineTaskLineSetting: Line setting of this task."""
        return self._line_setting
    @line_setting.setter
    def line_setting(self, line_setting):
        """Sets the line_setting of this EngineTask."""
        self._line_setting = line_setting
    @property
    def config(self):
        """list[Config]: Config entries attached to this task."""
        return self._config
    @config.setter
    def config(self, config):
        """Sets the config of this EngineTask."""
        self._config = config
    @property
    def updated(self):
        """datetime: Last-update timestamp of this task."""
        return self._updated
    @updated.setter
    def updated(self, updated):
        """Sets the updated of this EngineTask."""
        self._updated = updated
    @property
    def created(self):
        """datetime: Creation timestamp of this task."""
        return self._created
    @created.setter
    def created(self, created):
        """Sets the created of this EngineTask."""
        self._created = created
    @property
    def etag(self):
        """str: Entity tag of this task."""
        return self._etag
    @etag.setter
    def etag(self, etag):
        """Sets the etag of this EngineTask."""
        self._etag = etag
    @property
    def links(self):
        """Links: Hypermedia links of this task."""
        return self._links
    @links.setter
    def links(self, links):
        """Sets the links of this EngineTask."""
        self._links = links
    def to_dict(self):
        """Returns the model properties as a dict, recursing into nested
        models (anything exposing ``to_dict``) inside lists and dicts."""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Serialize each list element that is itself a model.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Serialize model values inside dicts, keeping keys as-is.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal (same type, same state)."""
        if not isinstance(other, EngineTask):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other
| [
"six.iteritems"
] | [((12047, 12080), 'six.iteritems', 'six.iteritems', (['self.openapi_types'], {}), '(self.openapi_types)\n', (12060, 12080), False, 'import six\n')] |
"""
Analyses skewness for continuous features
Options:
A. Log
B. Yeo-Johnson
C. QuantileTransformer
"""
import json
import pandas as pd
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import power_transform, quantile_transform
from pathlib import Path
# Project root (one level above this script's directory).
p = Path(__file__).parents[1]
# To load project modules
import sys; sys.path.append(str(p))
from src.logger import LOGGER
from src.utils import skewTest
LOGGER.info('Load data')
# Keep only the continuous features (columns whose name contains 'cont').
X = pd.read_pickle(p.joinpath('data', 'interim', 'research.pkl')).filter(like='cont')
LOGGER.info('Process data - Logarithm')
# Option A: log1p transform, one skewTest result row per feature.
# NOTE(review): skewTest presumably returns a mapping that includes at least
# 'Time' and 'Insignificance' fields — the final aggregation below relies on
# those two columns; confirm against src.utils.skewTest.
A = (
    pd.DataFrame(X.apply(skewTest, args=(np.log1p,)).to_list())
    .assign(Transformation='Logarithm')
    .set_index('Transformation')
)
LOGGER.info('Process data - Yeo-Johnson')
# Option B: power_transform (Yeo-Johnson); each column is reshaped to the
# (n_samples, 1) 2-D layout sklearn transformers expect.
B = (
    pd.DataFrame(
        X.apply(lambda s: skewTest(np.reshape(s.values, (-1, 1)), power_transform))
        .to_list()
    )
    .apply(lambda s: s.explode().astype(float))
    .assign(Transformation='Yeo-Johnson')
    .set_index('Transformation')
)
LOGGER.info('Process data - Quantile Transform')
# Option C: quantile_transform mapped onto a normal output distribution,
# with a fixed random_state for reproducibility.
C = (
    pd.DataFrame(
        X.apply(lambda s: skewTest(
            np.reshape(s.values, (-1, 1)),
            quantile_transform,
            output_distribution='normal',
            random_state=0
        ))
        .to_list()
    )
    .apply(lambda s: s.explode().astype(float))
    .assign(Transformation='Quantile Transform')
    .set_index('Transformation')
)
LOGGER.info('Computing result')
# Average each transformation's metrics, rank them by cost-effectiveness
# (Time per unit of Insignificance, lower = better) and publish as HTML.
(
    pd.concat([A, B, C]).reset_index().groupby('Transformation').mean()
    .assign(CostEffectivenessRatio=lambda df: df['Time'].div(df['Insignificance']))
    .sort_values('CostEffectivenessRatio')
    .to_html(
        buf=p.joinpath('reports', 'tables', '02ContTransformations.html'),
        float_format='{:.2f}'.format,
        bold_rows=False
    )
)
) | [
"numpy.reshape",
"pandas.concat",
"src.logger.LOGGER.info",
"pathlib.Path"
] | [((464, 488), 'src.logger.LOGGER.info', 'LOGGER.info', (['"""Load data"""'], {}), "('Load data')\n", (475, 488), False, 'from src.logger import LOGGER\n'), ((576, 615), 'src.logger.LOGGER.info', 'LOGGER.info', (['"""Process data - Logarithm"""'], {}), "('Process data - Logarithm')\n", (587, 615), False, 'from src.logger import LOGGER\n'), ((762, 803), 'src.logger.LOGGER.info', 'LOGGER.info', (['"""Process data - Yeo-Johnson"""'], {}), "('Process data - Yeo-Johnson')\n", (773, 803), False, 'from src.logger import LOGGER\n'), ((1063, 1111), 'src.logger.LOGGER.info', 'LOGGER.info', (['"""Process data - Quantile Transform"""'], {}), "('Process data - Quantile Transform')\n", (1074, 1111), False, 'from src.logger import LOGGER\n'), ((1485, 1516), 'src.logger.LOGGER.info', 'LOGGER.info', (['"""Computing result"""'], {}), "('Computing result')\n", (1496, 1516), False, 'from src.logger import LOGGER\n'), ((312, 326), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (316, 326), False, 'from pathlib import Path\n'), ((863, 892), 'numpy.reshape', 'np.reshape', (['s.values', '(-1, 1)'], {}), '(s.values, (-1, 1))\n', (873, 892), True, 'import numpy as np\n'), ((1184, 1213), 'numpy.reshape', 'np.reshape', (['s.values', '(-1, 1)'], {}), '(s.values, (-1, 1))\n', (1194, 1213), True, 'import numpy as np\n'), ((1523, 1543), 'pandas.concat', 'pd.concat', (['[A, B, C]'], {}), '([A, B, C])\n', (1532, 1543), True, 'import pandas as pd\n')] |
import pygame
pygame.init()
# Game-loop target frame rate.
FPS = 60
# Window size in pixels.
WIDTH, HEIGHT = 800, 700
# Board dimensions: 7 rows x 8 columns, so squares are 700 // 7 = 100 px.
ROWS, COLS = 7, 8
SQUARE_SIZE = HEIGHT//ROWS
# GRAPHICAL USER INTERFACE
ICON_PATH = './src/murus_gallicus/assets/noun_checkers_1684698.png'
WINDOW = pygame.display.set_mode((WIDTH, HEIGHT))
PADDING = 20
OUTLINE = 2
# RGB COLORS
RED = (255, 0, 0)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
CLEAR_BLUE = (102, 178, 255)
BLUE = (0, 0, 255)
GREY = (128, 128, 128)
SOFT_YELLOW = (246, 233, 195)
SOFT_RED = (244, 129, 134)
# Side colors: green shades for the Gauls, red shades for the Romans (SPQR).
CELTIC_GREEN = (1, 135, 73)
DARK_GREEN = (14, 79, 0)
SPQR_RED = (213, 28, 31)
DARK_RED = (140, 8, 2)
# Game-mode labels — presumably shown in a mode-selection menu; confirm usage.
P_2_Minimax = "Player VS MiniMax AI"
P_2_P = "Player vs Player"
# Search depth for the minimax AI opponent.
AI_MINIMAX_DEPTH = 3
| [
"pygame.display.set_mode",
"pygame.init"
] | [((14, 27), 'pygame.init', 'pygame.init', ([], {}), '()\n', (25, 27), False, 'import pygame\n'), ((213, 253), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(WIDTH, HEIGHT)'], {}), '((WIDTH, HEIGHT))\n', (236, 253), False, 'import pygame\n')] |
import deepchem as dc
import numpy as np
import os
def test_numpy_dataset_get_shape():
  """Test that get_shape works for numpy datasets."""
  n_rows, n_feats, n_tasks = 100, 10, 10
  # Random inputs of the expected shapes.
  X = np.random.rand(n_rows, n_feats)
  y = np.random.randint(2, size=(n_rows, n_tasks))
  w = np.random.randint(2, size=(n_rows, n_tasks))
  ids = np.array(["id"] * n_rows)
  dataset = dc.data.NumpyDataset(X, y, w, ids)
  # get_shape() must echo back the shapes of the arrays it was built from.
  got = dataset.get_shape()
  assert got == (X.shape, y.shape, w.shape, ids.shape)
def test_disk_dataset_get_shape_single_shard():
  """Test that get_shape works for disk dataset."""
  n_rows, n_feats, n_tasks = 100, 10, 10
  # Random inputs of the expected shapes.
  X = np.random.rand(n_rows, n_feats)
  y = np.random.randint(2, size=(n_rows, n_tasks))
  w = np.random.randint(2, size=(n_rows, n_tasks))
  ids = np.array(["id"] * n_rows)
  dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
  # get_shape() must echo back the shapes of the arrays it was built from.
  got = dataset.get_shape()
  assert got == (X.shape, y.shape, w.shape, ids.shape)
def test_disk_dataset_get_shape_multishard():
  """Test that get_shape works for multisharded disk dataset."""
  n_rows, n_feats, n_tasks = 100, 10, 10
  # Random inputs of the expected shapes.
  X = np.random.rand(n_rows, n_feats)
  y = np.random.randint(2, size=(n_rows, n_tasks))
  w = np.random.randint(2, size=(n_rows, n_tasks))
  ids = np.array(["id"] * n_rows)
  dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
  # Split the single shard into 10 shards of 10 rows; shapes must still be
  # reported correctly across shard boundaries.
  dataset.reshard(shard_size=10)
  got = dataset.get_shape()
  assert got == (X.shape, y.shape, w.shape, ids.shape)
def test_disk_dataset_get_legacy_shape_single_shard():
  """Test that get_shape works for legacy disk dataset."""
  # Dimensions baked into the checked-in legacy_dataset fixture.
  n_rows, n_feats, n_tasks = 100, 10, 10
  here = os.path.dirname(os.path.abspath(__file__))
  # legacy_dataset is a dataset in the legacy format kept around for testing
  # purposes.
  dataset = dc.data.DiskDataset(os.path.join(here, "legacy_dataset"))
  X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
  assert X_shape == (n_rows, n_feats)
  assert y_shape == (n_rows, n_tasks)
  assert w_shape == (n_rows, n_tasks)
  assert ids_shape == (n_rows,)
def test_disk_dataset_get_legacy_shape_multishard():
  """Test that get_shape works for multisharded legacy disk dataset."""
  # Dimensions baked into the checked-in legacy_dataset_reshard fixture.
  n_rows, n_feats, n_tasks = 100, 10, 10
  # legacy_dataset_reshard is a sharded dataset in the legacy format kept
  # around for testing.
  here = os.path.dirname(os.path.abspath(__file__))
  dataset = dc.data.DiskDataset(os.path.join(here, "legacy_dataset_reshard"))
  # The fixture is sharded into 10 pieces.
  assert dataset.get_number_shards() == 10
  X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
  assert X_shape == (n_rows, n_feats)
  assert y_shape == (n_rows, n_tasks)
  assert w_shape == (n_rows, n_tasks)
  assert ids_shape == (n_rows,)
def test_get_shard_size():
  """get_shard_size() must work for datasets created without a labels column.

  DataLoader.create_dataset sets y to None when no tasks are given; the
  shard-size computation must not rely on the dataset having a not-None y
  column, otherwise everything built on it (e.g. the splitters) breaks.

  Note
  ----
  DiskDatasets without labels cannot be resharded!
  """
  here = os.path.dirname(os.path.abspath(__file__))
  csv_path = os.path.join(here, "reaction_smiles.csv")
  loader = dc.data.CSVLoader(
      tasks=[], feature_field="reactions", featurizer=dc.feat.DummyFeaturizer())
  dataset = loader.create_dataset(csv_path)
  # The fixture CSV contains exactly 4 rows.
  assert dataset.get_shard_size() == 4
| [
"deepchem.data.DiskDataset.from_numpy",
"deepchem.data.CSVLoader",
"numpy.random.rand",
"os.path.join",
"numpy.array",
"numpy.random.randint",
"deepchem.feat.DummyFeaturizer",
"deepchem.data.NumpyDataset",
"os.path.abspath",
"deepchem.data.DiskDataset"
] | [((227, 271), 'numpy.random.rand', 'np.random.rand', (['num_datapoints', 'num_features'], {}), '(num_datapoints, num_features)\n', (241, 271), True, 'import numpy as np\n'), ((278, 332), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(num_datapoints, num_tasks)'}), '(2, size=(num_datapoints, num_tasks))\n', (295, 332), True, 'import numpy as np\n'), ((339, 393), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(num_datapoints, num_tasks)'}), '(2, size=(num_datapoints, num_tasks))\n', (356, 393), True, 'import numpy as np\n'), ((402, 435), 'numpy.array', 'np.array', (["(['id'] * num_datapoints)"], {}), "(['id'] * num_datapoints)\n", (410, 435), True, 'import numpy as np\n'), ((449, 483), 'deepchem.data.NumpyDataset', 'dc.data.NumpyDataset', (['X', 'y', 'w', 'ids'], {}), '(X, y, w, ids)\n', (469, 483), True, 'import deepchem as dc\n'), ((848, 892), 'numpy.random.rand', 'np.random.rand', (['num_datapoints', 'num_features'], {}), '(num_datapoints, num_features)\n', (862, 892), True, 'import numpy as np\n'), ((899, 953), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(num_datapoints, num_tasks)'}), '(2, size=(num_datapoints, num_tasks))\n', (916, 953), True, 'import numpy as np\n'), ((960, 1014), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(num_datapoints, num_tasks)'}), '(2, size=(num_datapoints, num_tasks))\n', (977, 1014), True, 'import numpy as np\n'), ((1023, 1056), 'numpy.array', 'np.array', (["(['id'] * num_datapoints)"], {}), "(['id'] * num_datapoints)\n", (1031, 1056), True, 'import numpy as np\n'), ((1070, 1114), 'deepchem.data.DiskDataset.from_numpy', 'dc.data.DiskDataset.from_numpy', (['X', 'y', 'w', 'ids'], {}), '(X, y, w, ids)\n', (1100, 1114), True, 'import deepchem as dc\n'), ((1490, 1534), 'numpy.random.rand', 'np.random.rand', (['num_datapoints', 'num_features'], {}), '(num_datapoints, num_features)\n', (1504, 1534), True, 'import numpy as np\n'), ((1541, 1595), 
'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(num_datapoints, num_tasks)'}), '(2, size=(num_datapoints, num_tasks))\n', (1558, 1595), True, 'import numpy as np\n'), ((1602, 1656), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(num_datapoints, num_tasks)'}), '(2, size=(num_datapoints, num_tasks))\n', (1619, 1656), True, 'import numpy as np\n'), ((1665, 1698), 'numpy.array', 'np.array', (["(['id'] * num_datapoints)"], {}), "(['id'] * num_datapoints)\n", (1673, 1698), True, 'import numpy as np\n'), ((1712, 1756), 'deepchem.data.DiskDataset.from_numpy', 'dc.data.DiskDataset.from_numpy', (['X', 'y', 'w', 'ids'], {}), '(X, y, w, ids)\n', (1742, 1756), True, 'import deepchem as dc\n'), ((2375, 2418), 'os.path.join', 'os.path.join', (['current_dir', '"""legacy_dataset"""'], {}), "(current_dir, 'legacy_dataset')\n", (2387, 2418), False, 'import os\n'), ((2431, 2460), 'deepchem.data.DiskDataset', 'dc.data.DiskDataset', (['data_dir'], {}), '(data_dir)\n', (2450, 2460), True, 'import deepchem as dc\n'), ((3112, 3163), 'os.path.join', 'os.path.join', (['current_dir', '"""legacy_dataset_reshard"""'], {}), "(current_dir, 'legacy_dataset_reshard')\n", (3124, 3163), False, 'import os\n'), ((3176, 3205), 'deepchem.data.DiskDataset', 'dc.data.DiskDataset', (['data_dir'], {}), '(data_dir)\n', (3195, 3205), True, 'import deepchem as dc\n'), ((4193, 4241), 'os.path.join', 'os.path.join', (['current_dir', '"""reaction_smiles.csv"""'], {}), "(current_dir, 'reaction_smiles.csv')\n", (4205, 4241), False, 'import os\n'), ((4258, 4283), 'deepchem.feat.DummyFeaturizer', 'dc.feat.DummyFeaturizer', ([], {}), '()\n', (4281, 4283), True, 'import deepchem as dc\n'), ((4295, 4372), 'deepchem.data.CSVLoader', 'dc.data.CSVLoader', ([], {'tasks': '[]', 'feature_field': '"""reactions"""', 'featurizer': 'featurizer'}), "(tasks=[], feature_field='reactions', featurizer=featurizer)\n", (4312, 4372), True, 'import deepchem as dc\n'), ((2244, 2269), 'os.path.abspath', 
'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2259, 2269), False, 'import os\n'), ((3072, 3097), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (3087, 3097), False, 'import os\n'), ((4152, 4177), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4167, 4177), False, 'import os\n')] |
# -*- coding: utf-8 -*- #
# Copyright 2015 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for manipulating text."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import collections
def Pluralize(num, word, plural=None):
"""Pluralize word based on num.
Args:
num: int, the number of objects to count.
word: str, the word to pluralize.
plural: str, the plural form of word if not "add s"
Returns:
str: the plural or singular form of word in accord with num.
"""
if num == 1:
return word
return plural or word + 's'
_SECONDS_PER = collections.OrderedDict([
('second', 1),
('minute', 60),
('hour', 60 * 60),
('day', 60 * 60 * 24)
])
def GetArticle(noun):
"""Gets article (a or an) for given noun."""
return 'an' if noun[0] in ['a', 'e', 'i', 'o', 'u'] else 'a'
def _TotalSeconds(delta):
"""Re-implementation of datetime.timedelta.total_seconds() for Python 2.6."""
return delta.days * 24 * 60 * 60 + delta.seconds
def PrettyTimeDelta(delta):
"""Pretty print the given time delta.
Rounds down.
>>> _PrettyTimeDelta(datetime.timedelta(seconds=0))
'0 seconds'
>>> _PrettyTimeDelta(datetime.timedelta(minutes=1))
'1 minute'
>>> _PrettyTimeDelta(datetime.timedelta(hours=2))
'2 hours'
>>> _PrettyTimeDelta(datetime.timedelta(days=3))
'3 days'
Args:
delta: a datetime.timedelta object
Returns:
str, a human-readable version of the time delta
"""
seconds = int(_TotalSeconds(delta))
num = seconds
unit = 'second'
for u, seconds_per in _SECONDS_PER.items():
if seconds < seconds_per:
break
unit = u
num = seconds // seconds_per
return '{0} {1}'.format(num, Pluralize(num, unit))
| [
"collections.OrderedDict"
] | [((1176, 1278), 'collections.OrderedDict', 'collections.OrderedDict', (["[('second', 1), ('minute', 60), ('hour', 60 * 60), ('day', 60 * 60 * 24)]"], {}), "([('second', 1), ('minute', 60), ('hour', 60 * 60),\n ('day', 60 * 60 * 24)])\n", (1199, 1278), False, 'import collections\n')] |
import time
a = 3215.35127
b = 3.
start = time.time()
for i in range(100000000):
c = a / b
end = time.time()
time_elapsed = end - start
print('Time elapsed (div ver) = %.5f' % time_elapsed)
a = 3215.35127
b = 1./3.
start = time.time()
for i in range(100000000):
c = a * b
end = time.time()
time_elapsed = end - start
print('Time elapsed (mul ver) = %.5f' % time_elapsed)
| [
"time.time"
] | [((44, 55), 'time.time', 'time.time', ([], {}), '()\n', (53, 55), False, 'import time\n'), ((103, 114), 'time.time', 'time.time', ([], {}), '()\n', (112, 114), False, 'import time\n'), ((231, 242), 'time.time', 'time.time', ([], {}), '()\n', (240, 242), False, 'import time\n'), ((290, 301), 'time.time', 'time.time', ([], {}), '()\n', (299, 301), False, 'import time\n')] |
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ExecutionLogPolicy(object):
    """
    Configures the logging policies for the execution logs of an API Deployment.
    """
    # NOTE: this class follows the OCI SDK generated-model conventions
    # (swagger_types/attribute_map plus a property pair per field).
    #: A constant which can be used with the log_level property of a ExecutionLogPolicy.
    #: This constant has a value of "INFO"
    LOG_LEVEL_INFO = "INFO"
    #: A constant which can be used with the log_level property of a ExecutionLogPolicy.
    #: This constant has a value of "WARN"
    LOG_LEVEL_WARN = "WARN"
    #: A constant which can be used with the log_level property of a ExecutionLogPolicy.
    #: This constant has a value of "ERROR"
    LOG_LEVEL_ERROR = "ERROR"
    def __init__(self, **kwargs):
        """
        Initializes a new ExecutionLogPolicy object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):
        :param is_enabled:
            The value to assign to the is_enabled property of this ExecutionLogPolicy.
        :type is_enabled: bool
        :param log_level:
            The value to assign to the log_level property of this ExecutionLogPolicy.
            Allowed values for this property are: "INFO", "WARN", "ERROR", 'UNKNOWN_ENUM_VALUE'.
            Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
        :type log_level: str
        """
        # Attribute name -> Swagger type, used by the SDK's generic
        # (de)serialization machinery.
        self.swagger_types = {
            'is_enabled': 'bool',
            'log_level': 'str'
        }
        # Python attribute name -> JSON key used on the wire.
        self.attribute_map = {
            'is_enabled': 'isEnabled',
            'log_level': 'logLevel'
        }
        self._is_enabled = None
        self._log_level = None
    @property
    def is_enabled(self):
        """
        Gets the is_enabled of this ExecutionLogPolicy.
        Enables pushing of execution logs to the legacy OCI Object Storage log archival bucket.
        Oracle recommends using the OCI Logging service to enable, retrieve, and query execution logs
        for an API Deployment. If there is an active log object for the API Deployment and its
        category is set to 'execution' in OCI Logging service, the logs will not be uploaded to the legacy
        OCI Object Storage log archival bucket.
        Please note that the functionality to push to the legacy OCI Object Storage log
        archival bucket has been deprecated and will be removed in the future.
        :return: The is_enabled of this ExecutionLogPolicy.
        :rtype: bool
        """
        return self._is_enabled
    @is_enabled.setter
    def is_enabled(self, is_enabled):
        """
        Sets the is_enabled of this ExecutionLogPolicy.
        Enables pushing of execution logs to the legacy OCI Object Storage log archival bucket.
        Oracle recommends using the OCI Logging service to enable, retrieve, and query execution logs
        for an API Deployment. If there is an active log object for the API Deployment and its
        category is set to 'execution' in OCI Logging service, the logs will not be uploaded to the legacy
        OCI Object Storage log archival bucket.
        Please note that the functionality to push to the legacy OCI Object Storage log
        archival bucket has been deprecated and will be removed in the future.
        :param is_enabled: The is_enabled of this ExecutionLogPolicy.
        :type: bool
        """
        self._is_enabled = is_enabled
    @property
    def log_level(self):
        """
        Gets the log_level of this ExecutionLogPolicy.
        Specifies the log level used to control logging output of execution logs.
        Enabling logging at a given level also enables logging at all higher levels.
        Allowed values for this property are: "INFO", "WARN", "ERROR", 'UNKNOWN_ENUM_VALUE'.
        Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
        :return: The log_level of this ExecutionLogPolicy.
        :rtype: str
        """
        return self._log_level
    @log_level.setter
    def log_level(self, log_level):
        """
        Sets the log_level of this ExecutionLogPolicy.
        Specifies the log level used to control logging output of execution logs.
        Enabling logging at a given level also enables logging at all higher levels.
        :param log_level: The log_level of this ExecutionLogPolicy.
        :type: str
        """
        allowed_values = ["INFO", "WARN", "ERROR"]
        # Unknown values are coerced to a sentinel so that new enum values
        # introduced by the service do not break older SDK clients.
        if not value_allowed_none_or_none_sentinel(log_level, allowed_values):
            log_level = 'UNKNOWN_ENUM_VALUE'
        self._log_level = log_level
    def __repr__(self):
        return formatted_flat_dict(self)
    def __eq__(self, other):
        # Value equality: two models are equal iff all their fields match.
        if other is None:
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not self == other
| [
"oci.util.formatted_flat_dict",
"oci.util.value_allowed_none_or_none_sentinel"
] | [((5155, 5180), 'oci.util.formatted_flat_dict', 'formatted_flat_dict', (['self'], {}), '(self)\n', (5174, 5180), False, 'from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel\n'), ((4970, 5032), 'oci.util.value_allowed_none_or_none_sentinel', 'value_allowed_none_or_none_sentinel', (['log_level', 'allowed_values'], {}), '(log_level, allowed_values)\n', (5005, 5032), False, 'from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel\n')] |
from django.contrib import admin
from .models import Search
# Register your models here.
# Make the Search model manageable through the Django admin interface.
admin.site.register(Search)
"django.contrib.admin.site.register"
] | [((90, 117), 'django.contrib.admin.site.register', 'admin.site.register', (['Search'], {}), '(Search)\n', (109, 117), False, 'from django.contrib import admin\n')] |
# <NAME>
# Institute of Mechatronic Systems
# Leibniz Universität Hannover, Germany
# 2019
# Code From https://github.com/mlaves/bayesian-temperature-scaling
import torch
__all__ = ['accuracy', 'kl_loss', 'nentr', 'xavier_normal_init']
def accuracy(input, target):
    """Top-1 accuracy of score tensor `input` (shape (N, C)) vs. label tensor `target` (shape (N,)).

    Returns the fraction of rows whose highest-scoring class matches the label,
    as a Python float.
    """
    _, predicted = torch.max(input.data, 1)
    n_correct = (predicted == target).sum().float()
    return (n_correct / predicted.size(0)).item()
def kl_loss(logits):
    """Negative mean log-softmax of `logits` over dim 1, as a scalar tensor."""
    log_probs = torch.nn.functional.log_softmax(logits, dim=1)
    return -log_probs.mean()
def nentr(p, base=None):
    """
    Calculates entropy of p to the base b. If base is None, the natural logarithm is used.
    :param p: batches of class label probability distributions (softmax output)
    :param base: base b
    :return: per-row entropy (absolute value), shape (batch,)
    """
    # Small offset keeps log() finite for zero-probability entries.
    eps = torch.tensor([1e-16], device=p.device)
    logp = (p + eps).log()
    # NOTE: truthiness check (not `is not None`) is intentional -- a base of 0
    # falls through to the natural-log branch, matching the original behavior.
    if base:
        b = torch.tensor([base], device=p.device, dtype=torch.float32)
        return (p * (logp / b.log())).sum(dim=1).abs()
    return (p * logp).sum(dim=1).abs()
def xavier_normal_init(m):
    """In-place Xavier/Glorot normal initialisation for Conv2d weights; no-op for other modules."""
    if not isinstance(m, torch.nn.Conv2d):
        return
    torch.nn.init.xavier_normal_(m.weight.data)
"torch.tensor",
"torch.max",
"torch.nn.functional.log_softmax",
"torch.nn.init.xavier_normal_"
] | [((289, 313), 'torch.max', 'torch.max', (['input.data', '(1)'], {}), '(input.data, 1)\n', (298, 313), False, 'import torch\n'), ((756, 794), 'torch.tensor', 'torch.tensor', (['[1e-16]'], {'device': 'p.device'}), '([1e-16], device=p.device)\n', (768, 794), False, 'import torch\n'), ((823, 881), 'torch.tensor', 'torch.tensor', (['[base]'], {'device': 'p.device', 'dtype': 'torch.float32'}), '([base], device=p.device, dtype=torch.float32)\n', (835, 881), False, 'import torch\n'), ((1100, 1143), 'torch.nn.init.xavier_normal_', 'torch.nn.init.xavier_normal_', (['m.weight.data'], {}), '(m.weight.data)\n', (1128, 1143), False, 'import torch\n'), ((441, 487), 'torch.nn.functional.log_softmax', 'torch.nn.functional.log_softmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (472, 487), False, 'import torch\n')] |
import unittest
from core.drive import copy
from core.aesthetics import *
class TestingDrivepy(unittest.TestCase):
    """Smoke-tests core.drive.copy against a table of source directories.

    The original implementation wrapped assertEqual in a broad
    ``except BaseException`` that only *printed* failures, so the test method
    could never actually fail; each case now runs under subTest and assertion
    errors propagate to the test runner.
    """
    def test_copy_function(self):
        """copy() should report success for every non-empty source path."""
        source_tests = [
            r"D:\Alexzander__\programming\python\Python2Executable",
            r"D:\Alexzander__\programming\python\byzantion",
            r"D:\Alexzander__\programming\python\BizidayNews",
            r"D:\Alexzander__\programming\python\bitcoin",
            r"D:\Alexzander__\programming\python\core",
        ]
        # Every case in the original table copied into the same destination.
        destination = r"D:\Alexzander__\programming\python\testing_copy_func"
        for index, source in enumerate(source_tests, start=1):
            # subTest labels each case and keeps iterating after a failure.
            with self.subTest(case=index, source=source):
                # NOTE(review): '__print' is name-mangled to
                # '_TestingDrivepy__print' inside a class body -- verify that
                # core.drive.copy actually receives this flag as intended.
                self.assertTrue(
                    copy(
                        source,
                        destination,
                        open_destination_when_done=False,
                        __print=False))
if __name__ == '__main__':
    # Run this module's unittest suite when executed as a script.
    unittest.main()
| [
"unittest.main",
"core.drive.copy"
] | [((1875, 1890), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1888, 1890), False, 'import unittest\n'), ((1266, 1340), 'core.drive.copy', 'copy', (['source', 'destination'], {'open_destination_when_done': '(False)', '__print': '(False)'}), '(source, destination, open_destination_when_done=False, __print=False)\n', (1270, 1340), False, 'from core.drive import copy\n')] |
from app import app
from flask_restful import Api
from app.resources.auth import TokenResource
# Wrap the Flask app in a flask-restful Api so resources can be registered.
api = Api(app)
# Token resource
# Registers TokenResource at /authservice/token; 'auth_token' is the endpoint
# name used for url_for() reverse lookups.
api.add_resource(TokenResource, '/authservice/token', endpoint='auth_token')
"flask_restful.Api"
] | [((103, 111), 'flask_restful.Api', 'Api', (['app'], {}), '(app)\n', (106, 111), False, 'from flask_restful import Api\n')] |
#! /usr/bin/env python
import os
import rospy
import rospkg
from readbag import restore
from qt_gui.plugin import Plugin
from python_qt_binding.QtCore import Qt
from python_qt_binding import loadUi
from python_qt_binding.QtGui import QFileDialog, QGraphicsView, QIcon, QWidget
from PyQt4 import QtGui, QtCore
from example_ui import *
from PyQt4 import QtGui
from v2 import Ui_addbag
class Form1(QtGui.QWidget, Ui_addbag):
    """Widget combining a generated Qt form (Ui_addbag) with button wiring.

    NOTE(review): handleButton spawns a *second* Form1 and hides this one;
    presumably a simple two-window flow -- confirm the intended UX.
    """
    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.setupUi(self)  # populate widgets from the generated UI class
        self.pushButton_2.clicked.connect(self.handleButton)
        # Child window, created lazily on the first button press.
        self.window2 = None
    def handleButton(self):
        """Show the secondary window (creating it once) and hide this one."""
        if self.window2 is None:
            self.window2 = Form1(self)
        self.window2.show()
        self.hide()
self.hide()
def pop():
    """Create the Qt application, show a Form1 window, and block until exit."""
    import sys
    app = QtGui.QApplication(sys.argv)
    window = Form1()
    window.show()
    sys.exit(app.exec_())
"PyQt4.QtGui.QApplication",
"PyQt4.QtGui.QWidget.__init__"
] | [((812, 840), 'PyQt4.QtGui.QApplication', 'QtGui.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (830, 840), False, 'from PyQt4 import QtGui\n'), ((470, 506), 'PyQt4.QtGui.QWidget.__init__', 'QtGui.QWidget.__init__', (['self', 'parent'], {}), '(self, parent)\n', (492, 506), False, 'from PyQt4 import QtGui\n')] |
import models
import os
import copy
import torch
import torch.nn as nn
from lifelines import KaplanMeierFitter as KMFitter
import pycox
import numpy as np
# local
import catdist
import data_utils
import _concordance
import _nll
import _saver
def str_to_bool(arg):
    """Convert an argument string into its boolean value.
    Args:
        arg: String representing a bool.
    Returns:
        Boolean value for the string.
    Raises:
        argparse.ArgumentTypeError: If `arg` is not a recognized boolean string.
    """
    # Local import: the original referenced argparse without importing it
    # anywhere in this module, so the error branch raised NameError instead
    # of the intended ArgumentTypeError.
    import argparse
    value = arg.lower()
    if value in ('yes', 'true', 't', 'y', '1'):
        return True
    if value in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def isnan(x):
    """Tensor holding True if any element of `x` is NaN."""
    return torch.isnan(x).any()
def safe_log(x, eps):
    """log(x + eps): logarithm that stays finite when x contains zeros."""
    return torch.log(x + eps)
def clip(prob, clip_min):
    """Floor `prob` elementwise at `clip_min` (no upper bound)."""
    return torch.clamp(prob, min=clip_min)
def round3(x):
    """Round `x` to three decimal places."""
    return round(x, ndigits=3)
class Meter:
    """Running-sum accumulator reporting an average rounded to 4 decimals."""
    def __init__(self):
        # Count of contributing items and their accumulated total.
        self.N = 0
        self.total = 0
    def update(self, val, N):
        """Fold `val` into the running total, counting `N` items."""
        self.N += N
        self.total += val
    def avg(self):
        """Average over everything seen so far, rounded to 4 decimal places."""
        return round(self.total / self.N, 4)
############################################
############ KM G IPCW F BS and BLL ########
############################################
def cdfvals_to_probs(cdfvals, args):
    """Convert batched CDF values (shape (B, K)) into per-bin probabilities.

    Bin k gets cdf[k] - cdf[k-1]; the last bin absorbs the remaining mass so
    each row sums to exactly 1.
    """
    K = cdfvals.shape[1]
    probs = torch.zeros_like(cdfvals).to(args.device)
    probs[:, 0] = cdfvals[:, 0]
    # Vectorized first-difference over the interior bins (k = 1 .. K-2).
    probs[:, 1:K - 1] = cdfvals[:, 1:K - 1] - cdfvals[:, 0:K - 2]
    probs[:, K - 1] = 1 - probs[:, :K - 1].sum(-1)
    return probs
def cdfvals_to_dist(cdfvals, bsz, args):
    """Tile a 1-D CDF over a batch of size `bsz` and wrap the implied pmf in a CatDist."""
    tiled = cdfvals.unsqueeze(0).repeat(bsz, 1)
    probs = cdfvals_to_probs(tiled, args)
    # Sanity check: every row must be a valid probability distribution.
    assert torch.all((probs.sum(-1) - 1.0).abs() < 1e-4)
    return catdist.CatDist(logits=None, args=args, probs=probs, k=None)
def get_KM_cdfvals(loader,args):
    """Kaplan-Meier estimate of a CDF over the loader's dataset, on support 0..args.K-1.

    NOTE(review): assumes ``loader.dataset.U`` holds integer-valued times
    < args.K (they are used directly as tensor indices) and that
    ``loader.dataset.Delta`` is a boolean event-indicator tensor -- confirm
    against the dataset class.
    """
    u=loader.dataset.U
    delta=loader.dataset.Delta
    durations = u.cpu().numpy()
    # Passing ~delta treats censoring as the "event"; per the "KM G IPCW"
    # section header this estimates the censoring distribution G.
    is_censored = ~delta.cpu().numpy()
    km = pycox.utils.kaplan_meier
    surv_func = km(durations,is_censored).to_numpy()
    cdf_func = 1. - surv_func
    # Support of the KM estimate: distinct observed times, ascending.
    km_support = np.sort(np.unique(durations))
    cdfvals = torch.zeros(args.K).to(args.device)
    for i,val in enumerate(km_support):
        cdfvals[val] = cdf_func[i]
    # Forward-fill: bins with no observation inherit the previous CDF value
    # (a CDF is non-decreasing); index 0 is left at whatever the KM gave it.
    for i,val in enumerate(cdfvals):
        if i > 0:
            if val==0.0:
                cdfvals[i]=cdfvals[i-1]
    return cdfvals
| [
"numpy.unique",
"catdist.CatDist",
"torch.zeros_like",
"torch.isnan",
"torch.zeros"
] | [((1717, 1778), 'catdist.CatDist', 'catdist.CatDist', ([], {'logits': 'None', 'args': 'args', 'probs': 'Gprobs', 'k': 'None'}), '(logits=None, args=args, probs=Gprobs, k=None)\n', (1732, 1778), False, 'import catdist\n'), ((699, 713), 'torch.isnan', 'torch.isnan', (['x'], {}), '(x)\n', (710, 713), False, 'import torch\n'), ((2097, 2117), 'numpy.unique', 'np.unique', (['durations'], {}), '(durations)\n', (2106, 2117), True, 'import numpy as np\n'), ((1293, 1318), 'torch.zeros_like', 'torch.zeros_like', (['cdfvals'], {}), '(cdfvals)\n', (1309, 1318), False, 'import torch\n'), ((2137, 2156), 'torch.zeros', 'torch.zeros', (['args.K'], {}), '(args.K)\n', (2148, 2156), False, 'import torch\n')] |
from distutils.core import setup
from pathlib import Path
# TODO(joschnei): Add typing info
# Minimal distutils packaging metadata for the "active" package.
setup(
    name="active",
    version="0.1",
    packages=["active",],
    install_requires=[
        "scipy",
        "numpy",
        # Pulled directly from GitHub (not published on PyPI).
        "driver @ git+https://github.com/jordan-schneider/driver-env.git#egg=driver",
    ],
    # 'py.typed' is the PEP 561 marker that tells type checkers this
    # package ships inline type information.
    package_data = {
        'active': ['py.typed'],
    },
)
| [
"distutils.core.setup"
] | [((94, 318), 'distutils.core.setup', 'setup', ([], {'name': '"""active"""', 'version': '"""0.1"""', 'packages': "['active']", 'install_requires': "['scipy', 'numpy',\n 'driver @ git+https://github.com/jordan-schneider/driver-env.git#egg=driver'\n ]", 'package_data': "{'active': ['py.typed']}"}), "(name='active', version='0.1', packages=['active'], install_requires=[\n 'scipy', 'numpy',\n 'driver @ git+https://github.com/jordan-schneider/driver-env.git#egg=driver'\n ], package_data={'active': ['py.typed']})\n", (99, 318), False, 'from distutils.core import setup\n')] |
import tableprint
from collections import Counter
from .args import *
from .util import *
if __name__ == "__main__":
    args = get_args()
    # Tally answer classes and question types in one streaming pass over the
    # dataset, printing each record as a table row as it goes.
    output_classes = Counter()
    question_types = Counter()
    with tableprint.TableContext(headers=["Type", "Question", "Answer"], width=[40,50,15]) as t:
        for i in read_gqa(args):
            output_classes[i["answer"]] += 1
            question_types[i["question"]["type_string"]] += 1
            t([
                i["question"]["type_string"],
                i["question"]["english"],
                i["answer"]
            ])
    def second(v):
        # Sort key: the count in a (key, count) pair.
        return v[1]
    # Summary tables, sorted by ascending frequency.
    tableprint.table(headers=["Answer", "Count"], width=[20,5], data=sorted(output_classes.items(), key=second))
    tableprint.table(headers=["Question", "Count"], width=[20,5], data=sorted(question_types.items(), key=second))
| [
"collections.Counter",
"tableprint.TableContext"
] | [((160, 169), 'collections.Counter', 'Counter', ([], {}), '()\n', (167, 169), False, 'from collections import Counter\n'), ((188, 197), 'collections.Counter', 'Counter', ([], {}), '()\n', (195, 197), False, 'from collections import Counter\n'), ((206, 294), 'tableprint.TableContext', 'tableprint.TableContext', ([], {'headers': "['Type', 'Question', 'Answer']", 'width': '[40, 50, 15]'}), "(headers=['Type', 'Question', 'Answer'], width=[40, \n 50, 15])\n", (229, 294), False, 'import tableprint\n')] |
#!/usr/bin/env python3
# Copyright (2021-) <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Tuple, Iterable
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Layer
from tensorflow.keras.initializers import Initializer, GlorotUniform
from kaldi_tflite.lib.layers.tdnn.utils import reshapeKaldiTdnnWeights
class TDNN(Layer):
"""
This layer implements a kaldi styled time delayed neural network layer.
It's implemented to produce the same output as a TDNN layer implemented
in Kaldi's Nnet3 framework.
Asymmetrical left / right context is allowed just like Kaldi's splicing
specification (e.g. context = [-3, -1, 0, 1]).
This layer's weights can be intialized using the `<LinearParams>` and
`<BiasParams>` of tdnn.affine components with the same number of units
and context configuration as this layer.
"""
def __init__(self,
units: int,
context: list = [0],
subsampling_factor: int = 1,
padding: str = "SAME",
use_bias: bool = True,
kernel_initializer: Initializer = GlorotUniform(),
bias_initializer: Initializer = GlorotUniform(),
activation: str = None,
name: str = None,
**kwargs):
"""
Instantiates a TDNN layer with the given configuration.
Parameters
----------
units : int
Dimension of layer output.
context: list, optional,
List of timesteps to use in the convolution where 0 is the current
timestep and -N would be the previous Nth timestep and +N would be
the future Nth timestep. By default [0], no temporal context.
subsampling_factor: int, optional
If set to N, will evaluate output for kernel centered at every
Nth timestep in the input. By default, 1 (no subsampling).
padding: str, optional
Padding option can be either "SAME" or "VALID". If "SAME", the input
will be padded so that the output has the same number of timesteps as
the input when subsampling_factor = 1. If "VALID", no padding will be
done, and the kernel will be evaluated only at timestamps where it is
completely within the input. By default "SAME", (same as Kaldi).
use_bias: bool, optional
If true, bias vector added to layer output, by default True.
kernel_initializer: tf.keras.initializers.Initializer, optional
Initializer to use when randomly initializing TDNN kernel weights, by
default GlorotUniform (also called Xavier uniform initializer).
bias_initializer: tf.keras.initializers.Initializer, optional
Initializer to use when randomly initializing bias vector, by
default GlorotUniform (also called Xavier uniform initializer).
name : str, optional
Name of the given layer. Auto set if set to None.
By default None.
"""
super(TDNN, self).__init__(trainable=True, name=name, **kwargs)
self.units = units
self.useBias = use_bias
self.subsamplingFactor = subsampling_factor
if self.subsamplingFactor <= 0:
raise ValueError("subsampling_factor should be > 0")
self.padding = padding.upper()
if self.padding not in ["VALID", "SAME"]:
raise ValueError("padding should be either 'VALID' or 'SAME'")
if context is None:
self.context = [0]
elif isinstance(context, int):
self.context = [context]
elif isinstance(context, list):
self.context = context if len(context) > 0 else [0]
else:
raise ValueError("context should be None, a list or an integer")
self.context.sort()
self.contextOffset = tf.constant([context], dtype=tf.int32)
self.kernelWidth = len(context)
self.kernelInitializer = kernel_initializer
self.biasInitializer = bias_initializer
self.activation = activation
if self.activation is not None:
self.activationFunc = tf.keras.activations.get(activation)
# Inputs to this layers are expected to be in the shape
# (batch, timesteps, featdim)
self.batchAxis = 0
self.timeAxis = 1
self.featAxis = -1
def build(self, input_shape: tuple):
super(TDNN, self).build(input_shape)
inputFeatDim = input_shape[self.featAxis]
# Convolutional kernel weights; 2D kernel with length = 1 and width =
# length of specified context timesteps. We use a 2D convolution kernel
# here because it becomes simpler to apply on how the inputs are shaped
# after applying tf.gather on them; see call()
self.kernel = self.add_weight(
name='kernel',
shape=(1, self.kernelWidth, inputFeatDim, self.units),
initializer=self.kernelInitializer,
)
# Bias vector.
self.bias = None
if self.useBias:
self.bias = self.add_weight(
name="bias",
shape=(self.units,),
initializer=self.biasInitializer,
)
def compute_output_shape(self, input_shape) -> tuple:
batchSize = input_shape[self.batchAxis]
inputTimesteps = input_shape[self.timeAxis]
start, end = self.getStartEndSteps(inputTimesteps)
outputTimesteps = (end - start) / self.subsamplingFactor
outputShape = (batchSize, outputTimesteps, self.units)
return outputShape
def get_config(self) -> dict:
config = super(TDNN, self).get_config()
config.update({
"units": self.units,
"context": self.context,
"subsampling_factor": self.subsamplingFactor,
"padding": self.padding,
"use_bias": self.useBias,
"kernel_intializer": self.kernelInitializer,
"bias_initializer": self.biasInitializer,
"activation": self.activation,
})
return config
def set_weights(self, weights: Iterable[np.ndarray], fmt: str = "kaldi"):
"""
Sets the weights of the layer, from numpy arrays. The weights can either
be in the shape and order kaldi provides them in (2D matrices for kernels
and 1D vector for biases) or how tensorflow expects them (output of
`get_weights()`).
Parameters
----------
weights : Iterable[np.ndarray]
Kernel and Bias weights as a list of numpy arrays. If the layer is
configured to not use bias vector, only kernel weights are expected
in the list.
fmt : str, optional
The format in which the weights of the kernel are arranged in -
either "kaldi" or "tensorflow", by default "kaldi".
Raises
------
ValueError
If the "order" is not "kaldi" or "tensorflow".
if the number of weights in the weight list is unexpected.
If the shape of the weights do not match expected shapes.
"""
fmt = fmt.lower()
if fmt not in ["kaldi", "tensorflow"]:
raise ValueError(f"expected 'fmt' to be either 'kaldi' or 'tensorflow', got {fmt}")
if len(weights) == 0:
raise ValueError(f"expected a weight list of at least length 2, got 0")
if self.useBias:
if len(weights) != 2:
raise ValueError(f"expected a weight list of length 2, got {len(weights)}")
kernel = weights[0]
if fmt == "kaldi":
kernel = reshapeKaldiTdnnWeights(kernel, self.units, self.kernelWidth)
if self.useBias:
bias = weights[1]
return super(TDNN, self).set_weights([kernel, bias])
return super(TDNN, self).set_weights([kernel])
def getStartEndSteps(self, inputTimesteps: int) -> Tuple[int, int]:
start = 0
end = inputTimesteps
if self.padding == "VALID":
if self.context[0] < 0:
start = -1 * self.context[0]
if self.context[-1] > 0:
end = inputTimesteps - self.context[-1]
return start, end
def getIndicesToEval(self, inputTimesteps: int) -> tf.Tensor:
start, end = self.getStartEndSteps(inputTimesteps)
indices = tf.range(start=start, limit=end, delta=self.subsamplingFactor)
context = tf.tile(input=self.contextOffset, multiples=[tf.size(indices), 1])
indices = tf.expand_dims(indices, axis=1)
indices = context + indices
# Limiting indices to be within bounds. This is equivalent to padding
# the input by repeating the values at the boundaries.
if self.padding == "SAME":
indices = tf.clip_by_value(indices, 0, inputTimesteps - 1)
return indices
def call(self, inputs):
inputShape = tf.shape(inputs)
inputTimesteps = inputShape[self.timeAxis]
# inputToEval has shape = (batch, numEval, kernelWidth, inputFeatDim)
indicesToEval = self.getIndicesToEval(inputTimesteps)
inputToEval = tf.gather(params=inputs, indices=indicesToEval, axis=self.timeAxis)
# Using 2D convolution with a kernel length of 1, effectively 1D
# convolution along kernel width. It works out easier this way when
# working with tf.gather.
#
# Furthermore, tf.nn.conv1d reshapes the inputs and invokes tf.nn.conv2d
# anyway (https://www.tensorflow.org/api_docs/python/tf/nn/conv1d)
output = tf.nn.conv2d(
inputToEval, self.kernel, strides=(1, 1), padding="VALID", data_format="NHWC",
)
# Removing the dimension along kernelWidth since it has become 1 after
# applying the convolution above.
output = tf.squeeze(output, axis=-2)
if self.useBias:
output = output + self.bias
if self.activation is not None:
output = self.activationFunc(output)
return output
| [
"tensorflow.nn.conv2d",
"tensorflow.shape",
"tensorflow.keras.activations.get",
"tensorflow.keras.initializers.GlorotUniform",
"kaldi_tflite.lib.layers.tdnn.utils.reshapeKaldiTdnnWeights",
"tensorflow.range",
"tensorflow.gather",
"tensorflow.constant",
"tensorflow.clip_by_value",
"tensorflow.expan... | [((1765, 1780), 'tensorflow.keras.initializers.GlorotUniform', 'GlorotUniform', ([], {}), '()\n', (1778, 1780), False, 'from tensorflow.keras.initializers import Initializer, GlorotUniform\n'), ((1831, 1846), 'tensorflow.keras.initializers.GlorotUniform', 'GlorotUniform', ([], {}), '()\n', (1844, 1846), False, 'from tensorflow.keras.initializers import Initializer, GlorotUniform\n'), ((4546, 4584), 'tensorflow.constant', 'tf.constant', (['[context]'], {'dtype': 'tf.int32'}), '([context], dtype=tf.int32)\n', (4557, 4584), True, 'import tensorflow as tf\n'), ((9106, 9168), 'tensorflow.range', 'tf.range', ([], {'start': 'start', 'limit': 'end', 'delta': 'self.subsamplingFactor'}), '(start=start, limit=end, delta=self.subsamplingFactor)\n', (9114, 9168), True, 'import tensorflow as tf\n'), ((9272, 9303), 'tensorflow.expand_dims', 'tf.expand_dims', (['indices'], {'axis': '(1)'}), '(indices, axis=1)\n', (9286, 9303), True, 'import tensorflow as tf\n'), ((9663, 9679), 'tensorflow.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (9671, 9679), True, 'import tensorflow as tf\n'), ((9894, 9961), 'tensorflow.gather', 'tf.gather', ([], {'params': 'inputs', 'indices': 'indicesToEval', 'axis': 'self.timeAxis'}), '(params=inputs, indices=indicesToEval, axis=self.timeAxis)\n', (9903, 9961), True, 'import tensorflow as tf\n'), ((10329, 10424), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['inputToEval', 'self.kernel'], {'strides': '(1, 1)', 'padding': '"""VALID"""', 'data_format': '"""NHWC"""'}), "(inputToEval, self.kernel, strides=(1, 1), padding='VALID',\n data_format='NHWC')\n", (10341, 10424), True, 'import tensorflow as tf\n'), ((10583, 10610), 'tensorflow.squeeze', 'tf.squeeze', (['output'], {'axis': '(-2)'}), '(output, axis=-2)\n', (10593, 10610), True, 'import tensorflow as tf\n'), ((4838, 4874), 'tensorflow.keras.activations.get', 'tf.keras.activations.get', (['activation'], {}), '(activation)\n', (4862, 4874), True, 'import tensorflow as tf\n'), 
((8364, 8425), 'kaldi_tflite.lib.layers.tdnn.utils.reshapeKaldiTdnnWeights', 'reshapeKaldiTdnnWeights', (['kernel', 'self.units', 'self.kernelWidth'], {}), '(kernel, self.units, self.kernelWidth)\n', (8387, 8425), False, 'from kaldi_tflite.lib.layers.tdnn.utils import reshapeKaldiTdnnWeights\n'), ((9539, 9587), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['indices', '(0)', '(inputTimesteps - 1)'], {}), '(indices, 0, inputTimesteps - 1)\n', (9555, 9587), True, 'import tensorflow as tf\n'), ((9232, 9248), 'tensorflow.size', 'tf.size', (['indices'], {}), '(indices)\n', (9239, 9248), True, 'import tensorflow as tf\n')] |
from django.shortcuts import render
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status, permissions
import pyotp
import base64
from rest_framework_simplejwt.tokens import RefreshToken
from .serializers import CustomUserSerializer
from .models import CustomUser
from .models import MagicLink
from .utils import send_operations
class UserLogin(APIView):
    """
    Handles login POST requests.

    Looks the user up by email or phone number, triggers the login side
    effects (via send_operations) and returns the username on success.
    """
    def post(self, request):
        """Locate the user by email or phone_number and start the login flow.

        Returns 400 when no matching user exists or when neither lookup
        field was supplied.
        """
        email = request.data.get('email')
        phone = request.data.get('phone_number')
        user = None
        try:
            if email:
                user = CustomUser.objects.get(email=email)
            if phone:
                # The original also fetched CustomUser.objects.all() into an
                # unused variable here -- a full-table query for nothing.
                user = CustomUser.objects.get(phone_number=phone)
        except CustomUser.DoesNotExist:
            return Response(status=status.HTTP_400_BAD_REQUEST)
        # If neither email nor phone was supplied, user is still None and the
        # code below would crash on user.username (a 500 instead of a 400).
        if user is None:
            return Response(status=status.HTTP_400_BAD_REQUEST)
        send_operations(request, user)
        return Response({'status': 201, 'userdata': user.username})
class CustomUserCreate(APIView):
    """
    Registers a new user from the posted payload.
    """
    permission_classes = (permissions.AllowAny, )
    def post(self, request):
        """Validate and persist a new user, then kick off login operations."""
        serializer = CustomUserSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(status=status.HTTP_400_BAD_REQUEST)
        user = serializer.save()
        if not user:
            return Response(status=status.HTTP_400_BAD_REQUEST)
        # Advance the HOTP counter before dispatching the OTP, so the code
        # sent by send_operations can be verified against it.
        user.counter += 1
        user.save()
        send_operations(request, user)
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class DashboardView(APIView):
    """
    A JWT-protected sanity-check endpoint.
    """
    permission_classes = (permissions.IsAuthenticated,)
    def get(self, request):
        """Return a static welcome payload to any authenticated caller."""
        payload = {"message": "welcome home"}
        return Response(data=payload, status=status.HTTP_200_OK)
class VerifyOTPView(APIView):
    """
    Verifies a submitted OTP code and issues a JWT pair for the user.
    """
    def get_tokens_for_user(self, user, otp):
        """
        Generate a refresh/access JWT pair carrying the otp code as a claim.
        """
        refresh = RefreshToken.for_user(user)
        refresh['otp'] = otp
        return {
            'refresh': str(refresh),
            'access': str(refresh.access_token),
        }
    def post(self, request):
        """Check the posted otpCode against the user's HOTP counter."""
        username = request.data.get('username')
        # objects.get() raises DoesNotExist for unknown users, which used to
        # bubble up as an unhandled 500; filter().first() makes the
        # "user does not exist" branch below actually reachable. A stray
        # debugging print() was also removed.
        user = CustomUser.objects.filter(username=username).first()
        if user is not None:
            # The HOTP secret is derived from the username, matching how the
            # code was generated at send time.
            key = base64.b32encode(user.username.encode())
            otp = pyotp.HOTP(key)
            if otp.verify(request.data['otpCode'], user.counter):
                user.isVerified = True
                user.code = otp.at(user.counter)
                user.save()
                token = self.get_tokens_for_user(user, user.code)
                return Response({'status': 200, 'message': 'otp verified', 'token': token})
            else:
                return Response({'status': 400, 'message': 'wrong otp code'})
        return Response({'status': 400, 'message': 'user does not exist'})
class LoginUserFromEmail(APIView):
    """
    creates a jwt from url associated with user
    """
    def post(self,request):
        # NOTE(review): objects.last() ignores the request entirely and
        # grabs whichever user row is last in the default ordering --
        # presumably the magic-link token in the URL/body should identify
        # the user instead; confirm the intended lookup.
        user = CustomUser.objects.last()
        if user is not None:
            # NOTE(review): MagicLink.objects.get raises DoesNotExist
            # (an unhandled 500) if the user has no magic-link row --
            # TODO confirm this cannot happen in practice.
            magic_link = MagicLink.objects.get(user=user)
            magic_link_token = magic_link.get_tokens_for_user(user)
            return Response({'status': 200, 'message': 'magiclink ok', 'token': magic_link_token})
        return Response({'status': 400, 'message': 'user does not exist'})
| [
"rest_framework_simplejwt.tokens.RefreshToken.for_user",
"rest_framework.response.Response",
"pyotp.HOTP"
] | [((1059, 1111), 'rest_framework.response.Response', 'Response', (["{'status': 201, 'userdata': user.username}"], {}), "({'status': 201, 'userdata': user.username})\n", (1067, 1111), False, 'from rest_framework.response import Response\n'), ((1623, 1667), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(status=status.HTTP_400_BAD_REQUEST)\n', (1631, 1667), False, 'from rest_framework.response import Response\n'), ((1836, 1905), 'rest_framework.response.Response', 'Response', ([], {'data': "{'message': 'welcome home'}", 'status': 'status.HTTP_200_OK'}), "(data={'message': 'welcome home'}, status=status.HTTP_200_OK)\n", (1844, 1905), False, 'from rest_framework.response import Response\n'), ((2149, 2176), 'rest_framework_simplejwt.tokens.RefreshToken.for_user', 'RefreshToken.for_user', (['user'], {}), '(user)\n', (2170, 2176), False, 'from rest_framework_simplejwt.tokens import RefreshToken\n'), ((3050, 3109), 'rest_framework.response.Response', 'Response', (["{'status': 400, 'message': 'user does not exist'}"], {}), "({'status': 400, 'message': 'user does not exist'})\n", (3058, 3109), False, 'from rest_framework.response import Response\n'), ((3551, 3610), 'rest_framework.response.Response', 'Response', (["{'status': 400, 'message': 'user does not exist'}"], {}), "({'status': 400, 'message': 'user does not exist'})\n", (3559, 3610), False, 'from rest_framework.response import Response\n'), ((2583, 2598), 'pyotp.HOTP', 'pyotp.HOTP', (['key'], {}), '(key)\n', (2593, 2598), False, 'import pyotp\n'), ((3456, 3535), 'rest_framework.response.Response', 'Response', (["{'status': 200, 'message': 'magiclink ok', 'token': magic_link_token}"], {}), "({'status': 200, 'message': 'magiclink ok', 'token': magic_link_token})\n", (3464, 3535), False, 'from rest_framework.response import Response\n'), ((960, 1004), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_400_BAD_REQUEST'}), 
'(status=status.HTTP_400_BAD_REQUEST)\n', (968, 1004), False, 'from rest_framework.response import Response\n'), ((1550, 1607), 'rest_framework.response.Response', 'Response', (['serializer.data'], {'status': 'status.HTTP_201_CREATED'}), '(serializer.data, status=status.HTTP_201_CREATED)\n', (1558, 1607), False, 'from rest_framework.response import Response\n'), ((2870, 2938), 'rest_framework.response.Response', 'Response', (["{'status': 200, 'message': 'otp verified', 'token': token}"], {}), "({'status': 200, 'message': 'otp verified', 'token': token})\n", (2878, 2938), False, 'from rest_framework.response import Response\n'), ((2980, 3034), 'rest_framework.response.Response', 'Response', (["{'status': 400, 'message': 'wrong otp code'}"], {}), "({'status': 400, 'message': 'wrong otp code'})\n", (2988, 3034), False, 'from rest_framework.response import Response\n')] |
import argparse


def _str2bool(value):
    """Parse a boolean CLI value.

    argparse's bare `type=bool` treats any non-empty string -- including
    'False' -- as True, so an explicit parser is required.
    """
    if isinstance(value, bool):
        return value
    if value.lower() in ('true', '1', 'yes', 'y'):
        return True
    if value.lower() in ('false', '0', 'no', 'n'):
        return False
    raise argparse.ArgumentTypeError('expected a boolean, got {!r}'.format(value))


# Model/hyper-parameter options; parsed into a plain dict at the bottom.
parser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--d_model', type=int, default=0, help='d_model')
parser.add_argument('--d_head', type=int, default=2, help='head')
# type=bool would parse '--d_inner False' as True; _str2bool fixes that
# while keeping the same default.
parser.add_argument('--d_inner', type=_str2bool, default=True, help='inner layers')
parser.add_argument('--n_token', type=str, default='roberta-base', help='number of tokens')
parser.add_argument('--n_layer', type=str, default='gru', help='number of hidden layers')
parser.add_argument('--n_head', type=int, default=2, help='num attention heads')
# NOTE(review): a 'dropout' default of 1024 looks like a copy-paste value
# (probably a hidden size) -- confirm the intended default.
parser.add_argument('--dropout', type=int, default=1024, help='dropout')
# dropatt and output_dropout_prob carried type=int with float defaults:
# the default stayed 0.5 but any value passed on the command line was
# truncated to an integer. They are now parsed as floats.
parser.add_argument('--dropatt', type=float, default=0.5, help='dropatt')
# NOTE(review): default=1024 for a *_prob argument looks wrong -- confirm.
parser.add_argument('--attention_dropout_prob', type=int, default=1024, help='attention_dropout_prob')
parser.add_argument('--output_dropout_prob', type=float, default=0.5, help='output_dropout_prob')
args = parser.parse_args()
# Downstream consumers read the options as a plain dict.
args = vars(args)
| [
"argparse.ArgumentParser"
] | [((25, 104), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (48, 104), False, 'import argparse\n')] |
import logging
import sys
import os
from logging.handlers import RotatingFileHandler
from multiprocessing.pool import ThreadPool
from optparse import OptionParser
import requests
from requests.packages import urllib3
# Silence urllib3's InsecureRequestWarning: requests below are issued with
# verify=False (TLS certificates are deliberately not validated).
urllib3.disable_warnings()
# Workers configurations
ASYNC_WORKERS_COUNT = 100 # How many threads will make http requests.
WORKERS_DECREMENTED_COUNT_ON_ERROR = 10 # Retry the fuzzing with x less workers, to decrease the load on the server.
STARTED_JOB_LOG_INTERVAL = 100 # Every x started jobs, a log will be written
# IO Configurations
DEFAULT_PATHS_LIST_FILE = 'words_lists/Filenames_or_Directories_Common.wordlist'
VALID_ENDPOINTS_FILE = 'endpoints.txt'
# HTTP Configuration
# 2xx means the resource exists; 401/402/403 mean it exists but is protected.
RESOURCE_EXISTS_STATUS_CODES = list(range(200, 300)) + [401, 402, 403]
DEFAULT_BASE_URL = 'https://www.example.com'
# Logging configurations
LOGS_DIRECTORY_FULL_NAME = 'logs'
LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
LOGGING_LEVEL = logging.INFO
BACKUP_LOGS_FILES_COUNT = 5
FUZZING_LOGGER_NAME = 'fuzzing'
LOG_FILE_MAX_BYTES = 0.5 * 1000 * 1000 # 500 KB
class FilesFactory(object):
    """
    Collects wordlist lines from a file or from every file in a directory.

    NOTE(review): `files` and `urls` are *class* attributes, so results
    accumulate across instances. The __main__ block below relies on reading
    FilesFactory.urls after construction, so this behavior is deliberately
    preserved; confirm before converting them to instance attributes.
    """
    files = []
    urls = []
    def read_files_from_directory(self, user_path):
        """Collect full paths of all regular files directly under user_path."""
        self.files = [os.path.join(user_path, f) for f in os.listdir(user_path) if os.path.isfile(os.path.join(user_path, f))]
    def read_lines_from_files(self):
        """Append every line of every collected file to the shared url list."""
        for file_path in self.files:
            # The original leaked the file handle (open without close);
            # `with` guarantees it is closed even on read errors.
            with open(file_path, 'r') as handle:
                self.urls += handle.read().splitlines()
    def __init__(self, user_path):
        """Load lines from user_path, which may be a file or a directory."""
        if os.path.isdir(user_path):
            self.read_files_from_directory(user_path)
            self.read_lines_from_files()
        elif os.path.isfile(user_path):
            self.files.append(user_path)
            self.read_lines_from_files()
class LoggerFactory(object):
    """
    Creates and caches configured loggers (console + rotating file output).
    """
    loggers = {}
    logging_level = LOGGING_LEVEL
    # Console output for every logger created through this factory.
    logging.basicConfig(stream=sys.stdout, level=logging_level,
                        format=LOG_FORMAT)
    # Modifying the logger's level to WARNING to prevent console spam
    logging.getLogger('urllib3').setLevel(logging.WARNING)
    @staticmethod
    def get_logger(logger_name):
        """
        Gets a logger by its name. Creates the logger if it doesn't exist yet.
        :param logger_name: The name of the logger (identifier).
        :return: The logger instance.
        :returns: Logger
        """
        if logger_name not in LoggerFactory.loggers:
            LoggerFactory.loggers[logger_name] = LoggerFactory._get_logger(logger_name)
        return LoggerFactory.loggers[logger_name]
    @staticmethod
    def _get_logger(logger_name, logs_directory_path=LOGS_DIRECTORY_FULL_NAME):
        """
        Creates a logger with a rolling file handler, or returns it untouched
        if it is already configured.
        :param logger_name: The name of the logger
        :param logs_directory_path: The path of the directory that the logs will be written to.
        :return: An initialized logger instance.
        :returns: Logger
        """
        # exist_ok avoids the race between the original exists() check and
        # mkdir() when several processes start at once.
        os.makedirs(logs_directory_path, exist_ok=True)
        logger = logging.getLogger(logger_name)
        # Guard against attaching duplicate file handlers when this method
        # is called directly (bypassing the get_logger() cache); the old
        # docstring claimed this but the code always added a new handler.
        if logger.handlers:
            return logger
        formatter = logging.Formatter(LOG_FORMAT)
        # Rolling file handler: caps disk usage at
        # LOG_FILE_MAX_BYTES * (BACKUP_LOGS_FILES_COUNT + 1) per logger.
        rotating_file_handler = RotatingFileHandler(
            os.path.join(logs_directory_path, '{0}.log'.format(logger_name)), maxBytes=LOG_FILE_MAX_BYTES,
            backupCount=BACKUP_LOGS_FILES_COUNT)
        rotating_file_handler.setFormatter(formatter)
        rotating_file_handler.setLevel(LOGGING_LEVEL)
        logger.addHandler(rotating_file_handler)
        return logger
class AsyncURLFuzzer(object):
    """
    An asynchronous http(s) website endpoint locator.
    Discovers active endpoints in websites, based on a list of common URLS.
    """
    def __init__(self, base_url=DEFAULT_BASE_URL, list_file=DEFAULT_PATHS_LIST_FILE,
                 async_workers_count=ASYNC_WORKERS_COUNT,
                 output_file=VALID_ENDPOINTS_FILE, resource_exists_status_codes=RESOURCE_EXISTS_STATUS_CODES):
        """
        Initializes a new member of this class.
        :param base_url: The base url of the website.
        :type base_url: str
        :param list_file: The path of a file, containing the paths to check.
        :type list_file: str
        :param async_workers_count: How many workers (threads) to use.
        :type async_workers_count: int
        :param output_file: The name of the active endpoints output file.
        :type output_file: str
        :param resource_exists_status_codes: A list of HTTP status codes to consider as valid.
        :type resource_exists_status_codes: list
        """
        self._logger = LoggerFactory.get_logger(FUZZING_LOGGER_NAME)
        self._base_url = base_url
        self._list_file_path = list_file
        self._async_workers_count = async_workers_count
        self._output_file_path = output_file
        self._resource_exists_status_codes = resource_exists_status_codes
        self._active_paths_status_codes = {}
        self._checked_endpoints = {}
        self._endpoints_total_count = 0
        # One session for all requests so TCP connections are pooled/reused.
        self._session = requests.session()
    def start(self):
        """
        Starts the fuzzing with the initialized parameters.
        """
        self._get_website_endpoints()
    def _get_website_endpoints(self, async_workers_count=ASYNC_WORKERS_COUNT):
        """
        Requests asynchronously for all the resources with a number of workers (threads).
        If it fails for HTTP overloads reasons, it retries with less workers, because it's probably a DDOS
        protection mechanism.
        :param async_workers_count: How many workers (threads) to use.
        :type async_workers_count: int
        """
        self._load_paths_list()
        self._logger.info(
            'Getting the endpoints of the website {0} with list file "{1}" and {2} async workers.'.format(
                self._base_url,
                self._list_file_path,
                async_workers_count))
        if 0 >= async_workers_count:
            self._logger.error('Seems like the site does not support fuzzing, as it has a DDOS protection engine.')
            return
        pool = ThreadPool(async_workers_count)
        try:
            tasks = []
            self._logger.debug('Preparing the workers...')
            for i, path in enumerate(self._paths):
                self._logger.debug('Started a worker for the endpoint {0}'.format(path))
                # Fixed: this condition previously read "i > i", which is
                # always False, so the periodic progress log never fired.
                if i > 0 and i % STARTED_JOB_LOG_INTERVAL == 0:
                    self._logger.info('Started {0} workers'.format(i))
                path = path.strip()
                full_path = '/'.join([self._base_url, path])
                tasks.append(pool.apply_async(self.request_head, (full_path, path)))
            for t in tasks:
                status_code, full_path, path = t.get()
                self._checked_endpoints[path] = path
                if self._is_valid_status_code(status_code):
                    self._active_paths_status_codes[path] = status_code
                self._logger.info(
                    'Fetched {0}/{1}; {2}; {3}'.format(len(self._checked_endpoints), self._endpoints_total_count,
                                                       status_code,
                                                       full_path))
            self._save_output_log()
        except requests.ConnectionError as e:
            pool.terminate()
            self._logger.error(e)
            self._logger.warning('An error occured while fuzzing.'
                                 ' Retrying with less async workers to reduce the server load.')
            # Back off and retry; workers eventually hit 0 and abort above.
            retry_workers_count = async_workers_count - WORKERS_DECREMENTED_COUNT_ON_ERROR
            self._get_website_endpoints(retry_workers_count)
    def _is_valid_status_code(self, status_code):
        """
        Checks whether a HTTP status code implies that the resouce exists.
        :param status_code:
        :return: True if the status code implies that the resouce exists, False otherwise.
        """
        return status_code in self._resource_exists_status_codes
    def _save_output_log(self):
        """
        Saves the results to an output file.
        """
        full_status_codes = {'/'.join([self._base_url, p]): code for p, code in self._active_paths_status_codes.items()}
        output_lines = ['{0} : {1}'.format(path, code) for path, code in full_status_codes.items()]
        # Fixed: the original tested "1 >= len(output_lines)", which also
        # warned "no discovered endpoints" when exactly one *was* found.
        if not output_lines:
            self._logger.warning(
                'There were no discovered endpoints. consider using a different file from "words_list" directory')
        self._logger.info('The following endpoints are active:{0}{1}'.format(os.linesep, os.linesep.join(output_lines)))
        with open(self._output_file_path, 'a+') as output_file:
            output_lines.sort()
            output_file.write(os.linesep.join(output_lines))
        self._logger.info('The endpoints were exported to "{0}"'.format(self._output_file_path))
    def _load_paths_list(self):
        """
        Loads the list of paths from the configured status.
        """
        if not os.path.exists(self._list_file_path):
            raise FileNotFoundError('The file "{0}" does not exist.'.format(self._list_file_path))
        with open(self._list_file_path) as paths_file:
            paths = [p.strip().lstrip('/').rstrip('/') for p in paths_file.readlines()]
            # Skip paths already confirmed active (relevant on retries).
            paths = [p for p in paths if p not in self._active_paths_status_codes]
            if not self._endpoints_total_count:
                self._endpoints_total_count = len(paths)
            self._paths = paths
    def request_head(self, url, path):
        """
        Executes a http HEAD request to a url.
        :param url: The full url to contact.
        :param path: The uri of the request.
        :return: A tuple of 3 variables:
            the recieved status code (int, or None for an empty url),
            the url argument (str),
            the path argument (str).
        """
        # Fixed: an empty url used to fall through and return a bare None,
        # which crashed the caller's tuple unpacking; a None status code is
        # simply treated as an inactive endpoint instead.
        if not url:
            return None, url, path
        res = self._session.head(url, verify=False, allow_redirects=True)
        return res.status_code, url, path
if __name__ == '__main__':
    # Parsing the parameters.
    parser = OptionParser(description=
                         'An Asynchronous, robust websites endpoint discovery tool with smart error handling. '
                         'Locates resources in websites based on a list of paths. '
                         'Check out the "words_list"" directory for lists examples.',
                         usage='%prog -u https://example.com/', version='%prog 0.1')
    parser.add_option('-u', '--url', dest='base_url', help='The target website to scan.', default=DEFAULT_BASE_URL)
    parser.add_option('-l', '--list', dest='list_file', help='A file containing the paths to check (separated with lines).',
                      default=DEFAULT_PATHS_LIST_FILE)
    (options, args) = parser.parse_args()
    list_file = options.list_file
    base_url = options.base_url
    # NOTE(review): base_url defaults to DEFAULT_BASE_URL, so it can never
    # be None here -- this help/exit branch is effectively dead code.
    if base_url is None:
        parser.print_help()
        sys.exit()
    # Suspending warning logs from requests and urllib3
    logging.getLogger("urllib3").setLevel(logging.ERROR)
    logging.getLogger("requests").setLevel(logging.ERROR)
    # If the "url" argument is actually a local file or directory, every
    # line of the file(s) is treated as a separate target base url.
    # NOTE(review): this reads the *class* attribute FilesFactory.urls,
    # which the FilesFactory constructor mutates in place.
    if (os.path.isdir(base_url) or os.path.isfile(base_url)):
        FilesFactory(base_url)
        for u in FilesFactory.urls:
            fuzzer = AsyncURLFuzzer(u, list_file)
            fuzzer.start()
    else:
        fuzzer = AsyncURLFuzzer(base_url, list_file)
        fuzzer.start()
| [
"logging.basicConfig",
"logging.getLogger",
"requests.session",
"os.path.exists",
"requests.packages.urllib3.disable_warnings",
"os.listdir",
"os.linesep.join",
"logging.Formatter",
"os.path.join",
"optparse.OptionParser",
"multiprocessing.pool.ThreadPool",
"os.path.isfile",
"os.path.isdir",... | [((219, 245), 'requests.packages.urllib3.disable_warnings', 'urllib3.disable_warnings', ([], {}), '()\n', (243, 245), False, 'from requests.packages import urllib3\n'), ((1927, 2005), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging_level', 'format': 'LOG_FORMAT'}), '(stream=sys.stdout, level=logging_level, format=LOG_FORMAT)\n', (1946, 2005), False, 'import logging\n'), ((10366, 10661), 'optparse.OptionParser', 'OptionParser', ([], {'description': '"""An Asynchronous, robust websites endpoint discovery tool with smart error handling. Locates resources in websites based on a list of paths. Check out the "words_list"" directory for lists examples."""', 'usage': '"""%prog -u https://example.com/"""', 'version': '"""%prog 0.1"""'}), '(description=\n \'An Asynchronous, robust websites endpoint discovery tool with smart error handling. Locates resources in websites based on a list of paths. Check out the "words_list"" directory for lists examples.\'\n , usage=\'%prog -u https://example.com/\', version=\'%prog 0.1\')\n', (10378, 10661), False, 'from optparse import OptionParser\n'), ((1560, 1584), 'os.path.isdir', 'os.path.isdir', (['user_path'], {}), '(user_path)\n', (1573, 1584), False, 'import os\n'), ((3248, 3278), 'logging.getLogger', 'logging.getLogger', (['logger_name'], {}), '(logger_name)\n', (3265, 3278), False, 'import logging\n'), ((3299, 3328), 'logging.Formatter', 'logging.Formatter', (['LOG_FORMAT'], {}), '(LOG_FORMAT)\n', (3316, 3328), False, 'import logging\n'), ((5277, 5295), 'requests.session', 'requests.session', ([], {}), '()\n', (5293, 5295), False, 'import requests\n'), ((6343, 6374), 'multiprocessing.pool.ThreadPool', 'ThreadPool', (['async_workers_count'], {}), '(async_workers_count)\n', (6353, 6374), False, 'from multiprocessing.pool import ThreadPool\n'), ((11228, 11238), 'sys.exit', 'sys.exit', ([], {}), '()\n', (11236, 11238), False, 'import sys\n'), ((11420, 11443), 
'os.path.isdir', 'os.path.isdir', (['base_url'], {}), '(base_url)\n', (11433, 11443), False, 'import os\n'), ((11447, 11471), 'os.path.isfile', 'os.path.isfile', (['base_url'], {}), '(base_url)\n', (11461, 11471), False, 'import os\n'), ((1266, 1292), 'os.path.join', 'os.path.join', (['user_path', 'f'], {}), '(user_path, f)\n', (1278, 1292), False, 'import os\n'), ((1694, 1719), 'os.path.isfile', 'os.path.isfile', (['user_path'], {}), '(user_path)\n', (1708, 1719), False, 'import os\n'), ((2103, 2131), 'logging.getLogger', 'logging.getLogger', (['"""urllib3"""'], {}), "('urllib3')\n", (2120, 2131), False, 'import logging\n'), ((3151, 3186), 'os.path.exists', 'os.path.exists', (['logs_directory_path'], {}), '(logs_directory_path)\n', (3165, 3186), False, 'import os\n'), ((3200, 3229), 'os.mkdir', 'os.mkdir', (['logs_directory_path'], {}), '(logs_directory_path)\n', (3208, 3229), False, 'import os\n'), ((9289, 9325), 'os.path.exists', 'os.path.exists', (['self._list_file_path'], {}), '(self._list_file_path)\n', (9303, 9325), False, 'import os\n'), ((11300, 11328), 'logging.getLogger', 'logging.getLogger', (['"""urllib3"""'], {}), "('urllib3')\n", (11317, 11328), False, 'import logging\n'), ((11357, 11386), 'logging.getLogger', 'logging.getLogger', (['"""requests"""'], {}), "('requests')\n", (11374, 11386), False, 'import logging\n'), ((1302, 1323), 'os.listdir', 'os.listdir', (['user_path'], {}), '(user_path)\n', (1312, 1323), False, 'import os\n'), ((8871, 8900), 'os.linesep.join', 'os.linesep.join', (['output_lines'], {}), '(output_lines)\n', (8886, 8900), False, 'import os\n'), ((9029, 9058), 'os.linesep.join', 'os.linesep.join', (['output_lines'], {}), '(output_lines)\n', (9044, 9058), False, 'import os\n'), ((1342, 1368), 'os.path.join', 'os.path.join', (['user_path', 'f'], {}), '(user_path, f)\n', (1354, 1368), False, 'import os\n')] |
#Template of the Purkinje cell model, Zang et al. 2018
#Templating by Lungsi 2019 based on ~/PC2018Zang/purkinje.hoc
#purkinje.hoc has been converted from original purkinje_demo and using readme.html as a guide
from neuron import h
#from pdb import set_trace as breakpoint
from random import randint
class Purkinje(object):
    """Multi-compartment Purkinje cell model (Zang et al. 2018).

    Loads the hoc morphology/mechanisms and exposes a handful of NEURON
    sections (soma, AIS, root dendrite, plus one randomly chosen smooth and
    one randomly chosen spiny dendrite section) for recording.
    """
    def __init__(self):
        # Builds the full cell in NEURON's global (hoc) namespace; the
        # attributes below are just handles into it.
        h.xopen("purkinje.hoc")
        # There are 1088 compartments and the following are chosen as
        # attributes to this python class for potential recording
        self.soma = h.somaA
        self.ais = h.AIS
        # Based on last 50 or so lines of Purkinje19b972-1.nrn
        self.dend_root = h.dendA1_0 # see Fig.2A of paper
        # Reverse eng. from Purkinje19b972-1.nrn and dendv_arnd21.ses
        dend_sm = [ sec for sec in h.maindend ] # len(dend_sm) -> 30
        dend_sp = [ sec for sec in h.spinydend ] # len(dend_sp) -> 1105
        # note that for either self.dend_sm or self.dend_sp
        # the first element of its list is a dendrite section closest to soma
        # and last element is the dendrite section farthest away.
        # also potentially
        #self.cf = [ sec for sec in h.cf ] # for climbing fibre
        #self.pf = [ sec for sec in h.pf ] # for paraller fibre
        #
        # NOTE(review): randint picks a *single* random section per
        # instance, so repeated runs record from different dendrites; no
        # seed is set here -- confirm this nondeterminism is intended.
        self.dend_sm = dend_sm[ randint(0, len(dend_sm)-1) ]
        self.dend_sp = dend_sp[ randint(0, len(dend_sp)-1) ]
| [
"neuron.h.xopen"
] | [((395, 418), 'neuron.h.xopen', 'h.xopen', (['"""purkinje.hoc"""'], {}), "('purkinje.hoc')\n", (402, 418), False, 'from neuron import h\n')] |
"""
@brief Class to store data from a specific event
@date Created July 2, 2018
@author <NAME>
@bug No known bugs
"""
from fprime.common.models.serialize import time_type
from fprime_gds.common.data_types import sys_data
class EventData(sys_data.SysData):
    """
    Holds a single received event message together with its template.
    """

    def __init__(self, event_args, event_time, event_temp):
        """
        Constructor.
        Args:
            event_args: Tuple of filled-in argument objects (instances of
                        BaseType subclasses) whose classes match the
                        corresponding argument types of event_temp. May be
                        None.
            event_time: The time the event occurred (TimeType)
            event_temp: Event template instance for this event
        Returns:
            An initialized EventData object
        """
        super().__init__()
        self.id = event_temp.get_id()
        self.args = event_args
        self.time = event_time
        self.template = event_temp

    def get_args(self):
        """Return the stored argument tuple (may be None)."""
        return self.args

    def get_severity(self):
        """Return the severity declared by this event's template."""
        return self.template.get_severity()

    @staticmethod
    def get_empty_obj(event_temp):
        """
        Obtains an event object that is empty (arguments = None)
        Args:
            event_temp: (EventTemplate obj) Template describing event
        Returns:
            An EventData object with argument value of None
        """
        return EventData(None, time_type.TimeType(), event_temp)

    @staticmethod
    def get_csv_header(verbose=False):
        """
        Get the header for a csv file containing event data
        Args:
            verbose: (boolean, default=False) Indicates if header should be
                     for regular or verbose output
        Returns:
            String version of the channel data
        """
        if verbose:
            return "Time,Raw Time,Name,ID,Severity,Args\n"
        return "Time,Name,Severity,Args\n"

    def get_str(self, time_zone=None, verbose=False, csv=False):
        """
        Convert the event data to a string
        Args:
            time_zone: (tzinfo, default=None) Timezone to print time in. If
                       time_zone=None, use local time.
            verbose: (boolean, default=False) Prints extra fields if True
            csv: (boolean, default=False) Prints each field with commas
                 between if true
        Returns:
            String version of the event data
        """
        time_str = self.time.to_readable(time_zone)
        raw_time_str = str(self.time)
        name = self.template.get_full_name()
        severity = self.template.get_severity()
        format_str = self.template.get_format_str()
        if self.args is None:
            arg_str = "EMPTY EVENT OBJ"
        else:
            # The stored arguments are serializable objects which cannot
            # fill a format string directly; extract their raw values first.
            arg_str = format_str % tuple(arg_obj.val for arg_obj in self.args)
        if csv:
            if verbose:
                return "%s,%s,%s,%d,%s,%s" % (
                    time_str,
                    raw_time_str,
                    name,
                    self.id,
                    severity,
                    arg_str,
                )
            return "{},{},{},{}".format(time_str, name, severity, arg_str)
        if verbose:
            return "%s: %s (%d) %s %s : %s" % (
                time_str,
                name,
                self.id,
                raw_time_str,
                severity,
                arg_str,
            )
        return "{}: {} {} : {}".format(time_str, name, severity, arg_str)

    def __str__(self):
        """Return the default human-readable form of this event."""
        return self.get_str()
| [
"fprime.common.models.serialize.time_type.TimeType"
] | [((1702, 1722), 'fprime.common.models.serialize.time_type.TimeType', 'time_type.TimeType', ([], {}), '()\n', (1720, 1722), False, 'from fprime.common.models.serialize import time_type\n')] |
from os.path import exists, join
import pytest
from flask_dj import startproject
from tests.basic_project_creator import ProjectCreate
class TestBaseSettingProjectConstructor(ProjectCreate):
    """Verifies the file/folder layout generated by the default constructor.

    Relies on attributes and helpers supplied by the ProjectCreate base
    fixture: project_path, project_name, need_static, need_templates and
    _file_test.
    """

    def setup(self, need_static=False, need_templates=False):
        # NOTE(review): need_static/need_templates are accepted but never
        # used -- neither stored on self nor forwarded to super().setup().
        # Confirm whether ProjectCreate.setup() should receive them.
        super().setup()

    def test_project_folder_exist(self):
        assert exists(self.project_path)

    def test_main_folder_exist(self):
        # The package folder is named after the project, inside project_path.
        assert exists(join(self.project_path, self.project_name))

    def test_main_init(self):
        self.main_file_test('__init__', ['from flask import Flask', 'from flask_login import LoginManager'])

    def test_main_config(self):
        self.main_file_test('config', ["HOST = '127.0.0.1'", "PORT = 5000", "Config = DevelopConfig"])

    def main_file_test(self, filename, test_contents=['']):
        # Helper: check a file inside the generated package folder for the
        # given content snippets.
        # NOTE(review): mutable default argument; harmless as long as the
        # list is never mutated, but worth cleaning up.
        self._file_test(self.project_name, filename, test_contents)

    def test_main_urls(self):
        assert exists(join(join(self.project_path, self.project_name), 'urls.py'))

    def test_main_manage(self):
        self._file_test(self.project_path, 'manage', [f'from {self.project_name} import app, config'])

    def test_utils_folder_exist(self):
        assert exists(join(self.project_path, 'utils'))

    def test_utils_urls(self):
        self._file_test('utils', 'urls', [f'from {self.project_name} import app'])

    def test_templates_folder(self):
        # Passes vacuously when templates were not requested.
        assert exists(join(self.project_path, 'templates')) or not self.need_templates

    def test_static_folder(self):
        # Passes vacuously when a static folder was not requested.
        assert exists(join(self.project_path, 'static')) or not self.need_static
class TestAdvancedProjectConfig(TestBaseSettingProjectConstructor):
    """Runs the same layout checks with templates/ and static/ requested."""

    def setup(self):
        super().setup(need_templates=True, need_static=True)
class UncorrectProjectName(ProjectCreate):
    """Base fixture for project names that startproject must reject."""

    def setup(self, project_name):
        # fast_start=False -- presumably defers actual creation so the
        # invalid name does not produce files during setup; confirm against
        # ProjectCreate.
        super().setup(project_name=project_name, fast_start=False)

    def test_create(self):
        # The invalid name must raise when the project is created explicitly.
        with pytest.raises(ValueError):
            startproject(self.project_name)

    def test_main_folder_not_exist(self):
        assert not exists(join(self.project_path, self.project_name))

    def teardown(self):
        # Nothing was generated, so there is nothing to clean up.
        pass
class TestNumUncorrectProjectName(UncorrectProjectName):
    """A name starting with a digit must be rejected."""

    def setup(self):
        super().setup("123project")
class TestDashInProjectName(UncorrectProjectName):
    """A name containing a dash must be rejected."""

    def setup(self):
        super().setup("pro-ject")
| [
"os.path.exists",
"flask_dj.startproject",
"os.path.join",
"pytest.raises"
] | [((336, 361), 'os.path.exists', 'exists', (['self.project_path'], {}), '(self.project_path)\n', (342, 361), False, 'from os.path import exists, join\n'), ((423, 465), 'os.path.join', 'join', (['self.project_path', 'self.project_name'], {}), '(self.project_path, self.project_name)\n', (427, 465), False, 'from os.path import exists, join\n'), ((1184, 1216), 'os.path.join', 'join', (['self.project_path', '"""utils"""'], {}), "(self.project_path, 'utils')\n", (1188, 1216), False, 'from os.path import exists, join\n'), ((1914, 1939), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1927, 1939), False, 'import pytest\n'), ((1953, 1984), 'flask_dj.startproject', 'startproject', (['self.project_name'], {}), '(self.project_name)\n', (1965, 1984), False, 'from flask_dj import startproject\n'), ((930, 972), 'os.path.join', 'join', (['self.project_path', 'self.project_name'], {}), '(self.project_path, self.project_name)\n', (934, 972), False, 'from os.path import exists, join\n'), ((1393, 1429), 'os.path.join', 'join', (['self.project_path', '"""templates"""'], {}), "(self.project_path, 'templates')\n", (1397, 1429), False, 'from os.path import exists, join\n'), ((1515, 1548), 'os.path.join', 'join', (['self.project_path', '"""static"""'], {}), "(self.project_path, 'static')\n", (1519, 1548), False, 'from os.path import exists, join\n'), ((2054, 2096), 'os.path.join', 'join', (['self.project_path', 'self.project_name'], {}), '(self.project_path, self.project_name)\n', (2058, 2096), False, 'from os.path import exists, join\n')] |
from django.test import TestCase
from rest_framework.test import APIRequestFactory
from .views import LoginView, SignupView
from common.tests import USER_DATA, prepare_dummy_user_data
# Create your tests here.
class ApiViewTests(TestCase):
    """Exercises the signup and login API views via the DRF request factory."""

    factory = APIRequestFactory()

    def test_signup(self):
        user_data = USER_DATA
        view = SignupView.as_view()
        # First attempt posts the unmodified fixture and is rejected (400).
        req = self.factory.post('/register/', user_data, format='json')
        resp = view(req)
        st_code = resp.status_code
        self.assertEqual(st_code, 400)
        # NOTE(review): user_data aliases the shared USER_DATA constant, so
        # this assignment mutates the module-level fixture.
        user_data['password'] = '<PASSWORD>'
        req = self.factory.post('/register/', user_data, format='json')
        resp = view(req)
        st_code = resp.status_code
        rdata = resp.data.get('data')
        self.assertEqual(st_code, 201)
        self.assertEqual(rdata['message'], 'USER_REGISTER_SUCCESS')
        print("User register API test success")

    def test_login(self):
        # Seed the test database with the dummy user before logging in.
        prepare_dummy_user_data()
        view = LoginView.as_view()
        # Empty payload -> validation error.
        user_data = {}
        req = self.factory.post('/login/', user_data, format='json')
        resp = view(req)
        st_code = resp.status_code
        self.assertEqual(st_code, 400)
        # Empty password -> validation error.
        user_data = {
            'email': USER_DATA['email'],
            'password': '',
        }
        req = self.factory.post('/login/', user_data, format='json')
        resp = view(req)
        st_code = resp.status_code
        self.assertEqual(st_code, 400)
        user_data['password'] = '<PASSWORD>'
        req = self.factory.post('/login/', user_data, format='json')
        resp = view(req)
        st_code = resp.status_code
        rdata = resp.data.get('data')
        self.assertEqual(st_code, 200)
        # A JWT consists of three dot-separated segments.
        self.assertEqual(len(rdata['token'].split('.')), 3)
print("User login API test success") | [
"rest_framework.test.APIRequestFactory",
"common.tests.prepare_dummy_user_data"
] | [((257, 276), 'rest_framework.test.APIRequestFactory', 'APIRequestFactory', ([], {}), '()\n', (274, 276), False, 'from rest_framework.test import APIRequestFactory\n'), ((949, 974), 'common.tests.prepare_dummy_user_data', 'prepare_dummy_user_data', ([], {}), '()\n', (972, 974), False, 'from common.tests import USER_DATA, prepare_dummy_user_data\n')] |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''
Utility functions for all things related to manipulating google play services
related files.
'''
import argparse
import filecmp
import json
import logging
import os
import re
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
from devil.utils import cmd_helper
_XML_VERSION_NUMBER_PATTERN = re.compile(
r'<integer name="google_play_services_version">(\d+)<\/integer>')
class DefaultsRawHelpFormatter(argparse.ArgumentDefaultsHelpFormatter,
                               argparse.RawDescriptionHelpFormatter):
  '''Help formatter that keeps the description's raw formatting while still
  appending each argument's default value to its help text.
  '''
class ConfigParser(object):
  '''Reads and writes the JSON configuration files for play services scripts.

  Recognized keys:
  - version_number: number mirroring
    @integer/google_play_services_version from the library (e.g. 815000)
  - sdk_version: Play Services SDK version to retrieve (e.g. "8.1.0")
  - clients: list of module names to include when preprocessing the library
  - version_xml_path: path to version.xml, relative to the library base dir
  - locale_whitelist: list of locales to keep from the resources
  - resource_whitelist: list of resource files to explicitly keep
  '''
  _VERSION_NUMBER_KEY = 'version_number'

  def __init__(self, path):
    self.path = path
    self._data = {}
    with open(path, 'r') as stream:
      self._data = json.load(stream)

  @property
  def version_number(self):
    return self._data.get(self._VERSION_NUMBER_KEY)

  @property
  def sdk_version(self):
    return self._data.get('sdk_version')

  @property
  def clients(self):
    return self._data.get('clients') or []

  @property
  def version_xml_path(self):
    return self._data.get('version_xml_path')

  @property
  def locale_whitelist(self):
    return self._data.get('locale_whitelist') or []

  @property
  def resource_whitelist(self):
    return self._data.get('resource_whitelist') or []

  def UpdateVersionNumber(self, new_version_number):
    '''Saves new_version_number back into the configuration file.'''
    self._data[self._VERSION_NUMBER_KEY] = new_version_number
    with open(self.path, 'w') as stream:
      stream.write(DumpTrimmedJson(self._data))
def DumpTrimmedJson(json_data):
  '''
  Serializes json_data to a string without trailing spaces and with a final
  newline (json.dumps historically left trailing spaces and no newline).
  '''
  out = json.dumps(json_data, sort_keys=True, indent=2)
  # json.dumps always emits '\n' for indentation regardless of platform, so
  # the cleanup must use '\n' too.  The previous os.linesep-based version was
  # a no-op on Windows ('\r\n') and appended a terminator that did not match
  # the body's line endings.
  out = out.replace(' \n', '\n')
  return out + '\n'
def FileEquals(expected_file, actual_file):
  '''
  Returns True only when both paths exist as regular files and their
  contents compare equal; False in every other case.
  '''
  both_exist = os.path.isfile(expected_file) and os.path.isfile(actual_file)
  return both_exist and filecmp.cmp(expected_file, actual_file)
def IsRepoDirty(repo_root):
  '''Returns True when the work tree has staged or modified files.

  The original docstring stated the opposite of what the function returns;
  the name and the exit-code check below show True means "dirty".
  '''
  # 'git diff-index --quiet HEAD' exits with 1 when there are staged changes
  # or modified files, 0 when the tree is clean.
  cmd = ['git', 'diff-index', '--quiet', 'HEAD']
  return cmd_helper.Call(cmd, cwd=repo_root) == 1
def GetVersionNumberFromLibraryResources(version_xml):
  '''
  Extracts a Google Play services version number from its version.xml file.
  '''
  with open(version_xml, 'r') as version_file:
    contents = version_file.read()

  match = _XML_VERSION_NUMBER_PATTERN.search(contents)
  if match:
    return int(match.group(1))
  raise AttributeError('A value for google_play_services_version was not '
                       'found in ' + version_xml)
def MakeLocalCommit(repo_root, files_to_commit, message):
  '''Stages files_to_commit and creates a local git commit with message.

  Raises:
    Exception: if staging or committing fails.
  '''
  logging.debug('Staging files (%s) for commit.', files_to_commit)
  if cmd_helper.Call(['git', 'add'] + files_to_commit, cwd=repo_root) != 0:
    # Previously this raised "The local commit failed." even though it is the
    # staging step that failed; name the right step in the error.
    raise Exception('Failed to stage files for the commit.')

  logging.debug('Committing.')
  if cmd_helper.Call(['git', 'commit', '-m', message], cwd=repo_root) != 0:
    raise Exception('The local commit failed.')
| [
"logging.debug",
"re.compile",
"devil.utils.cmd_helper.Call",
"json.dumps",
"os.path.isfile",
"os.path.dirname",
"json.load",
"filecmp.cmp"
] | [((490, 567), 're.compile', 're.compile', (['"""<integer name="google_play_services_version">(\\\\d+)<\\\\/integer>"""'], {}), '(\'<integer name="google_play_services_version">(\\\\d+)<\\\\/integer>\')\n', (500, 567), False, 'import re\n'), ((3565, 3612), 'json.dumps', 'json.dumps', (['json_data'], {'sort_keys': '(True)', 'indent': '(2)'}), '(json_data, sort_keys=True, indent=2)\n', (3575, 3612), False, 'import json\n'), ((3943, 3982), 'filecmp.cmp', 'filecmp.cmp', (['expected_file', 'actual_file'], {}), '(expected_file, actual_file)\n', (3954, 3982), False, 'import filecmp\n'), ((4860, 4924), 'logging.debug', 'logging.debug', (['"""Staging files (%s) for commit."""', 'files_to_commit'], {}), "('Staging files (%s) for commit.', files_to_commit)\n", (4873, 4924), False, 'import logging\n'), ((5052, 5080), 'logging.debug', 'logging.debug', (['"""Committing."""'], {}), "('Committing.')\n", (5065, 5080), False, 'import logging\n'), ((384, 409), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (399, 409), False, 'import os\n'), ((4240, 4275), 'devil.utils.cmd_helper.Call', 'cmd_helper.Call', (['cmd'], {'cwd': 'repo_root'}), '(cmd, cwd=repo_root)\n', (4255, 4275), False, 'from devil.utils import cmd_helper\n'), ((4930, 4994), 'devil.utils.cmd_helper.Call', 'cmd_helper.Call', (["(['git', 'add'] + files_to_commit)"], {'cwd': 'repo_root'}), "(['git', 'add'] + files_to_commit, cwd=repo_root)\n", (4945, 4994), False, 'from devil.utils import cmd_helper\n'), ((5086, 5150), 'devil.utils.cmd_helper.Call', 'cmd_helper.Call', (["['git', 'commit', '-m', message]"], {'cwd': 'repo_root'}), "(['git', 'commit', '-m', message], cwd=repo_root)\n", (5101, 5150), False, 'from devil.utils import cmd_helper\n'), ((2544, 2561), 'json.load', 'json.load', (['stream'], {}), '(stream)\n', (2553, 2561), False, 'import json\n'), ((3851, 3878), 'os.path.isfile', 'os.path.isfile', (['actual_file'], {}), '(actual_file)\n', (3865, 3878), False, 'import os\n'), ((3886, 
3915), 'os.path.isfile', 'os.path.isfile', (['expected_file'], {}), '(expected_file)\n', (3900, 3915), False, 'import os\n')] |
# -*-coding: utf-8
import sys
import argparse
from tagger import FileTagger
def tagger_add(args):
    """CLI handler: attach the given tags to args.path."""
    if not FileTagger().add_tags(args.path, *args.tags):
        print("[-] Fail to add tags.")
def tagger_rm(args):
    """CLI handler: remove the given tags from args.path."""
    if not FileTagger().rm_tags(args.path, *args.tags):
        print("[-] Fail to remove tags.")
def tagger_find(args):
    """CLI handler: print the paths under args.path carrying all given tags."""
    matches = FileTagger().find_tags(args.path, *args.tags,
                                       top_only=args.top, depth=args.depth)
    print('\n'.join(matches))
def tagger_get(args):
    """CLI handler: print the tags attached to args.path, one per line."""
    print('\n'.join(FileTagger().get_tags(args.path)))
def tagger_clear(args):
    """CLI handler: clear the tags stored for args.path."""
    if not FileTagger().clear_tags(args.path, recursive=args.recursive,
                                    depth=args.depth, top_only=args.top):
        print("[-] Fail to clear tags")
def tagger_merge(args):
    """CLI handler: copy files matching the tags into args.dest_path."""
    FileTagger().merge_tags(args.path, args.dest_path, *args.tags)
def tagger_sync(args):
    """CLI handler: drop tags whose files no longer exist under args.path."""
    FileTagger().sync_tags(args.path, recursive=args.recursive,
                           depth=args.depth, top_only=args.top)
def get_parser():
    """Build the ``tagger`` command-line parser.

    The original function registered all seven sub-commands inline; each
    registration now lives in a small private helper so the individual
    argument sets are easy to locate and modify independently.
    """
    parser = argparse.ArgumentParser(prog="tagger")
    subparsers = parser.add_subparsers()
    _register_add(subparsers)
    _register_rm(subparsers)
    _register_get(subparsers)
    _register_find(subparsers)
    _register_clear(subparsers)
    _register_merge(subparsers)
    _register_sync(subparsers)
    return parser


def _register_add(subparsers):
    # tagger add <path> <tags...>
    parser_add = subparsers.add_parser("add", help="add tags to path")
    parser_add.add_argument("path", help="path to add tags")
    parser_add.add_argument("tags", nargs="+", help="tags to add")
    parser_add.set_defaults(func=tagger_add)


def _register_rm(subparsers):
    # tagger rm <path> <tags...>
    parser_rm = subparsers.add_parser("rm", help="remove tags from path")
    parser_rm.add_argument("path", help="path to remove tags from")
    parser_rm.add_argument("tags", nargs="+", help="tags to remove")
    parser_rm.set_defaults(func=tagger_rm)


def _register_get(subparsers):
    # tagger get <path>
    parser_get = subparsers.add_parser("get", help="get tags of path")
    parser_get.add_argument("path", help="path of tags")
    parser_get.set_defaults(func=tagger_get)


def _register_find(subparsers):
    # tagger find <path> <tags...> [-t] [-d DEPTH]
    parser_find = subparsers.add_parser("find", help="find paths that have tags")
    parser_find.add_argument("path", help="path to find tags")
    parser_find.add_argument("tags", nargs="+", help="tags to find")
    parser_find.add_argument("-t", "--top", help="only find top directories that have tags", action="store_true")
    parser_find.add_argument("-d", "--depth", type=int, help="depth of folder to search")
    parser_find.set_defaults(func=tagger_find)


def _register_clear(subparsers):
    # tagger clear <path> [-r] [-t] [-d DEPTH]
    parser_clear = subparsers.add_parser("clear", help="clear path's tags")
    parser_clear.add_argument("path", help="path to clear tags")
    parser_clear.add_argument("-r", "--recursive", help="recursively clear tags", action="store_true")
    parser_clear.add_argument("-t", "--top", help="top only mode, valid if -r is given", action='store_true')
    parser_clear.add_argument("-d", "--depth", type=int, help="recursive depth, valid if -r is given")
    parser_clear.set_defaults(func=tagger_clear)


def _register_merge(subparsers):
    # tagger merge <path> <dest_path> <tags...>
    parser_merge = subparsers.add_parser("merge", help="merge file with same tags to dest directory")
    parser_merge.add_argument("path", help="path to search for tags")
    parser_merge.add_argument(
        "dest_path", help="dest directory to save copy of files")
    parser_merge.add_argument("tags", nargs="+", help="tags to merge")
    parser_merge.set_defaults(func=tagger_merge)


def _register_sync(subparsers):
    # tagger sync <path> [-r] [-t] [-d DEPTH]
    parser_sync = subparsers.add_parser("sync", help="synchronize tags, remove tags of non-existent files")
    parser_sync.add_argument("path", help="path to synchronize tags")
    parser_sync.add_argument("-r", '--recursive', help='recursively sync tags', action='store_true')
    parser_sync.add_argument('-t', '--top', help='sync only top files or folders', action='store_true')
    parser_sync.add_argument('-d', '--depth', type=int, help='depth to sync')
    parser_sync.set_defaults(func=tagger_sync)
def main():
    """Entry point: dispatch to the selected sub-command handler.

    Shows the help text when no sub-command was given, and swallows Ctrl-C
    with a short message instead of a traceback.
    """
    try:
        parser = get_parser()
        args = parser.parse_args()
        if 'func' not in args:
            parser.parse_args(['-h'])
        else:
            args.func(args)
    except KeyboardInterrupt:
        print("[-] Cancelled by user")


if __name__ == "__main__":
    main()
| [
"argparse.ArgumentParser",
"tagger.FileTagger"
] | [((110, 122), 'tagger.FileTagger', 'FileTagger', ([], {}), '()\n', (120, 122), False, 'from tagger import FileTagger\n'), ((255, 267), 'tagger.FileTagger', 'FileTagger', ([], {}), '()\n', (265, 267), False, 'from tagger import FileTagger\n'), ((404, 416), 'tagger.FileTagger', 'FileTagger', ([], {}), '()\n', (414, 416), False, 'from tagger import FileTagger\n'), ((563, 575), 'tagger.FileTagger', 'FileTagger', ([], {}), '()\n', (573, 575), False, 'from tagger import FileTagger\n'), ((672, 684), 'tagger.FileTagger', 'FileTagger', ([], {}), '()\n', (682, 684), False, 'from tagger import FileTagger\n'), ((874, 886), 'tagger.FileTagger', 'FileTagger', ([], {}), '()\n', (884, 886), False, 'from tagger import FileTagger\n'), ((978, 990), 'tagger.FileTagger', 'FileTagger', ([], {}), '()\n', (988, 990), False, 'from tagger import FileTagger\n'), ((1114, 1152), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""tagger"""'}), "(prog='tagger')\n", (1137, 1152), False, 'import argparse\n')] |
"""Cookiecutter post-generation hook: strip packaging files (and optionally
the test script) depending on the chosen template options."""
import os

FILES_TO_REMOVE = ["setup.cfg", "pyproject.toml", "setup.py"]

# The {{ ... }} placeholders are substituted by cookiecutter before this hook
# runs, so these comparisons see the user's actual answers.
if "{{ cookiecutter.as_package }}" == "no":
    for packaging_file in FILES_TO_REMOVE:
        os.remove(packaging_file)

if "{{ cookiecutter.remove_test_script }}" == "yes":
    os.remove("test_log.py")
| [
"os.remove"
] | [((233, 257), 'os.remove', 'os.remove', (['"""test_log.py"""'], {}), "('test_log.py')\n", (242, 257), False, 'import os\n'), ((159, 174), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (168, 174), False, 'import os\n')] |
from django import forms
from crispy_forms.helper import FormHelper
class MiscForm(forms.Form):
    """Free-text notes form shown only to the registration team."""

    notes = forms.CharField(
        label='Notes for the registration team',
        help_text=('Anything else you need to describe. '
                   'The registration team will see this. '
                   'The bursaries team will not.'),
        widget=forms.Textarea(attrs={'rows': 3}),
        required=False,
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Rendered inside an outer <form>, so crispy must not emit its own
        # form tag or media includes.
        helper = FormHelper()
        helper.form_tag = False
        helper.include_media = False
        self.helper = helper
| [
"django.forms.Textarea",
"crispy_forms.helper.FormHelper"
] | [((528, 540), 'crispy_forms.helper.FormHelper', 'FormHelper', ([], {}), '()\n', (538, 540), False, 'from crispy_forms.helper import FormHelper\n'), ((357, 390), 'django.forms.Textarea', 'forms.Textarea', ([], {'attrs': "{'rows': 3}"}), "(attrs={'rows': 3})\n", (371, 390), False, 'from django import forms\n')] |
# Load an image with OpenCV and display it at half its original size.
import cv2

# imread flag meanings: 0 = grayscale, 1 = BGR color without alpha,
# -1 = unchanged (keeps the alpha channel).
img = cv2.imread("Practice/images/galaxy.jpg", 0)
print(type(img))
print(img)

# Pixel grid dimensions (rows, columns) of the numpy array.
print(img.shape)
# Number of array dimensions (2 for grayscale).
print(img.ndim)

# Halve both dimensions; note cv2.resize takes (width, height), i.e.
# (columns, rows).
resized_img = cv2.resize(img, (img.shape[1] // 2, img.shape[0] // 2))

# Show the scaled image until a key or button press (waitKey(0) blocks
# indefinitely), then tear down all windows.
cv2.imshow("Galaxy", resized_img)
cv2.waitKey(0)
cv2.destroyAllWindows()

# Persist the resized image next to the original.
cv2.imwrite("Practice/images/galaxy_resized.jpg", resized_img) | [
"cv2.imwrite",
"cv2.imshow",
"cv2.destroyAllWindows",
"cv2.waitKey",
"cv2.imread"
] | [((199, 242), 'cv2.imread', 'cv2.imread', (['"""Practice/images/galaxy.jpg"""', '(0)'], {}), "('Practice/images/galaxy.jpg', 0)\n", (209, 242), False, 'import cv2\n'), ((667, 700), 'cv2.imshow', 'cv2.imshow', (['"""Galaxy"""', 'resized_img'], {}), "('Galaxy', resized_img)\n", (677, 700), False, 'import cv2\n'), ((797, 811), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (808, 811), False, 'import cv2\n'), ((832, 855), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (853, 855), False, 'import cv2\n'), ((886, 948), 'cv2.imwrite', 'cv2.imwrite', (['"""Practice/images/galaxy_resized.jpg"""', 'resized_img'], {}), "('Practice/images/galaxy_resized.jpg', resized_img)\n", (897, 948), False, 'import cv2\n')] |
import asyncio
from .main import create_app

# Module-level entry point: build the application with the process's default
# event loop; `app` is what the server runner imports.
# NOTE(review): asyncio.get_event_loop() outside a running loop is deprecated
# since Python 3.10 -- presumably fine on the targeted runtime, but confirm.
loop = asyncio.get_event_loop()
app = create_app(loop)
| [
"asyncio.get_event_loop"
] | [((53, 77), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (75, 77), False, 'import asyncio\n')] |
import io, re, codecs, sys, os.path
import pprint
# Shift-state column values used by .klc files.  These are bit masks:
# SHFT=1, CTRL=2, ALT=4; ALTGR is CTRL|ALT (2|4 == 6).
REG = 0
SHFT = 1
CTRL = 2
ALT = 4
ALTGR = 6

# NOTE(review): presumably toggles a compact output format later in the
# file -- not referenced in this portion; confirm.
COMPRESSED_OUTPUT = 1
def get_kbd_layout(base_filename, load_patch = False):
    """Parse a Windows .klc keyboard layout file into a dict.

    When load_patch is True and a sibling '<base_filename>patch' file exists,
    its lines are appended to the LAYOUT section as extra key definitions.

    Returns a dict with (among others) the keys 'shiftstates', 'layout' and
    'all_originally_reachable_characters'.
    """
    filename_klc = base_filename
    filename_changes = base_filename + 'patch'
    f = io.open(filename_klc, mode="r", encoding="utf-8")
    lines = f.readlines()
    f.close()
    lines = [x.strip() for x in lines]
    # The optional patch file uses the same tab-separated format.
    if (load_patch and os.path.isfile(filename_changes)):
        f = io.open(filename_changes, mode="r", encoding="utf-8")
        lines_changes = f.readlines()
        f.close()
        lines_changes = [x.strip() for x in lines_changes]
    else:
        lines_changes = []

    keywords = [ 'KBD', 'COPYRIGHT', 'COMPANY', 'LOCALENAME', 'LOCALEID', 'VERSION', 'SHIFTSTATE', 'LAYOUT', 'DEADKEY', 'KEYNAME', 'KEYNAME_EXT', 'KEYNAME_DEAD', 'DESCRIPTIONS', 'LANGUAGENAMES', 'ENDKBD' ]

    # Split the file into sections, one per keyword line.  Each section is a
    # list of tab-split field lists; '//' comments and blank lines are
    # dropped, as are empty fields produced by consecutive tabs.
    sections = []
    section = []
    while len(lines) > 0:
        # Inner loop: consume lines until one with actual content remains.
        while True:
            line = lines[0]
            lines = lines[1:]
            i = line.find('//')
            if i != -1:
                line = line[:i]
            line = line.rstrip()
            if len(line) == 0:
                continue
            fields = re.split(r'\t', line)
            while '' in fields:
                fields.remove('')
            break
        if fields[0] in keywords:
            # A new keyword closes the previous section.
            if (len(section)) > 0:
                sections.append(section)
            section = []
        section.append(fields)
    # NOTE(review): the final section (normally ENDKBD) is never appended to
    # 'sections'; harmless as long as ENDKBD is the last keyword -- confirm.

    # Same comment/blank filtering for the patch file, but kept as a flat
    # list of field lists.
    section_changes = []
    while len(lines_changes) > 0:
        line = lines_changes[0]
        lines_changes = lines_changes[1:]
        i = line.find('//')
        if i != -1:
            line = line[:i]
        line = line.rstrip()
        if len(line) == 0:
            continue
        fields = re.split(r'\t', line)
        while '' in fields:
            fields.remove('')
        section_changes.append(fields)

    kbd_layout = {}
    # NOTE: 'lines' is reused here as the loop variable, shadowing the
    # (now empty) list above.
    for lines in sections:
        fields = lines[0]
        if fields[0] == 'KBD':
            kbd_layout['short_id'] = fields[1]
            kbd_layout['name'] = fields[2].replace('"', '')
        elif fields[0] == 'COPYRIGHT':
            kbd_layout['copyright'] = fields[1].replace('"', '')
        elif fields[0] == 'COMPANY':
            kbd_layout['company'] = fields[1]
        elif fields[0] == 'LOCALENAME':
            kbd_layout['localename'] = fields[1].replace('"', '')
        elif fields[0] == 'LOCALEID':
            kbd_layout['localeid'] = fields[1].replace('"', '')
        elif fields[0] == 'VERSION':
            kbd_layout['version'] = fields[1]
        elif fields[0] == 'SHIFTSTATE':
            shiftstates = []
            for fields in lines[1:]:
                shiftstates.append(int(fields[0]))
            # The US layout does not use "Alt" *at all*. We add it, so that the
            # .klcpatch file can define keys with "Alt" in an extra column.
            if not ALT in shiftstates:
                shiftstates.append(ALT)
            kbd_layout['shiftstates'] = shiftstates
        elif fields[0] == 'LAYOUT':
            all_originally_reachable_characters = ""
            layout = {}
            line_number = 0
            # Patch lines are processed after the original LAYOUT lines.
            for fields in lines[1:] + section_changes:
                if fields[0] == '-1':
                    # TODO: 807 has extension lines we don't support
                    continue
                chars = {}
                # Character columns start at field index 3, one per
                # shiftstate; '-1' marks an unassigned combination.
                i = 3
                for shiftstate in shiftstates:
                    if i > len(fields) - 1:
                        break
                    c = fields[i]
                    if c != '-1':
                        if len(c) > 1:
                            # Multi-character fields are 4-digit hex
                            # Unicode code points.
                            c = chr(int(c[0:4], 16))
                        chars[shiftstate] = c
                        # Only characters from the original file (not the
                        # patch) count as "originally reachable".
                        if (line_number < len(lines[1:])):
                            all_originally_reachable_characters += c
                    i += 1
                # TODO: c[4] == '@' -> dead key
                layout[int(fields[0], 16)] = {
                    #'vk_name': 'VK_' + fields[1],
                    #'cap': int(fields[2]),
                    'chars': chars
                }
                line_number += 1
            kbd_layout['layout'] = layout
            kbd_layout['all_originally_reachable_characters'] = ''.join(sorted(all_originally_reachable_characters))
        elif fields[0] == 'DEADKEY':
            # TODO
            pass
        elif fields[0] == 'KEYNAME':
            # TODO
            pass
        elif fields[0] == 'KEYNAME_EXT':
            # TODO
            pass
        elif fields[0] == 'KEYNAME_DEAD':
            # TODO
            pass
        elif fields[0] == 'DESCRIPTIONS':
            # TODO
            pass
        elif fields[0] == 'LANGUAGENAMES':
            # TODO
            pass
    return kbd_layout
# HID usage ID -> PS/2 scan code set 2 translation table.  Hoisted to module
# level so the dict literal is not rebuilt on every call; the original also
# contained a duplicated 0x2B entry (with the same value), removed here.
_PS2_SET2_FROM_HID = {
    0x01: 0x76, 0x02: 0x16, 0x03: 0x1E, 0x04: 0x26, 0x05: 0x25, 0x06: 0x2E,
    0x07: 0x36, 0x08: 0x3D, 0x09: 0x3E, 0x0A: 0x46, 0x0B: 0x45, 0x0C: 0x4E,
    0x0D: 0x55, 0x0E: 0x66, 0x0F: 0x0D, 0x10: 0x15, 0x11: 0x1D, 0x12: 0x24,
    0x13: 0x2D, 0x14: 0x2C, 0x15: 0x35, 0x16: 0x3C, 0x17: 0x43, 0x18: 0x44,
    0x19: 0x4D, 0x1A: 0x54, 0x1B: 0x5B, 0x1C: 0x5A, 0x1E: 0x1C, 0x1F: 0x1B,
    0x20: 0x23, 0x21: 0x2B, 0x22: 0x34, 0x23: 0x33, 0x24: 0x3B, 0x25: 0x42,
    0x26: 0x4B, 0x27: 0x4C, 0x28: 0x52, 0x29: 0x0E, 0x2B: 0x5D, 0x2C: 0x1A,
    0x2D: 0x22, 0x2E: 0x21, 0x2F: 0x2A, 0x30: 0x32, 0x31: 0x31, 0x32: 0x3A,
    0x33: 0x41, 0x34: 0x49, 0x35: 0x4A, 0x39: 0x29, 0x3A: 0x58, 0x3B: 0x05,
    0x3C: 0x06, 0x3D: 0x04, 0x3E: 0x0C, 0x3F: 0x03, 0x40: 0x0B, 0x41: 0x83,
    0x42: 0x0A, 0x43: 0x01, 0x44: 0x09, 0x53: 0x71, 0x56: 0x61, 0x57: 0x78,
    0x58: 0x07,
}


def ps2_set2_code_from_hid_code(c):
    """Return the PS/2 set-2 scan code for HID usage ID c, or 0 if unmapped."""
    return _PS2_SET2_FROM_HID.get(c, 0)
def petscii_from_unicode(c):
    """Map a Unicode character to PETSCII; chr(0) marks an unmappable input."""
    code = ord(c)
    # PETSCII codes smuggled through the Unicode private-use area.
    if 0xf800 <= code <= 0xf8ff:
        return chr(code - 0xf800)
    # ASCII characters with no PETSCII equivalent.
    if c in '\\|_{}~':
        return chr(0)
    # The three PETSCII glyphs that live elsewhere in Unicode.
    specials = {0xa3: 0x5c, 0x2190: 0x5f, 0x03c0: 0xde}
    if code in specials:
        return chr(specials[code])
    # Case swap: ASCII upper-case -> PETSCII $C1-$DA, lower-case -> $41-$5A.
    if ord('A') <= code <= ord('Z'):
        return chr(code + 0x80)
    if ord('a') <= code <= ord('z'):
        return chr(code - 0x20)
    # Control characters other than CR, and everything from '~' up, are out.
    if code < 0x20 and c != '\r':
        return chr(0)
    if code >= 0x7e:
        return chr(0)
    return c
def latin15_from_unicode(c):
    """Map a Unicode character to Latin-15 (ISO 8859-15); chr(0) = unsupported."""
    code = ord(c)
    if code <= 0xff:
        # Latin-15 dropped these eight Latin-1 positions ('¤¦¨´¸¼½¾');
        # everything else in the 8-bit range is identical.
        if code in (0xA4, 0xA6, 0xA8, 0xB4, 0xB8, 0xBC, 0xBD, 0xBE):
            return chr(0)
        return c
    # The characters Latin-15 placed into the freed-up positions.
    replacements = {
        0x20ac: 0xa4,  # '€'
        0x160: 0xa6,   # 'Š'
        0x161: 0xa8,   # 'š'
        0x17d: 0xb4,   # 'Ž'
        0x17e: 0xb8,   # 'ž'
        0x152: 0xbc,   # 'Œ'
        0x153: 0xbd,   # 'œ'
        0x178: 0xbe,   # 'Ÿ'
    }
    # Any other code point above 0xff is unsupported -> chr(0).
    return chr(replacements.get(code, 0))
def unicode_from_petscii(c):
    """Map a PETSCII character back to Unicode (minimal: special glyphs only)."""
    # Only the three glyphs that differ from their code points are handled:
    # £ at $5C, ← at $5F and π at $DE; everything else passes through.
    glyphs = {0x5c: chr(0xa3), 0x5f: chr(0x2190), 0xde: chr(0x03c0)}
    return glyphs.get(ord(c), c)
# constants

# a string with all printable 7-bit PETSCII characters
all_petscii_chars = " !\"#$%&'()*+,-./0123456789:;<=>?@"
for c in "abcdefghijklmnopqrstuvwxyz":
    # ASCII lower-case letters occupy the PETSCII upper-case slots ($41-$5A).
    all_petscii_chars += chr(ord(c) - 0x20)
all_petscii_chars += "[\]^_ABCDEFGHIJKLMNOPQRSTUVWXYZ"
all_petscii_chars += "\xde" # π

# all PETSCII control codes and their descriptions
control_codes = {
    0x03: 'RUN/STOP',
    0x05: 'WHITE',
    0x08: 'SHIFT_DISABLE',
    0x09: 'SHIFT_ENABLE',
    0x0d: 'CR',
    0x0e: 'TEXT_MODE',
    0x11: 'CURSOR_DOWN',
    0x12: 'REVERSE_ON',
    0x13: 'HOME',
    0x14: 'DEL',
    0x1c: 'RED',
    0x1d: 'CURSOR_RIGHT',
    0x1e: 'GREEN',
    0x1f: 'BLUE',
    0x81: 'ORANGE',
    0x85: 'F1',
    0x86: 'F3',
    0x87: 'F5',
    0x88: 'F7',
    0x89: 'F2',
    0x8a: 'F4',
    0x8b: 'F6',
    0x8c: 'F8',
    0x8d: 'SHIFT+CR',
    0x8e: 'GRAPHICS',
    0x90: 'BLACK',
    0x91: 'CURSOR_UP',
    0x92: 'REVERSE_OFF',
    0x93: 'CLR',
    0x94: 'INSERT',
    0x95: 'BROWN',
    0x96: 'LIGHT_RED',
    0x97: 'DARK_GRAY',
    0x98: 'MIDDLE_GRAY',
    0x99: 'LIGHT_GREEN',
    0x9a: 'LIGHT_BLUE',
    0x9b: 'LIGHT_GRAY',
    0x9c: 'PURPLE',
    0x9d: 'CURSOR_LEFT',
    0x9e: 'YELLOW',
    0x9f: 'CYAN',
    0xa0: 'SHIFT+SPACE',
}

# every control code gathered into a single string.
all_petscii_codes = ""
for c in control_codes.keys():
    all_petscii_codes += chr(c)

# all printable PETSCII graphics characters
all_petscii_graphs = ""
for c in range(0xa1, 0xc0):
    all_petscii_graphs += chr(c)

# the following PETSCII control codes do not have to be reachable
# through the keyboard
all_petscii_codes_ok_if_missing = [
    chr(0x1d), # CURSOR_RIGHT - covered by cursor keys
    chr(0x8e), # GRAPHICS - not covered on C64 either
    chr(0x91), # CURSOR_UP - covered by cursor keys
    chr(0x93), # CLR - covered by E0-prefixed key
    chr(0x9d), # CURSOR_LEFT - covered by cursor keys
]
if len(sys.argv) >= 3 and sys.argv[2] == '-iso':
iso_mode = True
else:
iso_mode = False
load_patch = not iso_mode
kbd_layout = get_kbd_layout(sys.argv[1], load_patch)
layout = kbd_layout['layout']
shiftstates = kbd_layout['shiftstates']
keytab = {}
for shiftstate in shiftstates:
keytab[shiftstate] = [ '\0' ] * 128
# some layouts don't define Alt at all
if not ALT in keytab:
keytab[ALT] = [ '\0' ] * 128
# create PS/2 Code 2 -> PETSCII tables
for hid_scancode in layout.keys():
ps2_scancode = ps2_set2_code_from_hid_code(hid_scancode)
l = layout[hid_scancode]['chars']
for shiftstate in keytab.keys():
if shiftstate in l:
c_unicode = l[shiftstate]
if iso_mode:
keytab[shiftstate][ps2_scancode] = latin15_from_unicode(c_unicode)
else:
keytab[shiftstate][ps2_scancode] = petscii_from_unicode(c_unicode)
# stamp in f-keys independent of shiftstate
for shiftstate in keytab.keys():
keytab[shiftstate][5] = chr(0x85) # f1
keytab[shiftstate][6] = chr(0x89) # f2
keytab[shiftstate][4] = chr(0x86) # f3
keytab[shiftstate][12] = chr(0x8a) # f4
keytab[shiftstate][3] = chr(0x87) # f5
keytab[shiftstate][11] = chr(0x8b) # f6
keytab[shiftstate][2] = chr(0x88) # f7
keytab[shiftstate][10] = chr(0x8c) # f8
# C65 additions
keytab[shiftstate][1] = chr(0x10) # f9
keytab[shiftstate][9] = chr(0x15) # f10
keytab[shiftstate][0x78] = chr(0x16) # f11
keytab[shiftstate][7] = chr(0x17) # f12
# * PS/2 keyboards don't have the C65 f13 (chr(0x19)) and f14 (chr(0x1a))
# -> we don't map them
# * PS/2 keyboards don't have the C128/C65 "HELP" (chr(0x83))
# -> TODO
# stamp in Ctrl/Alt color codes
# (scancode, petscii) pairs for the digit row; Ctrl+digit produces the
# classic C64 color/reverse control codes
petscii_from_ctrl_scancode = [ # Ctrl
	(0x16, 0x90), # '1'
	(0x1e, 0x05), # '2'
	(0x26, 0x1c), # '3'
	(0x25, 0x9f), # '4'
	(0x2e, 0x9c), # '5'
	(0x36, 0x1e), # '6'
	(0x3d, 0x1f), # '7'
	(0x3e, 0x9e), # '8'
	(0x46, 0x12), # '9' REVERSE ON
	(0x45, 0x92), # '0' REVERSE OFF
]
# Alt+digit produces the second set of color codes
petscii_from_alt_scancode = [ # Alt
	(0x16, 0x81), # '1'
	(0x1e, 0x95), # '2'
	(0x26, 0x96), # '3'
	(0x25, 0x97), # '4'
	(0x2e, 0x98), # '5'
	(0x36, 0x99), # '6'
	(0x3d, 0x9a), # '7'
	(0x3e, 0x9b), # '8'
]
for (scancode, petscii) in petscii_from_ctrl_scancode:
	if keytab[CTRL][scancode] == chr(0): # only if unassigned
		keytab[CTRL][scancode] = chr(petscii)
for (scancode, petscii) in petscii_from_alt_scancode:
	if keytab[ALT][scancode] == chr(0): # only if unassigned
		keytab[ALT][scancode] = chr(petscii)
# stamp in Alt graphic characters
# (PETSCII block-graphics reachable via Alt+letter; skipped in ISO mode
# because ISO-8859-15 has no equivalents)
if not iso_mode:
	petscii_from_alt_scancode = [
		(0x1c, 0xb0), # 'A'
		(0x32, 0xbf), # 'B'
		(0x21, 0xbc), # 'C'
		(0x23, 0xac), # 'D'
		(0x24, 0xb1), # 'E'
		(0x2b, 0xbb), # 'F'
		(0x34, 0xa5), # 'G'
		(0x33, 0xb4), # 'H'
		(0x43, 0xa2), # 'I'
		(0x3b, 0xb5), # 'J'
		(0x42, 0xa1), # 'K'
		(0x4b, 0xb6), # 'L'
		(0x3a, 0xa7), # 'M'
		(0x31, 0xaa), # 'N'
		(0x44, 0xb9), # 'O'
		(0x4d, 0xaf), # 'P'
		(0x15, 0xab), # 'Q'
		(0x2d, 0xb2), # 'R'
		(0x1b, 0xae), # 'S'
		(0x2c, 0xa3), # 'T'
		(0x3c, 0xb8), # 'U'
		(0x2a, 0xbe), # 'V'
		(0x1d, 0xb3), # 'W'
		(0x22, 0xbd), # 'X'
		(0x35, 0xb7), # 'Y'
		(0x1a, 0xad), # 'Z'
	]
	for (scancode, petscii) in petscii_from_alt_scancode:
		if keytab[ALT][scancode] == chr(0): # only if unassigned
			keytab[ALT][scancode] = chr(petscii)
# generate Ctrl codes for A-Z
# Ctrl+letter maps to control codes 1..26; the unshifted table holds
# lowercase letters in ISO mode but uppercase in PETSCII mode
for i in range(0, len(keytab[REG])):
	c = keytab[REG][i]
	if iso_mode and ord(c) >= ord('a') and ord(c) <= ord('z'):
		c = chr(ord(c) - ord('a') + 1)
	elif not iso_mode and ord(c) >= ord('A') and ord(c) <= ord('Z'):
		c = chr(ord(c) - ord('A') + 1)
	else:
		c = None
	if c and keytab[CTRL][i] == chr(0): # only if unassigned
		keytab[CTRL][i] = c
# stamp in backspace and TAB
# shiftstate 0 gets the plain keys; every other shiftstate gets the
# shifted variants (insert, shift-TAB, shift-CR, shift-SPACE)
for shiftstate in keytab.keys():
	if shiftstate == 0:
		keytab[shiftstate][0x66] = chr(0x14) # backspace
		keytab[shiftstate][0x0d] = chr(0x09) # TAB
		keytab[shiftstate][0x5a] = chr(0x0d) # CR
		keytab[shiftstate][0x29] = chr(0x20) # SPACE
	else:
		keytab[shiftstate][0x66] = chr(0x94) # insert
		keytab[shiftstate][0x0d] = chr(0x18) # shift-TAB
		keytab[shiftstate][0x5a] = chr(0x8d) # shift-CR
		keytab[shiftstate][0x29] = chr(0xA0) # shift-SPACE
# analyze problems
# Collect every character reachable through any modifier table, then
# diff against the full PETSCII sets to report what this layout cannot
# produce (emitted later as ';' comments in the output).
all_keytabs = keytab[REG] + keytab[SHFT] + keytab[CTRL] + keytab[ALT]
if ALTGR in keytab:
	all_keytabs += keytab[ALTGR]
petscii_chars_not_reachable = ""
for c in all_petscii_chars:
	if not c in all_keytabs:
		petscii_chars_not_reachable += unicode_from_petscii(c)
petscii_codes_not_reachable = ""
for c in all_petscii_codes:
	if not c in all_keytabs:
		if not c in all_petscii_codes_ok_if_missing:
			petscii_codes_not_reachable += c
petscii_graphs_not_reachable = ""
for c in all_petscii_graphs:
	if not c in all_keytabs:
		petscii_graphs_not_reachable += c
# characters the Windows layout could type but our target encoding
# (PETSCII or ISO-8859-15) cannot represent or the tables do not contain
unicode_not_reachable = ""
for c_unicode in kbd_layout['all_originally_reachable_characters']:
	if iso_mode:
		c_encoded = latin15_from_unicode(c_unicode)
	else:
		c_encoded = petscii_from_unicode(c_unicode)
	if (c_encoded == chr(0) or not c_encoded in all_keytabs) and not c_unicode in unicode_not_reachable:
		unicode_not_reachable += c_unicode
# sort the reports so the output is deterministic
petscii_chars_not_reachable = ''.join(sorted(petscii_chars_not_reachable))
petscii_codes_not_reachable = ''.join(sorted(petscii_codes_not_reachable))
petscii_graphs_not_reachable = ''.join(sorted(petscii_graphs_not_reachable))
unicode_not_reachable = ''.join(sorted(unicode_not_reachable))
# print
# Emit the result as ca65 assembly on stdout: a comment header with the
# reachability report, a *KBDMETA segment with locale + table pointers,
# and a *KBDTABLES segment with one 128-byte table per shiftstate.
name = kbd_layout['name'].replace(' - Custom', '')
kbd_id = kbd_layout['short_id'].lower()
print("; Name: " + name)
print("; Locale: " + kbd_layout['localename'])
print("; KLID: " + kbd_id)
print(";")
if len(petscii_chars_not_reachable) > 0 or len(petscii_codes_not_reachable) > 0 or len(petscii_graphs_not_reachable) > 0:
	print("; PETSCII characters reachable on a C64 keyboard that are not reachable with this layout:")
	if len(petscii_chars_not_reachable) > 0:
		print("; chars: " + pprint.pformat(petscii_chars_not_reachable))
	if len(petscii_codes_not_reachable) > 0:
		print("; codes: ", end = '')
		for c in petscii_codes_not_reachable:
			if ord(c) in control_codes:
				print(control_codes[ord(c)] + ' ', end = '')
			else:
				print(hex(ord(c)) + ' ', end = '')
		print()
	if len(petscii_graphs_not_reachable) > 0:
		print("; graph: '", end = '')
		for c in petscii_graphs_not_reachable:
			print("\\x{0:02x}".format(ord(c)), end = '')
		print("'")
if len(unicode_not_reachable) > 0:
	if iso_mode:
		print("; Unicode characters reachable with this layout on Windows but not covered by ISO-8859-15:")
	else:
		print("; Unicode characters reachable with this layout on Windows but not covered by PETSCII:")
	print("; '", end = '')
	for c in unicode_not_reachable:
		if ord(c) < 0x20:
			print("\\x{0:02x}".format(ord(c)), end = '')
		else:
			print(c, end = '')
	print("'")
print()
# ISO tables live in separate segments and get an 'i' symbol prefix
if iso_mode:
	print('.segment "IKBDMETA"\n')
	prefix = 'i'
else:
	print('.segment "KBDMETA"\n')
	prefix = ''
# locale: emit the 2-letter code if language == country (e.g. "DE-DE"
# -> "DE"), otherwise the full 5-character name; pad with 0 to 6 bytes
locale1 = kbd_layout['localename'][0:2].upper()
locale2 = kbd_layout['localename'][3:5].upper()
if locale1 != locale2:
	locale1 = kbd_layout['localename'].upper()
	if len(kbd_layout['localename']) != 5:
		sys.exit("unknown locale format: " + kbd_layout['localename'])
print('\t.byte "' + locale1 + '"', end = '')
for i in range(0, 6 - len(locale1)):
	print(", 0", end = '')
print()
# table pointers; a missing AltGr table falls back to the Alt table.
# All but the REG pointer are biased by -13 (compressed tables start
# at scancode 13 -- see COMPRESSED_OUTPUT below).
for shiftstate in [SHFT, ALT, CTRL, ALTGR, REG]:
	if shiftstate == ALTGR and not ALTGR in keytab.keys():
		print_shiftstate = ALT
	else:
		print_shiftstate = shiftstate
	print("\t.word {}kbtab_{}_{}".format(prefix, kbd_id, print_shiftstate), end = '')
	if shiftstate == REG:
		print()
	else:
		print("-13")
print()
if iso_mode:
	print('.segment "IKBDTABLES"\n')
else:
	print('.segment "KBDTABLES"\n')
# one table per shiftstate, labeled with a human-readable comment
for shiftstate in [REG, SHFT, CTRL, ALT, ALTGR]:
	if shiftstate == ALTGR and not ALTGR in keytab.keys():
		continue
	print("{}kbtab_{}_{}: ; ".format(prefix, kbd_id, shiftstate), end = '')
	if shiftstate == 0:
		print('Unshifted', end='')
	if shiftstate & 1:
		print('Shft ', end='')
	if shiftstate & 6 == 6:
		print('AltGr ', end='')
	else:
		if shiftstate & 2:
			print('Ctrl ', end='')
		if shiftstate & 4:
			print('Alt ', end='')
	# non-REG tables can be emitted compressed: scancodes 13..103 only
	if COMPRESSED_OUTPUT == 1 and shiftstate != REG:
		start = 13
		end = 104
	else:
		start = 0
		end = 128
	# 8 bytes per .byte line; printable ASCII as 'c', the rest as $hh
	for i in range(start, end):
		if i == start or i & 7 == 0:
			print()
			print('\t.byte ', end='')
		c = keytab[shiftstate][i]
		if ord(c) >= 0x20 and ord(c) <= 0x7e:
			print("'{}'".format(c), end = '')
		else:
			print("${:02x}".format(ord(c)), end = '')
		if i & 7 != 7:
			print(',', end = '')
	print()
| [
"re.split",
"pprint.pformat",
"io.open",
"sys.exit"
] | [((258, 307), 'io.open', 'io.open', (['filename_klc'], {'mode': '"""r"""', 'encoding': '"""utf-8"""'}), "(filename_klc, mode='r', encoding='utf-8')\n", (265, 307), False, 'import io, re, codecs, sys, os.path\n'), ((14988, 15050), 'sys.exit', 'sys.exit', (["('unknown locale format: ' + kbd_layout['localename'])"], {}), "('unknown locale format: ' + kbd_layout['localename'])\n", (14996, 15050), False, 'import io, re, codecs, sys, os.path\n'), ((441, 494), 'io.open', 'io.open', (['filename_changes'], {'mode': '"""r"""', 'encoding': '"""utf-8"""'}), "(filename_changes, mode='r', encoding='utf-8')\n", (448, 494), False, 'import io, re, codecs, sys, os.path\n'), ((1502, 1523), 're.split', 're.split', (['"""\\\\t"""', 'line'], {}), "('\\\\t', line)\n", (1510, 1523), False, 'import io, re, codecs, sys, os.path\n'), ((1062, 1083), 're.split', 're.split', (['"""\\\\t"""', 'line'], {}), "('\\\\t', line)\n", (1070, 1083), False, 'import io, re, codecs, sys, os.path\n'), ((13769, 13812), 'pprint.pformat', 'pprint.pformat', (['petscii_chars_not_reachable'], {}), '(petscii_chars_not_reachable)\n', (13783, 13812), False, 'import pprint\n')] |
import pandas as pd
from gooey import Gooey, GooeyParser
import numpy as np
import xlsxwriter
import xlrd
@Gooey(program_name="FEC FILE FOR FRANCE", required_cols= 4,default_size=(710, 700),navigation='TABBED', header_bg_color = '#48a7fa')
def parse_args():
    """Build the Gooey GUI form and return the parsed arguments.

    The form collects three inputs: the SAP G/L-view export, the SAP
    entry-view export of parked/noted items, and the output FEC file
    name (validated to end in ``.xlsx``).
    """
    gui = GooeyParser()
    # posted G/L items workbook
    gl_group = gui.add_argument_group('GL Posted Items')
    gl_group.add_argument(
        'GL',
        action='store',
        widget='FileChooser',
        help="Excel File From SAP G/L View: Normal Items")
    # parked / noted items workbook
    parked_group = gui.add_argument_group('Entry View Parked Items')
    parked_group.add_argument(
        'Parked',
        action='store',
        widget='FileChooser',
        help="Excel File From SAP Entry View: Only Parked and Noted Items")
    # output file name, must end with .xlsx
    name_group = gui.add_argument_group('FEC Name')
    name_group.add_argument(
        'Choose_File_Name',
        action='store',
        help="File name with .xlsx in the end. Standard for FEC is 533080222FECYYYYMMDD",
        gooey_options={
            'validator': {
                'test': 'user_input.endswith(".xlsx") == True',
                'message': 'Must contain .xlsx at the end!'
            }
        })
    return gui.parse_args()
def combine(file, file2):
    """Merge posted G/L items with parked items that are not yet posted.

    Parameters
    ----------
    file : str
        Path to the Excel export of the SAP G/L view (normal items).
    file2 : str
        Path to the Excel export of the SAP entry view (parked/noted items).

    Returns
    -------
    pandas.DataFrame
        Posted rows plus parked rows whose 'Document Number' is not
        already posted, re-indexed; the old index is kept as an 'index'
        column (downstream code relies on reset_index's default).
    """
    gl_df = pd.read_excel(file)
    parked_df = pd.read_excel(file2)
    posted_numbers = gl_df['Document Number'].tolist()
    # DataFrame.append() was removed in pandas 2.0 -> use pd.concat()
    unposted = parked_df[~parked_df['Document Number'].isin(posted_numbers)]
    gl = pd.concat([gl_df, unposted])
    gl = gl.reset_index()
    return gl
def transform(gl):
    """Reshape the combined SAP export into the French FEC column set.

    Derives the FEC fields (JournalCode, EcritureNum, ...), splits the
    signed local-currency amount into Debit/Credit columns, and remaps
    account numbers/labels, journal labels and vendor names.

    Side effects: reads the mapping workbooks mapping-accounts.xlsx,
    test128.xlsx, mapping-journal.xlsx and Vendors1.xlsx from the
    working directory.

    Parameters
    ----------
    gl : pandas.DataFrame
        Output of combine(): one row per document line.

    Returns
    -------
    pandas.DataFrame
        The frame extended/mutated with the FEC columns.
    """
    # --- 1:1 column derivations from the SAP export ----------------------
    gl['JournalCode'] = gl['Document Type']
    gl['JournalLib'] = gl['Document Header Text']
    gl['EcritureNum'] = gl['Document Number']
    gl['EcritureDate'] = gl['Posting Date']
    gl['CompteNum'] = gl['G/L Account']
    gl['CompteLib'] = gl['G/L Account']
    gl['CompAuxLib'] = gl['Offsetting acct no.']
    gl['PieceRef'] = gl['Reference']
    gl['EcritureLib'] = gl['Text']
    gl['Amount'] = gl['Amount in local currency']
    gl['MontantDevise'] = ''
    gl['Idevise'] = ''
    gl['PieceDate'] = gl['Document Date']
    gl['ValidDate'] = gl['Entry Date']
    gl['EcritureLet'] = gl['Assignment']
    gl['DateLet'] = gl['Entry Date']
    # --- split the signed amount into Debit / Credit ---------------------
    gl = gl.dropna(subset=['Amount'])
    gl.loc[gl["Amount"] < 0 ,'Credit'] = gl['Amount']
    gl.loc[gl["Amount"] > 0 ,'Debit'] = gl['Amount']
    gl.loc[gl["Debit"].isnull() ,'Debit'] = 0
    gl.loc[gl["Credit"].isnull() ,'Credit'] = 0
    # no lettering -> blank lettering date (statement was duplicated in
    # the original; one occurrence suffices)
    gl.loc[gl["EcritureLet"].isnull(),'DateLet'] = ''
    del gl['Amount']
    del gl['Amount in local currency']
    # --- account number / label mapping ----------------------------------
    accounts = pd.read_excel("mapping-accounts.xlsx")
    accounts1 = accounts[['G/L Account #','FrMap']]
    accounts2 = accounts[['G/L Account #','FEC Compliant']]
    accounts1 = accounts1.set_index('G/L Account #').to_dict()['FrMap']
    accounts2 = accounts2.set_index('G/L Account #').to_dict()['FEC Compliant']
    gl['CompteLib'] = gl['CompteLib'].replace(accounts2)
    # prefix the French chart-of-accounts number onto the SAP account number
    gl['CompteNum'] = (gl['CompteNum'].map(accounts1).astype('Int64').astype(str) + gl['CompteNum'].astype(str))
    # strip the trailing ".0" float artifact; regex=True is required since
    # pandas 2.0 (str.replace no longer treats the pattern as a regex by
    # default), and the raw string avoids the invalid '\.' escape warning
    gl['CompteNum'] = gl['CompteNum'].str.replace(r'\.0$', '', regex=True)
    # --- journal label mapping -------------------------------------------
    journals = pd.read_excel("test128.xlsx")
    codes = pd.read_excel('mapping-journal.xlsx')
    journals = journals.set_index('DocHeader').to_dict()['JournalLib_FR']
    codes = codes.set_index('JournalCode').to_dict()["JournalLib_FR"]
    # fall back to the journal code when the header text is missing
    gl.loc[gl["JournalLib"].isnull(),'JournalLib'] = gl["JournalCode"].map(str)
    gl['JournalLib'] = gl['JournalLib'].replace(journals)
    gl['JournalLib'] = gl['JournalLib'].replace(codes)
    # --- vendor (auxiliary account) mapping -------------------------------
    vendors = pd.read_excel("Vendors1.xlsx")
    vendors = vendors.set_index('No').to_dict()['Name']
    gl['CompAuxLib'] = gl['CompAuxLib'].map(vendors)
    gl['CompAuxNum'] = "F" + gl['CompAuxLib']
    # synthesize a line label when SAP provided none
    gl.loc[(~gl.CompAuxLib.isnull()) & (gl["EcritureLib"].isnull()),'EcritureLib'] = gl['JournalLib'].map(str) + " de " + gl['CompAuxLib'].map(str)
    gl.loc[(gl.CompAuxLib.isnull()) & (gl["EcritureLib"].isnull()),'EcritureLib'] = gl['JournalLib'].map(str) + gl['EcritureNum'].map(str)
    # drop leading digits from the label; regex=True needed since pandas 2.0
    gl['EcritureLib'] = gl['EcritureLib'].str.replace(r'^\d+', '', regex=True)
    return gl
def translate(gl):
journals = pd.read_excel("test128.xlsx")
codes = pd.read_excel('mapping-journal.xlsx')
journals = journals.set_index('DocHeader').to_dict()['JournalLib_FR']
codes = codes.set_index('JournalCode').to_dict()["JournalLib_FR"]
mapping_Valuation = {" Valuation on": " Évaluation sur"," Valuation on Reverse":" Évaluation sur Contre Passation",
" Reverse Posting":" Contre-Passation d'Ecriture - Conversion de devise sur",
" Translation Using":" Conversion de devise sur"}
mapping_AA = {"Reclass from": " Reclassification de", "reclass from": " Reclassification de", "ZEE MEDIA":"ZEE MEDIA Campaignes Numériques", "TRAINING CONTRI. ER JANUARY '19":"FORMATION CONTRI. ER JANVIER' 19",
"TAX FEES":"Taxes","SOCIAL SECURITY: URSSAF":"SÉCURITÉ SOCIALE: URSSAF","SOCIAL SECURITY: TRAINING CONTRIBUTIONS":"SÉCURITÉ SOCIALE: CONTRIBUTIONS À LA FORMATION",
"SOCIAL SECURITY: APPRENTICESHIP CONTRIBU":"SÉCURITÉ SOCIALE: CONTRIBUTION À L’APPRENTISSAGE","RSM":"SERVICES DE PAIE RSM EF18","RSA":"SERVICES DE PAIE RSA OCT-JAN",
"PRIVATE HEALTH":"SANTÉ PRIVÉE: ASSURANCE MÉDICALE-AXA/","PENSION: PENSION CONTRIBUTIONS - REUNICA":"PENSION: COTISATIONS DE RETRAITE-REUNICA","PENSION: LIFE & DISABILITY INSURANCE - R":"PENSION: ASSURANCE VIE & INVALIDITÉ-R",
"PENSION JANUARY '19":"PENSION JANVIER '19",
"ON CALL JANUARY '19":"Disponible Janvier'19",
"NRE + PROJECT INITIATION FEES":"NRE + FRAIS D’INITIATION AU PROJET (PO 750003","NET PAY JANUARY '19":"Payeante Janvier'19","JANUARY'19":"JANVIER'19",
"LUNCH VOUCHER- WITHHOLDING":"BON DÉJEUNER-RETENUE","HOLIDAY BONUS ACCRUAL FY18/19":"CUMUL DES PRIMES DE VACANCES EF18/19",
"GROSS SALARY JANUARY '19":"SALAIRE BRUT JANVIER' 19","EMEA ACCRUAL P8FY19":"P8FY19 D’ACCUMULATION EMEA","COMMISSION RE-ACCRUAL":"COMMISSION RÉ-ACCUMULATION",
"COMMISSION ACCRUAL":"COMMISSION D’ACCUMULATION","MARCH":"MARS","MAY":"MAI","APRIL":"AVRIL","AUDIT FEES":"HONORAIRES D’AUDIT",
"UNSUBMITTED_UNPOSTED BOA ACCRUAL":"Accumulation BOA non soumise non exposée","UNASSIGNED CREDITCARD BOA ACCRUAL":"NON ASSIGNÉ CREDITCARD BOA ACCUMULATION ",
"EMEA ACCRUAL":"ACCUMULATION EMEA","Exhibit Expenses":"Frais d'exposition","Hotel Tax":"Taxe hôtelière","Company Events":"Événements d'entreprise",
"Public Transport":"Transport public", "Agency Booking Fees":"Frais de réservation d'agence","Working Meals (Employees Only)":"Repas de travail (employés seulement)",
"Airfare":"Billet d'avion","Office Supplies":"Fournitures de bureau","Tolls":"Péages",
"write off difference see e-mail attached":"radiation de la différence voir e-mail ci-joint",
"Manual P/ment and double payment to be deduct":"P/ment manuel et double paiement à déduire","FX DIFFERENCE ON RSU":"DIFFERENCE FX SUR RSU",
"DEFINED BENEFIT LIABILITY-TRUE UP":"RESPONSABILITÉ À PRESTATIONS DÉTERMINÉES-TRUE UP","EXTRA RELEASE FOR STORAGE REVERSED":"EXTRA LIBERATION POUR STOCKAGE CONTREPASSATION",
"RECLASS BANK CHARGES TO CORRECT COST CEN":"RECLASSER LES FRAIS BANCAIRES POUR CORRIGER","PAYROLL INCOME TAXES":"IMPÔTS SUR LES SALAIRES",
"TRAINING TAX TRUE UP":"TAXE DE FORMATION", "FX DIFFERENCE ON STOCK OPTION EXERCISES":"FX DIFFERENCE SUR LES EXERCICES D'OPTIONS STOCK",
"Airline Frais":"Frais de Transport Aérien","Agency Booking Fees":"Frais de Réservation d'Agence","Computer Supplies":"Fournitures informatiques",
"AUDIT FEES":"FRAIS D'AUDIT", "HOLIDAY BONUS ACCRUAL ":"ACCUMULATION DE BONUS DE VACANCES","TAX FEES":"FRAIS D'IMPÔT",
"SOCIAL SECURITY: APPRENTICESHIP CONTRIBU":"SÉCURITÉ SOCIALE: CONTRIBUITION À L’APPRENTISSAGE",
"SOCIAL SECURITY: TRAINING CONTRIBUTIONS":"SÉCURITÉ SOCIALE: CONTRIBUTIONS À LA FORMATION", "TRAVEL COST":"FRAIS DE VOYAGE", "HOUSING TAX":"TAXE SUR LE LOGEMENT",
"PAYROLL INCOME TAXES":"IMPÔTS SUR LE REVENU DE LA PAIE","INCOME TAX-PAS":"IMPÔT SUR LE REVENU-PAS", "IC SETTLEMENT":"Règlement Interentreprises",
"VACATION TAKEN":"VACANCES PRISES", "SOCIAL SECURITY: APPR. CONTR.":"SÉCURITÉ SOCIALE: CONTRIBUTION À L’APPRENTISSAGE",
"POST OF AVRIL DEC IN CORRECT SIGN":"CORRECTION D'ECRITURE AVRIL DEC"}
gl = gl.replace({"EcritureLib":mapping_Valuation}, regex=True)
gl = gl.replace({"EcritureLib":mapping_AA}, regex=True)
gl['EcritureLib'] = gl["EcritureLib"].str.replace('COST-PLUS', 'Revient Majoré')
gl['EcritureLib'] = gl["EcritureLib"].str.replace('PRITVAE HEALTH: MEDICAL INSURANCE', 'SANTÉ PRIVÉE: ASSURANCE MÉDICALE')
gl['EcritureLib'] = gl["EcritureLib"].str.replace('MEDICAL INSURANCE', 'ASSURANCE MÉDICALE')
gl['EcritureLib'] = gl["EcritureLib"].str.replace('UNASSIGNED', 'NON ATTRIBUÉ')
gl['EcritureLib'] = gl["EcritureLib"].str.replace('Payout', 'Paiement')
gl['EcritureLib'] = gl["EcritureLib"].str.replace('FRINGE COST', 'COÛT MARGINAL')
gl['EcritureLib'] = gl["EcritureLib"].str.replace('PROJECT INITIATION', 'LANCEMENT DU PROJET')
gl['EcritureLib'] = gl["EcritureLib"].str.replace('ACCRUAL', 'ACCUMULATION')
gl['EcritureLib'] = gl["EcritureLib"].str.replace('CREDITCARD', 'CARTE DE CRÉDIT')
gl['EcritureLib'] = gl["EcritureLib"].str.replace('ACCR ', 'ACCUM ')
gl['EcritureLib'] = gl["EcritureLib"].str.replace('VAT ', 'TVA ')
gl['EcritureLib'] = gl["EcritureLib"].str.replace('SOCIAL SECURITY ', 'SÉCURITÉ SOCIALE')
gl['EcritureLib'] = gl["EcritureLib"].str.replace('SEPTEMBER', 'SEPT')
gl['EcritureLib'] = gl["EcritureLib"].str.replace('TAXBACK', 'Reboursement')
gl['EcritureLib'] = gl["EcritureLib"].str.replace('REPORT', '')
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Reverse Posting", "Contre Passation d'Ecriture")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("BASE RENT", "Location Base")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Rent ", "Location ")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("RENT ", "Location ")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("CLEARING", "compensation ")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("clearing", "compensation ")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("BILLING CHARGES", "FRAIS DE FACTURATION ")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("UNPAID", "NON PAYÉ")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("PROPERTY TAX", "IMPÔT FONCIER ")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Trans. Using", "Conversion sur")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("SALARIES", "Salaires")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Refund", "Remboursement")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("REFUND", "Remboursement")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("no invoice", "pas de facture")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("COST-PLUS SERVICE REVENUE", "Revenus de service Revient Majoré")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("SETTLEMENT", "RÈGLEMENT ")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("PURCHASE", "ACHAT")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("NON-CP SETTLE", "RÈGLEMENT NON-CP")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("PAID ", " Payé ")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("FEES ", "Frais")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("January", "Janvier")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("February", "Février")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("March", "Mars")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("April", "Avril")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("May", "Mai")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("June", "Juin")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("July", "Juillet")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("September", "Septembre")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Aug.", "Août")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("JANUARY", "Janvier")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("FEBRUARY", "Février")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("MARCH", "Mars")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("APRIL", "Avril")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("MAY", "Mai")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("JUNE", "Juin")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("JULY", "Juillet")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("SEPTEMBER", "Septembre")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("AUGUST.", "Août")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("NOVEMBER.", "Novembre")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("DECEMBER.", "Décembre")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("December", "Décembre")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Feb.", "Fév.")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Mar.", "Mars")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Apr.", "Avril")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Aug.", "Août")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Aug.", "Août")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Reverse ", "Contre-passation ")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("INTEREST CHARGE", "CHARGE D'INTÉRÊT")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("-SICK LEAVE PAY", "-Paiement congé maladie")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("RECLASSEMENTIFICATION", "RECLASSIFICATION")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("INSTALMENT", "VERSEMENT")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("FIRST", "1ere")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("FINE LATE PAY.", "Amende pour retard de paiement")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("-PATERNITY PAY", "Indemnités de paternité")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("SOCIAL SECURITY:", "SÉCURITÉ SOCIALE:")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Trip from", "Voyage de:")
gl['EcritureLib'] = gl["EcritureLib"].str.replace(" To ", " à")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Shipping", "Livraison")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("VOXEET INTEGRATION COSTS", "COÛTS D'INTÉGRATION DE VOXEET")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("INCOME TAX", "IMPÔT SUR LE REVENU")
gl['EcritureLib'] = gl["EcritureLib"].str.replace('Rideshare', 'Covoiturage')
gl['EcritureLib'] = gl["EcritureLib"].str.replace('Travel Meals', 'Repas de Travail')
gl['EcritureLib'] = gl["EcritureLib"].str.replace('Fees', 'Frais')
gl['EcritureLib'] = gl["EcritureLib"].str.replace('Phone', 'Téléphone')
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Books", "Abonnements")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Subcriptions", "Location Base")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Meals", "Repas")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Entertainment", "divertissement ")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Third Party", "tiers ")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Training Fees", "Frais d0 Formation")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Conferences/Tradeshows Registratio", "Conférences/Tradeshows Enregistrement")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("FOR", "POUR")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("ROUNDING", "ARRONDISSEMENT")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("STORAGE", "STOCKAGE")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("VACATION ACCURAL", "Vacances Accumulées")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("RECEIVABLE ", "Recevables")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("AFTER PAYOUT ", "APRÈS PAIEMENT")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("CLEAN UP ", "APUREMENT")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("EMPLOYEE TRAVEL INSUR ", "ASSURANCE DE VOYAGE DES EMPLOYÉS")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("CORRECTION OF", "CORRECTION DE")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("TAXES PAYROLL", "IMPÔTS SUR LA MASSE SALARIALE")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("ACCOUNT", "COMPTE")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("TAX", "Impôt")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("life disab", "Incapacité de vie")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("HOUSING TAX","TAXE D'HABITATION")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("GROSS SALARY","SALAIRE BRUT")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Cleaning Services","Nettoyage")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Freight","Fret")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Membership","adhésion")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Air cooling Maintenance","Entretien de refroidissement de l'air")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Power on Demand Platform","Plateforme d'energie à la demande")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Sanitaire room installation"," Installation de la salle sanitaire")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("subscription","abonnement")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Coffee supplies "," Fournitures de café")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Duty and Tax ","Devoir et fiscalité")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Electricity ","Electricité ")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Lunch vouchers ","Bons déjeuner")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Security monitoring","Surveillance de la sécurité")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Water", "L'EAU")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Statutory Audit", "Audit statutaire")
gl['EcritureLib'] = gl["EcritureLib"].str.replace(" Meeting room screen installation", "Installation de l'écran de la salle de réunion")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Water", "L'EAU")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Water", "L'EAU")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Tax Credit FY 2016", "Crédit d'impôt Exercice 2016")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Bank of America Merill Lynch-T&E statement","Déclaration de Merill Lynch")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("English Translation", "Traduction anglaise")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Office Rent", "Location de Bureau")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Annual Electrical Verification", "Vérification électrique annuelle ")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Health costs ", "Coûts santé")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Unlimited-receipt and policy audit", "Vérification illimitée des reçus et audites")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Water fountain ", "Fontaine d'eau")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Quartely control visit", "Visite de contrôle trimestrielle")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Fire extinguishers annual check", "Vérification annuelle des extincteurs")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("showroom rent", "location de salle d'exposition")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("AND ACTUAL RECEIV","ET RECETTES RÉELLES")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("FILING","DÉPÔT")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("ORDERS","ORDRES")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("EXCLUDED -DUMMY CREDIT","EXCLU")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("RELARING TO","RELATIF À")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("CLEAN UP-","APUREMENT-")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("2ND INSTALLEMENT","2ème versement")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("DOUBLE PAYMENT","DOUBLE PAIEMENT")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("CLEAN UP-","APUREMENT-")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("DUTIES","DROITS")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Previous balance","Solde Précédent")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Cash fx","Cash FX")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("PAYROLL INCOME","REVENU DE PAIE")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("TELEPHONE CHARGES","Frais de Téléphone")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Clearing","Compensation")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Hotel","Hôtel")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Miscellaneous","Divers")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Corporate Card-Out-of-Poc","")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Traveling Dolby Empl","Employé itinérant de Dolby")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Tools-Equipment-Lab Supplies","Outils-Equipement-Fournitures de laboratoire")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("rounding","Arrondissement")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Building Supplies-Maintenance","Matériaux de construction-Entretien")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Expensed Furniture","Mobilier Dépensé")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Credit for Charges","Crédit pour frais")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Manual P-ment and double payment to be deduct","P-mnt manuel et double paiement à déduire")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Employee insurance travel","Assurance de voyage des employés 2019")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Rent ","Location ")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Lunch vouchers ","Bons déjeuner")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Store Room ","Chambre Stocke")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Evaluation ","Évaluation ")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Charges ","Frais ")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("On Line ","En ligne ")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("/Building Supplies/Maintenance","/ Matériaux de construction / Entretien")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Music Instruments","Instruments Musicales")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("/Employee Awards/Recognition", "/ Récompenses des employés / Reconnaissance")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("/Daily Allowance","/Indemnité journalière")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("RECLASS ", "RECLASSIFICATION ")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Purchase Accounting", "Comptabilité d'achat")
gl['EcritureLib'] = gl["EcritureLib"].str.replace( "EXPAT ", " Expatrié ")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("FROM ", "DE ")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("INVOICE", "FACTURE")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("CLEANUP", "APUREMENT")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Repayment", "Restitution")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Office Furniture", "Meubles de bureau")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("anti-stress treatments", "traitements anti-stress")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("UK Tax Return", "Décl. d'impôt Royaume-Uni")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("Office Location", "Location de bureau")
# --- EcritureLib: literal English -> French label translations --------------
# Ordered (search, replacement) pairs applied sequentially to the EcritureLib
# column.  Order matters: later pairs may act on text produced by earlier ones
# (e.g. "COÛT PLUS " only exists after "COST " -> "COÛT ").
# regex=False is passed explicitly: several patterns contain regex
# metacharacters ('.', '-', '/', parentheses), and relying on the pandas
# default made e.g. "MED. INS." or "Working Repas (Employees Only) " behave as
# regular expressions on pandas < 2.0, matching the wrong text (or, for the
# parenthesised pattern, never matching the literal data at all).
# Exact duplicate pairs from the original chain ("Clear ", "Rent/",
# "_20181231_ MK063850") were dropped: a repeated literal replace is a no-op.
# A few entries are unreachable after earlier pairs (e.g. "RECLASS FROM" after
# "RECLASS " is rewritten); they are kept to preserve the original sequence.
_ECRITURE_LIB_TRANSLATIONS = [
    ("Deliver Service", "Service de livraison"),
    ("Foreign Office Support", "Soutien aux bureaux étrangères"),
    ("Showroom", "Salle d'exposition"),
    ("aditional Services", "Services supplémentaires "),
    ("Cofee consumption Paris office", "Consommation de café Bureau de Paris"),
    ("Consultant ", "Expert-conseil"),
    ("INVOICE", "FACTURE"),
    ("Rent-", "Location-"),
    ("Corporate", "Entreprise"),
    ("COST ", "COÛT "),
    ("TRAINING", "Formation"),
    ("LIFE DISAB", "Invalidité"),
    ("INSU ", "ASSURANCE "),
    ("PATENT AWARD", "BREVET"),
    ("EQUIVALENT POUR UNUSED VACATION POUR LEAVE", "CONGÉ DE VACANCES INUTILISÉS"),
    ("SPOT ", ""),
    ("AIRFARE TRANSFER TO PREPAIDS", "TRANSFERT DE TRANSPORT AÉRIEN À PAYÉ D'AVANCE"),
    ("WITHHOLDING", "RETRAIT"),
    ("Clear ", "Reglement "),
    ("Rent/", "Location/"),
    ("Pay ", "Paiement "),
    ("PAYMENT", "Paiement "),
    ("French Income Tax Return;", "Déclaration de revenus française;"),
    ("REVESERVICES", "SERVICES"),
    ("INCLUDED DOUBLE", "DOUBLE INCLUS"),
    ("Bank", "Banque"),
    ("/Promotional Expenses", "/Frais de promotion"),
    (" ACTIVITY ", " activité "),
    (" DEFINED BENEFIT LIABILITY", "PASSIF À AVANTAGES DÉTERMINÉES"),
    ("COÛT PLUS ", "Revient Majoré"),
    ("/Airline Frais", "/Tarifs aériens"),
    ("/Tools/Equipment/Lab Supplies", "/Outils / Équipement / Fournitures de laboratoire"),
    ("Payment Posting", "Paiements"),
    ("COMMISSION D’ACCUMULATION", "ACCUMULATIONS DE COMISSIONS"),
    ("ImpôtE", "Impôt"),
    ("MED.INSU", "MED.ASSURANCE"),
    ("APPRENTICESHIP_CONTRIBUTIONS_TRUE_UP", "CONTRIBUTIONS À L'APPRENTISSAGE/TRUE UP"),
    ("NET PAY", "SALAIRE NET"),
    ("CASH ", "ARGENT "),
    ("Repayment ", "Repaiement "),
    ("Acct. ", "Comptab. "),
    ("ACCR ", "ACC "),
    ("Accr ", "Acc."),
    ("Cash Balance", "Solde de caisse"),
    ("RECLASS ", "RECLASSEMENT "),
    ("VAT FILING ", "Dépôt de TVA "),
    ("Needs to be re-booked due", "KI"),
    ("reclass from", "reclasser de"),
    ("RECLASS FROM", "reclasser de"),  # unreachable after "RECLASS " above; kept from original
    ("PAYROLL", "PAIE"),
    ("RECLASS ", "Reclasser"),  # unreachable after "RECLASS " above; kept from original
    ("DEDICTION", "DEDUCTION"),
    ("Cash", "Argent "),
    ("cash ", "argent "),
    ("ReclasserIFICATIO", "RECLASSEMENT "),
    ("ImpôtS ", "Impôts "),
    ("Working Repas (Employees Only) ", "Repas de travail (employés seulement) "),
    ("/Banque Frais", "/Frais Bancaires"),
    ("MED. INS.", "ASSURANCE MED."),
    ("Facture - Brut'", "Facture - Brute'"),
    # COUPA invoice references are collapsed into a single generic label.
    ("_20181130_ MK063850", "FACTURE COUPA"),
    ("_20181130_ MS063849", "FACTURE COUPA"),
    ("_20181130_ MB063846", "FACTURE COUPA"),
    ("_20181231_ MK063850", "FACTURE COUPA"),
    ("_20190228_ MK063850", "FACTURE COUPA"),
    ("_20190331_ MB063846", "FACTURE COUPA"),
    ("_20190430_ MS063849", "FACTURE COUPA"),
    ("_20190430_ MB063846", "FACTURE COUPA"),
    # Final character-level cleanup, after all phrase translations.
    ("-", ""),
    ("/", ""),
    ("Contre Passation", "CP"),
]
for _old, _new in _ECRITURE_LIB_TRANSLATIONS:
    gl['EcritureLib'] = gl['EcritureLib'].str.replace(_old, _new, regex=False)
# --- JournalLib: whole-phrase translations via DataFrame.replace ------------
import re  # used below to escape literal mapping keys; harmless if already imported

mapping_Valuation1 = {
    # NOTE(review): " Valuation on" precedes " Valuation on Reverse", so the
    # shorter key rewrites the text first and the longer key can never match.
    # Preserved as-is to keep the original behaviour; confirm intent.
    " Valuation on": " Évaluation sur",
    " Valuation on Reverse": " Évaluation sur Contre Passation",
    " Reverse Posting": " Contre-Passation d'Ecriture - Conversion de devise sur",
    " Translation Using": " Conversion de devise sur",
}
# Literal source phrase -> French translation.
# The original dict literal repeated several keys ("TAX FEES", "AUDIT FEES",
# "Agency Booking Fees", "PAYROLL INCOME TAXES", the two SOCIAL SECURITY
# entries); Python silently keeps only the last value, so the shadowed
# entries were removed and the surviving (last) value retained.  The shadowing
# duplicate of "SOCIAL SECURITY: APPRENTICESHIP CONTRIBU" misspelled
# "CONTRIBUITION"; the correct spelling "CONTRIBUTION" is used (deliberate fix).
mapping_AA1 = {
    "Reclass from": " Reclassification de",
    "reclass from": " Reclassification de",
    "ZEE MEDIA": "ZEE MEDIA Campaignes Numériques",
    "TRAINING CONTRI. ER JANUARY '19": "FORMATION CONTRI. ER JANVIER' 19",
    "TAX FEES": "FRAIS D'IMPÔT",
    "SOCIAL SECURITY: URSSAF": "SÉCURITÉ SOCIALE: URSSAF",
    "SOCIAL SECURITY: TRAINING CONTRIBUTIONS": "SÉCURITÉ SOCIALE: CONTRIBUTIONS À LA FORMATION",
    "SOCIAL SECURITY: APPRENTICESHIP CONTRIBU": "SÉCURITÉ SOCIALE: CONTRIBUTION À L’APPRENTISSAGE",
    "RSM": "SERVICES DE PAIE RSM EF18",
    "RSA": "SERVICES DE PAIE RSA OCT-JAN",
    "PRIVATE HEALTH": "SANTÉ PRIVÉE: ASSURANCE MÉDICALE-AXA/",
    "PENSION: PENSION CONTRIBUTIONS - REUNICA": "PENSION: COTISATIONS DE RETRAITE-REUNICA",
    "PENSION: LIFE & DISABILITY INSURANCE - R": "PENSION: ASSURANCE VIE & INVALIDITÉ-R",
    "PENSION JANUARY '19": "PENSION JANVIER '19",
    "ON CALL JANUARY '19": "Disponible Janvier'19",
    "NRE + PROJECT INITIATION FEES": "NRE + FRAIS D’INITIATION AU PROJET (PO 750003",
    "NET PAY JANUARY '19": "Payeante Janvier'19",
    "JANUARY'19": "JANVIER'19",
    "LUNCH VOUCHER- WITHHOLDING": "BON DÉJEUNER-RETENUE",
    "HOLIDAY BONUS ACCRUAL FY18/19": "CUMUL DES PRIMES DE VACANCES EF18/19",
    "GROSS SALARY JANUARY '19": "SALAIRE BRUT JANVIER' 19",
    "EMEA ACCRUAL P8FY19": "P8FY19 D’ACCUMULATION EMEA",
    "COMMISSION RE-ACCRUAL": "COMMISSION RÉ-ACCUMULATION",
    "COMMISSION ACCRUAL": "COMMISSION D’ACCUMULATION",
    "MARCH": "MARS",
    "MAY": "MAI",
    "APRIL": "AVRIL",
    "AUDIT FEES": "FRAIS D'AUDIT",
    "UNSUBMITTED_UNPOSTED BOA ACCRUAL": "Accumulation BOA non soumise non exposée",
    "UNASSIGNED CREDITCARD BOA ACCRUAL": "NON ASSIGNÉ CREDITCARD BOA ACCUMULATION ",
    "EMEA ACCRUAL": "ACCUMULATION EMEA",
    "Exhibit Expenses": "Frais d'exposition",
    "Hotel Tax": "Taxe hôtelière",
    "Company Events": "Événements d'entreprise",
    "Public Transport": "Transport public",
    "Agency Booking Fees": "Frais de Réservation d'Agence",
    "Working Meals (Employees Only)": "Repas de travail (employés seulement)",
    "Airfare": "Billet d'avion",
    "Office Supplies": "Fournitures de bureau",
    "Tolls": "Péages",
    "write off difference see e-mail attached": "radiation de la différence voir e-mail ci-joint",
    "Manual P/ment and double payment to be deduct": "P/ment manuel et double paiement à déduire",
    "FX DIFFERENCE ON RSU": "DIFFERENCE FX SUR RSU",
    "DEFINED BENEFIT LIABILITY-TRUE UP": "RESPONSABILITÉ À PRESTATIONS DÉTERMINÉES-TRUE UP",
    "EXTRA RELEASE FOR STORAGE REVERSED": "EXTRA LIBERATION POUR STOCKAGE CONTREPASSATION",
    "RECLASS BANK CHARGES TO CORRECT COST CEN": "RECLASSER LES FRAIS BANCAIRES POUR CORRIGER",
    "PAYROLL INCOME TAXES": "IMPÔTS SUR LE REVENU DE LA PAIE",
    "TRAINING TAX TRUE UP": "TAXE DE FORMATION",
    "FX DIFFERENCE ON STOCK OPTION EXERCISES": "FX DIFFERENCE SUR LES EXERCICES D'OPTIONS STOCK",
    "Airline Frais": "Frais de Transport Aérien",
    "Computer Supplies": "Fournitures informatiques",
    "HOLIDAY BONUS ACCRUAL ": "ACCUMULATION DE BONUS DE VACANCES",
    "TRAVEL COST": "FRAIS DE VOYAGE",
    "HOUSING TAX": "TAXE SUR LE LOGEMENT",
    "INCOME TAX-PAS": "IMPÔT SUR LE REVENU-PAS",
    "IC SETTLEMENT": "Règlement Interentreprises",
    "VACATION TAKEN": "VACANCES PRISES",
    "SOCIAL SECURITY: APPR. CONTR.": "SÉCURITÉ SOCIALE: CONTRIBUTION À L’APPRENTISSAGE",
    "POST OF AVRIL DEC IN CORRECT SIGN": "CORRECTION D'ECRITURE AVRIL DEC",
}

def _escape_keys(mapping):
    """Escape regex metacharacters in mapping keys so they match literally.

    DataFrame.replace(regex=False) only matches entire cell values, so
    regex=True is required to get substring replacement -- but several keys
    contain metacharacters ('+', '.', '(' , ')') and would otherwise match
    the wrong text or fail to match their literal phrase.
    """
    return {re.escape(k): v for k, v in mapping.items()}

gl = gl.replace({"JournalLib": _escape_keys(mapping_Valuation1)}, regex=True)
gl = gl.replace({"JournalLib": _escape_keys(mapping_AA1)}, regex=True)
# --- JournalLib: literal English -> French label translations ---------------
# Ordered (search, replacement) pairs applied sequentially to the JournalLib
# column.  Order matters: later pairs may act on text produced by earlier ones
# (e.g. "COÛT PLUS " only exists after "COST " -> "COÛT ").
# regex=False is passed explicitly: several patterns contain regex
# metacharacters ('.', '-', '/', '(' , ')'), and relying on the pandas default
# made e.g. "Aug.", "Trans. Using" or "Working Repas (Employees Only) " behave
# as regular expressions on pandas < 2.0.
# Exact duplicate pairs from the original chain were dropped (a repeated
# literal replace is a no-op): "Aug.", "Water", "CLEAN UP-", "Rent ",
# "Lunch vouchers ", "INVOICE", "Clear ", "Rent/".
# Some entries are unreachable after earlier pairs (e.g. "SEPTEMBER" after
# "SEPTEMBER" -> "SEPT", "Training Fees" after "Fees" -> "Frais",
# "HOUSING TAX" after "TAX" -> "Impôt"); kept to preserve the original
# sequence.  NOTE(review): "Frais d0 Formation" looks like a typo for
# "Frais de Formation", but the entry is unreachable anyway -- confirm intent.
_JOURNAL_LIB_TRANSLATIONS = [
    ("COST-PLUS", "Revient Majoré"),
    ("PRITVAE HEALTH: MEDICAL INSURANCE", "SANTÉ PRIVÉE: ASSURANCE MÉDICALE"),
    ("MEDICAL INSURANCE", "ASSURANCE MÉDICALE"),
    ("UNASSIGNED", "NON ATTRIBUÉ"),
    ("Payout", "Paiement"),
    ("FRINGE COST", "COÛT MARGINAL"),
    ("PROJECT INITIATION", "LANCEMENT DU PROJET"),
    ("ACCRUAL", "ACCUMULATION"),
    ("CREDITCARD", "CARTE DE CRÉDIT"),
    ("ACCR ", "ACCUM "),
    ("VAT ", "TVA "),
    ("SOCIAL SECURITY ", "SÉCURITÉ SOCIALE"),
    ("SEPTEMBER", "SEPT"),
    ("TAXBACK", "Reboursement"),
    ("REPORT", ""),
    ("Reverse Posting", "Contre Passation d'Ecriture"),
    ("BASE RENT", "Location Base"),
    ("Rent ", "Location "),
    ("RENT ", "Location "),
    ("CLEARING", "compensation "),
    ("clearing", "compensation "),
    ("BILLING CHARGES", "FRAIS DE FACTURATION "),
    ("UNPAID", "NON PAYÉ"),
    ("PROPERTY TAX", "IMPÔT FONCIER "),
    ("Trans. Using", "Conversion sur"),
    ("SALARIES", "Salaires"),
    ("Refund", "Remboursement"),
    ("REFUND", "Remboursement"),
    ("no invoice", "pas de facture"),
    ("COST-PLUS SERVICE REVENUE", "Revenus de service Revient Majoré"),  # unreachable after "COST-PLUS"; kept
    ("SETTLEMENT", "RÈGLEMENT "),
    ("PURCHASE", "ACHAT"),
    ("NON-CP SETTLE", "RÈGLEMENT NON-CP"),
    ("PAID ", " Payé "),
    ("FEES ", "Frais"),
    # Month names, mixed case then upper case then abbreviated.
    ("January", "Janvier"),
    ("February", "Février"),
    ("March", "Mars"),
    ("April", "Avril"),
    ("May", "Mai"),
    ("June", "Juin"),
    ("July", "Juillet"),
    ("September", "Septembre"),
    ("Aug.", "Août"),
    ("JANUARY", "Janvier"),
    ("FEBRUARY", "Février"),
    ("MARCH", "Mars"),
    ("APRIL", "Avril"),
    ("MAY", "Mai"),
    ("JUNE", "Juin"),
    ("JULY", "Juillet"),
    ("SEPTEMBER", "Septembre"),  # unreachable after "SEPTEMBER" -> "SEPT"; kept
    ("AUGUST.", "Août"),
    ("NOVEMBER.", "Novembre"),
    ("DECEMBER.", "Décembre"),
    ("December", "Décembre"),
    ("Feb.", "Fév."),
    ("Mar.", "Mars"),
    ("Apr.", "Avril"),
    ("Reverse ", "Contre-passation "),
    ("INTEREST CHARGE", "CHARGE D'INTÉRÊT"),
    ("-SICK LEAVE PAY", "-Paiement congé maladie"),
    ("RECLASSEMENTIFICATION", "RECLASSIFICATION"),
    ("INSTALMENT", "VERSEMENT"),
    ("FIRST", "1ere"),
    ("FINE LATE PAY.", "Amende pour retard de paiement"),
    ("-PATERNITY PAY", "Indemnités de paternité"),
    ("SOCIAL SECURITY:", "SÉCURITÉ SOCIALE:"),
    ("Trip from", "Voyage de:"),
    (" To ", " à"),
    ("Shipping", "Livraison"),
    ("VOXEET INTEGRATION COSTS", "COÛTS D'INTÉGRATION DE VOXEET"),
    ("INCOME TAX", "IMPÔT SUR LE REVENU"),
    ("Rideshare", "Covoiturage"),
    ("Travel Meals", "Repas de Travail"),
    ("Fees", "Frais"),
    ("Phone", "Téléphone"),
    ("Books", "Abonnements"),
    ("Subcriptions", "Location Base"),
    ("Meals", "Repas"),
    ("Entertainment", "divertissement "),
    ("Third Party", "tiers "),
    ("Training Fees", "Frais d0 Formation"),  # unreachable after "Fees" -> "Frais"; kept
    ("Conferences/Tradeshows Registratio", "Conférences/Tradeshows Enregistrement"),
    ("FOR", "POUR"),
    ("ROUNDING", "ARRONDISSEMENT"),
    ("STORAGE", "STOCKAGE"),
    ("VACATION ACCURAL", "Vacances Accumulées"),
    ("RECEIVABLE ", "Recevables"),
    ("AFTER PAYOUT ", "APRÈS PAIEMENT"),
    ("CLEAN UP ", "APUREMENT"),
    ("EMPLOYEE TRAVEL INSUR ", "ASSURANCE DE VOYAGE DES EMPLOYÉS"),
    ("CORRECTION OF", "CORRECTION DE"),
    ("TAXES PAYROLL", "IMPÔTS SUR LA MASSE SALARIALE"),
    ("ACCOUNT", "COMPTE"),
    ("TAX", "Impôt"),
    ("life disab", "Incapacité de vie"),
    ("HOUSING TAX", "TAXE D'HABITATION"),  # unreachable after "TAX" -> "Impôt"; kept
    ("GROSS SALARY", "SALAIRE BRUT"),
    ("Cleaning Services", "Nettoyage"),
    ("Freight", "Fret"),
    ("Membership", "adhésion"),
    ("Air cooling Maintenance", "Entretien de refroidissement de l'air"),
    ("Power on Demand Platform", "Plateforme d'energie à la demande"),
    ("Sanitaire room installation", " Installation de la salle sanitaire"),
    ("subscription", "abonnement"),
    ("Coffee supplies ", " Fournitures de café"),
    ("Duty and Tax ", "Devoir et fiscalité"),
    ("Electricity ", "Electricité "),
    ("Lunch vouchers ", "Bons déjeuner"),
    ("Security monitoring", "Surveillance de la sécurité"),
    ("Water", "L'EAU"),
    ("Statutory Audit", "Audit statutaire"),
    (" Meeting room screen installation", "Installation de l'écran de la salle de réunion"),
    ("Tax Credit FY 2016", "Crédit d'impôt Exercice 2016"),
    ("Bank of America Merill Lynch-T&E statement", "Déclaration de <NAME>"),
    ("English Translation", "Traduction anglaise"),
    ("Office Rent", "Location de Bureau"),
    ("Annual Electrical Verification", "Vérification électrique annuelle "),
    ("Health costs ", "Coûts santé"),
    ("Unlimited-receipt and policy audit", "Vérification illimitée des reçus et audites"),
    ("Water fountain ", "Fontaine d'eau"),  # unreachable after "Water" -> "L'EAU"; kept
    ("Quartely control visit", "Visite de contrôle trimestrielle"),
    ("Fire extinguishers annual check", "Vérification annuelle des extincteurs"),
    ("showroom rent", "location de salle d'exposition"),
    ("AND ACTUAL RECEIV", "ET RECETTES RÉELLES"),
    ("FILING", "DÉPÔT"),
    ("ORDERS", "ORDRES"),
    ("EXCLUDED -DUMMY CREDIT", "EXCLU"),
    ("RELARING TO", "RELATIF À"),
    ("CLEAN UP-", "APUREMENT-"),
    ("2ND INSTALLEMENT", "2ème versement"),
    ("DOUBLE PAYMENT", "DOUBLE PAIEMENT"),
    ("DUTIES", "DROITS"),
    ("Previous balance", "Solde Précédent"),
    ("Cash fx", "Cash FX"),
    ("PAYROLL INCOME", "REVENU DE PAIE"),
    ("TELEPHONE CHARGES", "Frais de Téléphone"),
    ("Clearing", "Compensation"),
    ("Hotel", "Hôtel"),
    ("Miscellaneous", "Divers"),
    ("Corporate Card-Out-of-Poc", ""),
    ("Traveling Dolby Empl", "Employé itinérant de Dolby"),
    ("Tools-Equipment-Lab Supplies", "Outils-Equipement-Fournitures de laboratoire"),
    ("rounding", "Arrondissement"),
    ("Building Supplies-Maintenance", "Matériaux de construction-Entretien"),
    ("Expensed Furniture", "Mobilier Dépensé"),
    ("Credit for Charges", "Crédit pour frais"),
    ("Manual P-ment and double payment to be deduct", "P-mnt manuel et double paiement à déduire"),
    ("Employee insurance travel", "Assurance de voyage des employés 2019"),
    ("Store Room ", "Chambre Stocke"),
    ("Evaluation ", "Évaluation "),
    ("Charges ", "Frais "),
    ("On Line ", "En ligne "),
    ("/Building Supplies/Maintenance", "/ Matériaux de construction / Entretien"),
    ("Music Instruments", "Instruments Musicales"),
    ("/Employee Awards/Recognition", "/ Récompenses des employés / Reconnaissance"),
    ("/Daily Allowance", "/Indemnité journalière"),
    ("RECLASS ", "RECLASSIFICATION "),
    ("Purchase Accounting", "Comptabilité d'achat"),
    ("EXPAT ", " Expatrié "),
    ("FROM ", "DE "),
    ("INVOICE", "FACTURE"),
    ("CLEANUP", "APUREMENT"),
    ("Repayment", "Restitution"),
    ("Office Furniture", "Meubles de bureau"),
    ("anti-stress treatments", "traitements anti-stress"),
    ("UK Tax Return", "Décl. d'impôt Royaume-Uni"),
    ("Office Location", "Location de bureau"),
    ("Deliver Service", "Service de livraison"),
    ("Foreign Office Support", "Soutien aux bureaux étrangères"),
    ("Showroom", "Salle d'exposition"),
    ("aditional Services", "Services supplémentaires "),
    ("Cofee consumption Paris office", "Consommation de café Bureau de Paris"),
    ("Consultant ", "Expert-conseil"),
    ("Rent-", "Location-"),
    ("Corporate", "Entreprise"),
    ("COST ", "COÛT "),
    ("TRAINING", "Formation"),
    ("LIFE DISAB", "Invalidité"),
    ("INSU ", "ASSURANCE "),
    ("PATENT AWARD", "BREVET"),
    ("EQUIVALENT POUR UNUSED VACATION POUR LEAVE", "CONGÉ DE VACANCES INUTILISÉS"),
    ("SPOT ", ""),
    ("AIRFARE TRANSFER TO PREPAIDS", "TRANSFERT DE TRANSPORT AÉRIEN À PAYÉ D'AVANCE"),
    ("WITHHOLDING", "RETRAIT"),
    ("Clear ", "Reglement "),
    ("Rent/", "Location/"),
    ("Pay ", "Paiement "),
    ("PAYMENT", "Paiement "),
    ("French Income Tax Return;", "Déclaration de revenus française;"),
    ("REVESERVICES", "SERVICES"),
    ("INCLUDED DOUBLE", "DOUBLE INCLUS"),
    ("Bank", "Banque"),
    ("/Promotional Expenses", "/Frais de promotion"),
    (" ACTIVITY ", " activité "),
    (" DEFINED BENEFIT LIABILITY", "PASSIF À AVANTAGES DÉTERMINÉES"),
    ("COÛT PLUS ", "Revient Majoré"),
    ("/Airline Frais", "/Tarifs aériens"),
    ("/Tools/Equipment/Lab Supplies", "/Outils / Équipement / Fournitures de laboratoire"),
    ("Payment Posting", "Paiements"),
    ("COMMISSION D’ACCUMULATION", "ACCUMULATIONS DE COMISSIONS"),
    ("ImpôtE", "Impôt"),
    ("MED.INSU", "MED.ASSURANCE"),
    ("APPRENTICESHIP_CONTRIBUTIONS_TRUE_UP", "CONTRIBUTIONS À L'APPRENTISSAGE/TRUE UP"),
    ("NET PAY", "SALAIRE NET"),
    ("CASH ", "ARGENT "),
    ("Repayment ", "Repaiement "),  # unreachable after "Repayment" -> "Restitution"; kept
    ("Acct. ", "Comptab. "),
    ("ACCR ", "ACC "),  # unreachable after "ACCR " -> "ACCUM "; kept
    ("Accr ", "Acc."),
    ("Cash Balance", "Solde de caisse"),
    ("RECLASS ", "RECLASSEMENT "),  # unreachable after earlier "RECLASS "; kept
    ("VAT FILING ", "Dépôt de TVA "),
    ("Needs to be re-booked due", "KI"),
    ("reclass from", "reclasser de"),
    ("RECLASS FROM", "reclasser de"),  # unreachable; kept
    ("PAYROLL", "PAIE"),
    ("RECLASS ", "Reclasser"),  # unreachable; kept
    ("DEDICTION", "DEDUCTION"),
    ("Cash", "Argent "),
    ("cash ", "argent "),
    ("ReclasserIFICATIO", "RECLASSEMENT "),
    ("ImpôtS ", "Impôts "),
    ("Working Repas (Employees Only) ", "Repas de travail (employés seulement) "),
    ("/Banque Frais", "/Frais Bancaires"),
    ("MED. INS.", "ASSURANCE MED."),
    ("AJE WIRE LOG TRAN", "AJE VERSEMENT"),
    ("JUN'", "JUIN'"),
    ("Deferred Rent18 rue de Lo", "Loyer différé 18 Rue de Lo"),
    ("Facture - Brut'", "Facture - Brute"),
    ("T&E", "VD"),
    # Final character-level cleanup, after all phrase translations.
    ("/", ""),
    ("Inv", "Facture"),
    ("2019`", "2019"),
    ("-2014V", ""),
]
for _old, _new in _JOURNAL_LIB_TRANSLATIONS:
    gl['JournalLib'] = gl['JournalLib'].str.replace(_old, _new, regex=False)
# --- PieceRef: whole-phrase translations via DataFrame.replace --------------
# NOTE(review): these dicts are byte-for-byte redefinitions of the mappings
# used for JournalLib earlier in this script; they are redefined here so this
# section stands on its own.
import re  # used below to escape literal mapping keys; harmless if already imported

mapping_Valuation1 = {
    # NOTE(review): " Valuation on" precedes " Valuation on Reverse", so the
    # shorter key rewrites the text first and the longer key can never match.
    # Preserved as-is to keep the original behaviour; confirm intent.
    " Valuation on": " Évaluation sur",
    " Valuation on Reverse": " Évaluation sur Contre Passation",
    " Reverse Posting": " Contre-Passation d'Ecriture - Conversion de devise sur",
    " Translation Using": " Conversion de devise sur",
}
# Literal source phrase -> French translation.
# The original dict literal repeated several keys ("TAX FEES", "AUDIT FEES",
# "Agency Booking Fees", "PAYROLL INCOME TAXES", the two SOCIAL SECURITY
# entries); Python silently keeps only the last value, so the shadowed
# entries were removed and the surviving (last) value retained.  The shadowing
# duplicate of "SOCIAL SECURITY: APPRENTICESHIP CONTRIBU" misspelled
# "CONTRIBUITION"; the correct spelling "CONTRIBUTION" is used (deliberate fix).
mapping_AA1 = {
    "Reclass from": " Reclassification de",
    "reclass from": " Reclassification de",
    "ZEE MEDIA": "ZEE MEDIA Campaignes Numériques",
    "TRAINING CONTRI. ER JANUARY '19": "FORMATION CONTRI. ER JANVIER' 19",
    "TAX FEES": "FRAIS D'IMPÔT",
    "SOCIAL SECURITY: URSSAF": "SÉCURITÉ SOCIALE: URSSAF",
    "SOCIAL SECURITY: TRAINING CONTRIBUTIONS": "SÉCURITÉ SOCIALE: CONTRIBUTIONS À LA FORMATION",
    "SOCIAL SECURITY: APPRENTICESHIP CONTRIBU": "SÉCURITÉ SOCIALE: CONTRIBUTION À L’APPRENTISSAGE",
    "RSM": "SERVICES DE PAIE RSM EF18",
    "RSA": "SERVICES DE PAIE RSA OCT-JAN",
    "PRIVATE HEALTH": "SANTÉ PRIVÉE: ASSURANCE MÉDICALE-AXA/",
    "PENSION: PENSION CONTRIBUTIONS - REUNICA": "PENSION: COTISATIONS DE RETRAITE-REUNICA",
    "PENSION: LIFE & DISABILITY INSURANCE - R": "PENSION: ASSURANCE VIE & INVALIDITÉ-R",
    "PENSION JANUARY '19": "PENSION JANVIER '19",
    "ON CALL JANUARY '19": "Disponible Janvier'19",
    "NRE + PROJECT INITIATION FEES": "NRE + FRAIS D’INITIATION AU PROJET (PO 750003",
    "NET PAY JANUARY '19": "Payeante Janvier'19",
    "JANUARY'19": "JANVIER'19",
    "LUNCH VOUCHER- WITHHOLDING": "BON DÉJEUNER-RETENUE",
    "HOLIDAY BONUS ACCRUAL FY18/19": "CUMUL DES PRIMES DE VACANCES EF18/19",
    "GROSS SALARY JANUARY '19": "SALAIRE BRUT JANVIER' 19",
    "EMEA ACCRUAL P8FY19": "P8FY19 D’ACCUMULATION EMEA",
    "COMMISSION RE-ACCRUAL": "COMMISSION RÉ-ACCUMULATION",
    "COMMISSION ACCRUAL": "COMMISSION D’ACCUMULATION",
    "MARCH": "MARS",
    "MAY": "MAI",
    "APRIL": "AVRIL",
    "AUDIT FEES": "FRAIS D'AUDIT",
    "UNSUBMITTED_UNPOSTED BOA ACCRUAL": "Accumulation BOA non soumise non exposée",
    "UNASSIGNED CREDITCARD BOA ACCRUAL": "NON ASSIGNÉ CREDITCARD BOA ACCUMULATION ",
    "EMEA ACCRUAL": "ACCUMULATION EMEA",
    "Exhibit Expenses": "Frais d'exposition",
    "Hotel Tax": "Taxe hôtelière",
    "Company Events": "Événements d'entreprise",
    "Public Transport": "Transport public",
    "Agency Booking Fees": "Frais de Réservation d'Agence",
    "Working Meals (Employees Only)": "Repas de travail (employés seulement)",
    "Airfare": "Billet d'avion",
    "Office Supplies": "Fournitures de bureau",
    "Tolls": "Péages",
    "write off difference see e-mail attached": "radiation de la différence voir e-mail ci-joint",
    "Manual P/ment and double payment to be deduct": "P/ment manuel et double paiement à déduire",
    "FX DIFFERENCE ON RSU": "DIFFERENCE FX SUR RSU",
    "DEFINED BENEFIT LIABILITY-TRUE UP": "RESPONSABILITÉ À PRESTATIONS DÉTERMINÉES-TRUE UP",
    "EXTRA RELEASE FOR STORAGE REVERSED": "EXTRA LIBERATION POUR STOCKAGE CONTREPASSATION",
    "RECLASS BANK CHARGES TO CORRECT COST CEN": "RECLASSER LES FRAIS BANCAIRES POUR CORRIGER",
    "PAYROLL INCOME TAXES": "IMPÔTS SUR LE REVENU DE LA PAIE",
    "TRAINING TAX TRUE UP": "TAXE DE FORMATION",
    "FX DIFFERENCE ON STOCK OPTION EXERCISES": "FX DIFFERENCE SUR LES EXERCICES D'OPTIONS STOCK",
    "Airline Frais": "Frais de Transport Aérien",
    "Computer Supplies": "Fournitures informatiques",
    "HOLIDAY BONUS ACCRUAL ": "ACCUMULATION DE BONUS DE VACANCES",
    "TRAVEL COST": "FRAIS DE VOYAGE",
    "HOUSING TAX": "TAXE SUR LE LOGEMENT",
    "PAYROLL INCOME TAXES ": "IMPÔTS SUR LE REVENU DE LA PAIE",
    "INCOME TAX-PAS": "IMPÔT SUR LE REVENU-PAS",
    "IC SETTLEMENT": "Règlement Interentreprises",
    "VACATION TAKEN": "VACANCES PRISES",
    "SOCIAL SECURITY: APPR. CONTR.": "SÉCURITÉ SOCIALE: CONTRIBUTION À L’APPRENTISSAGE",
    "POST OF AVRIL DEC IN CORRECT SIGN": "CORRECTION D'ECRITURE AVRIL DEC",
}

def _escape_keys(mapping):
    """Escape regex metacharacters in mapping keys so they match literally.

    DataFrame.replace(regex=False) only matches entire cell values, so
    regex=True is required to get substring replacement -- but several keys
    contain metacharacters ('+', '.', '(' , ')') and would otherwise match
    the wrong text or fail to match their literal phrase.
    """
    return {re.escape(k): v for k, v in mapping.items()}

gl = gl.replace({"PieceRef": _escape_keys(mapping_Valuation1)}, regex=True)
gl = gl.replace({"PieceRef": _escape_keys(mapping_AA1)}, regex=True)
gl['PieceRef'] = gl["PieceRef"].str.replace('COST-PLUS', 'Revient Majoré')
gl['PieceRef'] = gl["PieceRef"].str.replace('PRITVAE HEALTH: MEDICAL INSURANCE', 'SANTÉ PRIVÉE: ASSURANCE MÉDICALE')
gl['PieceRef'] = gl["PieceRef"].str.replace('MEDICAL INSURANCE', 'ASSURANCE MÉDICALE')
gl['PieceRef'] = gl["PieceRef"].str.replace('UNASSIGNED', 'NON ATTRIBUÉ')
gl['PieceRef'] = gl["PieceRef"].str.replace('Payout', 'Paiement')
gl['PieceRef'] = gl["PieceRef"].str.replace('FRINGE COST', 'COÛT MARGINAL')
gl['PieceRef'] = gl["PieceRef"].str.replace('PROJECT INITIATION', 'LANCEMENT DU PROJET')
gl['PieceRef'] = gl["PieceRef"].str.replace('ACCRUAL', 'ACCUMULATION')
gl['PieceRef'] = gl["PieceRef"].str.replace('CREDITCARD', 'CARTE DE CRÉDIT')
gl['PieceRef'] = gl["PieceRef"].str.replace('ACCR ', 'ACCUM ')
gl['PieceRef'] = gl["PieceRef"].str.replace('VAT ', 'TVA ')
gl['PieceRef'] = gl["PieceRef"].str.replace('SOCIAL SECURITY ', 'SÉCURITÉ SOCIALE')
gl['PieceRef'] = gl["PieceRef"].str.replace('SEPTEMBER', 'SEPT')
gl['PieceRef'] = gl["PieceRef"].str.replace('TAXBACK', 'Reboursement')
gl['PieceRef'] = gl["PieceRef"].str.replace('REPORT', '')
gl['PieceRef'] = gl["PieceRef"].str.replace("Reverse Posting", "Contre Passation d'Ecriture")
gl['PieceRef'] = gl["PieceRef"].str.replace("BASE RENT", "Location Base")
gl['PieceRef'] = gl["PieceRef"].str.replace("Rent ", "Location ")
gl['PieceRef'] = gl["PieceRef"].str.replace("RENT ", "Location ")
gl['PieceRef'] = gl["PieceRef"].str.replace("CLEARING", "compensation ")
gl['PieceRef'] = gl["PieceRef"].str.replace("clearing", "compensation ")
gl['PieceRef'] = gl["PieceRef"].str.replace("BILLING CHARGES", "FRAIS DE FACTURATION ")
gl['PieceRef'] = gl["PieceRef"].str.replace("UNPAID", "NON PAYÉ")
gl['PieceRef'] = gl["PieceRef"].str.replace("PROPERTY TAX", "IMPÔT FONCIER ")
gl['PieceRef'] = gl["PieceRef"].str.replace("Trans. Using", "Conversion sur")
gl['PieceRef'] = gl["PieceRef"].str.replace("SALARIES", "Salaires")
gl['PieceRef'] = gl["PieceRef"].str.replace("Refund", "Remboursement")
gl['PieceRef'] = gl["PieceRef"].str.replace("REFUND", "Remboursement")
gl['PieceRef'] = gl["PieceRef"].str.replace("no invoice", "pas de facture")
gl['PieceRef'] = gl["PieceRef"].str.replace("COST-PLUS SERVICE REVENUE", "Revenus de service Revient Majoré")
gl['PieceRef'] = gl["PieceRef"].str.replace("SETTLEMENT", "RÈGLEMENT ")
gl['PieceRef'] = gl["PieceRef"].str.replace("PURCHASE", "ACHAT")
gl['PieceRef'] = gl["PieceRef"].str.replace("NON-CP SETTLE", "RÈGLEMENT NON-CP")
gl['PieceRef'] = gl["PieceRef"].str.replace("PAID ", " Payé ")
gl['PieceRef'] = gl["PieceRef"].str.replace("FEES ", "Frais")
gl['PieceRef'] = gl["PieceRef"].str.replace("January", "Janvier")
gl['PieceRef'] = gl["PieceRef"].str.replace("February", "Février")
gl['PieceRef'] = gl["PieceRef"].str.replace("March", "Mars")
gl['PieceRef'] = gl["PieceRef"].str.replace("April", "Avril")
gl['PieceRef'] = gl["PieceRef"].str.replace("May", "Mai")
gl['PieceRef'] = gl["PieceRef"].str.replace("June", "Juin")
gl['PieceRef'] = gl["PieceRef"].str.replace("July", "Juillet")
gl['PieceRef'] = gl["PieceRef"].str.replace("September", "Septembre")
gl['PieceRef'] = gl["PieceRef"].str.replace("Aug.", "Août")
gl['PieceRef'] = gl["PieceRef"].str.replace("JANUARY", "Janvier")
gl['PieceRef'] = gl["PieceRef"].str.replace("FEBRUARY", "Février")
gl['PieceRef'] = gl["PieceRef"].str.replace("MARCH", "Mars")
gl['PieceRef'] = gl["PieceRef"].str.replace("APRIL", "Avril")
gl['PieceRef'] = gl["PieceRef"].str.replace("MAY", "Mai")
gl['PieceRef'] = gl["PieceRef"].str.replace("JUNE", "Juin")
gl['PieceRef'] = gl["PieceRef"].str.replace("JULY", "Juillet")
gl['PieceRef'] = gl["PieceRef"].str.replace("SEPTEMBER", "Septembre")
gl['PieceRef'] = gl["PieceRef"].str.replace("AUGUST.", "Août")
gl['PieceRef'] = gl["PieceRef"].str.replace("NOVEMBER.", "Novembre")
gl['PieceRef'] = gl["PieceRef"].str.replace("DECEMBER.", "Décembre")
gl['PieceRef'] = gl["PieceRef"].str.replace("December", "Décembre")
gl['PieceRef'] = gl["PieceRef"].str.replace("Feb.", "Fév.")
gl['PieceRef'] = gl["PieceRef"].str.replace("Mar.", "Mars")
gl['PieceRef'] = gl["PieceRef"].str.replace("Apr.", "Avril")
gl['PieceRef'] = gl["PieceRef"].str.replace("Aug.", "Août")
gl['PieceRef'] = gl["PieceRef"].str.replace("Aug.", "Août")
gl['PieceRef'] = gl["PieceRef"].str.replace("Reverse ", "Contre-passation ")
gl['PieceRef'] = gl["PieceRef"].str.replace("INTEREST CHARGE", "CHARGE D'INTÉRÊT")
gl['PieceRef'] = gl["PieceRef"].str.replace("-SICK LEAVE PAY", "-Paiement congé maladie")
gl['PieceRef'] = gl["PieceRef"].str.replace("RECLASSEMENTIFICATION", "RECLASSIFICATION")
gl['PieceRef'] = gl["PieceRef"].str.replace("INSTALMENT", "VERSEMENT")
gl['PieceRef'] = gl["PieceRef"].str.replace("FIRST", "1ere")
gl['PieceRef'] = gl["PieceRef"].str.replace("FINE LATE PAY.", "Amende pour retard de paiement")
gl['PieceRef'] = gl["PieceRef"].str.replace("-PATERNITY PAY", "Indemnités de paternité")
gl['PieceRef'] = gl["PieceRef"].str.replace("SOCIAL SECURITY:", "SÉCURITÉ SOCIALE:")
gl['PieceRef'] = gl["PieceRef"].str.replace("Trip from", "Voyage de:")
gl['PieceRef'] = gl["PieceRef"].str.replace(" To ", " à")
gl['PieceRef'] = gl["PieceRef"].str.replace("Shipping", "Livraison")
gl['PieceRef'] = gl["PieceRef"].str.replace("VOXEET INTEGRATION COSTS", "COÛTS D'INTÉGRATION DE VOXEET")
gl['PieceRef'] = gl["PieceRef"].str.replace("INCOME TAX", "IMPÔT SUR LE REVENU")
gl['PieceRef'] = gl["PieceRef"].str.replace('Rideshare', 'Covoiturage')
gl['PieceRef'] = gl["PieceRef"].str.replace('Travel Meals', 'Repas de Travail')
gl['PieceRef'] = gl["PieceRef"].str.replace('Fees', 'Frais')
gl['PieceRef'] = gl["PieceRef"].str.replace('Phone', 'Téléphone')
gl['PieceRef'] = gl["PieceRef"].str.replace("Books", "Abonnements")
gl['PieceRef'] = gl["PieceRef"].str.replace("Subcriptions", "Location Base")
gl['PieceRef'] = gl["PieceRef"].str.replace("Meals", "Repas")
gl['PieceRef'] = gl["PieceRef"].str.replace("Entertainment", "divertissement ")
gl['PieceRef'] = gl["PieceRef"].str.replace("Third Party", "tiers ")
gl['PieceRef'] = gl["PieceRef"].str.replace("Training Fees", "Frais d0 Formation")
gl['PieceRef'] = gl["PieceRef"].str.replace("Conferences/Tradeshows Registratio", "Conférences/Tradeshows Enregistrement")
gl['PieceRef'] = gl["PieceRef"].str.replace("FOR", "POUR")
gl['PieceRef'] = gl["PieceRef"].str.replace("ROUNDING", "ARRONDISSEMENT")
gl['PieceRef'] = gl["PieceRef"].str.replace("STORAGE", "STOCKAGE")
gl['PieceRef'] = gl["PieceRef"].str.replace("VACATION ACCURAL", "Vacances Accumulées")
gl['PieceRef'] = gl["PieceRef"].str.replace("RECEIVABLE ", "Recevables")
gl['PieceRef'] = gl["PieceRef"].str.replace("AFTER PAYOUT ", "APRÈS PAIEMENT")
gl['PieceRef'] = gl["PieceRef"].str.replace("CLEAN UP ", "APUREMENT")
gl['PieceRef'] = gl["PieceRef"].str.replace("EMPLOYEE TRAVEL INSUR ", "ASSURANCE DE VOYAGE DES EMPLOYÉS")
gl['PieceRef'] = gl["PieceRef"].str.replace("CORRECTION OF", "CORRECTION DE")
gl['PieceRef'] = gl["PieceRef"].str.replace("TAXES PAYROLL", "IMPÔTS SUR LA MASSE SALARIALE")
gl['PieceRef'] = gl["PieceRef"].str.replace("ACCOUNT", "COMPTE")
gl['PieceRef'] = gl["PieceRef"].str.replace("TAX", "Impôt")
gl['PieceRef'] = gl["PieceRef"].str.replace("life disab", "Incapacité de vie")
gl['PieceRef'] = gl["PieceRef"].str.replace("HOUSING TAX","TAXE D'HABITATION")
gl['PieceRef'] = gl["PieceRef"].str.replace("GROSS SALARY","SALAIRE BRUT")
gl['PieceRef'] = gl["PieceRef"].str.replace("Cleaning Services","Nettoyage")
gl['PieceRef'] = gl["PieceRef"].str.replace("Freight","Fret")
gl['PieceRef'] = gl["PieceRef"].str.replace("Membership","adhésion")
gl['PieceRef'] = gl["PieceRef"].str.replace("Air cooling Maintenance","Entretien de refroidissement de l'air")
gl['PieceRef'] = gl["PieceRef"].str.replace("Power on Demand Platform","Plateforme d'energie à la demande")
gl['PieceRef'] = gl["PieceRef"].str.replace("Sanitaire room installation"," Installation de la salle sanitaire")
gl['PieceRef'] = gl["PieceRef"].str.replace("subscription","abonnement")
gl['PieceRef'] = gl["PieceRef"].str.replace("Coffee supplies "," Fournitures de café")
gl['PieceRef'] = gl["PieceRef"].str.replace("Duty and Tax ","Devoir et fiscalité")
gl['PieceRef'] = gl["PieceRef"].str.replace("Electricity ","Electricité ")
gl['PieceRef'] = gl["PieceRef"].str.replace("Lunch vouchers ","Bons déjeuner")
gl['PieceRef'] = gl["PieceRef"].str.replace("Security monitoring","Surveillance de la sécurité")
gl['PieceRef'] = gl["PieceRef"].str.replace("Water", "L'EAU")
gl['PieceRef'] = gl["PieceRef"].str.replace("Statutory Audit", "Audit statutaire")
gl['PieceRef'] = gl["PieceRef"].str.replace(" Meeting room screen installation", "Installation de l'écran de la salle de réunion")
gl['PieceRef'] = gl["PieceRef"].str.replace("Water", "L'EAU")
gl['PieceRef'] = gl["PieceRef"].str.replace("Water", "L'EAU")
gl['PieceRef'] = gl["PieceRef"].str.replace("Tax Credit FY 2016", "Crédit d'impôt Exercice 2016")
gl['PieceRef'] = gl["PieceRef"].str.replace("Bank of America Merill Lynch-T&E statement","Déclaration de Merill Lynch")
gl['PieceRef'] = gl["PieceRef"].str.replace("English Translation", "Traduction anglaise")
gl['PieceRef'] = gl["PieceRef"].str.replace("Office Rent", "Location de Bureau")
gl['PieceRef'] = gl["PieceRef"].str.replace("Annual Electrical Verification", "Vérification électrique annuelle ")
gl['PieceRef'] = gl["PieceRef"].str.replace("Health costs ", "Coûts santé")
gl['PieceRef'] = gl["PieceRef"].str.replace("Unlimited-receipt and policy audit", "Vérification illimitée des reçus et audites")
gl['PieceRef'] = gl["PieceRef"].str.replace("Water fountain ", "Fontaine d'eau")
gl['PieceRef'] = gl["PieceRef"].str.replace("Quartely control visit", "Visite de contrôle trimestrielle")
gl['PieceRef'] = gl["PieceRef"].str.replace("Fire extinguishers annual check", "Vérification annuelle des extincteurs")
gl['PieceRef'] = gl["PieceRef"].str.replace("showroom rent", "location de salle d'exposition")
gl['PieceRef'] = gl["PieceRef"].str.replace("AND ACTUAL RECEIV","ET RECETTES RÉELLES")
gl['PieceRef'] = gl["PieceRef"].str.replace("FILING","DÉPÔT")
gl['PieceRef'] = gl["PieceRef"].str.replace("ORDERS","ORDRES")
gl['PieceRef'] = gl["PieceRef"].str.replace("EXCLUDED -DUMMY CREDIT","EXCLU")
gl['PieceRef'] = gl["PieceRef"].str.replace("RELARING TO","RELATIF À")
gl['PieceRef'] = gl["PieceRef"].str.replace("CLEAN UP-","APUREMENT-")
gl['PieceRef'] = gl["PieceRef"].str.replace("2ND INSTALLEMENT","2ème versement")
gl['PieceRef'] = gl["PieceRef"].str.replace("DOUBLE PAYMENT","DOUBLE PAIEMENT")
gl['PieceRef'] = gl["PieceRef"].str.replace("CLEAN UP-","APUREMENT-")
gl['PieceRef'] = gl["PieceRef"].str.replace("DUTIES","DROITS")
gl['PieceRef'] = gl["PieceRef"].str.replace("Previous balance","Solde Précédent")
gl['PieceRef'] = gl["PieceRef"].str.replace("Cash fx","Cash FX")
gl['PieceRef'] = gl["PieceRef"].str.replace("PAYROLL INCOME","REVENU DE PAIE")
gl['PieceRef'] = gl["PieceRef"].str.replace("TELEPHONE CHARGES","Frais de Téléphone")
gl['PieceRef'] = gl["PieceRef"].str.replace("Clearing","Compensation")
gl['PieceRef'] = gl["PieceRef"].str.replace("Hotel","Hôtel")
gl['PieceRef'] = gl["PieceRef"].str.replace("Miscellaneous","Divers")
gl['PieceRef'] = gl["PieceRef"].str.replace("Corporate Card-Out-of-Poc","")
gl['PieceRef'] = gl["PieceRef"].str.replace("Traveling Dolby Empl","Employé itinérant de Dolby")
gl['PieceRef'] = gl["PieceRef"].str.replace("Tools-Equipment-Lab Supplies","Outils-Equipement-Fournitures de laboratoire")
gl['PieceRef'] = gl["PieceRef"].str.replace("rounding","Arrondissement")
gl['PieceRef'] = gl["PieceRef"].str.replace("Building Supplies-Maintenance","Matériaux de construction-Entretien")
gl['PieceRef'] = gl["PieceRef"].str.replace("Expensed Furniture","Mobilier Dépensé")
gl['PieceRef'] = gl["PieceRef"].str.replace("Credit for Charges","Crédit pour frais")
gl['PieceRef'] = gl["PieceRef"].str.replace("Manual P-ment and double payment to be deduct","P-mnt manuel et double paiement à déduire")
gl['PieceRef'] = gl["PieceRef"].str.replace("Employee insurance travel","Assurance de voyage des employés 2019")
gl['PieceRef'] = gl["PieceRef"].str.replace("Rent ","Location ")
gl['PieceRef'] = gl["PieceRef"].str.replace("Lunch vouchers ","Bons déjeuner")
gl['PieceRef'] = gl["PieceRef"].str.replace("Store Room ","Chambre Stocke")
gl['PieceRef'] = gl["PieceRef"].str.replace("Evaluation ","Évaluation ")
gl['PieceRef'] = gl["PieceRef"].str.replace("Charges ","Frais ")
gl['PieceRef'] = gl["PieceRef"].str.replace("On Line ","En ligne ")
gl['PieceRef'] = gl["PieceRef"].str.replace("/Building Supplies/Maintenance","/ Matériaux de construction / Entretien")
gl['PieceRef'] = gl["PieceRef"].str.replace("Music Instruments","Instruments Musicales")
gl['PieceRef'] = gl["PieceRef"].str.replace("/Employee Awards/Recognition", "/ Récompenses des employés / Reconnaissance")
gl['PieceRef'] = gl["PieceRef"].str.replace("/Daily Allowance","/Indemnité journalière")
gl['PieceRef'] = gl["PieceRef"].str.replace("RECLASS ", "RECLASSIFICATION ")
gl['PieceRef'] = gl["PieceRef"].str.replace("Purchase Accounting", "Comptabilité d'achat")
gl['PieceRef'] = gl["PieceRef"].str.replace( "EXPAT ", " Expatrié ")
gl['PieceRef'] = gl["PieceRef"].str.replace("FROM ", "DE ")
gl['PieceRef'] = gl["PieceRef"].str.replace("INVOICE", "FACTURE")
gl['PieceRef'] = gl["PieceRef"].str.replace("CLEANUP", "APUREMENT")
gl['PieceRef'] = gl["PieceRef"].str.replace("Repayment", "Restitution")
gl['PieceRef'] = gl["PieceRef"].str.replace("Office Furniture", "Meubles de bureau")
gl['PieceRef'] = gl["PieceRef"].str.replace("anti-stress treatments", "traitements anti-stress")
gl['PieceRef'] = gl["PieceRef"].str.replace("UK Tax Return", "Décl. d'impôt Royaume-Uni")
gl['PieceRef'] = gl["PieceRef"].str.replace("Office Location", "Location de bureau")
gl['PieceRef'] = gl["PieceRef"].str.replace("Deliver Service", "Service de livraison")
gl['PieceRef'] = gl["PieceRef"].str.replace("Foreign Office Support", "Soutien aux bureaux étrangères")
gl['PieceRef'] = gl["PieceRef"].str.replace("Showroom", "Salle d'exposition")
gl['PieceRef'] = gl["PieceRef"].str.replace("aditional Services", "Services supplémentaires ")
gl['PieceRef'] = gl["PieceRef"].str.replace("Cofee consumption Paris office", "Consommation de café Bureau de Paris")
gl['PieceRef'] = gl["PieceRef"].str.replace("Consultant ", "Expert-conseil")
gl['PieceRef'] = gl["PieceRef"].str.replace("INVOICE", "FACTURE")
gl['PieceRef'] = gl["PieceRef"].str.replace("Rent-", "Location-")
gl['PieceRef'] = gl["PieceRef"].str.replace("Corporate", "Entreprise")
gl['PieceRef'] = gl["PieceRef"].str.replace("COST ", "COÛT ")
gl['PieceRef'] = gl["PieceRef"].str.replace("TRAINING", "Formation")
gl['PieceRef'] = gl["PieceRef"].str.replace("LIFE DISAB", "Invalidité")
gl['PieceRef'] = gl["PieceRef"].str.replace("INSU ", "ASSURANCE ")
gl['PieceRef'] = gl["PieceRef"].str.replace("PATENT AWARD", "BREVET")
gl['PieceRef'] = gl["PieceRef"].str.replace("EQUIVALENT POUR UNUSED VACATION POUR LEAVE", "CONGÉ DE VACANCES INUTILISÉS")
gl['PieceRef'] = gl["PieceRef"].str.replace("SPOT ", "")
gl['PieceRef'] = gl["PieceRef"].str.replace("AIRFARE TRANSFER TO PREPAIDS", "TRANSFERT DE TRANSPORT AÉRIEN À PAYÉ D'AVANCE")
gl['PieceRef'] = gl["PieceRef"].str.replace("WITHHOLDING", "RETRAIT")
gl['PieceRef'] = gl["PieceRef"].str.replace("Clear ", "Reglement ")
gl['PieceRef'] = gl["PieceRef"].str.replace("Clear ", "Reglement ")
gl['PieceRef'] = gl["PieceRef"].str.replace("Rent/", "Location/")
gl['PieceRef'] = gl["PieceRef"].str.replace("Pay ", "Paiement ")
gl['PieceRef'] = gl["PieceRef"].str.replace("PAYMENT", "Paiement ")
gl['PieceRef'] = gl["PieceRef"].str.replace("French Income Tax Return;", "Déclaration de revenus française;")
gl['PieceRef'] = gl["PieceRef"].str.replace("REVESERVICES", "SERVICES")
gl['PieceRef'] = gl["PieceRef"].str.replace("INCLUDED DOUBLE", "DOUBLE INCLUS")
gl['PieceRef'] = gl["PieceRef"].str.replace("Bank", "Banque")
gl['PieceRef'] = gl["PieceRef"].str.replace("/Promotional Expenses", "/Frais de promotion")
gl['PieceRef'] = gl["PieceRef"].str.replace(" ACTIVITY ", " activité ")
gl['PieceRef'] = gl["PieceRef"].str.replace(" DEFINED BENEFIT LIABILITY", "PASSIF À AVANTAGES DÉTERMINÉES")
gl['PieceRef'] = gl["PieceRef"].str.replace("COÛT PLUS ", "Revient Majoré")
gl['PieceRef'] = gl["PieceRef"].str.replace("/Airline Frais", "/Tarifs aériens")
gl['PieceRef'] = gl["PieceRef"].str.replace("/Tools/Equipment/Lab Supplies", "/Outils / Équipement / Fournitures de laboratoire")
gl['PieceRef'] = gl["PieceRef"].str.replace("Rent/", "Location/")
gl['PieceRef'] = gl["PieceRef"].str.replace("Payment Posting", "Paiements")
gl['PieceRef'] = gl["PieceRef"].str.replace("COMMISSION D’ACCUMULATION", "ACCUMULATIONS DE COMISSIONS")
gl['PieceRef'] = gl["PieceRef"].str.replace("ImpôtE", "Impôt")
gl['PieceRef'] = gl["PieceRef"].str.replace("MED.INSU", "MED.ASSURANCE")
gl['PieceRef'] = gl["PieceRef"].str.replace("APPRENTICESHIP_CONTRIBUTIONS_TRUE_UP", "CONTRIBUTIONS À L'APPRENTISSAGE/TRUE UP")
gl['PieceRef'] = gl["PieceRef"].str.replace("NET PAY", "SALAIRE NET")
gl['PieceRef'] = gl["PieceRef"].str.replace("CASH ", "ARGENT ")
gl['PieceRef'] = gl["PieceRef"].str.replace("Repayment ", "Repaiement ")
gl['PieceRef'] = gl["PieceRef"].str.replace("Acct. ", "Comptab. ")
gl['PieceRef'] = gl["PieceRef"].str.replace("ACCR ", "ACC ")
gl['PieceRef'] = gl["PieceRef"].str.replace("Accr ", "Acc.")
gl['PieceRef'] = gl["PieceRef"].str.replace("Cash Balance", "Solde de caisse")
gl['PieceRef'] = gl["PieceRef"].str.replace("RECLASS ", "RECLASSEMENT ")
gl['PieceRef'] = gl["PieceRef"].str.replace("VAT FILING ", "Dépôt de TVA ")
gl['PieceRef'] = gl["PieceRef"].str.replace("Needs to be re-booked due", "KI")
gl['PieceRef'] = gl["PieceRef"].str.replace("reclass from", "reclasser de")
gl['PieceRef'] = gl["PieceRef"].str.replace("RECLASS FROM", "reclasser de")
gl['PieceRef'] = gl["PieceRef"].str.replace("PAYROLL", "PAIE")
gl['PieceRef'] = gl["PieceRef"].str.replace("RECLASS ", "Reclasser")
gl['PieceRef'] = gl["PieceRef"].str.replace("DEDICTION","DEDUCTION")
gl['PieceRef'] = gl["PieceRef"].str.replace("Cash","Argent ")
gl['PieceRef'] = gl["PieceRef"].str.replace("cash ","argent ")
gl['PieceRef'] = gl["PieceRef"].str.replace("ReclasserIFICATIO","RECLASSEMENT ")
gl['PieceRef'] = gl["PieceRef"].str.replace("ImpôtS ","Impôts ")
gl['PieceRef'] = gl["PieceRef"].str.replace("Working Repas (Employees Only) ","Repas de travail (employés seulement) ")
gl['PieceRef'] = gl["PieceRef"].str.replace("/Banque Frais","/Frais Bancaires")
gl['PieceRef'] = gl["PieceRef"].str.replace("MED. INS.","ASSURANCE MED.")
gl['PieceRef'] = gl["PieceRef"].str.replace("AJE WIRE LOG TRAN","AJE VERSEMENT")
gl['PieceRef'] = gl["PieceRef"].str.replace("JUN'","JUIN'")
gl['PieceRef'] = gl["PieceRef"].str.replace("Deferred Rent18 rue de Lo","Loyer différé 18 Rue de Lo")
gl['PieceRef'] = gl["PieceRef"].str.replace("Facture - Brut'","Facture - Brute")
gl['PieceRef'] = gl["PieceRef"].str.replace("T&E","VD")
gl['PieceRef'] = gl["PieceRef"].str.replace("/","")
gl['PieceRef'] = gl["PieceRef"].str.replace("Inv","Facture")
gl['PieceRef'] = gl["PieceRef"].str.replace("RECUR DEF RENT","LOCATION DIFFÉRÉE RECUR")
gl['PieceRef'] = gl["PieceRef"].str.replace(" NaT ","")
gl['JournalLib'] = gl["JournalLib"].str.replace(" NaT ","")
gl['EcritureLib'] = gl["EcritureLib"].str.replace(" NaT ","")
gl['PieceRef'] = gl["PieceRef"].str.replace(" NAN ","")
gl['JournalLib'] = gl["JournalLib"].str.replace(" NAN ","")
gl['EcritureLib'] = gl["EcritureLib"].str.replace(" NAN ","")
gl['PieceRef'] = gl["PieceRef"].str.replace(" nan ","")
gl['JournalLib'] = gl["JournalLib"].str.replace(" nan ","")
gl['EcritureLib'] = gl["EcritureLib"].str.replace(" nan ","")
gl['PieceRef'] = gl["PieceRef"].str.replace(" nannan ","")
gl['JournalLib'] = gl["JournalLib"].str.replace(" nannan ","")
gl['EcritureLib'] = gl["EcritureLib"].str.replace(" nannan ","")
gl.loc[gl["JournalLib"].str.isnumeric(),'JournalLib'] = gl['JournalCode']
gl['JournalLib'] = gl['JournalLib'].replace(codes)
gl['JournalLib'] = gl["JournalLib"].str.replace("-2014123456789","-2014V")
gl['JournalLib'] = gl["JournalLib"].str.replace("T/&E","VD")
gl['EcritureLib'] = gl["EcritureLib"].str.replace("T/&E","VD")
gl['DocDate'] = gl['Document Date']
gl.loc[gl["PieceRef"].isnull(),'PieceRef'] = gl["JournalLib"].map(str) + " " + gl.DocDate.dt.strftime('%Y%m%d').astype(str)
gl.loc[gl["EcritureLib"].str.isnumeric(),'EcritureLib'] = gl['JournalLib'].map(str) + gl['EcritureNum'].map(str)
gl['Document Date'] = gl['DocDate']
del gl['DocDate']
gl['EcritureLib'] = gl['EcritureLib'].apply(lambda x: x.upper())
gl['Credit'] = gl['Credit'].abs()
gl = gl.sort_values('EcritureNum')
return gl
def save_results(df, output):
    """Drop the raw SAP source columns and write the FEC frame to an Excel file.

    Parameters
    ----------
    df : pandas.DataFrame
        Fully transformed/translated general-ledger frame.  Mutated in place:
        the original SAP extract columns are removed before writing.
    output : str
        Path of the .xlsx workbook to create.
    """
    # SAP extract columns that must not appear in the final FEC output.
    source_columns = [
        'Amount in doc. curr.', 'Assignment', 'Document Date', 'Reference',
        'Text', 'Posting Date', 'Document Number', 'Document Type',
        'Document currency', 'G/L Account', 'Local Currency',
        'Local currency 2', 'Offsetting acct no.',
    ]
    # Single drop instead of thirteen `del` statements; still raises KeyError
    # on a missing column, exactly like `del df[col]` did.
    df.drop(columns=source_columns, inplace=True)
    # Context manager saves and closes the workbook even if to_excel raises;
    # ExcelWriter.save() was deprecated and removed in pandas 2.0.
    with pd.ExcelWriter(output,
                        engine='xlsxwriter',
                        datetime_format='yyyymmdd',
                        date_format='yyyymmdd') as writer:
        df.to_excel(writer, index=False, sheet_name='Sheet 1',
                    columns=['JournalCode', 'JournalLib', 'EcritureNum',
                             'EcritureDate', 'CompteNum', 'CompteLib',
                             'CompAuxNum', 'CompAuxLib', 'PieceRef',
                             'PieceDate', 'EcritureLib', 'Debit', 'Credit',
                             'EcritureLet', 'DateLet', 'ValidDate',
                             'MontantDevise', 'Idevise'])
        # Widen every column so dates/labels are not truncated in Excel.
        worksheet = writer.sheets['Sheet 1']
        worksheet.set_column('A:AV', 40)
if __name__ == '__main__':
    # Command-line entry point: build the French FEC file from the raw GL
    # extracts supplied on the command line.
    args = parse_args()
    gl_items = args.GL
    parked = args.Parked
    output_file = args.Choose_File_Name
    # Progress messages are printed *before* the step they describe; they were
    # originally printed after the work had already finished, which was
    # misleading on long runs.
    print("Reading data and combining with parked and deleted items")
    print("Separating Debits and Credits")
    print("Mapping Vendors")
    output_df = combine(gl_items, parked)
    output_df_transformed = transform(output_df)
    print("Translating to French")
    print("Mapping French Accounts")
    print("Filling in blanks")
    output_df_translated = translate(output_df_transformed)
    save_results(output_df_translated, output_file)
    # Sanity check: a balanced ledger has equal debit and credit totals.
    total_debit = output_df_translated['Debit'].sum(axis=0, skipna=True)
    total_credit = output_df_translated['Credit'].sum(axis=0, skipna=True)
    difference = total_debit - total_credit
    # NOTE(review): exact float comparison — consider a small tolerance.
    if difference != 0:
        print("WARNING: Debits and Credits are not balanced!")
| [
"pandas.ExcelWriter",
"gooey.Gooey",
"gooey.GooeyParser",
"pandas.read_excel"
] | [((118, 254), 'gooey.Gooey', 'Gooey', ([], {'program_name': '"""FEC FILE FOR FRANCE"""', 'required_cols': '(4)', 'default_size': '(710, 700)', 'navigation': '"""TABBED"""', 'header_bg_color': '"""#48a7fa"""'}), "(program_name='FEC FILE FOR FRANCE', required_cols=4, default_size=(\n 710, 700), navigation='TABBED', header_bg_color='#48a7fa')\n", (123, 254), False, 'from gooey import Gooey, GooeyParser\n'), ((286, 299), 'gooey.GooeyParser', 'GooeyParser', ([], {}), '()\n', (297, 299), False, 'from gooey import Gooey, GooeyParser\n'), ((1519, 1538), 'pandas.read_excel', 'pd.read_excel', (['file'], {}), '(file)\n', (1532, 1538), True, 'import pandas as pd\n'), ((1556, 1576), 'pandas.read_excel', 'pd.read_excel', (['file2'], {}), '(file2)\n', (1569, 1576), True, 'import pandas as pd\n'), ((2902, 2940), 'pandas.read_excel', 'pd.read_excel', (['"""mapping-accounts.xlsx"""'], {}), "('mapping-accounts.xlsx')\n", (2915, 2940), True, 'import pandas as pd\n'), ((3467, 3496), 'pandas.read_excel', 'pd.read_excel', (['"""test128.xlsx"""'], {}), "('test128.xlsx')\n", (3480, 3496), True, 'import pandas as pd\n'), ((3510, 3547), 'pandas.read_excel', 'pd.read_excel', (['"""mapping-journal.xlsx"""'], {}), "('mapping-journal.xlsx')\n", (3523, 3547), True, 'import pandas as pd\n'), ((3909, 3939), 'pandas.read_excel', 'pd.read_excel', (['"""Vendors1.xlsx"""'], {}), "('Vendors1.xlsx')\n", (3922, 3939), True, 'import pandas as pd\n'), ((4513, 4542), 'pandas.read_excel', 'pd.read_excel', (['"""test128.xlsx"""'], {}), "('test128.xlsx')\n", (4526, 4542), True, 'import pandas as pd\n'), ((4556, 4593), 'pandas.read_excel', 'pd.read_excel', (['"""mapping-journal.xlsx"""'], {}), "('mapping-journal.xlsx')\n", (4569, 4593), True, 'import pandas as pd\n'), ((84466, 84565), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['output'], {'engine': '"""xlsxwriter"""', 'datetime_format': '"""yyyymmdd"""', 'date_format': '"""yyyymmdd"""'}), "(output, engine='xlsxwriter', datetime_format='yyyymmdd',\n 
date_format='yyyymmdd')\n", (84480, 84565), True, 'import pandas as pd\n')] |
import boto3
def handler(event, _):
    """AWS Lambda entry point: start the CodeBuild project named in the event.

    The project name is taken from the first record's ``customData`` field.
    """
    project_name = event['Records'][0]['customData']
    boto3.client('codebuild').start_build(projectName=project_name)
| [
"boto3.client"
] | [((38, 63), 'boto3.client', 'boto3.client', (['"""codebuild"""'], {}), "('codebuild')\n", (50, 63), False, 'import boto3\n')] |
# menu.py
# 维护暂停界面
import pygame
from pygame.locals import *
import sys
from utility import globe
from process.scene import menu_confirm
from PIL import Image, ImageFilter
class Pause_Menu(object):
    # Pause overlay with three vertically stacked buttons
    # (Resume / To Title / Retry).  self.index is the highlighted button
    # (0..2), or 3 when nothing is selected.

    def __init__(self):
        self.button_rect = []
        self.rs = globe.destiny.rsManager.image
        self.pause_title = self.rs["menu_title"]
        self.confirm_title = self.rs["confirm_title"]
        self.button_rect.append([100, 220])  # Resume_Start
        self.button_rect.append([100, 260])  # To_Title_Start
        self.button_rect.append([90, 300])  # Retry_Start
        self.image = []
        self.image.append(self.rs["Resume_Start"])  # index: 0
        self.image.append(self.rs["To_Title_Start"])  # index: 1
        self.image.append(self.rs["Retry_Start"])  # index: 2
        self.index = 3  # nothing highlighted yet
        # Key state kept global so the confirm sub-menu can reset it.
        globe.scene_menu_choose = False

    def _highlight(self, i):
        # Nudge the button left and raise its alpha to stand out
        # (pygame clamps 1000 down to 255).
        self.button_rect[i][0] -= 5
        self.image[i].set_alpha(1000)

    def _unhighlight(self, i):
        # Restore the button position and dim it again.
        self.button_rect[i][0] += 5
        self.image[i].set_alpha(90)

    def _move_selection(self, new_index):
        # Switch the highlight from the current button to new_index
        # and play the selection sound.
        if self.index != 3:
            self._unhighlight(self.index)
        self.index = new_index
        self._highlight(new_index)
        globe.destiny.msManager.play_SE("select")

    def replace(self):
        # Clear the current highlight (if any) and reset the selection state.
        if self.index ^ 3:
            self._unhighlight(self.index)
            self.index = 3

    def update(self):
        if not globe.scene_menu_choose:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    sys.exit()
                if event.type == KEYDOWN:
                    if event.key == pygame.K_F4 and event.mod == pygame.KMOD_LALT:
                        pygame.quit()
                        sys.exit()
                    if event.key == K_UP:
                        # From "nothing selected" jump to the top button,
                        # otherwise wrap upwards through 0..2.
                        self._move_selection(
                            0 if self.index == 3 else (self.index - 1) % 3)
                    if event.key == K_DOWN:
                        # From "nothing selected" jump to the bottom button,
                        # otherwise wrap downwards through 0..2.
                        self._move_selection(
                            2 if self.index == 3 else (self.index + 1) % 3)
                    if event.key == K_z and self.index ^ 3:
                        globe.scene_menu_choose = True
                        globe.destiny.msManager.play_SE("select")
                    if event.key == K_ESCAPE:
                        self.replace()
                        globe.destiny.msManager.unpause()
                        globe.destiny.back()
        else:
            if self.index == 0:  # resume the game
                self.replace()
                globe.destiny.back()
                globe.destiny.msManager.unpause()
            if self.index == 1:  # back to title -> confirmation screen
                self.replace()
                globe.scene_menu_flag = 1
                globe.destiny.call(menu_confirm.Scene_Menu_Confirm)
            if self.index == 2:  # retry -> confirmation screen
                self.replace()
                globe.scene_menu_flag = 2
                globe.destiny.call(menu_confirm.Scene_Menu_Confirm)

    def draw(self, screen):
        screen.blit(self.pause_title, (160, 140))
        for i in range(0, 3):
            screen.blit(self.image[i], self.button_rect[i])

    def start(self):
        pass

    def stop(self):
        pass
class Scene_Menu(object):
    # Pause scene wrapper: snapshots the active game window, blurs it with
    # Pillow, and renders the Pause_Menu on top of the blurred background.

    def __init__(self):
        self.rs = globe.destiny.rsManager
        self.menu = Pause_Menu()
        self.count = 0
        self.fade = pygame.Surface(globe.destiny.screen.get_size())
        self.imgtmp = globe.destiny.screen.subsurface(Rect(30, 14, 388, 452)).copy()
        # Blur the snapshot with three Pillow BLUR passes.
        for _ in range(0, 3):
            # pygame surface -> Pillow image
            pixel_bytes = pygame.image.tostring(self.imgtmp, "RGBA", False)
            pil_image = Image.frombytes("RGBA", self.imgtmp.get_size(), pixel_bytes)
            blurred = pil_image.filter(ImageFilter.BLUR)
            # Pillow image -> pygame surface
            blurred_bytes = blurred.tobytes("raw", "RGBA")
            self.imgtmp = pygame.image.fromstring(blurred_bytes, blurred.size, "RGBA")
        # Cache the final blurred background for other scenes.
        globe.game_active_bg_blured = self.imgtmp

    def update(self):
        self.menu.update()

    def draw(self, screen):
        screen.blit(self.imgtmp, (30, 14))
        self.menu.draw(screen)

    def start(self):
        pass

    def stop(self):
        pass
| [
"utility.globe.destiny.msManager.play_SE",
"pygame.image.tostring",
"pygame.quit",
"pygame.event.get",
"pygame.image.fromstring",
"utility.globe.destiny.back",
"utility.globe.destiny.call",
"utility.globe.destiny.msManager.unpause",
"sys.exit",
"utility.globe.destiny.screen.get_size"
] | [((1175, 1193), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1191, 1193), False, 'import pygame\n'), ((5274, 5305), 'utility.globe.destiny.screen.get_size', 'globe.destiny.screen.get_size', ([], {}), '()\n', (5303, 5305), False, 'from utility import globe\n'), ((5502, 5551), 'pygame.image.tostring', 'pygame.image.tostring', (['self.imgtmp', '"""RGBA"""', '(False)'], {}), "(self.imgtmp, 'RGBA', False)\n", (5523, 5551), False, 'import pygame\n'), ((5798, 5852), 'pygame.image.fromstring', 'pygame.image.fromstring', (['raw_str', 'imgblur.size', '"""RGBA"""'], {}), "(raw_str, imgblur.size, 'RGBA')\n", (5821, 5852), False, 'import pygame\n'), ((4377, 4397), 'utility.globe.destiny.back', 'globe.destiny.back', ([], {}), '()\n', (4395, 4397), False, 'from utility import globe\n'), ((4414, 4447), 'utility.globe.destiny.msManager.unpause', 'globe.destiny.msManager.unpause', ([], {}), '()\n', (4445, 4447), False, 'from utility import globe\n'), ((4590, 4641), 'utility.globe.destiny.call', 'globe.destiny.call', (['menu_confirm.Scene_Menu_Confirm'], {}), '(menu_confirm.Scene_Menu_Confirm)\n', (4608, 4641), False, 'from utility import globe\n'), ((4784, 4835), 'utility.globe.destiny.call', 'globe.destiny.call', (['menu_confirm.Scene_Menu_Confirm'], {}), '(menu_confirm.Scene_Menu_Confirm)\n', (4802, 4835), False, 'from utility import globe\n'), ((1261, 1274), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (1272, 1274), False, 'import pygame\n'), ((1295, 1305), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1303, 1305), False, 'import sys\n'), ((1455, 1468), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (1466, 1468), False, 'import pygame\n'), ((1493, 1503), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1501, 1503), False, 'import sys\n'), ((4046, 4087), 'utility.globe.destiny.msManager.play_SE', 'globe.destiny.msManager.play_SE', (['"""select"""'], {}), "('select')\n", (4077, 4087), False, 'from utility import globe\n'), ((4197, 4230), 
'utility.globe.destiny.msManager.unpause', 'globe.destiny.msManager.unpause', ([], {}), '()\n', (4228, 4230), False, 'from utility import globe\n'), ((4255, 4275), 'utility.globe.destiny.back', 'globe.destiny.back', ([], {}), '()\n', (4273, 4275), False, 'from utility import globe\n'), ((1822, 1863), 'utility.globe.destiny.msManager.play_SE', 'globe.destiny.msManager.play_SE', (['"""select"""'], {}), "('select')\n", (1853, 1863), False, 'from utility import globe\n'), ((3047, 3088), 'utility.globe.destiny.msManager.play_SE', 'globe.destiny.msManager.play_SE', (['"""select"""'], {}), "('select')\n", (3078, 3088), False, 'from utility import globe\n'), ((2298, 2339), 'utility.globe.destiny.msManager.play_SE', 'globe.destiny.msManager.play_SE', (['"""select"""'], {}), "('select')\n", (2329, 2339), False, 'from utility import globe\n'), ((2685, 2726), 'utility.globe.destiny.msManager.play_SE', 'globe.destiny.msManager.play_SE', (['"""select"""'], {}), "('select')\n", (2716, 2726), False, 'from utility import globe\n'), ((3477, 3518), 'utility.globe.destiny.msManager.play_SE', 'globe.destiny.msManager.play_SE', (['"""select"""'], {}), "('select')\n", (3508, 3518), False, 'from utility import globe\n'), ((3864, 3905), 'utility.globe.destiny.msManager.play_SE', 'globe.destiny.msManager.play_SE', (['"""select"""'], {}), "('select')\n", (3895, 3905), False, 'from utility import globe\n')] |
from setuptools import setup
# Load version metadata by executing version.py into a throwaway namespace;
# this exposes __version__ without importing the (possibly unbuilt) package.
pkg_vars = {}
with open("version.py") as fp:
    exec(fp.read(), pkg_vars)
# Package metadata for sn_simulation (LSST DESC supernova simulations).
setup(
    name='sn_simulation',
    version= pkg_vars['__version__'],
    description='Simulations for supernovae',
    url='http://github.com/lsstdesc/sn_simulation',
    author='<NAME>',
    author_email='<EMAIL>',
    license='BSD',
    packages=['sn_simulator', 'sn_simu_wrapper'],
    python_requires='>=3.5',
    zip_safe=False,
    install_requires=[
        'sn_tools>=0.1',
        'sn_stackers>=0.1',
        'dustmaps'
    ],
)
| [
"setuptools.setup"
] | [((131, 514), 'setuptools.setup', 'setup', ([], {'name': '"""sn_simulation"""', 'version': "pkg_vars['__version__']", 'description': '"""Simulations for supernovae"""', 'url': '"""http://github.com/lsstdesc/sn_simulation"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""BSD"""', 'packages': "['sn_simulator', 'sn_simu_wrapper']", 'python_requires': '""">=3.5"""', 'zip_safe': '(False)', 'install_requires': "['sn_tools>=0.1', 'sn_stackers>=0.1', 'dustmaps']"}), "(name='sn_simulation', version=pkg_vars['__version__'], description=\n 'Simulations for supernovae', url=\n 'http://github.com/lsstdesc/sn_simulation', author='<NAME>',\n author_email='<EMAIL>', license='BSD', packages=['sn_simulator',\n 'sn_simu_wrapper'], python_requires='>=3.5', zip_safe=False,\n install_requires=['sn_tools>=0.1', 'sn_stackers>=0.1', 'dustmaps'])\n", (136, 514), False, 'from setuptools import setup\n')] |
import contextlib
import datetime
import json
import StringIO
from django.contrib.auth import logout as logout_user
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponseRedirect, JsonResponse, Http404, HttpResponse, HttpResponseBadRequest
from django.db.models import Count
from django.shortcuts import render
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.http import urlencode, urlquote
from django_ical import feedgenerator
from simpleoncall.forms.auth import AuthenticationForm, RegistrationForm
from simpleoncall.forms.account import EditAccountForm, ChangePasswordForm
from simpleoncall.forms.schedule import TeamScheduleForm
from simpleoncall.forms.team import CreateTeamForm, SelectTeamForm, InviteTeamForm
from simpleoncall.decorators import require_authentication, require_selected_team
from simpleoncall.models import APIKey, TeamMember, TeamInvite, User, TeamSchedule
from simpleoncall.models import Alert, EventStatus, NotificationSetting, NotificationType
@require_authentication()
@require_selected_team()
def dashboard(request):
    """Render the team dashboard for the last 12 hours.

    Shows the 10 most recent alerts, per-status totals, and a per-hour
    alert count timeseries (JSON) for the dashboard chart.
    """
    end = timezone.now()
    start = end - datetime.timedelta(hours=12)
    date_added__range = (start, end)
    # Ten most recent alerts for this team inside the window.
    alerts = Alert.objects.filter(team=request.team, date_added__range=date_added__range).order_by('-date_added')[:10]
    # Alert counts grouped by status: [{'status': ..., 'total': ...}, ...].
    alert_statuses = Alert.objects.filter(
        team=request.team, date_added__range=date_added__range
    ).values('status').annotate(total=Count('status'))
    # Alert counts grouped by exact timestamp; bucketed into hours below.
    alert_times = Alert.objects.filter(
        team=request.team, date_added__range=date_added__range
    ).values('date_added').annotate(total=Count('date_added')).order_by('-date_added')
    # Pre-seed one zero-valued hourly bucket per hour in the window so the
    # chart has a point for hours with no alerts.  Keys are epoch-second
    # strings of each timestamp truncated down to the hour.
    alert_timeseries = {}
    while start <= end:
        bucket = start - datetime.timedelta(minutes=start.minute % 60,
                                           seconds=start.second,
                                           microseconds=start.microsecond)
        alert_timeseries[bucket.strftime('%s')] = 0
        start += datetime.timedelta(minutes=60)
    # Fold each alert's count into the hourly bucket its timestamp
    # truncates to (same truncation as above, so keys always exist).
    for alert in alert_times:
        added = alert['date_added']
        bucket = added - datetime.timedelta(minutes=added.minute % 60,
                                           seconds=added.second,
                                           microseconds=added.microsecond)
        alert_timeseries[bucket.strftime('%s')] += alert['total']
    context = {
        'title': 'Dashboard',
        'alerts': alerts,
        'statuses': dict((a['status'], a['total']) for a in alert_statuses),
        'timeseries': json.dumps(alert_timeseries),
    }
    return render(request, 'dashboard.html', context)
def login(request):
    """Show the combined login/registration page.

    Already-authenticated visitors are sent straight to the dashboard.
    """
    if request.user.is_authenticated():
        return HttpResponseRedirect(reverse('dashboard'))
    template_context = {
        'title': 'Login',
        'login': True,
        'login_form': AuthenticationForm(),
        'register_form': RegistrationForm(),
    }
    return render(request, 'login.html', template_context)
def register(request):
    """Show the registration page (shares the login template).

    Already-authenticated visitors are redirected to the dashboard.  The
    optional ``?next=`` parameter is passed through (quoted) so the user
    can be forwarded after sign-up.
    """
    if request.user.is_authenticated():
        return HttpResponseRedirect(reverse('dashboard'))
    context = {
        'login_form': AuthenticationForm(),
        'register_form': RegistrationForm(),
        'register': True,
        'title': 'Register',
        # Default to '' — urlquote(None) would coerce a missing ?next=
        # into the literal string 'None'.
        'next': urlquote(request.GET.get('next', '')),
    }
    return render(request, 'login.html', context)
def logout(request):
    """End the current session and send the visitor to the login page."""
    logout_user(request)
    login_url = reverse('login')
    return HttpResponseRedirect(login_url)
@require_authentication()
@require_selected_team()
def settings(request):
    """Team settings page: API keys, member roster and an invite form."""
    team = request.team
    context = {
        'title': '%s Settings' % (team.name, ),
        'api_keys': APIKey.objects.filter(team=team),
        'members': TeamMember.objects.filter(team=team),
        'invite_team_form': InviteTeamForm(),
    }
    return render(request, 'settings.html', context)
@require_authentication()
def account(request):
    """Account page: profile and password forms plus notification rows."""
    notification_settings = request.user.get_notification_settings()
    if not notification_settings:
        # Fall back to a single default e-mail row so the template always
        # has at least one notification entry to render.
        notification_settings = [
            NotificationSetting(id=0, type=NotificationType.EMAIL, time=0),
        ]
    context = {
        'title': 'Account',
        'edit_account_form': EditAccountForm(instance=request.user),
        'change_password_form': ChangePasswordForm(instance=request.user),
        'alerts': notification_settings,
    }
    return render(request, 'account.html', context)
@require_authentication()
@require_selected_team()
def alerts(request):
    """List the team's ten most recently updated alerts plus a total count."""
    team_alerts = Alert.objects.filter(team=request.team)
    recent_alerts = team_alerts.order_by('-date_updated')[:10]
    context = {
        'title': 'Alerts',
        'alert_count': team_alerts.count(),
        'alerts': recent_alerts,
    }
    return render(request, 'alerts.html', context)
@require_authentication()
@require_selected_team()
def schedule(request):
    """Show the team's active schedule and whoever is currently on call."""
    active_schedule = request.team.get_active_schedule()
    # A team may have no active schedule yet; then nobody is on call.
    oncall = active_schedule.get_currently_on_call() if active_schedule else None
    context = {
        'title': 'Schedule',
        'schedule': active_schedule,
        'oncall': oncall,
    }
    return render(request, 'schedule.html', context)
@require_authentication(require_team=False)
def create_team(request):
    """Create-team form; on a valid POST, save, flash and go to dashboard."""
    form = CreateTeamForm(request.POST or None)
    if form.is_valid():
        team = form.save(request)
        messages.success(request, 'New team %s created' % team.name)
        return HttpResponseRedirect(reverse('dashboard'))
    return render(request, 'team/create.html', {
        'title': 'Create New Team',
        'create_team_form': form,
    })
@require_authentication()
def select_team(request):
    """Let the user switch active team; redirect home on success."""
    form = SelectTeamForm(request.POST or None, request.user)
    if form.is_valid():
        team = form.save(request)
        messages.success(request, 'Team changed to %s' % team.name)
        return HttpResponseRedirect(reverse('dashboard'))
    return render(request, 'team/select.html', {
        'title': 'Select Team',
        'select_team_form': form,
    })
def invite_accept(request):
    """Accept a team invite identified by ``?code=`` and ``?email=``.

    An existing user is added to the invite's team immediately (with a
    warning if already a member).  An unknown e-mail address is redirected
    to registration with the invite round-tripped through ``?next=`` so it
    can be accepted after sign-up.  Invalid or missing parameters fall
    through to the dashboard.
    """
    code = request.GET.get('code')
    email = request.GET.get('email')
    if not code or not email:
        return HttpResponseRedirect(reverse('dashboard'))
    # QuerySet.get() raises ObjectDoesNotExist instead of returning None,
    # so an unknown/forged invite must be caught explicitly (the original
    # `if not invite` guard was dead code and bad codes crashed with 500).
    try:
        invite = TeamInvite.objects.get(invite_code=code, email=email)
    except ObjectDoesNotExist:
        return HttpResponseRedirect(reverse('dashboard'))
    # Same issue: a not-yet-registered e-mail must yield None, not a 500,
    # or the registration branch below can never run.
    try:
        user = User.objects.get(email=email)
    except ObjectDoesNotExist:
        user = None
    if user:
        try:
            team_member = TeamMember.objects.get(team=invite.team, user=user)
        except ObjectDoesNotExist:
            team_member = None
        if team_member:
            messages.warning(request, 'already a member of team %s' % (invite.team.name, ))
        else:
            team_member = TeamMember(team=invite.team, user=user)
            team_member.save()
            messages.success(request, 'added to team %s' % (invite.team.name, ))
    else:
        # Not registered yet: send to registration, preserving the invite
        # parameters in ?next= so the invite survives sign-up.
        args = {
            'code': code,
            'email': email,
        }
        next = '%s?%s' % (reverse('invite-accept'), urlencode(args))
        redirect = '%s?next=%s' % (reverse('register'), urlquote(next))
        return HttpResponseRedirect(redirect)
    return HttpResponseRedirect(reverse('dashboard'))
@require_authentication()
@require_selected_team()
def alert_ack(request, alert_id):
    """Mark an alert as acknowledged and redirect back to the alert list."""
    try:
        # Scope the lookup to the current team (matching alert_resolve and
        # alert_view) so a user cannot ack another team's alerts, and catch
        # DoesNotExist: .get() raises rather than returning None, so the
        # `if not alert` branch below was previously unreachable (500s).
        alert = Alert.objects.get(id=alert_id, team=request.team)
    except ObjectDoesNotExist:
        alert = None
    if not alert:
        messages.error(request, 'Alert %s was not found' % (alert_id, ))
    elif alert.status == EventStatus.ACKNOWLEDGED:
        messages.warning(request, 'Alert %s already acknowledged' % (alert_id, ))
    else:
        alert.status = EventStatus.ACKNOWLEDGED
        alert.save(user=request.user)
        messages.success(request, 'Alert %s was acknowledged' % (alert_id, ))
    return HttpResponseRedirect(reverse('alerts'))
@require_authentication()
@require_selected_team()
def alert_resolve(request, alert_id):
    """Mark an alert as resolved and redirect back to the alert list."""
    try:
        # QuerySet.get() raises DoesNotExist instead of returning None, so
        # the not-found branch below was previously unreachable (500s).
        alert = Alert.objects.get(id=alert_id, team=request.team)
    except ObjectDoesNotExist:
        alert = None
    if not alert:
        messages.error(request, 'Alert %s was not found' % (alert_id, ))
    elif alert.status == EventStatus.RESOLVED:
        messages.warning(request, 'Alert %s already resolved' % (alert_id, ))
    else:
        alert.status = EventStatus.RESOLVED
        alert.save(user=request.user)
        messages.success(request, 'Alert %s was resolved' % (alert_id, ))
    return HttpResponseRedirect(reverse('alerts'))
@require_authentication()
@require_selected_team()
def alert_view(request, alert_id):
    """Show the detail page for a single alert belonging to the team."""
    try:
        # QuerySet.get() raises DoesNotExist instead of returning None, so
        # the not-found redirect below was previously unreachable (500s).
        alert = Alert.objects.get(id=alert_id, team=request.team)
    except ObjectDoesNotExist:
        alert = None
    if not alert:
        messages.error(request, 'Alert %s was not found' % (alert_id, ))
        return HttpResponseRedirect(reverse('dashboard'))
    context = {
        'title': alert.title,
        'event': alert,
    }
    return render(request, 'alert.html', context)
@require_authentication()
@require_selected_team()
def edit_schedule(request):
    """Create/edit team schedules on one page.

    A POST carrying ``schedule_id`` updates that existing schedule; a POST
    without one is treated as a new-schedule submission.  Renders one form
    per existing schedule plus an (initially hidden) new-schedule form.
    """
    msg = None
    schedule_id = None
    if 'schedule_id' in request.POST:
        schedule_id = int(request.POST['schedule_id'])
    # Unsaved placeholder instance backing the new-schedule form.
    dummy_schedule = TeamSchedule(team=request.team)
    # Only bind POST data to the new-schedule form when this POST is not
    # an edit of an existing schedule.
    data = None if schedule_id else request.POST or None
    new_schedule_form = TeamScheduleForm(request.team, data, instance=dummy_schedule)
    saved = False
    if request.method == 'POST' and not schedule_id:
        if new_schedule_form.is_valid():
            new_schedule_form.save()
            saved = True
            msg = 'New Schedule Added'
    schedule_forms = []
    for schedule in request.team.get_schedules():
        data = None
        # Bind POST data only to the schedule being edited; the others
        # render unbound from their saved instance.
        if schedule.id == schedule_id:
            data = request.POST
        schedule_form = TeamScheduleForm(request.team, data, instance=schedule)
        if data and schedule_form.is_valid():
            schedule_form.save()
            msg = 'Schedule Updated'
        schedule_forms.append(schedule_form)
    if msg:
        messages.success(request, msg)
    context = {
        'title': 'Edit Schedule',
        'active_schedule': request.team.get_active_schedule(),
        'schedule_forms': schedule_forms,
        'new_schedule_form': new_schedule_form,
        # Keep the new-schedule form collapsed unless a failed new-schedule
        # POST means its validation errors should be visible.
        'hidden_schedule_form': not saved or request.method != 'POST',
    }
    return render(request, 'edit_schedule.html', context)
@require_authentication()
@require_selected_team()
def delete_schedule(request):
    """Delete the team schedule named by ``?id=`` and return to the editor."""
    schedule_id = request.GET.get('id')  # renamed: `id` shadowed the builtin
    if schedule_id:
        try:
            # .get() raises DoesNotExist rather than returning None; an
            # unknown id previously crashed with a 500 instead of flashing
            # an error message.
            schedule = TeamSchedule.objects.get(team=request.team, id=schedule_id)
            schedule.delete()
            messages.success(request, 'Schedule %s Deleted' % (schedule.name, ))
        except ObjectDoesNotExist:
            messages.error(request, 'Unknown Schedule Id')
    else:
        messages.error(request, 'Unknown Schedule Id')
    return HttpResponseRedirect(reverse('edit-schedule'))
@require_authentication()
@require_selected_team()
def partial(request, partial):
    """Render the named partial template and return its HTML as JSON.

    NOTE(review): `partial` comes from the URL and is interpolated into a
    template path — confirm the URLconf pattern restricts it (e.g. \\w+).
    """
    template_context = {
        'request': request,
        'user': request.user,
        'team': request.team,
    }
    rendered = render_to_string('partials/%s.html' % (partial, ), template_context)
    return JsonResponse({'html': rendered})
@require_authentication()
@require_selected_team()
def team_calendar(request):
    """Export the team's active on-call schedule as an iCal feed.

    Walks 90 days forward from today's schedule starting hour and emits
    one calendar event per contiguous on-call stretch.  Raises Http404
    when the team has no active schedule.
    """
    schedule = request.team.get_active_schedule()
    if not schedule:
        # Fix: Http404 is an exception and must be *raised*; returning it
        # made Django fail with "view didn't return an HttpResponse".
        raise Http404('Unknown Calendar')
    feed = feedgenerator.ICal20Feed(
        title='Team %s On-Call Schedule %s' % (request.team.name, schedule.name),
        link=request.build_absolute_uri(request.path),
        description='Team %s On-Call Schedule %s' % (request.team.name, schedule.name),
        language='en',
        subtitle='Generated by SimpleOnCall',
        author_email='<EMAIL>',
        author_link='http://simpleoncall.com',
        author_name='SimpleOnCall',
        feed_url=request.build_absolute_uri(request.path)
    )
    now = timezone.now()
    starting_time = datetime.datetime(now.year, now.month, now.day, schedule.starting_time, tzinfo=timezone.utc)
    next_start_time = None
    currently_oncall = None
    for i in xrange(90):
        now = starting_time + datetime.timedelta(days=i)
        oncall = schedule.get_currently_on_call(now)
        if next_start_time is None:
            next_start_time = now
            currently_oncall = oncall
        elif currently_oncall.id != oncall.id:
            # On-call changed: close out the previous stretch as one event.
            # NOTE(review): title uses the incoming user while description
            # and the time range describe the outgoing one — looks swapped,
            # confirm intent.
            feed.add_item(
                title='%s On-Call' % (oncall.get_display_name(), ),
                link=request.build_absolute_uri(reverse('schedule')),
                description='%s On-Call' % (currently_oncall.get_display_name(), ),
                start_datetime=next_start_time,
                end_datetime=now
            )
            next_start_time = now
            currently_oncall = oncall
    # Emit the final, still-open stretch.
    feed.add_item(
        title='%s On-Call' % (oncall.get_display_name(), ),
        link=request.build_absolute_uri(reverse('schedule')),
        description='%s On-Call' % (currently_oncall.get_display_name(), ),
        start_datetime=next_start_time,
        end_datetime=now
    )
    results = None
    with contextlib.closing(StringIO.StringIO()) as output:
        feed.write(output, 'utf-8')
        results = output.getvalue()
    if results is not None:
        return HttpResponse(results, content_type='text/calendar; charset=utf-8')
    return HttpResponseBadRequest('Could not generate iCal at this time')
| [
"simpleoncall.forms.schedule.TeamScheduleForm",
"django.http.HttpResponseBadRequest",
"simpleoncall.models.Alert.objects.get",
"django.db.models.Count",
"django.contrib.messages.warning",
"django.utils.http.urlencode",
"django.core.urlresolvers.reverse",
"django.utils.http.urlquote",
"simpleoncall.f... | [((1151, 1175), 'simpleoncall.decorators.require_authentication', 'require_authentication', ([], {}), '()\n', (1173, 1175), False, 'from simpleoncall.decorators import require_authentication, require_selected_team\n'), ((1177, 1200), 'simpleoncall.decorators.require_selected_team', 'require_selected_team', ([], {}), '()\n', (1198, 1200), False, 'from simpleoncall.decorators import require_authentication, require_selected_team\n'), ((3601, 3625), 'simpleoncall.decorators.require_authentication', 'require_authentication', ([], {}), '()\n', (3623, 3625), False, 'from simpleoncall.decorators import require_authentication, require_selected_team\n'), ((3627, 3650), 'simpleoncall.decorators.require_selected_team', 'require_selected_team', ([], {}), '()\n', (3648, 3650), False, 'from simpleoncall.decorators import require_authentication, require_selected_team\n'), ((4028, 4052), 'simpleoncall.decorators.require_authentication', 'require_authentication', ([], {}), '()\n', (4050, 4052), False, 'from simpleoncall.decorators import require_authentication, require_selected_team\n'), ((4528, 4552), 'simpleoncall.decorators.require_authentication', 'require_authentication', ([], {}), '()\n', (4550, 4552), False, 'from simpleoncall.decorators import require_authentication, require_selected_team\n'), ((4554, 4577), 'simpleoncall.decorators.require_selected_team', 'require_selected_team', ([], {}), '()\n', (4575, 4577), False, 'from simpleoncall.decorators import require_authentication, require_selected_team\n'), ((4914, 4938), 'simpleoncall.decorators.require_authentication', 'require_authentication', ([], {}), '()\n', (4936, 4938), False, 'from simpleoncall.decorators import require_authentication, require_selected_team\n'), ((4940, 4963), 'simpleoncall.decorators.require_selected_team', 'require_selected_team', ([], {}), '()\n', (4961, 4963), False, 'from simpleoncall.decorators import require_authentication, require_selected_team\n'), ((5285, 5327), 
'simpleoncall.decorators.require_authentication', 'require_authentication', ([], {'require_team': '(False)'}), '(require_team=False)\n', (5307, 5327), False, 'from simpleoncall.decorators import require_authentication, require_selected_team\n'), ((5787, 5811), 'simpleoncall.decorators.require_authentication', 'require_authentication', ([], {}), '()\n', (5809, 5811), False, 'from simpleoncall.decorators import require_authentication, require_selected_team\n'), ((7468, 7492), 'simpleoncall.decorators.require_authentication', 'require_authentication', ([], {}), '()\n', (7490, 7492), False, 'from simpleoncall.decorators import require_authentication, require_selected_team\n'), ((7494, 7517), 'simpleoncall.decorators.require_selected_team', 'require_selected_team', ([], {}), '()\n', (7515, 7517), False, 'from simpleoncall.decorators import require_authentication, require_selected_team\n'), ((8048, 8072), 'simpleoncall.decorators.require_authentication', 'require_authentication', ([], {}), '()\n', (8070, 8072), False, 'from simpleoncall.decorators import require_authentication, require_selected_team\n'), ((8074, 8097), 'simpleoncall.decorators.require_selected_team', 'require_selected_team', ([], {}), '()\n', (8095, 8097), False, 'from simpleoncall.decorators import require_authentication, require_selected_team\n'), ((8635, 8659), 'simpleoncall.decorators.require_authentication', 'require_authentication', ([], {}), '()\n', (8657, 8659), False, 'from simpleoncall.decorators import require_authentication, require_selected_team\n'), ((8661, 8684), 'simpleoncall.decorators.require_selected_team', 'require_selected_team', ([], {}), '()\n', (8682, 8684), False, 'from simpleoncall.decorators import require_authentication, require_selected_team\n'), ((9061, 9085), 'simpleoncall.decorators.require_authentication', 'require_authentication', ([], {}), '()\n', (9083, 9085), False, 'from simpleoncall.decorators import require_authentication, require_selected_team\n'), ((9087, 9110), 
'simpleoncall.decorators.require_selected_team', 'require_selected_team', ([], {}), '()\n', (9108, 9110), False, 'from simpleoncall.decorators import require_authentication, require_selected_team\n'), ((10481, 10505), 'simpleoncall.decorators.require_authentication', 'require_authentication', ([], {}), '()\n', (10503, 10505), False, 'from simpleoncall.decorators import require_authentication, require_selected_team\n'), ((10507, 10530), 'simpleoncall.decorators.require_selected_team', 'require_selected_team', ([], {}), '()\n', (10528, 10530), False, 'from simpleoncall.decorators import require_authentication, require_selected_team\n'), ((10902, 10926), 'simpleoncall.decorators.require_authentication', 'require_authentication', ([], {}), '()\n', (10924, 10926), False, 'from simpleoncall.decorators import require_authentication, require_selected_team\n'), ((10928, 10951), 'simpleoncall.decorators.require_selected_team', 'require_selected_team', ([], {}), '()\n', (10949, 10951), False, 'from simpleoncall.decorators import require_authentication, require_selected_team\n'), ((11207, 11231), 'simpleoncall.decorators.require_authentication', 'require_authentication', ([], {}), '()\n', (11229, 11231), False, 'from simpleoncall.decorators import require_authentication, require_selected_team\n'), ((11233, 11256), 'simpleoncall.decorators.require_selected_team', 'require_selected_team', ([], {}), '()\n', (11254, 11256), False, 'from simpleoncall.decorators import require_authentication, require_selected_team\n'), ((1235, 1249), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1247, 1249), False, 'from django.utils import timezone\n'), ((2735, 2777), 'django.shortcuts.render', 'render', (['request', '"""dashboard.html"""', 'context'], {}), "(request, 'dashboard.html', context)\n", (2741, 2777), False, 'from django.shortcuts import render\n'), ((3070, 3108), 'django.shortcuts.render', 'render', (['request', '"""login.html"""', 'context'], {}), "(request, 
'login.html', context)\n", (3076, 3108), False, 'from django.shortcuts import render\n'), ((3461, 3499), 'django.shortcuts.render', 'render', (['request', '"""login.html"""', 'context'], {}), "(request, 'login.html', context)\n", (3467, 3499), False, 'from django.shortcuts import render\n'), ((3527, 3547), 'django.contrib.auth.logout', 'logout_user', (['request'], {}), '(request)\n', (3538, 3547), True, 'from django.contrib.auth import logout as logout_user\n'), ((3689, 3729), 'simpleoncall.models.APIKey.objects.filter', 'APIKey.objects.filter', ([], {'team': 'request.team'}), '(team=request.team)\n', (3710, 3729), False, 'from simpleoncall.models import APIKey, TeamMember, TeamInvite, User, TeamSchedule\n'), ((3744, 3788), 'simpleoncall.models.TeamMember.objects.filter', 'TeamMember.objects.filter', ([], {'team': 'request.team'}), '(team=request.team)\n', (3769, 3788), False, 'from simpleoncall.models import APIKey, TeamMember, TeamInvite, User, TeamSchedule\n'), ((3983, 4024), 'django.shortcuts.render', 'render', (['request', '"""settings.html"""', 'context'], {}), "(request, 'settings.html', context)\n", (3989, 4024), False, 'from django.shortcuts import render\n'), ((4484, 4524), 'django.shortcuts.render', 'render', (['request', '"""account.html"""', 'context'], {}), "(request, 'account.html', context)\n", (4490, 4524), False, 'from django.shortcuts import render\n'), ((4871, 4910), 'django.shortcuts.render', 'render', (['request', '"""alerts.html"""', 'context'], {}), "(request, 'alerts.html', context)\n", (4877, 4910), False, 'from django.shortcuts import render\n'), ((5240, 5281), 'django.shortcuts.render', 'render', (['request', '"""schedule.html"""', 'context'], {}), "(request, 'schedule.html', context)\n", (5246, 5281), False, 'from django.shortcuts import render\n'), ((5377, 5413), 'simpleoncall.forms.team.CreateTeamForm', 'CreateTeamForm', (['(request.POST or None)'], {}), '(request.POST or None)\n', (5391, 5413), False, 'from simpleoncall.forms.team 
import CreateTeamForm, SelectTeamForm, InviteTeamForm\n'), ((5739, 5783), 'django.shortcuts.render', 'render', (['request', '"""team/create.html"""', 'context'], {}), "(request, 'team/create.html', context)\n", (5745, 5783), False, 'from django.shortcuts import render\n'), ((5861, 5911), 'simpleoncall.forms.team.SelectTeamForm', 'SelectTeamForm', (['(request.POST or None)', 'request.user'], {}), '(request.POST or None, request.user)\n', (5875, 5911), False, 'from simpleoncall.forms.team import CreateTeamForm, SelectTeamForm, InviteTeamForm\n'), ((6232, 6276), 'django.shortcuts.render', 'render', (['request', '"""team/select.html"""', 'context'], {}), "(request, 'team/select.html', context)\n", (6238, 6276), False, 'from django.shortcuts import render\n'), ((6481, 6534), 'simpleoncall.models.TeamInvite.objects.get', 'TeamInvite.objects.get', ([], {'invite_code': 'code', 'email': 'email'}), '(invite_code=code, email=email)\n', (6503, 6534), False, 'from simpleoncall.models import APIKey, TeamMember, TeamInvite, User, TeamSchedule\n'), ((6624, 6653), 'simpleoncall.models.User.objects.get', 'User.objects.get', ([], {'email': 'email'}), '(email=email)\n', (6640, 6653), False, 'from simpleoncall.models import APIKey, TeamMember, TeamInvite, User, TeamSchedule\n'), ((7564, 7594), 'simpleoncall.models.Alert.objects.get', 'Alert.objects.get', ([], {'id': 'alert_id'}), '(id=alert_id)\n', (7581, 7594), False, 'from simpleoncall.models import Alert, EventStatus, NotificationSetting, NotificationType\n'), ((8148, 8197), 'simpleoncall.models.Alert.objects.get', 'Alert.objects.get', ([], {'id': 'alert_id', 'team': 'request.team'}), '(id=alert_id, team=request.team)\n', (8165, 8197), False, 'from simpleoncall.models import Alert, EventStatus, NotificationSetting, NotificationType\n'), ((8732, 8781), 'simpleoncall.models.Alert.objects.get', 'Alert.objects.get', ([], {'id': 'alert_id', 'team': 'request.team'}), '(id=alert_id, team=request.team)\n', (8749, 8781), False, 'from 
simpleoncall.models import Alert, EventStatus, NotificationSetting, NotificationType\n'), ((9019, 9057), 'django.shortcuts.render', 'render', (['request', '"""alert.html"""', 'context'], {}), "(request, 'alert.html', context)\n", (9025, 9057), False, 'from django.shortcuts import render\n'), ((9292, 9323), 'simpleoncall.models.TeamSchedule', 'TeamSchedule', ([], {'team': 'request.team'}), '(team=request.team)\n', (9304, 9323), False, 'from simpleoncall.models import APIKey, TeamMember, TeamInvite, User, TeamSchedule\n'), ((9405, 9466), 'simpleoncall.forms.schedule.TeamScheduleForm', 'TeamScheduleForm', (['request.team', 'data'], {'instance': 'dummy_schedule'}), '(request.team, data, instance=dummy_schedule)\n', (9421, 9466), False, 'from simpleoncall.forms.schedule import TeamScheduleForm\n'), ((10431, 10477), 'django.shortcuts.render', 'render', (['request', '"""edit_schedule.html"""', 'context'], {}), "(request, 'edit_schedule.html', context)\n", (10437, 10477), False, 'from django.shortcuts import render\n'), ((11104, 11162), 'django.template.loader.render_to_string', 'render_to_string', (["('partials/%s.html' % (partial,))", 'context'], {}), "('partials/%s.html' % (partial,), context)\n", (11120, 11162), False, 'from django.template.loader import render_to_string\n'), ((11175, 11203), 'django.http.JsonResponse', 'JsonResponse', (["{'html': html}"], {}), "({'html': html})\n", (11187, 11203), False, 'from django.http import HttpResponseRedirect, JsonResponse, Http404, HttpResponse, HttpResponseBadRequest\n'), ((11919, 11933), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (11931, 11933), False, 'from django.utils import timezone\n'), ((11954, 12050), 'datetime.datetime', 'datetime.datetime', (['now.year', 'now.month', 'now.day', 'schedule.starting_time'], {'tzinfo': 'timezone.utc'}), '(now.year, now.month, now.day, schedule.starting_time,\n tzinfo=timezone.utc)\n', (11971, 12050), False, 'import datetime\n'), ((13370, 13432), 
'django.http.HttpResponseBadRequest', 'HttpResponseBadRequest', (['"""Could not generate iCal at this time"""'], {}), "('Could not generate iCal at this time')\n", (13392, 13432), False, 'from django.http import HttpResponseRedirect, JsonResponse, Http404, HttpResponse, HttpResponseBadRequest\n'), ((1268, 1296), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': '(12)'}), '(hours=12)\n', (1286, 1296), False, 'import datetime\n'), ((2139, 2169), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(60)'}), '(minutes=60)\n', (2157, 2169), False, 'import datetime\n'), ((2688, 2716), 'json.dumps', 'json.dumps', (['alert_timeseries'], {}), '(alert_timeseries)\n', (2698, 2716), False, 'import json\n'), ((2937, 2957), 'simpleoncall.forms.auth.AuthenticationForm', 'AuthenticationForm', ([], {}), '()\n', (2955, 2957), False, 'from simpleoncall.forms.auth import AuthenticationForm, RegistrationForm\n'), ((2984, 3002), 'simpleoncall.forms.auth.RegistrationForm', 'RegistrationForm', ([], {}), '()\n', (3000, 3002), False, 'from simpleoncall.forms.auth import AuthenticationForm, RegistrationForm\n'), ((3271, 3291), 'simpleoncall.forms.auth.AuthenticationForm', 'AuthenticationForm', ([], {}), '()\n', (3289, 3291), False, 'from simpleoncall.forms.auth import AuthenticationForm, RegistrationForm\n'), ((3318, 3336), 'simpleoncall.forms.auth.RegistrationForm', 'RegistrationForm', ([], {}), '()\n', (3334, 3336), False, 'from simpleoncall.forms.auth import AuthenticationForm, RegistrationForm\n'), ((3580, 3596), 'django.core.urlresolvers.reverse', 'reverse', (['"""login"""'], {}), "('login')\n", (3587, 3596), False, 'from django.core.urlresolvers import reverse\n'), ((3948, 3964), 'simpleoncall.forms.team.InviteTeamForm', 'InviteTeamForm', ([], {}), '()\n', (3962, 3964), False, 'from simpleoncall.forms.team import CreateTeamForm, SelectTeamForm, InviteTeamForm\n'), ((4326, 4364), 'simpleoncall.forms.account.EditAccountForm', 'EditAccountForm', ([], {'instance': 
'request.user'}), '(instance=request.user)\n', (4341, 4364), False, 'from simpleoncall.forms.account import EditAccountForm, ChangePasswordForm\n'), ((4398, 4439), 'simpleoncall.forms.account.ChangePasswordForm', 'ChangePasswordForm', ([], {'instance': 'request.user'}), '(instance=request.user)\n', (4416, 4439), False, 'from simpleoncall.forms.account import EditAccountForm, ChangePasswordForm\n'), ((5504, 5564), 'django.contrib.messages.success', 'messages.success', (['request', "('New team %s created' % team.name)"], {}), "(request, 'New team %s created' % team.name)\n", (5520, 5564), False, 'from django.contrib import messages\n'), ((6002, 6061), 'django.contrib.messages.success', 'messages.success', (['request', "('Team changed to %s' % team.name)"], {}), "(request, 'Team changed to %s' % team.name)\n", (6018, 6061), False, 'from django.contrib import messages\n'), ((7379, 7409), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['redirect'], {}), '(redirect)\n', (7399, 7409), False, 'from django.http import HttpResponseRedirect, JsonResponse, Http404, HttpResponse, HttpResponseBadRequest\n'), ((7443, 7463), 'django.core.urlresolvers.reverse', 'reverse', (['"""dashboard"""'], {}), "('dashboard')\n", (7450, 7463), False, 'from django.core.urlresolvers import reverse\n'), ((7621, 7684), 'django.contrib.messages.error', 'messages.error', (['request', "('Alert %s was not found' % (alert_id,))"], {}), "(request, 'Alert %s was not found' % (alert_id,))\n", (7635, 7684), False, 'from django.contrib import messages\n'), ((8026, 8043), 'django.core.urlresolvers.reverse', 'reverse', (['"""alerts"""'], {}), "('alerts')\n", (8033, 8043), False, 'from django.core.urlresolvers import reverse\n'), ((8224, 8287), 'django.contrib.messages.error', 'messages.error', (['request', "('Alert %s was not found' % (alert_id,))"], {}), "(request, 'Alert %s was not found' % (alert_id,))\n", (8238, 8287), False, 'from django.contrib import messages\n'), ((8613, 8630), 
'django.core.urlresolvers.reverse', 'reverse', (['"""alerts"""'], {}), "('alerts')\n", (8620, 8630), False, 'from django.core.urlresolvers import reverse\n'), ((8808, 8871), 'django.contrib.messages.error', 'messages.error', (['request', "('Alert %s was not found' % (alert_id,))"], {}), "(request, 'Alert %s was not found' % (alert_id,))\n", (8822, 8871), False, 'from django.contrib import messages\n'), ((9870, 9925), 'simpleoncall.forms.schedule.TeamScheduleForm', 'TeamScheduleForm', (['request.team', 'data'], {'instance': 'schedule'}), '(request.team, data, instance=schedule)\n', (9886, 9925), False, 'from simpleoncall.forms.schedule import TeamScheduleForm\n'), ((10108, 10138), 'django.contrib.messages.success', 'messages.success', (['request', 'msg'], {}), '(request, msg)\n', (10124, 10138), False, 'from django.contrib import messages\n'), ((10622, 10672), 'simpleoncall.models.TeamSchedule.objects.get', 'TeamSchedule.objects.get', ([], {'team': 'request.team', 'id': 'id'}), '(team=request.team, id=id)\n', (10646, 10672), False, 'from simpleoncall.models import APIKey, TeamMember, TeamInvite, User, TeamSchedule\n'), ((10707, 10774), 'django.contrib.messages.success', 'messages.success', (['request', "('Schedule %s Deleted' % (schedule.name,))"], {}), "(request, 'Schedule %s Deleted' % (schedule.name,))\n", (10723, 10774), False, 'from django.contrib import messages\n'), ((10794, 10840), 'django.contrib.messages.error', 'messages.error', (['request', '"""Unknown Schedule Id"""'], {}), "(request, 'Unknown Schedule Id')\n", (10808, 10840), False, 'from django.contrib import messages\n'), ((10873, 10897), 'django.core.urlresolvers.reverse', 'reverse', (['"""edit-schedule"""'], {}), "('edit-schedule')\n", (10880, 10897), False, 'from django.core.urlresolvers import reverse\n'), ((11371, 11397), 'django.http.Http404', 'Http404', (['"""Unkown Calendar"""'], {}), "('Unkown Calendar')\n", (11378, 11397), False, 'from django.http import HttpResponseRedirect, JsonResponse, 
Http404, HttpResponse, HttpResponseBadRequest\n'), ((13292, 13358), 'django.http.HttpResponse', 'HttpResponse', (['results'], {'content_type': '"""text/calendar; charset=utf-8"""'}), "(results, content_type='text/calendar; charset=utf-8')\n", (13304, 13358), False, 'from django.http import HttpResponseRedirect, JsonResponse, Http404, HttpResponse, HttpResponseBadRequest\n'), ((1598, 1613), 'django.db.models.Count', 'Count', (['"""status"""'], {}), "('status')\n", (1603, 1613), False, 'from django.db.models import Count\n'), ((1882, 1985), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(start.minute % 60)', 'seconds': 'start.second', 'microseconds': 'start.microsecond'}), '(minutes=start.minute % 60, seconds=start.second,\n microseconds=start.microsecond)\n', (1900, 1985), False, 'import datetime\n'), ((2262, 2365), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(added.minute % 60)', 'seconds': 'added.second', 'microseconds': 'added.microsecond'}), '(minutes=added.minute % 60, seconds=added.second,\n microseconds=added.microsecond)\n', (2280, 2365), False, 'import datetime\n'), ((2876, 2896), 'django.core.urlresolvers.reverse', 'reverse', (['"""dashboard"""'], {}), "('dashboard')\n", (2883, 2896), False, 'from django.core.urlresolvers import reverse\n'), ((3210, 3230), 'django.core.urlresolvers.reverse', 'reverse', (['"""dashboard"""'], {}), "('dashboard')\n", (3217, 3230), False, 'from django.core.urlresolvers import reverse\n'), ((4179, 4241), 'simpleoncall.models.NotificationSetting', 'NotificationSetting', ([], {'id': '(0)', 'type': 'NotificationType.EMAIL', 'time': '(0)'}), '(id=0, type=NotificationType.EMAIL, time=0)\n', (4198, 4241), False, 'from simpleoncall.models import Alert, EventStatus, NotificationSetting, NotificationType\n'), ((4617, 4656), 'simpleoncall.models.Alert.objects.filter', 'Alert.objects.filter', ([], {'team': 'request.team'}), '(team=request.team)\n', (4637, 4656), False, 'from simpleoncall.models import 
Alert, EventStatus, NotificationSetting, NotificationType\n'), ((5601, 5621), 'django.core.urlresolvers.reverse', 'reverse', (['"""dashboard"""'], {}), "('dashboard')\n", (5608, 5621), False, 'from django.core.urlresolvers import reverse\n'), ((6098, 6118), 'django.core.urlresolvers.reverse', 'reverse', (['"""dashboard"""'], {}), "('dashboard')\n", (6105, 6118), False, 'from django.core.urlresolvers import reverse\n'), ((6445, 6465), 'django.core.urlresolvers.reverse', 'reverse', (['"""dashboard"""'], {}), "('dashboard')\n", (6452, 6465), False, 'from django.core.urlresolvers import reverse\n'), ((6590, 6610), 'django.core.urlresolvers.reverse', 'reverse', (['"""dashboard"""'], {}), "('dashboard')\n", (6597, 6610), False, 'from django.core.urlresolvers import reverse\n'), ((6706, 6757), 'simpleoncall.models.TeamMember.objects.get', 'TeamMember.objects.get', ([], {'team': 'invite.team', 'user': 'user'}), '(team=invite.team, user=user)\n', (6728, 6757), False, 'from simpleoncall.models import APIKey, TeamMember, TeamInvite, User, TeamSchedule\n'), ((6860, 6938), 'django.contrib.messages.warning', 'messages.warning', (['request', "('already a member of team %s' % (invite.team.name,))"], {}), "(request, 'already a member of team %s' % (invite.team.name,))\n", (6876, 6938), False, 'from django.contrib import messages\n'), ((6980, 7019), 'simpleoncall.models.TeamMember', 'TeamMember', ([], {'team': 'invite.team', 'user': 'user'}), '(team=invite.team, user=user)\n', (6990, 7019), False, 'from simpleoncall.models import APIKey, TeamMember, TeamInvite, User, TeamSchedule\n'), ((7063, 7130), 'django.contrib.messages.success', 'messages.success', (['request', "('added to team %s' % (invite.team.name,))"], {}), "(request, 'added to team %s' % (invite.team.name,))\n", (7079, 7130), False, 'from django.contrib import messages\n'), ((7745, 7817), 'django.contrib.messages.warning', 'messages.warning', (['request', "('Alert %s already acknowledged' % (alert_id,))"], {}), "(request, 
'Alert %s already acknowledged' % (alert_id,))\n", (7761, 7817), False, 'from django.contrib import messages\n'), ((7923, 7991), 'django.contrib.messages.success', 'messages.success', (['request', "('Alert %s was acknowledged' % (alert_id,))"], {}), "(request, 'Alert %s was acknowledged' % (alert_id,))\n", (7939, 7991), False, 'from django.contrib import messages\n'), ((8344, 8412), 'django.contrib.messages.warning', 'messages.warning', (['request', "('Alert %s already resolved' % (alert_id,))"], {}), "(request, 'Alert %s already resolved' % (alert_id,))\n", (8360, 8412), False, 'from django.contrib import messages\n'), ((8514, 8578), 'django.contrib.messages.success', 'messages.success', (['request', "('Alert %s was resolved' % (alert_id,))"], {}), "(request, 'Alert %s was resolved' % (alert_id,))\n", (8530, 8578), False, 'from django.contrib import messages\n'), ((8909, 8929), 'django.core.urlresolvers.reverse', 'reverse', (['"""dashboard"""'], {}), "('dashboard')\n", (8916, 8929), False, 'from django.core.urlresolvers import reverse\n'), ((12157, 12183), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'i'}), '(days=i)\n', (12175, 12183), False, 'import datetime\n'), ((13145, 13164), 'StringIO.StringIO', 'StringIO.StringIO', ([], {}), '()\n', (13162, 13164), False, 'import StringIO\n'), ((1348, 1424), 'simpleoncall.models.Alert.objects.filter', 'Alert.objects.filter', ([], {'team': 'request.team', 'date_added__range': 'date_added__range'}), '(team=request.team, date_added__range=date_added__range)\n', (1368, 1424), False, 'from simpleoncall.models import Alert, EventStatus, NotificationSetting, NotificationType\n'), ((4678, 4717), 'simpleoncall.models.Alert.objects.filter', 'Alert.objects.filter', ([], {'team': 'request.team'}), '(team=request.team)\n', (4698, 4717), False, 'from simpleoncall.models import Alert, EventStatus, NotificationSetting, NotificationType\n'), ((7249, 7273), 'django.core.urlresolvers.reverse', 'reverse', 
(['"""invite-accept"""'], {}), "('invite-accept')\n", (7256, 7273), False, 'from django.core.urlresolvers import reverse\n'), ((7275, 7290), 'django.utils.http.urlencode', 'urlencode', (['args'], {}), '(args)\n', (7284, 7290), False, 'from django.utils.http import urlencode, urlquote\n'), ((7327, 7346), 'django.core.urlresolvers.reverse', 'reverse', (['"""register"""'], {}), "('register')\n", (7334, 7346), False, 'from django.core.urlresolvers import reverse\n'), ((7348, 7362), 'django.utils.http.urlquote', 'urlquote', (['next'], {}), '(next)\n', (7356, 7362), False, 'from django.utils.http import urlencode, urlquote\n'), ((12928, 12947), 'django.core.urlresolvers.reverse', 'reverse', (['"""schedule"""'], {}), "('schedule')\n", (12935, 12947), False, 'from django.core.urlresolvers import reverse\n'), ((1475, 1551), 'simpleoncall.models.Alert.objects.filter', 'Alert.objects.filter', ([], {'team': 'request.team', 'date_added__range': 'date_added__range'}), '(team=request.team, date_added__range=date_added__range)\n', (1495, 1551), False, 'from simpleoncall.models import Alert, EventStatus, NotificationSetting, NotificationType\n'), ((1761, 1780), 'django.db.models.Count', 'Count', (['"""date_added"""'], {}), "('date_added')\n", (1766, 1780), False, 'from django.db.models import Count\n'), ((1634, 1710), 'simpleoncall.models.Alert.objects.filter', 'Alert.objects.filter', ([], {'team': 'request.team', 'date_added__range': 'date_added__range'}), '(team=request.team, date_added__range=date_added__range)\n', (1654, 1710), False, 'from simpleoncall.models import Alert, EventStatus, NotificationSetting, NotificationType\n'), ((12535, 12554), 'django.core.urlresolvers.reverse', 'reverse', (['"""schedule"""'], {}), "('schedule')\n", (12542, 12554), False, 'from django.core.urlresolvers import reverse\n')] |
from bs4 import BeautifulSoup
import requests
import os
""" This script download all songs with given genre from midiworld.com
"""
genre_name = input(
"type in genre name (lowercase, no space, no special characters): ")
# Just in case someone don't respect the rules.
genre_name = genre_name.lower()
genre_name = genre_name.strip()
genre_name = "".join(genre_name.split(" "))
folder = os.path.join("genresDataset", genre_name, "midiworld")
if not os.path.isdir(folder):
os.mkdir(folder)
#Here I was lazy, the biggest genre on that page has 38 pages so I've done it that way.
#If there is no page we will not get any answer, and just run the loop withouth doing anything.
for i in range(1, 38):
URL = f"https://www.midiworld.com/search/{i}/?q={genre_name}"
page = requests.get(URL)
soup = BeautifulSoup(page.content, "html.parser")
results = soup.find_all("li")
for r in results:
link = r.find("a")
if link:
if "download" in link:
link = link['href']
song_title = r.text.split("-")[0].strip()
print(f"Downloading: {song_title}")
song = requests.get(link)
with open(os.path.join(folder, song_title + ".mid"), "wb") as f:
f.write(song.content)
| [
"os.path.join",
"requests.get",
"bs4.BeautifulSoup",
"os.path.isdir",
"os.mkdir"
] | [((392, 446), 'os.path.join', 'os.path.join', (['"""genresDataset"""', 'genre_name', '"""midiworld"""'], {}), "('genresDataset', genre_name, 'midiworld')\n", (404, 446), False, 'import os\n'), ((454, 475), 'os.path.isdir', 'os.path.isdir', (['folder'], {}), '(folder)\n', (467, 475), False, 'import os\n'), ((481, 497), 'os.mkdir', 'os.mkdir', (['folder'], {}), '(folder)\n', (489, 497), False, 'import os\n'), ((783, 800), 'requests.get', 'requests.get', (['URL'], {}), '(URL)\n', (795, 800), False, 'import requests\n'), ((813, 855), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page.content', '"""html.parser"""'], {}), "(page.content, 'html.parser')\n", (826, 855), False, 'from bs4 import BeautifulSoup\n'), ((1160, 1178), 'requests.get', 'requests.get', (['link'], {}), '(link)\n', (1172, 1178), False, 'import requests\n'), ((1205, 1246), 'os.path.join', 'os.path.join', (['folder', "(song_title + '.mid')"], {}), "(folder, song_title + '.mid')\n", (1217, 1246), False, 'import os\n')] |
#!/usr/bin/env python
# coding: utf-8
__author__ = '<NAME>'
__copyright__ = 'Copyright 2017-2020, <NAME>'
__license__ = 'MIT'
__version__ = '0.5'
__email__ = '<EMAIL>'
__status__ = 'Development'
__description__ = 'Tkinter based GUI, visualizing PASCAL VOC object detection annotation'
"""
Changelog:
- 2020-06-16 11:39 v0.5
Support specifying ignore and not ignore class names. Better logger. Fix MacOS font.
- 2020-06-13 00:48 v0.4
API change: add class name mapping dict, mapping xml class name to shown class name.
Based on this, ImageNet2012 and self-defined VOC format style dataset labels can show.
Supported image extension: bmp, jpg, jpeg, png and their upper cases.
- 2020-06-09 23:14 v0.3
User select saving directory(optional) for picking up interested images.
By pressing left control button, selected image is saved.
- 2020-06-02 16:40 v0.2
User choose image and annotation folders separately. Better UI layout.
Colorful boxes and class name text.
- 2020-06-01 14:44 v0.1
Draw object class name. Add license. Polish meta info. Adjust UI.
- 2017.10.22 22:36 v0.0
Created project. Dependencies: Python, Tkinter(GUI), opencv(image processing),
lxml(annotation parsing).
You may need this: pip install --upgrade image pillow lxml numpy
"""
from PIL import Image, ImageTk, ImageFont, ImageDraw # pillow module
import os
import cv2
from lxml import etree
import numpy as np
import random
import colorsys
import shutil
import platform
import matplotlib.font_manager as fm # to create font
import six
import logging
from natsort import natsorted
import time
if six.PY3:
import tkinter as tk
from tkinter.filedialog import askdirectory
else:
import Tkinter as tk
from tkFileDialog import askdirectory
def draw_text(im, text, text_org, color=(0,0,255,0), font=None):
"""
Draw text on OpenCV's Image (ndarray)
Implemented by: ndarray -> pil's image -> draw text -> ndarray
Note: OpenCV puttext's drawback: font too large, no anti-alias, can't show Chinese chars
@param im: opencv loaded image
@param text: text(string) to be put. support Chinese
@param font: font, e.g. ImageFont.truetype('C:/Windows/Fonts/msyh.ttc', font_size)
Example Usage:
font_size = 20
font = ImageFont.truetype('C:/Windows/Fonts/msyh.ttc', font_size)
text_org = (256, 256)
im = draw_text(im, "object", text_org, font)
"""
im_pil = Image.fromarray(im)
draw = ImageDraw.Draw(im_pil)
draw.text(text_org, text, font=font, fill=color)
return np.array(im_pil)
class BndBox(object):
def __init__(self, x1=0, y1=0, x2=0, y2=0, cls_name=None):
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.cls_name = cls_name # class name
class PascalVOC2007XML:
def __init__(self, xml_pth):
# TODO: validate xml_pth's content
self.tree = etree.parse(xml_pth)
self.boxes = []
def get_boxes(self):
if len(self.boxes) == 0:
for obj in self.tree.xpath('//object'):
box = BndBox()
for item in obj.getchildren():
if (item.tag=='name'):
box.cls_name = item.text
elif (item.tag=='bndbox'):
coords = [int(float(_.text)) for _ in item.getchildren()]
box.x1, box.y1, box.x2, box.y2 = coords
self.boxes.append(box)
return self.boxes
def get_color_table(num_cls=20):
hsv_tuples = [(x*1.0 / num_cls, 1., 1.) for x in range(num_cls)]
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))
random.seed(42)
random.shuffle(colors)
random.seed(None)
return colors
class VOCViewer(tk.Tk):
def __init__(self, im_dir=None, anno_dir=None, save_dir=None, max_width=None, max_height=None, box_thick=1,
name_mapping=None, ignore_names=None, not_ignore_names=None):
"""
@param im_dir: the directory which contains images, e.g. "JPEGImages"
@param max_width: max image width when image is displayed
@param max_height: max image height when image is displayed
@param box_thick: thickness of bounding box
@param name_mapping: dict of: class name in XML => class name to be viewed
@param ignore_names: list of class names that will be ignored on viewer
@param not_ignore_names: list of all class names to be viewed
@note `ignore_names` and `not_ignore_names` shouldn't be setting at the same time
@note loading image: tk doesn't support directly load image. Pillow module is required as intermidiate stuff.
"""
#super().__init__() # not working for Python2
tk.Tk.__init__(self)
self.init_logger()
self.init_layout(im_dir, anno_dir, save_dir, max_width, max_height, box_thick)
self.init_dataset(name_mapping, ignore_names, not_ignore_names)
def init_logger(self):
logger = logging.getLogger()
logger.setLevel(logging.WARN)
formatter = logging.Formatter(
'%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
time_line = time.strftime('%Y%m%d%H%M', time.localtime(time.time()))
logfile = os.getcwd() + '/view-' + time_line + '.log'
# print to file via FileHandler
fh = logging.FileHandler(logfile)
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
# print to screen via StreamHandler
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
# add two Handler
logger.addHandler(ch)
logger.addHandler(fh)
self.logger = logger
def should_ignore(self, cls_name):
if self.ignore_names is not None:
if cls_name in self.ignore_names:
return True
else:
return False
if self.not_ignore_names is not None:
if cls_name in self.not_ignore_names:
return False
return True
return False
def init_dataset(self, name_mapping, ignore_names, not_ignore_names):
if (ignore_names is not None and not_ignore_names is not None):
self.logger.fatal("ignore_names and not_ignore_names can't be setting at the same time")
self.name_mapping = dict()
if name_mapping is not None:
self.name_mapping = name_mapping
self.ignore_names = None
if ignore_names is not None:
self.ignore_names = ignore_names
self.not_ignore_names = None
if not_ignore_names is not None:
self.not_ignore_names = not_ignore_names
self.color_table = get_color_table()
self.class_to_ind = dict()
for cls_name in self.name_mapping.keys():
next_ind = len(self.class_to_ind)
self.class_to_ind[cls_name] = next_ind
self.supported_im_ext = ['bmp', 'BMP', 'png', 'PNG',
'jpg', 'JPG', 'jpeg', 'JPEG', 'jpe', 'jif', 'jfif', 'jfi']
def get_color_by_cls_name(self, cls_name):
ind = self.class_to_ind[cls_name]
return self.color_table[ind]
def init_layout(self, im_dir, anno_dir, save_dir, max_width, max_height, box_thick):
# custom settings
self.max_width = max_width
self.max_height = max_height
self.box_thick = box_thick
self.bg = '#34373c'
self.fg = '#f2f2f2'
# MacOSX's tk is wired and I don't want tkmacosx
if platform.system()=='Darwin':
self.bg, self.fg = self.fg, self.bg
# set title, window size and background
self.title('ImageSet Viewer ' + __version__)
self.width = (int)(0.6 * self.winfo_screenwidth())
self.height = (int)(0.6 * self.winfo_screenheight())
self.geometry('%dx%d+200+100' % (self.width, self.height))
self.configure(bg=self.bg)
self.minsize(800, 600)
# Setting top level widget's row & column weight,
# children widgets won't stretch-and-fill-in until setting this weight
# ref: https://blog.csdn.net/acaic/article/details/80963688
self.rowconfigure(0,weight=1)
self.columnconfigure(0,weight=1)
# Top Level Layout: main_frame & side_frame
main_frame_width = (int)(0.8*self.width)
main_frame = tk.LabelFrame(self, bg=self.bg, width=main_frame_width)
main_frame.grid(row=0, column=0, padx=10, pady=10, sticky=tk.NSEW)
side_frame = tk.LabelFrame(self, bg=self.bg)
side_frame.grid(row=0, column=1, padx=10, pady=10, sticky=tk.NSEW)
# main_frame: directory_frame & image_frame
main_frame.rowconfigure(0, weight=20)
main_frame.rowconfigure(1, weight=80)
main_frame.columnconfigure(0, weight=1)
directory_frame = tk.LabelFrame(main_frame, bg=self.bg)
directory_frame.grid(row=0, column=0, sticky=tk.NSEW)
image_frame_height = (int)(0.7*self.height)
image_frame = tk.LabelFrame(main_frame, height=image_frame_height, bg=self.bg)
image_frame.grid(row=1, column=0, sticky=tk.NSEW)
# keep widgets size stay, instead of change when switching to another image
# ref: https://zhidao.baidu.com/question/1643979034294549180.html
image_frame.grid_propagate(0)
# image_frame
image_frame.rowconfigure(0, weight=1)
image_frame.columnconfigure(0, weight=1)
self.surface = self.get_surface_image() # Surface image
# self.surface = self.cv_to_tk(cv2.imread('surface.jpg')) # Use image file
self.image_label = tk.Label(image_frame, image=self.surface,
bg=self.bg, fg=self.fg,compound='center')
self.image_label.grid(row=0, column=0, sticky=tk.NSEW)
#self.image_label.bind('<Configure>', self.changeSize) #TODO
# side_frame
side_frame.rowconfigure(0, weight=5)
side_frame.rowconfigure(1, weight=95)
image_names_label = tk.Label(side_frame, text="Image Files", bg=self.bg, fg=self.fg)
image_names_label.grid(row=0, column=0)
self.scrollbar = tk.Scrollbar(side_frame, orient=tk.VERTICAL)
self.listbox = tk.Listbox(side_frame, yscrollcommand=self.scrollbar.set)
self.listbox.grid(row=1, column=0, sticky=tk.NS)
# directory_frame
directory_frame.rowconfigure(0, weight=5)
directory_frame.rowconfigure(1, weight=5)
directory_frame.rowconfigure(2, weight=5)
directory_frame.columnconfigure(0, weight=1)
directory_frame.columnconfigure(1, weight=9)
# im_dir button
choose_im_dir_btn = tk.Button(directory_frame, text='Image Directory',
command=self.select_image_directory, bg=self.bg, fg=self.fg)
choose_im_dir_btn.grid(row=0, column=0, sticky=tk.NSEW)
self.im_dir = tk.StringVar()
im_dir_entry = tk.Entry(directory_frame, text=self.im_dir, state='readonly')
im_dir_entry.grid(row=0, column=1, sticky=tk.NSEW)
self.im_names = []
if im_dir is not None:
self.im_dir.set(im_dir)
self.im_names = [_ for _ in os.listdir(self.im_dir.get())]
self.im_names = natsorted(self.im_names)
for im_name in self.im_names:
self.listbox.insert(tk.END, im_name)
self.listbox.bind('<<ListboxSelect>>', self.callback)
# more key binds see https://www.cnblogs.com/muziyunxuan/p/8297536.html
self.listbox.bind('<Control_L>', self.save_image)
self.scrollbar.config(command=self.listbox.yview)
self.scrollbar.grid(row=1, column=1, sticky=tk.NS)
# anno_dir button
choose_anno_dir_bn = tk.Button(directory_frame, text='Annotation Directory',
command=self.select_annotation_directory, bg=self.bg, fg=self.fg)
choose_anno_dir_bn.grid(row=1, column=0, sticky=tk.NSEW)
self.anno_dir = tk.StringVar()
anno_dir_entry = tk.Entry(directory_frame, text=self.anno_dir, state='readonly')
anno_dir_entry.grid(row=1, column=1, sticky=tk.NSEW)
if anno_dir is not None:
self.anno_dir.set(anno_dir)
# copy (save) dir button
choose_save_dir_btn = tk.Button(directory_frame, text='Copy Save Directory',
command=self.select_save_directory, bg=self.bg, fg=self.fg)
choose_save_dir_btn.grid(row=2, column=0, sticky=tk.NSEW)
self.save_dir = tk.StringVar()
save_dir_entry = tk.Entry(directory_frame, text=self.save_dir, state='readonly')
save_dir_entry.grid(row=2, column=1, sticky=tk.NSEW)
if save_dir is not None:
self.save_dir.set(save_dir)
def callback(self, event=None):
im_id = self.listbox.curselection()
if im_id:
im_id = im_id[0]
self.logger.info('im_id is {:d}'.format(im_id))
im_name = self.listbox.get(im_id)
im_ext = im_name.split('.')[-1]
if im_ext in self.supported_im_ext:
im_pth = os.path.join(self.im_dir.get(), im_name).replace('\\', '/')
self.tkim = self.get_tkim(im_pth)
self.image_label.configure(image=self.tkim)
#self.logger.debug(im_pth)
def save_image(self, event):
"""Save (copy) current displayed (original, no box) image to specified saving directory.
This is binding to left-control key now. Useful for manually picking up images.
"""
im_id = self.listbox.curselection()
if im_id:
im_name = self.listbox.get(im_id)
im_ext = im_name.split('.')[-1]
if im_ext in self.supported_im_ext:
im_pth = os.path.join(self.im_dir.get(), im_name).replace('\\', '/')
save_pth = os.path.join(self.save_dir.get(), im_name).replace('\\', '/')
shutil.copyfile(im_pth, save_pth)
self.logger.info('Save(copy) to {:s}'.format(save_pth))
#self.logger.debug(im_pth)
def get_tkim(self, im_pth):
"""
Load image and annotation, draw on image, and convert to image.
When necessary, image resizing is utilized.
"""
im = cv2.imread(im_pth)
self.logger.info('Image file is: {:s}'.format(im_pth))
im_ht, im_wt, im_dt = im.shape
if self.max_width is None or self.max_width >= im_wt:
show_width = im_wt
else:
show_width = self.max_width
if self.max_height is None or self.max_height >= im_ht:
show_height = im_ht
else:
show_height = self.max_height
scale_width = im_wt * 1.0 / show_width
scale_height = im_ht * 1.0 / show_height
if show_width!=im_wt or show_height!=im_ht:
im = cv2.resize(im, (show_width, show_height))
self.logger.info('doing resize, show_width={:d}, im_wt={:d}, show_height={:d}, im_ht={:d}'.format(show_width, im_wt, show_height, im_ht))
# xml_pth = im_pth.replace('JPEGImages', 'Annotations').replace('.jpg', '.xml').replace('.png', '.xml')
# We don't assume a standard PASCAL VOC dataset directory.
# User should choose image and annotation folder seperately.
im_head = '.'.join(im_pth.split('/')[-1].split('.')[:-1])
xml_pth = self.anno_dir.get() + '/' + im_head + '.xml'
if os.path.exists(xml_pth):
self.logger.info('XML annotation file is {:s}'.format(xml_pth))
boxes = self.parse_xml(xml_pth)
for box in boxes:
if self.should_ignore(box.cls_name): continue
if box.cls_name not in self.name_mapping.keys():
self.name_mapping[box.cls_name] = box.cls_name
next_ind = len(self.class_to_ind)
self.class_to_ind[box.cls_name] = next_ind
xmin = int(box.x1/scale_width)
ymin = int(box.y1/scale_height)
xmax = int(box.x2/scale_width)
ymax = int(box.y2/scale_height)
color = self.get_color_by_cls_name(box.cls_name)
cv2.rectangle(im, pt1=(xmin, ymin), pt2=(xmax, ymax),
color = color, thickness=self.box_thick)
font_size = 16
font = self.get_font(font_size)
tx = xmin
ty = ymin-20
if(ty<0):
ty = ymin+10
tx = xmin+10
text_org = (tx, ty)
show_text = self.name_mapping[box.cls_name]
self.logger.debug('box.cls_name is:' + box.cls_name)
self.logger.debug('show_text:' + show_text)
im = draw_text(im, show_text, text_org, color, font)
else:
self.logger.warning("XML annotation file {:s} doesn't exist".format(xml_pth))
return self.cv_to_tk(im)
@staticmethod
def cv_to_tk(im):
"""Convert OpenCV's (numpy) image to Tkinter-compatible photo image"""
im = im[:, :, ::-1] # bgr => rgb
return ImageTk.PhotoImage(Image.fromarray(im))
@staticmethod
def get_font(font_size):
font_pth = None
if platform.system()=='Windows':
font_pth = 'C:/Windows/Fonts/msyh.ttc'
elif (platform.system()=='Linux'):
font_pth = fm.findfont(fm.FontProperties(family='DejaVu Mono'))
else:
font_pth = '/Library/Fonts//Songti.ttc'
return ImageFont.truetype(font_pth, font_size)
def get_surface_image(self):
"""Return surface image, which is ImageTK type"""
im = np.ndarray((256, 256, 3), dtype=np.uint8)
for y in range(256):
for x in range(256):
im[y, x, :] = (60, 55, 52) # #34373c(RGB)'s BGR split
im = cv2.resize(im, ((int)(self.width*0.6), (int)(self.height*0.6)))
font_size = 30
font = self.get_font(font_size)
text_org = (self.width*0.16, self.height*0.26)
text = 'ImageSet Viewer'
im = draw_text(im, text, text_org, color=(255, 255, 255, 255), font=font)
return self.cv_to_tk(im)
def parse_xml(self, xml_pth):
anno = PascalVOC2007XML(xml_pth)
return anno.get_boxes()
def select_image_directory(self):
im_dir = askdirectory()
self.listbox.delete(0, len(self.im_names)-1) # delete all elements
self.fill_im_names(im_dir)
def select_annotation_directory(self):
anno_dir = askdirectory()
self.anno_dir.set(anno_dir) # TODO: validate anno_dir
def select_save_directory(self):
save_dir = askdirectory()
self.save_dir.set(save_dir) # the directory to save(copy) select images
def fill_im_names(self, im_dir):
if im_dir is not None:
self.im_dir.set(im_dir)
# Get natural order of image file names
self.im_names = [_ for _ in os.listdir(im_dir)]
self.im_names = natsorted(self.im_names)
for im_name in self.im_names:
self.listbox.insert(tk.END, im_name)
def example1():
"""The simplest example: don't specify any parameters.
Choose imd dir and xml dir in GUI
"""
app = VOCViewer()
app.mainloop()
def example2():
"""Specify directories & drawing related settings
"""
app = VOCViewer(im_dir = '/Users/chris/data/VOC2007/JPEGImages', # image directory
anno_dir = '/Users/chris/data/VOC2007/Annotations', # XML directory
save_dir = '/Users/chris/data/VOC2007/save', # Picking images saving directory
max_width = 1000, # max allowed shown image width is 1000
max_height = 800, # max allowed shown image height is 800
box_thick = 2, # bounding box thickness
)
app.mainloop()
def example3():
"""Specify name mapping
"""
# category mapping dict: key for class name in XML,
# value for shown class name in displayed image
# note: you can make key=val if it is understandable
voc_mapping = {
'__background__': '背景',
'aeroplane': '飞机',
'bicycle': '自行车',
'bird': '鸟',
'boat': '船',
'bottle': '瓶子',
'bus': '公交车',
'car': '汽车',
'cat': '猫',
'chair': '椅子',
'cow': '牛',
'diningtable': '餐桌',
'dog': '狗',
'horse': '马',
'motorbike': '摩托车',
'person': '人',
'pottedplant': '盆栽',
'sheep': '绵羊',
'sofa': '沙发',
'train': '火车',
'tvmonitor': '显示器'
}
app = VOCViewer(im_dir = '/Users/chris/data/VOC2007/JPEGImages', # image directory
anno_dir = '/Users/chris/data/VOC2007/Annotations', # XML directory
save_dir = '/Users/chris/data/VOC2007/save', # Picking images saving directory
max_width = 1000, # max allowed shown image width is 1000
max_height = 800, # max allowed shown image height is 800
box_thick = 2, # bounding box thickness
name_mapping = voc_mapping #!!
)
app.mainloop()
def example4():
"""Specify ignore_names / not_ignore_names
You can specify either ignore_names or not_ignore_names. But can't specify neither.
"""
app = VOCViewer(im_dir = '/Users/chris/data/VOC2007/JPEGImages', # image directory
anno_dir = '/Users/chris/data/VOC2007/Annotations', # XML directory
save_dir = '/Users/chris/data/VOC2007/save', # Picking images saving directory
max_width = 1000, # max allowed shown image width is 1000
max_height = 800, # max allowed shown image height is 800
box_thick = 2, # bounding box thickness
not_ignore_names = ['person']
)
app.mainloop()
def example5():
"""
Take ImageNet2012 as example. You can imitate this and
show your own PASCAL-VOC-Style-Labeled imageset
"""
fin = open('imagenet_cls_cn.txt', encoding='UTF-8')
lines = [_.strip() for _ in fin.readlines()]
fin.close()
ilsvrc2012_cls_dict = dict()
for item in lines:
item = item.split(' ')
digit_cls_name = item[0]
literal_cls_name = ' '.join(item[1:])
ilsvrc2012_cls_dict[digit_cls_name] = literal_cls_name
app = VOCViewer(im_dir = 'D:/data/ILSVRC2012/ILSVRC2012_img_train/n01440764', # image directory
anno_dir = 'D:/data/ILSVRC2012/ILSVRC2012_bbox_train_v2/n01440764', # XML directory
save_dir = None, # not specified saving direcotry
max_width = 1000, # max allowed shown image width is 1000
max_height = 800, # max allowed shown image height is 800
box_thick = 2, # bounding box thickness
name_mapping = ilsvrc2012_cls_dict
)
app.mainloop()
if __name__ == '__main__':
example1()
#example2()
#example3()
#example4()
#example5()
| [
"logging.getLogger",
"Tkinter.Entry",
"cv2.rectangle",
"logging.StreamHandler",
"colorsys.hsv_to_rgb",
"numpy.array",
"PIL.ImageDraw.Draw",
"Tkinter.Scrollbar",
"Tkinter.LabelFrame",
"Tkinter.Label",
"os.path.exists",
"os.listdir",
"Tkinter.Tk.__init__",
"Tkinter.Listbox",
"Tkinter.Strin... | [((2452, 2471), 'PIL.Image.fromarray', 'Image.fromarray', (['im'], {}), '(im)\n', (2467, 2471), False, 'from PIL import Image, ImageTk, ImageFont, ImageDraw\n'), ((2483, 2505), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['im_pil'], {}), '(im_pil)\n', (2497, 2505), False, 'from PIL import Image, ImageTk, ImageFont, ImageDraw\n'), ((2570, 2586), 'numpy.array', 'np.array', (['im_pil'], {}), '(im_pil)\n', (2578, 2586), True, 'import numpy as np\n'), ((3782, 3797), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (3793, 3797), False, 'import random\n'), ((3802, 3824), 'random.shuffle', 'random.shuffle', (['colors'], {}), '(colors)\n', (3816, 3824), False, 'import random\n'), ((3829, 3846), 'random.seed', 'random.seed', (['None'], {}), '(None)\n', (3840, 3846), False, 'import random\n'), ((2926, 2946), 'lxml.etree.parse', 'etree.parse', (['xml_pth'], {}), '(xml_pth)\n', (2937, 2946), False, 'from lxml import etree\n'), ((4870, 4890), 'Tkinter.Tk.__init__', 'tk.Tk.__init__', (['self'], {}), '(self)\n', (4884, 4890), True, 'import Tkinter as tk\n'), ((5124, 5143), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (5141, 5143), False, 'import logging\n'), ((5202, 5333), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s"""'], {'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(\n '%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n", (5219, 5333), False, 'import logging\n'), ((5544, 5572), 'logging.FileHandler', 'logging.FileHandler', (['logfile'], {}), '(logfile)\n', (5563, 5572), False, 'import logging\n'), ((5701, 5724), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (5722, 5724), False, 'import logging\n'), ((8579, 8634), 'Tkinter.LabelFrame', 'tk.LabelFrame', (['self'], {'bg': 'self.bg', 'width': 'main_frame_width'}), '(self, bg=self.bg, width=main_frame_width)\n', (8592, 8634), True, 'import 
Tkinter as tk\n'), ((8732, 8763), 'Tkinter.LabelFrame', 'tk.LabelFrame', (['self'], {'bg': 'self.bg'}), '(self, bg=self.bg)\n', (8745, 8763), True, 'import Tkinter as tk\n'), ((9059, 9096), 'Tkinter.LabelFrame', 'tk.LabelFrame', (['main_frame'], {'bg': 'self.bg'}), '(main_frame, bg=self.bg)\n', (9072, 9096), True, 'import Tkinter as tk\n'), ((9234, 9298), 'Tkinter.LabelFrame', 'tk.LabelFrame', (['main_frame'], {'height': 'image_frame_height', 'bg': 'self.bg'}), '(main_frame, height=image_frame_height, bg=self.bg)\n', (9247, 9298), True, 'import Tkinter as tk\n'), ((9845, 9934), 'Tkinter.Label', 'tk.Label', (['image_frame'], {'image': 'self.surface', 'bg': 'self.bg', 'fg': 'self.fg', 'compound': '"""center"""'}), "(image_frame, image=self.surface, bg=self.bg, fg=self.fg, compound=\n 'center')\n", (9853, 9934), True, 'import Tkinter as tk\n'), ((10229, 10293), 'Tkinter.Label', 'tk.Label', (['side_frame'], {'text': '"""Image Files"""', 'bg': 'self.bg', 'fg': 'self.fg'}), "(side_frame, text='Image Files', bg=self.bg, fg=self.fg)\n", (10237, 10293), True, 'import Tkinter as tk\n'), ((10368, 10412), 'Tkinter.Scrollbar', 'tk.Scrollbar', (['side_frame'], {'orient': 'tk.VERTICAL'}), '(side_frame, orient=tk.VERTICAL)\n', (10380, 10412), True, 'import Tkinter as tk\n'), ((10437, 10494), 'Tkinter.Listbox', 'tk.Listbox', (['side_frame'], {'yscrollcommand': 'self.scrollbar.set'}), '(side_frame, yscrollcommand=self.scrollbar.set)\n', (10447, 10494), True, 'import Tkinter as tk\n'), ((10888, 11004), 'Tkinter.Button', 'tk.Button', (['directory_frame'], {'text': '"""Image Directory"""', 'command': 'self.select_image_directory', 'bg': 'self.bg', 'fg': 'self.fg'}), "(directory_frame, text='Image Directory', command=self.\n select_image_directory, bg=self.bg, fg=self.fg)\n", (10897, 11004), True, 'import Tkinter as tk\n'), ((11099, 11113), 'Tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (11111, 11113), True, 'import Tkinter as tk\n'), ((11137, 11198), 'Tkinter.Entry', 
'tk.Entry', (['directory_frame'], {'text': 'self.im_dir', 'state': '"""readonly"""'}), "(directory_frame, text=self.im_dir, state='readonly')\n", (11145, 11198), True, 'import Tkinter as tk\n'), ((11945, 12071), 'Tkinter.Button', 'tk.Button', (['directory_frame'], {'text': '"""Annotation Directory"""', 'command': 'self.select_annotation_directory', 'bg': 'self.bg', 'fg': 'self.fg'}), "(directory_frame, text='Annotation Directory', command=self.\n select_annotation_directory, bg=self.bg, fg=self.fg)\n", (11954, 12071), True, 'import Tkinter as tk\n'), ((12169, 12183), 'Tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (12181, 12183), True, 'import Tkinter as tk\n'), ((12209, 12272), 'Tkinter.Entry', 'tk.Entry', (['directory_frame'], {'text': 'self.anno_dir', 'state': '"""readonly"""'}), "(directory_frame, text=self.anno_dir, state='readonly')\n", (12217, 12272), True, 'import Tkinter as tk\n'), ((12472, 12591), 'Tkinter.Button', 'tk.Button', (['directory_frame'], {'text': '"""Copy Save Directory"""', 'command': 'self.select_save_directory', 'bg': 'self.bg', 'fg': 'self.fg'}), "(directory_frame, text='Copy Save Directory', command=self.\n select_save_directory, bg=self.bg, fg=self.fg)\n", (12481, 12591), True, 'import Tkinter as tk\n'), ((12690, 12704), 'Tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (12702, 12704), True, 'import Tkinter as tk\n'), ((12730, 12793), 'Tkinter.Entry', 'tk.Entry', (['directory_frame'], {'text': 'self.save_dir', 'state': '"""readonly"""'}), "(directory_frame, text=self.save_dir, state='readonly')\n", (12738, 12793), True, 'import Tkinter as tk\n'), ((14457, 14475), 'cv2.imread', 'cv2.imread', (['im_pth'], {}), '(im_pth)\n', (14467, 14475), False, 'import cv2\n'), ((15626, 15649), 'os.path.exists', 'os.path.exists', (['xml_pth'], {}), '(xml_pth)\n', (15640, 15649), False, 'import os\n'), ((17742, 17781), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['font_pth', 'font_size'], {}), '(font_pth, font_size)\n', (17760, 17781), 
False, 'from PIL import Image, ImageTk, ImageFont, ImageDraw\n'), ((17887, 17928), 'numpy.ndarray', 'np.ndarray', (['(256, 256, 3)'], {'dtype': 'np.uint8'}), '((256, 256, 3), dtype=np.uint8)\n', (17897, 17928), True, 'import numpy as np\n'), ((18571, 18585), 'tkFileDialog.askdirectory', 'askdirectory', ([], {}), '()\n', (18583, 18585), False, 'from tkFileDialog import askdirectory\n'), ((18759, 18773), 'tkFileDialog.askdirectory', 'askdirectory', ([], {}), '()\n', (18771, 18773), False, 'from tkFileDialog import askdirectory\n'), ((18893, 18907), 'tkFileDialog.askdirectory', 'askdirectory', ([], {}), '()\n', (18905, 18907), False, 'from tkFileDialog import askdirectory\n'), ((7739, 7756), 'platform.system', 'platform.system', ([], {}), '()\n', (7754, 7756), False, 'import platform\n'), ((11452, 11476), 'natsort.natsorted', 'natsorted', (['self.im_names'], {}), '(self.im_names)\n', (11461, 11476), False, 'from natsort import natsorted\n'), ((15045, 15086), 'cv2.resize', 'cv2.resize', (['im', '(show_width, show_height)'], {}), '(im, (show_width, show_height))\n', (15055, 15086), False, 'import cv2\n'), ((17357, 17376), 'PIL.Image.fromarray', 'Image.fromarray', (['im'], {}), '(im)\n', (17372, 17376), False, 'from PIL import Image, ImageTk, ImageFont, ImageDraw\n'), ((17461, 17478), 'platform.system', 'platform.system', ([], {}), '()\n', (17476, 17478), False, 'import platform\n'), ((19233, 19257), 'natsort.natsorted', 'natsorted', (['self.im_names'], {}), '(self.im_names)\n', (19242, 19257), False, 'from natsort import natsorted\n'), ((3646, 3669), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['*x'], {}), '(*x)\n', (3665, 3669), False, 'import colorsys\n'), ((5414, 5425), 'time.time', 'time.time', ([], {}), '()\n', (5423, 5425), False, 'import time\n'), ((14114, 14147), 'shutil.copyfile', 'shutil.copyfile', (['im_pth', 'save_pth'], {}), '(im_pth, save_pth)\n', (14129, 14147), False, 'import shutil\n'), ((16383, 16479), 'cv2.rectangle', 'cv2.rectangle', (['im'], 
{'pt1': '(xmin, ymin)', 'pt2': '(xmax, ymax)', 'color': 'color', 'thickness': 'self.box_thick'}), '(im, pt1=(xmin, ymin), pt2=(xmax, ymax), color=color,\n thickness=self.box_thick)\n', (16396, 16479), False, 'import cv2\n'), ((17556, 17573), 'platform.system', 'platform.system', ([], {}), '()\n', (17571, 17573), False, 'import platform\n'), ((5446, 5457), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5455, 5457), False, 'import os\n'), ((17620, 17659), 'matplotlib.font_manager.FontProperties', 'fm.FontProperties', ([], {'family': '"""DejaVu Mono"""'}), "(family='DejaVu Mono')\n", (17637, 17659), True, 'import matplotlib.font_manager as fm\n'), ((19185, 19203), 'os.listdir', 'os.listdir', (['im_dir'], {}), '(im_dir)\n', (19195, 19203), False, 'import os\n')] |
__author__ = 'dimd'
from twisted.python import log
from zope.interface import implementer
from NetCatKS.Logger.api.interfaces import ILogger
GLOBAL_DEBUG = True
@implementer(ILogger)
class Logger(object):
    """Logger implementing :class:`ILogger`.

    Each severity level writes a tagged line through twisted's ``log.msg``;
    debug output is additionally gated by the module-level ``GLOBAL_DEBUG`` flag.
    """

    def __init__(self):
        pass

    def debug(self, msg):
        """Log a debug message (suppressed unless GLOBAL_DEBUG is True)."""
        if GLOBAL_DEBUG is True:
            line = '[ ====== DEBUG ]: {}'.format(msg)
            log.msg(line)

    def info(self, msg):
        """Log an informational message."""
        line = '[ ++++++ INFO ]: {}'.format(msg)
        log.msg(line)

    def warning(self, msg):
        """Log a warning message."""
        line = '[ !!!!!! WARNING ]: {}'.format(msg)
        log.msg(line)

    def error(self, msg):
        """Log an error message."""
        line = '[ ------ ERROR ]: {}'.format(msg)
        log.msg(line)

    def critical(self, msg):
        """Log a critical message."""
        line = '[ @@@@@@ CRITICAL ]: {}'.format(msg)
        log.msg(line)
__all__ = [
'Logger'
] | [
"zope.interface.implementer"
] | [((167, 187), 'zope.interface.implementer', 'implementer', (['ILogger'], {}), '(ILogger)\n', (178, 187), False, 'from zope.interface import implementer\n')] |
from plenum.test.bls.helper import check_bls_multi_sig_after_send
from plenum.test.pool_transactions.conftest import looper, clientAndWallet1, \
client1, wallet1, client1Connected
nodeCount = 4
nodes_wth_bls = 0
def test_each_node_has_bls(txnPoolNodeSet):
    """Every pool node must expose BLS machinery at node and replica level."""
    assert all(n.bls_bft for n in txnPoolNodeSet)
    assert all(n.replicas[0]._bls_bft_replica for n in txnPoolNodeSet)
def test_send_txns_no_bls(looper, txnPoolNodeSet,
                          client1, client1Connected, wallet1):
    """With no BLS-enabled nodes, sent txns must carry zero saved multi-sigs."""
    check_bls_multi_sig_after_send(
        looper, txnPoolNodeSet, client1, wallet1, saved_multi_sigs_count=0)
| [
"plenum.test.bls.helper.check_bls_multi_sig_after_send"
] | [((491, 593), 'plenum.test.bls.helper.check_bls_multi_sig_after_send', 'check_bls_multi_sig_after_send', (['looper', 'txnPoolNodeSet', 'client1', 'wallet1'], {'saved_multi_sigs_count': '(0)'}), '(looper, txnPoolNodeSet, client1, wallet1,\n saved_multi_sigs_count=0)\n', (521, 593), False, 'from plenum.test.bls.helper import check_bls_multi_sig_after_send\n')] |
from __future__ import division, print_function
from .. import __version__
from ._global_imports import *
try:
import h5py
except ImportError:
print('Install h5py to enable signal caching.')
raise
class _Cache(object):
    """ Cache numerical model objects computed during likelihood evaluation.

    :param str filename:
        Filename of cache.

    :param str cache_dir:
        Directory to write cache to.

    :param bool read_only:
        Do not write to cache file?

    :param bool archive:
        If not read-only, then archive an existing cache file found at the
        same path?

    """
    def __init__(self, filename, cache_dir='./',
                 read_only=False, archive=True):
        if isinstance(filename, _six.string_types):
            # enforce the canonical HDF5 extension
            if filename[-3:] != '.h5':
                self._filename = filename + '.h5'
            else:
                self._filename = filename
        else:
            # previously a non-string fell through to an opaque
            # AttributeError on self._filename below; fail clearly instead
            raise TypeError('Cache filename must be a string.')

        self._cache_dir = cache_dir
        self._path = _os.path.join(self._cache_dir, self._filename)

        self._read_only = read_only
        self._archive_if_incompatible = archive

    def __enter__(self):
        return self

    def __exit__(self, exc, exc_value, traceback):
        # Returning None propagates any exception to the caller; we only
        # announce that a problem occurred during caching.
        if exc:
            print('Encountered problem whilst caching:')

    def _open(self, mode='r'):
        """ Get the :mod:`h5py` context manager. """
        if self._read_only and mode != 'r':
            raise RuntimeError('The cache is in read-only mode.')

        return h5py.File(self._path, mode)

    def cache(self, data):
        """ Cache the computational data.

        :param dict data:
            Mapping from dataset name to an array, or to a tuple/list of
            arrays, computed for the sample at index ``self.i``.
            Datasets are created lazily on first encounter of each key.

        """
        with self._open('r+') as f:
            g = f['data']

            # .items() instead of .iteritems(): the latter does not exist on
            # Python 3, and this module otherwise straddles 2/3 via
            # __future__ imports and _six
            for key, value in data.items():
                if isinstance(value, tuple) or isinstance(value, list):
                    if key not in g.keys():
                        # one slot per sample, one per tuple member, then
                        # the member array's own shape
                        shape = [f.attrs['n'], len(value)]
                        shape += [s for s in value[0].shape]
                        g.create_dataset(key, shape=shape, dtype='float64')
                    for j, v in enumerate(value):
                        g[key][self.i,j,...] = v
                else:
                    if key not in g.keys():
                        shape = [f.attrs['n']] + [s for s in value.shape]
                        g.create_dataset(key, shape=shape, dtype='float64')
                    g[key][self.i,...] = value

        self.i += 1

    def reset_iterator(self):
        """ Reset the counter for the cache iterator. """
        self.i = 0

    def __iter__(self):
        self.reset_iterator()
        return self

    def __next__(self):
        """ Read from the cache. """
        cached = {}
        with self._open('r') as f:
            g = f['data']
            for key in g.keys():
                cached[key] = g[key][self.i,...]

        self.i += 1

        return cached

    def next(self):
        """ Python 2.x compatibility. """
        return self.__next__()

    @make_verbose('Checking whether an existing cache can be read:',
                  'Cache state determined')
    def do_caching(self, samples, force=False):
        """ Check whether a new cache is required or whether an exising
        cache can be read without additional computation.

        :param samples:
            Sample (parameter vector) array the cache must correspond to.

        :param bool force:
            Unconditionally create a fresh cache?

        :return: Boolean indicating whether to read (``False``) or write.

        """
        if force:
            self._new(samples)
            return True

        try: # try reading file and checking keys
            with self._open('r') as f:
                if 'thetas' not in f.keys():
                    self._new(samples)
                    return True
        except IOError: # create new cache file
            self._new(samples)
            return True
        else: # can be read, so check if samples array are matching
            if self._changed(samples):
                self._new(samples)
                return True
            else:
                return False

    @make_verbose('Creating new cache file', 'Cache file created')
    def _new(self, samples):
        """ Prepare a new cache file, archiving any readable existing one
        when archiving is enabled. """
        if not _os.path.isdir(self._cache_dir):
            _os.mkdir(self._cache_dir)
        if self._archive_if_incompatible:
            try:
                # only archive a file we can actually open
                with self._open('r'):
                    pass
            except IOError:
                self._initialise(samples)
            else:
                self._archive()
                self._initialise(samples)
        else:
            self._initialise(samples)

    @make_verbose('Initialising cache file', 'Cache file initialised')
    def _initialise(self, samples):
        """ Initialise the cache: record software version, sample count,
        the sample array itself, and an empty data group. """
        with self._open('w') as f:
            f.attrs['version'] = __version__
            f.attrs['n'] = samples.shape[0]
            f.create_dataset('thetas', data=samples)
            f.create_group('/data')

        self.reset_iterator()

    def _changed(self, samples):
        """ Check whether software version or sample set has changed. """
        with self._open('r') as f:
            if f.attrs['version'] != __version__:
                return True
            if not _np.array_equal(f['thetas'], samples):
                return True

        return False

    @make_verbose('Attempting to archive existing cache file in '
                  'a subdirectory')
    def _archive(self):
        """ Archive an existing cache file.

        Generator of progress messages; archiving failures degrade to a
        warning and the cache file is simply overwritten later.
        """

        # to archive the existing cache file
        archive_dir = _os.path.join(self._cache_dir, 'archive')

        try:
            if not _os.path.isdir(archive_dir):
                _os.mkdir(archive_dir)
        except OSError:
            yield ('Archiving failed... cache file %s will be '
                   'overwritten.' % self._filename)
            yield
        else:
            yield 'Targeting subdirectory: %s.' % archive_dir

            try:
                from datetime import datetime
            except ImportError:
                yield ('Archiving failed... cache file %s will be '
                       'overwritten.' % self._filename)
                yield
            else:
                # timestamped, version-stamped archive name
                name_archived = self._filename[:-3] + '__archive__'
                name_archived += 'xpsi_version_%s__' % __version__

                obj = datetime.now()
                name_archived += 'datetime__%i.%i.%i__%i.%i.%i' % (obj.day,
                                                                   obj.month,
                                                                   obj.year,
                                                                   obj.hour,
                                                                   obj.minute,
                                                                   obj.second)

                try:
                    # fix: rename the full path, not the bare filename, so
                    # archiving also works when cache_dir is not the CWD
                    _os.rename(self._path,
                               _os.path.join(archive_dir, name_archived + '.h5'))
                except OSError:
                    yield ('Archiving failed... cache file %s will be '
                           'overwritten.' % self._filename)
                else:
                    yield ('Exisiting cache file archived in '
                           'subdirectory %s.' % archive_dir)

        yield None
| [
"datetime.datetime.now",
"h5py.File"
] | [((1493, 1520), 'h5py.File', 'h5py.File', (['self._path', 'mode'], {}), '(self._path, mode)\n', (1502, 1520), False, 'import h5py\n'), ((6214, 6228), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6226, 6228), False, 'from datetime import datetime\n')] |
from multiprocessing import Process, Manager
''' Custom Module Imports '''
from calculator.add import addition
from calculator.subtract import subtraction
from calculator.multiply import multiplication
from calculator.divide import division
class Main:
    """Runs each arithmetic operation in its own process for a list of operand pairs."""

    def __init__(self) -> None:
        pass

    def calculatorFunction(self):
        """For every operand pair, spawn add/subtract/multiply/divide worker
        processes sharing one result dict, wait for all four, then print it."""
        operand_pairs = [[1, 2], [3, 4], [5, 6]]
        with Manager() as manager:
            result = manager.dict()
            for left, right in operand_pairs:
                workers = [
                    Process(target=addition(left, right, result).add),
                    Process(target=subtraction(left, right, result).subtract),
                    Process(target=multiplication(left, right, result).multiply),
                    Process(target=division(left, right, result).divide),
                ]
                for worker in workers:
                    worker.start()
                for worker in workers:
                    worker.join()
                print(result)
if __name__ == '__main__':
main = Main()
main.calculatorFunction() | [
"calculator.subtract.subtraction",
"calculator.multiply.multiplication",
"multiprocessing.Process",
"calculator.divide.division",
"calculator.add.addition",
"multiprocessing.Manager"
] | [((397, 406), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (404, 406), False, 'from multiprocessing import Process, Manager\n'), ((506, 534), 'calculator.add.addition', 'addition', (['i[0]', 'i[1]', 'result'], {}), '(i[0], i[1], result)\n', (514, 534), False, 'from calculator.add import addition\n'), ((558, 589), 'calculator.subtract.subtraction', 'subtraction', (['i[0]', 'i[1]', 'result'], {}), '(i[0], i[1], result)\n', (569, 589), False, 'from calculator.subtract import subtraction\n'), ((613, 647), 'calculator.multiply.multiplication', 'multiplication', (['i[0]', 'i[1]', 'result'], {}), '(i[0], i[1], result)\n', (627, 647), False, 'from calculator.multiply import multiplication\n'), ((671, 699), 'calculator.divide.division', 'division', (['i[0]', 'i[1]', 'result'], {}), '(i[0], i[1], result)\n', (679, 699), False, 'from calculator.divide import division\n'), ((723, 747), 'multiprocessing.Process', 'Process', ([], {'target': 'obj1.add'}), '(target=obj1.add)\n', (730, 747), False, 'from multiprocessing import Process, Manager\n'), ((770, 799), 'multiprocessing.Process', 'Process', ([], {'target': 'obj2.subtract'}), '(target=obj2.subtract)\n', (777, 799), False, 'from multiprocessing import Process, Manager\n'), ((822, 851), 'multiprocessing.Process', 'Process', ([], {'target': 'obj3.multiply'}), '(target=obj3.multiply)\n', (829, 851), False, 'from multiprocessing import Process, Manager\n'), ((874, 901), 'multiprocessing.Process', 'Process', ([], {'target': 'obj4.divide'}), '(target=obj4.divide)\n', (881, 901), False, 'from multiprocessing import Process, Manager\n')] |
"""minor fixes
Revision ID: <KEY>
Revises: 0<PASSWORD>
Create Date: 2020-09-18 07:56:14.159782
"""
from alembic import op
import geoalchemy2
import sqlalchemy as sa
import backend
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '0fed690a57ce'
branch_labels = ()
depends_on = None
def upgrade():
    """Apply the migration: create the ``task_harvesting`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    # task_harvesting extends `task` one-to-one: it reuses task.task_id as
    # its primary key, and the FK cascades keep it in lockstep with the
    # parent task row.
    op.create_table('task_harvesting',
    sa.Column('id', sa.BigInteger(), nullable=False),
    sa.Column('specific_product_id', sa.BigInteger(), nullable=False),
    sa.ForeignKeyConstraint(['id'], ['task.task_id'], name=op.f('fk_task_harvesting_id_task'), onupdate='CASCADE', ondelete='CASCADE'),
    sa.ForeignKeyConstraint(['specific_product_id'], ['specific_product.id'], name=op.f('fk_task_harvesting_specific_product_id_specific_product')),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_task_harvesting')),
    # MySQL-specific table options; ignored by other dialects.
    mysql_charset='utf8',
    mysql_engine='InnoDB'
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop ``task_harvesting``."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): recreating `spatial_ref_sys` here looks like an Alembic
    # autogenerate artifact -- that table is a PostGIS system table (the
    # geoalchemy2 import above suggests PostGIS is in use) and is normally
    # excluded from migrations rather than created in a downgrade. Confirm
    # before relying on this downgrade path.
    op.create_table('spatial_ref_sys',
    sa.Column('srid', sa.INTEGER(), autoincrement=False, nullable=False),
    sa.Column('auth_name', sa.VARCHAR(length=256), autoincrement=False, nullable=True),
    sa.Column('auth_srid', sa.INTEGER(), autoincrement=False, nullable=True),
    sa.Column('srtext', sa.VARCHAR(length=2048), autoincrement=False, nullable=True),
    sa.Column('proj4text', sa.VARCHAR(length=2048), autoincrement=False, nullable=True),
    sa.CheckConstraint('(srid > 0) AND (srid <= 998999)', name='spatial_ref_sys_srid_check'),
    sa.PrimaryKeyConstraint('srid', name='spatial_ref_sys_pkey')
    )
    op.drop_table('task_harvesting')
    # ### end Alembic commands ###
| [
"alembic.op.drop_table",
"sqlalchemy.VARCHAR",
"alembic.op.f",
"sqlalchemy.PrimaryKeyConstraint",
"sqlalchemy.INTEGER",
"sqlalchemy.CheckConstraint",
"sqlalchemy.BigInteger"
] | [((1714, 1746), 'alembic.op.drop_table', 'op.drop_table', (['"""task_harvesting"""'], {}), "('task_harvesting')\n", (1727, 1746), False, 'from alembic import op\n'), ((1549, 1642), 'sqlalchemy.CheckConstraint', 'sa.CheckConstraint', (['"""(srid > 0) AND (srid <= 998999)"""'], {'name': '"""spatial_ref_sys_srid_check"""'}), "('(srid > 0) AND (srid <= 998999)', name=\n 'spatial_ref_sys_srid_check')\n", (1567, 1642), True, 'import sqlalchemy as sa\n'), ((1643, 1703), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""srid"""'], {'name': '"""spatial_ref_sys_pkey"""'}), "('srid', name='spatial_ref_sys_pkey')\n", (1666, 1703), True, 'import sqlalchemy as sa\n'), ((455, 470), 'sqlalchemy.BigInteger', 'sa.BigInteger', ([], {}), '()\n', (468, 470), True, 'import sqlalchemy as sa\n'), ((526, 541), 'sqlalchemy.BigInteger', 'sa.BigInteger', ([], {}), '()\n', (539, 541), True, 'import sqlalchemy as sa\n'), ((1152, 1164), 'sqlalchemy.INTEGER', 'sa.INTEGER', ([], {}), '()\n', (1162, 1164), True, 'import sqlalchemy as sa\n'), ((1231, 1253), 'sqlalchemy.VARCHAR', 'sa.VARCHAR', ([], {'length': '(256)'}), '(length=256)\n', (1241, 1253), True, 'import sqlalchemy as sa\n'), ((1319, 1331), 'sqlalchemy.INTEGER', 'sa.INTEGER', ([], {}), '()\n', (1329, 1331), True, 'import sqlalchemy as sa\n'), ((1394, 1417), 'sqlalchemy.VARCHAR', 'sa.VARCHAR', ([], {'length': '(2048)'}), '(length=2048)\n', (1404, 1417), True, 'import sqlalchemy as sa\n'), ((1483, 1506), 'sqlalchemy.VARCHAR', 'sa.VARCHAR', ([], {'length': '(2048)'}), '(length=2048)\n', (1493, 1506), True, 'import sqlalchemy as sa\n'), ((619, 653), 'alembic.op.f', 'op.f', (['"""fk_task_harvesting_id_task"""'], {}), "('fk_task_harvesting_id_task')\n", (623, 653), False, 'from alembic import op\n'), ((779, 842), 'alembic.op.f', 'op.f', (['"""fk_task_harvesting_specific_product_id_specific_product"""'], {}), "('fk_task_harvesting_specific_product_id_specific_product')\n", (783, 842), False, 'from alembic import op\n'), 
((884, 910), 'alembic.op.f', 'op.f', (['"""pk_task_harvesting"""'], {}), "('pk_task_harvesting')\n", (888, 910), False, 'from alembic import op\n')] |
"""
Tests for Day 22
"""
from day22.module import part_1, part_2, \
FULL_INPUT_FILE, TEST_INPUT_FILE_1, TEST_INPUT_FILE_2, TEST_INPUT_FILE_3
def test_part_1_1():
    """Part 1 on the first sample input yields 39."""
    assert part_1(TEST_INPUT_FILE_1) == 39
def test_part_1_2():
    """Part 1 on the second sample input yields 590784."""
    assert part_1(TEST_INPUT_FILE_2) == 590784
def test_part_1_3():
    """Part 1 on the third sample input yields 474140."""
    assert part_1(TEST_INPUT_FILE_3) == 474140
def test_part_1_full():
    """Part 1 on the full puzzle input yields 546724."""
    assert part_1(FULL_INPUT_FILE) == 546724
def test_part_2():
    """Part 2 on the third sample input yields 2758514936282235."""
    assert part_2(TEST_INPUT_FILE_3) == 2758514936282235
def test_part_2_full():
    """Part 2 on the full puzzle input yields 1346544039176841."""
    assert part_2(FULL_INPUT_FILE) == 1346544039176841
| [
"day22.module.part_2",
"day22.module.part_1"
] | [((182, 207), 'day22.module.part_1', 'part_1', (['TEST_INPUT_FILE_1'], {}), '(TEST_INPUT_FILE_1)\n', (188, 207), False, 'from day22.module import part_1, part_2, FULL_INPUT_FILE, TEST_INPUT_FILE_1, TEST_INPUT_FILE_2, TEST_INPUT_FILE_3\n'), ((268, 293), 'day22.module.part_1', 'part_1', (['TEST_INPUT_FILE_2'], {}), '(TEST_INPUT_FILE_2)\n', (274, 293), False, 'from day22.module import part_1, part_2, FULL_INPUT_FILE, TEST_INPUT_FILE_1, TEST_INPUT_FILE_2, TEST_INPUT_FILE_3\n'), ((358, 383), 'day22.module.part_1', 'part_1', (['TEST_INPUT_FILE_3'], {}), '(TEST_INPUT_FILE_3)\n', (364, 383), False, 'from day22.module import part_1, part_2, FULL_INPUT_FILE, TEST_INPUT_FILE_1, TEST_INPUT_FILE_2, TEST_INPUT_FILE_3\n'), ((451, 474), 'day22.module.part_1', 'part_1', (['FULL_INPUT_FILE'], {}), '(FULL_INPUT_FILE)\n', (457, 474), False, 'from day22.module import part_1, part_2, FULL_INPUT_FILE, TEST_INPUT_FILE_1, TEST_INPUT_FILE_2, TEST_INPUT_FILE_3\n'), ((537, 562), 'day22.module.part_2', 'part_2', (['TEST_INPUT_FILE_3'], {}), '(TEST_INPUT_FILE_3)\n', (543, 562), False, 'from day22.module import part_1, part_2, FULL_INPUT_FILE, TEST_INPUT_FILE_1, TEST_INPUT_FILE_2, TEST_INPUT_FILE_3\n'), ((640, 663), 'day22.module.part_2', 'part_2', (['FULL_INPUT_FILE'], {}), '(FULL_INPUT_FILE)\n', (646, 663), False, 'from day22.module import part_1, part_2, FULL_INPUT_FILE, TEST_INPUT_FILE_1, TEST_INPUT_FILE_2, TEST_INPUT_FILE_3\n')] |
# -*- coding: utf-8 -*-
# DO NOT EDIT THIS FILE!
# This file has been autogenerated by dephell <3
# https://github.com/dephell/dephell
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import os.path
readme = ""
# Use README.rst (next to this setup script) as the long description when it
# exists; otherwise fall back to an empty string.
here = os.path.abspath(os.path.dirname(__file__))
readme_path = os.path.join(here, "README.rst")
if os.path.exists(readme_path):
    # read bytes and decode explicitly so the result does not depend on
    # the interpreter's default encoding
    with open(readme_path, "rb") as fh:
        readme = fh.read().decode("utf8")
# Package metadata and entry-point wiring for the lightbus distribution.
setup(
    long_description=readme,
    name="lightbus",
    version="1.1.0",
    description="RPC & event framework for Python 3",
    python_requires=">=3.7",
    project_urls={
        "documentation": "https://lightbus.org",
        "homepage": "https://lightbus.org",
        "repository": "https://github.com/adamcharnock/lightbus/",
    },
    author="<NAME>",
    author_email="<EMAIL>",
    keywords="python messaging redis bus queue",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Framework :: AsyncIO",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Natural Language :: English",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: POSIX",
        "Programming Language :: Python :: 3",
        "Topic :: System :: Networking",
        "Topic :: Communications",
    ],
    # Plugin registries: lightbus discovers transports and plugins through
    # these entry-point groups; each group offers a debug and a redis backend.
    entry_points={
        "console_scripts": ["lightbus = lightbus.commands:lightbus_entry_point"],
        "lightbus_event_transports": [
            "debug = lightbus:DebugEventTransport",
            "redis = lightbus:RedisEventTransport",
        ],
        "lightbus_plugins": [
            "internal_metrics = lightbus.plugins.metrics:MetricsPlugin",
            "internal_state = lightbus.plugins.state:StatePlugin",
        ],
        "lightbus_result_transports": [
            "debug = lightbus:DebugResultTransport",
            "redis = lightbus:RedisResultTransport",
        ],
        "lightbus_rpc_transports": [
            "debug = lightbus:DebugRpcTransport",
            "redis = lightbus:RedisRpcTransport",
        ],
        "lightbus_schema_transports": [
            "debug = lightbus:DebugSchemaTransport",
            "redis = lightbus:RedisSchemaTransport",
        ],
    },
    # Explicit package list (no find_packages): keep in sync with the tree.
    packages=[
        "lightbus",
        "lightbus.client",
        "lightbus.client.docks",
        "lightbus.client.internal_messaging",
        "lightbus.client.subclients",
        "lightbus.commands",
        "lightbus.config",
        "lightbus.plugins",
        "lightbus.schema",
        "lightbus.serializers",
        "lightbus.transports",
        "lightbus.transports.redis",
        "lightbus.utilities",
    ],
    package_dir={"": "."},
    package_data={},
    install_requires=["aioredis>=1.2.0", "jsonschema>=3.2", "pyyaml>=3.12"],
)
| [
"distutils.core.setup"
] | [((483, 2453), 'distutils.core.setup', 'setup', ([], {'long_description': 'readme', 'name': '"""lightbus"""', 'version': '"""1.1.0"""', 'description': '"""RPC & event framework for Python 3"""', 'python_requires': '""">=3.7"""', 'project_urls': "{'documentation': 'https://lightbus.org', 'homepage':\n 'https://lightbus.org', 'repository':\n 'https://github.com/adamcharnock/lightbus/'}", 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'keywords': '"""python messaging redis bus queue"""', 'classifiers': "['Development Status :: 5 - Production/Stable', 'Framework :: AsyncIO',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English', 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX', 'Programming Language :: Python :: 3',\n 'Topic :: System :: Networking', 'Topic :: Communications']", 'entry_points': "{'console_scripts': ['lightbus = lightbus.commands:lightbus_entry_point'],\n 'lightbus_event_transports': ['debug = lightbus:DebugEventTransport',\n 'redis = lightbus:RedisEventTransport'], 'lightbus_plugins': [\n 'internal_metrics = lightbus.plugins.metrics:MetricsPlugin',\n 'internal_state = lightbus.plugins.state:StatePlugin'],\n 'lightbus_result_transports': ['debug = lightbus:DebugResultTransport',\n 'redis = lightbus:RedisResultTransport'], 'lightbus_rpc_transports': [\n 'debug = lightbus:DebugRpcTransport',\n 'redis = lightbus:RedisRpcTransport'], 'lightbus_schema_transports': [\n 'debug = lightbus:DebugSchemaTransport',\n 'redis = lightbus:RedisSchemaTransport']}", 'packages': "['lightbus', 'lightbus.client', 'lightbus.client.docks',\n 'lightbus.client.internal_messaging', 'lightbus.client.subclients',\n 'lightbus.commands', 'lightbus.config', 'lightbus.plugins',\n 'lightbus.schema', 'lightbus.serializers', 'lightbus.transports',\n 'lightbus.transports.redis', 'lightbus.utilities']", 'package_dir': "{'': '.'}", 'package_data': '{}', 'install_requires': 
"['aioredis>=1.2.0', 'jsonschema>=3.2', 'pyyaml>=3.12']"}), "(long_description=readme, name='lightbus', version='1.1.0',\n description='RPC & event framework for Python 3', python_requires=\n '>=3.7', project_urls={'documentation': 'https://lightbus.org',\n 'homepage': 'https://lightbus.org', 'repository':\n 'https://github.com/adamcharnock/lightbus/'}, author='<NAME>',\n author_email='<EMAIL>', keywords='python messaging redis bus queue',\n classifiers=['Development Status :: 5 - Production/Stable',\n 'Framework :: AsyncIO', 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English', 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX', 'Programming Language :: Python :: 3',\n 'Topic :: System :: Networking', 'Topic :: Communications'],\n entry_points={'console_scripts': [\n 'lightbus = lightbus.commands:lightbus_entry_point'],\n 'lightbus_event_transports': ['debug = lightbus:DebugEventTransport',\n 'redis = lightbus:RedisEventTransport'], 'lightbus_plugins': [\n 'internal_metrics = lightbus.plugins.metrics:MetricsPlugin',\n 'internal_state = lightbus.plugins.state:StatePlugin'],\n 'lightbus_result_transports': ['debug = lightbus:DebugResultTransport',\n 'redis = lightbus:RedisResultTransport'], 'lightbus_rpc_transports': [\n 'debug = lightbus:DebugRpcTransport',\n 'redis = lightbus:RedisRpcTransport'], 'lightbus_schema_transports': [\n 'debug = lightbus:DebugSchemaTransport',\n 'redis = lightbus:RedisSchemaTransport']}, packages=['lightbus',\n 'lightbus.client', 'lightbus.client.docks',\n 'lightbus.client.internal_messaging', 'lightbus.client.subclients',\n 'lightbus.commands', 'lightbus.config', 'lightbus.plugins',\n 'lightbus.schema', 'lightbus.serializers', 'lightbus.transports',\n 'lightbus.transports.redis', 'lightbus.utilities'], package_dir={'':\n '.'}, package_data={}, install_requires=['aioredis>=1.2.0',\n 'jsonschema>=3.2', 'pyyaml>=3.12'])\n", (488, 2453), False, 'from 
distutils.core import setup\n')] |
import ENVIRONMENT
from src.database.database_creation import createPlayerTrueSkillDictionary
from src.skill_algorithms.algorithms import trueSkillMatchWithRawNums, trueSkillTipWinProb
from src.skill_algorithms.common_data_processing import beforeMatchPredictions, runAlgoForSeason, runAlgoForAllSeasons
# backlogtodo optimize trueskill, glicko etc. for rapid iteration
# backlogtodo refactor equations here to be generic
def runTrueSkillForSeason(seasonCsv: str, winningBetThreshold: float = ENVIRONMENT.TS_TIPOFF_ODDS_THRESHOLD, startFromBeginning=False):
    """Run the TrueSkill pipeline over a single season CSV.

    :param seasonCsv: path of the season CSV to process.
    :param winningBetThreshold: minimum predicted win probability to bet on.
        Fix: defaults to the TrueSkill threshold; it previously defaulted to
        GLICKO_TIPOFF_ODDS_THRESHOLD, an apparent copy-paste from the Glicko
        runner, inconsistent with every other TrueSkill entry point here.
    :param startFromBeginning: reprocess the whole season instead of resuming.
    """
    columnAdds = ['Home TS Mu', 'Away TS Mu', 'Home TS Sigma', 'Away TS Sigma',
                  'Home Lifetime Appearances', 'Away Lifetime Appearances',
                  'Home Tipper Wins', 'Away Tipper Wins',
                  'Home Tipper Losses', 'Away Tipper Losses']
    runAlgoForSeason(seasonCsv, ENVIRONMENT.PLAYER_TRUESKILL_DICT_PATH,
                     ENVIRONMENT.TS_PREDICTION_SUMMARIES_PATH,
                     trueSkillBeforeMatchPredictions, trueSkillUpdateDataSingleTipoff,
                     winningBetThreshold, columnAdds=columnAdds,
                     startFromBeginning=startFromBeginning)
# backlogtodo setup odds prediction to use Ev or win prob rather than bet threshold
def trueSkillBeforeMatchPredictions(psd, homePlayerCode, awayPlayerCode, homeTeam, awayTeam, tipWinnerCode, scoringTeam, predictionArray=None, actualArray=None, histogramPredictionsDict=None,
                                    winningBetThreshold=ENVIRONMENT.TS_TIPOFF_ODDS_THRESHOLD):
    """Delegate to the generic before-match prediction routine, supplying the
    TrueSkill-specific summary path, win-probability function and minimum
    appearance count."""
    return beforeMatchPredictions(
        psd, homePlayerCode, awayPlayerCode, homeTeam, awayTeam,
        tipWinnerCode, scoringTeam,
        predictionArray=predictionArray,
        actualArray=actualArray,
        histogramPredictionsDict=histogramPredictionsDict,
        predictionSummaryPath=ENVIRONMENT.TS_PREDICTION_SUMMARIES_PATH,
        minimumTipWinPercentage=winningBetThreshold,
        predictionFunction=trueSkillTipWinProb,
        minimumAppearances=ENVIRONMENT.MIN_TS_APPEARANCES)
def runTSForAllSeasons(seasons, winningBetThreshold=ENVIRONMENT.TS_TIPOFF_ODDS_THRESHOLD):
    """Run the TrueSkill pipeline across every season in ``seasons``."""
    feature_columns = ['Home TS Mu', 'Away TS Mu', 'Home TS Sigma', 'Away TS Sigma',
                       'Home Lifetime Appearances', 'Away Lifetime Appearances',
                       'Home Tipper Wins', 'Away Tipper Wins',
                       'Home Tipper Losses', 'Away Tipper Losses']
    runAlgoForAllSeasons(seasons,
                         ENVIRONMENT.PLAYER_TRUESKILL_DICT_PATH,
                         ENVIRONMENT.TS_PREDICTION_SUMMARIES_PATH,
                         trueSkillBeforeMatchPredictions,
                         trueSkillUpdateDataSingleTipoff,
                         winningBetThreshold,
                         columnAdds=feature_columns)
def trueSkillUpdateDataSingleTipoff(psd, winnerCode, loserCode, homePlayerCode, game_code=None):
    """Apply one tipoff result to the player-skill dictionary ``psd`` in place,
    and return the pre/post-match feature columns for the home and away tippers.

    ``psd`` maps a player code to a dict with keys "mu", "sigma", "wins",
    "losses" and "appearances" (all read and written below).
    """
    if game_code:
        print(game_code)
    # assumes winner/loser codes carry an 11-character prefix that must be
    # stripped to match psd keys -- TODO confirm against the CSV format.
    # NOTE(review): homePlayerCode is compared to these *stripped* codes
    # below, so it is presumably already prefix-free; verify at the caller.
    winnerCode = winnerCode[11:]
    loserCode = loserCode[11:]
    # snapshot pre-match ratings for the log lines and return values
    winnerOgMu = psd[winnerCode]["mu"]
    winnerOgSigma = psd[winnerCode]["sigma"]
    loserOgMu = psd[loserCode]["mu"]
    loserOgSigma = psd[loserCode]["sigma"]
    winnerMu, winnerSigma, loserMu, loserSigma = trueSkillMatchWithRawNums(psd[winnerCode]["mu"], psd[winnerCode]["sigma"], psd[loserCode]['mu'], psd[loserCode]["sigma"])
    # post-match tallies
    winnerWinCount = psd[winnerCode]["wins"] + 1
    winnerAppearances = psd[winnerCode]["appearances"] + 1
    loserLosses = psd[loserCode]["losses"] + 1
    loserAppearances = psd[loserCode]["appearances"] + 1
    # persist updated tallies and ratings
    psd[winnerCode]["wins"] = winnerWinCount
    psd[winnerCode]["appearances"] = winnerAppearances
    psd[loserCode]["losses"] = loserLosses
    psd[loserCode]["appearances"] = loserAppearances
    psd[winnerCode]["mu"] = winnerMu
    psd[winnerCode]["sigma"] = winnerSigma
    psd[loserCode]["mu"] = loserMu
    psd[loserCode]["sigma"] = loserSigma
    print('Winner:', winnerCode, 'trueskill increased', winnerMu - winnerOgMu, 'to', winnerMu, '. Sigma is now', winnerSigma, '. W:', winnerWinCount, 'L', winnerAppearances - winnerWinCount)
    print('Loser:', loserCode, 'trueskill decreased', loserMu - loserOgMu, 'to', loserMu, '. Sigma is now', loserSigma, '. W:', loserAppearances - loserLosses, 'L', loserLosses)
    # backlogtodo refactor repeated code out of algo methods
    # NOTE(review): the two branches are inconsistent. When home is the
    # winner, appearances/wins are reported pre-match (the explicit "- 1");
    # when home is the loser, appearances are returned post-match (no "- 1").
    # Losses are post-match in both. Confirm which convention the downstream
    # feature columns expect before changing either branch.
    if homePlayerCode == winnerCode:
        homeMu = winnerOgMu
        homeSigma = winnerOgSigma
        awayMu = loserOgMu
        awaySigma = loserOgSigma
        homeAppearances = winnerAppearances - 1
        awayAppearances = loserAppearances - 1
        homeWins = winnerWinCount - 1
        homeLosses = psd[winnerCode]["losses"]
        awayWins = psd[loserCode]["wins"]
        awayLosses = loserLosses
    elif homePlayerCode == loserCode:
        homeMu = loserOgMu
        homeSigma = loserOgSigma
        awayMu = winnerOgMu
        awaySigma = winnerOgSigma
        awayAppearances = winnerAppearances
        homeAppearances = loserAppearances
        awayWins = winnerWinCount - 1
        awayLosses = psd[winnerCode]["losses"]
        homeWins = psd[loserCode]["wins"]
        homeLosses = loserLosses
    else:
        raise ValueError('neither code matches')
    return {"Home TS Mu": homeMu, "Home TS Sigma": homeSigma, "Away TS Mu": awayMu, "Away TS Sigma": awaySigma, "Home Lifetime Appearances": homeAppearances, "Away Lifetime Appearances": awayAppearances,
            "Home Tipper Wins": homeWins, "Home Tipper Losses": homeLosses, "Away Tipper Wins": awayWins, "Away Tipper Losses": awayLosses}
def calculateTrueSkillDictionaryFromZero():
    """Rebuild the TrueSkill player dictionary from scratch over all seasons."""
    createPlayerTrueSkillDictionary()  # clears the stored values,
    runTSForAllSeasons(ENVIRONMENT.ALL_SEASONS_LIST,
                       winningBetThreshold=ENVIRONMENT.TS_TIPOFF_ODDS_THRESHOLD)
    print("\n", "trueskill dictionary updated for seasons", ENVIRONMENT.ALL_SEASONS_LIST, "\n")
def updateTrueSkillDictionaryFromLastGame():
    """Incrementally update the TrueSkill dictionary with unprocessed games.

    Runs the current season only, continuing from where the last run stopped
    (startFromBeginning=False).
    """
    runTrueSkillForSeason(
        ENVIRONMENT.CURRENT_SEASON_CSV,
        winningBetThreshold=ENVIRONMENT.TS_TIPOFF_ODDS_THRESHOLD,
        startFromBeginning=False,
    )
    print("\n", "trueskill dictionary updated from last game", "\n")
| [
"src.skill_algorithms.common_data_processing.beforeMatchPredictions",
"src.database.database_creation.createPlayerTrueSkillDictionary",
"src.skill_algorithms.algorithms.trueSkillMatchWithRawNums",
"src.skill_algorithms.common_data_processing.runAlgoForSeason",
"src.skill_algorithms.common_data_processing.ru... | [((568, 1049), 'src.skill_algorithms.common_data_processing.runAlgoForSeason', 'runAlgoForSeason', (['seasonCsv', 'ENVIRONMENT.PLAYER_TRUESKILL_DICT_PATH', 'ENVIRONMENT.TS_PREDICTION_SUMMARIES_PATH', 'trueSkillBeforeMatchPredictions', 'trueSkillUpdateDataSingleTipoff', 'winningBetThreshold'], {'columnAdds': "['Home TS Mu', 'Away TS Mu', 'Home TS Sigma', 'Away TS Sigma',\n 'Home Lifetime Appearances', 'Away Lifetime Appearances',\n 'Home Tipper Wins', 'Away Tipper Wins', 'Home Tipper Losses',\n 'Away Tipper Losses']", 'startFromBeginning': 'startFromBeginning'}), "(seasonCsv, ENVIRONMENT.PLAYER_TRUESKILL_DICT_PATH,\n ENVIRONMENT.TS_PREDICTION_SUMMARIES_PATH,\n trueSkillBeforeMatchPredictions, trueSkillUpdateDataSingleTipoff,\n winningBetThreshold, columnAdds=['Home TS Mu', 'Away TS Mu',\n 'Home TS Sigma', 'Away TS Sigma', 'Home Lifetime Appearances',\n 'Away Lifetime Appearances', 'Home Tipper Wins', 'Away Tipper Wins',\n 'Home Tipper Losses', 'Away Tipper Losses'], startFromBeginning=\n startFromBeginning)\n", (584, 1049), False, 'from src.skill_algorithms.common_data_processing import beforeMatchPredictions, runAlgoForSeason, runAlgoForAllSeasons\n'), ((1480, 1923), 'src.skill_algorithms.common_data_processing.beforeMatchPredictions', 'beforeMatchPredictions', (['psd', 'homePlayerCode', 'awayPlayerCode', 'homeTeam', 'awayTeam', 'tipWinnerCode', 'scoringTeam'], {'predictionArray': 'predictionArray', 'actualArray': 'actualArray', 'histogramPredictionsDict': 'histogramPredictionsDict', 'predictionSummaryPath': 'ENVIRONMENT.TS_PREDICTION_SUMMARIES_PATH', 'minimumTipWinPercentage': 'winningBetThreshold', 'predictionFunction': 'trueSkillTipWinProb', 'minimumAppearances': 'ENVIRONMENT.MIN_TS_APPEARANCES'}), '(psd, homePlayerCode, awayPlayerCode, homeTeam,\n awayTeam, tipWinnerCode, scoringTeam, predictionArray=predictionArray,\n actualArray=actualArray, histogramPredictionsDict=\n histogramPredictionsDict, 
predictionSummaryPath=ENVIRONMENT.\n TS_PREDICTION_SUMMARIES_PATH, minimumTipWinPercentage=\n winningBetThreshold, predictionFunction=trueSkillTipWinProb,\n minimumAppearances=ENVIRONMENT.MIN_TS_APPEARANCES)\n', (1502, 1923), False, 'from src.skill_algorithms.common_data_processing import beforeMatchPredictions, runAlgoForSeason, runAlgoForAllSeasons\n'), ((2027, 2466), 'src.skill_algorithms.common_data_processing.runAlgoForAllSeasons', 'runAlgoForAllSeasons', (['seasons', 'ENVIRONMENT.PLAYER_TRUESKILL_DICT_PATH', 'ENVIRONMENT.TS_PREDICTION_SUMMARIES_PATH', 'trueSkillBeforeMatchPredictions', 'trueSkillUpdateDataSingleTipoff', 'winningBetThreshold'], {'columnAdds': "['Home TS Mu', 'Away TS Mu', 'Home TS Sigma', 'Away TS Sigma',\n 'Home Lifetime Appearances', 'Away Lifetime Appearances',\n 'Home Tipper Wins', 'Away Tipper Wins', 'Home Tipper Losses',\n 'Away Tipper Losses']"}), "(seasons, ENVIRONMENT.PLAYER_TRUESKILL_DICT_PATH,\n ENVIRONMENT.TS_PREDICTION_SUMMARIES_PATH,\n trueSkillBeforeMatchPredictions, trueSkillUpdateDataSingleTipoff,\n winningBetThreshold, columnAdds=['Home TS Mu', 'Away TS Mu',\n 'Home TS Sigma', 'Away TS Sigma', 'Home Lifetime Appearances',\n 'Away Lifetime Appearances', 'Home Tipper Wins', 'Away Tipper Wins',\n 'Home Tipper Losses', 'Away Tipper Losses'])\n", (2047, 2466), False, 'from src.skill_algorithms.common_data_processing import beforeMatchPredictions, runAlgoForSeason, runAlgoForAllSeasons\n'), ((2920, 3045), 'src.skill_algorithms.algorithms.trueSkillMatchWithRawNums', 'trueSkillMatchWithRawNums', (["psd[winnerCode]['mu']", "psd[winnerCode]['sigma']", "psd[loserCode]['mu']", "psd[loserCode]['sigma']"], {}), "(psd[winnerCode]['mu'], psd[winnerCode]['sigma'],\n psd[loserCode]['mu'], psd[loserCode]['sigma'])\n", (2945, 3045), False, 'from src.skill_algorithms.algorithms import trueSkillMatchWithRawNums, trueSkillTipWinProb\n'), ((5313, 5346), 'src.database.database_creation.createPlayerTrueSkillDictionary', 
'createPlayerTrueSkillDictionary', ([], {}), '()\n', (5344, 5346), False, 'from src.database.database_creation import createPlayerTrueSkillDictionary\n')] |
__author__ = 'multiangle'
# 这是实现 霍夫曼树相关的文件, 主要用于 针对层次softmax进行 word2vec 优化方案的一种
'''
至于 为什么要进行层次softmax 可以简单理解 因为词表很大 针对上完个类别单词进行softmax 计算量大 更新参数过多 无法训练,而采用softmax 层次化 只需要 计算几个有限单词的sigmod 就可以 更新参数也非常少
提高训练速度
什么是霍夫曼树 简单理解就是 将训练文本 进行词频统计 通过构建加权最短路径来构造二叉树 这样 词频高的 位置在前 词频低的位置在后 每一个 霍夫曼编码代表一个词 路径 并且是唯一 不是其他词的前缀
'''
import numpy as np
class HuffmanTreeNode():
    """A node of the Huffman tree used for hierarchical-softmax word2vec.

    Leaf nodes carry a word (and, after training, its vector); interior
    nodes carry an intermediate parameter vector. `possibility` is the
    word-frequency weight used to build the tree; `Huffman` accumulates
    the node's Huffman code ("1" for a left step, "0" for a right step).
    """
    def __init__(self,value,possibility):
        # Frequency weight (relative count of the word in the corpus).
        self.possibility = possibility
        # Child links; filled in while the tree is being merged together.
        self.left = None
        self.right = None
        # Leaf nodes store the word itself; interior nodes store the
        # intermediate (theta) vector used by hierarchical softmax.
        self.value = value # the value of word
        # Huffman code of this node, assigned after the tree is built.
        self.Huffman = "" # store the huffman code

    def __str__(self):
        template = 'HuffmanTreeNode object, value: {v}, possibility: {p}, Huffman: {h}'
        return template.format(v=self.value, p=self.possibility, h=self.Huffman)
class HuffmanTree():
    """Huffman tree over a vocabulary, for hierarchical-softmax word2vec.

    Leaves correspond to words; interior nodes hold intermediate parameter
    vectors. Each word's Huffman code is the root-to-leaf path, with a left
    step encoded as "1" and a right step encoded as "0".
    """
    def __init__(self, word_dict, vec_len=15000):
        """Build the tree and write each word's Huffman code into word_dict.

        Args:
            word_dict (dict): maps keys to dicts with at least 'word' and
                'possibility' entries; gains a 'Huffman' entry per word.
            vec_len (int): length of the intermediate vectors stored in
                interior nodes.
        """
        self.vec_len = vec_len # the length of word vector
        self.root = None
        # All vocabulary entries.
        word_dict_list = list(word_dict.values())
        # Create one leaf node per word from its frequency information.
        node_list = [HuffmanTreeNode(x['word'],x['possibility']) for x in word_dict_list]
        # Build the Huffman tree from the leaves.
        self.build_tree(node_list)
        # self.build_CBT(node_list)
        # Generate the Huffman code of every word in the tree.
        self.generate_huffman_code(self.root, word_dict)

    def build_tree(self,node_list):
        """Greedily merge the two lowest-possibility nodes until one root remains."""
        # node_list.sort(key=lambda x:x.possibility,reverse=True)
        # for i in range(node_list.__len__()-1)[::-1]:
        #     top_node = self.merge(node_list[i],node_list[i+1])
        #     node_list.insert(i,top_node)
        # self.root = node_list[0]
        while node_list.__len__()>1:
            i1 = 0  # i1 indexes the node with the smallest possibility
            i2 = 1  # i2 indexes the node with the second-smallest possibility
            if node_list[i2].possibility < node_list[i1].possibility :
                [i1,i2] = [i2,i1]
            for i in range(2,node_list.__len__()): # find the two smallest nodes
                if node_list[i].possibility<node_list[i2].possibility :
                    i2 = i
                    if node_list[i2].possibility < node_list[i1].possibility :
                        [i1,i2] = [i2,i1]
            # Merge the two smallest nodes into a new interior node
            # (an intermediate variable that stores the theta parameters).
            top_node = self.merge(node_list[i1],node_list[i2])
            # Remove the two merged nodes and insert the new interior node,
            # so the loop keeps building the tree from the remaining nodes.
            if i1<i2:
                node_list.pop(i2)
                node_list.pop(i1)
            elif i1>i2:
                node_list.pop(i1)
                node_list.pop(i2)
            else:
                raise RuntimeError('i1 should not be equal to i2')
            node_list.insert(0,top_node)
        self.root = node_list[0]

    def build_CBT(self,node_list):   # build a complete binary tree
        """Alternative builder: a complete binary tree ordered by frequency."""
        node_list.sort(key=lambda x:x.possibility,reverse=True)
        node_num = node_list.__len__()
        before_start = 0
        while node_num>1 :
            for i in range(node_num>>1):
                top_node = self.merge(node_list[before_start+i*2],node_list[before_start+i*2+1])
                node_list.append(top_node)
            if node_num%2==1:
                # Odd count: fold the leftover node into the last merge.
                top_node = self.merge(node_list[before_start+i*2+2],node_list[-1])
                node_list[-1] = top_node
            before_start = before_start + node_num
            node_num = node_num>>1
        self.root = node_list[-1]

    def generate_huffman_code(self, node, word_dict):
        """Assign Huffman codes to all leaves and record them in word_dict."""
        # # use recursion in this edition
        # if node.left==None and node.right==None :
        #     word = node.value
        #     code = node.Huffman
        #     print(word,code)
        #     word_dict[word]['Huffman'] = code
        #     return -1
        #
        # code = node.Huffman
        # if code==None:
        #     code = ""
        # node.left.Huffman = code + "1"
        # node.right.Huffman = code + "0"
        # self.generate_huffman_code(node.left, word_dict)
        # self.generate_huffman_code(node.right, word_dict)

        # use stack butnot recursion in this edition
        # A left step is encoded "1", a right step "0"; walk down the left
        # spine assigning codes, pushing right children to visit later.
        stack = [self.root]
        while (stack.__len__()>0):
            node = stack.pop()
            # go along left tree
            while node.left or node.right :
                code = node.Huffman
                node.left.Huffman = code + "1"
                node.right.Huffman = code + "0"
                stack.append(node.right)
                node = node.left
            word = node.value
            code = node.Huffman
            # print(word,'\t',code.__len__(),'\t',node.possibility)
            word_dict[word]['Huffman'] = code

    def merge(self,node1,node2):
        """Merge two nodes into a new interior node and return it.

        The child with the larger possibility becomes the left child; the
        new node's possibility is the sum of both children's.
        """
        top_pos = node1.possibility + node2.possibility
        # Interior nodes get a freshly initialized intermediate vector.
        top_node = HuffmanTreeNode(np.zeros([1,self.vec_len]), top_pos)
        if node1.possibility >= node2.possibility :
            top_node.left = node1
            top_node.right = node2
        else:
            top_node.left = node2
            top_node.right = node1
        return top_node
| [
"numpy.zeros"
] | [((4947, 4974), 'numpy.zeros', 'np.zeros', (['[1, self.vec_len]'], {}), '([1, self.vec_len])\n', (4955, 4974), True, 'import numpy as np\n')] |
__all__ = [
"fit_gp",
"ft_gp",
"fit_lm",
"ft_lm",
"fit_rf",
"ft_rf",
"fit_kmeans",
"ft_kmeans",
]
## Fitting via sklearn package
try:
from sklearn.base import clone
from sklearn.linear_model import LinearRegression
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Kernel, RBF, ConstantKernel as Con
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestRegressor
except ModuleNotFoundError:
raise ModuleNotFoundError("module sklearn not found")
import grama as gr
from copy import deepcopy
from grama import add_pipe, pipe
from pandas import concat, DataFrame, Series
from toolz import curry
from warnings import filterwarnings
## Helper functions and classes
# --------------------------------------------------
def standardize_cols(df, ser_min, ser_max, var):
    """Min-max scale the columns `var` of `df` using the given bounds.

    @pre set(ser_min.index) == set(ser_max.index)
    """
    scaled = df.copy()
    for col in var:
        span = ser_max[col] - ser_min[col]
        # Degenerate (zero-width) columns: divide by 1 to avoid 0/0.
        if span < 1e-16:
            span = 1
        scaled[col] = (scaled[col] - ser_min[col]) / span
    return scaled
def restore_cols(df, ser_min, ser_max, var):
    """Invert standardize_cols: map the columns `var` back to original units.

    @pre set(ser_min.index) == set(ser_max.index)
    """
    restored = df.copy()
    for col in var:
        span = ser_max[col] - ser_min[col]
        # Mirror the degenerate-column guard used when standardizing.
        if span < 1e-16:
            span = 1
        restored[col] = span * df[col] + ser_min[col]
    return restored
class FunctionGPR(gr.Function):
    """Grama function wrapping a fitted sklearn GaussianProcessRegressor.

    For each "natural" output `y` this function reports two outputs:
    `y_mean` (best-fit prediction) and `y_sd` (predictive standard
    deviation). Inputs are min-max standardized with the stored training
    bounds before being passed to the regressor.
    """

    def __init__(self, gpr, var, out, name, runtime, var_min, var_max):
        """
        Args:
            gpr (GaussianProcessRegressor): Fitted sklearn GP
            var (list(str)): Input variable names
            out (list(str)): Natural output names being modeled
            name (str): Function name
            runtime (float): Nominal runtime estimate
            var_min (Series): Per-input training minima, for standardization
            var_max (Series): Per-input training maxima, for standardization
        """
        self.gpr = gpr
        self.var = var
        ## "Natural" outputs; what we're modeling
        self.out_nat = out
        ## Predicted outputs; mean and std
        self.out_mean = list(map(lambda s: s + "_mean", out))
        self.out_sd = list(map(lambda s: s + "_sd", out))
        self.out = self.out_mean + self.out_sd
        self.name = name
        self.runtime = runtime
        self.var_min = var_min
        self.var_max = var_max

    def eval(self, df):
        """Evaluate mean and sd predictions at the rows of `df`.

        Raises:
            ValueError: if any model input is missing from df.columns
        """
        ## Check invariant; model inputs must be subset of df columns
        if not set(self.var).issubset(set(df.columns)):
            raise ValueError(
                "Model function `{}` var not a subset of given columns".format(
                    self.name
                )
            )

        ## Standardize the inputs exactly as was done at training time
        df_sd = standardize_cols(df, self.var_min, self.var_max, self.var)
        y, y_sd = self.gpr.predict(df_sd[self.var], return_std=True)

        return concat(
            (
                DataFrame(data=y, columns=self.out_mean),
                DataFrame(data=y_sd, columns=self.out_sd),
            ),
            axis=1,
        )

    def copy(self):
        """Return a copy of this function.

        BUG FIX: the previous implementation referenced `self.df_train`
        (never set; its assignment was commented out in __init__) and called
        the constructor with a mismatched argument list, so copy() always
        raised. Pass the constructor arguments in the correct order instead.
        """
        func_new = FunctionGPR(
            self.gpr,
            self.var,
            self.out_nat,
            self.name,
            self.runtime,
            self.var_min,
            self.var_max,
        )
        return func_new
class FunctionRegressor(gr.Function):
    """Grama function wrapping a fitted sklearn point-estimate regressor."""

    def __init__(self, regressor, var, out, name, runtime):
        """
        Args:
            regressor (scikit Regressor): Fitted sklearn estimator
            var (list(str)): Input variable names
            out (list(str)): Natural output names; predictions are reported
                under the corresponding `<out>_mean` columns
            name (str): Function name
            runtime (float): Nominal runtime estimate
        """
        self.regressor = regressor
        self.var = var
        self.out = [s + "_mean" for s in out]
        self.name = name
        self.runtime = runtime

    def eval(self, df):
        """Evaluate the regressor at the rows of `df`."""
        # Every model input must be present among the given columns.
        if not set(self.var).issubset(set(df.columns)):
            raise ValueError(
                "Model function `{}` var not a subset of given columns".format(
                    self.name
                )
            )

        predictions = self.regressor.predict(df[self.var])
        return DataFrame(data=predictions, columns=self.out)
## Fit GP model with sklearn
# --------------------------------------------------
@curry
def fit_gp(
    df,
    md=None,
    var=None,
    out=None,
    domain=None,
    density=None,
    kernels=None,
    seed=None,
    suppress_warnings=True,
    n_restart=5,
    alpha=1e-10,
):
    r"""Fit a gaussian process

    Fit a gaussian process to given data. Specify var and out, or inherit from
    an existing model.

    Note that the new model will have two outputs `y_mean, y_sd` for each
    original output `y`. The quantity `y_mean` is the best-fit value, while
    `y_sd` is a measure of predictive uncertainty.

    Args:
        df (DataFrame): Data for function fitting
        md (gr.Model): Model from which to inherit metadata
        var (list(str) or None): List of features or None for all except outputs
        out (list(str)): List of outputs to fit
        domain (gr.Domain): Domain for new model
        density (gr.Density): Density for new model
        seed (int or None): Random seed for fitting process
        kernels (sklearn.gaussian_process.kernels.Kernel or dict or None): Kernel for GP
        n_restart (int): Restarts for optimization
        alpha (float or iterable): Value added to diagonal of kernel matrix
        suppress_warnings (bool): Suppress warnings when fitting?

    Returns:
        gr.Model: A grama model with fitted function(s)

    Notes:
        - Wrapper for sklearn.gaussian_process.GaussianProcessRegressor

    """
    if suppress_warnings:
        filterwarnings("ignore")

    n_obs, n_in = df.shape

    ## Infer fitting metadata, if available
    if not (md is None):
        domain = md.domain
        density = md.density
        out = md.out

    ## Check invariants
    if not set(out).issubset(set(df.columns)):
        raise ValueError("out must be subset of df.columns")
    ## Default input value
    if var is None:
        ## BUG FIX: iterate df.columns directly rather than through
        ## set().difference(), so the inferred feature order is deterministic
        ## (set iteration order varies between interpreter runs).
        var = [col for col in df.columns if col not in out]
    ## Check more invariants
    set_inter = set(out).intersection(set(var))
    if len(set_inter) > 0:
        raise ValueError(
            "out and var must be disjoint; intersect = {}".format(set_inter)
        )
    if not set(var).issubset(set(df.columns)):
        raise ValueError("var must be subset of df.columns")

    ## Pre-process kernel selection; normalize to a per-output dict
    if kernels is None:
        # Vectorize
        kernels = {o: None for o in out}
    elif isinstance(kernels, Kernel):
        kernels = {o: kernels for o in out}

    ## Pre-process data; standardize inputs to the unit hypercube
    var_min = df[var].min()
    var_max = df[var].max()
    df_sd = standardize_cols(df, var_min, var_max, var)

    ## Construct gaussian process for each output
    functions = []

    for output in out:
        # Define and fit model; deepcopy the kernel so shared kernel objects
        # are not mutated across outputs
        gpr = GaussianProcessRegressor(
            kernel=deepcopy(kernels[output]),
            random_state=seed,
            normalize_y=True,
            copy_X_train=True,
            n_restarts_optimizer=n_restart,
            alpha=alpha,
        )
        gpr.fit(df_sd[var], df_sd[output])
        name = "GP ({})".format(str(gpr.kernel_))

        fun = FunctionGPR(gpr, var, [output], name, 0, var_min, var_max)
        functions.append(fun)

    ## Construct model
    return gr.Model(functions=functions, domain=domain, density=density)


ft_gp = add_pipe(fit_gp)
## Fit random forest model with sklearn
# --------------------------------------------------
@curry
def fit_rf(
    df,
    md=None,
    var=None,
    out=None,
    domain=None,
    density=None,
    seed=None,
    suppress_warnings=True,
    **kwargs
):
    r"""Fit a random forest

    Fit a random forest to given data. Specify inputs and outputs, or inherit
    from an existing model.

    Args:
        df (DataFrame): Data for function fitting
        md (gr.Model): Model from which to inherit metadata
        var (list(str) or None): List of features or None for all except outputs
        out (list(str)): List of outputs to fit
        domain (gr.Domain): Domain for new model
        density (gr.Density): Density for new model
        seed (int or None): Random seed for fitting process
        suppress_warnings (bool): Suppress warnings when fitting?

    Keyword Arguments:
        n_estimators (int):
        criterion (int):
        max_depth (int or None):
        min_samples_split (int, float):
        min_samples_leaf (int, float):
        min_weight_fraction_leaf (float):
        max_features (int, float, string):
        max_leaf_nodes (int or None):
        min_impurity_decrease (float):
        min_impurity_split (float):
        bootstrap (bool):
        oob_score (bool):
        n_jobs (int or None):
        random_state (int):

    Returns:
        gr.Model: A grama model with fitted function(s)

    Notes:
        - Wrapper for sklearn.ensemble.RandomForestRegressor

    """
    if suppress_warnings:
        filterwarnings("ignore")

    n_obs, n_in = df.shape

    ## Infer fitting metadata, if available
    if not (md is None):
        domain = md.domain
        density = md.density
        out = md.out

    ## Check invariants
    if not set(out).issubset(set(df.columns)):
        raise ValueError("out must be subset of df.columns")
    ## Default input value
    if var is None:
        ## BUG FIX: iterate df.columns directly rather than through
        ## set().difference(), so the inferred feature order is deterministic
        ## (set iteration order varies between interpreter runs).
        var = [col for col in df.columns if col not in out]
    ## Check more invariants
    set_inter = set(out).intersection(set(var))
    if len(set_inter) > 0:
        raise ValueError(
            "outputs and inputs must be disjoint; intersect = {}".format(set_inter)
        )
    if not set(var).issubset(set(df.columns)):
        raise ValueError("var must be subset of df.columns")

    ## Construct a random forest for each output
    functions = []

    for output in out:
        rf = RandomForestRegressor(random_state=seed, **kwargs)
        rf.fit(df[var], df[output])
        name = "RF"

        fun = FunctionRegressor(rf, var, [output], name, 0)
        functions.append(fun)

    ## Construct model
    return gr.Model(functions=functions, domain=domain, density=density)


ft_rf = add_pipe(fit_rf)
## Fit linear model with sklearn
# --------------------------------------------------
@curry
def fit_lm(
    df,
    md=None,
    var=None,
    out=None,
    domain=None,
    density=None,
    seed=None,
    suppress_warnings=True,
    **kwargs
):
    r"""Fit a linear model

    Fit a linear model to given data. Specify inputs and outputs, or inherit
    from an existing model.

    Args:
        df (DataFrame): Data for function fitting
        md (gr.Model): Model from which to inherit metadata
        var (list(str) or None): List of features or None for all except outputs
        out (list(str)): List of outputs to fit
        domain (gr.Domain): Domain for new model
        density (gr.Density): Density for new model
        seed (int or None): Random seed for fitting process
        suppress_warnings (bool): Suppress warnings when fitting?

    Returns:
        gr.Model: A grama model with fitted function(s)

    Notes:
        - Wrapper for sklearn.linear_model.LinearRegression
          (DOC FIX: previously misdocumented as RandomForestRegressor)

    """
    if suppress_warnings:
        filterwarnings("ignore")

    n_obs, n_in = df.shape

    ## Infer fitting metadata, if available
    if not (md is None):
        domain = md.domain
        density = md.density
        out = md.out

    ## Check invariants
    if not set(out).issubset(set(df.columns)):
        raise ValueError("out must be subset of df.columns")
    ## Default input value
    if var is None:
        ## BUG FIX: iterate df.columns directly rather than through
        ## set().difference(), so the inferred feature order is deterministic
        ## (set iteration order varies between interpreter runs).
        var = [col for col in df.columns if col not in out]
    ## Check more invariants
    set_inter = set(out).intersection(set(var))
    if len(set_inter) > 0:
        raise ValueError(
            "outputs and inputs must be disjoint; intersect = {}".format(set_inter)
        )
    if not set(var).issubset(set(df.columns)):
        raise ValueError("var must be subset of df.columns")

    ## Construct a linear model for each output
    functions = []

    for output in out:
        lm = LinearRegression(**kwargs)
        lm.fit(df[var], df[output])
        name = "LM"

        fun = FunctionRegressor(lm, var, [output], name, 0)
        functions.append(fun)

    ## Construct model
    return gr.Model(functions=functions, domain=domain, density=density)


ft_lm = add_pipe(fit_lm)
## Fit kmeans clustering model
# --------------------------------------------------
@curry
def fit_kmeans(df, var=None, colname="cluster_id", seed=None, **kwargs):
    r"""K-means cluster a dataset

    Fit a K-means clustering model on a dataset and wrap it as a grama model
    that labels input data with a cluster id.

    Args:
        df (DataFrame): Data to cluster.
        var (list or None): Variables in df on which to cluster. Use None to
            cluster on all variables.
        colname (string): Name of cluster id; will be output in cluster model.
        seed (int): Random seed for kmeans clustering

    Kwargs:
        n_clusters (int): Number of clusters to fit
        random_state (int or None):

    Returns:
        gr.Model: Model that labels input data

    Notes:
        - A wrapper for sklearn.cluster.KMeans

    References:
        Scikit-learn: Machine Learning in Python, Pedregosa et al. JMLR 12, pp. 2825-2830, 2011.
    """
    ## Resolve the clustering variables; validate user-provided ones
    if var is None:
        var = list(df.columns).copy()
    else:
        var = list(var).copy()
        missing = set(var).difference(set(df.columns))
        if len(missing) > 0:
            raise ValueError(
                "`var` must be subset of `df.columns`\n" "diff = {}".format(missing)
            )

    ## Fit the clustering on the selected columns
    estimator = KMeans(random_state=seed, **kwargs).fit(df[var].values)

    ## Wrap the fitted estimator as a grama vectorized function
    def fun_cluster(df):
        labels = estimator.predict(df[var].values)
        return DataFrame(data={colname: labels})

    return gr.Model() >> gr.cp_vec_function(fun=fun_cluster, var=var, out=[colname])


ft_kmeans = add_pipe(fit_kmeans)
| [
"grama.add_pipe",
"sklearn.cluster.KMeans",
"sklearn.ensemble.RandomForestRegressor",
"grama.Model",
"sklearn.linear_model.LinearRegression",
"grama.cp_vec_function",
"copy.deepcopy",
"pandas.DataFrame",
"warnings.filterwarnings"
] | [((7062, 7078), 'grama.add_pipe', 'add_pipe', (['fit_gp'], {}), '(fit_gp)\n', (7070, 7078), False, 'from grama import add_pipe, pipe\n'), ((9811, 9827), 'grama.add_pipe', 'add_pipe', (['fit_rf'], {}), '(fit_rf)\n', (9819, 9827), False, 'from grama import add_pipe, pipe\n'), ((12030, 12046), 'grama.add_pipe', 'add_pipe', (['fit_lm'], {}), '(fit_lm)\n', (12038, 12046), False, 'from grama import add_pipe, pipe\n'), ((14292, 14312), 'grama.add_pipe', 'add_pipe', (['fit_kmeans'], {}), '(fit_kmeans)\n', (14300, 14312), False, 'from grama import add_pipe, pipe\n'), ((6990, 7051), 'grama.Model', 'gr.Model', ([], {'functions': 'functions', 'domain': 'domain', 'density': 'density'}), '(functions=functions, domain=domain, density=density)\n', (6998, 7051), True, 'import grama as gr\n'), ((9739, 9800), 'grama.Model', 'gr.Model', ([], {'functions': 'functions', 'domain': 'domain', 'density': 'density'}), '(functions=functions, domain=domain, density=density)\n', (9747, 9800), True, 'import grama as gr\n'), ((11958, 12019), 'grama.Model', 'gr.Model', ([], {'functions': 'functions', 'domain': 'domain', 'density': 'density'}), '(functions=functions, domain=domain, density=density)\n', (11966, 12019), True, 'import grama as gr\n'), ((3726, 3761), 'pandas.DataFrame', 'DataFrame', ([], {'data': 'y', 'columns': 'self.out'}), '(data=y, columns=self.out)\n', (3735, 3761), False, 'from pandas import concat, DataFrame, Series\n'), ((5273, 5297), 'warnings.filterwarnings', 'filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (5287, 5297), False, 'from warnings import filterwarnings\n'), ((8630, 8654), 'warnings.filterwarnings', 'filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (8644, 8654), False, 'from warnings import filterwarnings\n'), ((9506, 9556), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'random_state': 'seed'}), '(random_state=seed, **kwargs)\n', (9527, 9556), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((10873, 
10897), 'warnings.filterwarnings', 'filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (10887, 10897), False, 'from warnings import filterwarnings\n'), ((11749, 11775), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '(**kwargs)\n', (11765, 11775), False, 'from sklearn.linear_model import LinearRegression\n'), ((14148, 14178), 'pandas.DataFrame', 'DataFrame', ([], {'data': '{colname: res}'}), '(data={colname: res})\n', (14157, 14178), False, 'from pandas import concat, DataFrame, Series\n'), ((14189, 14199), 'grama.Model', 'gr.Model', ([], {}), '()\n', (14197, 14199), True, 'import grama as gr\n'), ((14203, 14262), 'grama.cp_vec_function', 'gr.cp_vec_function', ([], {'fun': 'fun_cluster', 'var': 'var', 'out': '[colname]'}), '(fun=fun_cluster, var=var, out=[colname])\n', (14221, 14262), True, 'import grama as gr\n'), ((13981, 14016), 'sklearn.cluster.KMeans', 'KMeans', ([], {'random_state': 'seed'}), '(random_state=seed, **kwargs)\n', (13987, 14016), False, 'from sklearn.cluster import KMeans\n'), ((2580, 2620), 'pandas.DataFrame', 'DataFrame', ([], {'data': 'y', 'columns': 'self.out_mean'}), '(data=y, columns=self.out_mean)\n', (2589, 2620), False, 'from pandas import concat, DataFrame, Series\n'), ((2638, 2679), 'pandas.DataFrame', 'DataFrame', ([], {'data': 'y_sd', 'columns': 'self.out_sd'}), '(data=y_sd, columns=self.out_sd)\n', (2647, 2679), False, 'from pandas import concat, DataFrame, Series\n'), ((6560, 6585), 'copy.deepcopy', 'deepcopy', (['kernels[output]'], {}), '(kernels[output])\n', (6568, 6585), False, 'from copy import deepcopy\n')] |
from django.conf import settings
from django.test import TestCase
# Create your tests here.
class SettingsTest(TestCase):
    """Dynaconf/Django integration: env switching via settings.using_env."""

    # Values shared by every environment in this fixture set.
    COMMON_USERNAME = 'admin_user_from_env'
    COMMON_FOO = 'It overrides every other env'

    def _assert_production(self):
        """Assert the default (production) environment values are active."""
        self.assertEqual(settings.SERVER, 'prodserver.com')
        self.assertEqual(settings.PASSWORD, '<PASSWORD>')
        self.assertEqual(settings.USERNAME, self.COMMON_USERNAME)
        self.assertEqual(settings.FOO, self.COMMON_FOO)

    def test_settings(self):
        self.assertEqual(settings.SERVER, 'prodserver.com')
        self.assertEqual(
            settings.STATIC_URL, '/changed/in/settings.toml/by/dynaconf/')
        self.assertEqual(settings.USERNAME, self.COMMON_USERNAME)
        self.assertEqual(settings.PASSWORD, '<PASSWORD>')
        self.assertEqual(settings.get('PASSWORD'), '<PASSWORD>')
        self.assertEqual(settings.FOO, self.COMMON_FOO)

        # Inside each temporary env the server changes and PASSWORD is unset;
        # leaving the context must restore the production values.
        for env, server in (('development', 'devserver.com'),
                            ('staging', 'stagingserver.com'),
                            ('customenv', 'customserver.com')):
            with settings.using_env(env):
                self.assertEqual(settings.SERVER, server)
                self.assertEqual(settings.PASSWORD, False)
                self.assertEqual(settings.USERNAME, self.COMMON_USERNAME)
                self.assertEqual(settings.FOO, self.COMMON_FOO)
            self._assert_production()
| [
"django.conf.settings.get",
"django.conf.settings.using_env"
] | [((466, 490), 'django.conf.settings.get', 'settings.get', (['"""PASSWORD"""'], {}), "('PASSWORD')\n", (478, 490), False, 'from django.conf import settings\n'), ((591, 624), 'django.conf.settings.using_env', 'settings.using_env', (['"""development"""'], {}), "('development')\n", (609, 624), False, 'from django.conf import settings\n'), ((1161, 1190), 'django.conf.settings.using_env', 'settings.using_env', (['"""staging"""'], {}), "('staging')\n", (1179, 1190), False, 'from django.conf import settings\n'), ((1731, 1762), 'django.conf.settings.using_env', 'settings.using_env', (['"""customenv"""'], {}), "('customenv')\n", (1749, 1762), False, 'from django.conf import settings\n')] |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Downloads and prepares the Forest Covertype dataset."""
import gzip
import os
import shutil
import pandas as pd
from sklearn.model_selection import train_test_split
import wget
# Source location for the UCI Forest Covertype dataset (gzipped CSV).
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.data.gz'

# BUG FIX: os.mkdir raises FileExistsError when './data' already exists,
# which prevented re-running the script; makedirs(..., exist_ok=True)
# makes this step idempotent.
os.makedirs('./data', exist_ok=True)

# Download the archive and decompress it to data/covtype.csv.
filename = wget.download(url)
with gzip.open(filename, 'rb') as f_in:
    with open('data/covtype.csv', 'wb') as f_out:
        shutil.copyfileobj(f_in, f_out)

df = pd.read_csv('data/covtype.csv')
n_total = len(df)

# Train, val and test split follows
# Xgboost: Scalable GPU accelerated learning. arXiv:1806.11248, 2018.
train_val_indices, test_indices = train_test_split(
    range(n_total), test_size=0.2, random_state=0)
train_indices, val_indices = train_test_split(
    train_val_indices, test_size=0.2 / 0.6, random_state=0)

traindf = df.iloc[train_indices]
valdf = df.iloc[val_indices]
testdf = df.iloc[test_indices]

# Shuffle the training rows before writing out.
traindf = traindf.sample(frac=1)

traindf.to_csv('data/train.csv', index=False, header=False)
valdf.to_csv('data/val.csv', index=False, header=False)
testdf.to_csv('data/test.csv', index=False, header=False)
| [
"wget.download",
"shutil.copyfileobj",
"pandas.read_csv",
"gzip.open",
"sklearn.model_selection.train_test_split",
"os.mkdir"
] | [((880, 898), 'os.mkdir', 'os.mkdir', (['"""./data"""'], {}), "('./data')\n", (888, 898), False, 'import os\n'), ((910, 928), 'wget.download', 'wget.download', (['url'], {}), '(url)\n', (923, 928), False, 'import wget\n'), ((1059, 1090), 'pandas.read_csv', 'pd.read_csv', (['"""data/covtype.csv"""'], {}), "('data/covtype.csv')\n", (1070, 1090), True, 'import pandas as pd\n'), ((1387, 1459), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train_val_indices'], {'test_size': '(0.2 / 0.6)', 'random_state': '(0)'}), '(train_val_indices, test_size=0.2 / 0.6, random_state=0)\n', (1403, 1459), False, 'from sklearn.model_selection import train_test_split\n'), ((934, 959), 'gzip.open', 'gzip.open', (['filename', '"""rb"""'], {}), "(filename, 'rb')\n", (943, 959), False, 'import gzip\n'), ((1021, 1052), 'shutil.copyfileobj', 'shutil.copyfileobj', (['f_in', 'f_out'], {}), '(f_in, f_out)\n', (1039, 1052), False, 'import shutil\n')] |
import pytest
import datetime
from django.test import TestCase
from django.utils import timezone
from gamer_registration_system.con.models import Convention, Event, EventSchedule
# Create your tests here.
class EventScheduleModelTests(TestCase):
    """Unit tests for EventSchedule.recent_event()."""

    # Shared fixtures, handed to each test method through default arguments.
    new_con = Convention(convention_name='Test Future Con')
    new_event = Event(convention=new_con, title='Test Future Event')

    def test_recent_event_with_future_start(self, new_con=new_con, new_event=new_event):
        """
        recent_event() returns False for events whose start_date
        is in the future.
        """
        start = timezone.now() + datetime.timedelta(days=30)
        schedule = EventSchedule(convention=new_con, event=new_event, start_date=start)
        self.assertIs(schedule.recent_event(), False)

    def test_recent_event_with_old_event(self, new_con=new_con, new_event=new_event):
        """
        recent_event() returns False for events whose start_date is older than 1 day
        """
        start = timezone.now() - datetime.timedelta(days=1, seconds=1)
        schedule = EventSchedule(convention=new_con, event=new_event, start_date=start)
        self.assertIs(schedule.recent_event(), False)

    def test_recent_event_with_recent_question(self, new_con=new_con, new_event=new_event):
        """
        recent_event() returns True for events whose start_date is within the last day
        """
        start = timezone.now() - datetime.timedelta(hours=23, minutes=59, seconds=59)
        schedule = EventSchedule(convention=new_con, event=new_event, start_date=start)
        self.assertIs(schedule.recent_event(), True)
| [
"gamer_registration_system.con.models.Event",
"gamer_registration_system.con.models.EventSchedule",
"django.utils.timezone.now",
"gamer_registration_system.con.models.Convention",
"datetime.timedelta"
] | [((263, 308), 'gamer_registration_system.con.models.Convention', 'Convention', ([], {'convention_name': '"""Test Future Con"""'}), "(convention_name='Test Future Con')\n", (273, 308), False, 'from gamer_registration_system.con.models import Convention, Event, EventSchedule\n'), ((325, 377), 'gamer_registration_system.con.models.Event', 'Event', ([], {'convention': 'new_con', 'title': '"""Test Future Event"""'}), "(convention=new_con, title='Test Future Event')\n", (330, 377), False, 'from gamer_registration_system.con.models import Convention, Event, EventSchedule\n'), ((671, 738), 'gamer_registration_system.con.models.EventSchedule', 'EventSchedule', ([], {'convention': 'new_con', 'event': 'new_event', 'start_date': 'time'}), '(convention=new_con, event=new_event, start_date=time)\n', (684, 738), False, 'from gamer_registration_system.con.models import Convention, Event, EventSchedule\n'), ((1088, 1155), 'gamer_registration_system.con.models.EventSchedule', 'EventSchedule', ([], {'convention': 'new_con', 'event': 'new_event', 'start_date': 'time'}), '(convention=new_con, event=new_event, start_date=time)\n', (1101, 1155), False, 'from gamer_registration_system.con.models import Convention, Event, EventSchedule\n'), ((1523, 1590), 'gamer_registration_system.con.models.EventSchedule', 'EventSchedule', ([], {'convention': 'new_con', 'event': 'new_event', 'start_date': 'time'}), '(convention=new_con, event=new_event, start_date=time)\n', (1536, 1590), False, 'from gamer_registration_system.con.models import Convention, Event, EventSchedule\n'), ((598, 612), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (610, 612), False, 'from django.utils import timezone\n'), ((615, 642), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(30)'}), '(days=30)\n', (633, 642), False, 'import datetime\n'), ((1013, 1027), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1025, 1027), False, 'from django.utils import timezone\n'), ((1030, 1067), 
'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)', 'seconds': '(1)'}), '(days=1, seconds=1)\n', (1048, 1067), False, 'import datetime\n'), ((1430, 1444), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1442, 1444), False, 'from django.utils import timezone\n'), ((1447, 1499), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': '(23)', 'minutes': '(59)', 'seconds': '(59)'}), '(hours=23, minutes=59, seconds=59)\n', (1465, 1499), False, 'import datetime\n')] |
#!/usr/bin/env python
import rospy
import tf
from jsk_recognition_msgs.msg import BoundingBox
class BoundingBoxToTf(object):
    """Re-broadcasts a jsk_recognition BoundingBox pose as a TF transform."""
    def __init__(self):
        # Child TF frame to broadcast under (default: 'bounding_box').
        self.tf_frame = rospy.get_param('~tf_frame', 'bounding_box')
        self.broadcaster = tf.TransformBroadcaster()
        self.sub = rospy.Subscriber('~input', BoundingBox, self._cb)
    def _cb(self, bbox):
        # Broadcast the box pose, stamped now, parented to the box's own frame.
        pos = bbox.pose.position
        ornt = bbox.pose.orientation
        self.broadcaster.sendTransform((pos.x, pos.y, pos.z),
                                       (ornt.x, ornt.y, ornt.z, ornt.w),
                                       rospy.Time.now(),
                                       self.tf_frame,
                                       bbox.header.frame_id)
if __name__ == '__main__':
    # Bug fix: the original passed ' bounding_box_to_tf' with a leading space;
    # ROS graph resource names may not contain spaces, so init_node rejects it.
    rospy.init_node('bounding_box_to_tf')
    app = BoundingBoxToTf()
    rospy.spin()
| [
"tf.TransformBroadcaster",
"rospy.init_node",
"rospy.get_param",
"rospy.Time.now",
"rospy.spin",
"rospy.Subscriber"
] | [((783, 821), 'rospy.init_node', 'rospy.init_node', (['""" bounding_box_to_tf"""'], {}), "(' bounding_box_to_tf')\n", (798, 821), False, 'import rospy\n'), ((854, 866), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (864, 866), False, 'import rospy\n'), ((178, 222), 'rospy.get_param', 'rospy.get_param', (['"""~tf_frame"""', '"""bounding_box"""'], {}), "('~tf_frame', 'bounding_box')\n", (193, 222), False, 'import rospy\n'), ((251, 276), 'tf.TransformBroadcaster', 'tf.TransformBroadcaster', ([], {}), '()\n', (274, 276), False, 'import tf\n'), ((297, 346), 'rospy.Subscriber', 'rospy.Subscriber', (['"""~input"""', 'BoundingBox', 'self._cb'], {}), "('~input', BoundingBox, self._cb)\n", (313, 346), False, 'import rospy\n'), ((617, 633), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (631, 633), False, 'import rospy\n')] |
from defs_utils import *
import pdf2image
import os
def transforma_pdf_em_img_por_materia(materia, pdf_path=None):
    """Convert every PDF of the given subject ("materia") into per-page JPEGs.

    Pages are written under ../MATERIAS_CRIA_FILES/<materia>/<pdf prefix>/out-<n>.jpg.
    NOTE(review): ``list_dir`` and ``complete_name`` come from the defs_utils
    star import; presumably they return Windows-style absolute paths — the
    backslash splitting below only works on Windows. TODO confirm.
    """
    searched = materia
    if pdf_path:
        list_files = list_dir(complete_name(searched, pre=pdf_path), True)
    else:
        list_files = list_dir(complete_name(searched), True)
    # Remember the starting directory; the loop chdirs around while creating folders.
    volta = os.getcwd()
    for file in list_files:
        pages = pdf2image.convert_from_path(file)
        print(file)
        os.chdir(volta)
        for e, page in enumerate(pages):
            e_cont = e+1  # 1-based page number used in the output filename
            dir_name = '../MATERIAS_CRIA_FILES'
            dir_name += '\\'+searched+'\\'
            # Output folder named after the PDF filename prefix before the first '-'.
            dir_name += file.split('\\')[-1].split('-')[0]
            # Create the nested folders one level at a time (mkdir if missing).
            for folder in dir_name.split('\\'):
                try:
                    os.chdir(folder)
                except (FileNotFoundError):
                    os.mkdir(folder)
                    os.chdir(folder)
            os.chdir(volta)
            # Absolute directory of this script, used to build the save path.
            real = '\\'.join(os.path.realpath(__file__).split('\\')[:-1])
            page.save(f'{real}\\{dir_name}\\out-{e_cont}.jpg', 'JPEG')
            print(dir_name)
| [
"os.getcwd",
"os.chdir",
"os.path.realpath",
"os.mkdir",
"pdf2image.convert_from_path"
] | [((315, 326), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (324, 326), False, 'import os\n'), ((372, 405), 'pdf2image.convert_from_path', 'pdf2image.convert_from_path', (['file'], {}), '(file)\n', (399, 405), False, 'import pdf2image\n'), ((434, 449), 'os.chdir', 'os.chdir', (['volta'], {}), '(volta)\n', (442, 449), False, 'import os\n'), ((904, 919), 'os.chdir', 'os.chdir', (['volta'], {}), '(volta)\n', (912, 919), False, 'import os\n'), ((757, 773), 'os.chdir', 'os.chdir', (['folder'], {}), '(folder)\n', (765, 773), False, 'import os\n'), ((838, 854), 'os.mkdir', 'os.mkdir', (['folder'], {}), '(folder)\n', (846, 854), False, 'import os\n'), ((875, 891), 'os.chdir', 'os.chdir', (['folder'], {}), '(folder)\n', (883, 891), False, 'import os\n'), ((950, 976), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (966, 976), False, 'import os\n')] |
# Copyright (c) 2017, Carnegie Mellon University. All rights reserved.
#
# Use of the K-NRM package is subject to the terms of the software license set
# forth in the LICENSE file included with this software, and also available at
# https://github.com/AdeDZY/K-NRM/blob/master/LICENSE
from setuptools import setup
from setuptools import find_packages
# Package metadata for the knrm distribution; install_requires pins the
# runtime dependencies (numpy, traitlets, tensorflow).
setup(name='knrm',
      version='0',
      description='knrm',
      author='<NAME> and <NAME>',
      install_requires=['numpy', 'traitlets', 'tensorflow'],
      packages=find_packages()
      )
| [
"setuptools.find_packages"
] | [((528, 543), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (541, 543), False, 'from setuptools import find_packages\n')] |
#Imports
import config
import telebot
from db_manager import SQL
import os
#Globals
# NOTE(review): module-level mutable state shared across all chats served by
# this single bot process; concurrent users can overwrite each other's
# current_* values — TODO confirm the single-user assumption.
restaurants = {"12345":"abc"}  # restaurant id -> secret key (hard-coded)
current_restaurant_id = ""
current_restaurant_key = ""
couriers = {"12345":"abc"}  # courier id -> secret key (hard-coded)
current_courier_id = ""
current_courier_key = ""
current_courier_altitude = 0
current_courier_longitude = 0
#DB + BOT CONNECTION
token = config.API_KEY
bot = telebot.TeleBot(token)
# database.db lives next to this script.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
db_path = os.path.join(BASE_DIR, "database.db")
db = SQL(db_path)
# One-time reply keyboards for each conversation step (labels in Russian).
keyboard1 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard1.row('Клиент', 'Ресторан', "Курьер")
keyboard2 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard2.row("Сделать заказ", "Проверить статус заказа")
keyboard3 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard3.row("Да, досталяйте туда же", "Нет, я сейчас введу новые")
keyboard4 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard4.row("Москва", "Санкт-Петербург")
keyboard5 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard5.row("Да, сохраняйте", "Нет, спасибо.")
keyboard6 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard6.row("KFC", "McDonalds")
keyboard7 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard7.row("Получить список заказов", "Заказ отдан курьеру", "Что в заказе?")
keyboard8 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard8.row("Уже зарегистрированы", "Хотим подключиться")
keyboard9 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard9.row("Cэндвичи", "Бургеры")
keyboard10 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard10.row("Баскеты", "Твистеры")
keyboard11 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard11.row("Сандерс Баскет", "Баскет Дуэт")
keyboard11.row("Домашний Баскет", "Баскет L")
keyboard12 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard12.row("Да", "Нет")
keyboard13 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard13.row("Закончить оформление заказа", "Прлолжить оформление заказа")
keyboard14 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard14.row("Уже зарегистрирован", "Хочу подключиться")
keyboard15 = telebot.types.ReplyKeyboardMarkup(one_time_keyboard=True)
keyboard15.row("Готов принять заказ", "Передал заказ клиенту")
# Per-conversation scratch state for the client currently placing an order.
current_client = {"username": None, "name": None, "surname": None, "phone": None, "adress": None, "city": None}
foods = ""
current_client_for_restaurant = {"order_id": None}
@bot.message_handler(commands=['start'])
def start_message(message):
    """Entry point (/start): ask whether the user is a client, restaurant or courier."""
    msg = bot.reply_to(message, 'Здравствуйте, скажите, пожалуйста, кто Вы?', reply_markup=keyboard1)
    bot.register_next_step_handler(msg, process_client_1)
@bot.message_handler(content_types=['text'])
def process_client_1(message):
    """Route the user's role choice to the matching flow.

    Bug fix: the original used independent ``if`` statements with a trailing
    ``else`` bound only to the last one, so choosing 'Клиент' or 'Ресторан'
    *also* fired the 'In development' fallback.  An if/elif chain makes the
    fallback fire only for unrecognized input.
    """
    text = message.text
    if text == "Клиент":
        msg = bot.reply_to(message, 'Отлично, что бы Вы хотели сделать?', reply_markup=keyboard2)
        bot.register_next_step_handler(msg, process_client_2)
    elif text == 'Ресторан':
        msg = bot.reply_to(message, 'Отлично! Вы уже зарегистрированы в нашей системе?', reply_markup=keyboard8)
        bot.register_next_step_handler(msg, process_restaurant_1)
    elif text == "Курьер":
        msg = bot.reply_to(message, 'Отлично! Вы уже зарегистрированы в нашей системе?', reply_markup=keyboard14)
        bot.register_next_step_handler(msg, process_courier_1)
    else:
        bot.send_message(message.chat.id, 'In development')
def process_courier_1(message):
    """Branch: an existing courier logs in; new couriers are pointed to support."""
    if message.text == "Уже зарегистрирован":
        msg = bot.reply_to(message, 'Введите, пожалуйста, Ваш ID курьера')
        bot.register_next_step_handler(msg, process_courier_1_1)
    if message.text == "Хочу подключиться":
        bot.send_message(message.chat.id, "Напишите мне в Telegram на t.me/makarbaderko для подключения к нашей сети доставки")
def process_courier_1_1(message):
    """Store the courier id (module global) and ask for the matching key."""
    global current_courier_id
    current_courier_id = message.text
    msg = bot.reply_to(message, 'Введите, пожалуйста, Ваш key курьера')
    bot.register_next_step_handler(msg, process_courier_1_2)
def process_courier_1_2(message):
    """Check the courier's key; on success show the courier action menu.

    NOTE(review): an unknown id raises KeyError and a wrong key silently
    ends the conversation — TODO: report the failure to the user.
    """
    global current_courier_id, current_courier_key
    current_courier_key = message.text
    if couriers[current_courier_id] == current_courier_key:
        msg = bot.reply_to(message, 'Что бы Вы хотели сделать?', reply_markup=keyboard15)
        bot.register_next_step_handler(msg, process_courier_2)
@bot.message_handler(content_types=['location'])
def process_courier_2(message):
    """Courier menu loop: hand out a random open order or record a delivery.

    Registered both as a location handler and as a next-step handler.
    """
    if message.text == "Готов принять заказ":
        data = db.get_random_order()
        bot.send_message(message.chat.id, data)
        msg = bot.reply_to(message, 'Что бы Вы хотели сделать?', reply_markup=keyboard15)
        bot.register_next_step_handler(msg, process_courier_2)
    if message.text == "Передал заказ клиенту":
        msg = bot.reply_to(message, 'Введите, пожалуйста, номер переданного клиенту заказа?')
        bot.register_next_step_handler(msg, process_courier_3)
def process_courier_3(message):
    """Mark the given order FINISHED, confirm, and return to the courier menu."""
    # Change the order status (original comment: "Смена статуса заказа").
    db.update_status(message.text, "FINISHED")
    bot.send_message(message.chat.id, "Статус заказа изменен на передан клиенту. Заказ в скором времени будет удален. Деньги будут перечислены Вам в течении 24-х часов")
    msg = bot.reply_to(message, 'Что бы Вы хотели сделать?', reply_markup=keyboard15)
    bot.register_next_step_handler(msg, process_courier_2)
def process_client_2(message):
    """Start an order: reuse saved details for returning users, else collect them."""
    if message.text == "Сделать заказ":
        username = message.from_user.username
        current_client["username"] = username
        if db.user_exists(current_client["username"]) == True:
            msg = bot.reply_to(message, 'Вы уже заказывали у нас, можем ли мы использовать данные с прошлого заказа?', reply_markup=keyboard3)
            bot.register_next_step_handler(msg, process_client_3_yes)
        else:
            bot.send_message(message.chat.id, 'Сейчас мы попросим ввести Ваши данные, для нашей службы доставки.')
            process_client_4(message)
    else:
        bot.send_message(message.chat.id, 'Поскольку в гашей системе пока нет зарегистрированных курьеров, поэтому статус заказа узнать нельзя.')
def process_client_3_yes(message):
    """Either re-collect delivery details or jump straight to restaurant choice."""
    if message.text == "Нет, я сейчас введу новые":
        msg = bot.reply_to(message, 'Выберите, пожалуйста, Ваш город', reply_markup=keyboard4)
        bot.register_next_step_handler(msg, process_client_4)
    else:
        process_client_7_1(message)
def process_client_4(message):
    """Record the city, then ask for the street address."""
    global current_client
    current_client["city"] = message.text
    msg = bot.reply_to(message, 'Введите, пожалуйста, Ваш адрес.')
    bot.register_next_step_handler(msg, process_client_5)
def process_client_5(message):
    """Record the address, then ask for the phone number."""
    global current_client
    current_client["adress"] = message.text
    msg = bot.reply_to(message, 'Введите, пожалуйста, Ваш номер телефона.')
    bot.register_next_step_handler(msg, process_client_6)
def process_client_6(message):
    """Record the phone number, then offer to save the details."""
    global current_client
    current_client ["phone"] = message.text
    msg = bot.reply_to(message, 'Хотите ли Вы, чтобы мы сохранили Ваши данные у себя, для упрощения создания заказа Вами в будущем?', reply_markup=keyboard5)
    bot.register_next_step_handler(msg, process_client_7)
def process_client_7(message):
    """Optionally persist the client's details, then ask where to order.

    NOTE(review): db.add_user is commented out, so "save" currently stores
    nothing and both branches behave identically.
    """
    if message.text == "Да, сохраняйте":
        #db.add_user(username=current_client["username"], name=current_client["name"], surname=current_client["surname"], city=current_client["city"], phone=current_client["city"], adress=current_client["adress"])
        msg = bot.reply_to(message, 'Где бы Вы хотели заказать еду?', reply_markup=keyboard6)
        bot.register_next_step_handler(msg, process_client_8)
    else:
        msg = bot.reply_to(message, 'Где бы Вы хотели заказать еду?', reply_markup=keyboard6)
        bot.register_next_step_handler(msg, process_client_8)
def process_client_7_1(message):
    """Shortcut for returning users: go straight to restaurant choice."""
    #db.add_user(username=current_client["username"], name=current_client["name"], surname=current_client["surname"], city=current_client["city"], phone=current_client["city"], adress=current_client["adress"])
    msg = bot.reply_to(message, 'Где бы Вы хотели заказать еду?', reply_markup=keyboard6)
    bot.register_next_step_handler(msg, process_client_8)
def process_client_8(message):
    """Restaurant chosen: show that restaurant's menu categories."""
    if message.text == "KFC":
        msg = bot.reply_to(message, 'Что бы Вы хотели заказать?', reply_markup=keyboard10)
        bot.register_next_step_handler(msg, process_client_8_1)
    if message.text == "McDonalds":
        msg = bot.reply_to(message, 'Что бы Вы хотели заказать?', reply_markup=keyboard9)
        bot.register_next_step_handler(msg, process_client_8_2)
def process_client_8_1(message):
    """KFC category chosen; only 'Баскеты' is implemented so far."""
    if message.text == "Баскеты":
        msg = bot.reply_to(message, 'Что бы Вы хотели заказать?', reply_markup=keyboard11)
        bot.register_next_step_handler(msg, process_client_9_1)
    if message.text == "Твистеры":
        pass
def process_client_8_2(message):
    """McDonalds categories: not implemented yet."""
    if message.text == "Cэндвичи":
        pass
    if message.text == "Бургеры":
        pass
def process_client_9_1(message):
    """Append the chosen item to the order string and ask whether to add more."""
    global foods
    foods += str(message.text)
    foods += " "
    msg = bot.reply_to(message, 'Что-нибудь еще?', reply_markup=keyboard12)
    bot.register_next_step_handler(msg, process_client_10)
def process_client_10(message):
    """Loop back to the menu or move on to order confirmation."""
    if message.text == "Да":
        msg = bot.reply_to(message, 'Что бы Вы хотели заказать?', reply_markup=keyboard10)
        bot.register_next_step_handler(msg, process_client_8_1)
    else:
        msg = bot.reply_to(message, 'Закончим?', reply_markup=keyboard13)
        bot.register_next_step_handler(msg, process_client_11)
def process_client_11(message):
    """Finish the flow: tell the client the order was handed to delivery.

    NOTE(review): the collected order (``foods``) is never written to the
    database here — TODO confirm where orders are persisted.
    """
    bot.send_message(message.chat.id, "Заказ передан в службу доставки. Ожидайте. По всем вопросам звоните на +8 (800) 555-35-35")
def process_restaurant_1(message):
    """Branch: an existing restaurant logs in; new ones are pointed to support."""
    if message.text == "Уже зарегистрированы":
        msg = bot.reply_to(message, 'Введите, пожалуйста, Ваш ID ресторана')
        bot.register_next_step_handler(msg, process_restaurant_1_1)
    if message.text == "Хотим подключиться":
        bot.send_message(message.chat.id, "Напишите нам в Telegram на t.me/makarbaderko для подключения к нашей сети доставки")
def process_restaurant_1_1(message):
    """Store the restaurant id (module global) and ask for the matching key."""
    global current_restaurant_id
    current_restaurant_id = message.text
    msg = bot.reply_to(message, 'Введите, пожалуйста, Ваш key ресторана')
    bot.register_next_step_handler(msg, process_restaurant_1_2)
def process_restaurant_1_2(message):
    """Check the restaurant's key; on success show the restaurant menu.

    NOTE(review): as with couriers, an unknown id raises KeyError and a
    wrong key silently ends the conversation.
    """
    global current_restaurant_id, current_restaurant_key
    current_restaurant_key = message.text
    if restaurants[current_restaurant_id] == current_restaurant_key:
        msg = bot.reply_to(message, 'Что бы Вы хотели сделать?', reply_markup=keyboard7)
        bot.register_next_step_handler(msg, process_restaurant_2)
def process_restaurant_2(message):
    """Restaurant menu loop: list orders, mark one delivered, or show its items."""
    if message.text == "Получить список заказов":
        data = db.get_all_orders()
        new_data = ""
        for tupl in data:
            # Each row: (order number, order contents, courier).
            new_data += f"Номер заказа: {tupl[0]} Состав заказа: {tupl[1]} Курьер: {tupl[2]}\n"
        bot.send_message(message.chat.id, new_data)
        msg = bot.reply_to(message, 'Что бы Вы хотели сделать?', reply_markup=keyboard7)
        bot.register_next_step_handler(msg, process_restaurant_2)
    if message.text == "Заказ отдан курьеру":
        msg = bot.reply_to(message, 'Введите, пожалуйста, номер переданного заказа.')
        bot.register_next_step_handler(msg, process_restaurant_2_1)
    if message.text == "Что в заказе?":
        msg = bot.reply_to(message, 'Введите, пожалуйста, номер заказа, по которму идет поиск')
        bot.register_next_step_handler(msg, process_restaurant_2_2)
def process_restaurant_2_1(message):
    """Mark an order as handed to the courier, confirm, and reshow the menu.

    Expects the order number as plain text; ``int()`` raises ValueError on
    non-numeric input (TODO: validate and re-prompt instead of crashing).
    Fix: removed a leftover debug ``print`` of the parsed order number.
    """
    order_id = int(message.text)
    db.update_status(order_id, "BEEN_DELIVERED")
    bot.send_message(message.chat.id, "Статус заказа изменен на: <NAME>")
    msg = bot.reply_to(message, 'Что бы Вы хотели сделать?', reply_markup=keyboard7)
    bot.register_next_step_handler(msg, process_restaurant_2)
def process_restaurant_2_2(message):
    """Send the contents of the requested order, then reshow the menu."""
    data = db.get_food(message.text)
    # First matching row, column 1 — presumably the order contents; confirm
    # against db.get_food's schema.
    new_data = data[0][1]
    bot.send_message(message.chat.id, new_data)
    msg = bot.reply_to(message, 'Что бы Вы хотели сделать?', reply_markup=keyboard7)
    bot.register_next_step_handler(msg, process_restaurant_2)
bot.polling() | [
"os.path.join",
"db_manager.SQL",
"telebot.types.ReplyKeyboardMarkup",
"os.path.abspath",
"telebot.TeleBot"
] | [((358, 380), 'telebot.TeleBot', 'telebot.TeleBot', (['token'], {}), '(token)\n', (373, 380), False, 'import telebot\n'), ((445, 482), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""database.db"""'], {}), "(BASE_DIR, 'database.db')\n", (457, 482), False, 'import os\n'), ((488, 500), 'db_manager.SQL', 'SQL', (['db_path'], {}), '(db_path)\n', (491, 500), False, 'from db_manager import SQL\n'), ((514, 571), 'telebot.types.ReplyKeyboardMarkup', 'telebot.types.ReplyKeyboardMarkup', ([], {'one_time_keyboard': '(True)'}), '(one_time_keyboard=True)\n', (547, 571), False, 'import telebot\n'), ((631, 688), 'telebot.types.ReplyKeyboardMarkup', 'telebot.types.ReplyKeyboardMarkup', ([], {'one_time_keyboard': '(True)'}), '(one_time_keyboard=True)\n', (664, 688), False, 'import telebot\n'), ((760, 817), 'telebot.types.ReplyKeyboardMarkup', 'telebot.types.ReplyKeyboardMarkup', ([], {'one_time_keyboard': '(True)'}), '(one_time_keyboard=True)\n', (793, 817), False, 'import telebot\n'), ((900, 957), 'telebot.types.ReplyKeyboardMarkup', 'telebot.types.ReplyKeyboardMarkup', ([], {'one_time_keyboard': '(True)'}), '(one_time_keyboard=True)\n', (933, 957), False, 'import telebot\n'), ((1014, 1071), 'telebot.types.ReplyKeyboardMarkup', 'telebot.types.ReplyKeyboardMarkup', ([], {'one_time_keyboard': '(True)'}), '(one_time_keyboard=True)\n', (1047, 1071), False, 'import telebot\n'), ((1134, 1191), 'telebot.types.ReplyKeyboardMarkup', 'telebot.types.ReplyKeyboardMarkup', ([], {'one_time_keyboard': '(True)'}), '(one_time_keyboard=True)\n', (1167, 1191), False, 'import telebot\n'), ((1239, 1296), 'telebot.types.ReplyKeyboardMarkup', 'telebot.types.ReplyKeyboardMarkup', ([], {'one_time_keyboard': '(True)'}), '(one_time_keyboard=True)\n', (1272, 1296), False, 'import telebot\n'), ((1391, 1448), 'telebot.types.ReplyKeyboardMarkup', 'telebot.types.ReplyKeyboardMarkup', ([], {'one_time_keyboard': '(True)'}), '(one_time_keyboard=True)\n', (1424, 1448), False, 'import telebot\n'), ((1522, 
1579), 'telebot.types.ReplyKeyboardMarkup', 'telebot.types.ReplyKeyboardMarkup', ([], {'one_time_keyboard': '(True)'}), '(one_time_keyboard=True)\n', (1555, 1579), False, 'import telebot\n'), ((1631, 1688), 'telebot.types.ReplyKeyboardMarkup', 'telebot.types.ReplyKeyboardMarkup', ([], {'one_time_keyboard': '(True)'}), '(one_time_keyboard=True)\n', (1664, 1688), False, 'import telebot\n'), ((1741, 1798), 'telebot.types.ReplyKeyboardMarkup', 'telebot.types.ReplyKeyboardMarkup', ([], {'one_time_keyboard': '(True)'}), '(one_time_keyboard=True)\n', (1774, 1798), False, 'import telebot\n'), ((1907, 1964), 'telebot.types.ReplyKeyboardMarkup', 'telebot.types.ReplyKeyboardMarkup', ([], {'one_time_keyboard': '(True)'}), '(one_time_keyboard=True)\n', (1940, 1964), False, 'import telebot\n'), ((2007, 2064), 'telebot.types.ReplyKeyboardMarkup', 'telebot.types.ReplyKeyboardMarkup', ([], {'one_time_keyboard': '(True)'}), '(one_time_keyboard=True)\n', (2040, 2064), False, 'import telebot\n'), ((2156, 2213), 'telebot.types.ReplyKeyboardMarkup', 'telebot.types.ReplyKeyboardMarkup', ([], {'one_time_keyboard': '(True)'}), '(one_time_keyboard=True)\n', (2189, 2213), False, 'import telebot\n'), ((2287, 2344), 'telebot.types.ReplyKeyboardMarkup', 'telebot.types.ReplyKeyboardMarkup', ([], {'one_time_keyboard': '(True)'}), '(one_time_keyboard=True)\n', (2320, 2344), False, 'import telebot\n'), ((408, 433), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (423, 433), False, 'import os\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-10 20:53
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated by Django 1.11.5: adds two per-week stock snapshot
    tables, StockProductCDS (quantity per CDS) and StockProductDis
    (quantity per district)."""

    dependencies = [
        ("bdiadmin", "0013_auto_20170319_1415"),
        ("stock", "0019_stockproductprov"),
    ]

    operations = [
        migrations.CreateModel(
            name="StockProductCDS",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("year", models.PositiveIntegerField(default=2017)),
                ("week", models.CharField(max_length=3)),
                ("product", models.CharField(max_length=50)),
                ("quantity", models.FloatField(default=0.0)),
                (
                    "cds",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE, to="bdiadmin.CDS"
                    ),
                ),
            ],
        ),
        migrations.CreateModel(
            name="StockProductDis",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("year", models.PositiveIntegerField(default=2017)),
                ("week", models.CharField(max_length=3)),
                ("product", models.CharField(max_length=50)),
                ("quantity", models.FloatField(default=0.0)),
                (
                    "district",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="bdiadmin.District",
                    ),
                ),
            ],
        ),
    ]
| [
"django.db.models.FloatField",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.PositiveIntegerField",
"django.db.models.CharField"
] | [((523, 616), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (539, 616), False, 'from django.db import migrations, models\n'), ((777, 818), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(2017)'}), '(default=2017)\n', (804, 818), False, 'from django.db import migrations, models\n'), ((846, 876), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(3)'}), '(max_length=3)\n', (862, 876), False, 'from django.db import migrations, models\n'), ((907, 938), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (923, 938), False, 'from django.db import migrations, models\n'), ((970, 1000), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0.0)'}), '(default=0.0)\n', (987, 1000), False, 'from django.db import migrations, models\n'), ((1068, 1154), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""bdiadmin.CDS"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'bdiadmin.CDS')\n", (1085, 1154), False, 'from django.db import migrations, models\n'), ((1395, 1488), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1411, 1488), False, 'from django.db import migrations, models\n'), ((1649, 1690), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'default': '(2017)'}), '(default=2017)\n', (1676, 1690), False, 'from django.db import migrations, models\n'), ((1718, 1748), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(3)'}), 
'(max_length=3)\n', (1734, 1748), False, 'from django.db import migrations, models\n'), ((1779, 1810), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1795, 1810), False, 'from django.db import migrations, models\n'), ((1842, 1872), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0.0)'}), '(default=0.0)\n', (1859, 1872), False, 'from django.db import migrations, models\n'), ((1945, 2036), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""bdiadmin.District"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'bdiadmin.District')\n", (1962, 2036), False, 'from django.db import migrations, models\n')] |
# -*- coding: UTF-8 -*-
"""
Source processing routines
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import warnings
from collections import OrderedDict
from astropy.cosmology import default_cosmology
import numpy as np
import os
import pysynphot as S
import astropy.table as at
from . import io
from . import passband
def load_source(sourcenames):
    """
    Loads sources

    Parameters
    ----------
    sourcenames : array-like
        The source names. Passed to :py:func:`source_synphot.io.read_source`

    Returns
    -------
    sources : dict
        The dictionary of source spectra; names that fail to load are
        skipped with a RuntimeWarning.

    See Also
    --------
    :py:func:`source_synphot.io.read_source`
    """
    sources = OrderedDict()
    # Accept a scalar name or any array-like of names.
    if np.isscalar(sourcenames):
        sourcenames = np.array(sourcenames, ndmin=1)
    else:
        sourcenames = np.array(sourcenames).flatten()
    # (removed unused local ``nsource``)
    for source in sourcenames:
        try:
            thissource = io.read_source(source)
        except Exception:
            message = 'Source {} not loaded'.format(source)
            warnings.warn(message, RuntimeWarning)
            continue
        sources[source] = thissource
    return sources
def pre_process_source(source, sourcemag, sourcepb, sourcez, smooth=True):
    """
    Pre-process a source at some redshift ``sourcez`` back to the rest-frame
    and normalize it to have magnitude ``sourcemag`` in passband ``sourcepb``

    Parameters
    ----------
    sourcespec : str
        The source spectrum filename
    sourcemag : float
        The magnitude of the source spectrum in passband ``sourcepb``
    sourcepb : :py:class:`pysynphot.spectrum.ArraySpectralElement`
        The passband in which `source` has magnitude ``sourcemag``
    sourcez : float
        The redshift of `source`
    smooth : bool, optional
        Smooth the spectrum (default: True)
        NOTE(review): currently unused in the body — TODO confirm.

    Returns
    -------
    source : :py:class:`pysynphot.ArraySpectrum`
        The de-redshifted, normalized and optionally smoothed spectrum

    See Also
    --------
    :py:func:`astropy.table.Table.read`
    """
    inspec = None
    inspecz = np.nan
    inspecmag = np.nan
    inspecpb = None
    # Look the source up in the packaged source table for its redshift/mag.
    source_table_file = os.path.join('sources', 'sourcetable.txt')
    source_table_file = io.get_pkgfile(source_table_file)
    source_table = at.Table.read(source_table_file, format='ascii')
    ind = (source_table['specname'] == source)
    nmatch = len(source_table['specname'][ind])
    if nmatch == 1:
        # load the file and the info
        # NOTE(review): ``inspecpb`` stays None on this path, so the
        # passband-loading branch below runs with None — TODO confirm.
        inspec = source_table['specname'][ind][0]
        inspecz = source_table['redshift'][ind][0]
        inspecmag = source_table['g'][ind][0] # for now, just normalize the g-band mag
    elif nmatch == 0:
        # Fall through; the warning below fires only if inspec is still None.
        message = 'Spectrum {} not listed in lookup table'.format(source)
        pass
    else:
        message = 'Spectrum {} not uniquely listed in lookup table'.format(source)
        pass
    if inspec is None:
        # Table lookup failed: fall back entirely on the caller's arguments.
        warnings.warn(message, RuntimeWarning)
        inspec = source
        inspecz = sourcez
        inspecmag = sourcemag
        inspecpb = sourcepb
    if not os.path.exists(inspec):
        message = 'Spectrum {} could not be found'.format(inspec)
        raise ValueError(message)
    try:
        spec = at.Table.read(inspec, names=('wave','flux'), format='ascii')
    except Exception as e:
        message = 'Could not read file {}'.format(source)
        raise ValueError(message)
    # If inspecpb is already a passband-like object, use it as-is; otherwise
    # treat it as a name and resolve it through passband.load_pbs.
    if hasattr(inspecpb,'wave') and hasattr(inspecpb, 'throughput'):
        pass
    else:
        pbs = passband.load_pbs([inspecpb], 0.)
        try:
            inspecpb = pbs[inspecpb][0]
        except KeyError as e:
            message = 'Could not load passband {}'.format(inspecpb)
            raise RuntimeError(message)
    try:
        inspecmag = float(inspecmag)
    except (TypeError, ValueError) as e:
        message = 'Source magnitude {} could not be interpreted as a float'.format(inspecmag)
        raise ValueError(message)
    try:
        inspecz = float(inspecz)
    except (TypeError, ValueError) as e:
        message = 'Source redshift {} could not be interpreted as a float'.format(inspecz)
        raise ValueError(message)
    if inspecz < 0 :
        message = 'Source must have positive definite cosmological redshift'
        raise ValueError(message)
    inspec = S.ArraySpectrum(spec['wave'], spec['flux'], fluxunits='flam')
    try:
        # NOTE(review): renormalizes to the *argument* sourcemag, not the
        # table-derived inspecmag parsed above — confirm which is intended.
        inspec = inspec.renorm(sourcemag, 'ABmag', inspecpb)
        inspec.convert('flam')
    except Exception as e:
        message = 'Could not renormalize spectrum {}'.format(inspec)
        raise RuntimeError(message)
    if inspecz > 0:
        # Blueshift back to the rest frame, then undo the distance dimming
        # using the distance modulus at inspecz.
        zblue = 1./(1+inspecz) - 1.
        inspec_rest = inspec.redshift(zblue)
        inspec_rest.convert('flam')
        c = default_cosmology.get()
        mu = c.distmod(inspecz)
        out = inspec_rest*(10.**(0.4*mu.value))
    else:
        out = inspec
    # TODO renorm is basic and just calculates dmag = RNval - what the original spectrum's mag is
    # and renormalizes - there's some sanity checking for overlaps
    # we can do this without using it and relying on the .passband routines
    return out
| [
"os.path.exists",
"collections.OrderedDict",
"numpy.isscalar",
"os.path.join",
"pysynphot.ArraySpectrum",
"numpy.array",
"astropy.cosmology.default_cosmology.get",
"warnings.warn",
"astropy.table.Table.read"
] | [((732, 745), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (743, 745), False, 'from collections import OrderedDict\n'), ((753, 777), 'numpy.isscalar', 'np.isscalar', (['sourcenames'], {}), '(sourcenames)\n', (764, 777), True, 'import numpy as np\n'), ((2274, 2316), 'os.path.join', 'os.path.join', (['"""sources"""', '"""sourcetable.txt"""'], {}), "('sources', 'sourcetable.txt')\n", (2286, 2316), False, 'import os\n'), ((2394, 2442), 'astropy.table.Table.read', 'at.Table.read', (['source_table_file'], {'format': '"""ascii"""'}), "(source_table_file, format='ascii')\n", (2407, 2442), True, 'import astropy.table as at\n'), ((4434, 4495), 'pysynphot.ArraySpectrum', 'S.ArraySpectrum', (["spec['wave']", "spec['flux']"], {'fluxunits': '"""flam"""'}), "(spec['wave'], spec['flux'], fluxunits='flam')\n", (4449, 4495), True, 'import pysynphot as S\n'), ((801, 831), 'numpy.array', 'np.array', (['sourcenames'], {'ndmin': '(1)'}), '(sourcenames, ndmin=1)\n', (809, 831), True, 'import numpy as np\n'), ((3035, 3073), 'warnings.warn', 'warnings.warn', (['message', 'RuntimeWarning'], {}), '(message, RuntimeWarning)\n', (3048, 3073), False, 'import warnings\n'), ((3200, 3222), 'os.path.exists', 'os.path.exists', (['inspec'], {}), '(inspec)\n', (3214, 3222), False, 'import os\n'), ((3349, 3410), 'astropy.table.Table.read', 'at.Table.read', (['inspec'], {'names': "('wave', 'flux')", 'format': '"""ascii"""'}), "(inspec, names=('wave', 'flux'), format='ascii')\n", (3362, 3410), True, 'import astropy.table as at\n'), ((4879, 4902), 'astropy.cosmology.default_cosmology.get', 'default_cosmology.get', ([], {}), '()\n', (4900, 4902), False, 'from astropy.cosmology import default_cosmology\n'), ((864, 885), 'numpy.array', 'np.array', (['sourcenames'], {}), '(sourcenames)\n', (872, 885), True, 'import numpy as np\n'), ((1124, 1162), 'warnings.warn', 'warnings.warn', (['message', 'RuntimeWarning'], {}), '(message, RuntimeWarning)\n', (1137, 1162), False, 'import warnings\n')] |
import pandas as pd
import numpy as np
import pickle
from sklearn.metrics.pairwise import linear_kernel
from sklearn.feature_extraction.text import TfidfVectorizer
import csv
from kivy.app import App
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
class Jaden:
    """Retrieval QA helper: ranks stored answers by TF-IDF cosine similarity."""

    _model = None       # fitted TfidfVectorizer (unpickled in __init__)
    _vector = None      # TF-IDF matrix of the corpus (unpickled in __init__)
    _vocabulary = None  # rows of dataset/tarih.csv (row 0 is the header)

    def __init__(self):
        # Use context managers so the pickle/CSV file handles are closed
        # (the original leaked them via pickle.load(open(...))).
        with open('_model.sav', 'rb') as f:
            self._model = pickle.load(f)
        with open('_vectorized.sav', 'rb') as f:
            self._vector = pickle.load(f)
        with open('dataset/tarih.csv', newline='', encoding='utf8') as f:
            self._vocabulary = list(csv.reader(f))

    def find_answer(self, question):
        """Return the answers (column 1) of the 5 most similar stored questions.

        Bug fix: the original referenced bare ``_model``/``_vector`` inside the
        method, which raises NameError at runtime — class attributes must be
        reached through ``self``.
        """
        cos_sim = linear_kernel(self._model.transform([question]), self._vector).flatten()
        top = np.ndarray.argsort(-cos_sim)[:5]
        # i+1 skips the CSV header row, matching the original indexing.
        return [self._vocabulary[i + 1][1] for i in top]
class LoginScreen(GridLayout):
    """Two-column kivy form with username and password inputs."""
    def __init__(self, **kwargs):
        super(LoginScreen, self).__init__(**kwargs)
        self.cols = 2
        self.add_widget(Label(text='User Name'))
        self.username = TextInput(multiline=False)
        self.add_widget(self.username)
        self.add_widget(Label(text='password'))
        # password=True masks the typed characters.
        self.password = TextInput(password=True, multiline=False)
        self.add_widget(self.password)
class MyApp(App):
    """Kivy application whose root widget is the login form.

    NOTE(review): the Jaden retrieval model defined above is never wired
    into this UI — TODO confirm the intended integration.
    """
    def build(self):
        return LoginScreen()
MyApp().run() | [
"kivy.uix.label.Label",
"numpy.ndarray.argsort",
"kivy.uix.textinput.TextInput",
"csv.reader"
] | [((1260, 1286), 'kivy.uix.textinput.TextInput', 'TextInput', ([], {'multiline': '(False)'}), '(multiline=False)\n', (1269, 1286), False, 'from kivy.uix.textinput import TextInput\n'), ((1398, 1439), 'kivy.uix.textinput.TextInput', 'TextInput', ([], {'password': '(True)', 'multiline': '(False)'}), '(password=True, multiline=False)\n', (1407, 1439), False, 'from kivy.uix.textinput import TextInput\n'), ((644, 657), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (654, 657), False, 'import csv\n'), ((879, 908), 'numpy.ndarray.argsort', 'np.ndarray.argsort', (['(-_cos_sim)'], {}), '(-_cos_sim)\n', (897, 908), True, 'import numpy as np\n'), ((1211, 1234), 'kivy.uix.label.Label', 'Label', ([], {'text': '"""User Name"""'}), "(text='User Name')\n", (1216, 1234), False, 'from kivy.uix.label import Label\n'), ((1350, 1372), 'kivy.uix.label.Label', 'Label', ([], {'text': '"""password"""'}), "(text='password')\n", (1355, 1372), False, 'from kivy.uix.label import Label\n')] |
from django import template
register = template.Library()
@register.inclusion_tag('quiz/correct_answer.html', takes_context=True)
def correct_answer_for_all(context, question):
"""
processes the correct answer based on a given question object
if the answer is incorrect, informs the user
"""
answers = question.get_answers()
incorrect_list = context.get('incorrect_questions', [])
if question.id in incorrect_list:
user_was_incorrect = True
else:
user_was_incorrect = False
return {'previous': {'answers': answers},
'user_was_incorrect': user_was_incorrect}
@register.filter
def answer_choice_to_string(question, answer):
return question.answer_choice_to_string(answer)
| [
"django.template.Library"
] | [((40, 58), 'django.template.Library', 'template.Library', ([], {}), '()\n', (56, 58), False, 'from django import template\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 29 14:14:45 2020
@author: Nikki
"""
import numpy as np
import cv2
import transform as tform
import sys
import math
import scipy.spatial
import markers
###---------------------------------------------------------------------------
# Allows video to be initialized using a string
#
# returns - video_path - path to video to be used
# returns - GPS_pix - matrix to convert from GPS to pixel
# - pix_GPS - matrix to convert from pixel to GPS
# - origin - approximate camera location in GPS
###
def sample_select(name):
if name == 'aot3':
video_path = 'C:/Users/Nikki/Documents/work/inputs-outputs/video/AOTsample3.mp4'
elif name == 'mrb3':
video_path = 'C:/Users/Nikki/Documents/work/inputs-outputs/video/20190422_153844_DA4A.mkv'
elif name == 'aot1':
video_path = 'C:/Users/Nikki/Documents/work/inputs-outputs/video/AOTsample1_1.mp4'
elif name == 'aot2':
video_path = 'C:/Users/Nikki/Documents/work/inputs-outputs/video/AOTsample2_1.mp4'
GPS_pix, pix_GPS, origin = get_transform(name)
return video_path, GPS_pix, pix_GPS, origin
###---------------------------------------------------------------------------
# Used to find transformation matrices between GPS and pixel space and vice versa.
#
# returns - GPS_pix - matrix to convert from GPS to pixel
# - pix_GPS - matrix to convert from pixel to GPS
###
def get_transform(name):
if name == 'mrb3':
x, y, origin = markers.mrb3_markers()
elif name == 'aot1':
x, y, origin = markers.aot_1_markers()
elif name == 'aot2':
x, y, origin = markers.aot_2_markers()
elif name == 'aot3':
x, y, origin = markers.aot_3_markers()
else:
print("Camera name invalid")
GPS_pix = tform.get_best_transform(x, y)
pix_GPS = tform.get_best_transform(y, x)
return(GPS_pix, pix_GPS, origin)
###---------------------------------------------------------------------------
# Given photo points at people's feet, draws '6 foot' ellipse around them.
# Most useful of these functions for implementing with yolo bounding box points.
#
# returns - img - input frame with ellipses drawn at specified points
###
def draw_radius(frame, pts, GPS_pix, pix_GPS, origin):
bounds = four_pts(pts, pix_GPS, GPS_pix, origin)
mytree = load_tree(pts, pix_GPS)
img, count = draw_ellipse(frame, bounds, pts, mytree, pix_GPS)
return img, count
###---------------------------------------------------------------------------
# Given an array of photo pts and conversion matrices, converts to GPS, finds
# defining points of 6 ft circle at camera angle, and converts back to pixel coords.
#
# returns - final - array of arrays of 4 pixel coordinates to be used to define each ellipse's axes
###
def four_pts(pts, pix_GPS, GPS_pix, origin):
#convert to gps coords
gps = tform.transform_pt_array(pts, pix_GPS)
final = []
#calculate locations six feet away at given bearings and add to array
for pt in gps:
degrees = calc_bearing(pt, origin)
for angle in degrees:
a = six_ft(pt, angle)
final.append(a)
#convert list of pts to numpy array
final = np.array([final])
final = np.squeeze(np.asarray(final))
#check if final has any elements?
#convert to pixel coords
final = tform.transform_pt_array(final, GPS_pix)
return final
###---------------------------------------------------------------------------
# Given a point, calculates it's bearing in relation to the approximate camera location.
# This enables GPS circle points to be found such that they define an ellipse within pixel
# plane that appears properly scaled. Uses haversine formula.
# Formula from: https://www.movable-type.co.uk/scripts/latlong.html
#
# returns - array of 4 bearings in degrees, clockwise from north. First is bearing
# between camera and given pt)
###
def calc_bearing(pt, origin):
#convert GPS coords to radians
la1 = math.radians(origin[0])
la2 = math.radians(pt[0])
lo1 = math.radians(origin[1])
lo2 = math.radians(pt[1])
#perform calculation
y = math.sin(lo2-lo1) * math.cos(la2)
x = math.cos(la1) * math.sin(la2) - math.sin(la1) * math.cos(la2) * math.cos(lo2-lo1)
b = math.atan2(y,x)
#convert to degrees
b = math.degrees(b)
#fill arrray with 90 degree increments
bearing = 4 * [None]
i = 0
while i < 4:
bearing[i] = (b + i * 90) % 360
i = i + 1
return bearing
###---------------------------------------------------------------------------
# Loads array of pts into a ckd tree for to enable easy finding of nearest pt
#
# returns - ckd tree
###
def load_tree(pts, pix_GPS):
gps = tform.transform_pt_array(pts, pix_GPS)
mytree = scipy.spatial.cKDTree(gps)
return mytree
###---------------------------------------------------------------------------
# Given array of defining points of several ellipses (endpoints of axes) and
# corresponding center points, draws ellipses on given image
#
# returns - all_img - given image with ellipses drawn onto it
###
def draw_ellipse(frame, pts, centers, mytree, pix_GPS):
#define qualities of the ellipse
thickness = -1
line_type = 8
#set transparency
alpha = 0.25
#create separate image for ellipses to be drawn into
ellipses = frame.copy()
#iterate through list of ellipse points and centers, drawing each into ellipse image
i = 0
count = 0
gps_centers = tform.transform_pt_array(centers, pix_GPS)
while i < pts.shape[0]:
a = pts[i]
b = pts[i + 1]
c = pts[i + 2]
d = pts[i + 3]
minor = int((math.sqrt(math.pow((c[0]-a[0]), 2) + math.pow((c[1]-a[1]), 2)))/2)
major = int((math.sqrt(math.pow((d[0]-b[0]), 2) + math.pow((d[1]-b[1]), 2)))/2)
if centers.size <= 2:
centers = np.array([centers])
center = centers[i//4]
x = int(center[0])
y = int(center[1])
if centers.size > 2:
gps_center = gps_centers[i//4]
dist, ind = mytree.query(gps_center, k=2)
closest = mytree.data[ind[1]]
dist = GPS_to_ft(gps_center, closest)
if dist < 6:
cv2.ellipse(ellipses, (x,y), (major, minor), 0, 0, 360, (255, 0, 0), thickness, line_type)
count = count + 1
elif dist < 8:
cv2.ellipse(ellipses, (x,y), (major, minor), 0, 0, 360, (255, 140, 0), thickness, line_type)
elif dist < 10:
cv2.ellipse(ellipses, (x,y), (major, minor), 0, 0, 360, (255, 255, 0), thickness, line_type)
else:
cv2.ellipse(ellipses, (x,y), (major, minor), 0, 0, 360, (0,255,0), thickness, line_type)
else:
cv2.ellipse(ellipses, (x,y), (major, minor), 0, 0, 360, (0,255,0), thickness, line_type)
i = i + 4
#combine original image and ellipse image into one
all_img = cv2.addWeighted(ellipses, alpha, frame, 1-alpha, 0)
return all_img, count
###---------------------------------------------------------------------------
# Given a GPS point and a bearing, finds point six feet away in that direction,
# using haversine formula.
# Formula from: https://www.movable-type.co.uk/scripts/latlong.html
#
# returns - GPS coord 6 ft away
###
def six_ft(pt1, b):
#convert to rad
la1 = math.radians(pt1[0])
lo1 = math.radians(pt1[1])
b = math.radians(b)
#calc latitude and longitude
radius = 20902231
d =(6.0/radius)
la2 = math.asin(math.sin(la1) * math.cos(d) + math.cos(la1) * math.sin(d) * math.cos(b))
lo2 = lo1 + math.atan2((math.sin(b) * math.sin(d) * math.cos(la1)), (math.cos(d) - math.sin(la1) * math.sin(la2)))
#reconvert to GPS standard, degrees
pt2 = (math.degrees(la2), math.degrees(lo2))
return(pt2)
###---------------------------------------------------------------------------
# Given two GPS points, finds distance in ft between them, calulated using
# haversine formula.
#
# returns - distance in ft between given points
###
def GPS_to_ft(pt1, pt2):
#earths rad in ft
radius = 20902231
la1 = math.radians(pt1[0])
la2 = math.radians(pt2[0])
lo1 = math.radians(pt1[1])
lo2 = math.radians(pt2[1])
#la2, lo2 = six_ft(pt1, 90)
a = math.pow(((la2 - la1) / 2), 2)
b = math.cos(la1) * math.cos(la2)
c = math.pow(((lo2 - lo1) / 2), 2)
d = math.sin(a) + b * math.sin(c)
dist = 2 * radius * math.asin(math.sqrt(d))
#print(dist)
return dist
###---------------------------------------------------------------------------
# Following functions are not utilized in video processing code, but were helpful
# during development
###---------------------------------------------------------------------------
###---------------------------------------------------------------------------
# Returns pixel coordinate value of location left-clicked on screen
# Based on:
# https://stackoverflow.com/questions/60066334/get-pixel-coordinates-using-mouse-in-cv2-video-frame-with-python
def get_pixel_coord(video_path):
try:
video_capture = cv2.VideoCapture(video_path)
def mouseHandler(event, x, y, flags, params):
if event == cv2.EVENT_LBUTTONDOWN:
print(x, y)
cv2.namedWindow("result", cv2.WINDOW_NORMAL)
cv2.setMouseCallback("result", mouseHandler)
while(True):
# Capture frame-by-frame
_, frame = video_capture.read()
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.imshow("result", frame)
video_capture.release()
cv2.destroyAllWindows()
except:
video_capture.release()
cv2.destroyAllWindows()
###---------------------------------------------------------------------------
# Given points, draws circles around them
###
def make_circles(frame, centers, size):
size = size[0] // 128
thickness = -1
line_type = 8
for center in centers:
pt = (int(center[0]), int(center[1]))
cv2.circle(frame, pt, size, (0,0,255), thickness, line_type)
###---------------------------------------------------------------------------
# Draws 4 ellipses on video, utilizing most functions in this doc.
###
def test():
# define where video comes from
# video_path = './data/AOTsample3.mp4'
video_path = './data/20190422_153844_DA4A.mkv'
# get transfer function from known GPS and pixel locations
GPS_pix, pix_GPS = get_transform()
# load in sample pts
# a = np.array([36.148342, -86.799332]) #closest lamp
# b = np.array([36.148139, -86.799375]) #lamp across street, right
# c = np.array([36.148349, -86.799135]) #closest left corner of furthest crosswalk dash to right
# d = np.array([36.147740, -86.799218]) #sixth tree down the street
a = np.array([36.144187, -86.799707]) #far left street pole
b = np.array([36.143990, -86.799594]) #pole by bike sign
c = np.array([36.143997, -86.800180]) #corner of sidewalk
d = np.array([36.144203, -86.800149]) #right of sidewalk stripe closest to camera
x = np.array([a,b,c,d])
pts = tform.transform_pt_array(x, GPS_pix)
print(pts)
# start video
print("Video from: ", video_path )
vid = cv2.VideoCapture(video_path)
try:
while True:
# skip desired number of frames to speed up processing
for i in range (10):
vid.grab()
# read frame
return_value, frame = vid.read()
# if frame doesn't exist, exit
if not return_value:
cv2.destroyWindow('result')
print('Video has ended')
break
# draw ellipse
img, count = draw_radius(frame, pts, GPS_pix, pix_GPS)
cv2.namedWindow("result", cv2.WINDOW_NORMAL)
cv2.imshow("result", img)
if cv2.waitKey(1) & 0xFF == ord('q'): break
# end video, close viewer, stop writing to file
vid.release()
cv2.destroyAllWindows()
# if interrupted, end video, close viewer, stop writing to file
except:
print("Unexpected error:", sys.exc_info()[0])
vid.release()
cv2.destroyAllWindows()
#test() | [
"math.sqrt",
"cv2.imshow",
"math.cos",
"numpy.array",
"cv2.ellipse",
"sys.exc_info",
"cv2.destroyAllWindows",
"cv2.setMouseCallback",
"markers.aot_1_markers",
"numpy.asarray",
"markers.mrb3_markers",
"cv2.addWeighted",
"markers.aot_3_markers",
"cv2.waitKey",
"transform.get_best_transform... | [((1885, 1915), 'transform.get_best_transform', 'tform.get_best_transform', (['x', 'y'], {}), '(x, y)\n', (1909, 1915), True, 'import transform as tform\n'), ((1930, 1960), 'transform.get_best_transform', 'tform.get_best_transform', (['y', 'x'], {}), '(y, x)\n', (1954, 1960), True, 'import transform as tform\n'), ((3007, 3045), 'transform.transform_pt_array', 'tform.transform_pt_array', (['pts', 'pix_GPS'], {}), '(pts, pix_GPS)\n', (3031, 3045), True, 'import transform as tform\n'), ((3346, 3363), 'numpy.array', 'np.array', (['[final]'], {}), '([final])\n', (3354, 3363), True, 'import numpy as np\n'), ((3490, 3530), 'transform.transform_pt_array', 'tform.transform_pt_array', (['final', 'GPS_pix'], {}), '(final, GPS_pix)\n', (3514, 3530), True, 'import transform as tform\n'), ((4176, 4199), 'math.radians', 'math.radians', (['origin[0]'], {}), '(origin[0])\n', (4188, 4199), False, 'import math\n'), ((4210, 4229), 'math.radians', 'math.radians', (['pt[0]'], {}), '(pt[0])\n', (4222, 4229), False, 'import math\n'), ((4240, 4263), 'math.radians', 'math.radians', (['origin[1]'], {}), '(origin[1])\n', (4252, 4263), False, 'import math\n'), ((4274, 4293), 'math.radians', 'math.radians', (['pt[1]'], {}), '(pt[1])\n', (4286, 4293), False, 'import math\n'), ((4464, 4480), 'math.atan2', 'math.atan2', (['y', 'x'], {}), '(y, x)\n', (4474, 4480), False, 'import math\n'), ((4517, 4532), 'math.degrees', 'math.degrees', (['b'], {}), '(b)\n', (4529, 4532), False, 'import math\n'), ((4944, 4982), 'transform.transform_pt_array', 'tform.transform_pt_array', (['pts', 'pix_GPS'], {}), '(pts, pix_GPS)\n', (4968, 4982), True, 'import transform as tform\n'), ((5748, 5790), 'transform.transform_pt_array', 'tform.transform_pt_array', (['centers', 'pix_GPS'], {}), '(centers, pix_GPS)\n', (5772, 5790), True, 'import transform as tform\n'), ((7270, 7323), 'cv2.addWeighted', 'cv2.addWeighted', (['ellipses', 'alpha', 'frame', '(1 - alpha)', '(0)'], {}), '(ellipses, 
alpha, frame, 1 - alpha, 0)\n', (7285, 7323), False, 'import cv2\n'), ((7708, 7728), 'math.radians', 'math.radians', (['pt1[0]'], {}), '(pt1[0])\n', (7720, 7728), False, 'import math\n'), ((7739, 7759), 'math.radians', 'math.radians', (['pt1[1]'], {}), '(pt1[1])\n', (7751, 7759), False, 'import math\n'), ((7768, 7783), 'math.radians', 'math.radians', (['b'], {}), '(b)\n', (7780, 7783), False, 'import math\n'), ((8510, 8530), 'math.radians', 'math.radians', (['pt1[0]'], {}), '(pt1[0])\n', (8522, 8530), False, 'import math\n'), ((8541, 8561), 'math.radians', 'math.radians', (['pt2[0]'], {}), '(pt2[0])\n', (8553, 8561), False, 'import math\n'), ((8572, 8592), 'math.radians', 'math.radians', (['pt1[1]'], {}), '(pt1[1])\n', (8584, 8592), False, 'import math\n'), ((8603, 8623), 'math.radians', 'math.radians', (['pt2[1]'], {}), '(pt2[1])\n', (8615, 8623), False, 'import math\n'), ((8669, 8697), 'math.pow', 'math.pow', (['((la2 - la1) / 2)', '(2)'], {}), '((la2 - la1) / 2, 2)\n', (8677, 8697), False, 'import math\n'), ((8746, 8774), 'math.pow', 'math.pow', (['((lo2 - lo1) / 2)', '(2)'], {}), '((lo2 - lo1) / 2, 2)\n', (8754, 8774), False, 'import math\n'), ((11397, 11430), 'numpy.array', 'np.array', (['[36.144187, -86.799707]'], {}), '([36.144187, -86.799707])\n', (11405, 11430), True, 'import numpy as np\n'), ((11463, 11495), 'numpy.array', 'np.array', (['[36.14399, -86.799594]'], {}), '([36.14399, -86.799594])\n', (11471, 11495), True, 'import numpy as np\n'), ((11526, 11558), 'numpy.array', 'np.array', (['[36.143997, -86.80018]'], {}), '([36.143997, -86.80018])\n', (11534, 11558), True, 'import numpy as np\n'), ((11590, 11623), 'numpy.array', 'np.array', (['[36.144203, -86.800149]'], {}), '([36.144203, -86.800149])\n', (11598, 11623), True, 'import numpy as np\n'), ((11678, 11700), 'numpy.array', 'np.array', (['[a, b, c, d]'], {}), '([a, b, c, d])\n', (11686, 11700), True, 'import numpy as np\n'), ((11713, 11749), 'transform.transform_pt_array', 
'tform.transform_pt_array', (['x', 'GPS_pix'], {}), '(x, GPS_pix)\n', (11737, 11749), True, 'import transform as tform\n'), ((11837, 11865), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (11853, 11865), False, 'import cv2\n'), ((1553, 1575), 'markers.mrb3_markers', 'markers.mrb3_markers', ([], {}), '()\n', (1573, 1575), False, 'import markers\n'), ((3387, 3404), 'numpy.asarray', 'np.asarray', (['final'], {}), '(final)\n', (3397, 3404), True, 'import numpy as np\n'), ((4332, 4351), 'math.sin', 'math.sin', (['(lo2 - lo1)'], {}), '(lo2 - lo1)\n', (4340, 4351), False, 'import math\n'), ((4352, 4365), 'math.cos', 'math.cos', (['la2'], {}), '(la2)\n', (4360, 4365), False, 'import math\n'), ((8132, 8149), 'math.degrees', 'math.degrees', (['la2'], {}), '(la2)\n', (8144, 8149), False, 'import math\n'), ((8151, 8168), 'math.degrees', 'math.degrees', (['lo2'], {}), '(lo2)\n', (8163, 8168), False, 'import math\n'), ((8708, 8721), 'math.cos', 'math.cos', (['la1'], {}), '(la1)\n', (8716, 8721), False, 'import math\n'), ((8724, 8737), 'math.cos', 'math.cos', (['la2'], {}), '(la2)\n', (8732, 8737), False, 'import math\n'), ((8785, 8796), 'math.sin', 'math.sin', (['a'], {}), '(a)\n', (8793, 8796), False, 'import math\n'), ((9518, 9546), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (9534, 9546), False, 'import cv2\n'), ((9702, 9746), 'cv2.namedWindow', 'cv2.namedWindow', (['"""result"""', 'cv2.WINDOW_NORMAL'], {}), "('result', cv2.WINDOW_NORMAL)\n", (9717, 9746), False, 'import cv2\n'), ((9755, 9799), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""result"""', 'mouseHandler'], {}), "('result', mouseHandler)\n", (9775, 9799), False, 'import cv2\n'), ((10099, 10122), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (10120, 10122), False, 'import cv2\n'), ((10548, 10610), 'cv2.circle', 'cv2.circle', (['frame', 'pt', 'size', '(0, 0, 255)', 'thickness', 'line_type'], {}), '(frame, pt, size, (0, 
0, 255), thickness, line_type)\n', (10558, 10610), False, 'import cv2\n'), ((12637, 12660), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (12658, 12660), False, 'import cv2\n'), ((1633, 1656), 'markers.aot_1_markers', 'markers.aot_1_markers', ([], {}), '()\n', (1654, 1656), False, 'import markers\n'), ((4374, 4387), 'math.cos', 'math.cos', (['la1'], {}), '(la1)\n', (4382, 4387), False, 'import math\n'), ((4390, 4403), 'math.sin', 'math.sin', (['la2'], {}), '(la2)\n', (4398, 4403), False, 'import math\n'), ((4438, 4457), 'math.cos', 'math.cos', (['(lo2 - lo1)'], {}), '(lo2 - lo1)\n', (4446, 4457), False, 'import math\n'), ((6153, 6172), 'numpy.array', 'np.array', (['[centers]'], {}), '([centers])\n', (6161, 6172), True, 'import numpy as np\n'), ((7094, 7189), 'cv2.ellipse', 'cv2.ellipse', (['ellipses', '(x, y)', '(major, minor)', '(0)', '(0)', '(360)', '(0, 255, 0)', 'thickness', 'line_type'], {}), '(ellipses, (x, y), (major, minor), 0, 0, 360, (0, 255, 0),\n thickness, line_type)\n', (7105, 7189), False, 'import cv2\n'), ((8803, 8814), 'math.sin', 'math.sin', (['c'], {}), '(c)\n', (8811, 8814), False, 'import math\n'), ((8854, 8866), 'math.sqrt', 'math.sqrt', (['d'], {}), '(d)\n', (8863, 8866), False, 'import math\n'), ((10031, 10058), 'cv2.imshow', 'cv2.imshow', (['"""result"""', 'frame'], {}), "('result', frame)\n", (10041, 10058), False, 'import cv2\n'), ((10185, 10208), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (10206, 10208), False, 'import cv2\n'), ((12407, 12451), 'cv2.namedWindow', 'cv2.namedWindow', (['"""result"""', 'cv2.WINDOW_NORMAL'], {}), "('result', cv2.WINDOW_NORMAL)\n", (12422, 12451), False, 'import cv2\n'), ((12464, 12489), 'cv2.imshow', 'cv2.imshow', (['"""result"""', 'img'], {}), "('result', img)\n", (12474, 12489), False, 'import cv2\n'), ((12830, 12853), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (12851, 12853), False, 'import cv2\n'), ((1710, 1733), 
'markers.aot_2_markers', 'markers.aot_2_markers', ([], {}), '()\n', (1731, 1733), False, 'import markers\n'), ((4406, 4419), 'math.sin', 'math.sin', (['la1'], {}), '(la1)\n', (4414, 4419), False, 'import math\n'), ((4422, 4435), 'math.cos', 'math.cos', (['la2'], {}), '(la2)\n', (4430, 4435), False, 'import math\n'), ((6535, 6630), 'cv2.ellipse', 'cv2.ellipse', (['ellipses', '(x, y)', '(major, minor)', '(0)', '(0)', '(360)', '(255, 0, 0)', 'thickness', 'line_type'], {}), '(ellipses, (x, y), (major, minor), 0, 0, 360, (255, 0, 0),\n thickness, line_type)\n', (6546, 6630), False, 'import cv2\n'), ((7884, 7897), 'math.sin', 'math.sin', (['la1'], {}), '(la1)\n', (7892, 7897), False, 'import math\n'), ((7900, 7911), 'math.cos', 'math.cos', (['d'], {}), '(d)\n', (7908, 7911), False, 'import math\n'), ((7944, 7955), 'math.cos', 'math.cos', (['b'], {}), '(b)\n', (7952, 7955), False, 'import math\n'), ((8013, 8026), 'math.cos', 'math.cos', (['la1'], {}), '(la1)\n', (8021, 8026), False, 'import math\n'), ((8030, 8041), 'math.cos', 'math.cos', (['d'], {}), '(d)\n', (8038, 8041), False, 'import math\n'), ((12197, 12224), 'cv2.destroyWindow', 'cv2.destroyWindow', (['"""result"""'], {}), "('result')\n", (12214, 12224), False, 'import cv2\n'), ((1791, 1814), 'markers.aot_3_markers', 'markers.aot_3_markers', ([], {}), '()\n', (1812, 1814), False, 'import markers\n'), ((6703, 6800), 'cv2.ellipse', 'cv2.ellipse', (['ellipses', '(x, y)', '(major, minor)', '(0)', '(0)', '(360)', '(255, 140, 0)', 'thickness', 'line_type'], {}), '(ellipses, (x, y), (major, minor), 0, 0, 360, (255, 140, 0),\n thickness, line_type)\n', (6714, 6800), False, 'import cv2\n'), ((7914, 7927), 'math.cos', 'math.cos', (['la1'], {}), '(la1)\n', (7922, 7927), False, 'import math\n'), ((7930, 7941), 'math.sin', 'math.sin', (['d'], {}), '(d)\n', (7938, 7941), False, 'import math\n'), ((7985, 7996), 'math.sin', 'math.sin', (['b'], {}), '(b)\n', (7993, 7996), False, 'import math\n'), ((7999, 8010), 'math.sin', 
'math.sin', (['d'], {}), '(d)\n', (8007, 8010), False, 'import math\n'), ((8044, 8057), 'math.sin', 'math.sin', (['la1'], {}), '(la1)\n', (8052, 8057), False, 'import math\n'), ((8060, 8073), 'math.sin', 'math.sin', (['la2'], {}), '(la2)\n', (8068, 8073), False, 'import math\n'), ((9953, 9967), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (9964, 9967), False, 'import cv2\n'), ((12505, 12519), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (12516, 12519), False, 'import cv2\n'), ((12781, 12795), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (12793, 12795), False, 'import sys\n'), ((5947, 5971), 'math.pow', 'math.pow', (['(c[0] - a[0])', '(2)'], {}), '(c[0] - a[0], 2)\n', (5955, 5971), False, 'import math\n'), ((5974, 5998), 'math.pow', 'math.pow', (['(c[1] - a[1])', '(2)'], {}), '(c[1] - a[1], 2)\n', (5982, 5998), False, 'import math\n'), ((6035, 6059), 'math.pow', 'math.pow', (['(d[0] - b[0])', '(2)'], {}), '(d[0] - b[0], 2)\n', (6043, 6059), False, 'import math\n'), ((6062, 6086), 'math.pow', 'math.pow', (['(d[1] - b[1])', '(2)'], {}), '(d[1] - b[1], 2)\n', (6070, 6086), False, 'import math\n'), ((6840, 6937), 'cv2.ellipse', 'cv2.ellipse', (['ellipses', '(x, y)', '(major, minor)', '(0)', '(0)', '(360)', '(255, 255, 0)', 'thickness', 'line_type'], {}), '(ellipses, (x, y), (major, minor), 0, 0, 360, (255, 255, 0),\n thickness, line_type)\n', (6851, 6937), False, 'import cv2\n'), ((6979, 7074), 'cv2.ellipse', 'cv2.ellipse', (['ellipses', '(x, y)', '(major, minor)', '(0)', '(0)', '(360)', '(0, 255, 0)', 'thickness', 'line_type'], {}), '(ellipses, (x, y), (major, minor), 0, 0, 360, (0, 255, 0),\n thickness, line_type)\n', (6990, 7074), False, 'import cv2\n')] |
# mp4.py
# ---------------
# Licensing Information: You are free to use or extend this projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to the University of Illinois at Urbana-Champaign
#
# Created Fall 2018: <NAME>, <NAME>, <NAME>, <NAME> (adapted from a U. Penn assignment)
# Modified Spring 2020: <NAME>, <NAME>, and <NAME>
# Modified Fall 2020: <NAME>, <NAME>
# Modified Spring 2021 by <NAME> (<EMAIL>)
import collections
START_TAG = "START"
END_TAG = "END"
def evaluate_accuracies(predicted_sentences, tag_sentences):
"""
:param predicted_sentences:
:param tag_sentences:
:return: (Accuracy, correct word-tag counter, wrong word-tag counter)
"""
assert len(predicted_sentences) == len(tag_sentences), "The number of predicted sentence {} does not match the true number {}".format(len(predicted_sentences), len(tag_sentences))
correct_wordtagcounter = {}
wrong_wordtagcounter = {}
correct = 0
wrong = 0
for pred_sentence, tag_sentence in zip(predicted_sentences, tag_sentences):
assert len(pred_sentence) == len(tag_sentence), "The predicted sentence length {} does not match the true length {}".format(len(pred_sentence), len(tag_sentence))
for pred_wordtag, real_wordtag in zip(pred_sentence, tag_sentence):
assert pred_wordtag[0] == real_wordtag[0], "The predicted sentence WORDS do not match with the original sentence, you should only be predicting the tags"
word = pred_wordtag[0]
if real_wordtag[1] in [START_TAG, END_TAG]:
continue
if pred_wordtag[1] == real_wordtag[1]:
if word not in correct_wordtagcounter.keys():
correct_wordtagcounter[word] = collections.Counter()
correct_wordtagcounter[word][real_wordtag[1]] += 1
correct += 1
else:
if word not in wrong_wordtagcounter.keys():
wrong_wordtagcounter[word] = collections.Counter()
wrong_wordtagcounter[word][real_wordtag[1]] += 1
wrong += 1
accuracy = correct / (correct + wrong)
return accuracy, correct_wordtagcounter, wrong_wordtagcounter
def specialword_accuracies(train_sentences, predicted_sentences, tag_sentences):
"""
:param train_sentences:
:param predicted_sentences:
:param tag_sentences:
:return: Accuracy on words with multiple tags, and accuracy on words that do not occur in the training sentences
"""
seen_words, words_with_multitags_set = get_word_tag_statistics(train_sentences)
multitags_correct = 0
multitags_wrong = 0
unseen_correct = 0
unseen_wrong = 0
for i in range(len(predicted_sentences)):
for j in range(len(predicted_sentences[i])):
word = tag_sentences[i][j][0]
tag = tag_sentences[i][j][1]
if tag in [START_TAG, END_TAG]:
continue
if predicted_sentences[i][j][1] == tag:
if word in words_with_multitags_set:
multitags_correct += 1
if word not in seen_words:
unseen_correct += 1
else:
if word in words_with_multitags_set:
multitags_wrong += 1
if word not in seen_words:
unseen_wrong += 1
multitag_accuracy = multitags_correct / (multitags_correct + multitags_wrong)
total_unseen = unseen_correct + unseen_wrong
unseen_accuracy = unseen_correct / total_unseen if total_unseen > 0 else 0
return multitag_accuracy, unseen_accuracy
def topk_wordtagcounter(wordtagcounter, k):
top_items = sorted(wordtagcounter.items(), key=lambda item: sum(item[1].values()), reverse=True)[:k]
top_items = list(map(lambda item: (item[0], dict(item[1])), top_items))
return top_items
def load_dataset(data_file):
sentences = []
with open(data_file, 'r', encoding='UTF-8') as f:
for line in f:
sentence = [(START_TAG, START_TAG)]
raw = line.split()
for pair in raw:
splitted = pair.split('=')
if (len(splitted) < 2):
continue
else:
tag = splitted[-1]
# find word
word = splitted[0]
for element in splitted[1:-1]:
word += '/' + element
sentence.append((word.lower(), tag))
sentence.append((END_TAG, END_TAG))
sentences.append(sentence)
return sentences
def strip_tags(sentences):
'''
Strip tags
input: list of sentences
each sentence is a list of (word,tag) pairs
output: list of sentences
each sentence is a list of words (no tags)
'''
sentences_without_tags = []
for sentence in sentences:
sentence_without_tags = []
for i in range(len(sentence)):
pair = sentence[i]
sentence_without_tags.append(pair[0])
sentences_without_tags.append(sentence_without_tags)
return sentences_without_tags
def get_word_tag_statistics(data_set):
# get set of all seen words and set of words with multitags
word_tags = collections.defaultdict(lambda: set())
word_set = set()
for sentence in data_set:
for word, tag in sentence:
word_tags[word].add(tag)
word_set.add(word)
return word_set, set(map(lambda elem: elem[0], filter(lambda elem: len(elem[1]) > 1, word_tags.items())))
| [
"collections.Counter"
] | [((1845, 1866), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (1864, 1866), False, 'import collections\n'), ((2090, 2111), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (2109, 2111), False, 'import collections\n')] |
from ex2.sigmoid import sigmoid
def sigmoidGradient(z):
"""computes the gradient of the sigmoid function
evaluated at z. This should work regardless if z is a matrix or a
vector. In particular, if z is a vector or matrix, you should return
the gradient for each element."""
# ====================== YOUR CODE HERE ======================
# Instructions: Compute the gradient of the sigmoid function evaluated at
# each value of z (z can be a matrix, vector or scalar).
g = sigmoid(z) * (1 - sigmoid(z))
# =============================================================
return g
| [
"ex2.sigmoid.sigmoid"
] | [((521, 531), 'ex2.sigmoid.sigmoid', 'sigmoid', (['z'], {}), '(z)\n', (528, 531), False, 'from ex2.sigmoid import sigmoid\n'), ((539, 549), 'ex2.sigmoid.sigmoid', 'sigmoid', (['z'], {}), '(z)\n', (546, 549), False, 'from ex2.sigmoid import sigmoid\n')] |
# -*- coding: utf-8 -*-
from __future__ import (print_function, unicode_literals, absolute_import,
division)
from pusher.config import Config
from pusher.request import Request
from pusher.sync import SynchronousBackend
from pusher.util import GET, POST, text, validate_channel
import collections
import json
import six
class RequestMethod(object):
def __init__(self, pusher, f):
self.pusher = pusher
self.f = f
def __call__(self, *args, **kwargs):
return self.pusher.backend.send_request(self.make_request(*args, **kwargs))
def make_request(self, *args, **kwargs):
return self.f(self.pusher, *args, **kwargs)
def doc_string(doc):
def decorator(f):
f.__doc__ = doc
return f
return decorator
def request_method(f):
@property
@doc_string(f.__doc__)
def wrapped(self):
return RequestMethod(self, f)
return wrapped
def join_attributes(attributes):
for attr in attributes:
if not isinstance(attr, six.text_type):
raise TypeError('Each attr should be %s' % text)
return six.text_type(',').join(attributes)
class Pusher(object):
    """Client for the Pusher HTTP API.

    This client supports various backend adapters to support various http
    libraries available in the python ecosystem.

    :param config: a pusher.Config instance
    :param backend: an object that responds to the send_request(request)
                    method. If none is provided, a
                    python.sync.SynchronousBackend instance is created.
    """
    def __init__(self, config, backend=None):
        if not isinstance(config, Config):
            raise TypeError("config should be a pusher.Config object")
        self.backend = backend or SynchronousBackend(config)
        self.config = config

    @request_method
    def trigger(self, channels, event_name, data, socket_id=None):
        '''
        Trigger an event on one or more channels, see:
        http://pusher.com/docs/rest_api#method-post-event
        '''
        if isinstance(channels, six.string_types) or not isinstance(channels, (collections.Sized, collections.Iterable)):
            raise TypeError("Expected a collection of channels (each channel should be %s)" % text)
        if len(channels) > 10:
            raise ValueError("Too many channels")
        for channel in channels:
            validate_channel(channel)
        if not isinstance(event_name, six.text_type):
            raise TypeError("event_name should be %s" % text)
        if len(event_name) > 200:
            raise ValueError("event_name too long")
        # Non-text payloads are serialized to JSON before the size check.
        if not isinstance(data, six.text_type):
            data = json.dumps(data)
        if len(data) > 10240:
            raise ValueError("Too much data")
        params = {
            'name': event_name,
            'channels': channels,
            'data': data
        }
        if socket_id:
            if not isinstance(socket_id, six.text_type):
                raise TypeError("Socket ID should be %s" % text)
            params['socket_id'] = socket_id
        return Request(self.config, POST, "/apps/%s/events" % self.config.app_id, params)

    @request_method
    def channels_info(self, prefix_filter=None, attributes=()):
        '''
        Get information on multiple channels, see:
        http://pusher.com/docs/rest_api#method-get-channels

        :param prefix_filter: only return channels whose name starts with this.
        :param attributes: iterable of channel attribute names to fetch.
        '''
        # Default is an immutable tuple rather than a mutable list literal:
        # a shared `[]` default argument is a classic Python pitfall.
        params = {}
        if attributes:
            params['info'] = join_attributes(attributes)
        if prefix_filter:
            params['filter_by_prefix'] = prefix_filter
        return Request(self.config, GET, "/apps/%s/channels" % self.config.app_id, params)

    @request_method
    def channel_info(self, channel, attributes=()):
        '''
        Get information on a specific channel, see:
        http://pusher.com/docs/rest_api#method-get-channel

        :param channel: channel name (validated before the request is built).
        :param attributes: iterable of channel attribute names to fetch.
        '''
        validate_channel(channel)
        params = {}
        if attributes:
            params['info'] = join_attributes(attributes)
        return Request(self.config, GET, "/apps/%s/channels/%s" % (self.config.app_id, channel), params)

    @request_method
    def users_info(self, channel):
        '''
        Fetch user ids currently subscribed to a presence channel
        http://pusher.com/docs/rest_api#method-get-users
        '''
        validate_channel(channel)
        return Request(self.config, GET, "/apps/%s/channels/%s/users" % (self.config.app_id, channel))
| [
"json.dumps",
"pusher.util.validate_channel",
"pusher.sync.SynchronousBackend",
"six.text_type",
"pusher.request.Request"
] | [((3131, 3205), 'pusher.request.Request', 'Request', (['self.config', 'POST', "('/apps/%s/events' % self.config.app_id)", 'params'], {}), "(self.config, POST, '/apps/%s/events' % self.config.app_id, params)\n", (3138, 3205), False, 'from pusher.request import Request\n'), ((3623, 3698), 'pusher.request.Request', 'Request', (['self.config', 'GET', "('/apps/%s/channels' % self.config.app_id)", 'params'], {}), "(self.config, GET, '/apps/%s/channels' % self.config.app_id, params)\n", (3630, 3698), False, 'from pusher.request import Request\n'), ((3916, 3941), 'pusher.util.validate_channel', 'validate_channel', (['channel'], {}), '(channel)\n', (3932, 3941), False, 'from pusher.util import GET, POST, text, validate_channel\n'), ((4058, 4151), 'pusher.request.Request', 'Request', (['self.config', 'GET', "('/apps/%s/channels/%s' % (self.config.app_id, channel))", 'params'], {}), "(self.config, GET, '/apps/%s/channels/%s' % (self.config.app_id,\n channel), params)\n", (4065, 4151), False, 'from pusher.request import Request\n'), ((4360, 4385), 'pusher.util.validate_channel', 'validate_channel', (['channel'], {}), '(channel)\n', (4376, 4385), False, 'from pusher.util import GET, POST, text, validate_channel\n'), ((4402, 4494), 'pusher.request.Request', 'Request', (['self.config', 'GET', "('/apps/%s/channels/%s/users' % (self.config.app_id, channel))"], {}), "(self.config, GET, '/apps/%s/channels/%s/users' % (self.config.\n app_id, channel))\n", (4409, 4494), False, 'from pusher.request import Request\n'), ((1117, 1135), 'six.text_type', 'six.text_type', (['""","""'], {}), "(',')\n", (1130, 1135), False, 'import six\n'), ((1783, 1809), 'pusher.sync.SynchronousBackend', 'SynchronousBackend', (['config'], {}), '(config)\n', (1801, 1809), False, 'from pusher.sync import SynchronousBackend\n'), ((2415, 2440), 'pusher.util.validate_channel', 'validate_channel', (['channel'], {}), '(channel)\n', (2431, 2440), False, 'from pusher.util import GET, POST, text, 
validate_channel\n'), ((2713, 2729), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (2723, 2729), False, 'import json\n')] |
"""
To run this module directly
"""
# pylint: disable=no-else-return
import argparse
import os
try:
from . import prefetch_from_file
except ImportError:
import prefetch_from_file
try:
from . import prefetch_from_url
except ImportError:
import prefetch_from_url
def validate_filepath_or_url(filepath_or_url=""):
    """Accept the value if it looks like a URL or is a readable file.

    Returns the input unchanged on success; raises ValueError otherwise
    (argparse turns that into a usage error when used as a `type`).
    """
    if "://" in filepath_or_url:
        return filepath_or_url
    if os.path.isfile(filepath_or_url) and os.access(filepath_or_url, os.R_OK):
        return filepath_or_url
    raise ValueError(filepath_or_url)
def build_argument_parser():
    """Assemble and return the command-line interface for this module."""
    arg_parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    # Positional input, validated eagerly by the `type` callable.
    arg_parser.add_argument(
        "filepath_or_url",
        nargs="?",
        type=validate_filepath_or_url,
        default="bigfix_prefetch/__init__.py",
        help="Path to file or URL to create prefetch for.",
    )
    arg_parser.add_argument(
        "--prefetch-block",
        action="store_true",
        default=False,
        help="generate a prefetch block instead of prefetch statement",
    )
    arg_parser.add_argument(
        "--override-url",
        default="http://localhost/unknown",
        help="URL to use in prefetch statement if providing file path",
    )
    return arg_parser
def main(argv=None):
    """Parse arguments and print/return the prefetch line for the input.

    Tries the input as a local file first; on FileNotFoundError it falls
    back to treating the input as a URL to download and hash.

    :param argv: optional argument list (defaults to sys.argv via argparse).
    :return: the generated prefetch string.
    """
    argparser = build_argument_parser()
    args = argparser.parse_args(argv)
    # NOTE(review): args.prefetch_block is parsed but not used here yet.
    try:
        # Local file path: hash the file contents directly.
        prefetch_result = prefetch_from_file.file_to_prefetch(
            args.filepath_or_url, args.override_url
        )
    except FileNotFoundError:
        # Not a readable file: fetch and hash from the URL instead.
        prefetch_result = prefetch_from_url.url_to_prefetch(args.filepath_or_url)
    print(prefetch_result)
    return prefetch_result


# Guarded so importing this module no longer triggers an immediate run;
# the original called main() unconditionally at import time.
if __name__ == "__main__":
    main()
| [
"argparse.ArgumentParser",
"os.access",
"os.path.isfile",
"prefetch_from_url.url_to_prefetch",
"prefetch_from_file.file_to_prefetch"
] | [((678, 781), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n RawDescriptionHelpFormatter)\n', (701, 781), False, 'import argparse\n'), ((1642, 1718), 'prefetch_from_file.file_to_prefetch', 'prefetch_from_file.file_to_prefetch', (['args.filepath_or_url', 'args.override_url'], {}), '(args.filepath_or_url, args.override_url)\n', (1677, 1718), False, 'import prefetch_from_file\n'), ((423, 454), 'os.path.isfile', 'os.path.isfile', (['filepath_or_url'], {}), '(filepath_or_url)\n', (437, 454), False, 'import os\n'), ((459, 494), 'os.access', 'os.access', (['filepath_or_url', 'os.R_OK'], {}), '(filepath_or_url, os.R_OK)\n', (468, 494), False, 'import os\n'), ((1859, 1914), 'prefetch_from_url.url_to_prefetch', 'prefetch_from_url.url_to_prefetch', (['args.filepath_or_url'], {}), '(args.filepath_or_url)\n', (1892, 1914), False, 'import prefetch_from_url\n')] |
import numpy as np
from src import const
#TODO: should be imported from aguirregabiria_simple.py
def period_profit(p: np.ndarray, lambdas: np.ndarray, betas_transition=const.betas_transition):
    """
    Correct expected period return profit. See ReadMe for derivation.

    :param p: array of prices.
    :param lambdas: mixture weights over the beta states.
    :param betas_transition: demand elasticities (defaults to const.betas_transition).
    """
    margin_term = (p - const.c) * np.e ** const.α * np.e ** (const.σ_ɛ ** 2 / 2)
    log_prices = np.log(p[:, np.newaxis])
    # p ** -beta for every (price, beta) pair, weighted by the lambdas.
    mixture_term = np.dot(np.e ** (betas_transition * log_prices), lambdas)
    return margin_term * mixture_term
def test_period_profit():
    """period_profit should match the hand-computed expectation for two prices.

    The unused `beta_p_part` intermediate from the original was removed; the
    rounded per-(price, beta) powers are folded directly into summation_part.
    """
    p = np.array([1.4, 1.2])
    lambdas = np.array([0.5, 0.4, 0.1])
    summation_part = np.array([
        0.36443148 * lambdas[0] + 0.43120115 * lambdas[1] + 0.51020408 * lambdas[2],
        0.5787037 * lambdas[0] + 0.63393814 * lambdas[1] + 0.69444444 * lambdas[2],
    ])
    expected = (p - const.c) * np.e ** const.α * np.e ** (const.σ_ɛ ** 2 / 2) * summation_part
    computed = period_profit(p, lambdas)
    # Loose tolerance because the reference powers above are rounded.
    assert np.allclose(expected, computed, rtol=0.05)
"numpy.log",
"numpy.array",
"numpy.allclose"
] | [((518, 538), 'numpy.array', 'np.array', (['[1.4, 1.2]'], {}), '([1.4, 1.2])\n', (526, 538), True, 'import numpy as np\n'), ((553, 578), 'numpy.array', 'np.array', (['[0.5, 0.4, 0.1]'], {}), '([0.5, 0.4, 0.1])\n', (561, 578), True, 'import numpy as np\n'), ((598, 791), 'numpy.array', 'np.array', (['[[np.e ** (-3.0 * 0.33647224), np.e ** (-2.5 * 0.33647224), np.e ** (-2 * \n 0.33647224)], [np.e ** (-3.0 * 0.18232156), np.e ** (-2.5 * 0.18232156),\n np.e ** (-2 * 0.18232156)]]'], {}), '([[np.e ** (-3.0 * 0.33647224), np.e ** (-2.5 * 0.33647224), np.e **\n (-2 * 0.33647224)], [np.e ** (-3.0 * 0.18232156), np.e ** (-2.5 * \n 0.18232156), np.e ** (-2 * 0.18232156)]])\n', (606, 791), True, 'import numpy as np\n'), ((830, 1002), 'numpy.array', 'np.array', (['[0.36443148 * lambdas[0] + 0.43120115 * lambdas[1] + 0.51020408 * lambdas[2\n ], 0.5787037 * lambdas[0] + 0.63393814 * lambdas[1] + 0.69444444 *\n lambdas[2]]'], {}), '([0.36443148 * lambdas[0] + 0.43120115 * lambdas[1] + 0.51020408 *\n lambdas[2], 0.5787037 * lambdas[0] + 0.63393814 * lambdas[1] + \n 0.69444444 * lambdas[2]])\n', (838, 1002), True, 'import numpy as np\n'), ((1177, 1219), 'numpy.allclose', 'np.allclose', (['expected', 'computed'], {'rtol': '(0.05)'}), '(expected, computed, rtol=0.05)\n', (1188, 1219), True, 'import numpy as np\n'), ((410, 434), 'numpy.log', 'np.log', (['p[:, np.newaxis]'], {}), '(p[:, np.newaxis])\n', (416, 434), True, 'import numpy as np\n')] |
import mock
from pyramid.request import Request
from .support import DummyRequest, unittest
from kinto.core import authentication
from kinto.core.authorization import RouteFactory, AuthorizationPolicy
from kinto.core.storage import exceptions as storage_exceptions
class RouteFactoryTest(unittest.TestCase):
    """RouteFactory should derive the required permission from the request."""
    def setUp(self):
        self.record_uri = "/foo/bar"
    def assert_request_resolves_to(self, method, permission, uri=None,
                                   record_not_found=False):
        """Build a fake request for *method*/*uri* and assert the permission."""
        if uri is None:
            uri = self.record_uri
        with mock.patch('kinto.core.utils.current_service') as current_service:
            # Patch current service.
            resource = mock.MagicMock()
            resource.record_id = 1
            if record_not_found:
                resource.model.get_record.side_effect = \
                    storage_exceptions.RecordNotFoundError
            else:
                resource.model.get_record.return_value = 1
            current_service().resource.return_value = resource
            # Do the actual call.
            request = DummyRequest(method=method)
            request.upath_info = uri
            context = RouteFactory(request)
            self.assertEquals(context.required_permission, permission)
    def test_http_unknown_does_not_raise_a_500(self):
        self.assert_request_resolves_to("unknown", None)
    def test_http_get_resolves_in_a_read_permission(self):
        self.assert_request_resolves_to("get", "read")
    def test_http_post_resolves_in_a_create_permission(self):
        self.assert_request_resolves_to("post", "create")
    def test_http_delete_resolves_in_a_write_permission(self):
        self.assert_request_resolves_to("delete", "write")
    def test_http_put_unexisting_record_resolves_in_a_create_permission(self):
        # PUT on a record that does not exist yet means "create".
        with mock.patch('kinto.core.utils.current_service') as current_service:
            # Patch current service.
            resource = mock.MagicMock()
            resource.record_id = 1
            resource.model.get_record.side_effect = \
                storage_exceptions.RecordNotFoundError
            current_service().resource.return_value = resource
            current_service().collection_path = '/buckets/{bucket_id}'
            # Do the actual call.
            request = DummyRequest(method='put')
            request.upath_info = '/buckets/abc/collections/1'
            request.matchdict = {'bucket_id': 'abc'}
            context = RouteFactory(request)
            self.assertEquals(context.required_permission, 'create')
    def test_http_put_existing_record_resolves_in_a_write_permission(self):
        self.assert_request_resolves_to("put", "write")
    def test_http_put_sets_current_record_attribute(self):
        with mock.patch('kinto.core.utils.current_service') as current_service:
            # Patch current service.
            resource = mock.MagicMock()
            resource.record_id = 1
            resource.model.get_record.return_value = mock.sentinel.record
            current_service().resource.return_value = resource
            # Do the actual call.
            request = DummyRequest(method='put')
            context = RouteFactory(request)
            self.assertEquals(context.current_record, mock.sentinel.record)
    def test_http_patch_resolves_in_a_write_permission(self):
        self.assert_request_resolves_to("patch", "write")
    def test_attributes_are_none_with_blank_requests(self):
        request = Request.blank(path='/')
        request.registry = mock.Mock(settings={})
        request.authn_type = 'fxa'
        request.prefixed_userid = property(authentication.prefixed_userid)
        context = RouteFactory(request)
        self.assertIsNone(context.required_permission)
        self.assertIsNone(context.current_record)
        self.assertIsNone(context.resource_name)
        self.assertIsNone(context.get_shared_ids)
    def test_attributes_are_none_with_non_resource_requests(self):
        # A matched route whose service is not a resource yields a bare context.
        basic_service = object()
        request = Request.blank(path='/')
        request.prefixed_userid = property(authentication.prefixed_userid)
        request.matched_route = mock.Mock(pattern='foo')
        request.registry = mock.Mock(cornice_services={'foo': basic_service})
        request.registry.settings = {}
        context = RouteFactory(request)
        self.assertIsNone(context.current_record)
        self.assertIsNone(context.required_permission)
        self.assertIsNone(context.resource_name)
        self.assertIsNone(context.get_shared_ids)
    def test_route_factory_adds_allowed_principals_from_settings(self):
        with mock.patch('kinto.core.utils.current_service') as current_service:
            # Patch current service.
            resource = mock.MagicMock()
            current_service().resource.return_value = resource
            current_service().collection_path = '/buckets'
            # Do the actual call.
            request = DummyRequest(method='post')
            request.current_resource_name = 'bucket'
            request.upath_info = '/buckets'
            request.matchdict = {}
            request.registry = mock.Mock()
            request.registry.settings = {
                'bucket_create_principals': 'fxa:user'
            }
            context = RouteFactory(request)
            self.assertEquals(context.allowed_principals, ['fxa:user'])
class AuthorizationPolicyTest(unittest.TestCase):
    """AuthorizationPolicy.permits() should defer to the route context."""
    def setUp(self):
        self.authz = AuthorizationPolicy()
        self.authz.get_bound_permissions = mock.sentinel.get_bound_perms
        # The context stands in for a RouteFactory instance.
        self.context = mock.MagicMock()
        self.context.get_prefixed_userid.return_value = None
        self.context.allowed_principals = []
        self.context.object_id = mock.sentinel.object_id
        self.context.required_permission = 'read'
        self.principals = []
        self.permission = 'dynamic'
    def test_permits_does_not_refer_to_context_if_permission_is_private(self):
        self.assertFalse(self.authz.permits(None, [], 'private'))
    def test_permits_return_if_authenticated_when_permission_is_private(self):
        self.assertTrue(self.authz.permits(None,
                                           ['system.Authenticated'],
                                           'private'))
    def test_permits_refers_to_context_to_check_permissions(self):
        self.context.check_permission.return_value = True
        allowed = self.authz.permits(self.context, self.principals, 'dynamic')
        self.assertTrue(allowed)
    def test_permits_refers_to_context_to_check_permission_principals(self):
        self.context.check_permission.return_value = False
        self.context.allowed_principals = ['fxa:user']
        allowed = self.authz.permits(
            self.context, ['fxa:user', 'system.Authenticated'], 'dynamic')
        self.assertTrue(allowed)
    def test_permits_reads_the_context_when_permission_is_dynamic(self):
        # 'dynamic' resolves to the context's required_permission ('read').
        self.authz.permits(self.context, self.principals, 'dynamic')
        self.context.check_permission.assert_called_with(
            'read',
            self.principals,
            get_bound_permissions=mock.sentinel.get_bound_perms)
    def test_permits_consider_permission_when_not_dynamic(self):
        self.authz.permits(self.context, self.principals, 'foobar')
        self.context.check_permission.assert_called_with(
            'foobar',
            self.principals,
            get_bound_permissions=mock.sentinel.get_bound_perms)
    def test_permits_prepend_obj_type_to_permission_on_create(self):
        self.context.required_permission = 'create'
        self.context.resource_name = 'record'
        self.authz.permits(self.context, self.principals, 'dynamic')
        self.context.check_permission.assert_called_with(
            'record:create',
            self.principals,
            get_bound_permissions=mock.sentinel.get_bound_perms)
    def test_permits_takes_route_factory_allowed_principals_into_account(self):
        # A principal allowed by settings short-circuits the permission backend.
        self.context.resource_name = 'record'
        self.context.required_permission = 'create'
        self.context.allowed_principals = ['fxa:user']
        has_permission = self.authz.permits(
            self.context, ['fxa:user'], 'dynamic')
        self.context.check_permission.assert_not_called()
        self.assertTrue(has_permission)
    def test_prefixed_userid_is_added_to_principals(self):
        self.context.get_prefixed_userid.return_value = 'fxa:userid'
        self.authz.permits(self.context, self.principals, 'foobar')
        self.context.check_permission.assert_called_with(
            'foobar',
            self.principals + ['fxa:userid', 'fxa_userid'],
            get_bound_permissions=mock.sentinel.get_bound_perms)
    def test_unprefixed_userid_is_removed_from_principals(self):
        self.context.get_prefixed_userid.return_value = 'fxa:userid'
        self.authz.permits(self.context, ['userid'], 'foobar')
        self.context.check_permission.assert_called_with(
            'foobar',
            ['fxa:userid', 'fxa_userid'],
            get_bound_permissions=mock.sentinel.get_bound_perms)
class GuestAuthorizationPolicyTest(unittest.TestCase):
    """Read access on collection listings via individually shared records."""
    def setUp(self):
        self.authz = AuthorizationPolicy()
        self.authz.get_bound_permissions = mock.sentinel.get_bound_perms
        self.request = DummyRequest(method='GET')
        self.context = RouteFactory(self.request)
        self.context.on_collection = True
        # Direct permission check always fails, forcing the shared-records path.
        self.context.check_permission = mock.Mock(return_value=False)
    def test_permits_returns_true_if_collection_and_shared_records(self):
        self.context.fetch_shared_records = mock.MagicMock(return_value=[
            'record1', 'record2'])
        allowed = self.authz.permits(self.context, ['userid'], 'dynamic')
        self.context.fetch_shared_records.assert_called_with(
            'read',
            ['userid'],
            get_bound_permissions=mock.sentinel.get_bound_perms)
        self.assertTrue(allowed)
    def test_permits_does_not_return_true_if_not_collection(self):
        self.context.on_collection = False
        allowed = self.authz.permits(self.context, ['userid'], 'dynamic')
        self.assertFalse(allowed)
    def test_permits_does_not_return_true_if_not_list_operation(self):
        self.context.required_permission = 'create'
        allowed = self.authz.permits(self.context, ['userid'], 'dynamic')
        self.assertFalse(allowed)
        allowed = self.authz.permits(self.context, ['userid'], 'create')
        self.assertFalse(allowed)
    def test_permits_returns_false_if_collection_is_unknown(self):
        self.context.fetch_shared_records = mock.MagicMock(return_value=None)
        allowed = self.authz.permits(self.context, ['userid'], 'dynamic')
        self.context.fetch_shared_records.assert_called_with(
            'read',
            ['userid'],
            get_bound_permissions=mock.sentinel.get_bound_perms)
        self.assertFalse(allowed)
| [
"mock.patch",
"mock.Mock",
"pyramid.request.Request.blank",
"kinto.core.authorization.RouteFactory",
"kinto.core.authorization.AuthorizationPolicy",
"mock.MagicMock"
] | [((3505, 3528), 'pyramid.request.Request.blank', 'Request.blank', ([], {'path': '"""/"""'}), "(path='/')\n", (3518, 3528), False, 'from pyramid.request import Request\n'), ((3556, 3578), 'mock.Mock', 'mock.Mock', ([], {'settings': '{}'}), '(settings={})\n', (3565, 3578), False, 'import mock\n'), ((3707, 3728), 'kinto.core.authorization.RouteFactory', 'RouteFactory', (['request'], {}), '(request)\n', (3719, 3728), False, 'from kinto.core.authorization import RouteFactory, AuthorizationPolicy\n'), ((4052, 4075), 'pyramid.request.Request.blank', 'Request.blank', ([], {'path': '"""/"""'}), "(path='/')\n", (4065, 4075), False, 'from pyramid.request import Request\n'), ((4183, 4207), 'mock.Mock', 'mock.Mock', ([], {'pattern': '"""foo"""'}), "(pattern='foo')\n", (4192, 4207), False, 'import mock\n'), ((4235, 4285), 'mock.Mock', 'mock.Mock', ([], {'cornice_services': "{'foo': basic_service}"}), "(cornice_services={'foo': basic_service})\n", (4244, 4285), False, 'import mock\n'), ((4344, 4365), 'kinto.core.authorization.RouteFactory', 'RouteFactory', (['request'], {}), '(request)\n', (4356, 4365), False, 'from kinto.core.authorization import RouteFactory, AuthorizationPolicy\n'), ((5503, 5524), 'kinto.core.authorization.AuthorizationPolicy', 'AuthorizationPolicy', ([], {}), '()\n', (5522, 5524), False, 'from kinto.core.authorization import RouteFactory, AuthorizationPolicy\n'), ((5621, 5637), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (5635, 5637), False, 'import mock\n'), ((9246, 9267), 'kinto.core.authorization.AuthorizationPolicy', 'AuthorizationPolicy', ([], {}), '()\n', (9265, 9267), False, 'from kinto.core.authorization import RouteFactory, AuthorizationPolicy\n'), ((9414, 9440), 'kinto.core.authorization.RouteFactory', 'RouteFactory', (['self.request'], {}), '(self.request)\n', (9426, 9440), False, 'from kinto.core.authorization import RouteFactory, AuthorizationPolicy\n'), ((9523, 9552), 'mock.Mock', 'mock.Mock', ([], {'return_value': '(False)'}), 
'(return_value=False)\n', (9532, 9552), False, 'import mock\n'), ((9672, 9723), 'mock.MagicMock', 'mock.MagicMock', ([], {'return_value': "['record1', 'record2']"}), "(return_value=['record1', 'record2'])\n", (9686, 9723), False, 'import mock\n'), ((10685, 10718), 'mock.MagicMock', 'mock.MagicMock', ([], {'return_value': 'None'}), '(return_value=None)\n', (10699, 10718), False, 'import mock\n'), ((575, 621), 'mock.patch', 'mock.patch', (['"""kinto.core.utils.current_service"""'], {}), "('kinto.core.utils.current_service')\n", (585, 621), False, 'import mock\n'), ((702, 718), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (716, 718), False, 'import mock\n'), ((1188, 1209), 'kinto.core.authorization.RouteFactory', 'RouteFactory', (['request'], {}), '(request)\n', (1200, 1209), False, 'from kinto.core.authorization import RouteFactory, AuthorizationPolicy\n'), ((1846, 1892), 'mock.patch', 'mock.patch', (['"""kinto.core.utils.current_service"""'], {}), "('kinto.core.utils.current_service')\n", (1856, 1892), False, 'import mock\n'), ((1973, 1989), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (1987, 1989), False, 'import mock\n'), ((2488, 2509), 'kinto.core.authorization.RouteFactory', 'RouteFactory', (['request'], {}), '(request)\n', (2500, 2509), False, 'from kinto.core.authorization import RouteFactory, AuthorizationPolicy\n'), ((2786, 2832), 'mock.patch', 'mock.patch', (['"""kinto.core.utils.current_service"""'], {}), "('kinto.core.utils.current_service')\n", (2796, 2832), False, 'import mock\n'), ((2913, 2929), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (2927, 2929), False, 'import mock\n'), ((3207, 3228), 'kinto.core.authorization.RouteFactory', 'RouteFactory', (['request'], {}), '(request)\n', (3219, 3228), False, 'from kinto.core.authorization import RouteFactory, AuthorizationPolicy\n'), ((4656, 4702), 'mock.patch', 'mock.patch', (['"""kinto.core.utils.current_service"""'], {}), "('kinto.core.utils.current_service')\n", (4666, 4702), 
False, 'import mock\n'), ((4783, 4799), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (4797, 4799), False, 'import mock\n'), ((5169, 5180), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (5178, 5180), False, 'import mock\n'), ((5314, 5335), 'kinto.core.authorization.RouteFactory', 'RouteFactory', (['request'], {}), '(request)\n', (5326, 5335), False, 'from kinto.core.authorization import RouteFactory, AuthorizationPolicy\n')] |
import copy
#from enum import IntFlag
from time import sleep
# I tried to use enum here, but I was having a problem with packages in the image, so I gave up as I just want to get it done
class FieldValue:
    """Bit flags describing what occupies a board cell (values are OR-ed,
    e.g. Player | Goal for a player standing on a goal square)."""
    Empty = 0
    Wall = 1
    Player = 2
    Box = 4
    Goal = 8
class SenseHATColour:
    """RGB tuples used on the Sense HAT 8x8 LED matrix."""
    Red = (204, 4, 4)
    White = (255, 255, 255)
    Yellow = (234, 231, 51)
    Green = (1, 158, 1)
    Blue = (13, 0, 198)
    Black = (0, 0, 0)
def mapStringToBoardRow(line):
    """Translate one text line of a level definition into FieldValue bitmasks.

    Symbols: '#' wall, '@' player, '$' box, '.' goal,
    '+' player on goal, '*' box on goal, ' ' empty floor.
    """
    symbol_to_field = {
        '#': FieldValue.Wall,
        '@': FieldValue.Player,
        '+': (FieldValue.Player | FieldValue.Goal),
        '$': FieldValue.Box,
        '*': (FieldValue.Box | FieldValue.Goal),
        '.': FieldValue.Goal,
        ' ': FieldValue.Empty
    }
    return [symbol_to_field[symbol] for symbol in line]
def get_levels():
    """Parse the level file into a list of boards (each a list of rows).

    File format: a line with the number of rows, followed by that many board
    lines; repeated for each level.
    """
    #board_definition = open('TestDefinition.txt', 'r')
    levels = []
    board = []
    number_of_rows = 0
    # 'with' guarantees the file handle is closed; the original leaked it.
    with open('BoardDefinition.txt', 'r') as board_definition:
        for line in board_definition.read().splitlines():
            if number_of_rows == 0:
                # A count line starts the next level; flush the finished board.
                if len(board) > 0:
                    levels.append(board)
                board = []
                number_of_rows = int(line)
            else:
                board.append(mapStringToBoardRow(line))
                number_of_rows -= 1
    levels.append(board)
    return levels
def print_to_console(level):
    """Render a board as ASCII on stdout, one character per cell."""
    cell_symbols = {
        FieldValue.Wall: '#',
        FieldValue.Player: '@',
        FieldValue.Player | FieldValue.Goal: '+',
        FieldValue.Box: '$',
        FieldValue.Box | FieldValue.Goal: '*',
        FieldValue.Goal: '.',
        FieldValue.Empty: ' '
    }
    for row in level:
        print(''.join(cell_symbols[cell] for cell in row))
# Hardware interface for the Raspberry Pi Sense HAT (LED matrix + joystick).
# NOTE(review): the import sits mid-file rather than at the top — presumably so
# the pure mapping helpers above remain readable without the hardware library;
# confirm before moving it.
from sense_hat import SenseHat
sense = SenseHat()
def print_to_senseHAT(level):
    """Render a board on the 8x8 LED matrix, one pixel per cell."""
    cell_colours = {
        FieldValue.Wall: SenseHATColour.Red,
        FieldValue.Player: SenseHATColour.White,
        FieldValue.Player | FieldValue.Goal: SenseHATColour.White,
        FieldValue.Box: SenseHATColour.Yellow,
        FieldValue.Box | FieldValue.Goal: SenseHATColour.Green,
        FieldValue.Goal: SenseHATColour.Blue,
        FieldValue.Empty: SenseHATColour.Black
    }
    sense.clear()
    print(level)  # NOTE(review): looks like leftover debug output — confirm before removing
    for y, row in enumerate(level):
        for x, cell in enumerate(row):
            sense.set_pixel(x, y, cell_colours[cell])
def can_move(level, destination, behind):
    """Return True when the player may step onto *destination*.

    A move is blocked by a wall, or by a box whose cell *behind* (the push
    target) already holds a box or a wall.
    """
    dest_x, dest_y = destination
    target = level[dest_y][dest_x]
    if target == FieldValue.Wall:
        return False
    behind_x, behind_y = behind
    blocker = level[behind_y][behind_x]
    if target & FieldValue.Box and blocker & (FieldValue.Box | FieldValue.Wall):
        return False
    return True
def get_player_position(level):
    """Return the (x, y) coordinates of the player, or None if absent."""
    for y, row in enumerate(level):
        for x, cell in enumerate(row):
            if cell & FieldValue.Player:
                return (x, y)
def try_move(level, player_position, destination, behind):
    """Attempt to move the player, mutating *level* in place.

    Clears the Player bit on the current cell, sets it on *destination*, and
    if a box occupies the destination pushes it one cell further (*behind*).
    Returns True when the move happened, False when it was blocked.
    """
    if can_move(level, destination, behind):
        # Clear the player flag from the old cell, set it on the new one.
        level[player_position[1]][player_position[0]] = level[player_position[1]][player_position[0]] & ~FieldValue.Player
        level[destination[1]][destination[0]] = level[destination[1]][destination[0]] | FieldValue.Player
        if level[destination[1]][destination[0]] & FieldValue.Box == FieldValue.Box:
            # Push the box: remove it from the destination, drop it behind.
            level[destination[1]][destination[0]] = level[destination[1]][destination[0]] & ~FieldValue.Box
            level[behind[1]][behind[0]] = level[behind[1]][behind[0]] | FieldValue.Box
        print(level)  # NOTE(review): presumably debug output — confirm
        return True
    return False
def won(level):
    """The level is solved once every goal cell also contains a box."""
    return all(
        cell & FieldValue.Box == FieldValue.Box
        for row in level
        for cell in row
        if cell & FieldValue.Goal == FieldValue.Goal
    )
def play_level(level):
    """Run the interactive joystick loop for one level; returns once solved.

    The pristine *level* is never mutated — moves act on a deep copy so the
    middle button can restore the starting position at any time.
    """
    current_state = copy.deepcopy(level)
    (position_x, position_y) = get_player_position(current_state)
    board_changed = True
    while True:
        if board_changed:
            board_changed = False
            # Re-read the player position and redraw only after a real change.
            (position_x, position_y) = get_player_position(current_state)
            print_to_senseHAT(current_state)
            if won(current_state):
                # Brief pause so the solved board is visible before returning.
                sleep(0.5)
                return
        for event in sense.stick.get_events():
            if event.action == "pressed":
                if event.direction == "middle":
                    # Middle press restarts the level from the original layout.
                    current_state = copy.deepcopy(level)
                    board_changed = True
                elif event.direction == "up":
                    board_changed = try_move(current_state, (position_x, position_y), (position_x, position_y - 1),
                                             (position_x, position_y - 2))
                elif event.direction == "down":
                    board_changed = try_move(current_state, (position_x, position_y), (position_x, position_y + 1),
                                             (position_x, position_y + 2))
                elif event.direction == "left":
                    board_changed = try_move(current_state, (position_x, position_y), (position_x - 1, position_y),
                                             (position_x - 2, position_y))
                elif event.direction == "right":
                    board_changed = try_move(current_state, (position_x, position_y), (position_x + 1, position_y),
                                             (position_x + 2, position_y))
def show_victory_sequence():
    """Animate coloured square outlines expanding from the matrix centre.

    Each (colour, start, end) entry draws the hollow square whose corners are
    (start, start) and (end, end), pausing briefly between rings.
    """
    victory_sequence = [(SenseHATColour.Red, 3, 4), (SenseHATColour.Blue, 2, 5), (SenseHATColour.Green, 1, 6), (SenseHATColour.Yellow, 0, 7)]
    sense.clear()
    for colour, start, end in victory_sequence:
        # Left and right edges (inclusive of corners).
        for y in range(start, end + 1):
            sense.set_pixel(start, y, colour)
            sense.set_pixel(end, y, colour)
        # Top and bottom edges (corners already drawn above).
        for x in range(start + 1, end):
            sense.set_pixel(x, start, colour)
            sense.set_pixel(x, end, colour)
        sleep(0.25)
def main():
    """Cycle through every level forever, celebrating after each full pass."""
    levels = get_levels()
    while True:
        for level_number, level in enumerate(levels, start=1):
            sense.show_message(str(level_number), text_colour=list(SenseHATColour.Red))
            # Hand play_level a copy so the stored level stays pristine.
            play_level(copy.deepcopy(level))
            show_victory_sequence()
        sense.show_message("You won!", text_colour=list(SenseHATColour.Green))

if __name__ == '__main__':
    main()
| [
"sense_hat.SenseHat",
"time.sleep",
"copy.deepcopy"
] | [((1862, 1872), 'sense_hat.SenseHat', 'SenseHat', ([], {}), '()\n', (1870, 1872), False, 'from sense_hat import SenseHat\n'), ((4169, 4189), 'copy.deepcopy', 'copy.deepcopy', (['level'], {}), '(level)\n', (4182, 4189), False, 'import copy\n'), ((6258, 6269), 'time.sleep', 'sleep', (['(0.25)'], {}), '(0.25)\n', (6263, 6269), False, 'from time import sleep\n'), ((4527, 4537), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (4532, 4537), False, 'from time import sleep\n'), ((6483, 6503), 'copy.deepcopy', 'copy.deepcopy', (['level'], {}), '(level)\n', (6496, 6503), False, 'import copy\n'), ((4734, 4754), 'copy.deepcopy', 'copy.deepcopy', (['level'], {}), '(level)\n', (4747, 4754), False, 'import copy\n')] |
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.10.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class ParameterContextsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_parameter_context(self, body, **kwargs):
"""
Create a Parameter Context
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_parameter_context(body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param ParameterContextEntity body: The Parameter Context. (required)
:return: ParameterContextEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_parameter_context_with_http_info(body, **kwargs)
else:
(data) = self.create_parameter_context_with_http_info(body, **kwargs)
return data
    def create_parameter_context_with_http_info(self, body, **kwargs):
        """
        Create a Parameter Context
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.create_parameter_context_with_http_info(body, callback=callback_function)
        :param callback function: The callback function
        for asynchronous request. (optional)
        :param ParameterContextEntity body: The Parameter Context. (required)
        :return: ParameterContextEntity
        If the method is called asynchronously,
        returns the request thread.
        """
        all_params = ['body']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Reject any keyword argument that is not part of the API contract.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_parameter_context" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'body' is set
        if ('body' not in params) or (params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `create_parameter_context`")
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = ['tokenAuth']
        # Delegate the actual HTTP work (and optional async dispatch) to the
        # shared ApiClient.
        return self.api_client.call_api('/parameter-contexts', 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='ParameterContextEntity',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def delete_parameter_context(self, id, **kwargs):
"""
Deletes the Parameter Context with the given ID
Deletes the Parameter Context with the given ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_parameter_context(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The Parameter Context ID. (required)
:param str version: The version is used to verify the client is working with the latest version of the flow.
:param str client_id: If the client id is not specified, a new one will be generated. This value (whether specified or generated) is included in the response.
:param bool disconnected_node_acknowledged: Acknowledges that this node is disconnected to allow for mutable requests to proceed.
:return: ParameterContextEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_parameter_context_with_http_info(id, **kwargs)
else:
(data) = self.delete_parameter_context_with_http_info(id, **kwargs)
return data
def delete_parameter_context_with_http_info(self, id, **kwargs):
"""
Deletes the Parameter Context with the given ID
Deletes the Parameter Context with the given ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_parameter_context_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The Parameter Context ID. (required)
:param str version: The version is used to verify the client is working with the latest version of the flow.
:param str client_id: If the client id is not specified, a new one will be generated. This value (whether specified or generated) is included in the response.
:param bool disconnected_node_acknowledged: Acknowledges that this node is disconnected to allow for mutable requests to proceed.
:return: ParameterContextEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'version', 'client_id', 'disconnected_node_acknowledged']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_parameter_context" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_parameter_context`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
if 'version' in params:
query_params.append(('version', params['version']))
if 'client_id' in params:
query_params.append(('clientId', params['client_id']))
if 'disconnected_node_acknowledged' in params:
query_params.append(('disconnectedNodeAcknowledged', params['disconnected_node_acknowledged']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/parameter-contexts/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ParameterContextEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_update_request(self, context_id, request_id, **kwargs):
"""
Deletes the Update Request with the given ID
Deletes the Update Request with the given ID. After a request is created via a POST to /nifi-api/parameter-contexts/update-requests, it is expected that the client will properly clean up the request by DELETE'ing it, once the Update process has completed. If the request is deleted before the request completes, then the Update request will finish the step that it is currently performing and then will cancel any subsequent steps.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_update_request(context_id, request_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str context_id: The ID of the ParameterContext (required)
:param str request_id: The ID of the Update Request (required)
:param bool disconnected_node_acknowledged: Acknowledges that this node is disconnected to allow for mutable requests to proceed.
:return: ParameterContextUpdateRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_update_request_with_http_info(context_id, request_id, **kwargs)
else:
(data) = self.delete_update_request_with_http_info(context_id, request_id, **kwargs)
return data
def delete_update_request_with_http_info(self, context_id, request_id, **kwargs):
"""
Deletes the Update Request with the given ID
Deletes the Update Request with the given ID. After a request is created via a POST to /nifi-api/parameter-contexts/update-requests, it is expected that the client will properly clean up the request by DELETE'ing it, once the Update process has completed. If the request is deleted before the request completes, then the Update request will finish the step that it is currently performing and then will cancel any subsequent steps.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_update_request_with_http_info(context_id, request_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str context_id: The ID of the ParameterContext (required)
:param str request_id: The ID of the Update Request (required)
:param bool disconnected_node_acknowledged: Acknowledges that this node is disconnected to allow for mutable requests to proceed.
:return: ParameterContextUpdateRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['context_id', 'request_id', 'disconnected_node_acknowledged']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_update_request" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'context_id' is set
if ('context_id' not in params) or (params['context_id'] is None):
raise ValueError("Missing the required parameter `context_id` when calling `delete_update_request`")
# verify the required parameter 'request_id' is set
if ('request_id' not in params) or (params['request_id'] is None):
raise ValueError("Missing the required parameter `request_id` when calling `delete_update_request`")
collection_formats = {}
path_params = {}
if 'context_id' in params:
path_params['contextId'] = params['context_id']
if 'request_id' in params:
path_params['requestId'] = params['request_id']
query_params = []
if 'disconnected_node_acknowledged' in params:
query_params.append(('disconnectedNodeAcknowledged', params['disconnected_node_acknowledged']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/parameter-contexts/{contextId}/update-requests/{requestId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ParameterContextUpdateRequestEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_validation_request(self, context_id, id, **kwargs):
"""
Deletes the Validation Request with the given ID
Deletes the Validation Request with the given ID. After a request is created via a POST to /nifi-api/validation-contexts, it is expected that the client will properly clean up the request by DELETE'ing it, once the validation process has completed. If the request is deleted before the request completes, then the Validation request will finish the step that it is currently performing and then will cancel any subsequent steps.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_validation_request(context_id, id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str context_id: The ID of the Parameter Context (required)
:param str id: The ID of the Update Request (required)
:param bool disconnected_node_acknowledged: Acknowledges that this node is disconnected to allow for mutable requests to proceed.
:return: ParameterContextValidationRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_validation_request_with_http_info(context_id, id, **kwargs)
else:
(data) = self.delete_validation_request_with_http_info(context_id, id, **kwargs)
return data
def delete_validation_request_with_http_info(self, context_id, id, **kwargs):
"""
Deletes the Validation Request with the given ID
Deletes the Validation Request with the given ID. After a request is created via a POST to /nifi-api/validation-contexts, it is expected that the client will properly clean up the request by DELETE'ing it, once the validation process has completed. If the request is deleted before the request completes, then the Validation request will finish the step that it is currently performing and then will cancel any subsequent steps.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_validation_request_with_http_info(context_id, id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str context_id: The ID of the Parameter Context (required)
:param str id: The ID of the Update Request (required)
:param bool disconnected_node_acknowledged: Acknowledges that this node is disconnected to allow for mutable requests to proceed.
:return: ParameterContextValidationRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['context_id', 'id', 'disconnected_node_acknowledged']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_validation_request" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'context_id' is set
if ('context_id' not in params) or (params['context_id'] is None):
raise ValueError("Missing the required parameter `context_id` when calling `delete_validation_request`")
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_validation_request`")
collection_formats = {}
path_params = {}
if 'context_id' in params:
path_params['contextId'] = params['context_id']
if 'id' in params:
path_params['id'] = params['id']
query_params = []
if 'disconnected_node_acknowledged' in params:
query_params.append(('disconnectedNodeAcknowledged', params['disconnected_node_acknowledged']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/parameter-contexts/{contextId}/validation-requests/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ParameterContextValidationRequestEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_parameter_context(self, id, **kwargs):
"""
Returns the Parameter Context with the given ID
Returns the Parameter Context with the given ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_parameter_context(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The ID of the Parameter Context (required)
:return: ParameterContextEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_parameter_context_with_http_info(id, **kwargs)
else:
(data) = self.get_parameter_context_with_http_info(id, **kwargs)
return data
def get_parameter_context_with_http_info(self, id, **kwargs):
"""
Returns the Parameter Context with the given ID
Returns the Parameter Context with the given ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_parameter_context_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: The ID of the Parameter Context (required)
:return: ParameterContextEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_parameter_context" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_parameter_context`")
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/parameter-contexts/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ParameterContextEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_parameter_context_update(self, context_id, request_id, **kwargs):
"""
Returns the Update Request with the given ID
Returns the Update Request with the given ID. Once an Update Request has been created by performing a POST to /nifi-api/parameter-contexts, that request can subsequently be retrieved via this endpoint, and the request that is fetched will contain the updated state, such as percent complete, the current state of the request, and any failures.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_parameter_context_update(context_id, request_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str context_id: The ID of the Parameter Context (required)
:param str request_id: The ID of the Update Request (required)
:return: ParameterContextUpdateRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_parameter_context_update_with_http_info(context_id, request_id, **kwargs)
else:
(data) = self.get_parameter_context_update_with_http_info(context_id, request_id, **kwargs)
return data
def get_parameter_context_update_with_http_info(self, context_id, request_id, **kwargs):
"""
Returns the Update Request with the given ID
Returns the Update Request with the given ID. Once an Update Request has been created by performing a POST to /nifi-api/parameter-contexts, that request can subsequently be retrieved via this endpoint, and the request that is fetched will contain the updated state, such as percent complete, the current state of the request, and any failures.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_parameter_context_update_with_http_info(context_id, request_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str context_id: The ID of the Parameter Context (required)
:param str request_id: The ID of the Update Request (required)
:return: ParameterContextUpdateRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['context_id', 'request_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_parameter_context_update" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'context_id' is set
if ('context_id' not in params) or (params['context_id'] is None):
raise ValueError("Missing the required parameter `context_id` when calling `get_parameter_context_update`")
# verify the required parameter 'request_id' is set
if ('request_id' not in params) or (params['request_id'] is None):
raise ValueError("Missing the required parameter `request_id` when calling `get_parameter_context_update`")
collection_formats = {}
path_params = {}
if 'context_id' in params:
path_params['contextId'] = params['context_id']
if 'request_id' in params:
path_params['requestId'] = params['request_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/parameter-contexts/{contextId}/update-requests/{requestId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ParameterContextUpdateRequestEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_validation_request(self, context_id, id, **kwargs):
"""
Returns the Validation Request with the given ID
Returns the Validation Request with the given ID. Once a Validation Request has been created by performing a POST to /nifi-api/validation-contexts, that request can subsequently be retrieved via this endpoint, and the request that is fetched will contain the updated state, such as percent complete, the current state of the request, and any failures.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_validation_request(context_id, id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str context_id: The ID of the Parameter Context (required)
:param str id: The ID of the Validation Request (required)
:return: ParameterContextValidationRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_validation_request_with_http_info(context_id, id, **kwargs)
else:
(data) = self.get_validation_request_with_http_info(context_id, id, **kwargs)
return data
def get_validation_request_with_http_info(self, context_id, id, **kwargs):
"""
Returns the Validation Request with the given ID
Returns the Validation Request with the given ID. Once a Validation Request has been created by performing a POST to /nifi-api/validation-contexts, that request can subsequently be retrieved via this endpoint, and the request that is fetched will contain the updated state, such as percent complete, the current state of the request, and any failures.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_validation_request_with_http_info(context_id, id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str context_id: The ID of the Parameter Context (required)
:param str id: The ID of the Validation Request (required)
:return: ParameterContextValidationRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['context_id', 'id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_validation_request" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'context_id' is set
if ('context_id' not in params) or (params['context_id'] is None):
raise ValueError("Missing the required parameter `context_id` when calling `get_validation_request`")
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_validation_request`")
collection_formats = {}
path_params = {}
if 'context_id' in params:
path_params['contextId'] = params['context_id']
if 'id' in params:
path_params['id'] = params['id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/parameter-contexts/{contextId}/validation-requests/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ParameterContextValidationRequestEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def submit_parameter_context_update(self, context_id, body, **kwargs):
"""
Initiate the Update Request of a Parameter Context
This will initiate the process of updating a Parameter Context. Changing the value of a Parameter may require that one or more components be stopped and restarted, so this acttion may take significantly more time than many other REST API actions. As a result, this endpoint will immediately return a ParameterContextUpdateRequestEntity, and the process of updating the necessary components will occur asynchronously in the background. The client may then periodically poll the status of the request by issuing a GET request to /parameter-contexts/update-requests/{requestId}. Once the request is completed, the client is expected to issue a DELETE request to /parameter-contexts/update-requests/{requestId}.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.submit_parameter_context_update(context_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str context_id: (required)
:param ParameterContextEntity body: The updated version of the parameter context. (required)
:return: ParameterContextUpdateRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.submit_parameter_context_update_with_http_info(context_id, body, **kwargs)
else:
(data) = self.submit_parameter_context_update_with_http_info(context_id, body, **kwargs)
return data
def submit_parameter_context_update_with_http_info(self, context_id, body, **kwargs):
"""
Initiate the Update Request of a Parameter Context
This will initiate the process of updating a Parameter Context. Changing the value of a Parameter may require that one or more components be stopped and restarted, so this acttion may take significantly more time than many other REST API actions. As a result, this endpoint will immediately return a ParameterContextUpdateRequestEntity, and the process of updating the necessary components will occur asynchronously in the background. The client may then periodically poll the status of the request by issuing a GET request to /parameter-contexts/update-requests/{requestId}. Once the request is completed, the client is expected to issue a DELETE request to /parameter-contexts/update-requests/{requestId}.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.submit_parameter_context_update_with_http_info(context_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str context_id: (required)
:param ParameterContextEntity body: The updated version of the parameter context. (required)
:return: ParameterContextUpdateRequestEntity
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['context_id', 'body']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method submit_parameter_context_update" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'context_id' is set
if ('context_id' not in params) or (params['context_id'] is None):
raise ValueError("Missing the required parameter `context_id` when calling `submit_parameter_context_update`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `submit_parameter_context_update`")
collection_formats = {}
path_params = {}
if 'context_id' in params:
path_params['contextId'] = params['context_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['tokenAuth']
return self.api_client.call_api('/parameter-contexts/{contextId}/update-requests', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ParameterContextUpdateRequestEntity',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def submit_validation_request(self, context_id, body, **kwargs):
    """
    Initiate a Validation Request for a Parameter Context.

    Kicks off background validation of every component whose Process Group
    is bound to the given Parameter Context.  The HTTP call is synchronous
    unless a ``callback`` keyword is supplied, in which case the request
    thread is returned and the callback receives the response.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str context_id: (required)
    :param ParameterContextValidationRequestEntity body: The validation request (required)
    :return: ParameterContextValidationRequestEntity
             If the method is called asynchronously,
             returns the request thread.
    """
    # Only the response payload is wanted here, never the full
    # (data, status, headers) triple produced by the low-level call.
    kwargs['_return_http_data_only'] = True
    # Both the async (callback) and sync paths simply return whatever the
    # low-level helper produced: the request thread or the unwrapped data.
    outcome = self.submit_validation_request_with_http_info(context_id, body, **kwargs)
    return outcome
def submit_validation_request_with_http_info(self, context_id, body, **kwargs):
    """
    Initiate a Validation Request to determine how the validity of components will change if a Parameter Context were to be updated.

    This will initiate the process of validating all components whose
    Process Group is bound to the specified Parameter Context.  The
    endpoint immediately returns a ParameterContextValidationRequestEntity
    while validation proceeds asynchronously in the background; the client
    polls GET /parameter-contexts/validation-requests/{requestId} and
    finally issues a DELETE for the same request.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str context_id: (required)
    :param ParameterContextValidationRequestEntity body: The validation request (required)
    :return: ParameterContextValidationRequestEntity
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted keyword arguments: the positional parameters plus the
    # generic transport options every generated endpoint method supports.
    all_params = ['context_id', 'body']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot the locals (context_id, body, kwargs) and fold the
    # validated kwargs into the same dict; unknown keys are rejected.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method submit_validation_request" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'context_id' is set
    if ('context_id' not in params) or (params['context_id'] is None):
        raise ValueError("Missing the required parameter `context_id` when calling `submit_validation_request`")
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `submit_validation_request`")

    collection_formats = {}

    # Substitute the context id into the {contextId} path template slot.
    path_params = {}
    if 'context_id' in params:
        path_params['contextId'] = params['context_id']

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # The request entity is sent as the JSON body.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])

    # Authentication setting
    auth_settings = ['tokenAuth']

    # Delegate the actual HTTP POST to the shared client machinery.
    return self.api_client.call_api('/parameter-contexts/{contextId}/validation-requests', 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='ParameterContextValidationRequestEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def update_parameter_context(self, id, body, **kwargs):
    """
    Modifies a Parameter Context.

    Updates a Parameter Context to match the provided entity; the request
    fails if any running component references a Parameter in the context.
    Normally this is invoked indirectly via a POST to
    /parameter-contexts/update-requests.  The HTTP call is synchronous
    unless a ``callback`` keyword is supplied, in which case the request
    thread is returned and the callback receives the response.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: (required)
    :param ParameterContextEntity body: The updated Parameter Context (required)
    :return: ParameterContextEntity
             If the method is called asynchronously,
             returns the request thread.
    """
    # Only the response payload is wanted here, never the full
    # (data, status, headers) triple produced by the low-level call.
    kwargs['_return_http_data_only'] = True
    # Both the async (callback) and sync paths simply return whatever the
    # low-level helper produced: the request thread or the unwrapped data.
    outcome = self.update_parameter_context_with_http_info(id, body, **kwargs)
    return outcome
def update_parameter_context_with_http_info(self, id, body, **kwargs):
    """
    Modifies a Parameter Context.

    This endpoint will update a Parameter Context to match the provided
    entity.  However, this request will fail if any component is running
    and is referencing a Parameter in the Parameter Context.  Generally
    this endpoint is not called directly; an update request should instead
    be submitted by POSTing to /parameter-contexts/update-requests, which
    in turn calls this endpoint.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str id: (required)
    :param ParameterContextEntity body: The updated Parameter Context (required)
    :return: ParameterContextEntity
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted keyword arguments: the positional parameters plus the
    # generic transport options every generated endpoint method supports.
    all_params = ['id', 'body']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Snapshot the locals (id, body, kwargs) and fold the validated
    # kwargs into the same dict; unknown keys are rejected.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_parameter_context" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params) or (params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `update_parameter_context`")
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `update_parameter_context`")

    collection_formats = {}

    # Substitute the context id into the {id} path template slot.
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # The updated context entity is sent as the JSON body.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])

    # Authentication setting
    auth_settings = ['tokenAuth']

    # Delegate the actual HTTP PUT to the shared client machinery.
    return self.api_client.call_api('/parameter-contexts/{id}', 'PUT',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='ParameterContextEntity',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
| [
"six.iteritems"
] | [((3606, 3633), 'six.iteritems', 'iteritems', (["params['kwargs']"], {}), "(params['kwargs'])\n", (3615, 3633), False, 'from six import iteritems\n'), ((8990, 9017), 'six.iteritems', 'iteritems', (["params['kwargs']"], {}), "(params['kwargs'])\n", (8999, 9017), False, 'from six import iteritems\n'), ((15208, 15235), 'six.iteritems', 'iteritems', (["params['kwargs']"], {}), "(params['kwargs'])\n", (15217, 15235), False, 'from six import iteritems\n'), ((21633, 21660), 'six.iteritems', 'iteritems', (["params['kwargs']"], {}), "(params['kwargs'])\n", (21642, 21660), False, 'from six import iteritems\n'), ((26658, 26685), 'six.iteritems', 'iteritems', (["params['kwargs']"], {}), "(params['kwargs'])\n", (26667, 26685), False, 'from six import iteritems\n'), ((32067, 32094), 'six.iteritems', 'iteritems', (["params['kwargs']"], {}), "(params['kwargs'])\n", (32076, 32094), False, 'from six import iteritems\n'), ((37875, 37902), 'six.iteritems', 'iteritems', (["params['kwargs']"], {}), "(params['kwargs'])\n", (37884, 37902), False, 'from six import iteritems\n'), ((44389, 44416), 'six.iteritems', 'iteritems', (["params['kwargs']"], {}), "(params['kwargs'])\n", (44398, 44416), False, 'from six import iteritems\n'), ((51125, 51152), 'six.iteritems', 'iteritems', (["params['kwargs']"], {}), "(params['kwargs'])\n", (51134, 51152), False, 'from six import iteritems\n'), ((56847, 56874), 'six.iteritems', 'iteritems', (["params['kwargs']"], {}), "(params['kwargs'])\n", (56856, 56874), False, 'from six import iteritems\n')] |
#!/usr/bin/env python
"""
Usage: python vtk_reindent_code.py [--test] <file1> [<file2> ...]
This script takes old-style "Whitesmiths" indented VTK source files as
input, and re-indents the braces according to the new VTK style.
Only the brace indentation is modified.
If called with the --test option, then it will print an error message
for each file that it would modify, but it will not actually modify the
files.
Written by <NAME> on Sep 30, 2015.
"""
import sys
import os
import re
def reindent(filename, dry_run=False):
    """Reindent a file from Whitesmiths style to Allman style.

    Only brace indentation is adjusted; all other text is left alone.
    Returns True when at least one line was (or, with dry_run, would be)
    changed, so callers can loop until a fixed point is reached.

    :param filename: path of the C/C++ source file, rewritten in place
    :param dry_run: if True, report would-be changes on stderr instead of
        rewriting the file
    """
    # The first part of this function clears all strings and comments
    # where non-grammatical braces might be hiding. These changes will
    # not be saved back to the file, they just simplify the parsing.

    # look for ', ", /*, and //
    keychar = re.compile(r"""[/"']""")
    # comments of the form /* */
    c_comment = re.compile(r"\/\*(\*(?!\/)|[^*])*\*\/")
    c_comment_start = re.compile(r"\/\*(\*(?!\/)|[^*])*$")
    c_comment_end = re.compile(r"^(\*(?!\/)|[^*])*\*\/")
    # comments of the form //
    cpp_comment = re.compile(r"\/\/.*")
    # string literals ""
    string_literal = re.compile(r'"([^\\"]|\\.)*"')
    string_literal_start = re.compile(r'"([^\\"]|\\.)*\\$')
    string_literal_end = re.compile(r'^([^\\"]|\\.)*"')
    # character literals ''
    char_literal = re.compile(r"'([^\\']|\\.)*'")
    char_literal_start = re.compile(r"'([^\\']|\\.)*\\$")
    char_literal_end = re.compile(r"^([^\\']|\\.)*'")

    # read the file
    try:
        f = open(filename)
        lines = f.readlines()
        f.close()
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
        # an `except Exception` (or OSError) would be safer here.
        sys.stderr.write(filename + ": ")
        sys.stderr.write(str(sys.exc_info()[1]) + "\n")
        sys.exit(1)

    # convert strings to "", char constants to '', and remove comments
    n = len(lines)  # 'lines' is the input
    newlines = []   # 'newlines' is the output
    cont = None     # set if e.g. we found /* and we are looking for */
    for i in range(n):
        line = lines[i].rstrip()

        if cont is not None:
            # look for closing ' or " or */
            match = cont.match(line)
            if match:
                # found closing ' or " or */
                line = line[match.end():]
                cont = None
            else:
                # this whole line is in the middle of a string or comment
                if cont is c_comment_end:
                    # still looking for */, clear the whole line
                    newlines.append("")
                    continue
                else:
                    # still looking for ' or ", set line to backslash
                    newlines.append('\\')
                    continue

        # start at column 0 and search for ', ", /*, or //
        pos = 0
        while True:
            match = keychar.search(line, pos)
            if match is None:
                break
            pos = match.start()
            end = match.end()
            # was the match /* ... */ ?
            match = c_comment.match(line, pos)
            if match:
                # blank out the comment with a single space
                line = line[0:pos] + " " + line[match.end():]
                pos += 1
                continue
            # does the line have /* ... without the */ ?
            match = c_comment_start.match(line, pos)
            if match:
                if line[-1] == '\\':
                    line = line[0:pos] + ' \\'
                else:
                    line = line[0:pos]
                cont = c_comment_end
                break
            # does the line have // ?
            match = cpp_comment.match(line, pos)
            if match:
                if line[-1] == '\\':
                    line = line[0:pos] + ' \\'
                else:
                    line = line[0:pos]
                break
            # did we find "..." ?
            match = string_literal.match(line, pos)
            if match:
                # replace the contents with an empty string literal
                line = line[0:pos] + "\"\"" + line[match.end():]
                pos += 2
                continue
            # did we find "... without the final " ?
            match = string_literal_start.match(line, pos)
            if match:
                line = line[0:pos] + "\"\"\\"
                cont = string_literal_end
                break
            # did we find '...' ?
            match = char_literal.match(line, pos)
            if match:
                line = line[0:pos] + "\' \'" + line[match.end():]
                pos += 3
                continue
            # did we find '... without the final ' ?
            match = char_literal_start.match(line, pos)
            if match:
                line = line[0:pos] + "\' \'\\"
                cont = char_literal_end
                break
            # if we got to here, we found / that wasn't /* or //
            pos += 1

        # strip any trailing whitespace!
        newlines.append(line.rstrip())

    # The second part of this function looks for braces in the simplified
    # code that we wrote to "newlines" after removing the contents of all
    # string literals, character literals, and comments.

    # Whenever we encounter an opening brace, we push its position onto a
    # stack. Whenever we encounter the matching closing brace, we indent
    # the braces as a pair.

    # For #if directives, we check whether there are mismatched braces
    # within the conditional block, and if so, we print a warning and reset
    # the stack to the depth that it had at the start of the block.

    # For #define directives, we save the stack and then restart counting
    # braces until the end of the #define. Then we restore the stack.

    # all changes go through this function
    lines_changed = {}  # keeps track of each line that was changed

    def changeline(i, newtext, lines_changed=lines_changed):
        # record a modified line only if it actually differs
        if newtext != lines[i]:
            lines[i] = newtext
            lines_changed[i] = newtext

    # we push a tuple (delim, row, col, newcol) onto this stack whenever
    # we find a {, (, or [ delimiter, this keeps track of where we found
    # the delimeter and what column we want to move it to
    stack = []
    lastdepth = 0  # NOTE(review): appears unused in this function

    # this is a superstack that allows us to save the entire stack when we
    # enter into an #if conditional block
    dstack = []

    # these are syntactic elements we need to look for
    directive = re.compile(r"\s*#\s*(..)")
    label = re.compile(r"""(case(?!\w)([^:]|::)+|\w+\s*(::\s*)*\s*:(?!:))""")
    cflow = re.compile(r"(if|else|for|do|while|switch)(?!\w)")
    delims = re.compile(r"[{}()\[\];]")
    spaces = re.compile(r"\s*")
    other = re.compile(r"(\w+|[^{}()\[\];\w\s]+)\s*")
    cplusplus = re.compile(r"\s*#\s*ifdef\s+__cplusplus")

    indentation = 0        # current indentation column
    continuation = False   # true if line continues an unfinished statement
    new_context = True     # also set when we enter a #define statement
    in_else = False        # set if in an #else
    in_define = False      # set if in #define
    in_assign = False      # set to deal with "= {" or #define x {"
    leaving_define = False # set if at the end of a #define
    save_stack = None      # save stack when entering a #define

    for i in range(n):
        line = newlines[i]

        # restore stack when leaving #define
        if leaving_define:
            stack, indentation, continuation = save_stack
            save_stack = None
            in_define = False
            leaving_define = False

        # handle #if conditionals
        is_directive = False
        in_else = False
        match = directive.match(line)
        if match:
            is_directive = True
            if match.groups()[0] == 'if':
                # remember state at #if so #else/#endif can check/restore it
                dstack.append((list(stack), indentation, continuation,
                               line))
            elif match.groups()[0] in ('en', 'el'):
                # #endif or #else: compare against the state at the #if
                oldstack, oldindent, oldcont, dline = dstack.pop()
                if len(stack) > len(oldstack) and not cplusplus.match(dline):
                    sys.stderr.write(filename + ":" + str(i) + ": ")
                    sys.stderr.write("mismatched delimiter in \"" +
                                     dline + "\" block\n")
                if match.groups()[0] == 'el':
                    # #else: rewind to the state at the #if and re-push
                    in_else = True
                    indentation = oldindent
                    continuation = oldcont
                    stack = oldstack
                    dstack.append((list(stack), indentation, continuation,
                                   line))
            elif match.groups()[0] == 'de':
                # #define: brace-count the macro body with a fresh stack
                in_define = True
                leaving_define = False
                save_stack = (stack, indentation, continuation)
                stack = []
                new_context = True

        # remove backslash at end of line, if present
        if len(line) > 0 and line[-1] == '\\':
            line = line[0:-1].rstrip()
        elif in_define:
            # a #define line without a trailing backslash ends the macro
            leaving_define = True

        if not is_directive and len(line) > 0 and not continuation:
            # what is the indentation of the current line?
            match = spaces.match(line)
            if not line[match.end()] == '{':
                indentation = match.end()
            continuation = True

        # new_context marks beginning of a file or a macro
        if new_context:
            continuation = False
            indentation = 0
            new_context = False

        # skip initial whitespace
        if is_directive:
            pos = directive.match(line).end()
        else:
            pos = spaces.match(line).end()

        # check for a label e.g. case
        match = label.match(line, pos)
        if match:
            # labels only count when we are at brace depth (not inside parens)
            base = True
            for item in stack:
                if item[0] != '{':
                    base = False
            if base:
                # NOTE(review): re.match returns a Match object, so this
                # membership test is always False — probably meant
                # re.match(...).group(); confirm against upstream.
                word = re.match(r"\w*", match.group())
                if word in ("case", "default"):
                    indentation = pos
                    continuation = False
                # check for multiple labels on the same line
                while match:
                    pos = spaces.match(line, match.end()).end()
                    match = label.match(line, pos)

        # parse the line
        while pos != len(line):
            # check for if, else, for, while, do, switch
            match = cflow.match(line, pos)
            if match:
                # if we are at the beginning of the line
                if spaces.match(line).end() == pos:
                    indentation = pos
                pos = spaces.match(line, match.end()).end()
                continue
            # check for a delimiter {} () [] or ;
            match = delims.match(line, pos)
            if not match:
                # check for any other identifiers, operators
                match = other.match(line, pos)
                if match:
                    pos = match.end()
                    continue
                else:
                    break
            # found a delimiter
            delim = line[pos]
            if delim in ('(', '['):
                # save delim, row, col, and current indentation
                stack.append((delim, i, pos, indentation))
            elif delim == '{':
                if in_assign or line[0:pos-1].rstrip()[-1:] == "=":
                    # do not adjust braces for initializer lists
                    stack.append((delim, i, -1, indentation))
                elif ((in_else or in_define) and spaces.sub("", line) == "{"):
                    # for opening braces that might have no match
                    indent = " "*indentation
                    changeline(i, spaces.sub(indent, lines[i], count=1))
                    stack.append((delim, i, pos, indentation))
                else:
                    # save delim, row, col, and previous indentation
                    stack.append((delim, i, pos, indentation))
                if spaces.sub("", newlines[i][0:pos]) == "":
                    indentation += 2
                continuation = False
            elif delim == ';':
                # ';' marks end of statement unless inside for (;;)
                if len(stack) == 0 or stack[-1][0] == '{':
                    continuation = False
            else:
                # found a ')', ']', or '}' delimiter, so pop its partner
                try:
                    ldelim, j, k, indentation = stack.pop()
                    in_assign = (k < 0)
                except IndexError:
                    ldelim = ""
                if ldelim != {'}':'{', ')':'(', ']':'['}[delim]:
                    sys.stderr.write(filename + ":" + str(i) + ": ")
                    sys.stderr.write("mismatched \'" + delim + "\'\n")
                # adjust the indentation of matching '{', '}'
                if (ldelim == '{' and delim == '}' and not in_assign and
                        spaces.sub("", line[0:pos]) == ""):
                    if spaces.sub("", newlines[j][0:k]) == "":
                        # both braces sit alone on their lines: move the pair
                        indent = " "*indentation
                        changeline(j, spaces.sub(indent, lines[j], count=1))
                        changeline(i, spaces.sub(indent, lines[i], count=1))
                    elif i != j:
                        # only the closing brace is alone: move just it
                        indent = " "*indentation
                        changeline(i, spaces.sub(indent, lines[i], count=1))
                if delim == '}':
                    continuation = False
            # eat whitespace and continue
            pos = spaces.match(line, match.end()).end()

        # check for " = " and #define assignments for the sake of
        # the { inializer list } that might be on the following line
        if len(line) > 0:
            if (line[-1] == '=' or
                    (is_directive and in_define and not leaving_define)):
                in_assign = True
            elif not is_directive:
                in_assign = False

    if len(dstack) != 0:
        sys.stderr.write(filename + ": ")
        sys.stderr.write("mismatched #if conditional.\n")

    if len(stack) != 0:
        sys.stderr.write(filename + ":" + str(stack[0][1]) + ": ")
        sys.stderr.write("no match for " + stack[0][0] +
                         " before end of file.\n")

    if lines_changed:
        # remove any trailing whitespace
        trailing = re.compile(r" *$")
        for i in range(n):
            lines[i] = trailing.sub("", lines[i])
        # NOTE(review): this loop only decrements the local n; it looks
        # like it intends to trim trailing blank lines, but n is not used
        # afterwards — confirm against upstream.
        while n > 0 and lines[n-1].rstrip() == "":
            n -= 1
        if dry_run:
            # report what would change (first 10 line numbers) on stderr
            errcount = len(lines_changed)
            line_numbers = list(lines_changed.keys())
            line_numbers.sort()
            line_numbers = [str(l + 1) for l in line_numbers[0:10] ]
            if errcount > len(line_numbers):
                line_numbers.append("...")
            sys.stderr.write("Warning: " + filename +
                             ": incorrect brace indentation on " +
                             str(errcount) +
                             (" lines: ", "line: ")[errcount == 1] +
                             ", ".join(line_numbers) + "\n")
        else:
            # rewrite the file
            ofile = open(filename, 'w')
            ofile.writelines(lines)
            ofile.close()
        return True

    return False
if __name__ == "__main__":
    # ignore generated files
    ignorefiles = ["lex.yy.c", "vtkParse.tab.c"]
    files = []
    opt_ignore = False  # ignore all further options
    opt_test = False    # the --test option

    # Split argv into options and input files; "--" ends option parsing.
    for arg in sys.argv[1:]:
        if arg[0:1] == '-' and not opt_ignore:
            if arg == '--':
                opt_ignore = True
            elif arg == '--test':
                opt_test = True
            else:
                sys.stderr.write("%s: unrecognized option %s\n" %
                                 (os.path.split(sys.argv[0])[-1], arg))
                sys.exit(1)
        elif os.path.split(arg)[-1] not in ignorefiles:
            files.append(arg)

    # if --test was set, whenever a file needs modification, we set
    # "failed" and continue checking the rest of the files
    failed = False
    for filename in files:
        # repeat until no further changes occur
        while reindent(filename, dry_run=opt_test):
            if opt_test:
                # in test mode one reported change is enough for this file
                failed = True
                break

    if failed:
        sys.exit(1)
| [
"re.compile",
"os.path.split",
"sys.stderr.write",
"sys.exc_info",
"sys.exit"
] | [((882, 902), 're.compile', 're.compile', (['"""[/"\']"""'], {}), '(\'[/"\\\']\')\n', (892, 902), False, 'import re\n'), ((958, 1002), 're.compile', 're.compile', (['"""\\\\/\\\\*(\\\\*(?!\\\\/)|[^*])*\\\\*\\\\/"""'], {}), "('\\\\/\\\\*(\\\\*(?!\\\\/)|[^*])*\\\\*\\\\/')\n", (968, 1002), False, 'import re\n'), ((1021, 1060), 're.compile', 're.compile', (['"""\\\\/\\\\*(\\\\*(?!\\\\/)|[^*])*$"""'], {}), "('\\\\/\\\\*(\\\\*(?!\\\\/)|[^*])*$')\n", (1031, 1060), False, 'import re\n'), ((1079, 1118), 're.compile', 're.compile', (['"""^(\\\\*(?!\\\\/)|[^*])*\\\\*\\\\/"""'], {}), "('^(\\\\*(?!\\\\/)|[^*])*\\\\*\\\\/')\n", (1089, 1118), False, 'import re\n'), ((1166, 1188), 're.compile', 're.compile', (['"""\\\\/\\\\/.*"""'], {}), "('\\\\/\\\\/.*')\n", (1176, 1188), False, 'import re\n'), ((1236, 1269), 're.compile', 're.compile', (['""""([^\\\\\\\\"]|\\\\\\\\.)*\\""""'], {}), '(\'"([^\\\\\\\\"]|\\\\\\\\.)*"\')\n', (1246, 1269), False, 'import re\n'), ((1295, 1332), 're.compile', 're.compile', (['""""([^\\\\\\\\"]|\\\\\\\\.)*\\\\\\\\$"""'], {}), '(\'"([^\\\\\\\\"]|\\\\\\\\.)*\\\\\\\\$\')\n', (1305, 1332), False, 'import re\n'), ((1354, 1387), 're.compile', 're.compile', (['"""^([^\\\\\\\\"]|\\\\\\\\.)*\\""""'], {}), '(\'^([^\\\\\\\\"]|\\\\\\\\.)*"\')\n', (1364, 1387), False, 'import re\n'), ((1434, 1467), 're.compile', 're.compile', (['"""\'([^\\\\\\\\\']|\\\\\\\\.)*\'"""'], {}), '("\'([^\\\\\\\\\']|\\\\\\\\.)*\'")\n', (1444, 1467), False, 'import re\n'), ((1491, 1528), 're.compile', 're.compile', (['"""\'([^\\\\\\\\\']|\\\\\\\\.)*\\\\\\\\$"""'], {}), '("\'([^\\\\\\\\\']|\\\\\\\\.)*\\\\\\\\$")\n', (1501, 1528), False, 'import re\n'), ((1548, 1581), 're.compile', 're.compile', (['"""^([^\\\\\\\\\']|\\\\\\\\.)*\'"""'], {}), '("^([^\\\\\\\\\']|\\\\\\\\.)*\'")\n', (1558, 1581), False, 'import re\n'), ((6567, 6594), 're.compile', 're.compile', (['"""\\\\s*#\\\\s*(..)"""'], {}), "('\\\\s*#\\\\s*(..)')\n", (6577, 6594), False, 'import re\n'), ((6607, 6672), 're.compile', 
're.compile', (['"""(case(?!\\\\w)([^:]|::)+|\\\\w+\\\\s*(::\\\\s*)*\\\\s*:(?!:))"""'], {}), "('(case(?!\\\\w)([^:]|::)+|\\\\w+\\\\s*(::\\\\s*)*\\\\s*:(?!:))')\n", (6617, 6672), False, 'import re\n'), ((6686, 6736), 're.compile', 're.compile', (['"""(if|else|for|do|while|switch)(?!\\\\w)"""'], {}), "('(if|else|for|do|while|switch)(?!\\\\w)')\n", (6696, 6736), False, 'import re\n'), ((6751, 6778), 're.compile', 're.compile', (['"""[{}()\\\\[\\\\];]"""'], {}), "('[{}()\\\\[\\\\];]')\n", (6761, 6778), False, 'import re\n'), ((6792, 6810), 're.compile', 're.compile', (['"""\\\\s*"""'], {}), "('\\\\s*')\n", (6802, 6810), False, 'import re\n'), ((6824, 6870), 're.compile', 're.compile', (['"""(\\\\w+|[^{}()\\\\[\\\\];\\\\w\\\\s]+)\\\\s*"""'], {}), "('(\\\\w+|[^{}()\\\\[\\\\];\\\\w\\\\s]+)\\\\s*')\n", (6834, 6870), False, 'import re\n'), ((6883, 6926), 're.compile', 're.compile', (['"""\\\\s*#\\\\s*ifdef\\\\s+__cplusplus"""'], {}), "('\\\\s*#\\\\s*ifdef\\\\s+__cplusplus')\n", (6893, 6926), False, 'import re\n'), ((14327, 14360), 'sys.stderr.write', 'sys.stderr.write', (["(filename + ': ')"], {}), "(filename + ': ')\n", (14343, 14360), False, 'import sys\n'), ((14370, 14419), 'sys.stderr.write', 'sys.stderr.write', (['"""mismatched #if conditional.\n"""'], {}), "('mismatched #if conditional.\\n')\n", (14386, 14419), False, 'import sys\n'), ((14524, 14598), 'sys.stderr.write', 'sys.stderr.write', (["('no match for ' + stack[0][0] + ' before end of file.\\n')"], {}), "('no match for ' + stack[0][0] + ' before end of file.\\n')\n", (14540, 14598), False, 'import sys\n'), ((14712, 14729), 're.compile', 're.compile', (['""" *$"""'], {}), "(' *$')\n", (14722, 14729), False, 'import re\n'), ((16793, 16804), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (16801, 16804), False, 'import sys\n'), ((1712, 1745), 'sys.stderr.write', 'sys.stderr.write', (["(filename + ': ')"], {}), "(filename + ': ')\n", (1728, 1745), False, 'import sys\n'), ((1812, 1823), 'sys.exit', 'sys.exit', 
(['(1)'], {}), '(1)\n', (1820, 1823), False, 'import sys\n'), ((16303, 16314), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (16311, 16314), False, 'import sys\n'), ((16329, 16347), 'os.path.split', 'os.path.split', (['arg'], {}), '(arg)\n', (16342, 16347), False, 'import os\n'), ((8341, 8408), 'sys.stderr.write', 'sys.stderr.write', (['(\'mismatched delimiter in "\' + dline + \'" block\\n\')'], {}), '(\'mismatched delimiter in "\' + dline + \'" block\\n\')\n', (8357, 8408), False, 'import sys\n'), ((1776, 1790), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (1788, 1790), False, 'import sys\n'), ((13055, 13103), 'sys.stderr.write', 'sys.stderr.write', (['("mismatched \'" + delim + "\'\\n")'], {}), '("mismatched \'" + delim + "\'\\n")\n', (13071, 13103), False, 'import sys\n'), ((16248, 16274), 'os.path.split', 'os.path.split', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (16261, 16274), False, 'import os\n')] |
import os.path
from scipy.optimize import fsolve
import math
import numpy as np
from matplotlib import pyplot as plt
import pandas as pd
import utils_Florian as utils
def equations(p, t_peak, t_half):
    """Residuals for solving the two rate constants of a double exponential.

    For crf(t) proportional to exp(-y*t) - exp(-x*t) with rates p = (x, y),
    returns a pair of residuals that are both zero when the curve peaks at
    t_peak (derivative condition) and has fallen to half its peak value at
    t_half.  Suitable as the target function for scipy.optimize.fsolve.
    """
    rate_fast, rate_slow = p
    # exponential terms evaluated at the peak and half-decay times
    peak_fast = math.exp(-rate_fast * t_peak)
    peak_slow = math.exp(-rate_slow * t_peak)
    half_fast = math.exp(-rate_fast * t_half)
    half_slow = math.exp(-rate_slow * t_half)
    # half-amplitude condition: value at t_half equals half the peak value
    residual_half = 0.5 * (peak_fast - peak_slow) - (half_fast - half_slow)
    # peak condition: derivative of the curve vanishes at t_peak
    residual_peak = -rate_fast * peak_fast + rate_slow * peak_slow
    return (residual_half, residual_peak)
# Sweep over target half-decay times and solve for the two exponential
# rate constants (a, b) of a canonical response function with a fixed
# peak time, collecting the fitted parameters into a DataFrame.
results = pd.DataFrame()
t_peaks = []
t_halfs = []
xs = []
ys = []
# One starting guess per alpha for the root finder; index 0 is never
# used because alpha starts at 1. Early (short t_half) targets need
# closer initial guesses, later ones all converge from (30, 1).
initial_conditions = ((12, 5),
                      (14, 4),
                      (14, 4),
                      (30, 1),
                      (30, 1),
                      (30, 1),
                      (30, 1),
                      (30, 1),
                      (30, 1),
                      (30, 1),
                      (30, 1),
                      (30, 1),
                      (30, 1),
                      (30, 1),
                      (30, 1),
                      (30, 1))
for alpha in range(1, 16):
    # fixed peak time; target half-decay time grows in 0.05 s steps
    t_peak = 0.1415
    t_half = t_peak + 0.2 + alpha * 0.05
    print("Target: ", t_half)
    # solve equations() for the two rate constants hitting this target
    x, y = fsolve(equations, initial_conditions[alpha], args=(t_peak, t_half))
    t_peaks.append(t_peak)
    t_halfs.append(t_half - t_peak)
    xs.append(x)
    ys.append(y)
    # evaluate the fitted curve on a dense grid and normalize to unit area
    t = np.linspace(0, 2.0, 10000)
    crf = -np.exp(-x * t) + np.exp(-y * t)
    crf = crf / sum(crf)
    # sanity checks: empirical peak time and half-decay time of the curve
    print("t peak", t[np.argmax(crf)])
    diff = crf - 0.5 * max(crf)
    # mask everything before the peak so argmin finds the *falling* crossing
    diff[:np.argmax(crf)] = np.inf
    diff = np.abs(diff)
    half_idx = np.argmin(diff)
    print("t half", t[half_idx] - t[np.argmax(crf)])
    plt.plot(t, crf, label=str(t_half - t_peak))
    # NOTE(review): DataFrame.append is deprecated in modern pandas;
    # pd.concat would be the replacement.
    results = results.append(pd.DataFrame({"t_peak": [t_peak], "t_half": [t_half - t_peak], "a": [x], "b": [y]}))
# write the fitted parameters next to the project's other outputs
# (assumes utils.output_dir points at an existing directory — TODO confirm)
results.to_csv(os.path.join(utils.output_dir, "crf_parameters.csv"))
| [
"scipy.optimize.fsolve",
"numpy.abs",
"numpy.argmax",
"numpy.exp",
"numpy.linspace",
"numpy.argmin",
"pandas.DataFrame",
"math.exp"
] | [((404, 418), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (416, 418), True, 'import pandas as pd\n'), ((1088, 1155), 'scipy.optimize.fsolve', 'fsolve', (['equations', 'initial_conditions[alpha]'], {'args': '(t_peak, t_half)'}), '(equations, initial_conditions[alpha], args=(t_peak, t_half))\n', (1094, 1155), False, 'from scipy.optimize import fsolve\n'), ((1267, 1293), 'numpy.linspace', 'np.linspace', (['(0)', '(2.0)', '(10000)'], {}), '(0, 2.0, 10000)\n', (1278, 1293), True, 'import numpy as np\n'), ((1479, 1491), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (1485, 1491), True, 'import numpy as np\n'), ((1507, 1522), 'numpy.argmin', 'np.argmin', (['diff'], {}), '(diff)\n', (1516, 1522), True, 'import numpy as np\n'), ((1322, 1336), 'numpy.exp', 'np.exp', (['(-y * t)'], {}), '(-y * t)\n', (1328, 1336), True, 'import numpy as np\n'), ((1654, 1741), 'pandas.DataFrame', 'pd.DataFrame', (["{'t_peak': [t_peak], 't_half': [t_half - t_peak], 'a': [x], 'b': [y]}"], {}), "({'t_peak': [t_peak], 't_half': [t_half - t_peak], 'a': [x],\n 'b': [y]})\n", (1666, 1741), True, 'import pandas as pd\n'), ((1305, 1319), 'numpy.exp', 'np.exp', (['(-x * t)'], {}), '(-x * t)\n', (1311, 1319), True, 'import numpy as np\n'), ((1384, 1398), 'numpy.argmax', 'np.argmax', (['crf'], {}), '(crf)\n', (1393, 1398), True, 'import numpy as np\n'), ((1443, 1457), 'numpy.argmax', 'np.argmax', (['crf'], {}), '(crf)\n', (1452, 1457), True, 'import numpy as np\n'), ((288, 309), 'math.exp', 'math.exp', (['(-x * t_half)'], {}), '(-x * t_half)\n', (296, 309), False, 'import math\n'), ((312, 333), 'math.exp', 'math.exp', (['(-y * t_half)'], {}), '(-y * t_half)\n', (320, 333), False, 'import math\n'), ((341, 362), 'math.exp', 'math.exp', (['(-x * t_peak)'], {}), '(-x * t_peak)\n', (349, 362), False, 'import math\n'), ((369, 390), 'math.exp', 'math.exp', (['(-y * t_peak)'], {}), '(-y * t_peak)\n', (377, 390), False, 'import math\n'), ((1559, 1573), 'numpy.argmax', 'np.argmax', (['crf'], {}), 
'(crf)\n', (1568, 1573), True, 'import numpy as np\n'), ((238, 259), 'math.exp', 'math.exp', (['(-x * t_peak)'], {}), '(-x * t_peak)\n', (246, 259), False, 'import math\n'), ((262, 283), 'math.exp', 'math.exp', (['(-y * t_peak)'], {}), '(-y * t_peak)\n', (270, 283), False, 'import math\n')] |
import re
import string
import numpy as np
from tqdm import tqdm
from typing import List
from docqa.triviaqa.read_data import TriviaQaQuestion
from docqa.triviaqa.trivia_qa_eval import normalize_answer, f1_score
from docqa.utils import flatten_iterable, split
"""
Tools for turning the aliases and answer strings from TriviaQA into labelled spans
"""
class ExactMatchDetector(object):
    """Label answer spans by exact, lower-cased token match.

    Unlike the normalized detectors below, no punctuation stripping or
    article skipping is performed: a span is reported only when the answer
    tokens occur verbatim (case-insensitively) in the paragraph.
    """
    def __init__(self):
        # Tokenized answer aliases for the current question; set per question.
        self.answer_tokens = None
    def set_question(self, normalized_aliases):
        """Store the tokenized aliases of the current question's answer."""
        self.answer_tokens = normalized_aliases
    def any_found(self, para):
        """Return all (start, end) token spans of `para` matching any alias.

        `para` is a list of token lists; indices are into the flattened
        token sequence and `end` is exclusive.
        """
        # Lower-case the flattened paragraph once up front.
        words = [x.lower() for x in flatten_iterable(para)]
        occurances = []
        for answer_ix, answer in enumerate(self.answer_tokens):
            # Candidate starts: every position where the first alias token occurs.
            word_starts = [i for i, w in enumerate(words) if answer[0] == w]
            n_tokens = len(answer)
            for start in word_starts:
                end = start + 1
                ans_token = 1
                # Greedily consume consecutive matching tokens.
                while ans_token < n_tokens and end < len(words):
                    next = words[end]
                    if answer[ans_token] == next:
                        ans_token += 1
                        end += 1
                    else:
                        break
                # All alias tokens matched -> record the span.
                if n_tokens == ans_token:
                    occurances.append((start, end))
        # De-duplicate spans found via multiple aliases.
        return list(set(occurances))
class NormalizedAnswerDetector(object):
    """Label token spans that the official TriviaQA evaluation script would
    score as exactly correct: tokens are compared after `normalize_answer`,
    and tokens whose normalization is the empty string are skipped over.
    """

    def __init__(self):
        # Tokenized answer aliases for the current question.
        self.answer_tokens = None

    def set_question(self, normalized_aliases):
        """Remember the tokenized aliases for the current question."""
        self.answer_tokens = normalized_aliases

    def _match_from(self, words, start, answer):
        """Try to match `answer` beginning at `start`.

        Returns the exclusive end index on success, or None on failure.
        Tokens that normalized to "" are transparent and simply advance
        the position.
        """
        pos = start + 1
        matched = 1
        target = len(answer)
        while matched < target and pos < len(words):
            token = words[pos]
            if token == answer[matched]:
                matched += 1
                pos += 1
            elif token == "":
                # Empty-normalized token: skip it without consuming an answer token.
                pos += 1
            else:
                return None
        return pos if matched == target else None

    def any_found(self, para):
        """Return every (start, end) span of the flattened, normalized
        paragraph that matches one of the answer aliases."""
        words = [normalize_answer(w) for w in flatten_iterable(para)]
        spans = set()
        for answer in self.answer_tokens:
            first = answer[0]
            for start, w in enumerate(words):
                if w != first:
                    continue
                end = self._match_from(words, start, answer)
                if end is not None:
                    spans.add((start, end))
        return list(spans)
class FastNormalizedAnswerDetector(object):
    """Faster approximation of NormalizedAnswerDetector.

    Instead of fully normalizing every token, it lower-cases and strips
    punctuation inline and skips article tokens while matching. Almost
    twice as fast and very, very close to NormalizedAnswerDetector's output.
    """
    def __init__(self):
        # These come from the TrivaQA official evaluation script
        self.skip = {"a", "an", "the", ""}
        # ASCII punctuation plus a few unicode quote/accent characters to strip.
        self.strip = string.punctuation + "".join([u"‘", u"’", u"´", u"`", "_"])
        # Tokenized answer aliases for the current question.
        self.answer_tokens = None
    def set_question(self, normalized_aliases):
        """Store the tokenized aliases of the current question's answer."""
        self.answer_tokens = normalized_aliases
    def any_found(self, para):
        """Return all (start, end) token spans matching any alias.

        Spans are indices into the flattened, normalized token sequence;
        `end` is exclusive.
        """
        # Normalize the paragraph
        words = [w.lower().strip(self.strip) for w in flatten_iterable(para)]
        occurances = []
        for answer_ix, answer in enumerate(self.answer_tokens):
            # Locations where the first word occurs
            word_starts = [i for i, w in enumerate(words) if answer[0] == w]
            n_tokens = len(answer)
            # Advance forward until we find all the words, skipping over articles
            for start in word_starts:
                end = start + 1
                ans_token = 1
                while ans_token < n_tokens and end < len(words):
                    next = words[end]
                    if answer[ans_token] == next:
                        ans_token += 1
                        end += 1
                    elif next in self.skip:
                        # Articles / empty tokens are transparent.
                        end += 1
                    else:
                        break
                if n_tokens == ans_token:
                    occurances.append((start, end))
        # De-duplicate spans found via multiple aliases.
        return list(set(occurances))
class CarefulAnswerDetector(object):
    """
    There are some common false negatives in the above answer detection, in particular plurals of answers are
    often not found (nor are counted correct by the official script). This detector makes a stronger effort to
    find them, although its unclear if training with these additional answers would hurt/help our overall score
    since I never got around to trying it.
    """
    def __init__(self):
        # Filler tokens that may appear between answer tokens without
        # breaking a match (articles, conjunctions, quotes, possessives, ...).
        self.skip = {"a", "an", "the", "&", "and", "-", "\u2019", "\u2018", "\"", ";", "'",
                     "(", ")", "'s'", "s", ":", ",", "."}
        # One list of compiled regexes per alias; built in `set_question`.
        self.answer_regex = None
        # The raw aliases, kept for reference/debugging.
        self.aliases = None
    def set_question(self, normalized_aliases):
        """Compile case-insensitive, plural-tolerant regexes for each alias."""
        answer_regex = []
        self.aliases = normalized_aliases
        for answer in normalized_aliases:
            tokens = []
            for token in answer:
                if len(token) > 1:
                    # Allow an optional trailing "s" so plurals still match.
                    tokens.append(token + "s?")
                else:
                    tokens.append(token)
            if tokens[-1] == "s":
                # A bare trailing "s" token becomes optional.
                tokens[-1] = "s?"
            answer_regex.append([re.compile(x, re.IGNORECASE) for x in tokens])
        self.answer_regex = answer_regex
    def any_found(self, para):
        """Return all (start, end) token spans where an alias regex matches.

        `end` is exclusive; indices are into the flattened token sequence.
        """
        words = flatten_iterable(para)
        occurances = []
        for answer_ix, answer in enumerate(self.answer_regex):
            # Candidate starts: the first token regex must fully match the word.
            word_starts = [i for i, w in enumerate(words) if answer[0].fullmatch(w)]
            n_tokens = len(answer)
            for start in word_starts:
                end = start + 1
                ans_token = 1
                while ans_token < n_tokens and end < len(words):
                    next = words[end]
                    if answer[ans_token].match(next):
                        ans_token += 1
                        end += 1
                    elif next in self.skip:
                        # Filler token: advance without consuming an answer token.
                        end += 1
                    else:
                        break
                if n_tokens == ans_token:
                    occurances.append((start, end))
        # De-duplicate spans found via multiple aliases.
        return list(set(occurances))
def evaluate_question_detector(questions, corpus, word_tokenize, detector, reference_detector=None, compute_f1s=False):
    """Debugging utility: run `detector` over every evidence document of
    every question and report how many answer spans were found.

    Parameters
    ----------
    questions : list of TriviaQaQuestion
    corpus : document store exposing `get_document(doc_id)`
    word_tokenize : callable mapping a string to a list of tokens
    detector : one of the answer detectors defined above
    reference_detector : optional second detector; when `detector` finds
        nothing in a document, the reference is run and any spans it does
        find are printed so the two detectors can be compared
    compute_f1s : if True, also compute the official F1 of every found span
        against the question's aliases and print the average
    """
    n_no_docs = 0
    answer_per_doc = []
    answer_f1s = []
    # The question index was previously bound but never used.
    for q in tqdm(questions):
        tokenized_aliases = [word_tokenize(x) for x in q.answer.normalized_aliases]
        detector.set_question(tokenized_aliases)
        for doc in q.all_docs:
            doc = corpus.get_document(doc.doc_id)
            if doc is None:
                n_no_docs += 1
                continue
            output = []
            for i, para in enumerate(doc):
                for s, e in detector.any_found(para):
                    output.append((i, s, e))
            # Fixed: `reference_detector is not None` was redundantly
            # re-checked inside this branch; one combined check suffices.
            if len(output) == 0 and reference_detector is not None:
                reference_detector.set_question(tokenized_aliases)
                detected = []
                for i, para in enumerate(doc):
                    for s, e in reference_detector.any_found(para):
                        detected.append((i, s, e))
                if len(detected) > 0:
                    print("Found a difference")
                    print(q.answer.normalized_aliases)
                    print(tokenized_aliases)
                    for p, s, e in detected:
                        token = flatten_iterable(doc[p])[s:e]
                        print(token)
            answer_per_doc.append(output)
            if compute_f1s:
                f1s = []
                for p, s, e in output:
                    token = flatten_iterable(doc[p])[s:e]
                    answer = normalize_answer(" ".join(token))
                    f1 = 0
                    # Best F1 over all aliases, as in the official scorer.
                    for gt in q.answer.normalized_aliases:
                        f1 = max(f1, f1_score(answer, gt))
                    f1s.append(f1)
                answer_f1s.append(f1s)
    n_answers = sum(len(x) for x in answer_per_doc)
    print("Found %d answers (av %.4f)" % (n_answers, n_answers / len(answer_per_doc)))
    print("%.4f docs have answers" % np.mean([len(x) > 0 for x in answer_per_doc]))
    if len(answer_f1s) > 0:
        print("Average f1 is %.4f" % np.mean(flatten_iterable(answer_f1s)))
def compute_answer_spans(questions: List[TriviaQaQuestion], corpus, word_tokenize,
                         detector):
    """Tokenize each question and label its evidence documents in place.

    For every question, `q.question` is replaced with its token list, and
    every `doc.answer_spans` is set to an (n, 2) int32 array of inclusive
    [start, end] token offsets into the flattened document where an answer
    alias was found.
    """
    for i, q in enumerate(questions):
        # Lightweight progress report every 500 questions.
        if i % 500 == 0:
            print("Completed question %d of %d (%.3f)" % (i, len(questions), i/len(questions)))
        q.question = word_tokenize(q.question)
        if q.answer is None:
            continue
        tokenized_aliases = [word_tokenize(x) for x in q.answer.all_answers]
        if len(tokenized_aliases) == 0:
            raise ValueError()
        detector.set_question(tokenized_aliases)
        for doc in q.all_docs:
            text = corpus.get_document(doc.doc_id)
            if text is None:
                raise ValueError()
            spans = []
            # Running token offset of the current paragraph in the document.
            offset = 0
            for para_ix, para in enumerate(text):
                for s, e in detector.any_found(para):
                    spans.append((s+offset, e+offset-1))  # turn into inclusive span
                offset += sum(len(s) for s in para)
            if len(spans) == 0:
                # Keep a consistent (0, 2) shape even when nothing was found.
                spans = np.zeros((0, 2), dtype=np.int32)
            else:
                spans = np.array(spans, dtype=np.int32)
            doc.answer_spans = spans
def _compute_answer_spans_chunk(questions, corpus, tokenizer, detector):
    """Worker entry point for `compute_answer_spans_par`: labels one chunk
    of questions and returns it (the multiprocessing pool collects the
    mutated copies from the return value)."""
    # We use tokenize_paragraph since some questions can have multiple sentences,
    # but we still store the results as a flat list of tokens
    word_tokenize = tokenizer.tokenize_paragraph_flat
    compute_answer_spans(questions, corpus, word_tokenize, detector)
    return questions
def compute_answer_spans_par(questions: List[TriviaQaQuestion], corpus,
                             tokenizer, detector, n_processes: int):
    """Parallel variant of `compute_answer_spans`: shard the questions over
    `n_processes` worker processes and stitch the labelled results back
    together. With a single process everything runs inline."""
    if n_processes == 1:
        # No pool needed -- do the work in this process.
        compute_answer_spans(questions, corpus,
                             tokenizer.tokenize_paragraph_flat, detector)
        return questions
    from multiprocessing import Pool
    chunks = split(questions, n_processes)
    work = [[chunk, corpus, tokenizer, detector] for chunk in chunks]
    with Pool(n_processes) as pool:
        processed = pool.starmap(_compute_answer_spans_chunk, work)
    return flatten_iterable(processed)
def main():
    """Ad-hoc check: sample 1000 TriviaQA-web training questions and report
    how many answers FastNormalizedAnswerDetector finds in their evidence."""
    from trivia_qa.build_span_corpus import TriviaQaWebDataset
    from data_processing.text_utils import NltkAndPunctTokenizer
    dataset = TriviaQaWebDataset()
    qs = dataset.get_train()
    # Fixed seed keeps the 1000-question sample reproducible across runs.
    qs = np.random.RandomState(0).choice(qs, 1000, replace=False)
    evaluate_question_detector(qs, dataset.evidence, NltkAndPunctTokenizer().tokenize_paragraph_flat,
                               FastNormalizedAnswerDetector())
if __name__ == "__main__":
main() | [
"docqa.triviaqa.trivia_qa_eval.normalize_answer",
"docqa.utils.flatten_iterable",
"re.compile",
"tqdm.tqdm",
"trivia_qa.build_span_corpus.TriviaQaWebDataset",
"data_processing.text_utils.NltkAndPunctTokenizer",
"numpy.array",
"numpy.zeros",
"multiprocessing.Pool",
"docqa.triviaqa.trivia_qa_eval.f1... | [((10899, 10919), 'trivia_qa.build_span_corpus.TriviaQaWebDataset', 'TriviaQaWebDataset', ([], {}), '()\n', (10917, 10919), False, 'from trivia_qa.build_span_corpus import TriviaQaWebDataset\n'), ((5407, 5429), 'docqa.utils.flatten_iterable', 'flatten_iterable', (['para'], {}), '(para)\n', (5423, 5429), False, 'from docqa.utils import flatten_iterable, split\n'), ((6482, 6497), 'tqdm.tqdm', 'tqdm', (['questions'], {}), '(questions)\n', (6486, 6497), False, 'from tqdm import tqdm\n'), ((10470, 10487), 'multiprocessing.Pool', 'Pool', (['n_processes'], {}), '(n_processes)\n', (10474, 10487), False, 'from multiprocessing import Pool\n'), ((10511, 10540), 'docqa.utils.split', 'split', (['questions', 'n_processes'], {}), '(questions, n_processes)\n', (10516, 10540), False, 'from docqa.utils import flatten_iterable, split\n'), ((1755, 1774), 'docqa.triviaqa.trivia_qa_eval.normalize_answer', 'normalize_answer', (['w'], {}), '(w)\n', (1771, 1774), False, 'from docqa.triviaqa.trivia_qa_eval import normalize_answer, f1_score\n'), ((10958, 10982), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (10979, 10982), True, 'import numpy as np\n'), ((11068, 11091), 'data_processing.text_utils.NltkAndPunctTokenizer', 'NltkAndPunctTokenizer', ([], {}), '()\n', (11089, 11091), False, 'from data_processing.text_utils import NltkAndPunctTokenizer\n'), ((613, 635), 'docqa.utils.flatten_iterable', 'flatten_iterable', (['para'], {}), '(para)\n', (629, 635), False, 'from docqa.utils import flatten_iterable, split\n'), ((1784, 1806), 'docqa.utils.flatten_iterable', 'flatten_iterable', (['para'], {}), '(para)\n', (1800, 1806), False, 'from docqa.utils import flatten_iterable, split\n'), ((3192, 3214), 'docqa.utils.flatten_iterable', 'flatten_iterable', (['para'], {}), '(para)\n', (3208, 3214), False, 'from docqa.utils import flatten_iterable, split\n'), ((9593, 9625), 'numpy.zeros', 'np.zeros', (['(0, 2)'], {'dtype': 
'np.int32'}), '((0, 2), dtype=np.int32)\n', (9601, 9625), True, 'import numpy as np\n'), ((9668, 9699), 'numpy.array', 'np.array', (['spans'], {'dtype': 'np.int32'}), '(spans, dtype=np.int32)\n', (9676, 9699), True, 'import numpy as np\n'), ((5270, 5298), 're.compile', 're.compile', (['x', 're.IGNORECASE'], {}), '(x, re.IGNORECASE)\n', (5280, 5298), False, 'import re\n'), ((8498, 8526), 'docqa.utils.flatten_iterable', 'flatten_iterable', (['answer_f1s'], {}), '(answer_f1s)\n', (8514, 8526), False, 'from docqa.utils import flatten_iterable, split\n'), ((7891, 7915), 'docqa.utils.flatten_iterable', 'flatten_iterable', (['doc[p]'], {}), '(doc[p])\n', (7907, 7915), False, 'from docqa.utils import flatten_iterable, split\n'), ((8107, 8127), 'docqa.triviaqa.trivia_qa_eval.f1_score', 'f1_score', (['answer', 'gt'], {}), '(answer, gt)\n', (8115, 8127), False, 'from docqa.triviaqa.trivia_qa_eval import normalize_answer, f1_score\n'), ((7656, 7680), 'docqa.utils.flatten_iterable', 'flatten_iterable', (['doc[p]'], {}), '(doc[p])\n', (7672, 7680), False, 'from docqa.utils import flatten_iterable, split\n')] |
# Generated by Django 3.2.9 on 2021-11-13 14:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds a `history` CharField to `Profile`.

    The default value '[]' suggests the field stores a JSON-encoded list
    serialized into the CharField -- confirm against the application code.
    """

    dependencies = [
        ('base', '0005_authquery_password'),
    ]

    operations = [
        migrations.AddField(
            model_name='profile',
            name='history',
            field=models.CharField(default='[]', max_length=1000),
        ),
    ]
| [
"django.db.models.CharField"
] | [((348, 395), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""[]"""', 'max_length': '(1000)'}), "(default='[]', max_length=1000)\n", (364, 395), False, 'from django.db import migrations, models\n')] |
import nifty.tools as nt
import numpy as np
import z5py
from elf.label_multiset import deserialize_multiset
from tqdm import trange
def check_serialization(mset1, mset2):
    """Compare two serialized multisets entry-for-entry.

    Prints a diagnostic and returns False on the first mismatch (length or
    content); prints a success message and returns True otherwise.
    """
    size_a, size_b = len(mset1), len(mset2)
    if size_a != size_b:
        print("Serialization sizes disagree:", size_a, size_b)
        return False
    if np.array_equal(mset1, mset2):
        print("Check serialization passed")
        return True
    mismatch = (mset1 != mset2)
    print("Serializations disagree for entries", mismatch.sum(), "/", mismatch.size)
    return False
def check_multiset_members(mset1, mset2):
    """Compare two deserialized multisets attribute by attribute.

    Checks the element count first, then the argmax / offsets / ids /
    counts arrays. Prints a diagnostic and returns False at the first
    disagreement, otherwise prints a success message and returns True.
    """
    assert mset1.shape == mset2.shape
    if mset1.n_elements != mset2.n_elements:
        print("N-elements disagree:", mset1.n_elements, mset2.n_elements)
        return False
    # Pairs of (label for the diagnostic, array from each multiset).
    pairs = [
        ("Argmax", mset1.argmax, mset2.argmax),
        ("Offsets", mset1.offsets, mset2.offsets),
        ("Ids", mset1.ids, mset2.ids),
        ("Counts", mset1.counts, mset2.counts),
    ]
    for label, a, b in pairs:
        if not np.array_equal(a, b):
            mismatch = (a != b)
            print(label + " disagree for entries", mismatch.sum(), "/", mismatch.size)
            return False
    print("Check members passed")
    return True
def check_pixels(mset1, mset2, seg, scale, offset):
    """Compare two multisets voxel by voxel.

    Iterates over every single voxel (block shape [1, 1, 1]) and compares
    the (ids, counts) entries of both multisets. On the first mismatch the
    corresponding region of the full-resolution segmentation `seg` is
    printed for debugging and False is returned; True means all voxels
    agree. `scale` and `offset` map multiset coordinates back into `seg`
    coordinates (presumably zyx order, matching the callers -- confirm).
    """
    roi_end = mset1.shape
    # Block shape [1, 1, 1] -> one blocking block per voxel.
    blocking = nt.blocking([0, 0, 0], roi_end, [1, 1, 1])
    for block_id in trange(blocking.numberOfBlocks):
        block = blocking.getBlock(block_id)
        bb = tuple(slice(beg, end) for beg, end in zip(block.begin, block.end))
        i1, c1 = mset1[bb]
        i2, c2 = mset2[bb]
        if not np.array_equal(i1, i2) or not np.array_equal(c1, c2):
            print("Entries disagree for block", block_id, ":", bb)
            print("Ids")
            print("Res:", i1)
            print("Exp:", i2)
            print("Counts")
            print("Res:", c1)
            print("Exp:", c2)
            print("From segmentation")
            # Map the multiset voxel back to its full-resolution region.
            effective_bb = tuple(slice(b.start * sc + off, b.stop * sc + off) for b, sc, off in zip(bb, scale, offset))
            print(effective_bb)
            sub_seg = seg[effective_bb]
            print(sub_seg)
            sids, scounts = np.unique(sub_seg, return_counts=True)
            print("Ids")
            print(sids)
            print("Counts")
            print(scounts)
            return False
    print("Check pixels passed")
    return True
def check_chunk(blocking, chunk_id, ds_mset1, ds_mset2, ds_seg, scale):
    """Compare one chunk of two serialized multiset datasets.

    `chunk_id` may be a flat block index or a per-axis block coordinate
    tuple. Escalates from cheapest to most expensive check: raw
    serialization bytes, then the deserialized members, then a per-pixel
    comparison against the full segmentation `ds_seg`.
    """
    if isinstance(chunk_id, tuple):
        # Convert a per-axis block coordinate into a flat block index.
        bpd = blocking.blocksPerAxis
        strides = [bpd[2] * bpd[1], bpd[2], 1]
        chunk_id = sum([stride * cid for stride, cid in zip(strides, chunk_id)])
        print(chunk_id)
    block = blocking.getBlock(chunk_id)
    # Chunk grid coordinate of this block in the n5 dataset.
    chunk = tuple(beg // ch for beg, ch in zip(block.begin, blocking.blockShape))
    mset1 = ds_mset1.read_chunk(chunk)
    mset2 = ds_mset2.read_chunk(chunk)
    # Cheapest check first: identical serializations imply identical multisets.
    if(check_serialization(mset1, mset2)):
        print("Multisets agree")
        return
    mset1 = deserialize_multiset(mset1, block.shape)
    mset2 = deserialize_multiset(mset2, block.shape)
    if(check_multiset_members(mset1, mset2)):
        print("Multisets agree")
        return
    ds_seg.n_threads = 8
    seg = ds_seg[:]
    # Offset of this block in full-resolution segmentation coordinates.
    offset = tuple(beg * sc for beg, sc in zip(block.begin, scale))
    if(check_pixels(mset1, mset2, seg, scale, offset)):
        print("Multisets agree")
    else:
        print("Multisets disagree")
def check_multiset(level, chunk_id=0):
    """Open the segmentation plus the reference and result multiset datasets
    for scale `level` and compare one chunk of the two multisets.

    `chunk_id` is forwarded to `check_chunk` and may be a flat index or a
    block coordinate tuple.
    """
    ref_path = '/home/pape/Work/data/cremi/example/sampleA.n5'
    seg_key = 'volumes/segmentation/multicut'
    mset_key = 'paintera/data/s%i' % level
    ref_file = z5py.File(ref_path)
    ds_seg = ref_file[seg_key]
    ds_mset = ref_file[mset_key]

    res_path = '/home/pape/Work/data/cremi/example/sampleA_paintera.n5'
    mset_key1 = 'volumes/segmentation/multicut/data/s%i' % level
    res_file = z5py.File(res_path)
    ds_mset1 = res_file[mset_key1]

    # Both datasets must agree on geometry before chunks can be compared.
    assert ds_mset.shape == ds_mset1.shape
    assert ds_mset.chunks == ds_mset1.chunks, "%s, %s" % (str(ds_mset.chunks),
                                                          str(ds_mset1.chunks))
    shape, chunks = ds_mset.shape, ds_mset.chunks

    ds_factor = ds_mset.attrs.get('downsamplingFactors', None)
    ds_factor_exp = ds_mset1.attrs.get('downsamplingFactors', None)
    assert ds_factor == ds_factor_exp
    # Factors are stored reversed relative to the axis order used here.
    scale = [int(df) for df in ds_factor[::-1]]
    print("Have scale", scale)

    blocking = nt.blocking([0, 0, 0], shape, chunks)
    check_chunk(blocking, chunk_id, ds_mset, ds_mset1, ds_seg, scale)
if __name__ == '__main__':
    level = 1
    # chunk_id = 0
    # Block coordinate of the chunk to inspect (presumably (z, y, x),
    # matching the stride order used in check_chunk -- confirm).
    chunk_id = (0, 2, 0)
    check_multiset(level, chunk_id)
    # print("Checking mult-sets for chunk 0 of scales:")
    # for scale in range(5):
    #     print("Check scale", scale)
    #     check_multiset(scale)
| [
"numpy.unique",
"elf.label_multiset.deserialize_multiset",
"nifty.tools.blocking",
"z5py.File",
"numpy.array_equal",
"tqdm.trange"
] | [((1798, 1840), 'nifty.tools.blocking', 'nt.blocking', (['[0, 0, 0]', 'roi_end', '[1, 1, 1]'], {}), '([0, 0, 0], roi_end, [1, 1, 1])\n', (1809, 1840), True, 'import nifty.tools as nt\n'), ((1861, 1892), 'tqdm.trange', 'trange', (['blocking.numberOfBlocks'], {}), '(blocking.numberOfBlocks)\n', (1867, 1892), False, 'from tqdm import trange\n'), ((3493, 3533), 'elf.label_multiset.deserialize_multiset', 'deserialize_multiset', (['mset1', 'block.shape'], {}), '(mset1, block.shape)\n', (3513, 3533), False, 'from elf.label_multiset import deserialize_multiset\n'), ((3546, 3586), 'elf.label_multiset.deserialize_multiset', 'deserialize_multiset', (['mset2', 'block.shape'], {}), '(mset2, block.shape)\n', (3566, 3586), False, 'from elf.label_multiset import deserialize_multiset\n'), ((4129, 4144), 'z5py.File', 'z5py.File', (['path'], {}), '(path)\n', (4138, 4144), False, 'import z5py\n'), ((4339, 4355), 'z5py.File', 'z5py.File', (['path1'], {}), '(path1)\n', (4348, 4355), False, 'import z5py\n'), ((4902, 4939), 'nifty.tools.blocking', 'nt.blocking', (['[0, 0, 0]', 'shape', 'chunks'], {}), '([0, 0, 0], shape, chunks)\n', (4913, 4939), True, 'import nifty.tools as nt\n'), ((311, 339), 'numpy.array_equal', 'np.array_equal', (['mset1', 'mset2'], {}), '(mset1, mset2)\n', (325, 339), True, 'import numpy as np\n'), ((824, 852), 'numpy.array_equal', 'np.array_equal', (['amax1', 'amax2'], {}), '(amax1, amax2)\n', (838, 852), True, 'import numpy as np\n'), ((1050, 1076), 'numpy.array_equal', 'np.array_equal', (['off1', 'off2'], {}), '(off1, off2)\n', (1064, 1076), True, 'import numpy as np\n'), ((1263, 1287), 'numpy.array_equal', 'np.array_equal', (['id1', 'id2'], {}), '(id1, id2)\n', (1277, 1287), True, 'import numpy as np\n'), ((1480, 1510), 'numpy.array_equal', 'np.array_equal', (['count1', 'count2'], {}), '(count1, count2)\n', (1494, 1510), True, 'import numpy as np\n'), ((2669, 2707), 'numpy.unique', 'np.unique', (['sub_seg'], {'return_counts': '(True)'}), '(sub_seg, 
return_counts=True)\n', (2678, 2707), True, 'import numpy as np\n'), ((2088, 2110), 'numpy.array_equal', 'np.array_equal', (['i1', 'i2'], {}), '(i1, i2)\n', (2102, 2110), True, 'import numpy as np\n'), ((2118, 2140), 'numpy.array_equal', 'np.array_equal', (['c1', 'c2'], {}), '(c1, c2)\n', (2132, 2140), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision.transforms import ToTensor
from torchvision.datasets import MNIST
from vis_util import visual_mnist
##### settings
x_dim = 28 * 28  # size of a flattened mnist digit
z_dim = 100  # dimensionality of the random noise vector
h_dim = 128  # hidden layer width for both networks
batch_size = 60000 // 1000  # 60 images per batch -> exactly 1000 batches per epoch
lr = 1e-3  # Adam learning rate for both optimizers
epochs = 120
##### load data and generate targets
# NOTE: we only need train data
trainset = MNIST('mnist', train=True, transform=ToTensor(), download=True)
dataloader = DataLoader(trainset, batch_size=batch_size, shuffle=True)
# targets for D, who only output 1 for real data, 0 for fake data
real_target = torch.ones(batch_size, 1)
fake_target = torch.zeros(batch_size, 1)
##### network arch
class Generator(nn.Module):
    """Generator: maps a noise vector to a fake sample meant to fool the
    discriminator.

    Args:
        z_dim: dimensionality of the input noise vector.
        h_dim: width of the hidden layer.
        x_dim: dimensionality of the generated sample. Defaults to a
            flattened 28x28 MNIST digit, which preserves the original
            behaviour (the class previously read a module-level global).
    """
    def __init__(self, z_dim, h_dim, x_dim=28 * 28):
        super().__init__()
        # The final Sigmoid keeps outputs in [0, 1], the same range as the
        # ToTensor-normalized MNIST pixels the discriminator sees.
        self.net = nn.Sequential(
            nn.Linear(z_dim, h_dim),
            nn.ReLU(inplace=True),
            nn.Linear(h_dim, x_dim),
            nn.Sigmoid(),
        )

    def forward(self, x):
        """Generate a batch of samples from a batch of noise vectors."""
        x = self.net(x)
        return x
class Discriminator(nn.Module):
    """Discriminator: scores an input sample with the probability that it
    is real data rather than output of the generator."""

    def __init__(self, x_dim, h_dim):
        super().__init__()
        # Single hidden layer followed by a sigmoid probability head.
        layers = [
            nn.Linear(x_dim, h_dim),
            nn.ReLU(inplace=True),
            nn.Linear(h_dim, 1),
            nn.Sigmoid(),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        """Return a (batch, 1) tensor of real-data probabilities."""
        return self.net(x)
##### init model
G = Generator(z_dim, h_dim)
D = Discriminator(x_dim, h_dim)
##### optimizer and loss
G_optim = optim.Adam(G.parameters(), lr=lr)
D_optim = optim.Adam(D.parameters(), lr=lr)
# Separate BCE instances for readability; they compute the same loss.
D_loss_real = nn.BCELoss()
D_loss_fake = nn.BCELoss()
G_loss_f2r = nn.BCELoss()
##### training loop
G.train()
D.train()
for epoch_i in range(epochs):
    for x, _ in dataloader:
        # step 1: G generate fake data fx using noise z
        z = torch.randn(batch_size, z_dim)
        fx = G(z)
        # step 2: D judge on (fx, x), then update itself
        # D is pushed towards 1 on real images and 0 on generated ones.
        fake = D(fx)
        x = x.view(-1, x_dim)  # flatten (B, 1, 28, 28) images to (B, 784)
        real = D(x)
        D_loss = D_loss_real(real, real_target) + D_loss_fake(fake, fake_target)
        D_optim.zero_grad()
        D_loss.backward()
        D_optim.step()
        # step 3: G update
        # Fresh noise; G is rewarded when D labels its output as real.
        z = torch.randn(batch_size, z_dim)
        fx = G(z)
        fake = D(fx)
        G_loss = G_loss_f2r(fake, real_target)
        G_optim.zero_grad()
        G_loss.backward()
        G_optim.step()
    # for each epoch, visualize result
    print(f"Epoch-{epoch_i}: D_loss: {D_loss:.5f}, G_loss: {G_loss:.5f}")
visual_mnist(epoch_i, fx.detach().numpy()[:16], (4, 4)) | [
"torch.nn.Sigmoid",
"torch.nn.ReLU",
"torch.randn",
"torch.nn.BCELoss",
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"torchvision.transforms.ToTensor",
"torch.zeros",
"torch.ones"
] | [((548, 605), 'torch.utils.data.DataLoader', 'DataLoader', (['trainset'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(trainset, batch_size=batch_size, shuffle=True)\n', (558, 605), False, 'from torch.utils.data import DataLoader\n'), ((686, 711), 'torch.ones', 'torch.ones', (['batch_size', '(1)'], {}), '(batch_size, 1)\n', (696, 711), False, 'import torch\n'), ((727, 753), 'torch.zeros', 'torch.zeros', (['batch_size', '(1)'], {}), '(batch_size, 1)\n', (738, 753), False, 'import torch\n'), ((1905, 1917), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (1915, 1917), True, 'import torch.nn as nn\n'), ((1932, 1944), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (1942, 1944), True, 'import torch.nn as nn\n'), ((1958, 1970), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (1968, 1970), True, 'import torch.nn as nn\n'), ((508, 518), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (516, 518), False, 'from torchvision.transforms import ToTensor\n'), ((2139, 2169), 'torch.randn', 'torch.randn', (['batch_size', 'z_dim'], {}), '(batch_size, z_dim)\n', (2150, 2169), False, 'import torch\n'), ((2515, 2545), 'torch.randn', 'torch.randn', (['batch_size', 'z_dim'], {}), '(batch_size, z_dim)\n', (2526, 2545), False, 'import torch\n'), ((1023, 1046), 'torch.nn.Linear', 'nn.Linear', (['z_dim', 'h_dim'], {}), '(z_dim, h_dim)\n', (1032, 1046), True, 'import torch.nn as nn\n'), ((1060, 1081), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1067, 1081), True, 'import torch.nn as nn\n'), ((1095, 1118), 'torch.nn.Linear', 'nn.Linear', (['h_dim', 'x_dim'], {}), '(h_dim, x_dim)\n', (1104, 1118), True, 'import torch.nn as nn\n'), ((1132, 1144), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1142, 1144), True, 'import torch.nn as nn\n'), ((1499, 1522), 'torch.nn.Linear', 'nn.Linear', (['x_dim', 'h_dim'], {}), '(x_dim, h_dim)\n', (1508, 1522), True, 'import torch.nn as nn\n'), ((1536, 1557), 'torch.nn.ReLU', 
'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1543, 1557), True, 'import torch.nn as nn\n'), ((1571, 1590), 'torch.nn.Linear', 'nn.Linear', (['h_dim', '(1)'], {}), '(h_dim, 1)\n', (1580, 1590), True, 'import torch.nn as nn\n'), ((1604, 1616), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1614, 1616), True, 'import torch.nn as nn\n')] |
from django.shortcuts import render, redirect, reverse
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.db.models import Q
from django.views.generic import View
from .models import OrgInfo, CityInfo, TeacherInfo
from operations.models import UserLove
# Create your views here.
class OrgList(View):
    """
    Organization list page: supports keyword search, filtering by category
    and city, sorting, and pagination.
    """

    @staticmethod
    def get(request):
        all_orgs = OrgInfo.objects.all()
        all_citys = CityInfo.objects.all()
        # Top three organizations by favourite count, shown as "popular".
        sort_orgs = all_orgs.order_by('-love_num')[:3]

        # Global fuzzy search across name / summary / detail text.
        keyword = request.GET.get('keyword', '')
        if keyword:
            fuzzy = (Q(name__icontains=keyword)
                     | Q(desc__icontains=keyword)
                     | Q(detail__icontains=keyword))
            all_orgs = all_orgs.filter(fuzzy)

        # Filter by organization category.
        category = request.GET.get('cat', '')
        if category:
            all_orgs = all_orgs.filter(org_category=category)

        # Filter by city.
        city_id = request.GET.get('city', '')
        if city_id:
            all_orgs = all_orgs.filter(city_id=int(city_id))

        # Sort descending by the requested field; 'course_num' is accepted
        # but deliberately ignored (not a model field yet).
        sort = request.GET.get('sort', '')
        if sort and sort != 'course_num':
            all_orgs = all_orgs.order_by('-' + sort)

        # Paginate, two organizations per page; fall back gracefully on
        # non-numeric or out-of-range page parameters.
        page = request.GET.get('page')
        paginator = Paginator(all_orgs, 2)
        try:
            pages = paginator.page(page)
        except PageNotAnInteger:
            pages = paginator.page(1)
        except EmptyPage:
            pages = paginator.page(paginator.num_pages)

        return render(request, 'orgs/org-list.html', {
            'all_orgs': all_orgs,
            'all_citys': all_citys,
            'sort_orgs': sort_orgs,
            'pages': pages,
            'category': category,
            'city_id': city_id,
            'sort': sort,
            'keyword': keyword,
        })
class OrgDetail(View):
    """
    Organization detail page (home tab).

    Each view increments the organization's click counter; for logged-in
    users the template also receives their favourite status for this org.

    :param org_id: id of the organization to look up and display.
    """

    @staticmethod
    def get(request, org_id):
        if org_id:
            org = OrgInfo.objects.filter(id=int(org_id))[0]
            # Count this page view as one click on the organization.
            org.click_num += 1
            org.save()
            # Favourite state for the current user, rendered with the page.
            love_status = False
            if request.user.is_authenticated:
                fav_record = UserLove.objects.filter(love_man=request.user,
                                                      love_id=int(org_id),
                                                      love_type=1,
                                                      love_status=True)
                if fav_record:
                    love_status = True
            return render(request, 'orgs/org-detail-homepage.html', {
                'org': org,
                'detail_type': 'home',
                'love_status': love_status,
            })
class OrgDetailCourse(View):
    """
    Organization detail page -- courses tab.
    """

    def get(self, request, org_id):
        if org_id:
            org = OrgInfo.objects.filter(id=int(org_id))[0]
            # Favourite state of the current user for this organization.
            love_status = False
            if request.user.is_authenticated:
                fav_qs = UserLove.objects.filter(
                    love_man=request.user, love_id=int(org_id),
                    love_type=1, love_status=True)
                love_status = bool(fav_qs)
            return render(request, 'orgs/org-detail-course.html', {
                'org': org,
                'detail_type': 'course',
                'love_status': love_status,
            })
class OrgDetailDesc(View):
    """
    Organization detail page -- description tab.
    """

    def get(self, request, org_id):
        if org_id:
            org = OrgInfo.objects.filter(id=int(org_id))[0]
            # Favourite state of the current user for this organization.
            love_status = False
            if request.user.is_authenticated:
                fav_qs = UserLove.objects.filter(
                    love_man=request.user, love_id=int(org_id),
                    love_type=1, love_status=True)
                love_status = bool(fav_qs)
            return render(request, 'orgs/org-detail-desc.html', {
                'org': org,
                'detail_type': 'desc',
                'love_status': love_status,
            })
class OrgDetailTeacher(View):
    """
    Organization detail page -- teachers tab.
    """

    def get(self, request, org_id):
        if org_id:
            org = OrgInfo.objects.filter(id=int(org_id))[0]
            # Favourite state of the current user for this organization.
            love_status = False
            if request.user.is_authenticated:
                fav_qs = UserLove.objects.filter(
                    love_man=request.user, love_id=int(org_id),
                    love_type=1, love_status=True)
                love_status = bool(fav_qs)
            return render(request, 'orgs/org-detail-teachers.html', {
                'org': org,
                'detail_type': 'teacher',
                'love_status': love_status,
            })
class TeacherList(View):
    """
    Teacher list page: keyword search, sorting and pagination, plus the two
    most-favourited teachers as recommendations.
    """

    @staticmethod
    def get(request):
        all_teachers = TeacherInfo.objects.all()
        recommend = all_teachers.order_by('-love_num')[:2]

        # Fuzzy search on the teacher name.
        keyword = request.GET.get('keyword', '')
        if keyword:
            all_teachers = all_teachers.filter(Q(name__icontains=keyword))

        # Sort descending by the requested field.
        sort = request.GET.get('sort', '')
        if sort:
            all_teachers = all_teachers.order_by('-' + sort)

        # Paginate, two teachers per page; fall back gracefully on bad input.
        page = request.GET.get('page')
        paginator = Paginator(all_teachers, 2)
        try:
            pages = paginator.page(page)
        except PageNotAnInteger:
            pages = paginator.page(1)
        except EmptyPage:
            pages = paginator.page(paginator.num_pages)

        return render(request, 'orgs/teachers-list.html', {
            'all_teachers': all_teachers,
            'pages': pages,
            'recommend': recommend,
            'sort': sort,
            'keyword': keyword,
        })
class TeacherDetail(View):
    """
    Teacher detail page: bumps the teacher's click counter and shows the
    three most-clicked teachers as a ranking.
    """

    def get(self, request, teacher_id):
        if teacher_id:
            matches = TeacherInfo.objects.filter(id=teacher_id)
            if matches:
                teacher = matches[0]
                # Every page view counts as one click for the ranking.
                teacher.click_num += 1
                teacher.save()
            # Teacher ranking by accumulated click count.
            recommend = TeacherInfo.objects.all().order_by('-click_num')[:3]
            return render(request, 'orgs/teacher-detail.html', {
                'teacher': teacher,
                'recommend': recommend,
            })
| [
"django.shortcuts.render",
"django.db.models.Q",
"django.core.paginator.Paginator"
] | [((1336, 1358), 'django.core.paginator.Paginator', 'Paginator', (['all_orgs', '(2)'], {}), '(all_orgs, 2)\n', (1345, 1358), False, 'from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\n'), ((1554, 1763), 'django.shortcuts.render', 'render', (['request', '"""orgs/org-list.html"""', "{'all_orgs': all_orgs, 'all_citys': all_citys, 'sort_orgs': sort_orgs,\n 'pages': pages, 'category': category, 'city_id': city_id, 'sort': sort,\n 'keyword': keyword}"], {}), "(request, 'orgs/org-list.html', {'all_orgs': all_orgs, 'all_citys':\n all_citys, 'sort_orgs': sort_orgs, 'pages': pages, 'category': category,\n 'city_id': city_id, 'sort': sort, 'keyword': keyword})\n", (1560, 1763), False, 'from django.shortcuts import render, redirect, reverse\n'), ((5265, 5291), 'django.core.paginator.Paginator', 'Paginator', (['all_teachers', '(2)'], {}), '(all_teachers, 2)\n', (5274, 5291), False, 'from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\n'), ((5487, 5639), 'django.shortcuts.render', 'render', (['request', '"""orgs/teachers-list.html"""', "{'all_teachers': all_teachers, 'pages': pages, 'recommend': recommend,\n 'sort': sort, 'keyword': keyword}"], {}), "(request, 'orgs/teachers-list.html', {'all_teachers': all_teachers,\n 'pages': pages, 'recommend': recommend, 'sort': sort, 'keyword': keyword})\n", (5493, 5639), False, 'from django.shortcuts import render, redirect, reverse\n'), ((2542, 2659), 'django.shortcuts.render', 'render', (['request', '"""orgs/org-detail-homepage.html"""', "{'org': org, 'detail_type': 'home', 'love_status': love_status}"], {}), "(request, 'orgs/org-detail-homepage.html', {'org': org, 'detail_type':\n 'home', 'love_status': love_status})\n", (2548, 2659), False, 'from django.shortcuts import render, redirect, reverse\n'), ((3198, 3315), 'django.shortcuts.render', 'render', (['request', '"""orgs/org-detail-course.html"""', "{'org': org, 'detail_type': 'course', 'love_status': love_status}"], {}), "(request, 
'orgs/org-detail-course.html', {'org': org, 'detail_type':\n 'course', 'love_status': love_status})\n", (3204, 3315), False, 'from django.shortcuts import render, redirect, reverse\n'), ((3850, 3963), 'django.shortcuts.render', 'render', (['request', '"""orgs/org-detail-desc.html"""', "{'org': org, 'detail_type': 'desc', 'love_status': love_status}"], {}), "(request, 'orgs/org-detail-desc.html', {'org': org, 'detail_type':\n 'desc', 'love_status': love_status})\n", (3856, 3963), False, 'from django.shortcuts import render, redirect, reverse\n'), ((4504, 4624), 'django.shortcuts.render', 'render', (['request', '"""orgs/org-detail-teachers.html"""', "{'org': org, 'detail_type': 'teacher', 'love_status': love_status}"], {}), "(request, 'orgs/org-detail-teachers.html', {'org': org, 'detail_type':\n 'teacher', 'love_status': love_status})\n", (4510, 4624), False, 'from django.shortcuts import render, redirect, reverse\n'), ((6159, 6252), 'django.shortcuts.render', 'render', (['request', '"""orgs/teacher-detail.html"""', "{'teacher': teacher, 'recommend': recommend}"], {}), "(request, 'orgs/teacher-detail.html', {'teacher': teacher,\n 'recommend': recommend})\n", (6165, 6252), False, 'from django.shortcuts import render, redirect, reverse\n'), ((5036, 5062), 'django.db.models.Q', 'Q', ([], {'name__icontains': 'keyword'}), '(name__icontains=keyword)\n', (5037, 5062), False, 'from django.db.models import Q\n'), ((737, 765), 'django.db.models.Q', 'Q', ([], {'detail__icontains': 'keyword'}), '(detail__icontains=keyword)\n', (738, 765), False, 'from django.db.models import Q\n'), ((683, 709), 'django.db.models.Q', 'Q', ([], {'name__icontains': 'keyword'}), '(name__icontains=keyword)\n', (684, 709), False, 'from django.db.models import Q\n'), ((710, 736), 'django.db.models.Q', 'Q', ([], {'desc__icontains': 'keyword'}), '(desc__icontains=keyword)\n', (711, 736), False, 'from django.db.models import Q\n')] |
from keras.callbacks import TensorBoard,EarlyStopping,TerminateOnNaN,ReduceLROnPlateau,ModelCheckpoint
import os
import sys
import tensorflow as tf
import keras.backend.tensorflow_backend as KTF
file_abspath = os.path.abspath(sys.argv[0]) # absolute path of the running script file
location = os.path.dirname(file_abspath) # directory that contains the running script
# TensorBoard logging: dump graph, gradients and weight images to ./logs once per epoch.
tbCallBack = TensorBoard(log_dir='./logs', histogram_freq=0, batch_size=32, write_graph=True, write_grads=True, write_images=True, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None, embeddings_data=None, update_freq='epoch')
# Stop training when validation accuracy has not improved for 10 consecutive epochs.
esCallBack=EarlyStopping(monitor='val_acc', min_delta=0, patience=10, verbose=0, mode='auto', baseline=None, restore_best_weights=False)
# Abort training immediately if the loss becomes NaN.
tnonCallBack = TerminateOnNaN()
# Multiply the learning rate by 0.2 (floor 1e-4) after 3 epochs without val_acc improvement.
rpCallBack = ReduceLROnPlateau(monitor='val_acc', factor=0.2,patience=3, min_lr=0.0001)
# Save the best model (by val_acc) next to the script as "<script name>.model";
# file_abspath[:-3] assumes the script filename ends in ".py".
mcCallBack = ModelCheckpoint(filepath=file_abspath[:-3]+'.model', monitor='val_acc', mode='auto', period=1,save_best_only=True)
# Ready-to-use list for model.fit(callbacks=callbacklist).
callbacklist=[tbCallBack,esCallBack,tnonCallBack,rpCallBack,mcCallBack]
| [
"keras.callbacks.ModelCheckpoint",
"keras.callbacks.ReduceLROnPlateau",
"keras.callbacks.TerminateOnNaN",
"keras.callbacks.TensorBoard",
"os.path.dirname",
"keras.callbacks.EarlyStopping",
"os.path.abspath"
] | [((213, 241), 'os.path.abspath', 'os.path.abspath', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (228, 241), False, 'import os\n'), ((266, 295), 'os.path.dirname', 'os.path.dirname', (['file_abspath'], {}), '(file_abspath)\n', (281, 295), False, 'import os\n'), ((327, 575), 'keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': '"""./logs"""', 'histogram_freq': '(0)', 'batch_size': '(32)', 'write_graph': '(True)', 'write_grads': '(True)', 'write_images': '(True)', 'embeddings_freq': '(0)', 'embeddings_layer_names': 'None', 'embeddings_metadata': 'None', 'embeddings_data': 'None', 'update_freq': '"""epoch"""'}), "(log_dir='./logs', histogram_freq=0, batch_size=32, write_graph=\n True, write_grads=True, write_images=True, embeddings_freq=0,\n embeddings_layer_names=None, embeddings_metadata=None, embeddings_data=\n None, update_freq='epoch')\n", (338, 575), False, 'from keras.callbacks import TensorBoard, EarlyStopping, TerminateOnNaN, ReduceLROnPlateau, ModelCheckpoint\n'), ((573, 703), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_acc"""', 'min_delta': '(0)', 'patience': '(10)', 'verbose': '(0)', 'mode': '"""auto"""', 'baseline': 'None', 'restore_best_weights': '(False)'}), "(monitor='val_acc', min_delta=0, patience=10, verbose=0, mode=\n 'auto', baseline=None, restore_best_weights=False)\n", (586, 703), False, 'from keras.callbacks import TensorBoard, EarlyStopping, TerminateOnNaN, ReduceLROnPlateau, ModelCheckpoint\n'), ((714, 730), 'keras.callbacks.TerminateOnNaN', 'TerminateOnNaN', ([], {}), '()\n', (728, 730), False, 'from keras.callbacks import TensorBoard, EarlyStopping, TerminateOnNaN, ReduceLROnPlateau, ModelCheckpoint\n'), ((744, 819), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_acc"""', 'factor': '(0.2)', 'patience': '(3)', 'min_lr': '(0.0001)'}), "(monitor='val_acc', factor=0.2, patience=3, min_lr=0.0001)\n", (761, 819), False, 'from keras.callbacks import TensorBoard, 
EarlyStopping, TerminateOnNaN, ReduceLROnPlateau, ModelCheckpoint\n'), ((832, 953), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': "(file_abspath[:-3] + '.model')", 'monitor': '"""val_acc"""', 'mode': '"""auto"""', 'period': '(1)', 'save_best_only': '(True)'}), "(filepath=file_abspath[:-3] + '.model', monitor='val_acc',\n mode='auto', period=1, save_best_only=True)\n", (847, 953), False, 'from keras.callbacks import TensorBoard, EarlyStopping, TerminateOnNaN, ReduceLROnPlateau, ModelCheckpoint\n')] |
# Generated by Django 3.1.1 on 2020-12-26 20:50
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: rename ``SleepNight.data`` to ``SleepNight.diary_day``."""
    dependencies = [
        ('dashboard', '0033_auto_20201226_2148'),
    ]
    operations = [
        # RenameField keeps the column data intact; only the field/column name changes.
        migrations.RenameField(
            model_name='sleepnight',
            old_name='data',
            new_name='diary_day',
        ),
    ]
| [
"django.db.migrations.RenameField"
] | [((228, 319), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""sleepnight"""', 'old_name': '"""data"""', 'new_name': '"""diary_day"""'}), "(model_name='sleepnight', old_name='data', new_name=\n 'diary_day')\n", (250, 319), False, 'from django.db import migrations\n')] |
"""Fear and Greed Index Class"""
__docformat__ = "numpy"
from matplotlib import pyplot as plt
from fear_greed_index import scrape_cnn
from fear_greed_index.FearAndGreedIndicator import FearAndGreedIndicator
class CNNFearAndGreedIndex:
    """CNN Fear and Greed Index.

    Scrapes CNN's Fear & Greed page once at construction time and exposes
    the seven component indicators plus the aggregate index (summary text
    and historical chart).

    Attributes
    ----------
    junk_bond_demand : FearAndGreedIndicator
        Junk Bond Demand indicator
    market_volatility : FearAndGreedIndicator
        Market Volatility indicator
    put_and_call_options : FearAndGreedIndicator
        Put and Call Options indicator
    market_momentum : FearAndGreedIndicator
        Market Momentum indicator
    stock_price_strength : FearAndGreedIndicator
        Stock Price Strength indicator
    stock_price_breadth : FearAndGreedIndicator
        Stock Price Breadth indicator
    safe_heaven_demand : FearAndGreedIndicator
        Safe Heaven Demand indicator
    index_summary : str
        Summary of the current sentiment index ("N/A" until loaded)
    index_chart : image
        Chart of the historical sentiment index (None until loaded)
    """
    # Maps each indicator's display name to CNN's chart-type identifier,
    # consumed by scrape_cnn._get_chart in _load_fear_and_greed.
    # NOTE(review): "Safe Heaven" is a (sic) spelling kept because the key
    # must match FearAndGreedIndicator.get_type_indicator() below.
    indicator_chart_type = {
        "Junk Bond Demand": "IGHYPtile",
        "Market Volatility": "VIXPtile",
        "Put and Call Options": "PutCallPtile",
        "Market Momentum": "SPXPtile",
        "Stock Price Strength": "NHNLPtile",
        "Stock Price Breadth": "McOscPtile",
        "Safe Heaven Demand": "StkBdPtile",
    }
    def __init__(self):
        """Constructor: build all indicators, then scrape CNN to populate them."""
        self.junk_bond_demand = FearAndGreedIndicator("Junk Bond Demand")
        self.market_volatility = FearAndGreedIndicator("Market Volatility")
        self.put_and_call_options = FearAndGreedIndicator("Put and Call Options")
        self.market_momentum = FearAndGreedIndicator("Market Momentum")
        self.stock_price_strength = FearAndGreedIndicator("Stock Price Strength")
        self.stock_price_breadth = FearAndGreedIndicator("Stock Price Breadth")
        self.safe_heaven_demand = FearAndGreedIndicator("Safe Heaven Demand")
        self.index_summary = "N/A"
        self.index_chart = None
        # Order matters: _load_fear_and_greed fills this list positionally,
        # assuming it matches the order indicators appear on CNN's page.
        self.all_indicators = [
            self.junk_bond_demand,
            self.market_volatility,
            self.put_and_call_options,
            self.market_momentum,
            self.stock_price_strength,
            self.stock_price_breadth,
            self.safe_heaven_demand,
        ]
        self._load_fear_and_greed()
    def _load_fear_and_greed(self):
        """Load Fear and Greed Index by scraping CNN data."""
        text_soup_cnn = scrape_cnn._get_fear_greed_index()
        # Fill in indicators summary, last_sentiment, last_changed, update_on.
        # indicator_idx walks self.all_indicators in page order -- assumes the
        # page lists indicators in the same order as the list (verify on site).
        indicator_idx = 0
        for text in text_soup_cnn.findAll("div", {"class": "modContent feargreed"}):
            for content in text.contents:
                for txt in content.find_all("div", {"class": "wsod_fLeft smarttext"}):
                    self.all_indicators[indicator_idx]._set_summary(
                        txt.contents[0].text
                    )
                    # Extra children, when present, carry the last-changed /
                    # last-sentiment and update-on texts.
                    if len(txt.contents) > 1:
                        self.all_indicators[indicator_idx]._set_last_changed(
                            txt.contents[1].text
                        )
                        self.all_indicators[indicator_idx]._set_last_sentiment(
                            txt.contents[1].span.text
                        )
                    if len(txt.contents) > 2:
                        self.all_indicators[indicator_idx]._set_update_on(
                            txt.contents[2].text
                        )
                    indicator_idx += 1
        # Fill in indicator sentiment (right-hand column of the same rows).
        indicator_idx = 0
        for text in text_soup_cnn.findAll("div", {"class": "modContent feargreed"}):
            for content in text.contents:
                for txt in content.find_all("div", {"class": "wsod_fRight"}):
                    # Skip the chart container divs; only plain fRight divs
                    # carry the sentiment text.
                    if "wsod_fgIndicatorCht" not in txt["class"]:
                        self.all_indicators[indicator_idx]._set_sentiment(
                            txt.contents[0]
                        )
                        indicator_idx += 1
        # Fill in indicators charts, fetched per chart-type identifier.
        for indicator in self.all_indicators:
            indicator._set_chart(
                scrape_cnn._get_chart(
                    self.indicator_chart_type[indicator.get_type_indicator()]
                )
            )
        # Fill in fear and greed index: the leading text of the first
        # feargreed div, split on ")" into one fragment per reading.
        index_data = (
            text_soup_cnn.findAll("div", {"class": "modContent feargreed"})[0]
            .contents[0]
            .text
        )
        fg_index = [fg + ")" for fg in index_data.split(")")[:-1]]
        self.index_summary = fg_index[0] + "\n "
        # NOTE(review): str.strip("Fear & Greed ") removes any of those
        # characters from BOTH ends, not the literal prefix -- possibly
        # unintended, but behavior is kept as-is here.
        self.index_summary += "\n ".join(
            [fg.strip("Fear & Greed ") for fg in fg_index[1:]]
        )
        # Fill in index chart (historical aggregate).
        self.index_chart = scrape_cnn._get_chart("AvgPtileModel")
    def get_junk_bond_demand(self):
        """Get Junk Bond Demand indicator."""
        return self.junk_bond_demand
    def get_market_volatility(self):
        """Get Market Volatility indicator."""
        return self.market_volatility
    def get_put_and_call_options(self):
        """Get Put and Call Options indicator."""
        return self.put_and_call_options
    def get_market_momentum(self):
        """Get Market Momentum indicator."""
        return self.market_momentum
    def get_stock_price_strength(self):
        """Get Stock Price Strength indicator."""
        return self.stock_price_strength
    def get_stock_price_breadth(self):
        """Get Stock Price Breadth indicator."""
        return self.stock_price_breadth
    def get_safe_heaven_demand(self):
        """Get Safe Heaven Demand indicator."""
        return self.safe_heaven_demand
    def get_indicators_report(self):
        """Get the concatenated text report of all seven indicators."""
        indicators_report = ""
        for indicator in self.all_indicators:
            indicators_report += indicator.get_report() + "\n"
        return indicators_report
    def get_index(self):
        """Get the aggregate index summary text."""
        return self.index_summary
    def get_index_chart(self):
        """Get the aggregate index historical chart."""
        return self.index_chart
    def get_complete_report(self):
        """Get the index summary followed by every indicator's report."""
        complete_report = self.get_index() + "\n\n"
        complete_report += self.get_indicators_report()
        return complete_report
    def plot_all_charts(self, fig: plt.figure):
        """Plot all indicator charts plus the index chart on a 3x3 grid.

        Parameters
        ----------
        fig : plt.figure
            matplotlib figure to plot all charts on

        Returns
        -------
        plt.figure
            matplotlib figure ready to be shown
        """
        # Indicators occupy subplots 1-7; the aggregate index goes in slot 8.
        for i, indicator in enumerate(self.all_indicators):
            ax = fig.add_subplot(3, 3, i + 1)
            ax.set_axis_off()
            plt.imshow(indicator.chart)
        ax = fig.add_subplot(3, 3, 8)
        ax.set_axis_off()
        plt.imshow(self.index_chart)
        fig.subplots_adjust(wspace=0, hspace=-1)
        plt.tight_layout()
        return fig
"matplotlib.pyplot.imshow",
"fear_greed_index.FearAndGreedIndicator.FearAndGreedIndicator",
"fear_greed_index.scrape_cnn._get_chart",
"matplotlib.pyplot.tight_layout",
"fear_greed_index.scrape_cnn._get_fear_greed_index"
] | [((1459, 1500), 'fear_greed_index.FearAndGreedIndicator.FearAndGreedIndicator', 'FearAndGreedIndicator', (['"""Junk Bond Demand"""'], {}), "('Junk Bond Demand')\n", (1480, 1500), False, 'from fear_greed_index.FearAndGreedIndicator import FearAndGreedIndicator\n'), ((1534, 1576), 'fear_greed_index.FearAndGreedIndicator.FearAndGreedIndicator', 'FearAndGreedIndicator', (['"""Market Volatility"""'], {}), "('Market Volatility')\n", (1555, 1576), False, 'from fear_greed_index.FearAndGreedIndicator import FearAndGreedIndicator\n'), ((1613, 1658), 'fear_greed_index.FearAndGreedIndicator.FearAndGreedIndicator', 'FearAndGreedIndicator', (['"""Put and Call Options"""'], {}), "('Put and Call Options')\n", (1634, 1658), False, 'from fear_greed_index.FearAndGreedIndicator import FearAndGreedIndicator\n'), ((1690, 1730), 'fear_greed_index.FearAndGreedIndicator.FearAndGreedIndicator', 'FearAndGreedIndicator', (['"""Market Momentum"""'], {}), "('Market Momentum')\n", (1711, 1730), False, 'from fear_greed_index.FearAndGreedIndicator import FearAndGreedIndicator\n'), ((1767, 1812), 'fear_greed_index.FearAndGreedIndicator.FearAndGreedIndicator', 'FearAndGreedIndicator', (['"""Stock Price Strength"""'], {}), "('Stock Price Strength')\n", (1788, 1812), False, 'from fear_greed_index.FearAndGreedIndicator import FearAndGreedIndicator\n'), ((1848, 1892), 'fear_greed_index.FearAndGreedIndicator.FearAndGreedIndicator', 'FearAndGreedIndicator', (['"""Stock Price Breadth"""'], {}), "('Stock Price Breadth')\n", (1869, 1892), False, 'from fear_greed_index.FearAndGreedIndicator import FearAndGreedIndicator\n'), ((1927, 1970), 'fear_greed_index.FearAndGreedIndicator.FearAndGreedIndicator', 'FearAndGreedIndicator', (['"""Safe Heaven Demand"""'], {}), "('Safe Heaven Demand')\n", (1948, 1970), False, 'from fear_greed_index.FearAndGreedIndicator import FearAndGreedIndicator\n'), ((2497, 2531), 'fear_greed_index.scrape_cnn._get_fear_greed_index', 'scrape_cnn._get_fear_greed_index', ([], {}), 
'()\n', (2529, 2531), False, 'from fear_greed_index import scrape_cnn\n'), ((4879, 4917), 'fear_greed_index.scrape_cnn._get_chart', 'scrape_cnn._get_chart', (['"""AvgPtileModel"""'], {}), "('AvgPtileModel')\n", (4900, 4917), False, 'from fear_greed_index import scrape_cnn\n'), ((6927, 6955), 'matplotlib.pyplot.imshow', 'plt.imshow', (['self.index_chart'], {}), '(self.index_chart)\n', (6937, 6955), True, 'from matplotlib import pyplot as plt\n'), ((7013, 7031), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7029, 7031), True, 'from matplotlib import pyplot as plt\n'), ((6826, 6853), 'matplotlib.pyplot.imshow', 'plt.imshow', (['indicator.chart'], {}), '(indicator.chart)\n', (6836, 6853), True, 'from matplotlib import pyplot as plt\n')] |
# -*- coding: utf-8 -*-
"""
idfy_rest_client.models.person_person_information
This file was automatically generated for Idfy by APIMATIC v2.0 ( https://apimatic.io )
"""
from idfy_rest_client.api_helper import APIHelper
class PersonPersonInformation(object):
"""Implementation of the 'Person.PersonInformation' model.
TODO: type model description here.
Attributes:
firstname (string): TODO: type description here.
middlename (string): TODO: type description here.
lastname (string): TODO: type description here.
date_of_birth (string): TODO: type description here.
address (string): TODO: type description here.
zip_code (string): TODO: type description here.
city (string): TODO: type description here.
mobile (string): TODO: type description here.
phone (string): TODO: type description here.
gender (string): TODO: type description here.
raw_json (string): TODO: type description here.
request_id (string): TODO: type description here.
dead (datetime): TODO: type description here.
source (string): TODO: type description here.
"""
# Create a mapping from Model property names to API property names
_names = {
"firstname":'Firstname',
"middlename":'Middlename',
"lastname":'Lastname',
"date_of_birth":'DateOfBirth',
"address":'Address',
"zip_code":'ZipCode',
"city":'City',
"mobile":'Mobile',
"phone":'Phone',
"gender":'Gender',
"raw_json":'RawJson',
"request_id":'RequestId',
"dead":'Dead',
"source":'Source'
}
def __init__(self,
firstname=None,
middlename=None,
lastname=None,
date_of_birth=None,
address=None,
zip_code=None,
city=None,
mobile=None,
phone=None,
gender=None,
raw_json=None,
request_id=None,
dead=None,
source=None,
additional_properties = {}):
"""Constructor for the PersonPersonInformation class"""
# Initialize members of the class
self.firstname = firstname
self.middlename = middlename
self.lastname = lastname
self.date_of_birth = date_of_birth
self.address = address
self.zip_code = zip_code
self.city = city
self.mobile = mobile
self.phone = phone
self.gender = gender
self.raw_json = raw_json
self.request_id = request_id
self.dead = APIHelper.RFC3339DateTime(dead) if dead else None
self.source = source
# Add additional model properties to the instance
self.additional_properties = additional_properties
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
firstname = dictionary.get('Firstname')
middlename = dictionary.get('Middlename')
lastname = dictionary.get('Lastname')
date_of_birth = dictionary.get('DateOfBirth')
address = dictionary.get('Address')
zip_code = dictionary.get('ZipCode')
city = dictionary.get('City')
mobile = dictionary.get('Mobile')
phone = dictionary.get('Phone')
gender = dictionary.get('Gender')
raw_json = dictionary.get('RawJson')
request_id = dictionary.get('RequestId')
dead = APIHelper.RFC3339DateTime.from_value(dictionary.get("Dead")).datetime if dictionary.get("Dead") else None
source = dictionary.get('Source')
# Clean out expected properties from dictionary
for key in cls._names.values():
if key in dictionary:
del dictionary[key]
# Return an object of this model
return cls(firstname,
middlename,
lastname,
date_of_birth,
address,
zip_code,
city,
mobile,
phone,
gender,
raw_json,
request_id,
dead,
source,
dictionary)
| [
"idfy_rest_client.api_helper.APIHelper.RFC3339DateTime"
] | [((2794, 2825), 'idfy_rest_client.api_helper.APIHelper.RFC3339DateTime', 'APIHelper.RFC3339DateTime', (['dead'], {}), '(dead)\n', (2819, 2825), False, 'from idfy_rest_client.api_helper import APIHelper\n')] |
import logging
import subprocess
# create logger
logger = logging.getLogger('simple_example')
logger.setLevel(logging.DEBUG)
# create console handler and set level to INFO
console_logger = logging.StreamHandler()
console_logger.setLevel(logging.INFO)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
# add formatter to console_logger
console_logger.setFormatter(formatter)
# add console_logger to logger
logger.addHandler(console_logger)
def update_colors(color_dictionary):
subprocess.call('', shell=True) # Called to enable ANSI encoding on Windows
escape_code = u'\033'
color_dictionary['error'] = escape_code + u'[91m'
color_dictionary['debug'] = escape_code + u'[3;35m'
color_dictionary['info'] = escape_code + u'[3m'
color_dictionary['warning'] = escape_code + u'[1;33m'
color_dictionary['stack'] = escape_code + u'[1;93m'
color_dictionary['underline'] = escape_code + u'[4m'
color_dictionary['reset'] = escape_code + u'[0m' | [
"logging.getLogger",
"logging.Formatter",
"logging.StreamHandler",
"subprocess.call"
] | [((59, 94), 'logging.getLogger', 'logging.getLogger', (['"""simple_example"""'], {}), "('simple_example')\n", (76, 94), False, 'import logging\n'), ((191, 214), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (212, 214), False, 'import logging\n'), ((285, 347), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(levelname)s - %(message)s')\n", (302, 347), False, 'import logging\n'), ((530, 561), 'subprocess.call', 'subprocess.call', (['""""""'], {'shell': '(True)'}), "('', shell=True)\n", (545, 561), False, 'import subprocess\n')] |
import numpy as np
from pandas import read_csv
import pandas as pd
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from config import *
from lib.preprocess.read_data import DataReader
from lib.scaler.preprocessing_data.data_normalizer import DataNormalizer
class DataPreprocessor:
def __init__(self, metrics):
self.train_size = Config.TRAIN_SIZE
self.valid_size = Config.VALID_SIZE
self.train_data_type = metrics['train_data_type']
self.predict_data = metrics['predict_data']
self.google_trace_config = Config.GOOGLE_TRACE_DATA_CONFIG
self.read_data()
def read_data(self):
self.data = None
data_reader = DataReader()
official_data = data_reader.read()
self.x_data, self.y_data = self.create_x_y_data(official_data)
def create_x_y_data(self, official_data):
if Config.DATA_EXPERIMENT == 'google_trace':
# DEFINE X DATA
if self.train_data_type == 'cpu_mem':
x_data = [official_data['cpu'], official_data['mem']]
elif self.train_data_type == 'cpu':
x_data = [official_data['cpu']]
elif self.train_data_type == 'mem':
x_data = [official_data['mem']]
# DEFINE Y DATA
if self.predict_data == 'cpu':
y_data = official_data['cpu']
elif self.predict_data == 'mem':
y_data = official_data['mem']
else:
print('|-> ERROR: Not support these data')
return x_data, y_data
def create_timeseries(self, X):
if len(X) > 1:
data = np.concatenate((X[0], X[1]), axis=1)
if(len(X) > 2):
for i in range(2, len(X), 1):
data = np.column_stack((data, X[i]))
else:
data = []
for i in range(len(X[0])):
data.append(X[0][i])
data = np.array(data)
return data
def create_x(self, timeseries, sliding):
dataX = []
for i in range(len(timeseries) - sliding):
datai = []
for j in range(sliding):
datai.append(timeseries[i + j])
dataX.append(datai)
return dataX
def init_data_lstm(self, sliding, scaler_method):
#print('>>> start init data for training LSTM model <<<')
data_normalizer = DataNormalizer(scaler_method)
x_timeseries, y_time_series, self.y_scaler = data_normalizer.normalize(
self.x_data, self.y_data)
num_points = x_timeseries.shape[0]
train_point = int(self.train_size * num_points)
x_sample = self.create_x(x_timeseries, sliding)
x_train = x_sample[0:train_point - sliding]
x_train = np.array(x_train)
x_test = x_sample[train_point - sliding:]
x_test = np.array(x_test)
y_train = y_time_series[sliding: train_point]
y_train = np.array(y_train)
y_test = self.y_data[train_point:]
y_test = np.array(y_test)
# print(x_train.shape, x_test.shape)
# print(y_train.shape, y_test.shape)
# print('>>> Init data for training model complete <<<')
return x_train, y_train, x_test, y_test, data_normalizer
def init_data_ann(self, sliding, scaler_method):
print('>>> start init data for training ANN model <<<')
data_normalizer = DataNormalizer(scaler_method)
x_timeseries, y_time_series, self.y_scaler = data_normalizer.normalize(
self.x_data, self.y_data)
num_points = x_timeseries.shape[0]
train_point = int(self.train_size * num_points)
x_sample = self.create_x(x_timeseries, sliding)
x_train = x_sample[0:train_point - sliding]
x_train = np.array(x_train)
x_train = np.reshape(
x_train, (x_train.shape[0], sliding * int(x_train.shape[2])))
x_test = x_sample[train_point - sliding:]
x_test = np.array(x_test)
x_test = np.reshape(
x_test, (x_test.shape[0], sliding * int(x_test.shape[2])))
y_train = y_time_series[sliding: train_point]
y_train = np.array(y_train)
y_test = self.y_data[train_point:]
y_test = np.array(y_test)
return x_train, y_train, x_test, y_test, data_normalizer
| [
"lib.scaler.preprocessing_data.data_normalizer.DataNormalizer",
"lib.preprocess.read_data.DataReader",
"numpy.column_stack",
"numpy.array",
"numpy.concatenate"
] | [((717, 729), 'lib.preprocess.read_data.DataReader', 'DataReader', ([], {}), '()\n', (727, 729), False, 'from lib.preprocess.read_data import DataReader\n'), ((2432, 2461), 'lib.scaler.preprocessing_data.data_normalizer.DataNormalizer', 'DataNormalizer', (['scaler_method'], {}), '(scaler_method)\n', (2446, 2461), False, 'from lib.scaler.preprocessing_data.data_normalizer import DataNormalizer\n'), ((2808, 2825), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (2816, 2825), True, 'import numpy as np\n'), ((2894, 2910), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (2902, 2910), True, 'import numpy as np\n'), ((2984, 3001), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (2992, 3001), True, 'import numpy as np\n'), ((3063, 3079), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (3071, 3079), True, 'import numpy as np\n'), ((3447, 3476), 'lib.scaler.preprocessing_data.data_normalizer.DataNormalizer', 'DataNormalizer', (['scaler_method'], {}), '(scaler_method)\n', (3461, 3476), False, 'from lib.scaler.preprocessing_data.data_normalizer import DataNormalizer\n'), ((3823, 3840), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (3831, 3840), True, 'import numpy as np\n'), ((4014, 4030), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (4022, 4030), True, 'import numpy as np\n'), ((4204, 4221), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (4212, 4221), True, 'import numpy as np\n'), ((4283, 4299), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (4291, 4299), True, 'import numpy as np\n'), ((1674, 1710), 'numpy.concatenate', 'np.concatenate', (['(X[0], X[1])'], {'axis': '(1)'}), '((X[0], X[1]), axis=1)\n', (1688, 1710), True, 'import numpy as np\n'), ((1973, 1987), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1981, 1987), True, 'import numpy as np\n'), ((1812, 1841), 'numpy.column_stack', 'np.column_stack', (['(data, X[i])'], {}), '((data, X[i]))\n', (1827, 
1841), True, 'import numpy as np\n')] |
# pylint: disable=arguments-differ,unused-argument,line-too-long
"""R2Plus1D, implemented in Gluon. https://arxiv.org/abs/1711.11248.
Code partially borrowed from https://github.com/pytorch/vision/blob/master/torchvision/models/video/resnet.py."""
__all__ = ['R2Plus1D', 'r2plus1d_resnet18_kinetics400',
'r2plus1d_resnet34_kinetics400', 'r2plus1d_resnet50_kinetics400',
'r2plus1d_resnet101_kinetics400', 'r2plus1d_resnet152_kinetics400']
from mxnet import init
from mxnet.context import cpu
from mxnet.gluon.block import HybridBlock
from mxnet.gluon import nn
from mxnet.gluon.nn import BatchNorm
def conv3x1x1(in_planes, out_planes, spatial_stride=1, temporal_stride=1, dilation=1):
    """Temporal 3x1x1 convolution with padding.

    The 3-tap kernel runs along the temporal axis only; spatially the
    convolution is pointwise. Temporal padding equals the dilation so
    sequence length is preserved at unit stride.
    """
    stride_3d = (temporal_stride, spatial_stride, spatial_stride)
    temporal_pad = (dilation, 0, 0)
    return nn.Conv3D(in_channels=in_planes,
                     channels=out_planes,
                     kernel_size=(3, 1, 1),
                     strides=stride_3d,
                     padding=temporal_pad,
                     dilation=dilation,
                     use_bias=False)
class Conv2Plus1D(HybridBlock):
    r"""Factorized (2+1)D convolution.

    A spatial ``1x3x3`` convolution followed by a temporal ``3x1x1``
    convolution, with batch normalization and ReLU in between.

    Parameters
    ----------
    inplanes : int
        Number of input channels.
    planes : int
        Number of output channels.
    midplanes : int
        Number of channels between the spatial and temporal convolutions.
    stride : int, default 1
        Stride applied in each 3D convolutional layer.
    padding : int, default 1
        Padding applied in each dimension of the feature map.
    norm_layer : object
        Normalization layer class (default :class:`mxnet.gluon.nn.BatchNorm`).
        Can also be :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    norm_kwargs : dict
        Extra keyword arguments forwarded to ``norm_layer``, e.g.
        ``num_devices=4`` for :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    """

    def __init__(self,
                 inplanes,
                 planes,
                 midplanes,
                 stride=1,
                 padding=1,
                 norm_layer=BatchNorm,
                 norm_kwargs=None,
                 **kwargs):
        super(Conv2Plus1D, self).__init__()
        bn_kwargs = {} if norm_kwargs is None else norm_kwargs
        with self.name_scope():
            # Spatial 1x3x3 convolution (no temporal extent).
            self.conv1 = nn.Conv3D(in_channels=inplanes,
                                  channels=midplanes,
                                  kernel_size=(1, 3, 3),
                                  strides=(1, stride, stride),
                                  padding=(0, padding, padding),
                                  use_bias=False)
            self.bn1 = norm_layer(in_channels=midplanes, **bn_kwargs)
            self.relu = nn.Activation('relu')
            # Temporal 3x1x1 convolution (no spatial extent).
            self.conv2 = nn.Conv3D(in_channels=midplanes,
                                  channels=planes,
                                  kernel_size=(3, 1, 1),
                                  strides=(stride, 1, 1),
                                  padding=(padding, 0, 0),
                                  use_bias=False)

    def hybrid_forward(self, F, x):
        """Apply spatial conv -> BN -> ReLU -> temporal conv."""
        out = self.relu(self.bn1(self.conv1(x)))
        return self.conv2(out)
class BasicBlock(HybridBlock):
    r"""ResNet basic block for R(2+1)D.

    Two factorized (2+1)D convolutions with an identity (or downsampled)
    shortcut connection.

    Parameters
    ----------
    inplanes : int
        Number of input channels.
    planes : int
        Number of output channels.
    stride : int, default 1
        Stride of the first factorized convolution.
    downsample : HybridBlock, optional
        Block applied to the input so its shape matches the residual branch.
    norm_layer : object
        Normalization layer class (default :class:`mxnet.gluon.nn.BatchNorm`).
        Can also be :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    norm_kwargs : dict
        Extra keyword arguments forwarded to ``norm_layer``, e.g.
        ``num_devices=4`` for :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    layer_name : str, default ''
        Name given to the current block.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 norm_layer=BatchNorm, norm_kwargs=None, layer_name='',
                 **kwargs):
        super(BasicBlock, self).__init__()
        self.downsample = downsample
        bn_kwargs = {} if norm_kwargs is None else norm_kwargs
        with self.name_scope():
            # Intermediate channel count of the (2+1)D factorization; keeps
            # the parameter count comparable to a full 3x3x3 convolution.
            midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes)
            self.conv1 = Conv2Plus1D(inplanes, planes, midplanes, stride)
            self.bn1 = norm_layer(in_channels=planes, **bn_kwargs)
            self.relu = nn.Activation('relu')
            self.conv2 = Conv2Plus1D(planes, planes, midplanes)
            self.bn2 = norm_layer(in_channels=planes, **bn_kwargs)

    def hybrid_forward(self, F, x):
        """Residual forward pass of the R(2+1)D basic block."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Fused add + ReLU on the residual sum.
        return F.Activation(out + shortcut, act_type='relu')
class Bottleneck(HybridBlock):
    r"""ResNet bottleneck block for R(2+1)D.

    A 1x1x1 channel-reduction convolution, a factorized (2+1)D
    convolution, and a 1x1x1 channel-expansion convolution, with an
    identity (or downsampled) shortcut connection.

    Parameters
    ----------
    inplanes : int
        Number of input channels.
    planes : int
        Number of bottleneck channels; the block outputs
        ``planes * expansion`` channels.
    stride : int, default 1
        Stride of the factorized convolution.
    downsample : HybridBlock, optional
        Block applied to the input so its shape matches the residual branch.
    norm_layer : object
        Normalization layer class (default :class:`mxnet.gluon.nn.BatchNorm`).
        Can also be :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    norm_kwargs : dict
        Extra keyword arguments forwarded to ``norm_layer``, e.g.
        ``num_devices=4`` for :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    layer_name : str, default ''
        Name given to the current block.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 norm_layer=BatchNorm, norm_kwargs=None, layer_name='',
                 **kwargs):
        super(Bottleneck, self).__init__()
        self.downsample = downsample
        bn_kwargs = {} if norm_kwargs is None else norm_kwargs
        with self.name_scope():
            # Intermediate channel count of the (2+1)D factorization.
            midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes)
            # 1x1x1 channel reduction.
            self.conv1 = nn.Conv3D(in_channels=inplanes, channels=planes,
                                  kernel_size=1, use_bias=False)
            self.bn1 = norm_layer(in_channels=planes, **bn_kwargs)
            self.relu = nn.Activation('relu')
            # Factorized (2+1)D convolution carries the stride.
            self.conv2 = Conv2Plus1D(planes, planes, midplanes, stride)
            self.bn2 = norm_layer(in_channels=planes, **bn_kwargs)
            # 1x1x1 channel expansion.
            self.conv3 = nn.Conv3D(in_channels=planes, channels=planes * self.expansion,
                                  kernel_size=1, use_bias=False)
            self.bn3 = norm_layer(in_channels=planes * self.expansion, **bn_kwargs)

    def hybrid_forward(self, F, x):
        """Residual forward pass of the R(2+1)D bottleneck block."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        # Fused add + ReLU on the residual sum.
        return F.Activation(out + shortcut, act_type='relu')
class R2Plus1D(HybridBlock):
    r"""The R2+1D network.
    A Closer Look at Spatiotemporal Convolutions for Action Recognition.
    CVPR, 2018. https://arxiv.org/abs/1711.11248
    Parameters
    ----------
    nclass : int
        Number of classes in the training dataset.
    block : Block, default is `Bottleneck`.
        Class for the residual block.
    layers : list of int
        Numbers of layers in each block
    dropout_ratio : float, default is 0.5.
        The dropout rate of a dropout layer.
        The larger the value, the more strength to prevent overfitting.
    num_segments : int, default is 1.
        Number of segments used to evenly divide a video.
    num_crop : int, default is 1.
        Number of crops used during evaluation, choices are 1, 3 or 10.
    feat_ext : bool.
        Whether to extract features before dense classification layer or
        do a complete forward pass.
    init_std : float, default is 0.001.
        Standard deviation value when initialize the dense layers.
    ctx : Context, default CPU.
        The context in which to load the pretrained weights.
    partial_bn : bool, default False.
        Freeze all batch normalization layers during training except the first layer.
    norm_layer : object
        Normalization layer used (default: :class:`mxnet.gluon.nn.BatchNorm`)
        Can be :class:`mxnet.gluon.nn.BatchNorm` or :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    norm_kwargs : dict
        Additional `norm_layer` arguments, for example `num_devices=4`
        for :class:`mxnet.gluon.contrib.nn.SyncBatchNorm`.
    """
    # NOTE(review): `ctx` and `**kwargs` are accepted by __init__ but not
    # used inside this class body -- confirm whether they are intentional.
    def __init__(self, nclass, block, layers, dropout_ratio=0.5,
                 num_segments=1, num_crop=1, feat_ext=False,
                 init_std=0.001, ctx=None, partial_bn=False,
                 norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
        super(R2Plus1D, self).__init__()
        self.partial_bn = partial_bn
        self.dropout_ratio = dropout_ratio
        self.init_std = init_std
        self.num_segments = num_segments
        self.num_crop = num_crop
        self.feat_ext = feat_ext
        # Channel count feeding the first residual stage; updated in place
        # by _make_res_layer() as each stage is built.
        self.inplanes = 64
        self.feat_dim = 512 * block.expansion
        with self.name_scope():
            # Stem: spatial 1x7x7 conv followed by a temporal 3x1x1 conv
            # (the "(2+1)D" factorization applied to the input block).
            self.conv1 = nn.Conv3D(in_channels=3, channels=45, kernel_size=(1, 7, 7),
                                   strides=(1, 2, 2), padding=(0, 3, 3), use_bias=False)
            self.bn1 = norm_layer(in_channels=45, **({} if norm_kwargs is None else norm_kwargs))
            self.relu = nn.Activation('relu')
            self.conv2 = conv3x1x1(in_planes=45, out_planes=64)
            self.bn2 = norm_layer(in_channels=64, **({} if norm_kwargs is None else norm_kwargs))
            if self.partial_bn:
                # Freeze BN running statistics for layers built from here
                # on; bn1/bn2 above keep learnable statistics.
                if norm_kwargs is not None:
                    norm_kwargs['use_global_stats'] = True
                else:
                    norm_kwargs = {}
                    norm_kwargs['use_global_stats'] = True
            # NOTE(review): norm_layer/norm_kwargs (including the
            # partial_bn modification above) are NOT forwarded to
            # _make_res_layer below, so the residual stages always use the
            # default BatchNorm with default kwargs -- confirm intent.
            self.layer1 = self._make_res_layer(block=block,
                                               planes=64 * block.expansion,
                                               blocks=layers[0],
                                               layer_name='layer1_')
            self.layer2 = self._make_res_layer(block=block,
                                               planes=128 * block.expansion,
                                               blocks=layers[1],
                                               stride=2,
                                               layer_name='layer2_')
            self.layer3 = self._make_res_layer(block=block,
                                               planes=256 * block.expansion,
                                               blocks=layers[2],
                                               stride=2,
                                               layer_name='layer3_')
            self.layer4 = self._make_res_layer(block=block,
                                               planes=512 * block.expansion,
                                               blocks=layers[3],
                                               stride=2,
                                               layer_name='layer4_')
            # Global spatio-temporal pooling + dropout + dense classifier.
            self.avgpool = nn.GlobalAvgPool3D()
            self.dropout = nn.Dropout(rate=self.dropout_ratio)
            self.fc = nn.Dense(in_units=self.feat_dim, units=nclass,
                               weight_initializer=init.Normal(sigma=self.init_std))
    def hybrid_forward(self, F, x):
        """Hybrid forward of R2+1D net"""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        # Drop the singleton (T, H, W) axes left by global pooling.
        x = F.squeeze(x, axis=(2, 3, 4))
        # segmental consensus
        x = F.reshape(x, shape=(-1, self.num_segments * self.num_crop, self.feat_dim))
        x = F.mean(x, axis=1)
        if self.feat_ext:
            # Return the pooled clip feature instead of class logits.
            return x
        x = self.fc(self.dropout(x))
        return x
    def _make_res_layer(self,
                        block,
                        planes,
                        blocks,
                        stride=1,
                        norm_layer=BatchNorm,
                        norm_kwargs=None,
                        layer_name=''):
        """Build each stage of a ResNet"""
        downsample = None
        # A projection shortcut is needed whenever the stage changes the
        # spatial/temporal resolution or the channel count.
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.HybridSequential(prefix=layer_name + 'downsample_')
            with downsample.name_scope():
                downsample.add(nn.Conv3D(in_channels=self.inplanes,
                                          channels=planes * block.expansion,
                                          kernel_size=1,
                                          strides=(stride, stride, stride),
                                          use_bias=False))
                downsample.add(norm_layer(in_channels=planes * block.expansion,
                                          **({} if norm_kwargs is None else norm_kwargs)))
        layers = nn.HybridSequential(prefix=layer_name)
        with layers.name_scope():
            # First block carries the stride and the (optional) projection;
            # the remaining blocks of the stage are identity-shortcut blocks.
            layers.add(block(inplanes=self.inplanes,
                             planes=planes,
                             stride=stride,
                             downsample=downsample))
            self.inplanes = planes * block.expansion
            for _ in range(1, blocks):
                layers.add(block(inplanes=self.inplanes, planes=planes))
        return layers
def r2plus1d_resnet18_kinetics400(nclass=400, pretrained=False, pretrained_base=True,
                                  root='~/.mxnet/models', num_segments=1, num_crop=1,
                                  feat_ext=False, ctx=cpu(), **kwargs):
    r"""R2Plus1D with ResNet18 backbone trained on Kinetics400 dataset.
    Parameters
    ----------
    nclass : int.
        Number of categories in the dataset.
    pretrained : bool or str.
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str, optional, default is True.
        Load pretrained base network, the extra layers are randomized. Note that
        if pretrained is `True`, this has no effect.
    ctx : Context, default CPU.
        The context in which to load the pretrained weights.
    root : str, default $MXNET_HOME/models
        Location for keeping the model parameters.
    num_segments : int, default is 1.
        Number of segments used to evenly divide a video.
    num_crop : int, default is 1.
        Number of crops used during evaluation, choices are 1, 3 or 10.
    feat_ext : bool.
        Whether to extract features before dense classification layer or
        do a complete forward pass.
    """
    # Bug fix: num_segments/num_crop/feat_ext were accepted but never
    # forwarded to R2Plus1D, so they silently had no effect on the model.
    # Forwarding them is backward-compatible (defaults match R2Plus1D's).
    model = R2Plus1D(nclass=nclass,
                     block=BasicBlock,
                     layers=[2, 2, 2, 2],
                     num_segments=num_segments,
                     num_crop=num_crop,
                     feat_ext=feat_ext,
                     ctx=ctx,
                     **kwargs)
    model.initialize(init.MSRAPrelu(), ctx=ctx)
    if pretrained:
        from ..model_store import get_model_file
        model.load_parameters(get_model_file('r2plus1d_resnet18_kinetics400',
                                             tag=pretrained, root=root), ctx=ctx)
        from ...data import Kinetics400Attr
        attrib = Kinetics400Attr()
        model.classes = attrib.classes
    model.collect_params().reset_ctx(ctx)
    return model
def r2plus1d_resnet34_kinetics400(nclass=400, pretrained=False, pretrained_base=True,
                                  root='~/.mxnet/models', num_segments=1, num_crop=1,
                                  feat_ext=False, ctx=cpu(), **kwargs):
    r"""R2Plus1D with ResNet34 backbone trained on Kinetics400 dataset.
    Parameters
    ----------
    nclass : int.
        Number of categories in the dataset.
    pretrained : bool or str.
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str, optional, default is True.
        Load pretrained base network, the extra layers are randomized. Note that
        if pretrained is `True`, this has no effect.
    ctx : Context, default CPU.
        The context in which to load the pretrained weights.
    root : str, default $MXNET_HOME/models
        Location for keeping the model parameters.
    num_segments : int, default is 1.
        Number of segments used to evenly divide a video.
    num_crop : int, default is 1.
        Number of crops used during evaluation, choices are 1, 3 or 10.
    feat_ext : bool.
        Whether to extract features before dense classification layer or
        do a complete forward pass.
    """
    # Bug fix: num_segments/num_crop/feat_ext were accepted but never
    # forwarded to R2Plus1D, so they silently had no effect on the model.
    model = R2Plus1D(nclass=nclass,
                     block=BasicBlock,
                     layers=[3, 4, 6, 3],
                     num_segments=num_segments,
                     num_crop=num_crop,
                     feat_ext=feat_ext,
                     ctx=ctx,
                     **kwargs)
    model.initialize(init.MSRAPrelu(), ctx=ctx)
    if pretrained:
        from ..model_store import get_model_file
        model.load_parameters(get_model_file('r2plus1d_resnet34_kinetics400',
                                             tag=pretrained, root=root), ctx=ctx)
        from ...data import Kinetics400Attr
        attrib = Kinetics400Attr()
        model.classes = attrib.classes
    model.collect_params().reset_ctx(ctx)
    return model
def r2plus1d_resnet50_kinetics400(nclass=400, pretrained=False, pretrained_base=True,
                                  root='~/.mxnet/models', num_segments=1, num_crop=1,
                                  feat_ext=False, ctx=cpu(), **kwargs):
    r"""R2Plus1D with ResNet50 backbone trained on Kinetics400 dataset.
    Parameters
    ----------
    nclass : int.
        Number of categories in the dataset.
    pretrained : bool or str.
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str, optional, default is True.
        Load pretrained base network, the extra layers are randomized. Note that
        if pretrained is `True`, this has no effect.
    ctx : Context, default CPU.
        The context in which to load the pretrained weights.
    root : str, default $MXNET_HOME/models
        Location for keeping the model parameters.
    num_segments : int, default is 1.
        Number of segments used to evenly divide a video.
    num_crop : int, default is 1.
        Number of crops used during evaluation, choices are 1, 3 or 10.
    feat_ext : bool.
        Whether to extract features before dense classification layer or
        do a complete forward pass.
    """
    # Bug fix: num_segments/num_crop/feat_ext were accepted but never
    # forwarded to R2Plus1D, so they silently had no effect on the model.
    model = R2Plus1D(nclass=nclass,
                     block=Bottleneck,
                     layers=[3, 4, 6, 3],
                     num_segments=num_segments,
                     num_crop=num_crop,
                     feat_ext=feat_ext,
                     ctx=ctx,
                     **kwargs)
    model.initialize(init.MSRAPrelu(), ctx=ctx)
    if pretrained:
        from ..model_store import get_model_file
        model.load_parameters(get_model_file('r2plus1d_resnet50_kinetics400',
                                             tag=pretrained, root=root), ctx=ctx)
        from ...data import Kinetics400Attr
        attrib = Kinetics400Attr()
        model.classes = attrib.classes
    model.collect_params().reset_ctx(ctx)
    return model
def r2plus1d_resnet101_kinetics400(nclass=400, pretrained=False, pretrained_base=True,
                                   root='~/.mxnet/models', num_segments=1, num_crop=1,
                                   feat_ext=False, ctx=cpu(), **kwargs):
    r"""R2Plus1D with ResNet101 backbone trained on Kinetics400 dataset.
    Parameters
    ----------
    nclass : int.
        Number of categories in the dataset.
    pretrained : bool or str.
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str, optional, default is True.
        Load pretrained base network, the extra layers are randomized. Note that
        if pretrained is `True`, this has no effect.
    ctx : Context, default CPU.
        The context in which to load the pretrained weights.
    root : str, default $MXNET_HOME/models
        Location for keeping the model parameters.
    num_segments : int, default is 1.
        Number of segments used to evenly divide a video.
    num_crop : int, default is 1.
        Number of crops used during evaluation, choices are 1, 3 or 10.
    feat_ext : bool.
        Whether to extract features before dense classification layer or
        do a complete forward pass.
    """
    # Bug fix: num_segments/num_crop/feat_ext were accepted but never
    # forwarded to R2Plus1D, so they silently had no effect on the model.
    model = R2Plus1D(nclass=nclass,
                     block=Bottleneck,
                     layers=[3, 4, 23, 3],
                     num_segments=num_segments,
                     num_crop=num_crop,
                     feat_ext=feat_ext,
                     ctx=ctx,
                     **kwargs)
    model.initialize(init.MSRAPrelu(), ctx=ctx)
    if pretrained:
        from ..model_store import get_model_file
        model.load_parameters(get_model_file('r2plus1d_resnet101_kinetics400',
                                             tag=pretrained, root=root), ctx=ctx)
        from ...data import Kinetics400Attr
        attrib = Kinetics400Attr()
        model.classes = attrib.classes
    model.collect_params().reset_ctx(ctx)
    return model
def r2plus1d_resnet152_kinetics400(nclass=400, pretrained=False, pretrained_base=True,
                                   root='~/.mxnet/models', num_segments=1, num_crop=1,
                                   feat_ext=False, ctx=cpu(), **kwargs):
    r"""R2Plus1D with ResNet152 backbone trained on Kinetics400 dataset.
    Parameters
    ----------
    nclass : int.
        Number of categories in the dataset.
    pretrained : bool or str.
        Boolean value controls whether to load the default pretrained weights for model.
        String value represents the hashtag for a certain version of pretrained weights.
    pretrained_base : bool or str, optional, default is True.
        Load pretrained base network, the extra layers are randomized. Note that
        if pretrained is `True`, this has no effect.
    ctx : Context, default CPU.
        The context in which to load the pretrained weights.
    root : str, default $MXNET_HOME/models
        Location for keeping the model parameters.
    num_segments : int, default is 1.
        Number of segments used to evenly divide a video.
    num_crop : int, default is 1.
        Number of crops used during evaluation, choices are 1, 3 or 10.
    feat_ext : bool.
        Whether to extract features before dense classification layer or
        do a complete forward pass.
    """
    # Bug fix: num_segments/num_crop/feat_ext were accepted but never
    # forwarded to R2Plus1D, so they silently had no effect on the model.
    model = R2Plus1D(nclass=nclass,
                     block=Bottleneck,
                     layers=[3, 8, 36, 3],
                     num_segments=num_segments,
                     num_crop=num_crop,
                     feat_ext=feat_ext,
                     ctx=ctx,
                     **kwargs)
    model.initialize(init.MSRAPrelu(), ctx=ctx)
    if pretrained:
        from ..model_store import get_model_file
        model.load_parameters(get_model_file('r2plus1d_resnet152_kinetics400',
                                             tag=pretrained, root=root), ctx=ctx)
        from ...data import Kinetics400Attr
        attrib = Kinetics400Attr()
        model.classes = attrib.classes
    model.collect_params().reset_ctx(ctx)
    return model
| [
"mxnet.gluon.nn.GlobalAvgPool3D",
"mxnet.gluon.nn.HybridSequential",
"mxnet.init.Normal",
"mxnet.gluon.nn.Dropout",
"mxnet.init.MSRAPrelu",
"mxnet.context.cpu",
"mxnet.gluon.nn.Conv3D",
"mxnet.gluon.nn.Activation"
] | [((762, 967), 'mxnet.gluon.nn.Conv3D', 'nn.Conv3D', ([], {'in_channels': 'in_planes', 'channels': 'out_planes', 'kernel_size': '(3, 1, 1)', 'strides': '(temporal_stride, spatial_stride, spatial_stride)', 'padding': '(dilation, 0, 0)', 'dilation': 'dilation', 'use_bias': '(False)'}), '(in_channels=in_planes, channels=out_planes, kernel_size=(3, 1, 1),\n strides=(temporal_stride, spatial_stride, spatial_stride), padding=(\n dilation, 0, 0), dilation=dilation, use_bias=False)\n', (771, 967), False, 'from mxnet.gluon import nn\n'), ((14942, 14947), 'mxnet.context.cpu', 'cpu', ([], {}), '()\n', (14945, 14947), False, 'from mxnet.context import cpu\n'), ((16918, 16923), 'mxnet.context.cpu', 'cpu', ([], {}), '()\n', (16921, 16923), False, 'from mxnet.context import cpu\n'), ((18894, 18899), 'mxnet.context.cpu', 'cpu', ([], {}), '()\n', (18897, 18899), False, 'from mxnet.context import cpu\n'), ((20873, 20878), 'mxnet.context.cpu', 'cpu', ([], {}), '()\n', (20876, 20878), False, 'from mxnet.context import cpu\n'), ((22855, 22860), 'mxnet.context.cpu', 'cpu', ([], {}), '()\n', (22858, 22860), False, 'from mxnet.context import cpu\n'), ((14259, 14297), 'mxnet.gluon.nn.HybridSequential', 'nn.HybridSequential', ([], {'prefix': 'layer_name'}), '(prefix=layer_name)\n', (14278, 14297), False, 'from mxnet.gluon import nn\n'), ((16257, 16273), 'mxnet.init.MSRAPrelu', 'init.MSRAPrelu', ([], {}), '()\n', (16271, 16273), False, 'from mxnet import init\n'), ((18233, 18249), 'mxnet.init.MSRAPrelu', 'init.MSRAPrelu', ([], {}), '()\n', (18247, 18249), False, 'from mxnet import init\n'), ((20209, 20225), 'mxnet.init.MSRAPrelu', 'init.MSRAPrelu', ([], {}), '()\n', (20223, 20225), False, 'from mxnet import init\n'), ((22190, 22206), 'mxnet.init.MSRAPrelu', 'init.MSRAPrelu', ([], {}), '()\n', (22204, 22206), False, 'from mxnet import init\n'), ((24172, 24188), 'mxnet.init.MSRAPrelu', 'init.MSRAPrelu', ([], {}), '()\n', (24186, 24188), False, 'from mxnet import init\n'), ((2283, 2437), 
'mxnet.gluon.nn.Conv3D', 'nn.Conv3D', ([], {'in_channels': 'inplanes', 'channels': 'midplanes', 'kernel_size': '(1, 3, 3)', 'strides': '(1, stride, stride)', 'padding': '(0, padding, padding)', 'use_bias': '(False)'}), '(in_channels=inplanes, channels=midplanes, kernel_size=(1, 3, 3),\n strides=(1, stride, stride), padding=(0, padding, padding), use_bias=False)\n', (2292, 2437), False, 'from mxnet.gluon import nn\n'), ((2772, 2793), 'mxnet.gluon.nn.Activation', 'nn.Activation', (['"""relu"""'], {}), "('relu')\n", (2785, 2793), False, 'from mxnet.gluon import nn\n'), ((2819, 2960), 'mxnet.gluon.nn.Conv3D', 'nn.Conv3D', ([], {'in_channels': 'midplanes', 'channels': 'planes', 'kernel_size': '(3, 1, 1)', 'strides': '(stride, 1, 1)', 'padding': '(padding, 0, 0)', 'use_bias': '(False)'}), '(in_channels=midplanes, channels=planes, kernel_size=(3, 1, 1),\n strides=(stride, 1, 1), padding=(padding, 0, 0), use_bias=False)\n', (2828, 2960), False, 'from mxnet.gluon import nn\n'), ((4809, 4830), 'mxnet.gluon.nn.Activation', 'nn.Activation', (['"""relu"""'], {}), "('relu')\n", (4822, 4830), False, 'from mxnet.gluon import nn\n'), ((6735, 6814), 'mxnet.gluon.nn.Conv3D', 'nn.Conv3D', ([], {'in_channels': 'inplanes', 'channels': 'planes', 'kernel_size': '(1)', 'use_bias': '(False)'}), '(in_channels=inplanes, channels=planes, kernel_size=1, use_bias=False)\n', (6744, 6814), False, 'from mxnet.gluon import nn\n'), ((6975, 6996), 'mxnet.gluon.nn.Activation', 'nn.Activation', (['"""relu"""'], {}), "('relu')\n", (6988, 6996), False, 'from mxnet.gluon import nn\n'), ((7260, 7359), 'mxnet.gluon.nn.Conv3D', 'nn.Conv3D', ([], {'in_channels': 'planes', 'channels': '(planes * self.expansion)', 'kernel_size': '(1)', 'use_bias': '(False)'}), '(in_channels=planes, channels=planes * self.expansion, kernel_size\n =1, use_bias=False)\n', (7269, 7359), False, 'from mxnet.gluon import nn\n'), ((10299, 10417), 'mxnet.gluon.nn.Conv3D', 'nn.Conv3D', ([], {'in_channels': '(3)', 'channels': '(45)', 
'kernel_size': '(1, 7, 7)', 'strides': '(1, 2, 2)', 'padding': '(0, 3, 3)', 'use_bias': '(False)'}), '(in_channels=3, channels=45, kernel_size=(1, 7, 7), strides=(1, 2,\n 2), padding=(0, 3, 3), use_bias=False)\n', (10308, 10417), False, 'from mxnet.gluon import nn\n'), ((10571, 10592), 'mxnet.gluon.nn.Activation', 'nn.Activation', (['"""relu"""'], {}), "('relu')\n", (10584, 10592), False, 'from mxnet.gluon import nn\n'), ((12292, 12312), 'mxnet.gluon.nn.GlobalAvgPool3D', 'nn.GlobalAvgPool3D', ([], {}), '()\n', (12310, 12312), False, 'from mxnet.gluon import nn\n'), ((12340, 12375), 'mxnet.gluon.nn.Dropout', 'nn.Dropout', ([], {'rate': 'self.dropout_ratio'}), '(rate=self.dropout_ratio)\n', (12350, 12375), False, 'from mxnet.gluon import nn\n'), ((13640, 13694), 'mxnet.gluon.nn.HybridSequential', 'nn.HybridSequential', ([], {'prefix': "(layer_name + 'downsample_')"}), "(prefix=layer_name + 'downsample_')\n", (13659, 13694), False, 'from mxnet.gluon import nn\n'), ((12495, 12527), 'mxnet.init.Normal', 'init.Normal', ([], {'sigma': 'self.init_std'}), '(sigma=self.init_std)\n', (12506, 12527), False, 'from mxnet import init\n'), ((13768, 13908), 'mxnet.gluon.nn.Conv3D', 'nn.Conv3D', ([], {'in_channels': 'self.inplanes', 'channels': '(planes * block.expansion)', 'kernel_size': '(1)', 'strides': '(stride, stride, stride)', 'use_bias': '(False)'}), '(in_channels=self.inplanes, channels=planes * block.expansion,\n kernel_size=1, strides=(stride, stride, stride), use_bias=False)\n', (13777, 13908), False, 'from mxnet.gluon import nn\n')] |
#!/usr/bin/env python3
import retrieve_author_ppn as autppn
import retrieve_references as refs
import zot_helpers as pyzot
from itertools import islice
researchers = autppn.constructOutput('test.csv')
autppn.writeCsv('out.csv', researchers)

# For every researcher with a known PPN, fetch their authored references
# and push them into a dedicated Zotero collection in batches of 50.
for person in researchers:
    ppn = person['ppn']
    if ppn == "":
        continue
    creator_names = f"{person['firstname']} {person['lastname']}"
    collection_name = f"{person['lastname'].lower()}_{person['ppn']}"
    json_loaded = refs.getReferences(ppn)
    biblio = refs.getRefsByRole(json_loaded, 'aut', creator_names)
    total_items = len(biblio)
    print(f"Pushing {total_items} items in Zotero bibliography : {collection_name}")
    collection_id = pyzot.create_collection(collection_name)
    # print(collection_id)
    for batch_start in range(0, total_items, 50):
        batch_end = min(batch_start + 50, total_items)
        pyzot.create_items(collection_id, list(islice(biblio, batch_start, batch_end)))
| [
"itertools.islice",
"retrieve_author_ppn.constructOutput",
"retrieve_references.getRefsByRole",
"zot_helpers.create_collection",
"retrieve_author_ppn.writeCsv",
"retrieve_references.getReferences"
] | [((168, 202), 'retrieve_author_ppn.constructOutput', 'autppn.constructOutput', (['"""test.csv"""'], {}), "('test.csv')\n", (190, 202), True, 'import retrieve_author_ppn as autppn\n'), ((204, 243), 'retrieve_author_ppn.writeCsv', 'autppn.writeCsv', (['"""out.csv"""', 'researchers'], {}), "('out.csv', researchers)\n", (219, 243), True, 'import retrieve_author_ppn as autppn\n'), ((490, 513), 'retrieve_references.getReferences', 'refs.getReferences', (['ppn'], {}), '(ppn)\n', (508, 513), True, 'import retrieve_references as refs\n'), ((531, 584), 'retrieve_references.getRefsByRole', 'refs.getRefsByRole', (['json_loaded', '"""aut"""', 'creator_names'], {}), "(json_loaded, 'aut', creator_names)\n", (549, 584), True, 'import retrieve_references as refs\n'), ((732, 772), 'zot_helpers.create_collection', 'pyzot.create_collection', (['collection_name'], {}), '(collection_name)\n', (755, 772), True, 'import zot_helpers as pyzot\n'), ((1038, 1064), 'itertools.islice', 'islice', (['biblio', 'start', 'end'], {}), '(biblio, start, end)\n', (1044, 1064), False, 'from itertools import islice\n')] |
#!/usr/bin/env python
from pkg_resources import get_distribution
from setuptools import setup, find_packages
# Use the README as the long description shown on PyPI.
with open("README.md", "r") as f:
    long_description = f.read()
# NOTE(review): get_distribution() reads the version of an *installed*
# "autolabel" distribution; on a fresh checkout (package never installed)
# this raises DistributionNotFound -- confirm this is intended.
version = get_distribution("autolabel").version
setup(
    packages=find_packages(),
    install_requires=[
        'click',
        'more-itertools',
        'torchvision',
        'torch',
        'pillow',
        'numpy'
    ],
    entry_points='''
        [console_scripts]
        autolabel=autolabel.cli:main
    ''',
    url='https://github.com/walwe/autolabel',
    version=version,
    author='walwe',
    python_requires='>=3.6',
    description='Autolabel is an image labeling tool using Neural Network',
    long_description_content_type="text/markdown",
    long_description=long_description,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ]
)
| [
"setuptools.find_packages",
"pkg_resources.get_distribution"
] | [((187, 216), 'pkg_resources.get_distribution', 'get_distribution', (['"""autolabel"""'], {}), "('autolabel')\n", (203, 216), False, 'from pkg_resources import get_distribution\n'), ((246, 261), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (259, 261), False, 'from setuptools import setup, find_packages\n')] |
import os
from docker.models.containers import Container
import docker
import pytest
from tox_docker.config import runas_name
def find_container(instance_name: str) -> Container:
    """Return the running Docker container for *instance_name*.

    Fails the current pytest test if no container with the expected
    runas name is running.
    """
    # TODO: refactor this as a pytest fixture
    # this is running in a child-process of the tox instance which
    # spawned the container; so we need to pass the parent pid to
    # get the right runas_name()
    running_name = runas_name(instance_name, pid=os.getppid())
    client = docker.from_env(version="auto")
    for container in client.containers.list():
        # (A dead statement reading container.attrs["Config"] labels and
        # discarding the result was removed here.)
        if container.name == running_name:
            return container
    pytest.fail(f"No running container with instance name {running_name!r}")
| [
"pytest.fail",
"os.getppid",
"docker.from_env"
] | [((472, 503), 'docker.from_env', 'docker.from_env', ([], {'version': '"""auto"""'}), "(version='auto')\n", (487, 503), False, 'import docker\n'), ((680, 752), 'pytest.fail', 'pytest.fail', (['f"""No running container with instance name {running_name!r}"""'], {}), "(f'No running container with instance name {running_name!r}')\n", (691, 752), False, 'import pytest\n'), ((445, 457), 'os.getppid', 'os.getppid', ([], {}), '()\n', (455, 457), False, 'import os\n')] |
#!usr/bin/env python3
# -*- coding: utf-8 -*-
#
# @author <NAME>
#
# get posts of a fb page, group or account
# returns a json lines files with a line for each post
#
from argparse import ArgumentParser
from datetime import date, datetime
from facebook_scraper import get_posts
from json import dumps, JSONEncoder
class DateTimeEncoder(JSONEncoder):
    """JSON encoder that serializes date/datetime objects via str()."""
    def default(self, o):
        if isinstance(o, (date, datetime)):
            # str() yields e.g. "2020-01-02 03:04:05" for datetimes.
            return str(o)
        else:
            return super().default(o)


def write_posts(account, posts):
    """Write *posts* to <YYYYMMDD>_<account>_facebook.jsonl, one JSON object
    per line, omitting the (potentially huge) 'text' field.

    Fixes over the previous version: the caller's dicts are no longer
    mutated (a trimmed copy is written instead of `del post['text']`),
    posts without a 'text' key no longer raise KeyError, and the
    redundant explicit close() inside the ``with`` block was removed.
    """
    today = date.today().strftime("%Y%m%d")
    with open("{}_{}_facebook.jsonl".format(today, account), 'w') as output_file:
        for post in posts:
            trimmed = {key: value for key, value in post.items() if key != 'text'}
            output_file.write(dumps(trimmed, cls=DateTimeEncoder, ensure_ascii=False))
            output_file.write("\n")
def get_fb_posts(args):
    """Scrape posts for the configured account (or group) and dump them
    to a JSON-lines file via write_posts()."""
    scrape_options = {"comments": args.comments, "reactors": args.reactions}
    cookies = args.cookies if args.cookies else None
    if args.group:
        posts = get_posts(group=args.account, cookies=cookies, pages=args.pages,
                          extra_info=True, options=scrape_options)
    else:
        posts = get_posts(account=args.account, cookies=cookies, pages=args.pages,
                          extra_info=True, options=scrape_options)
    write_posts(args.account, posts)
if __name__ == '__main__':
    # Command-line entry point: collect the scraping options and run.
    parser = ArgumentParser()
    parser.add_argument('account', help="name of the account", type=str)
    parser.add_argument('--cookies', help="cookie file for getting data of a private account", required=False)
    parser.add_argument('--reactions', help="extract likes and so from posts", action='store_true')
    parser.add_argument('--comments', help="scrape comments too", action='store_true')
    parser.add_argument('--group', help="account is a group", action='store_true')
    parser.add_argument('--pages', help="number of pages to scrape", type=int, default=10)
    args = parser.parse_args()
get_fb_posts(args) | [
"datetime.date.today",
"json.dumps",
"facebook_scraper.get_posts",
"argparse.ArgumentParser"
] | [((1456, 1472), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (1470, 1472), False, 'from argparse import ArgumentParser\n'), ((1101, 1231), 'facebook_scraper.get_posts', 'get_posts', ([], {'group': 'account', 'cookies': 'cookies', 'pages': 'pages', 'extra_info': '(True)', 'options': "{'comments': comments, 'reactors': reactions}"}), "(group=account, cookies=cookies, pages=pages, extra_info=True,\n options={'comments': comments, 'reactors': reactions})\n", (1110, 1231), False, 'from facebook_scraper import get_posts\n'), ((1254, 1386), 'facebook_scraper.get_posts', 'get_posts', ([], {'account': 'account', 'cookies': 'cookies', 'pages': 'pages', 'extra_info': '(True)', 'options': "{'comments': comments, 'reactors': reactions}"}), "(account=account, cookies=cookies, pages=pages, extra_info=True,\n options={'comments': comments, 'reactors': reactions})\n", (1263, 1386), False, 'from facebook_scraper import get_posts\n'), ((546, 558), 'datetime.date.today', 'date.today', ([], {}), '()\n', (556, 558), False, 'from datetime import date, datetime\n'), ((746, 798), 'json.dumps', 'dumps', (['post'], {'cls': 'DateTimeEncoder', 'ensure_ascii': '(False)'}), '(post, cls=DateTimeEncoder, ensure_ascii=False)\n', (751, 798), False, 'from json import dumps, JSONEncoder\n')] |
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk as gtk
class Ked:
    """Main application window, built from the Glade layout file.

    NOTE(review): the ``ufile`` parameter is accepted but never used --
    presumably it is the file to open on startup; confirm the intent.
    """
    def __init__(self, app_path, ufile=None):
        # Load the UI description shipped under the application's data dir.
        glade_layout = f'{app_path}/data/ked_layout.glade'
        self.builder = gtk.Builder()
        self.builder.add_from_file(glade_layout)
        win = self.builder.get_object("KedMain")
        # Quit the GTK main loop when the window is closed.
        win.connect("delete-event", gtk.main_quit)
        win.show()
    def echo(self, msg):
        # Small debugging helper: print a message to stdout.
        print(f'Message: {msg}')
def start_ked(app_path, user_file=None):
    """Create the main window and enter the GTK main loop.

    Fix: ``user_file`` was previously accepted but never forwarded to
    :class:`Ked`; it is now passed through as the ``ufile`` argument
    (backward-compatible -- both default to None).
    """
    main = Ked(app_path, user_file)
    gtk.main()
| [
"gi.repository.Gtk.Builder",
"gi.repository.Gtk.main",
"gi.require_version"
] | [((11, 43), 'gi.require_version', 'gi.require_version', (['"""Gtk"""', '"""3.0"""'], {}), "('Gtk', '3.0')\n", (29, 43), False, 'import gi\n'), ((536, 546), 'gi.repository.Gtk.main', 'gtk.main', ([], {}), '()\n', (544, 546), True, 'from gi.repository import Gtk as gtk\n'), ((221, 234), 'gi.repository.Gtk.Builder', 'gtk.Builder', ([], {}), '()\n', (232, 234), True, 'from gi.repository import Gtk as gtk\n')] |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 ts=4 sts=4 sw=4 et tw=80 :
#
# Compare an image file and its associated uncertainty image.
#
# <NAME>
# Created: 2021-06-03
# Last modified: 2021-06-03
#--------------------------------------------------------------------------
#**************************************************************************
#--------------------------------------------------------------------------
## Logging setup:
import logging
#logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
#logger.setLevel(logging.DEBUG)
logger.setLevel(logging.INFO)
## Current version:
__version__ = "0.0.1"
## Python version-agnostic module reloading:
try:
reload # Python 2.7
except NameError:
try:
from importlib import reload # Python 3.4+
except ImportError:
from imp import reload # Python 3.0 - 3.3
## Modules:
#import argparse
#import shutil
import resource
import signal
#import glob
import gc
import os
import sys
import time
#import vaex
#import calendar
#import ephem
import numpy as np
#from numpy.lib.recfunctions import append_fields
#import datetime as dt
#from dateutil import parser as dtp
#import scipy.linalg as sla
#import scipy.signal as ssig
#import scipy.ndimage as ndi
#import scipy.optimize as opti
#import scipy.interpolate as stp
#import scipy.spatial.distance as ssd
import matplotlib.pyplot as plt
#import matplotlib.cm as cm
#import matplotlib.ticker as mt
#import matplotlib._pylab_helpers as hlp
#from matplotlib.colors import LogNorm
#import matplotlib.colors as mplcolors
#import matplotlib.collections as mcoll
#import matplotlib.gridspec as gridspec
#from functools import partial
#from collections import OrderedDict
#from collections.abc import Iterable
#import multiprocessing as mp
#np.set_printoptions(suppress=True, linewidth=160)
#import pandas as pd
#import statsmodels.api as sm
#import statsmodels.formula.api as smf
#from statsmodels.regression.quantile_regression import QuantReg
#import PIL.Image as pli
#import seaborn as sns
#import cmocean
import theil_sen as ts
#import window_filter as wf
#import itertools as itt
_have_np_vers = float('.'.join(np.__version__.split('.')[:2]))
##--------------------------------------------------------------------------##
## Disable buffering on stdout/stderr:
class Unbuffered(object):
    """Stream proxy that flushes after every write.

    Any attribute other than ``write`` is delegated to the wrapped
    stream, so instances can stand in for sys.stdout / sys.stderr.
    """
    def __init__(self, stream):
        self.stream = stream
    def write(self, data):
        # Forward the payload, then force it out immediately.
        self.stream.write(data)
        self.stream.flush()
    def __getattr__(self, attr):
        # Everything else falls through to the underlying stream.
        return getattr(self.stream, attr)
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
##--------------------------------------------------------------------------##
##--------------------------------------------------------------------------##
## Home-brew robust statistics:
try:
import robust_stats
reload(robust_stats)
rs = robust_stats
except ImportError:
logger.error("module robust_stats not found! Install and retry.")
sys.stderr.write("\nError! robust_stats module not found!\n"
"Please install and try again ...\n\n")
sys.exit(1)
## Home-brew KDE:
#try:
# import my_kde
# reload(my_kde)
# mk = my_kde
#except ImportError:
# logger.error("module my_kde not found! Install and retry.")
# sys.stderr.write("\nError! my_kde module not found!\n"
# "Please install and try again ...\n\n")
# sys.exit(1)
## Fast FITS I/O:
#try:
# import fitsio
#except ImportError:
# logger.error("fitsio module not found! Install and retry.")
# sys.stderr.write("\nError: fitsio module not found!\n")
# sys.exit(1)
## Various from astropy:
try:
# import astropy.io.ascii as aia
import astropy.io.fits as pf
# import astropy.io.votable as av
# import astropy.table as apt
# import astropy.time as astt
# import astropy.wcs as awcs
# from astropy import constants as aconst
# from astropy import coordinates as coord
# from astropy import units as uu
except ImportError:
# logger.error("astropy module not found! Install and retry.")
sys.stderr.write("\nError: astropy module not found!\n")
sys.exit(1)
## Star extraction:
#try:
# import easy_sep
# reload(easy_sep)
#except ImportError:
# logger.error("easy_sep module not found! Install and retry.")
# sys.stderr.write("Error: easy_sep module not found!\n\n")
# sys.exit(1)
#pse = easy_sep.EasySEP()
##--------------------------------------------------------------------------##
## Colors for fancy terminal output:
NRED = '\033[0;31m' ; BRED = '\033[1;31m'
NGREEN = '\033[0;32m' ; BGREEN = '\033[1;32m'
NYELLOW = '\033[0;33m' ; BYELLOW = '\033[1;33m'
NBLUE = '\033[0;34m' ; BBLUE = '\033[1;34m'
NMAG = '\033[0;35m' ; BMAG = '\033[1;35m'
NCYAN = '\033[0;36m' ; BCYAN = '\033[1;36m'
NWHITE = '\033[0;37m' ; BWHITE = '\033[1;37m'
ENDC = '\033[0m'
## Suppress colors in cron jobs:
if (os.getenv('FUNCDEF') == '--nocolors'):
NRED = '' ; BRED = ''
NGREEN = '' ; BGREEN = ''
NYELLOW = '' ; BYELLOW = ''
NBLUE = '' ; BBLUE = ''
NMAG = '' ; BMAG = ''
NCYAN = '' ; BCYAN = ''
NWHITE = '' ; BWHITE = ''
ENDC = ''
## Fancy text:
degree_sign = u'\N{DEGREE SIGN}'
## Dividers:
halfdiv = '-' * 40
fulldiv = '-' * 80
##--------------------------------------------------------------------------##
## Save FITS image with clobber (astropy / pyfits):
#def qsave(iname, idata, header=None, padkeys=1000, **kwargs):
# this_func = sys._getframe().f_code.co_name
# parent_func = sys._getframe(1).f_code.co_name
# sys.stderr.write("Writing to '%s' ... " % iname)
# if header:
# while (len(header) < padkeys):
# header.append() # pad header
# if os.path.isfile(iname):
# os.remove(iname)
# pf.writeto(iname, idata, header=header, **kwargs)
# sys.stderr.write("done.\n")
##--------------------------------------------------------------------------##
## Save FITS image with clobber (fitsio):
#def qsave(iname, idata, header=None, **kwargs):
# this_func = sys._getframe().f_code.co_name
# parent_func = sys._getframe(1).f_code.co_name
# sys.stderr.write("Writing to '%s' ... " % iname)
# #if os.path.isfile(iname):
# # os.remove(iname)
# fitsio.write(iname, idata, clobber=True, header=header, **kwargs)
# sys.stderr.write("done.\n")
##--------------------------------------------------------------------------##
def ldmap(things):
    """Map each element of *things* to its positional index (last wins)."""
    return {item: index for index, item in enumerate(things)}
def argnear(vec, val):
    """Return the index of the element of *vec* closest to *val*."""
    return np.argmin(np.abs(vec - val))
##--------------------------------------------------------------------------##
## New-style string formatting (more at https://pyformat.info/):
#oldway = '%s %s' % ('one', 'two')
#newway = '{} {}'.format('one', 'two')
#oldway = '%d %d' % (1, 2)
#newway = '{} {}'.format(1, 2)
# With padding:
#oldway = '%10s' % ('test',) # right-justified
#newway = '{:>10}'.format('test') # right-justified
#oldway = '%-10s' % ('test',) # left-justified
#newway = '{:10}'.format('test') # left-justified
# Ordinally:
#newway = '{1} {0}'.format('one', 'two') # prints "two one"
# Dictionarily:
#newway = '{lastname}, {firstname}'.format(firstname='Rob', lastname='Siverd')
# Centered (new-only):
#newctr = '{:^10}'.format('test') # prints " test "
# Numbers:
#oldway = '%06.2f' % (3.141592653589793,)
#newway = '{:06.2f}'.format(3.141592653589793)
##--------------------------------------------------------------------------##
## Quick ASCII I/O:
#data_file = 'data.txt'
#gftkw = {'encoding':None} if (_have_np_vers >= 1.14) else {}
#gftkw.update({'names':True, 'autostrip':True})
#gftkw.update({'delimiter':'|', 'comments':'%0%0%0%0'})
#gftkw.update({'loose':True, 'invalid_raise':False})
#all_data = np.genfromtxt(data_file, dtype=None, **gftkw)
#all_data = aia.read(data_file)
#all_data = pd.read_csv(data_file)
#all_data = pd.read_table(data_file, delim_whitespace=True)
#all_data = pd.read_table(data_file, skipinitialspace=True)
#all_data = pd.read_table(data_file, sep='|')
#fields = all_data.dtype.names
#if not fields:
# x = all_data[:, 0]
# y = all_data[:, 1]
#else:
# x = all_data[fields[0]]
# y = all_data[fields[1]]
#vot_file = 'neato.xml'
#vot_data = av.parse_single_table(vot_file)
#vot_data = av.parse_single_table(vot_file).to_table()
##--------------------------------------------------------------------------##
## Quick FITS I/O:
## Quick FITS I/O:
# Load a Spitzer image (cbcd) and its matching uncertainty map (cbunc),
# keeping the headers for the calibration keywords used below.
ifile = 'SPITZER_I2_44772864_0004_0000_2_cbcd.fits'
ufile = 'SPITZER_I2_44772864_0004_0000_2_cbunc.fits'
idata, ihdrs = pf.getdata(ifile, header=True)
udata, uhdrs = pf.getdata(ufile, header=True)
gain     = ihdrs['GAIN']
exptime  = ihdrs['EXPTIME']
fluxconv = ihdrs['FLUXCONV']
# Drop pixels that are NaN in either image, then drop non-positive image
# pixels (they cannot be shown on the log-scaled plot below):
ignore   = np.isnan(idata) | np.isnan(udata)
isafe    = idata[~ignore]
usafe    = udata[~ignore]
ignore   = (isafe <= 0.0)   # NOTE: 'ignore' is deliberately reused here
iclean   = isafe[~ignore]
uclean   = usafe[~ignore]
ui_ratio = uclean / iclean  # per-pixel uncertainty-to-signal ratio
## Try to reproduce the idata:udata relationship ...
# Convert calibrated pixel values back to detector counts:
icounts  = iclean / fluxconv * exptime * gain    # in electrons
ucounts  = uclean / fluxconv * exptime * gain    # in electrons
#icounts -= np.median(icounts)
##--------------------------------------------------------------------------##
##--------------------------------------------------------------------------##
## Estimate icounts:ucounts relationship from bright pixels:
# Fit a straight line, variance = icept + slope * counts, using only
# bright pixels (counts >= cutoff) where the trend is well determined.
cutoff = 1e3
bright = (icounts >= cutoff)
ic_fit = icounts[bright]
uc_fit = ucounts[bright]
vc_fit = uc_fit**2      # variance = uncertainty squared
sys.stderr.write("Fitting variance(counts) for bright pixels ... ")
model = ts.linefit(ic_fit, vc_fit)     # Theil-Sen fit: (icept, slope)
sys.stderr.write("done.\n")
#model = np.array([375., 1.05])
## A line for plotting:
# The linear grid below was a dead store (immediately overwritten by the
# log-spaced grid), so it is kept only as a commented-out alternative:
#pcounts = np.linspace(0.1, 3e4, 1000)
pcounts = np.logspace(-1.0, 4.5, 1000)
pvarian = model[0] + model[1] * pcounts
##--------------------------------------------------------------------------##
## Theil-Sen line-fitting (linear):
#model = ts.linefit(xvals, yvals)
#icept, slope = ts.linefit(xvals, yvals)
## Theil-Sen line-fitting (loglog):
#xvals, yvals = np.log10(original_xvals), np.log10(original_yvals)
#xvals, yvals = np.log10(df['x'].values), np.log10(df['y'].values)
#llmodel = ts.linefit(np.log10(xvals), np.log10(yvals))
#icept, slope = ts.linefit(xvals, yvals)
#fit_exponent = slope
#fit_multiplier = 10**icept
#bestfit_x = np.arange(5000)
#bestfit_y = fit_multiplier * bestfit_x**fit_exponent
## Log-log evaluator:
#def loglog_eval(xvals, model):
# icept, slope = model
# return 10**icept * xvals**slope
#def loglog_eval(xvals, icept, slope):
# return 10**icept * xvals**slope
##--------------------------------------------------------------------------##
## Plot config:
# gridspec examples:
# https://matplotlib.org/users/gridspec.html
#gs1 = gridspec.GridSpec(4, 4)
#gs1.update(wspace=0.025, hspace=0.05) # set axis spacing
#ax1 = plt.subplot2grid((3, 3), (0, 0), colspan=3) # top-left + center + right
#ax2 = plt.subplot2grid((3, 3), (1, 0), colspan=2) # mid-left + mid-center
#ax3 = plt.subplot2grid((3, 3), (1, 2), rowspan=2) # mid-right + bot-right
#ax4 = plt.subplot2grid((3, 3), (2, 0)) # bot-left
#ax5 = plt.subplot2grid((3, 3), (2, 1)) # bot-center
##--------------------------------------------------------------------------##
#plt.style.use('bmh') # Bayesian Methods for Hackers style
# Plot per-pixel counts vs. variance, overlay the fitted line, and save
# the figure twice: once with log-log axes, once with linear axes.
fig_dims = (12, 10)
fig = plt.figure(1, figsize=fig_dims)
plt.gcf().clf()
#fig, axs = plt.subplots(2, 2, sharex=True, figsize=fig_dims, num=1)
# sharex='col' | sharex='row'
#fig.frameon = False # disable figure frame drawing
#fig.subplots_adjust(left=0.07, right=0.95)
#ax1 = plt.subplot(gs[0, 0])
ax1 = fig.add_subplot(111)
#ax1 = fig.add_axes([0, 0, 1, 1])
#ax1.patch.set_facecolor((0.8, 0.8, 0.8))
#ax1.grid(True)
#ax1.axis('off')
ax1.grid(True)
#ax1.scatter(iclean, uclean, lw=0, s=5)
# Data points (counts vs. variance) plus the fitted line in red:
ax1.scatter(icounts, ucounts**2, lw=0, s=5)
ax1.plot(pcounts, pvarian, c='r')
# First save with log-log axes ...
ax1.set_yscale('log')
ax1.set_xscale('log')
plot_name = 'gain_log.png'
fig.tight_layout() # adjust boundaries sensibly, matplotlib v1.1+
plt.draw()
fig.savefig(plot_name, bbox_inches='tight')
# ... then switch both axes to linear and save again.
ax1.set_xscale('linear')
ax1.set_yscale('linear')
plot_name = 'gain_lin.png'
fig.tight_layout() # adjust boundaries sensibly, matplotlib v1.1+
plt.draw()
fig.savefig(plot_name, bbox_inches='tight')
## Disable axis offsets:
#ax1.xaxis.get_major_formatter().set_useOffset(False)
#ax1.yaxis.get_major_formatter().set_useOffset(False)
#ax1.plot(kde_pnts, kde_vals)
#blurb = "some text"
#ax1.text(0.5, 0.5, blurb, transform=ax1.transAxes)
#ax1.text(0.5, 0.5, blurb, transform=ax1.transAxes,
# va='top', ha='left', bbox=dict(facecolor='white', pad=10.0))
# fontdict={'family':'monospace'}) # fixed-width
#colors = cm.rainbow(np.linspace(0, 1, len(plot_list)))
#for camid, c in zip(plot_list, colors):
# cam_data = subsets[camid]
# xvalue = cam_data['CCDATEMP']
# yvalue = cam_data['PIX_MED']
# yvalue = cam_data['IMEAN']
# ax1.scatter(xvalue, yvalue, color=c, lw=0, label=camid)
#mtickpos = [2,5,7]
#ndecades = 1.0 # for symlog, set width of linear portion in units of dex
#nonposx='mask' | nonposx='clip' | nonposy='mask' | nonposy='clip'
#ax1.set_xscale('log', basex=10, nonposx='mask', subsx=mtickpos)
#ax1.set_xscale('log', nonposx='clip', subsx=[3])
#ax1.set_yscale('symlog', basey=10, linthreshy=0.1, linscaley=ndecades)
#ax1.xaxis.set_major_formatter(formatter) # re-format x ticks
#ax1.set_ylim(ax1.get_ylim()[::-1])
#ax1.set_xlabel('whatever', labelpad=30) # push X label down
#ax1.set_xticks([1.0, 3.0, 10.0, 30.0, 100.0])
#ax1.set_xticks([1, 2, 3], ['Jan', 'Feb', 'Mar'])
#for label in ax1.get_xticklabels():
# label.set_rotation(30)
# label.set_fontsize(14)
#ax1.xaxis.label.set_fontsize(18)
#ax1.yaxis.label.set_fontsize(18)
#ax1.set_xlim(nice_limits(xvec, pctiles=[1,99], pad=1.2))
#ax1.set_ylim(nice_limits(yvec, pctiles=[1,99], pad=1.2))
#spts = ax1.scatter(x, y, lw=0, s=5)
##cbar = fig.colorbar(spts, orientation='vertical') # old way
#cbnorm = mplcolors.Normalize(*spts.get_clim())
#scm = plt.cm.ScalarMappable(norm=cbnorm, cmap=spts.cmap)
#scm.set_array([])
#cbar = fig.colorbar(scm, orientation='vertical')
#cbar = fig.colorbar(scm, ticks=cs.levels, orientation='vertical') # contours
#cbar.formatter.set_useOffset(False)
#cbar.update_ticks()
# Final layout/redraw pass after the (mostly commented-out) tweaks above;
# the corresponding savefig call below is intentionally disabled.
fig.tight_layout() # adjust boundaries sensibly, matplotlib v1.1+
plt.draw()
#fig.savefig(plot_name, bbox_inches='tight')
######################################################################
# CHANGELOG (compare_images.py):
#---------------------------------------------------------------------
#
# 2021-06-03:
# -- Increased __version__ to 0.0.1.
# -- First created compare_images.py.
#
| [
"logging.basicConfig",
"logging.getLogger",
"numpy.__version__.split",
"numpy.abs",
"os.getenv",
"matplotlib.pyplot.gcf",
"theil_sen.linefit",
"imp.reload",
"sys.stderr.write",
"astropy.io.fits.getdata",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.isnan",
"sys.exit",
"matplotlib... | [((514, 553), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (533, 553), False, 'import logging\n'), ((563, 590), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (580, 590), False, 'import logging\n'), ((8752, 8782), 'astropy.io.fits.getdata', 'pf.getdata', (['ifile'], {'header': '(True)'}), '(ifile, header=True)\n', (8762, 8782), True, 'import astropy.io.fits as pf\n'), ((8798, 8828), 'astropy.io.fits.getdata', 'pf.getdata', (['ufile'], {'header': '(True)'}), '(ufile, header=True)\n', (8808, 8828), True, 'import astropy.io.fits as pf\n'), ((9644, 9711), 'sys.stderr.write', 'sys.stderr.write', (['"""Fitting variance(counts) for bright pixels ... """'], {}), "('Fitting variance(counts) for bright pixels ... ')\n", (9660, 9711), False, 'import sys\n'), ((9720, 9746), 'theil_sen.linefit', 'ts.linefit', (['ic_fit', 'vc_fit'], {}), '(ic_fit, vc_fit)\n', (9730, 9746), True, 'import theil_sen as ts\n'), ((9747, 9774), 'sys.stderr.write', 'sys.stderr.write', (['"""done.\n"""'], {}), "('done.\\n')\n", (9763, 9774), False, 'import sys\n'), ((9842, 9873), 'numpy.linspace', 'np.linspace', (['(0.1)', '(30000.0)', '(1000)'], {}), '(0.1, 30000.0, 1000)\n', (9853, 9873), True, 'import numpy as np\n'), ((9880, 9908), 'numpy.logspace', 'np.logspace', (['(-1.0)', '(4.5)', '(1000)'], {}), '(-1.0, 4.5, 1000)\n', (9891, 9908), True, 'import numpy as np\n'), ((11513, 11544), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': 'fig_dims'}), '(1, figsize=fig_dims)\n', (11523, 11544), True, 'import matplotlib.pyplot as plt\n'), ((12193, 12203), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (12201, 12203), True, 'import matplotlib.pyplot as plt\n'), ((12392, 12402), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (12400, 12402), True, 'import matplotlib.pyplot as plt\n'), ((14521, 14531), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (14529, 14531), 
True, 'import matplotlib.pyplot as plt\n'), ((2951, 2971), 'imp.reload', 'reload', (['robust_stats'], {}), '(robust_stats)\n', (2957, 2971), False, 'from imp import reload\n'), ((5049, 5069), 'os.getenv', 'os.getenv', (['"""FUNCDEF"""'], {}), "('FUNCDEF')\n", (5058, 5069), False, 'import os\n'), ((8917, 8932), 'numpy.isnan', 'np.isnan', (['idata'], {}), '(idata)\n', (8925, 8932), True, 'import numpy as np\n'), ((8935, 8950), 'numpy.isnan', 'np.isnan', (['udata'], {}), '(udata)\n', (8943, 8950), True, 'import numpy as np\n'), ((3089, 3197), 'sys.stderr.write', 'sys.stderr.write', (['"""\nError! robust_stats module not found!\nPlease install and try again ...\n\n"""'], {}), '(\n """\nError! robust_stats module not found!\nPlease install and try again ...\n\n"""\n )\n', (3105, 3197), False, 'import sys\n'), ((3206, 3217), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3214, 3217), False, 'import sys\n'), ((4179, 4237), 'sys.stderr.write', 'sys.stderr.write', (['"""\nError: astropy module not found!\n"""'], {}), '("""\nError: astropy module not found!\n""")\n', (4195, 4237), False, 'import sys\n'), ((4240, 4251), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4248, 4251), False, 'import sys\n'), ((11545, 11554), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (11552, 11554), True, 'import matplotlib.pyplot as plt\n'), ((2259, 2284), 'numpy.__version__.split', 'np.__version__.split', (['"""."""'], {}), "('.')\n", (2279, 2284), True, 'import numpy as np\n'), ((6713, 6730), 'numpy.abs', 'np.abs', (['(vec - val)'], {}), '(vec - val)\n', (6719, 6730), True, 'import numpy as np\n')] |
import logging
import os
try:
import simplejson as json
except ImportError:
import json
from flask import Flask, request, make_response, Response
from cStringIO import StringIO
import zipfile
def get_mocked_server(binary_directory):
    """Build a Flask app that mimics a Carbon Black server's binary API.

    The app serves the JSON metadata and raw binary files found in
    *binary_directory* through:
      * GET/POST /api/v1/binary           -- paged binary "search"
      * GET /api/v1/binary/<md5>/summary  -- JSON metadata for one binary
      * GET /api/v1/binary/<md5>          -- zip of file contents + metadata
      * GET /api/info                     -- server version info
    """
    mocked_cb_server = Flask('cb')
    # The directory listing is taken once at startup; files added later
    # are not picked up.
    files = os.listdir(binary_directory)

    @mocked_cb_server.route('/api/v1/binary', methods=['GET', 'POST'])
    def binary_search_endpoint():
        # Accept 'q', 'rows', 'start' either as GET query-string args or
        # as a JSON POST body, with the same defaults in both cases.
        if request.method == 'GET':
            query_string = request.args.get('q', '')
            rows = int(request.args.get('rows', 10))
            start = int(request.args.get('start', 0))
        elif request.method == 'POST':
            parsed_data = json.loads(request.data)
            query_string = parsed_data.get('q', '')
            rows = int(parsed_data.get('rows', 10))
            start = int(parsed_data.get('start', 0))
        else:
            return make_response('Invalid Request', 500)
        return Response(response=json.dumps(binary_search(query_string, rows, start)),
                        mimetype='application/json')

    def binary_search(q, rows, start):
        # NOTE(review): the query string is ignored -- every stored binary
        # "matches", which is adequate for a mock server.
        results = []
        for fn in files[start:start + rows]:
            # Fix: the original leaked file handles via json.load(open(...)).
            with open(os.path.join(binary_directory, fn), 'r') as fp:
                results.append(json.load(fp))
        return {
            'results': results,
            'terms': '',
            'total_results': len(files),
            'start': start,
            'elapsed': 0.1,
            'highlights': [],
            'facets': {}
        }

    @mocked_cb_server.route('/api/v1/binary/<md5sum>/summary')
    def get_binary_summary(md5sum):
        filepath = os.path.join(binary_directory, '%s.json' % md5sum.lower())
        if not os.path.exists(filepath):
            return Response("File not found", 404)
        with open(filepath, 'r') as fp:
            binary_data = fp.read()
        return Response(response=binary_data, mimetype='application/json')

    @mocked_cb_server.route('/api/v1/binary/<md5sum>')
    def get_binary(md5sum):
        metadata_filepath = os.path.join(binary_directory, '%s.json' % md5sum.lower())
        content_filepath = os.path.join(binary_directory, '%s' % md5sum.lower())
        for filepath in [metadata_filepath, content_filepath]:
            if not os.path.exists(filepath):
                return Response("File not found", 404)
        # Package the raw file plus its metadata into an in-memory zip,
        # matching the real server's download format.
        zipfile_contents = StringIO()
        zf = zipfile.ZipFile(zipfile_contents, 'w', zipfile.ZIP_DEFLATED, False)
        with open(content_filepath, 'r') as fp:
            zf.writestr('filedata', fp.read())
        with open(metadata_filepath, 'r') as fp:
            zf.writestr('metadata', fp.read())
        zf.close()
        return Response(response=zipfile_contents.getvalue(), mimetype='application/zip')

    @mocked_cb_server.route('/api/info')
    def info():
        return Response(response=json.dumps({"version": "5.1.0"}), mimetype='application/json')

    return mocked_cb_server
if __name__ == '__main__':
    # Serve binaries from the repo's test-data directory on localhost:7982.
    mydir = os.path.dirname(os.path.abspath(__file__))
    binaries_dir = os.path.join(mydir, '..', 'data', 'binary_data')
    mock_server = get_mocked_server(binaries_dir)
    mock_server.run('127.0.0.1', 7982, debug=True)
mock_server.run('127.0.0.1', 7982, debug=True) | [
"flask.request.args.get",
"os.path.exists",
"cStringIO.StringIO",
"os.listdir",
"json.loads",
"zipfile.ZipFile",
"flask.Flask",
"json.dumps",
"os.path.join",
"flask.Response",
"os.path.abspath",
"flask.make_response"
] | [((268, 279), 'flask.Flask', 'Flask', (['"""cb"""'], {}), "('cb')\n", (273, 279), False, 'from flask import Flask, request, make_response, Response\n'), ((293, 321), 'os.listdir', 'os.listdir', (['binary_directory'], {}), '(binary_directory)\n', (303, 321), False, 'import os\n'), ((3167, 3215), 'os.path.join', 'os.path.join', (['mydir', '""".."""', '"""data"""', '"""binary_data"""'], {}), "(mydir, '..', 'data', 'binary_data')\n", (3179, 3215), False, 'import os\n'), ((2038, 2097), 'flask.Response', 'Response', ([], {'response': 'binary_data', 'mimetype': '"""application/json"""'}), "(response=binary_data, mimetype='application/json')\n", (2046, 2097), False, 'from flask import Flask, request, make_response, Response\n'), ((2542, 2552), 'cStringIO.StringIO', 'StringIO', ([], {}), '()\n', (2550, 2552), False, 'from cStringIO import StringIO\n'), ((2566, 2633), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zipfile_contents', '"""w"""', 'zipfile.ZIP_DEFLATED', '(False)'], {}), "(zipfile_contents, 'w', zipfile.ZIP_DEFLATED, False)\n", (2581, 2633), False, 'import zipfile\n'), ((3121, 3146), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (3136, 3146), False, 'import os\n'), ((491, 516), 'flask.request.args.get', 'request.args.get', (['"""q"""', '""""""'], {}), "('q', '')\n", (507, 516), False, 'from flask import Flask, request, make_response, Response\n'), ((1896, 1920), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (1910, 1920), False, 'import os\n'), ((1941, 1972), 'flask.Response', 'Response', (['"""File not found"""', '(404)'], {}), "('File not found', 404)\n", (1949, 1972), False, 'from flask import Flask, request, make_response, Response\n'), ((540, 568), 'flask.request.args.get', 'request.args.get', (['"""rows"""', '(10)'], {}), "('rows', 10)\n", (556, 568), False, 'from flask import Flask, request, make_response, Response\n'), ((594, 622), 'flask.request.args.get', 'request.args.get', (['"""start"""', '(0)'], 
{}), "('start', 0)\n", (610, 622), False, 'from flask import Flask, request, make_response, Response\n'), ((689, 713), 'json.loads', 'json.loads', (['request.data'], {}), '(request.data)\n', (699, 713), False, 'import json\n'), ((1147, 1184), 'flask.make_response', 'make_response', (['"""Invalid Request"""', '(500)'], {}), "('Invalid Request', 500)\n", (1160, 1184), False, 'from flask import Flask, request, make_response, Response\n'), ((2433, 2457), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (2447, 2457), False, 'import os\n'), ((2482, 2513), 'flask.Response', 'Response', (['"""File not found"""', '(404)'], {}), "('File not found', 404)\n", (2490, 2513), False, 'from flask import Flask, request, make_response, Response\n'), ((2972, 3004), 'json.dumps', 'json.dumps', (["{'version': '5.1.0'}"], {}), "({'version': '5.1.0'})\n", (2982, 3004), False, 'import json\n'), ((1438, 1472), 'os.path.join', 'os.path.join', (['binary_directory', 'fn'], {}), '(binary_directory, fn)\n', (1450, 1472), False, 'import os\n')] |
from common.parse_tex import (
BeginDocumentExtractor,
BibitemExtractor,
DocumentclassExtractor,
EquationExtractor,
MacroExtractor,
PlaintextExtractor,
)
from common.types import MacroDefinition
from entities.sentences.extractor import SentenceExtractor
def test_extract_plaintext_with_newlines():
    """Newlines must be preserved by the plaintext extractor."""
    # Earlier versions of the plaintext extractor inadvertently removed
    # newlines, which downstream tasks (e.g. sentence boundary detection)
    # rely on; joining the segments must reproduce the input exactly.
    tex = "This sentence is followed by a newline.\nThis is the second sentence."
    segments = PlaintextExtractor().parse("main.tex", tex)
    assert "".join(segment.text for segment in segments) == tex
def test_extract_sentences():
    """Macros are flattened into plaintext and sentences split on periods."""
    tex = "This is the first \\macro[arg]{sentence}. This is the second sentence."
    sentences = list(SentenceExtractor().parse("main.tex", tex))
    assert len(sentences) == 2
    first, second = sentences
    assert (first.start, first.end) == (0, 40)
    assert first.text == "This is the first argsentence."
    assert (second.start, second.end) == (41, 69)
    assert second.text == "This is the second sentence."
def test_ignore_periods_in_equations():
    """Periods inside inline math must not split sentences."""
    tex = "This sentence has an $ equation. In $ the middle."
    sentences = list(SentenceExtractor().parse("main.tex", tex))
    assert [s.text for s in sentences] == ["This sentence has an [[math]] the middle."]
def test_extract_equation_from_dollar_sign():
    """Inline dollar-sign math yields one equation; content excludes delimiters."""
    results = list(EquationExtractor().parse("main.tex", "$x + y$"))
    assert len(results) == 1
    eq = results[0]
    assert (eq.start, eq.content_start, eq.end) == (0, 1, 7)
    assert eq.content_tex == "x + y"
    assert eq.tex == "$x + y$"
def test_extract_equation_from_equation_environment():
    """An equation environment is extracted with the expected offsets."""
    tex = "\\begin{equation}x\\end{equation}"
    results = list(EquationExtractor().parse("main.tex", tex))
    assert len(results) == 1
    eq = results[0]
    assert (eq.start, eq.content_start, eq.end) == (0, 16, 31)
    assert eq.content_tex == "x"
    assert eq.tex == tex
def test_extract_equation_from_star_environment():
    """Starred environments (equation*) are recognized too."""
    tex = "\\begin{equation*}x\\end{equation*}"
    results = list(EquationExtractor().parse("main.tex", tex))
    assert len(results) == 1
    assert (results[0].start, results[0].end) == (0, 33)
def test_extract_equation_environment_with_argument():
    """Environment arguments (e.g. the {c} of array) precede the content."""
    results = list(EquationExtractor().parse("main.tex", "\\begin{array}{c}x\\end{array}"))
    assert len(results) == 1
    assert results[0].content_start == 16
def test_extract_equation_from_double_dollar_signs():
    """Double-dollar display math is extracted as a single equation."""
    results = list(EquationExtractor().parse("main.tex", "$$x$$"))
    assert len(results) == 1
    assert (results[0].start, results[0].end) == (0, 5)
def test_dont_extract_equation_from_command_argument_brackets():
    """Square brackets holding command options are not math delimiters."""
    assert not list(EquationExtractor().parse("main.tex", "\\documentclass[11pt]{article}"))
def test_extract_equation_from_brackets():
    """Bracket-delimited display math is extracted with correct offsets."""
    results = list(EquationExtractor().parse("main.tex", "\\[x + y\\]"))
    assert len(results) == 1
    eq = results[0]
    assert (eq.start, eq.content_start, eq.end) == (0, 2, 9)
def test_extract_nested_equations():
    """Both the outer dollar-sign span and the nested environment are found."""
    tex = "$x + \\hbox{\\begin{equation}y\\end{equation}}$"
    results = list(EquationExtractor().parse("main.tex", tex))
    assert len(results) == 2
    span_ends = {eq.start: eq.end for eq in results}
    assert span_ends[0] == 44   # outer dollar-sign equation
    assert span_ends[11] == 42  # inner equation environment
def test_handle_unclosed_environments():
    """An unterminated inner environment still closes with the outer span."""
    results = list(EquationExtractor().parse("main.tex", "$x + \\hbox{\\begin{equation}y}$"))
    assert len(results) == 1
    assert (results[0].start, results[0].end) == (0, 30)
def test_ignore_escaped_dollar_sign():
    """Escaped dollar signs never open an equation."""
    assert not list(EquationExtractor().parse("main.tex", "\\$\\$"))
def test_extract_begindocument():
    """The begin-document marker is located even after preamble commands."""
    tex = "\\RequirePackage[hyperindex]{hyperref}\n\\begin{document}"
    result = BeginDocumentExtractor().parse(tex)
    assert (result.start, result.end) == (38, 54)
def test_extract_documentclass_after_comment_ending_with_whitespace():
    """A commented-out documentclass (with trailing blanks) is skipped."""
    tex = "\n\n%\\documentclass{IEEEtran} \n\\documentclass{article}"
    assert DocumentclassExtractor().parse(tex) is not None
def test_documentclass_after_macro():
    """The documentclass is detected even when macro definitions precede it."""
    # Some TeX files don't declare the documentclass until after some
    # initial macros; detection must still succeed for those documents.
    tex = "\\def\year{2020}\n\\documentclass{article}"
    documentclass = DocumentclassExtractor().parse(tex)
    assert documentclass is not None
def test_extract_bibitems():
    """Each bibitem yields its key plus the concatenated newblock tokens."""
    tex = "\n".join(
        [
            "\\bibitem[label]{key1}",
            "token1",
            "\\newblock \\emph{token2}",
            "\\newblock token3",
            "\\bibitem[label]{key2}",
            "token4",
            "\\newblock \\emph{token5}",
        ]
    )
    entries = list(BibitemExtractor().parse(tex))
    assert [(b.key, b.text) for b in entries] == [
        ("key1", "token1 token2 token3"),
        ("key2", "token4 token5"),
    ]
def test_extract_bibitem_tokens_from_curly_braces():
    """Curly-brace groups inside a bibitem are flattened to plain tokens."""
    tex = "\n".join(["\\bibitem[label]{key1}", "token1 {token2} {token3}",])
    entries = list(BibitemExtractor().parse(tex))
    assert [(b.key, b.text) for b in entries] == [("key1", "token1 token2 token3")]
def test_extract_bibitems_from_environment():
    """Bibitems wrapped in a thebibliography environment are still found."""
    tex = "\n".join(
        [
            "\\begin{thebibliography}",
            "\\bibitem[label]{key1}",
            "token1",
            "\\end{thebibliography}",
        ]
    )
    entries = list(BibitemExtractor().parse(tex))
    assert [(b.key, b.text) for b in entries] == [("key1", "token1")]
def test_extract_bibitem_stop_at_newline():
    """A blank line terminates a bibitem's token collection."""
    tex = "\n".join(
        ["\\bibitem[label]{key1}", "token1", "", "text after bibliography (to ignore)"]
    )
    entries = list(BibitemExtractor().parse(tex))
    assert [(b.key, b.text) for b in entries] == [("key1", "token1")]
def test_extract_macro():
    """A macro with an empty parameter spec spans just the control sequence."""
    macros = list(MacroExtractor().parse("\\macro", MacroDefinition("macro", "")))
    assert len(macros) == 1
    assert (macros[0].start, macros[0].end) == (0, 6)
def test_extract_macro_with_delimited_parameter():
    """A delimited parameter (#1.) consumes text up to and including '.'."""
    macros = list(MacroExtractor().parse("\\macro arg.", MacroDefinition("macro", "#1.")))
    assert len(macros) == 1
    assert (macros[0].start, macros[0].end) == (0, 11)
    assert macros[0].tex == "\\macro arg."
def test_extract_macro_with_undelimited_parameter():
    """An undelimited parameter (#1) matches the first non-blank token."""
    # The scanner for undelimited parameter '#1' should match 'a'.
    macros = list(MacroExtractor().parse("\\macro a", MacroDefinition("macro", "#1")))
    assert len(macros) == 1
    assert (macros[0].start, macros[0].end) == (0, 9)
    assert macros[0].tex == "\\macro a"
def test_extract_macro_balance_nested_braces_for_argument():
    """Brace matching for a macro argument balances nested groups."""
    macros = list(MacroExtractor().parse("\\macro{{nested}}", MacroDefinition("macro", "#1")))
    assert len(macros) == 1
    assert (macros[0].start, macros[0].end) == (0, 16)
    assert macros[0].tex == "\\macro{{nested}}"
def test_sentence_splitting_end_points():
    """Enumerated items like '1)' start new sentences at the right offsets."""
    tex = "This is a sentence. Next we describe two items. 1) The first item. 2) The second item."
    sentences = list(SentenceExtractor().parse("main.tex", tex))
    expected_spans = [(0, 19), (20, 47), (48, 66), (67, 86)]
    assert len(sentences) == len(expected_spans)
    assert [(s.start, s.end) for s in sentences] == expected_spans
def test_sentence_splitting_end_points_and_more_text():
    """Abbreviated fragments and numbered items split at the expected offsets."""
    tex = "This sentence. has extra. text. 1. first 2. second 3. third. And some extra. stuff."
    sentences = list(SentenceExtractor().parse("main.tex", tex))
    expected_spans = [
        (0, 14),
        (15, 25),
        (26, 31),
        (32, 40),
        (41, 50),
        (51, 60),
        (61, 76),
        (77, 83),
    ]
    assert len(sentences) == len(expected_spans)
    assert [(s.start, s.end) for s in sentences] == expected_spans
| [
"common.parse_tex.BeginDocumentExtractor",
"common.parse_tex.DocumentclassExtractor",
"entities.sentences.extractor.SentenceExtractor",
"common.types.MacroDefinition",
"common.parse_tex.EquationExtractor",
"common.parse_tex.BibitemExtractor",
"common.parse_tex.PlaintextExtractor",
"common.parse_tex.Ma... | [((340, 360), 'common.parse_tex.PlaintextExtractor', 'PlaintextExtractor', ([], {}), '()\n', (358, 360), False, 'from common.parse_tex import BeginDocumentExtractor, BibitemExtractor, DocumentclassExtractor, EquationExtractor, MacroExtractor, PlaintextExtractor\n'), ((1023, 1042), 'entities.sentences.extractor.SentenceExtractor', 'SentenceExtractor', ([], {}), '()\n', (1040, 1042), False, 'from entities.sentences.extractor import SentenceExtractor\n'), ((1620, 1639), 'entities.sentences.extractor.SentenceExtractor', 'SentenceExtractor', ([], {}), '()\n', (1637, 1639), False, 'from entities.sentences.extractor import SentenceExtractor\n'), ((1928, 1947), 'common.parse_tex.EquationExtractor', 'EquationExtractor', ([], {}), '()\n', (1945, 1947), False, 'from common.parse_tex import BeginDocumentExtractor, BibitemExtractor, DocumentclassExtractor, EquationExtractor, MacroExtractor, PlaintextExtractor\n'), ((2321, 2340), 'common.parse_tex.EquationExtractor', 'EquationExtractor', ([], {}), '()\n', (2338, 2340), False, 'from common.parse_tex import BeginDocumentExtractor, BibitemExtractor, DocumentclassExtractor, EquationExtractor, MacroExtractor, PlaintextExtractor\n'), ((2760, 2779), 'common.parse_tex.EquationExtractor', 'EquationExtractor', ([], {}), '()\n', (2777, 2779), False, 'from common.parse_tex import BeginDocumentExtractor, BibitemExtractor, DocumentclassExtractor, EquationExtractor, MacroExtractor, PlaintextExtractor\n'), ((3063, 3082), 'common.parse_tex.EquationExtractor', 'EquationExtractor', ([], {}), '()\n', (3080, 3082), False, 'from common.parse_tex import BeginDocumentExtractor, BibitemExtractor, DocumentclassExtractor, EquationExtractor, MacroExtractor, PlaintextExtractor\n'), ((3339, 3358), 'common.parse_tex.EquationExtractor', 'EquationExtractor', ([], {}), '()\n', (3356, 3358), False, 'from common.parse_tex import BeginDocumentExtractor, BibitemExtractor, DocumentclassExtractor, EquationExtractor, MacroExtractor, 
PlaintextExtractor\n'), ((3621, 3640), 'common.parse_tex.EquationExtractor', 'EquationExtractor', ([], {}), '()\n', (3638, 3640), False, 'from common.parse_tex import BeginDocumentExtractor, BibitemExtractor, DocumentclassExtractor, EquationExtractor, MacroExtractor, PlaintextExtractor\n'), ((3817, 3836), 'common.parse_tex.EquationExtractor', 'EquationExtractor', ([], {}), '()\n', (3834, 3836), False, 'from common.parse_tex import BeginDocumentExtractor, BibitemExtractor, DocumentclassExtractor, EquationExtractor, MacroExtractor, PlaintextExtractor\n'), ((4116, 4135), 'common.parse_tex.EquationExtractor', 'EquationExtractor', ([], {}), '()\n', (4133, 4135), False, 'from common.parse_tex import BeginDocumentExtractor, BibitemExtractor, DocumentclassExtractor, EquationExtractor, MacroExtractor, PlaintextExtractor\n'), ((4516, 4535), 'common.parse_tex.EquationExtractor', 'EquationExtractor', ([], {}), '()\n', (4533, 4535), False, 'from common.parse_tex import BeginDocumentExtractor, BibitemExtractor, DocumentclassExtractor, EquationExtractor, MacroExtractor, PlaintextExtractor\n'), ((4799, 4818), 'common.parse_tex.EquationExtractor', 'EquationExtractor', ([], {}), '()\n', (4816, 4818), False, 'from common.parse_tex import BeginDocumentExtractor, BibitemExtractor, DocumentclassExtractor, EquationExtractor, MacroExtractor, PlaintextExtractor\n'), ((4962, 4986), 'common.parse_tex.BeginDocumentExtractor', 'BeginDocumentExtractor', ([], {}), '()\n', (4984, 4986), False, 'from common.parse_tex import BeginDocumentExtractor, BibitemExtractor, DocumentclassExtractor, EquationExtractor, MacroExtractor, PlaintextExtractor\n'), ((5259, 5283), 'common.parse_tex.DocumentclassExtractor', 'DocumentclassExtractor', ([], {}), '()\n', (5281, 5283), False, 'from common.parse_tex import BeginDocumentExtractor, BibitemExtractor, DocumentclassExtractor, EquationExtractor, MacroExtractor, PlaintextExtractor\n'), ((5650, 5674), 'common.parse_tex.DocumentclassExtractor', 
'DocumentclassExtractor', ([], {}), '()\n', (5672, 5674), False, 'from common.parse_tex import BeginDocumentExtractor, BibitemExtractor, DocumentclassExtractor, EquationExtractor, MacroExtractor, PlaintextExtractor\n'), ((6137, 6155), 'common.parse_tex.BibitemExtractor', 'BibitemExtractor', ([], {}), '()\n', (6153, 6155), False, 'from common.parse_tex import BeginDocumentExtractor, BibitemExtractor, DocumentclassExtractor, EquationExtractor, MacroExtractor, PlaintextExtractor\n'), ((6551, 6569), 'common.parse_tex.BibitemExtractor', 'BibitemExtractor', ([], {}), '()\n', (6567, 6569), False, 'from common.parse_tex import BeginDocumentExtractor, BibitemExtractor, DocumentclassExtractor, EquationExtractor, MacroExtractor, PlaintextExtractor\n'), ((6982, 7000), 'common.parse_tex.BibitemExtractor', 'BibitemExtractor', ([], {}), '()\n', (6998, 7000), False, 'from common.parse_tex import BeginDocumentExtractor, BibitemExtractor, DocumentclassExtractor, EquationExtractor, MacroExtractor, PlaintextExtractor\n'), ((7327, 7345), 'common.parse_tex.BibitemExtractor', 'BibitemExtractor', ([], {}), '()\n', (7343, 7345), False, 'from common.parse_tex import BeginDocumentExtractor, BibitemExtractor, DocumentclassExtractor, EquationExtractor, MacroExtractor, PlaintextExtractor\n'), ((7559, 7575), 'common.parse_tex.MacroExtractor', 'MacroExtractor', ([], {}), '()\n', (7573, 7575), False, 'from common.parse_tex import BeginDocumentExtractor, BibitemExtractor, DocumentclassExtractor, EquationExtractor, MacroExtractor, PlaintextExtractor\n'), ((7830, 7846), 'common.parse_tex.MacroExtractor', 'MacroExtractor', ([], {}), '()\n', (7844, 7846), False, 'from common.parse_tex import BeginDocumentExtractor, BibitemExtractor, DocumentclassExtractor, EquationExtractor, MacroExtractor, PlaintextExtractor\n'), ((8241, 8257), 'common.parse_tex.MacroExtractor', 'MacroExtractor', ([], {}), '()\n', (8255, 8257), False, 'from common.parse_tex import BeginDocumentExtractor, BibitemExtractor, 
DocumentclassExtractor, EquationExtractor, MacroExtractor, PlaintextExtractor\n'), ((8570, 8586), 'common.parse_tex.MacroExtractor', 'MacroExtractor', ([], {}), '()\n', (8584, 8586), False, 'from common.parse_tex import BeginDocumentExtractor, BibitemExtractor, DocumentclassExtractor, EquationExtractor, MacroExtractor, PlaintextExtractor\n'), ((8858, 8877), 'entities.sentences.extractor.SentenceExtractor', 'SentenceExtractor', ([], {}), '()\n', (8875, 8877), False, 'from entities.sentences.extractor import SentenceExtractor\n'), ((9380, 9399), 'entities.sentences.extractor.SentenceExtractor', 'SentenceExtractor', ([], {}), '()\n', (9397, 9399), False, 'from entities.sentences.extractor import SentenceExtractor\n'), ((7615, 7643), 'common.types.MacroDefinition', 'MacroDefinition', (['"""macro"""', '""""""'], {}), "('macro', '')\n", (7630, 7643), False, 'from common.types import MacroDefinition\n'), ((7886, 7917), 'common.types.MacroDefinition', 'MacroDefinition', (['"""macro"""', '"""#1."""'], {}), "('macro', '#1.')\n", (7901, 7917), False, 'from common.types import MacroDefinition\n'), ((8297, 8327), 'common.types.MacroDefinition', 'MacroDefinition', (['"""macro"""', '"""#1"""'], {}), "('macro', '#1')\n", (8312, 8327), False, 'from common.types import MacroDefinition\n'), ((8626, 8656), 'common.types.MacroDefinition', 'MacroDefinition', (['"""macro"""', '"""#1"""'], {}), "('macro', '#1')\n", (8641, 8656), False, 'from common.types import MacroDefinition\n')] |
from webdriver_manager.driver import EdgeDriver, IEDriver
from webdriver_manager.manager import DriverManager
from webdriver_manager import utils
class EdgeDriverManager(DriverManager):
def __init__(self, version=None,
os_type=utils.os_name()):
super(EdgeDriverManager, self).__init__()
self.driver = EdgeDriver(version=version,
os_type=os_type)
def install(self, path=None):
# type: () -> str
return self._file_manager.download_binary(self.driver, path).path
class IEDriverManager(DriverManager):
def __init__(self, version=None, os_type=utils.os_type()):
super(IEDriverManager, self).__init__()
self.driver = IEDriver(version=version, os_type=os_type)
def install(self, path=None):
# type: () -> str
return self._file_manager.download_driver(self.driver, path).path
| [
"webdriver_manager.utils.os_type",
"webdriver_manager.utils.os_name",
"webdriver_manager.driver.IEDriver",
"webdriver_manager.driver.EdgeDriver"
] | [((250, 265), 'webdriver_manager.utils.os_name', 'utils.os_name', ([], {}), '()\n', (263, 265), False, 'from webdriver_manager import utils\n'), ((340, 384), 'webdriver_manager.driver.EdgeDriver', 'EdgeDriver', ([], {'version': 'version', 'os_type': 'os_type'}), '(version=version, os_type=os_type)\n', (350, 384), False, 'from webdriver_manager.driver import EdgeDriver, IEDriver\n'), ((638, 653), 'webdriver_manager.utils.os_type', 'utils.os_type', ([], {}), '()\n', (651, 653), False, 'from webdriver_manager import utils\n'), ((726, 768), 'webdriver_manager.driver.IEDriver', 'IEDriver', ([], {'version': 'version', 'os_type': 'os_type'}), '(version=version, os_type=os_type)\n', (734, 768), False, 'from webdriver_manager.driver import EdgeDriver, IEDriver\n')] |
"""Parent class DataN."""
import os
import os.path
from warnings import warn
from typing import Union, NoReturn
from pycifstar import Data, to_data
from cryspy.A_functions_base.function_1_markdown import md_to_html
from cryspy.A_functions_base.function_1_objects import \
get_functions_of_objet, get_table_html_for_variables
from cryspy.B_parent_classes.cl_1_item import ItemN
from cryspy.B_parent_classes.cl_2_loop import LoopN
class DataN(object):
"""Data container of loops and items."""
def __repr__(self):
"""
Magic method print() is redefined.
Returns
-------
TYPE
DESCRIPTION.
"""
ls_out = [f"# Object '{self.get_name():}'"]
for item in self.items:
if isinstance(item, ItemN):
ls_out.append(f"{4*' ':}.{item.get_name():}")
else:
ls_out.append(f"{4*' ':}.{item.get_name():} (loop)")
method = self.methods_html()
if method != "":
ls_out.append(f"\n# Methods:\n{method:}\n")
return "\n".join(ls_out)
def _repr_html_(self):
"""Representation in HTML format."""
ls_html = [f"<h2>Object '{self.get_name():}'</h2>"]
ls_html.append(self.attributes_to_html())
ls_html.append(get_table_html_for_variables(self))
report = self.report_html()
if report != "":
ls_html.append(f"<h2>Description </h2> {report:}")
ls_html.append(f"<h2>Classes and methods</h2>")
try:
names = sorted([obj.__name__ for obj in self.CLASSES_MANDATORY])
if len(names) != 0:
ls_html.append("<b>Mandatory classes: </b>")
ls_html.append(f"{', '.join(names):}.<br>")
except AttributeError:
pass
try:
names = sorted([obj.__name__ for obj in self.CLASSES_OPTIONAL])
if len(names) != 0:
ls_html.append("<b>Optional classes: </b>")
ls_html.append(f"{', '.join(names):}.<br>")
except AttributeError:
pass
method = self.methods_html()
if method != "":
ls_html.append(f"<b>Methods: </b> {method:}")
return " ".join(ls_html)
def methods_html(self):
ls_html = [f".{func_name}" for func_name in
get_functions_of_objet(self)]
return ", ".join(ls_html)+"."
def attributes_to_html(self) -> str:
"""Representation of defined parameters in HTML format.
"""
ls_html = ["<table>"]
ls_html.append("<tr><th>Attribute</th><th> Note </th></tr>")
items_sorted = sorted(self.items, key=lambda item: item.get_name())
for item in items_sorted:
item_type = item.__doc__.strip().split("\n")[0]
ls_html.append(f"<tr><td>.{item.get_name():}</td>\
<td>{item_type:}</td></tr>")
ls_html.append("</table>")
return " ".join(ls_html)
def __str__(self):
"""
Magic method str() is redefined.
Returns
-------
TYPE
DESCRIPTION.
"""
return self.to_cif()
def __getattr__(self, name):
"""
Magic method __getattr__ is slightly changed for special attributes.
Parameters
----------
name : TYPE
DESCRIPTION.
Raises
------
AttributeError
DESCRIPTION.
Returns
-------
res : TYPE
DESCRIPTION.
"""
for item in self.items:
if name.lower() == item.get_name():
return item
raise AttributeError(f"Attribute '{name:}' is not defined")
def is_attribute(self, name):
"""Temporary construction.
Better to use:
try:
obj = self.attribute_name
except AttributeError as e:
obj = ...
"""
for item in self.items:
if name.lower() == item.get_name():
return True
return False
def __setattr__(self, name, value) -> NoReturn:
"""
Rules to set attribute.
Parameters
----------
name : TYPE
DESCRIPTION.
value : TYPE
DESCRIPTION.
Returns
-------
NoReturn
DESCRIPTION.
"""
flag_items, flag_direct = False, True
if name == "data_name":
flag_direct = False
val_new = str(value).strip()
elif name == "items":
flag_items = True
self.add_items(value)
else:
cls_value = type(value)
if cls_value in self.CLASSES:
l_name = [item.get_name() for item in self.items]
name_new = value.get_name()
if name_new in l_name:
self.items.pop(l_name.index(name))
self.items.append(value)
flag_items, flag_direct = True, False
if name_new != name:
warn(f"Access to variable by '{name_new:}'.", UserWarning)
if flag_items:
pass
elif flag_direct:
self.__dict__[name] = value
else:
self.__dict__[name] = val_new
def add_items(self, items: list):
"""Add items."""
l_name = [item.get_name() for item in items]
s_name = set(l_name)
if len(s_name) != len(l_name):
warn("Double items were given.", UserWarning)
items_unique = [items[l_name.index(name)] for name in s_name]
else:
items_unique = items
l_ind_del = []
for ind_item, item in enumerate(self.items):
if item.get_name() in s_name:
l_ind_del.append(ind_item)
l_ind_del.reverse()
for ind in l_ind_del:
self.items.pop(ind)
for item in items_unique:
if isinstance(item, self.CLASSES):
self.items.append(item)
@classmethod
def make_container(cls, cls_mandatory, cls_optional, prefix):
"""Create DataN object as a container for items."""
if cls is not DataN:
warn("The method 'make_container' is used only for DataN class.")
return
obj = cls()
obj.__dict__["CLASSES_MANDATORY"] = cls_mandatory
obj.__dict__["CLASSES_OPTIONAL"] = cls_optional
obj.__dict__["CLASSES"] = cls_mandatory+cls_optional
obj.__dict__["PREFIX"] = prefix
obj.__dict__["D_DEFAULT"] = {}
obj.__dict__["items"] = []
obj.__dict__["data_name"] = ""
return obj
@classmethod
def get_mandatory_attributes(cls, separator: str = "_"):
"""Get a list of mandatory attributes from mandatory classes."""
l_res = []
for cls_obj in cls.CLASSES_MANDATORY:
if issubclass(cls_obj, ItemN):
cls_item = cls_obj
else: #LoopN
cls_item = cls_obj.ITEM_CLASS
l_res.extend([f"{cls_item.PREFIX:}{separator:}{name_cif:}"
for name_cif in cls_item.ATTR_MANDATORY_CIF])
return l_res
def __getitem__(self, name: Union[int, str]):
"""
Get item by index or predefined index.
Parameters
----------
name : TYPE
DESCRIPTION.
Returns
-------
TYPE
DESCRIPTION.
"""
if isinstance(name, int):
return self.items[name]
elif isinstance(name, str):
for item in self.items:
if name.lower() == item.get_name():
return item
return None
def get_name(self) -> str:
"""Name of object."""
name = self.PREFIX
data_name = self.data_name
if data_name is not None:
name = f"{name:}_{data_name:}"
return name.lower()
def get_variable_names(self) -> list:
"""
Get names of variable as a list.
(((#prefix, #NAME), (#prefix, #NAME), (#attribute, #index))
Returns
-------
list
List of names of variable.
"""
prefix = self.PREFIX
data_name = self.data_name
l_var = []
for item in self.items:
l_var.extend(item.get_variable_names())
l_var_out = [((prefix, data_name), ) + var for var in l_var]
return l_var_out
def is_variables(self) -> bool:
"""Define is there variables or not."""
flag = False
for item in self.items:
if item.is_variables():
flag = True
break
return flag
def get_variable_by_name(self, name: tuple) -> Union[float, int, str]:
"""
Get variable given by name.
Parameters
----------
name : tuple
(((#prefix, #data_name), (#prefix, #loop_name),
(#attribute, #index_item))
Returns
-------
Union[float, int, str]
DESCRIPTION.
"""
prefix = self.PREFIX
data_name = self.data_name
prefix_d, prefix_n = name[0], name[1]
if prefix_d != (prefix, data_name):
return None
name_sh = tuple(name[1:])
for item in self.items:
if isinstance(item, ItemN):
prefix = item.PREFIX
elif isinstance(item, LoopN):
item_cls = item.ITEM_CLASS
if item_cls is ItemN:
prefix = item[0].PREFIX
else:
prefix = item_cls.PREFIX
else:
raise AttributeError(
f"Unknown type object '{type(item).__name__:}'")
if prefix == prefix_n[0]:
res = item.get_variable_by_name(name_sh)
if res is not None:
return res
return None
def set_variable_by_name(self, name: tuple, value) -> NoReturn:
"""
Set value to variable given by name.
Parameters
----------
name : tuple
DESCRIPTION.
value : TYPE
DESCRIPTION.
Returns
-------
NoReturn
DESCRIPTION.
"""
prefix = self.PREFIX
data_name = self.data_name
prefix_d, prefix_n = name[0], name[1]
if prefix_d != (prefix, data_name):
return
name_sh = tuple(name[1:])
for item in self.items:
if isinstance(item, ItemN):
prefix = item.PREFIX
elif isinstance(item, LoopN):
item_cls = item.ITEM_CLASS
if item_cls is ItemN:
prefix = item[0].PREFIX
else:
prefix = item_cls.PREFIX
else:
raise AttributeError(
f"Unknown type object '{type(item).__name__:}'")
if prefix == prefix_n[0]:
item.set_variable_by_name(name_sh, value)
def is_defined(self) -> bool:
"""
If all mandatory attributes is defined.
Returns
-------
bool
DESCRIPTION.
"""
flag = True
for item in self.items:
if not(item.is_defined()):
flag = False
if isinstance(item, ItemN):
warn(f"{item.PREFIX:} is not fully described.",
UserWarning)
break
elif isinstance(item, LoopN):
warn(f"{item.ITEM_CLASS.PREFIX:} is not fully described.",
UserWarning)
break
if flag:
cls_items = [type(item) for item in self.items]
for cls_mand in self.CLASSES_MANDATORY:
if not(cls_mand in cls_items):
flag = False
warn(f"The object of {cls_mand.__name__:} is not defined.",
UserWarning)
break
return flag
def form_object(self):
"""Form object."""
pass
def to_cif(self, separator="_") -> str:
"""Print information about object in string in STAR format.
Arguments
---------
prefix: prefix in front of label of attribute
separator: separator between prefix and attribute ("_" or ".")
flag: for undefined attribute "." will be printed
flag_minimal if it's True the minimal set of object will be printed
Returns
-------
A string in STAR/CIF format
"""
ls_out = []
if self.data_name is None:
ls_out.append("data_\n")
else:
ls_out.append(f"data_{self.data_name:}\n")
l_item = self.items
l_s_itemn = [item.to_cif(separator=separator)+"\n"
for item in l_item if isinstance(item, ItemN)]
l_s_loopn = [item.to_cif(separator=separator)+"\n"
for item in l_item if isinstance(item, LoopN)]
if l_s_loopn != []:
n_max_loop = max([len(_) for _ in l_s_loopn])
if n_max_loop < 1000:
n_max_loop = 1000
else:
n_max_loop = 10000
l_n_max_item = [len(_) for _ in l_s_itemn]
ls_out.extend([_1 for _1, _2 in zip(l_s_itemn, l_n_max_item)
if _2 <= n_max_loop])
ls_out.extend([_ for _ in l_s_loopn])
ls_out.extend([_1 for _1, _2 in zip(l_s_itemn, l_n_max_item)
if _2 > n_max_loop])
return "\n".join(ls_out)
@classmethod
def from_cif(cls, string: str):
"""Generate object from string of CIF format."""
cif_data = Data()
flag = cif_data.take_from_string(string)
cif_items = cif_data.items
cif_loops = cif_data.loops
items = []
flag = True
n_mandatory = len(cls.CLASSES_MANDATORY)
for i_cls, cls_ in enumerate(cls.CLASSES):
flag = i_cls >= n_mandatory
if issubclass(cls_, ItemN):
prefix_cls = cls_.PREFIX
if cif_items.is_prefix(prefix_cls):
cif_items_prefix = cif_items[prefix_cls]
cif_string = str(cif_items_prefix)
obj_prefix = cls_.from_cif(cif_string)
if obj_prefix is not None:
items.append(obj_prefix)
flag = True
elif issubclass(cls_, LoopN):
prefix_cls = cls_.ITEM_CLASS.PREFIX
for cif_loop in cif_loops:
if cif_loop.is_prefix("_"+prefix_cls):
cif_string = str(cif_loop)
obj_prefix = cls_.from_cif(cif_string)
if obj_prefix is not None:
items.append(obj_prefix)
flag = True
if (not(flag)):
warn(f"Mandatory class: '{cls_.__name__:}' is not given.",
UserWarning)
break
if not(flag):
return None
data_name = cif_data.name
obj = cls(data_name=data_name, items=items)
obj.form_object()
return obj
@classmethod
def from_cif_file(cls, f_name: str):
"""Read from cif file."""
if not(os.path.isfile(f_name)):
raise UserWarning(f"File {f_name:} is not found.")
return None
str_from_cif = str(to_data(f_name))
obj = cls.from_cif(str_from_cif)
obj.file_input = f_name
return obj
def copy(self, data_name: str = ""):
"""Deep copy of object with new data name."""
s_cif = self.to_cif()
obj_new = type(self).from_cif(s_cif)
obj_new.data_name = data_name
return obj_new
def report(self):
return ""
def report_html(self):
return md_to_html(self.report())
def plots(self):
l_res = []
for item in self.items:
for plot in item.plots():
if plot is not None:
l_res.append(plot)
return l_res
def fix_variables(self):
"""Fix variables."""
for item in self.items:
item.fix_variables()
def set_variable(self, name: str, index=None):
"""Set refinement for variable given by name.
Index parameters is used only for objects given as a matrix.
"""
name_sh = name.strip(".").lower()
l_name = name_sh.split(".")
name_1 = l_name[0]
for item in self.items:
if name_1 == item.get_name():
if len(l_name) == 1:
attr_refs = []
if isinstance(item, ItemN):
attr_refs = item.ATTR_REF
elif isinstance(item, LoopN):
item_class = item.ITEM_CLASS
if item_class is ItemN:
if len(self.items) != 0:
attr_refs = item.items[0].ATTR_REF
else:
attr_refs = item_class.ATTR_REF
for attr_ref in attr_refs:
item.set_variable(attr_ref, index=index)
else:
item.set_variable(".".join(l_name[1:]), index=index)
| [
"pycifstar.to_data",
"os.path.isfile",
"warnings.warn",
"cryspy.A_functions_base.function_1_objects.get_table_html_for_variables",
"cryspy.A_functions_base.function_1_objects.get_functions_of_objet",
"pycifstar.Data"
] | [((13869, 13875), 'pycifstar.Data', 'Data', ([], {}), '()\n', (13873, 13875), False, 'from pycifstar import Data, to_data\n'), ((1332, 1366), 'cryspy.A_functions_base.function_1_objects.get_table_html_for_variables', 'get_table_html_for_variables', (['self'], {}), '(self)\n', (1360, 1366), False, 'from cryspy.A_functions_base.function_1_objects import get_functions_of_objet, get_table_html_for_variables\n'), ((5519, 5564), 'warnings.warn', 'warn', (['"""Double items were given."""', 'UserWarning'], {}), "('Double items were given.', UserWarning)\n", (5523, 5564), False, 'from warnings import warn\n'), ((6243, 6308), 'warnings.warn', 'warn', (['"""The method \'make_container\' is used only for DataN class."""'], {}), '("The method \'make_container\' is used only for DataN class.")\n', (6247, 6308), False, 'from warnings import warn\n'), ((15516, 15538), 'os.path.isfile', 'os.path.isfile', (['f_name'], {}), '(f_name)\n', (15530, 15538), False, 'import os\n'), ((15655, 15670), 'pycifstar.to_data', 'to_data', (['f_name'], {}), '(f_name)\n', (15662, 15670), False, 'from pycifstar import Data, to_data\n'), ((2385, 2413), 'cryspy.A_functions_base.function_1_objects.get_functions_of_objet', 'get_functions_of_objet', (['self'], {}), '(self)\n', (2407, 2413), False, 'from cryspy.A_functions_base.function_1_objects import get_functions_of_objet, get_table_html_for_variables\n'), ((15114, 15185), 'warnings.warn', 'warn', (['f"""Mandatory class: \'{cls_.__name__:}\' is not given."""', 'UserWarning'], {}), '(f"Mandatory class: \'{cls_.__name__:}\' is not given.", UserWarning)\n', (15118, 15185), False, 'from warnings import warn\n'), ((11486, 11546), 'warnings.warn', 'warn', (['f"""{item.PREFIX:} is not fully described."""', 'UserWarning'], {}), "(f'{item.PREFIX:} is not fully described.', UserWarning)\n", (11490, 11546), False, 'from warnings import warn\n'), ((12016, 12088), 'warnings.warn', 'warn', (['f"""The object of {cls_mand.__name__:} is not defined."""', 
'UserWarning'], {}), "(f'The object of {cls_mand.__name__:} is not defined.', UserWarning)\n", (12020, 12088), False, 'from warnings import warn\n'), ((5099, 5157), 'warnings.warn', 'warn', (['f"""Access to variable by \'{name_new:}\'."""', 'UserWarning'], {}), '(f"Access to variable by \'{name_new:}\'.", UserWarning)\n', (5103, 5157), False, 'from warnings import warn\n'), ((11664, 11735), 'warnings.warn', 'warn', (['f"""{item.ITEM_CLASS.PREFIX:} is not fully described."""', 'UserWarning'], {}), "(f'{item.ITEM_CLASS.PREFIX:} is not fully described.', UserWarning)\n", (11668, 11735), False, 'from warnings import warn\n')] |
import os
import unittest
def require(f):
def skipit(*args, **kwargs):
raise unittest.SkipTest('VIVBINS env var...')
if os.getenv('VIVBINS') == None:
return skipit
return f
| [
"unittest.SkipTest",
"os.getenv"
] | [((90, 129), 'unittest.SkipTest', 'unittest.SkipTest', (['"""VIVBINS env var..."""'], {}), "('VIVBINS env var...')\n", (107, 129), False, 'import unittest\n'), ((138, 158), 'os.getenv', 'os.getenv', (['"""VIVBINS"""'], {}), "('VIVBINS')\n", (147, 158), False, 'import os\n')] |
from setuptools import setup
from setuptools.command.install import install as _install
class Install(_install):
def run(self):
_install.do_egg_install(self)
import nltk
nltk.download("popular")
setup(
cmdclass={'install': Install},
install_requires=['nltk'],
setup_requires=['nltk'])
| [
"setuptools.setup",
"nltk.download",
"setuptools.command.install.install.do_egg_install"
] | [((227, 319), 'setuptools.setup', 'setup', ([], {'cmdclass': "{'install': Install}", 'install_requires': "['nltk']", 'setup_requires': "['nltk']"}), "(cmdclass={'install': Install}, install_requires=['nltk'],\n setup_requires=['nltk'])\n", (232, 319), False, 'from setuptools import setup\n'), ((142, 171), 'setuptools.command.install.install.do_egg_install', '_install.do_egg_install', (['self'], {}), '(self)\n', (165, 171), True, 'from setuptools.command.install import install as _install\n'), ((200, 224), 'nltk.download', 'nltk.download', (['"""popular"""'], {}), "('popular')\n", (213, 224), False, 'import nltk\n')] |