id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
381905 | <gh_stars>1-10
from EvaMap.Dimensions.Dimension import Dimension
from EvaMap.Metrics.sameAs import sameAs
from EvaMap.Metrics.externalURIs import externalURIs
from EvaMap.Metrics.externalLink import externalLink
from EvaMap.Metrics.localLinks import localLinks
class Connectability(Dimension):
    """Connectability dimension: groups the link-related quality metrics."""

    # Default metric set for this dimension (immutable so it can't be
    # accidentally mutated between instances).
    _DEFAULT_METRICS = (sameAs, externalURIs, localLinks, externalLink)

    def __init__(self, nom='Connectability', list_metrics=None):
        """Build the dimension.

        :param nom: display name of the dimension
        :param list_metrics: metric classes to evaluate; defaults to the
            standard connectability metrics when None.
        """
        # The previous mutable-list default would have been shared across
        # every instance; build a fresh list per instance instead.
        if list_metrics is None:
            list_metrics = list(self._DEFAULT_METRICS)
        super().__init__(nom, list_metrics)
| StarcoderdataPython |
1986965 | # 1. import dependancies
from flask import Flask
from flask import jsonify
import datetime as dt
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect, distinct
#-----------------------------------------------------
# 2. Database: reflect the SQLite schema into mapped classes.
engine = create_engine("sqlite:///./Resources/hawaii.sqlite")
Base = automap_base()
Base.prepare(engine, reflect=True)
# Reflected table handles used by the route handlers below.
Measurement = Base.classes.measurement
Station = Base.classes.station
# 3. Create an app, using pass __name__
app = Flask(__name__)
# 4. Define what to do when a user hits the different routes
@app.route("/")
def home():
    """List all available api routes."""
    # Previously the two date-range routes below were implemented but
    # missing from this listing; the useless f-prefixes are also gone.
    # Angle brackets are HTML-escaped so they render in a browser.
    return (
        "Available Routes:<br/>"
        "/api/v1.0/precipitation<br/>"
        "/api/v1.0/stations<br/>"
        "/api/v1.0/tobs<br/>"
        "/api/v1.0/&lt;start&gt;<br/>"
        "/api/v1.0/&lt;start&gt;/&lt;end&gt;<br/>"
    )
# app route Precipitation
@app.route("/api/v1.0/precipitation")
def precipitation():
    """Return date/precipitation pairs for every measurement on record."""
    session = Session(engine)
    results = session.query(Measurement.date, Measurement.prcp).all()
    session.close()
    # One {"date": ..., "prcp": ...} dict per measurement row.
    weather_data = [{'date': date, 'prcp': prcp} for date, prcp in results]
    return jsonify(weather_data)
# app route Stations
@app.route("/api/v1.0/stations")
def stations():
    """Return the list of station identifiers."""
    session = Session(engine)
    results = session.query(Station.station).all()
    session.close()
    # Each row is a 1-tuple; flatten to a plain list of station ids.
    station_ids = [row[0] for row in results]
    return jsonify(station_ids)
# app route TOBS
@app.route("/api/v1.0/tobs")
def tobs():
    """Temperature observations for station USC00519281 over the final
    year of data (last date in the dataset is 2017-08-23)."""
    session = Session(engine)
    cutoff_date = dt.date(2017, 8, 23) - dt.timedelta(days=365)
    observations = session.query(Measurement.date, Measurement.tobs).\
        filter(Measurement.date >= cutoff_date).\
        filter(Measurement.station == 'USC00519281').\
        order_by(Measurement.date.desc()).all()
    session.close()
    return jsonify(observations)
# app route start and end
@app.route("/api/v1.0/<start>")
def start_date(start):
    """Min/avg/max temperature from ``start`` through the end of the data."""
    session = Session(engine)
    temp_results = session.query(
        func.min(Measurement.tobs),
        func.avg(Measurement.tobs),
        func.max(Measurement.tobs),
    ).filter(Measurement.date >= start).all()
    session.close()
    return jsonify(temp_results)
@app.route("/api/v1.0/<start>/<end>")
def start_end_date(start, end):
    """Min/avg/max temperature for dates between ``start`` and ``end``
    (both inclusive)."""
    session = Session(engine)
    temp_results = session.query(
        func.min(Measurement.tobs),
        func.avg(Measurement.tobs),
        func.max(Measurement.tobs),
    ).filter(Measurement.date >= start).\
        filter(Measurement.date <= end).all()
    session.close()
    return jsonify(temp_results)
# 6. Run the app (Flask debug server — not for production use)
if __name__ == "__main__":
    app.run(debug=True)
| StarcoderdataPython |
8165415 | <reponame>dirodriguezm/APF
from apf.core.step import {{step_class}} as Step
import logging
# NOTE(review): this is an APF code-generation template; the {{...}}
# markers are substituted before this file becomes valid Python.
class {{step_name}}(Step):
    """{{step_name}} Description

    Parameters
    ----------
    config : dict
        Configuration for the step and its components

    **step_args : type
        Other args passed to step (DB connections, API requests, etc.)
    """

    def __init__(self, config = None, level = logging.INFO, **step_args):
        # NOTE(review): **step_args is accepted but not forwarded to
        # super().__init__() — confirm whether that is intentional.
        super().__init__(config=config, level=level)
| StarcoderdataPython |
8187417 | <gh_stars>1-10
from Logic.Data.DataManager import Writer
from Logic.Data.DataManager import Reader
class StreamEntry:
    """Serializer for a single stream (inbox) entry."""

    def encode(self: Writer, info):
        """Write the fields of ``info`` to this Writer in wire order.

        ``info`` is expected to carry 'StreamID' and 'PlayerID' as
        (high, low) integer pairs plus 'PlayerName' and 'PlayerRole'.
        Field order matters: it must match the client's decoder.
        """
        self.writeLogicLong(info['StreamID'][0], info['StreamID'][1])  # StreamEntryID
        self.writeLogicLong(info['PlayerID'][0], info['PlayerID'][1])  # TargetID
        self.writeString(info['PlayerName'])
        self.writeVint(info['PlayerRole'])
        # NOTE(review): meaning of the trailing constant fields is not
        # visible here — presumably protocol padding/flags; confirm.
        self.writeVint(0)
        self.writeBoolean(False)
| StarcoderdataPython |
5056103 |
# %%
# ---- Training configuration ----
fold_number = 3  # which cross-validation fold to train on
tta = 20  # number of test-time-augmentation passes
# Per-device batch size, keyed by accelerator (resolved later).
batch_size = {
    "tpu": 8,  # x8
    "gpu": 16,  # 10 without AMP
    "cpu": 4,
}
arch = "efficientnet-b5"
resolution = 456  # original res for B5
input_res = 512
lr = 1e-5  # * batch_size
weight_decay = 2e-5
pos_weight = 3.2  # positive-class weight for the BCE loss
label_smoothing = 0.03
max_epochs = 4
# %%
#!pip install --upgrade wandb
#!wandb login 6ff8d5e5bd920e68d1f76b574f1880278b4<PASSWORD>
# %% [markdown]
# # Install modules
#
# Update PyTorch to enable its native support to Mixed Precision or XLA for TPU
# %%
# import wandb
# from pytorch_lightning.loggers import WandbLogger
# %% [markdown]
# # Hardware lookup
# %%
# import os
# import collections
# from datetime import datetime, timedelta
# if 'TPU_NAME' in os.environ.keys():
# !curl https://raw.githubusercontent.com/pytorch/xla/master/contrib/scripts/env-setup.py -o pytorch-xla-env-setup.py
# python3 pytorch-xla-env-setup.py --version nightly --apt-packages libomp5 libopenblas-dev
# os.environ["XRT_TPU_CONFIG"] = "tpu_worker;0;10.0.0.2:8470"
# _VersionConfig = collections.namedtuple('_VersionConfig', 'wheels,server')
# VERSION = "torch_xla==nightly"
# CONFIG = {
# 'torch_xla==nightly': _VersionConfig('nightly', 'XRT-dev{}'.format(
# (datetime.today() - timedelta(1)).strftime('%Y%m%d')))}[VERSION]
# DIST_BUCKET = 'gs://tpu-pytorch/wheels'
# TORCH_WHEEL = 'torch-{}-cp36-cp36m-linux_x86_64.whl'.format(CONFIG.wheels)
# TORCH_XLA_WHEEL = 'torch_xla-{}-cp36-cp36m-linux_x86_64.whl'.format(CONFIG.wheels)
# TORCHVISION_WHEEL = 'torchvision-{}-cp36-cp36m-linux_x86_64.whl'.format(CONFIG.wheels)
# !export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH
# !apt-get install libomp5 -y
# !apt-get install libopenblas-dev -y
# !pip3 uninstall -y torch torchvision
# !gsutil cp "$DIST_BUCKET/$TORCH_WHEEL" .
# !gsutil cp "$DIST_BUCKET/$TORCH_XLA_WHEEL" .
# !gsutil cp "$DIST_BUCKET/$TORCHVISION_WHEEL" .
# !pip3 install "$TORCH_WHEEL"
# !pip3 install "$TORCH_XLA_WHEEL"
# !pip3 install "$TORCHVISION_WHEEL"
# else:
# print(f'No TPU Baby!')
# %%
# %%capture
#!pip3 install -U pip albumentations==0.4.5 PyYAML pytorch-lightning==0.8.5 efficientnet_pytorch
# Update PyTorch to enable its native support to Mixed Precision
#!pip3 install --pre torch==1.7.0.dev20200701+cu101 torchvision==0.8.0.dev20200701+cu101 -f https://download.pytorch.org/whl/nightly/cu101/torch_nightly.html
# %%
import os
import torch

# Accelerator discovery: prefer a TPU when torch_xla is importable,
# otherwise fall back to a single GPU (if CUDA is available) or the CPU.
num_workers = os.cpu_count()
gpus = 1 if torch.cuda.is_available() else None
try:
    import torch_xla
    import torch_xla.utils.utils as xu
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met
    import torch_xla.distributed.data_parallel as dp
    # NOTE(review): this aliases `pl`, which is reassigned to
    # pytorch_lightning by a later import — harmless in current order.
    import torch_xla.distributed.parallel_loader as pl
    import torch_xla.distributed.xla_multiprocessing as xmp

    tpu_cores = 8  # xm.xrt_world_size()
except Exception:  # was a bare `except:`; keep best-effort but stop
    # swallowing KeyboardInterrupt/SystemExit.
    tpu_cores = None
# %%
# Resolve the per-device batch size for whichever accelerator was
# detected, then scale the learning rate by the effective batch size.
if isinstance(batch_size, dict):
    if tpu_cores:
        batch_size = batch_size["tpu"]
        lr *= tpu_cores  # scale for the 8-way data-parallel TPU setup
        num_workers = 1
    elif gpus:
        batch_size = batch_size["gpu"]
        # support for free Colab GPU's
        if "K80" in torch.cuda.get_device_name():
            batch_size = batch_size // 3
        elif "T4" in torch.cuda.get_device_name():
            batch_size = int(batch_size * 0.66)
    else:
        batch_size = batch_size["cpu"]
    # Linear LR scaling rule (base lr was defined as "1e-5 * batch_size").
    lr *= batch_size
print(
    dict(
        num_workers=num_workers,
        tpu_cores=tpu_cores,
        gpus=gpus,
        batch_size=batch_size,
        lr=lr,
    )
)
# check for torch's native mixed precision support (pt1.6+)
if gpus and not hasattr(torch.cuda, "amp"):
    try:
        from apex import amp
    except ImportError:  # was a bare `except:`; apex missing — build it.
        # NOTE(review): get_ipython() only exists inside IPython/Jupyter.
        get_ipython().system("git clone https://github.com/NVIDIA/apex nv_apex")
        get_ipython().system(
            'pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./nv_apex'
        )
        from apex import amp
# with PyTorch Lightning all you need to do now is set precision=16
# with PyTorch Lightning all you need to do now is set precision=16
# %% [markdown]
# # Imports
# %%
import os
import time
import random
from datetime import datetime
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import cv2
from skimage import io
import albumentations as A
from albumentations.pytorch.transforms import ToTensorV2
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from glob import glob
import sklearn
import pytorch_lightning as pl
import warnings
warnings.filterwarnings("ignore")
warnings.filterwarnings("ignore", category=DeprecationWarning)
from dataset import load_datasets
from utils import get_train_transforms, get_valid_transforms, get_tta_transforms
from data import *
from pathlib import Path
from fastprogress import progress_bar as tqdm
# Directory where this fold's Lightning checkpoints are written.
SAVE_DIR = OUT / f"pl/fold_{fold_number}"
SAVE_DIR.mkdir(exist_ok=True, parents=True)
print(torch.__version__)
# %% [markdown]
# # Setup dataset
# %%
# Fold assignment and target label per image id for the (upsampled)
# training set.
df_folds = pd.read_csv(
    f"{DATA}/upsample.csv",
    index_col="image_id",
    usecols=["image_id", "fold", "target"],
    dtype={"fold": np.byte, "target": np.byte},
)
# Quick sanity check: per-fold target distribution should look balanced.
_ = df_folds.groupby("fold").target.hist(alpha=0.4)
df_folds.groupby("fold").target.mean().to_frame("ratio").T
# %%
df_test = pd.read_csv(f"{DATA}/test.csv", index_col="image_name")
# %%
# Project helper returns the train/val/test Dataset objects for this fold.
ds_train, ds_val, ds_test = load_datasets(fold_number)
# %%
len(ds_train), len(ds_val), len(ds_test)
# %% [markdown]
# # Model
# %%
from efficientnet_pytorch import EfficientNet
from pytorch_lightning.metrics.classification import AUROC
from sklearn.metrics import roc_auc_score
class Model(pl.LightningModule):
    """EfficientNet-based binary classifier for the melanoma task.

    Reads the module-level config (``arch``, ``lr``, ``weight_decay``,
    ``pos_weight``, ``label_smoothing``, ``max_epochs``, ``batch_size``,
    ``num_workers``) and the module-level datasets ``ds_train``/
    ``ds_val``/``ds_test``, so it must be constructed after those exist.
    """

    def __init__(self, *args, **kwargs):
        super().__init__()
        # Pretrained AdvProp backbone; replace the classifier head with a
        # single logit for binary classification.
        self.net = EfficientNet.from_pretrained(arch, advprop=True)
        self.net._fc = nn.Linear(
            in_features=self.net._fc.in_features, out_features=1, bias=True
        )

    def forward(self, x):
        # Raw (un-sigmoided) logits of shape (batch, 1).
        return self.net(x)

    def configure_optimizers(self):
        optimizer = torch.optim.AdamW(
            self.parameters(), lr=lr, weight_decay=weight_decay
        )
        # NOTE(review): OneCycleLR is parameterised per-batch via
        # steps_per_epoch, but is returned without an interval hint, so
        # Lightning steps it once per epoch by default — confirm intended.
        scheduler = torch.optim.lr_scheduler.OneCycleLR(
            max_lr=lr,
            epochs=max_epochs,
            optimizer=optimizer,
            steps_per_epoch=int(len(ds_train) / batch_size),
            pct_start=0.1,
            div_factor=10,
            final_div_factor=100,
            base_momentum=0.90,
            max_momentum=0.95,
        )
        return [optimizer], [scheduler]

    def step(self, batch):
        # return batch loss
        x, y = batch
        y_hat = self(x).flatten()
        # Smooth the hard 0/1 labels toward 0.5 before the weighted BCE.
        y_smo = y.float() * (1 - label_smoothing) + 0.5 * label_smoothing
        loss = F.binary_cross_entropy_with_logits(
            y_hat, y_smo.type_as(y_hat), pos_weight=torch.tensor(pos_weight)
        )
        # Return (loss, hard labels, sigmoid probabilities) for metrics.
        return loss, y, y_hat.sigmoid()

    def training_step(self, batch, batch_nb):
        # hardware agnostic training
        loss, y, y_hat = self.step(batch)
        acc = (y_hat.round() == y).float().mean().item()
        tensorboard_logs = {"train_loss": loss, "acc": acc}
        return {"loss": loss, "acc": acc, "log": tensorboard_logs}

    def validation_step(self, batch, batch_nb):
        loss, y, y_hat = self.step(batch)
        return {"val_loss": loss, "y": y.detach(), "y_hat": y_hat.detach()}

    def validation_epoch_end(self, outputs):
        avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean()
        y = torch.cat([x["y"] for x in outputs])
        y_hat = torch.cat([x["y_hat"] for x in outputs])
        # AUROC is undefined when there are no positive labels, which
        # happens during Lightning's sanity-check pass; report 0.5 then.
        auc = (
            AUROC()(pred=y_hat, target=y) if y.float().mean() > 0 else 0.5
        )  # skip sanity check
        acc = (y_hat.round() == y).float().mean().item()
        print(f"Epoch {self.current_epoch} acc:{acc} auc:{auc}")
        tensorboard_logs = {"val_loss": avg_loss, "val_auc": auc, "val_acc": acc}
        return {
            "avg_val_loss": avg_loss,
            "val_auc": auc,
            "val_acc": acc,
            "log": tensorboard_logs,
        }

    def test_step(self, batch, batch_nb):
        x, _ = batch
        y_hat = self(x).flatten().sigmoid()
        return {"y_hat": y_hat}

    def test_epoch_end(self, outputs):
        # Writes one submissionN.csv per test pass; N increments so
        # repeated TTA runs don't overwrite each other.
        y_hat = torch.cat([x["y_hat"] for x in outputs])
        assert len(df_test) == len(y_hat), f"{len(df_test)} != {len(y_hat)}"
        df_test["target"] = y_hat.tolist()
        N = len(glob("submission*.csv"))
        df_test.target.to_csv(f"submission{N}.csv")
        return {"tta": N}

    def train_dataloader(self):
        # drop_last keeps steps-per-epoch consistent with the OneCycleLR
        # steps_per_epoch computed above.
        return DataLoader(
            ds_train,
            batch_size=batch_size,
            num_workers=num_workers,
            drop_last=True,
            shuffle=True,
            pin_memory=False,
        )

    def val_dataloader(self):
        return DataLoader(
            ds_val,
            batch_size=batch_size,
            num_workers=num_workers,
            drop_last=False,
            shuffle=False,
            pin_memory=False,
        )

    def test_dataloader(self):
        return DataLoader(
            ds_test,
            batch_size=batch_size,
            num_workers=num_workers,
            drop_last=False,
            shuffle=False,
            pin_memory=False,
        )
# %%
# Resume support: pick the newest checkpoint in SAVE_DIR. Filenames are
# expected to start with an integer epoch prefix, e.g. "3_0.9123.ckpt".
checkpoint = sorted(list(SAVE_DIR.iterdir()), key=lambda x: int(x.stem.split("_")[0]))
checkpoint = str(checkpoint[-1]) if len(checkpoint) else None
# %%
model = Model()  # .load_from_checkpoint(str(checkpoint))
# %%
# Plot some training images
import torchvision.utils as vutils

batch, targets = next(iter(model.train_dataloader()))
plt.figure(figsize=(16, 8))
plt.axis("off")
plt.title("Training Images")
_ = plt.imshow(
    vutils.make_grid(batch[:16], nrow=8, padding=2, normalize=True)
    .cpu()
    .numpy()
    .transpose((1, 2, 0))
)
# Show labels in the same 2x8 grid layout as the image preview above.
targets[:16].reshape([2, 8]) if len(targets) >= 16 else targets
# %%
# import wandb
# wandb.init(project='melanoma', tags=['lightning'], name='upsampled_full_data_tpu')
# wandb_logger = WandbLogger(project='melanoma', tags=['lightning'], name='upsampled_full_data_tpu')
# wandb.watch(model)
# %%
# # test the same images
# with torch.no_grad():
# print(model(batch[:16]).reshape([len(targets)//8,8]).sigmoid())
# del batch; del targets
# %% [markdown]
# # Train
# The Trainer automates the rest.
#
# Trains on 8 TPU cores, GPU or CPU - whatever is available.
# %%
# Keep only the best checkpoint, ranked by validation AUC.
checkpoint_callback = pl.callbacks.ModelCheckpoint(
    # NOTE(review): "{02d}" has no field name; this was likely meant to be
    # "{epoch:02d}" — confirm before changing, as resume parsing depends
    # on the integer prefix.
    filepath=SAVE_DIR / "{02d}_{val_auc:.4f}",
    save_top_k=1,
    monitor="val_auc",
    mode="max",
)
# Build the Trainer once; previously the whole pl.Trainer(...) call was
# duplicated with the only difference being resume_from_checkpoint.
trainer_kwargs = dict(
    default_root_dir=SAVE_DIR,
    tpu_cores=tpu_cores,
    gpus=gpus,
    precision=16,  # if gpus else 32,
    max_epochs=max_epochs,
    checkpoint_callback=checkpoint_callback,
    # logger=wandb_logger
)
if checkpoint:
    trainer_kwargs["resume_from_checkpoint"] = checkpoint
trainer = pl.Trainer(**trainer_kwargs)
# %%
# clean up gpu in case you are debugging
# import gc
# torch.cuda.empty_cache(); gc.collect()
# torch.cuda.empty_cache(); gc.collect()
# %%
# Launch training on whatever accelerator was detected (TPU/GPU/CPU).
trainer.fit(model)
| StarcoderdataPython |
291541 | <reponame>luiszeni/sigrapi18_gender_detection
import cv2
import numpy as np
from core.BoundBox import BoundBox
from math import sqrt
def convert_detections_to_my_imp(out_boxes, out_scores, out_classes, class_names, shape):
    """Convert raw detector output arrays into a list of BoundBox objects.

    Box corners are rounded to the nearest pixel and clamped to the image
    bounds read from ``shape`` (assumes height at shape[1] and width at
    shape[2], i.e. a batched tensor layout — TODO confirm with caller).
    """
    detections = []
    for idx, class_idx in enumerate(out_classes):
        label = class_names[class_idx]
        confidence = out_scores[idx]
        top, left, bottom, right = out_boxes[idx]
        # Round to the nearest integer pixel and clamp inside the image.
        top = max(0, np.floor(top + 0.5).astype('int32'))
        left = max(0, np.floor(left + 0.5).astype('int32'))
        bottom = min(shape[1], np.floor(bottom + 0.5).astype('int32'))
        right = min(shape[2], np.floor(right + 0.5).astype('int32'))
        detections.append(BoundBox(left, top, right, bottom, classId=label, pred=confidence))
    return detections
def normalize_img(img, model_image_size):
    """Resize to the model's input size and scale pixels into [0, 1].

    Returns a float32 array with a leading batch axis of size 1.
    """
    resized = cv2.resize(img, model_image_size, interpolation=cv2.INTER_CUBIC)
    scaled = np.array(resized, dtype='float32') / 255.
    return np.expand_dims(scaled, axis=0)
def image_resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    """Resize ``image`` to the given width and/or height.

    When only one dimension is given, the other is derived so the aspect
    ratio is preserved; when neither is given the original image is
    returned untouched.
    """
    src_h, src_w = image.shape[:2]
    if width is None and height is None:
        return image
    if width is None:
        # Derive width from the requested height, keeping aspect ratio.
        target = (int(src_w * (height / float(src_h))), height)
    elif height is None:
        # Derive height from the requested width, keeping aspect ratio.
        target = (width, int(src_h * (width / float(src_w))))
    else:
        target = (width, height)
    return cv2.resize(image, target, interpolation=inter)
# Greedy nearest-neighbour association of new detections to the previous
# frame's detections, so per-detection crops keep a stable ordering.
def the_worst_tracking(detections, detections_old, images):
    """Reorder ``detections``/``images`` to best match ``detections_old``.

    For each previous detection, greedily picks the unmatched new
    detection whose top-left corner is nearest (Euclidean distance);
    unmatched leftovers are appended at the end in their original order.

    Returns a ``(images, detections)`` pair in matched order. Fixes over
    the original: the caller's input lists are no longer mutated, the
    arbitrary 10000 px distance cap is replaced with infinity, and an
    empty ``detections`` list no longer raises IndexError.
    """
    if detections_old is None:
        return images, detections
    # Work on copies so the caller's lists are left intact.
    remaining_dets = list(detections)
    remaining_imgs = list(images)
    ordered_imgs = []
    ordered_dets = []
    for det_old in detections_old:
        if not remaining_imgs or not remaining_dets:
            break
        # Find the unmatched new detection closest to det_old
        # (ties keep the earliest candidate, as before).
        best_idx = 0
        best_dist = float('inf')
        for idx, det in enumerate(remaining_dets):
            dist = sqrt((det_old.xmin - det.xmin) ** 2 + (det_old.ymin - det.ymin) ** 2)
            if dist < best_dist:
                best_dist = dist
                best_idx = idx
        ordered_imgs.append(remaining_imgs.pop(best_idx))
        ordered_dets.append(remaining_dets.pop(best_idx))
    # Anything left over had no previous counterpart; keep original order.
    ordered_imgs.extend(remaining_imgs)
    ordered_dets.extend(remaining_dets)
    return ordered_imgs, ordered_dets
| StarcoderdataPython |
3527581 | <reponame>amplifylitco/asiaq
"""
This module contains logic that processes a VPC's peering connections
"""
import logging
from itertools import product
from sets import ImmutableSet
from boto.exception import EC2ResponseError
import boto3
# FIXME: Disabling complaint about relative-import. This seems to be the only
# way that works for unit tests.
# pylint: disable=W0403
import disco_vpc
from .disco_config import read_config
from .resource_helper import tag2dict, create_filters, throttled_call
from .exceptions import VPCPeeringSyntaxError, VPCConfigError
from .disco_constants import VPC_CONFIG_FILE
logger = logging.getLogger(__name__)

# Peering states that count as "live": they can carry routes and must be
# considered when diffing desired vs. existing peerings.
LIVE_PEERING_STATES = ["pending-acceptance", "provisioning", "active"]
class DiscoVPCPeerings(object):
    """
    This class takes care of processing of a VPC's peering connections
    """

    def __init__(self, boto3_ec2=None):
        # Allow injecting a pre-built EC2 client (e.g. for tests);
        # otherwise create one from the default session.
        if boto3_ec2:
            self.client = boto3_ec2
        else:
            self.client = boto3.client('ec2')

    def update_peering_connections(self, vpc, dry_run=False, delete_extra_connections=False):
        """ Update peering connections for a VPC """
        desired_peerings = self._get_peerings_from_config(vpc.get_vpc_id())
        existing_peerings = self._get_existing_peerings(vpc)

        def _check_peering_exists(peering_config, peerings):
            """Check if a peering exists for a peering config"""
            for peering in peerings:
                # Compare by the unordered pair of VPC ids: a peering is
                # the same regardless of which side is "source".
                peering_config_endpoints = {peering_config.source_endpoint.vpc['VpcId'],
                                            peering_config.target_endpoint.vpc['VpcId']}
                peering_endpoints = {peering.source_endpoint.vpc['VpcId'],
                                     peering.target_endpoint.vpc['VpcId']}
                if peering_config_endpoints == peering_endpoints:
                    return True
            return False

        missing_peerings = set.copy(desired_peerings)
        for peering in desired_peerings:
            if _check_peering_exists(peering, existing_peerings):
                missing_peerings.discard(peering)

        logger.info("Desired VPC peering connections: %s", desired_peerings)
        logger.info("Existing VPC peering connections: %s", existing_peerings)
        logger.info("Missing VPC peering connections: %s", missing_peerings)

        if delete_extra_connections and existing_peerings > desired_peerings:
            raise RuntimeError("Some existing VPC peering connections are not "
                               "defined in the configuration: {0}. Deletion of VPC peerings is "
                               "not implemented yet."
                               .format(existing_peerings - desired_peerings))

        if not dry_run and missing_peerings:
            self._create_peering_connections(missing_peerings)
            self._create_peering_routes(missing_peerings)

    def _get_existing_peerings(self, vpc):
        """
        Get the set of PeeringConnections for the existing peerings for given DiscoVPC object
        """
        current_peerings = set()

        for peering in self.list_peerings(vpc.get_vpc_id()):
            # BUG FIX: the warning below previously indexed into peer_vpc
            # (None in this branch), raising TypeError instead of warning.
            peer_vpc_id = self._get_peer_vpc_id(vpc.get_vpc_id(), peering)
            peer_vpc = self._find_peer_vpc(peer_vpc_id)
            if not peer_vpc:
                logger.warning("Failed to find the peer VPC (%s) associated with peering (%s). "
                               "If the VPC no longer exists, please delete the peering manually.",
                               peer_vpc_id, peering['VpcPeeringConnectionId'])
                continue

            for route_table in self._get_peering_route_tables(peering['VpcPeeringConnectionId']):
                tags_dict = tag2dict(route_table['Tags'])
                # Route table names follow the "<env>_<metanetwork>" scheme.
                subnet_env, subnet_network = tags_dict['Name'].split('_')[:2]
                if subnet_env == vpc.environment_name:
                    source_endpoint = PeeringEndpoint(
                        vpc.environment_name,
                        vpc.environment_type,
                        subnet_network,
                        vpc.vpc
                    )

                    # find the metanetwork of the peering connection by matching the peering routes with
                    # the CIDRs of the VPC metanetworks
                    route_cidrs = [
                        route['DestinationCidrBlock'] for route in route_table['Routes']
                        if route.get('VpcPeeringConnectionId') == peering['VpcPeeringConnectionId']
                    ]
                    peered_networks = [network for network in peer_vpc.networks.values()
                                       if str(network.network_cidr) in route_cidrs]

                    if peered_networks:
                        target_endpoint = PeeringEndpoint(
                            peer_vpc.environment_name,
                            peer_vpc.environment_type,
                            peered_networks[0].name,
                            peer_vpc.vpc
                        )
                        current_peerings.add(PeeringConnection(source_endpoint, target_endpoint))
        return current_peerings

    def _get_peering_route_tables(self, peering_conn_id):
        """ Get all of the route tables associated with a given peering connection """
        return throttled_call(
            self.client.describe_route_tables,
            Filters=create_filters({
                'route.vpc-peering-connection-id': [peering_conn_id]
            })
        )['RouteTables']

    def _get_peer_vpc_id(self, vpc_id, peering):
        """ Return the id of the VPC on the other side of the peering from vpc_id """
        accepter_vpcid = peering['AccepterVpcInfo']['VpcId']
        return accepter_vpcid if accepter_vpcid != vpc_id else peering['RequesterVpcInfo']['VpcId']

    def _find_peer_vpc(self, peer_vpc_id):
        """ Return a DiscoVPC object for the given VPC id, or None if it cannot be found """
        try:
            peer_vpc = throttled_call(self.client.describe_vpcs, VpcIds=[peer_vpc_id])['Vpcs'][0]
        except Exception:  # pylint: disable=broad-except
            # Best effort: the VPC may have been deleted out from under us.
            return None

        try:
            vpc_tags_dict = tag2dict(peer_vpc['Tags'])
            return disco_vpc.DiscoVPC(vpc_tags_dict['Name'], vpc_tags_dict['type'], peer_vpc)
        # BUG FIX: missing 'Tags'/'Name'/'type' raises KeyError, which the
        # original UnboundLocalError clause never caught.
        except (KeyError, UnboundLocalError):
            raise RuntimeError("VPC {0} is missing tags: 'Name', 'type'.".format(peer_vpc_id))

    def _create_peering_connections(self, peerings):
        """ Create peerings in AWS for the given PeeringConnection objects"""
        peering_conns = [self._create_peering_conn(peering) for peering in peerings]

        peering_conn_ids = [peering_conn['VpcPeeringConnectionId'] for peering_conn in peering_conns]

        # wait for the peering connection to be ready
        throttled_call(self.client.get_waiter('vpc_peering_connection_exists').wait,
                       VpcPeeringConnectionIds=peering_conn_ids,
                       Filters=[{'Name': 'status-code', 'Values': LIVE_PEERING_STATES}])

        for peering_conn in peering_conns:
            throttled_call(
                self.client.accept_vpc_peering_connection,
                VpcPeeringConnectionId=peering_conn['VpcPeeringConnectionId']
            )

    def _create_peering_conn(self, peering):
        """ Create a AWS peering connection for the given peering config object """
        return throttled_call(
            self.client.create_vpc_peering_connection,
            VpcId=peering.source_endpoint.vpc['VpcId'],
            PeerVpcId=peering.target_endpoint.vpc['VpcId']
        )['VpcPeeringConnection']

    def _create_peering_routes(self, peerings):
        """ create/update routes via peering connections between VPCs """
        # Index existing peering connection ids by both orderings of the
        # (requester, accepter) VPC-id pair for O(1) lookup below.
        connection_map = {}
        for peering_connection in self.list_peerings():
            source_target_key = '%s-%s' % (peering_connection['AccepterVpcInfo']['VpcId'],
                                           peering_connection['RequesterVpcInfo']['VpcId'])
            connection_map[source_target_key] = peering_connection['VpcPeeringConnectionId']

            target_source_key = '%s-%s' % (peering_connection['RequesterVpcInfo']['VpcId'],
                                           peering_connection['AccepterVpcInfo']['VpcId'])
            connection_map[target_source_key] = peering_connection['VpcPeeringConnectionId']

        for peering in peerings:
            source_vpc = disco_vpc.DiscoVPC(peering.source_endpoint.name,
                                            peering.source_endpoint.type,
                                            peering.source_endpoint.vpc)
            target_vpc = disco_vpc.DiscoVPC(peering.target_endpoint.name,
                                            peering.target_endpoint.type,
                                            peering.target_endpoint.vpc)

            source_network = source_vpc.networks[peering.source_endpoint.metanetwork]
            target_network = target_vpc.networks[peering.target_endpoint.metanetwork]

            peering_conn_key = '%s-%s' % (peering.source_endpoint.vpc['VpcId'],
                                          peering.target_endpoint.vpc['VpcId'])

            if peering_conn_key in connection_map:
                vpc_peering_conn_id = connection_map[peering_conn_key]
            else:
                raise RuntimeError('Peering connection %s not found. Cannot create routes' % peering_conn_key)

            # Route each side's metanetwork CIDR to the other side.
            source_network.create_peering_route(
                vpc_peering_conn_id,
                str(target_network.network_cidr)
            )
            target_network.create_peering_route(
                vpc_peering_conn_id,
                str(source_network.network_cidr)
            )

    def _delete_peering_routes(self, peering):
        """Delete routes related to a peering connection"""
        route_tables = self._get_peering_route_tables(peering['VpcPeeringConnectionId'])
        for route_table in route_tables:
            route_table_id = route_table['RouteTableId']
            peering_routes = [route for route in route_table['Routes']
                              if route.get('VpcPeeringConnectionId') == peering['VpcPeeringConnectionId']]
            for route in peering_routes:
                # NOTE: not wrapped in throttled_call here because the
                # caller throttles this whole helper.
                self.client.delete_route(
                    DestinationCidrBlock=route['DestinationCidrBlock'],
                    RouteTableId=route_table_id
                )

    def _get_peerings_from_config(self, vpc_id=None):
        """
        Parses configuration from disco_vpc.ini's peerings sections.
        If vpc_id is specified, only configuration relevant to vpc_id is included.
        """
        peering_configs = set()
        for peering in self._get_peering_lines():
            # resolve the peering line into a list of PeeringConnection objects
            # a single peering line might resolve to multiple peerings if there are wildcards
            resolved_peerings = self._resolve_peering_connection_line(peering)
            for resolved_peering in resolved_peerings:
                if vpc_id and not resolved_peering.contains_vpc_id(vpc_id):
                    logger.debug("Skipping peering %s because it doesn't include %s", peering, vpc_id)
                else:
                    peering_configs.add(resolved_peering)

        return peering_configs

    def _get_peering_lines(self):
        """ Return the raw, validated peering lines from the VPC config file """
        logger.debug("Parsing peerings configuration specified in %s", VPC_CONFIG_FILE)
        config = read_config(VPC_CONFIG_FILE)
        if 'peerings' not in config.sections():
            logger.info("No VPC peering configuration defined.")
            # Consistency fix: callers iterate the result, so return a
            # list here like the normal path does (was an empty dict).
            return []

        peerings = [
            peering[1]
            for peering in config.items('peerings')
            if peering[0].startswith('connection_')
        ]

        for peering in peerings:
            endpoints = [_.strip() for _ in peering.split(' ')]
            if len(endpoints) != 2:
                raise VPCPeeringSyntaxError(
                    "Syntax error in vpc peering connection. "
                    "Expected 2 space-delimited endpoints but found: '{}'".format(peering))

        return peerings

    def delete_peerings(self, vpc_id=None):
        """Delete peerings. If vpc_id is specified, delete all peerings of the VPCs only"""
        for peering in self.list_peerings(vpc_id):
            try:
                logger.info('deleting routes for peering connection %s', peering['VpcPeeringConnectionId'])
                throttled_call(self._delete_peering_routes, peering)

                logger.info('deleting peering connection %s', peering['VpcPeeringConnectionId'])
                throttled_call(
                    self.client.delete_vpc_peering_connection,
                    VpcPeeringConnectionId=peering['VpcPeeringConnectionId']
                )
            except EC2ResponseError:
                raise RuntimeError(
                    'Failed to delete VPC Peering connection {}'.format(peering['VpcPeeringConnectionId'])
                )

    def list_peerings(self, vpc_id=None, include_failed=False):
        """
        Return list of live vpc peering connection id.
        If vpc_id is given, return only that vpcs peerings
        Peerings that cannot be manipulated are ignored.
        """
        if vpc_id:
            # A VPC can appear on either side of a peering; query both.
            peerings = throttled_call(
                self.client.describe_vpc_peering_connections,
                Filters=create_filters({'requester-vpc-info.vpc-id': [vpc_id]})
            )['VpcPeeringConnections']

            peerings += throttled_call(
                self.client.describe_vpc_peering_connections,
                Filters=create_filters({'accepter-vpc-info.vpc-id': [vpc_id]})
            )['VpcPeeringConnections']
        else:
            peerings = throttled_call(self.client.describe_vpc_peering_connections)['VpcPeeringConnections']

        peering_states = LIVE_PEERING_STATES + (["failed"] if include_failed else [])
        return [
            peering
            for peering in peerings
            if peering['Status']['Code'] in peering_states
        ]

    def _resolve_peering_connection_line(self, line):
        """
        Resolve a peering connection line into a set of PeeringConnections. Expand any wildcards

        Args:
            line (str): A peering line like `vpc_name[:vpc_type]/metanetwork vpc_name[:vpc_type]/metanetwork`
                        `vpc_name` may be the name of a VPC or a `*` wildcard to peer with any VPC of vpc_type
        """
        # convert the config line into a PeeringConnection but it may contain wildcards
        unresolved_peering = PeeringConnection.from_peering_line(line)

        # get all VPCs created through Asiaq. Ones that have type and Name tags
        existing_vpcs = [vpc for vpc in throttled_call(self.client.describe_vpcs).get('Vpcs', [])
                         if all(tag in tag2dict(vpc.get('Tags', [])) for tag in ['type', 'Name'])]

        def resolve_endpoint(endpoint):
            """
            Convert a PeeringEndpoint that may contain wildcards into a list of PeeringEndpoints
            with wildcards resolved
            """
            endpoints = []
            for vpc in existing_vpcs:
                tags = tag2dict(vpc['Tags'])
                vpc_name = tags['Name']
                vpc_type = tags['type']

                if endpoint.match_vpc(vpc_name, vpc_type):
                    endpoints.append(PeeringEndpoint(vpc_name, vpc_type, endpoint.metanetwork, vpc))
            return endpoints

        # find the VPCs that match the peering config. Replace wildcards with real VPC names
        source_endpoints = resolve_endpoint(unresolved_peering.source_endpoint)
        target_endpoints = resolve_endpoint(unresolved_peering.target_endpoint)

        # generate new connection lines by peering the cross product of every source and target endpoint
        return {PeeringConnection(source, target)
                for source, target in product(source_endpoints, target_endpoints)
                # Don't peer a VPC with itself
                if not source == target}
class PeeringEndpoint(object):
    """
    One side of a PeeringConnection: a VPC (by name and type) plus the
    metanetwork being peered on that side.
    """

    def __init__(self, env_name, env_type, metanetwork, vpc=None):
        self.name = env_name
        self.type = env_type
        self.metanetwork = metanetwork
        self.vpc = vpc

    @staticmethod
    def from_endpoint_str(endpoint):
        """Parse a `name[:type]/metanetwork` string into a PeeringEndpoint."""
        pieces = endpoint.split('/')
        vpc_pieces = pieces[0].split(':')
        vpc_name = vpc_pieces[0].strip()
        # When no ":type" is given, the type falls back to the name itself.
        vpc_type = vpc_pieces[-1].strip()
        metanetwork = pieces[1].strip()

        if vpc_type == '*':
            raise VPCConfigError(
                'Wildcards are not allowed for VPC type in "%s". '
                'Please specify a VPC type when using a wild card for the VPC name' % endpoint
            )

        return PeeringEndpoint(vpc_name, vpc_type, metanetwork)

    def match_vpc(self, vpc_name, vpc_type):
        """ Return True if the given vpc type and name match the VPC of this endpoint """
        if self.type != vpc_type:
            return False
        # A '*' name matches any VPC of the right type.
        return self.name == '*' or self.name == vpc_name

    def __eq__(self, other):
        return hash(self) == hash(other)

    def __hash__(self):
        # Hash tracks the string form, so equal-looking endpoints collide.
        return hash(str(self))

    def __str__(self):
        return '{0}:{1}/{2}'.format(self.name, self.type, self.metanetwork)
class PeeringConnection(object):
    """
    Represents a connection between two different VPCs
    """

    def __init__(self, source_endpoint, target_endpoint):
        """
        Args:
            source_endpoint (PeeringEndpoint): source side of connection
            target_endpoint (PeeringEndpoint): target side of connection
        """
        self.source_endpoint = source_endpoint
        self.target_endpoint = target_endpoint

    def contains_vpc_name(self, vpc_name):
        """ Return true if the given vpc_name is the name of a VPC on one of the sides of the connection"""
        return self.source_endpoint.name == vpc_name or self.target_endpoint.name == vpc_name

    def contains_vpc_id(self, vpc_id):
        """ Return true if the given vpc_id is the id of a VPC on one of the sides of the connection"""
        return self.source_endpoint.vpc['VpcId'] == vpc_id or self.target_endpoint.vpc['VpcId'] == vpc_id

    @staticmethod
    def from_peering_line(line):
        """ Parse a peering connection config line into a PeeringConnection object """
        endpoints = line.split(' ')
        if not len(endpoints) == 2:
            raise VPCConfigError('Invalid peering config "%s". Peering config must be of the format '
                                 'vpc_name[:vpc_type]/metanetwork vpc_name[:vpc_type]/metanetwork' % line)

        source_peering = PeeringEndpoint.from_endpoint_str(endpoints[0])
        target_peering = PeeringEndpoint.from_endpoint_str(endpoints[1])
        return PeeringConnection(source_peering, target_peering)

    def __eq__(self, other):
        return hash(self) == hash(other)

    def __hash__(self):
        # A frozenset makes the hash order-independent, so (A, B) equals
        # (B, A). This replaces ImmutableSet from the Python-2-only `sets`
        # module, which no longer exists on Python 3.
        return hash(frozenset([self.source_endpoint, self.target_endpoint]))

    def __str__(self):
        return str(self.source_endpoint) + ' ' + str(self.target_endpoint)

    def __repr__(self):
        return str(self)
| StarcoderdataPython |
5189778 | <gh_stars>1-10
"""Views for doc serving."""
import logging
import mimetypes
import os
from functools import wraps
from urllib.parse import urlparse
from django.conf import settings
from django.core.files.storage import get_storage_class
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.utils.encoding import iri_to_uri
from django.views.static import serve
from readthedocs.core.resolver import resolve
from readthedocs.projects.models import Project, ProjectRelationship
log = logging.getLogger(__name__) # noqa
def _serve_401(request, project):
    """Render the 401 template and return it with an Unauthorized status code."""
    response = render(request, '401.html')
    response.status_code = 401
    log.debug('Unauthorized access to %s documentation', project.slug)
    return response
def _fallback():
    """Build the sentinel response signalling a fallback to the RTD app."""
    # TODO: This currently isn't used. It might be though, so keeping it for now.
    fallback_response = HttpResponse('Internal fallback to RTD app')
    fallback_response.status_code = 420
    log.debug('Falling back to RTD app')
    return fallback_response
def map_subproject_slug(view_func):
    """
    A decorator that maps a ``subproject_slug`` URL param into a Project.

    :raises: Http404 if the Project doesn't exist

    .. warning:: Does not take into account any kind of privacy settings.
    """
    @wraps(view_func)
    def inner_view(  # noqa
            request, subproject=None, subproject_slug=None, *args, **kwargs
    ):
        if subproject is None and subproject_slug:
            # Try to fetch by subproject alias first, otherwise we might end up
            # redirected to an unrelated project.
            # Depends on a project passed into kwargs
            parent_project = kwargs['project']
            relationship = ProjectRelationship.objects.filter(
                parent=parent_project,
                alias=subproject_slug,
            ).first()
            if not relationship:
                relationship = ProjectRelationship.objects.filter(
                    parent=parent_project,
                    child__slug=subproject_slug,
                ).first()
            if not relationship:
                log.warning(
                    'The slug is not subproject of project. subproject_slug=%s project_slug=%s',
                    subproject_slug, parent_project.slug
                )
                raise Http404('Invalid subproject slug')
            subproject = relationship.child
        return view_func(request, subproject=subproject, *args, **kwargs)
    return inner_view
def map_project_slug(view_func):
    """
    A decorator that maps a ``project_slug`` URL param into a Project.

    :raises: Http404 if the Project doesn't exist

    .. warning:: Does not take into account any kind of privacy settings.
    """
    @wraps(view_func)
    def inner_view(  # noqa
            request, project=None, project_slug=None, *args, **kwargs
    ):
        if project is not None:
            # Already resolved by the caller; nothing to look up.
            return view_func(request, project=project, *args, **kwargs)
        # Get a slug from the request if it can't be found in the URL
        if not project_slug:
            project_slug = request.host_project_slug
            log.debug(
                'Inserting project slug from request slug=[%s]',
                project_slug
            )
        try:
            project = Project.objects.get(slug=project_slug)
        except Project.DoesNotExist:
            raise Http404('Project does not exist.')
        return view_func(request, project=project, *args, **kwargs)
    return inner_view
@map_project_slug
@map_subproject_slug
def redirect_page_with_filename(request, project, subproject, filename):  # pylint: disable=unused-argument  # noqa
    """Redirect /page/file.html to /<default-lang>/<default-version>/file.html."""
    # Preserve the incoming query string on the redirect target.
    query = urlparse(request.get_full_path()).query
    target = resolve(
        subproject or project,
        filename=filename,
        query_params=query,
    )
    return HttpResponseRedirect(target)
@map_project_slug
@map_subproject_slug
def redirect_project_slug(request, project, subproject):  # pylint: disable=unused-argument
    """Handle / -> /en/latest/ directs on subdomains."""
    query = urlparse(request.get_full_path()).query
    return HttpResponseRedirect(
        resolve(subproject or project, query_params=query),
    )
@map_project_slug
@map_subproject_slug
def serve_docs(
        request,
        project,
        subproject,
        lang_slug=None,
        version_slug=None,
        filename='',
):
    """
    Take the incoming parsed URL's and figure out what file to serve.

    Resolves the project/subproject/translation to serve from, normalizes
    single-version URLs, builds the storage path for ``filename``, then
    serves it via Django (local media) or an nginx internal redirect.
    """
    log.debug(
        'project=%s, subproject=%s, lang_slug=%s, version_slug=%s, filename=%s',
        project, subproject, lang_slug, version_slug, filename
    )

    # Take the most relevant project so far
    current_project = subproject or project

    # Handle a / redirect when we aren't a single version
    if all([lang_slug is None, version_slug is None, filename == '',
            not current_project.single_version]):
        log.info('Proxito redirect: slug=%s', current_project.slug)
        return redirect_project_slug(
            request,
            project=current_project,
            subproject=None,
        )

    if (lang_slug is None or version_slug is None) and not current_project.single_version:
        log.info('Invalid URL for project with versions. url=%s', filename)
        raise Http404('Invalid URL for project with versions')

    # Handle single-version projects that have URLs like a real project
    if current_project.single_version:
        if lang_slug and version_slug:
            filename = os.path.join(lang_slug, version_slug, filename)
            lang_slug = version_slug = None

    # Check to see if we need to serve a translation
    if not lang_slug or lang_slug == current_project.language:
        final_project = current_project
    else:
        final_project = get_object_or_404(
            current_project.translations.all(), language=lang_slug
        )

    # ``final_project`` is now the actual project we want to serve docs on,
    # accounting for:
    # * Project
    # * Subproject
    # * Translations

    # TODO: Redirects need to be refactored before we can turn them on
    # They currently do 1 request per redirect that exists for the project
    # path, http_status = final_project.redirects.get_redirect_path_with_status(
    #     language=lang_slug, version_slug=version_slug, path=filename
    # )

    # Handle single version by grabbing the default version
    if final_project.single_version:
        version_slug = final_project.get_default_version()

    # Don't do auth checks
    # try:
    #     Version.objects.public(user=request.user, project=final_project).get(slug=version_slug)
    # except Version.DoesNotExist:
    #     # Properly raise a 404 if the version doesn't exist (or is inactive) and
    #     # a 401 if it does
    #     if final_project.versions.filter(slug=version_slug, active=True).exists():
    #         return _serve_401(request, final_project)
    #     raise Http404('Version does not exist.')

    storage_path = final_project.get_storage_path(
        type_='html', version_slug=version_slug, include_file=False
    )
    # BUG FIX: the served path must include the requested ``filename``.
    # Previously a literal placeholder string was interpolated here, so the
    # carefully-built ``filename`` above was never used, every request mapped
    # to the same nonexistent file, and the trailing-slash index handling
    # below could never trigger.
    path = f'{storage_path}/{filename}'

    # Handle out backend storage not supporting directory indexes,
    # so we need to append index.html when appropriate.
    # TODO: We don't currently support `docs.example.com/en/latest/install`
    # where the user actually wants `docs.example.com/en/latest/install/index.html`
    # We would need to emulate nginx's try_files in order to handle this.
    if path[-1] == '/':
        path += 'index.html'

    # Serve from the filesystem if using PYTHON_MEDIA
    # We definitely shouldn't do this in production,
    # but I don't want to force a check for DEBUG.
    if settings.PYTHON_MEDIA:
        log.info('[Django serve] path=%s, project=%s', path, final_project)

        storage = get_storage_class(settings.RTD_BUILD_MEDIA_STORAGE)()
        root_path = storage.path('')
        # Serve from Python
        return serve(request, path, root_path)

    # Serve via nginx
    log.info('[Nginx serve] path=%s, project=%s', path, final_project)
    return _serve_docs_nginx(
        request, final_project=final_project, path=f'/proxito/{path}'
    )
def _serve_docs_nginx(request, final_project, path):
    """Serve ``path`` via an nginx ``X-Accel-Redirect`` internal redirect."""
    guessed_type, encoding = mimetypes.guess_type(path)
    content_type = guessed_type or 'application/octet-stream'
    response = HttpResponse(
        f'Serving internal path: {path}', content_type=content_type
    )
    if encoding:
        response['Content-Encoding'] = encoding

    # NGINX does not support non-ASCII characters in the header, so we
    # convert the IRI path to URI so it's compatible with what NGINX expects
    # as the header value.
    # https://github.com/benoitc/gunicorn/issues/1448
    # https://docs.djangoproject.com/en/1.11/ref/unicode/#uri-and-iri-handling
    response['X-Accel-Redirect'] = iri_to_uri(path)
    return response
| StarcoderdataPython |
1915709 | #!/usr/bin/python3
import os, sys
import logging, argparse
import re
import networkx as nx
import matplotlib.pyplot as plt
import multiprocessing
from multiprocessing.managers import BaseManager
from queue import LifoQueue
from tqdm import tqdm
from nltk.tokenize import word_tokenize
from datasketch import MinHash
# Module-wide logger that writes timestamped records to stderr.
logger = logging.getLogger("global")
formatter = logging.Formatter('[%(asctime)s.%(msecs)03d][%(levelname)s:%(lineno)s] %(message)s',
                              datefmt='%y-%m-%d %H:%M:%S')
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logger.setLevel(level=logging.INFO)
logger.addHandler(stream_handler)


class data_manager(BaseManager):
    # Custom multiprocessing manager: compare_file_list() registers LifoQueue
    # on it so worker processes can share a single result queue.
    pass
def clear_indent(source):
    """Strip leading/trailing whitespace from every line and drop blank lines."""
    stripped_lines = (line.strip() for line in source.split('\n'))
    return "\n".join(line for line in stripped_lines if line)
def remove_comment(text, remove_pattern):
    """Delete every match of the regex ``remove_pattern`` from ``text``.

    The original looped over a one-element (pattern, replacement) list;
    that indirection added nothing, so this is a single direct substitution.
    """
    return re.sub(pattern=remove_pattern, repl='', string=text)
def clear_matrix_template_code(matrix_template, text):
    """Remove the lines of ``matrix_template`` from ``text``, in order.

    Walks ``text`` line by line, consuming template lines one at a time;
    lines that do not match the next pending template line are kept.

    Bug fix: the original indexed ``matrix_template[idx]`` without a bound
    check and swallowed the resulting IndexError, which silently discarded
    every line of ``text`` after the template had been fully consumed. The
    explicit bound check below keeps those remaining lines.
    """
    text_lines = text.split('\n')
    template_lines = matrix_template.split('\n')
    kept = []
    idx = 0
    for line in text_lines:
        if idx < len(template_lines) and line == template_lines[idx]:
            # Line matches the next template line: consume it.
            idx = idx + 1
        else:
            kept.append(line)
    return "\n".join(kept)
def cleasing(text, remove_pattern, matrix_template=None):
    """Normalize source text for comparison.

    Strips indentation and blank lines, removes comments matching
    ``remove_pattern``, sorts lines case-insensitively, and optionally
    removes template boilerplate lines.
    """
    text = clear_indent(text)
    text = remove_comment(text, remove_pattern)
    text = '\n'.join(sorted(text.split('\n'), key=str.lower))
    # Idiom fix: compare with None by identity, not equality.
    if matrix_template is not None:
        text = clear_matrix_template_code(matrix_template, text)
    return text
def load_matrix_template_text(matrix_template_file_name, remove_pattern):
    """Read the template file and return its cleansed text.

    Uses a ``with`` block so the file handle is closed even if cleansing
    raises; the original open()/close() pair leaked the handle on error.
    """
    with open(matrix_template_file_name, "r") as source_file:
        return cleasing(source_file.read(), remove_pattern)
def prepare_the_word(text, remove_pattern, matrix_template):
    """Cleanse ``text`` and build a MinHash signature over its whitespace-separated tokens."""
    cleaned = cleasing(text, remove_pattern, matrix_template).replace('\n', ' ')
    tokens = [token for token in cleaned.split(' ') if token != '']
    minhash = MinHash()
    for token in tokens:
        minhash.update(token.encode('utf8'))
    return minhash
def compare_two_document(src, dst):
    # Estimated Jaccard similarity (0.0..1.0) between two MinHash signatures.
    return src.jaccard(dst)
def compare_file(current_name, remove_pattern, file_list, lifo_queue):
    """Compare one file against every other file and push the results.

    Puts a list of ``(src_basename, dst_basename, similarity)`` tuples onto
    ``lifo_queue`` for the parent process to collect.

    NOTE(review): this function reads the module-level ``matrix_template``
    global, which is only assigned under ``if __name__ == "__main__"``;
    worker processes appear to rely on fork semantics to inherit it --
    confirm before running on a spawn-based platform (e.g. Windows/macOS).
    """
    csv_result_list = []
    src_file = open(current_name, "r")
    src = prepare_the_word(src_file.read(), remove_pattern, matrix_template)
    for compare_name in file_list:
        # Skip the self-comparison.
        if compare_name == current_name:
            continue
        dst_file = open(compare_name, "r")
        dst = prepare_the_word(dst_file.read(), remove_pattern, matrix_template)
        # Only the basename of each path is reported in the CSV output.
        src_name = current_name.split(os.path.sep)[-1]
        dst_name = compare_name.split(os.path.sep)[-1]
        csv_result_list += [(src_name, dst_name,
                            compare_two_document(src, dst))]
        dst_file.close()
    src_file.close()
    lifo_queue.put(csv_result_list)
def compare_file_helper(data_set):
    """Unpack one (name, pattern, file_list, queue) work item for Pool.map."""
    compare_file(*data_set)
def compare_file_list(file_list, remove_pattern, matrix_template):
    """Compare every file against every other in parallel and build CSV text.

    Returns a dict with two CSV strings: ``'all'`` (every pairwise
    similarity) and ``'summary'`` (maximum similarity per file).

    NOTE(review): the ``matrix_template`` parameter is never used here;
    workers read the module-level global of the same name instead (see
    compare_file) -- confirm whether passing it through was intended.
    """
    # Shared LIFO queue served by a custom manager so pool workers can
    # report results back to this process.
    data_manager.register('LifoQueue', LifoQueue)
    manager = data_manager()
    manager.start()
    lifo_queue = manager.LifoQueue()
    p = multiprocessing.Pool(multiprocessing.cpu_count())
    logger.info("compare files start")
    data_set = [(current_name, remove_pattern, file_list, lifo_queue) for current_name in file_list]
    p.map(compare_file_helper, tqdm(data_set))
    p.close()
    p.join()
    csv_result = {
        "all":"",
        "summary":{}
    }
    temp_list = []
    logger.info("get data from LIFO queue")
    while not lifo_queue.empty():
        temp_list += [_row for _row in lifo_queue.get()]
    logger.info("sort the data based on source file name")
    temp_list.sort(key = lambda item : item[0])
    logger.info("make an csv format string")
    csv_result['all'] = "cmp1,cmp2,similarity\n"
    for row in temp_list:
        current_name = row[0]
        compare_name = row[1]
        similarity = row[2]
        # Track the highest similarity seen for each source file.
        csv_result['summary'].setdefault(current_name, -1)
        csv_result['summary'][current_name] = max(csv_result['summary'][current_name], similarity)
        csv_result['all'] += "{},{},{}\n".format(current_name, compare_name, similarity)
    # Convert the per-file maxima into CSV form.
    temp = "id,max similarity\n"
    for key in csv_result['summary']:
        temp += "{},{}\n".format(key, csv_result['summary'][key])
    csv_result['summary'] = temp
    manager.shutdown()
    return csv_result
if __name__=="__main__":
    ### PREDEFINED VALUES ###
    # Matches C-style /* ... */ block comments and // line comments.
    C_COMMENT_REMOVE_PATTERN = "(/\*([^*]|[\r\n]|(\*+([^*/]|[\r\n])))*\*+/)|(//.*)"
    summary_file_name = 'summary.csv'
    result_file_name = 'result.csv'
    matrix_template_file_name = None
    remove_pattern = C_COMMENT_REMOVE_PATTERN
    files_path = os.getcwd()
    ### ARGUMENT SETTING ###
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--matrix_template', metavar='<matrix_template file name>', type=str, help="set matrix_template file")
    parser.add_argument('-o', '--output', metavar='<output file name>', type=str, help="set output file")
    parser.add_argument('-p', '--path', metavar='<working path>', type=str, help="set compare files path")
    parser.add_argument('-r', '--remove', metavar='<remove regex pattern>', type=str, help="set remove patterns(regex) in file")
    parser.add_argument('-s', '--summary', metavar='<summary file name>', type=str, help="set summary file")
    parser.add_argument('-g', '--graph', metavar='<graph weight(0.0 ~ 1.0)>', type=float, help="show associativity graph and set weight(0.0 ~ 1.0)")
    args = parser.parse_args()
    # Override the defaults with whatever options were supplied.
    if args.matrix_template != None:
        matrix_template_file_name = args.matrix_template
        logger.info('current matrix_template file "{}"'.format(matrix_template_file_name))
    if args.summary != None:
        summary_file_name = args.summary
    if args.output != None:
        result_file_name = args.output
    if args.path != None:
        files_path = args.path
    if args.remove != None:
        remove_pattern = args.remove
    # Normalize the working path so later concatenation produces valid paths.
    if files_path[-1] != '/':
        files_path += '/'
    logger.info('current summary file "{}"'.format(summary_file_name))
    logger.info('current output file "{}"'.format(result_file_name))
    logger.info('current files path "{}"'.format(files_path))
    ### GET FILE ###
    # Exclude this script and any of its own input/output files from comparison.
    current_file = os.path.split(__file__)[-1]
    exception_file_list = [current_file,
                           matrix_template_file_name,
                           result_file_name,
                           summary_file_name]
    file_list = [_file \
                 for _file in os.listdir(files_path) \
                 if os.path.isfile(os.path.join(files_path, _file))]
    file_list = [files_path+_file \
                 for _file in file_list \
                 if not _file in exception_file_list]
    matrix_template = None
    if matrix_template_file_name != None and os.path.isfile(matrix_template_file_name):
        matrix_template = load_matrix_template_text(matrix_template_file_name, remove_pattern)
    ### RUN PLAGIARISM DETECTOR ###
    csv_result = compare_file_list(file_list, remove_pattern, matrix_template)
    ### WRITE_CSV_FILE ###
    result_file = open(result_file_name, "w")
    result_file.write(csv_result['all'])
    result_file.close()
    logger.info('complete to save a file in "{}"'.format(result_file_name))
    result_file = open(summary_file_name, "w")
    result_file.write(csv_result['summary'])
    result_file.close()
    logger.info('complete to save a file in "{}"'.format(summary_file_name))
    ### DRAW THE GRAPH ###
    # Nodes are files whose best similarity exceeds the threshold; edges are
    # file pairs above the threshold.
    if args.graph != None:
        logger.info('graph generate start')
        node_list = [_value.split(',')[0] \
                     for _value in csv_result['summary'].split('\n')[1:] \
                     if _value != "" and float(_value.split(',')[1]) > args.graph]
        edge_list = [tuple(_value.split(',')[0:2]) \
                     for _value in csv_result['all'].split('\n')[1:] \
                     if _value != "" and float(_value.split(',')[2]) > args.graph]
        G = nx.MultiGraph()
        G.add_nodes_from(node_list)
        G.add_edges_from(edge_list)
        nx.draw_spring(G, with_labels=True)
        plt.show()
| StarcoderdataPython |
1752719 | <filename>benchmark/rclpy/service/client-stress-test.py<gh_stars>100-1000
#!/usr/bin/env python3
# Copyright (c) 2018 Intel Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import math
from nav_msgs.srv import *
import rclpy
from time import time
def main():
    """Repeatedly call the GetMap service and time how long N responses take."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-r", "--run", type=int, help="How many times to run")
    args = parser.parse_args()
    if args.run is None:
        args.run = 1

    rclpy.init()
    print(
        'The client will send a GetMap request continuously until receiving %s response times.' % args.run)
    start = time()
    node = rclpy.create_node('stress_client_rclpy')

    client = node.create_client(GetMap, 'get_map')

    request = GetMap.Request()
    received_times = 0

    while rclpy.ok():
        # BUG FIX: use >= so exactly args.run responses are counted; the
        # original ``>`` comparison waited for one extra response beyond the
        # count announced in the message above.
        if received_times >= args.run:
            node.destroy_node()
            rclpy.shutdown()
            diff = time() - start
            # math.modf() returns the (fractional, integral) parts of diff.
            milliseconds, seconds = math.modf(diff)
            print('Benchmark took %d seconds and %d milliseconds.' % (seconds, round(milliseconds * 1000)))
        else:
            future = client.call_async(request)
            rclpy.spin_until_future_complete(node, future)
            # Count only calls that actually produced a response.
            if future.result() is not None:
                received_times += 1


if __name__ == '__main__':
    main()
| StarcoderdataPython |
5132064 | #!/usr/bin/python
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import urllib
from catalogService.rest.models import clouds
from catalogService.rest.models import cloud_types
class NodeFactory(object):
    """Builds catalog REST model nodes (cloud types, clouds, images,
    instances, jobs, ...) with fully-qualified ``id``/``href`` URLs rooted
    at ``baseUrl``.

    All slots are injected as keyword arguments in ``__init__``; the
    ``*Factory`` slots hold the model constructors this factory wraps.
    """
    __slots__ = [ 'cloudConfigurationDescriptorFactory',
        'credentialsDescriptorFactory',
        'cloudFactory', 'cloudTypeFactory', 'credentialsFactory',
        'credentialsFieldFactory', 'credentialsFieldsFactory',
        'imageFactory', 'instanceFactory', 'instanceUpdateStatusFactory',
        'instanceTypeFactory', 'instanceLaunchJobFactory',
        'jobTypeFactory', 'keyPairFactory',
        'securityGroupFactory',
        'baseUrl', 'cloudType', 'cloudName', 'userId']
    def __init__(self, **kwargs):
        # Populate every slot from kwargs; anything not supplied is None.
        for slot in self.__slots__:
            if not slot.startswith('_'):
                setattr(self, slot, kwargs.get(slot, None))
    def newCloudType(self, *args, **kwargs):
        """Build a cloud-type node with links to its instances and descriptors."""
        node = self.cloudTypeFactory(*args, **kwargs)
        cloudTypeId = self._getCloudTypeUrl(self.cloudType)
        node.setId(cloudTypeId)
        node.setCloudInstances(cloud_types.CloudInstances(
            href = self.join(cloudTypeId, 'instances')))
        node.setDescriptorCredentials(cloud_types.DescriptorCredentials(
            href = self.join(cloudTypeId, 'descriptor', 'credentials')))
        node.setDescriptorInstanceConfiguration(
            cloud_types.DescriptorInstanceConfiguration(
                href = self.join(cloudTypeId, 'descriptor', 'configuration')))
        return node
    def newCloud(self, *args, **kwargs):
        """Build a cloud node and attach all of its sub-resource links."""
        node = self.cloudFactory(*args, **kwargs)
        cloudId = self.getCloudUrl(node)
        node.setId(cloudId)
        cloudType = clouds.Type(href = self._getCloudTypeUrl(self.cloudType)).characters(self.cloudType)
        node.setType(cloudType)
        node.setImages(clouds.Images(href = self.join(cloudId, 'images')))
        node.setInstances(clouds.Instances(href = self.join(cloudId, 'instances')))
        node.setUserCredentials(clouds.UserCredentials(href = self.join(cloudId, 'users', self.userId, 'credentials')))
        node.setConfiguration(clouds.Configuration(href = self.join(cloudId,
            'configuration')))
        node.setDescriptorLaunch(clouds.DescriptorLaunch(href =
            self.join(cloudId, 'descriptor', 'launch')))
        node.setDescriptorDeploy(clouds.DescriptorDeploy(href =
            self.join(cloudId, 'descriptor', 'deployImage')))
        # Link to the currently-running launch jobs for this cloud.
        searchParams = dict(cloudName = node.getCloudName(),
            cloudType = self.cloudType,
            status = 'Running')
        node.setActiveJobs(clouds.ActiveJobs(href = self.getJobSearchUrl(
            'instance-launch', searchParams)))
        return node
    def newCloudConfigurationDescriptor(self, descr):
        # Same URL-fixup treatment as launch descriptors.
        return self.newLaunchDescriptor(descr)
    def newCredentialsDescriptor(self, *args, **kwargs):
        node = self.credentialsDescriptorFactory(*args, **kwargs)
        return node
    def newCloudConfigurationDescriptorData(self, node):
        """Set the id of a configuration-descriptor data node."""
        node.setId(self.join(self._getCloudUrlFromParams(), 'configuration'))
        return node
    def newCredentialsDescriptorData(self, node):
        """Set the id of a credentials-descriptor data node for the current user."""
        node.setId(self.join(self._getCloudUrlFromParams(), 'users', self.userId,
            'credentials'))
        return node
    def newCredentials(self, valid, fields = None):
        # XXX deprecated
        if fields is None:
            fields = []
        fieldsNode = self.credentialsFieldsFactory()
        for credName, credVal in fields:
            fieldsNode.append(self.credentialsFieldFactory(
                credentialName = credName, value = credVal))
        credsNode = self.credentialsFactory(fields = fieldsNode,
            valid = valid)
        return credsNode
    def newImage(self, *args, **kwargs):
        """Build an image node; default the internal target id to the image id."""
        node = self.imageFactory(*args, **kwargs)
        node.setId(self.getImageUrl(node))
        node.setCloudType(self.cloudType)
        if node.getInternalTargetId() is None:
            node.setInternalTargetId(node.getImageId())
        return node
    def newInstance(self, *args, **kwargs):
        node = self.instanceFactory(*args, **kwargs)
        return self.refreshInstance(node)
    def refreshInstance(self, node):
        """(Re)compute all URLs and default fields on an instance node."""
        node.setCloudType(self.cloudType)
        node.setCloudName(self.cloudName)
        instanceUrl = self.getInstanceUrl(node)
        node.setId(instanceUrl)
        node.setForceUpdateUrl("%s/forceUpdate" % instanceUrl)
        updateStatus = self.instanceUpdateStatusFactory()
        updateStatus.setState('')
        updateStatus.setTime('')
        node.setUpdateStatus(updateStatus)
        if node.getOutOfDate() is None:
            node.setOutOfDate(False)
        # Software stuff
        # Rewrite every installed-software / available-update href so it is
        # rooted under this instance's URL.
        for instSoftware in (node.getInstalledSoftware() or []):
            isid = instSoftware.getId()
            isid = "%s/installedSoftware/%s" % (instanceUrl, isid)
            instSoftware.setId(isid)
            tc = instSoftware.getTroveChanges()
            if tc:
                href = tc.getHref()
                if href:
                    tc.setHref("%s/troveChanges" % isid)
        for availUpdate in (node.getAvailableUpdate() or []):
            href = os.path.basename(availUpdate.getId())
            availUpdate.setId("%s/availableUpdates/%s" %
                (instanceUrl, href))
            instSoftware = availUpdate.getInstalledSoftware()
            href = os.path.basename(instSoftware.getHref())
            instSoftware.setHref("%s/installedSoftware/%s" %
                (instanceUrl, href))
            troveChanges = availUpdate.getTroveChanges()
            href = os.path.basename(troveChanges.getHref())
            troveChanges.setHref("%s/availableUpdates/%s/troveChanges" %
                (instanceUrl, href))
        return node
    def newInstanceLaunchJob(self, node):
        """Qualify the id, image id and result hrefs of an instance-launch job."""
        node.set_id(self.getJobIdUrl(node.get_id(), node.get_type()))
        imageId = node.get_imageId()
        if imageId:
            node.set_imageId(self._getImageUrl(node, imageId))
        for result in (node.get_resultResource() or []):
            href = result.get_href()
            if href:
                result.set_href(self._getInstanceUrl(node, href))
        return node
    def newImageDeploymentJob(self, node):
        """Qualify the id, image id and result hrefs of an image-deployment job."""
        node.set_id(self.getJobIdUrl(node.get_id(), node.get_type()))
        imageId = node.get_imageId()
        if imageId:
            node.set_imageId(self._getImageUrl(node, imageId))
        for result in (node.get_resultResource() or []):
            href = result.get_href()
            if href:
                result.set_href(self._getImageUrl(node, href))
        return node
    def newLaunchDescriptor(self, descriptor):
        """Make relative help links in a descriptor absolute."""
        prefix = self._getTargetTypeHelpUrl(self.cloudType)
        for field in descriptor.getDataFields():
            for helpNode in field.help:
                href = helpNode.href
                if '://' not in href:
                    helpNode.href = self.join(prefix, href)
        return descriptor
    def newSecurityGroup(self, instanceId, secGroup):
        """Set a security group's id under the owning instance's URL."""
        sgId = self.join(self._getCloudUrl(self.cloudType, self.cloudName),
            'instances', instanceId, 'securityGroups',
            self._quote(secGroup.getId()))
        secGroup.setId(sgId)
        return secGroup
    def getJobIdUrl(self, jobId, jobType):
        jobId = str(jobId)
        jobType = os.path.basename(jobType)
        return self.join(self.baseUrl, 'jobs', 'types', jobType, 'jobs',
            jobId)
    def getJobSearchUrl(self, jobType, params):
        # Build a deterministic (sorted) query string for the job search URL.
        q = urllib.quote_plus
        params = sorted(params.items())
        params = '&'.join("%s=%s" % (q(x, safe=':'), q(y, safe=':'))
            for (x, y) in params)
        return self.join(self.baseUrl, 'jobs', 'types', jobType,
            'jobs?' + params)
    @classmethod
    def join(cls, *args):
        """Join the arguments into a URL"""
        args = [ args[0].rstrip('/') ] + [ x.strip('/') for x in args[1:] ]
        return '/'.join(args)
    def getCloudUrl(self, node):
        # Older model objects expose get_cloudName, newer ones getCloudName.
        if hasattr(node, "get_cloudName"):
            cloudName = node.get_cloudName()
        else:
            cloudName = node.getCloudName()
        return self._getCloudUrl(self.cloudType, cloudName)
    def getImageUrl(self, node):
        return self._getImageUrl(node, node.getId())
    def _getImageUrl(self, node, imageId):
        if imageId is None:
            return None
        return self.join(self.getCloudUrl(node), 'images',
            self._quote(imageId))
    def getInstanceUrl(self, node):
        return self._getInstanceUrl(node, node.getId())
    def _getInstanceUrl(self, node, instanceId):
        if instanceId is None:
            return None
        # Keep only the last path component of a possibly-qualified id.
        instanceId = instanceId.split('/')[-1]
        return self.join(self.getCloudUrl(node), 'instances',
            self._quote(instanceId))
    def _getCloudTypeUrl(self, cloudType):
        return self.join(self.baseUrl, 'clouds', cloudType)
    def _getCloudUrl(self, cloudType, cloudName):
        return self.join(self._getCloudTypeUrl(cloudType), 'instances',
            cloudName)
    def _getTargetTypeHelpUrl(self, cloudType):
        return self.join(self.baseUrl, 'help/targets/drivers', cloudType)
    def _getCloudUrlFromParams(self):
        return self._getCloudUrl(self.cloudType,
            self.cloudName)
    @classmethod
    def _quote(cls, data):
        # URL-quote ids so they are safe as single path components.
        if isinstance(data, int):
            data = str(data)
        return urllib.quote(data, safe="")
| StarcoderdataPython |
def say_hi():
    """Print a greeting identifying this as mymodule's say_hi function."""
    greeting = '我是mymodule模块的say_hi函数'
    print(greeting)


__version__ = '0.2'
1649321 | <filename>tests/flake8_integration/test_formatter.py<gh_stars>0
# -*- coding: utf-8 -*-
import os
from typing import List
import pytest
from flake8_nb.flake8_integration.formatter import IpynbFormatter
from flake8_nb.parsers.notebook_parsers import NotebookParser
TEST_NOTEBOOK_PATH = os.path.join(
"tests", "data", "notebooks", "notebook_with_flake8_tags.ipynb#In[{}]"
)
def get_test_intermediate_path(intermediate_names):
    """Return the first intermediate path ending with the parsed notebook name."""
    suffix = "notebook_with_flake8_tags.ipynb_parsed"
    candidates = [path for path in intermediate_names if path.endswith(suffix)]
    return candidates[0]
class MockedOption:
    """Minimal stand-in for flake8's options object used by the formatter."""

    def __init__(self, formatter="default_notebook"):
        # The formatter only reads ``output_file`` and ``format``.
        self.format = formatter
        self.output_file = ""
class MockError:
    """Fake flake8 error record pointing at a normalized file path."""

    def __init__(self, filename: str, line_number: int):
        # Normalize so path comparisons behave across OS separators.
        self.filename = os.path.normpath(filename)
        self.line_number = line_number
        self.column_number = 2
        self.code = "AB123"
        self.text = "This is just for the coverage"
@pytest.mark.parametrize(
    "line_number,expected_input_number,expected_line_number",
    [(8, 1, 2), (15, 2, 2), (29, 4, 2), (30, 4, 3), (38, 5, 3)],
)
def test_IpynbFormatter__map_notebook_error(
    notebook_parser: NotebookParser,
    line_number: int,
    expected_input_number: int,
    expected_line_number: int,
):
    """Check that a line in the parsed .py intermediate maps back to the
    right notebook input cell and in-cell line number."""
    mocked_option = MockedOption()
    formatter = IpynbFormatter(mocked_option)
    expected_filename = TEST_NOTEBOOK_PATH.format(expected_input_number)
    # Locate the intermediate file generated for the tagged test notebook.
    filename = get_test_intermediate_path(notebook_parser.intermediate_py_file_paths)
    mock_error = MockError(filename, line_number)
    filename, input_cell_line_number = formatter.map_notebook_error(mock_error)
    assert input_cell_line_number == expected_line_number
    assert filename == expected_filename
@pytest.mark.parametrize(
    "format_str,file_path_list,expected_result_str",
    [
        (
            "default_notebook",
            [],
            "{expected_filename}:2:2: AB123 This is just for the coverage",
        ),
        (
            "%(path)s:%(row)d: %(text)s",
            [],
            "{expected_filename}:2: This is just for the coverage",
        ),
        (
            "default_notebook",
            ["tests", "data", "notebooks", "falsy_python_file.py"],
            "{expected_filename}:8:2: AB123 This is just for the coverage",
        ),
        (
            "default_notebook",
            [
                "tests",
                "data",
                "intermediate_py_files",
                "notebook_with_flake8_tags.ipynb_parsed",
            ],
            "{expected_filename}:8:2: AB123 This is just for the coverage",
        ),
    ],
)
def test_IpynbFormatter__format(
    notebook_parser: NotebookParser,
    file_path_list: List[str],
    format_str: str,
    expected_result_str: str,
):
    """Check formatted output for notebook errors (remapped to cell
    coordinates) and for plain .py / unknown intermediate paths (reported
    with the original line number)."""
    mocked_option = MockedOption(format_str)
    formatter = IpynbFormatter(mocked_option)
    if file_path_list:
        # A concrete path was supplied: the formatter should report it as-is.
        filename = expected_filename = os.path.join(*file_path_list)
    else:
        # No path: use the generated intermediate and expect cell coordinates.
        expected_filename = TEST_NOTEBOOK_PATH.format(1)
        filename = get_test_intermediate_path(
            notebook_parser.intermediate_py_file_paths
        )
    mock_error = MockError(filename, 8)
    result = formatter.format(mock_error)
    expected_result = expected_result_str.format(expected_filename=expected_filename)
    assert result == expected_result
9649920 | <reponame>zmxdream/Paddle<filename>python/paddle/fluid/tests/unittests/npu/test_bce_loss_npu.py
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import paddle
import paddle.fluid as fluid
import numpy as np
import unittest
import sys
sys.path.append("..")
from op_test import OpTest
paddle.enable_static()
def test_static_layer(place,
                      input_np,
                      label_np,
                      reduction='mean',
                      weight_np=None):
    """Run paddle.nn.loss.BCELoss via the static-graph API and return the
    fetched result (a list with one numpy array)."""
    prog = paddle.static.Program()
    startup_prog = paddle.static.Program()
    with paddle.static.program_guard(prog, startup_prog):
        # Declare feed placeholders matching the numpy inputs.
        input = paddle.fluid.data(
            name='input', shape=input_np.shape, dtype='float32')
        label = paddle.fluid.data(
            name='label', shape=label_np.shape, dtype='float32')
        if weight_np is not None:
            weight = paddle.fluid.data(
                name='weight', shape=weight_np.shape, dtype='float32')
            bce_loss = paddle.nn.loss.BCELoss(
                weight=weight, reduction=reduction)
        else:
            bce_loss = paddle.nn.loss.BCELoss(reduction=reduction)
        res = bce_loss(input, label)
        exe = paddle.static.Executor(place)
        # Feed the weight only when the weighted variant is being tested.
        static_result = exe.run(prog,
                                feed={"input": input_np,
                                      "label": label_np}
                                if weight_np is None else {
                                    "input": input_np,
                                    "label": label_np,
                                    "weight": weight_np
                                },
                                fetch_list=[res])
    return static_result
def test_static_functional(place,
                           input_np,
                           label_np,
                           reduction='mean',
                           weight_np=None):
    """Run paddle.nn.functional.binary_cross_entropy via the static-graph
    API and return the fetched result."""
    prog = paddle.static.Program()
    startup_prog = paddle.static.Program()
    with paddle.static.program_guard(prog, startup_prog):
        input = paddle.fluid.data(
            name='input', shape=input_np.shape, dtype='float32')
        label = paddle.fluid.data(
            name='label', shape=label_np.shape, dtype='float32')
        if weight_np is not None:
            weight = paddle.fluid.data(
                name='weight', shape=weight_np.shape, dtype='float32')
            res = paddle.nn.functional.binary_cross_entropy(
                input, label, weight=weight, reduction=reduction)
        else:
            res = paddle.nn.functional.binary_cross_entropy(
                input, label, reduction=reduction)
        exe = paddle.static.Executor(place)
        # Feed the weight only when the weighted variant is being tested.
        static_result = exe.run(prog,
                                feed={"input": input_np,
                                      "label": label_np}
                                if weight_np is None else {
                                    "input": input_np,
                                    "label": label_np,
                                    "weight": weight_np
                                },
                                fetch_list=[res])
    return static_result
def test_dygraph_layer(place,
                       input_np,
                       label_np,
                       reduction='mean',
                       weight_np=None):
    """Run paddle.nn.loss.BCELoss in dygraph mode and return a numpy result.

    Temporarily switches to dynamic mode and restores static mode before
    returning, so the module-level paddle.enable_static() state is kept.
    """
    paddle.disable_static(place)
    if weight_np is not None:
        weight = paddle.to_tensor(weight_np)
        bce_loss = paddle.nn.loss.BCELoss(weight=weight, reduction=reduction)
    else:
        bce_loss = paddle.nn.loss.BCELoss(reduction=reduction)
    dy_res = bce_loss(paddle.to_tensor(input_np), paddle.to_tensor(label_np))
    dy_result = dy_res.numpy()
    paddle.enable_static()
    return dy_result
def test_dygraph_functional(place,
                            input_np,
                            label_np,
                            reduction='mean',
                            weight_np=None):
    """Run paddle.nn.functional.binary_cross_entropy in dygraph mode and
    return a numpy result; restores static mode before returning."""
    paddle.disable_static(place)
    input = paddle.to_tensor(input_np)
    label = paddle.to_tensor(label_np)

    if weight_np is not None:
        weight = paddle.to_tensor(weight_np)
        dy_res = paddle.nn.functional.binary_cross_entropy(
            input, label, weight=weight, reduction=reduction)
    else:
        dy_res = paddle.nn.functional.binary_cross_entropy(
            input, label, reduction=reduction)
    dy_result = dy_res.numpy()
    paddle.enable_static()
    return dy_result
def calc_bceloss(input_np, label_np, reduction='mean', weight_np=None):
    """NumPy reference implementation of binary cross entropy.

    Computes ``-[y*log(p) + (1-y)*log(1-p)]`` elementwise, scales it by
    ``weight_np`` when given, then applies the 'mean', 'sum' or 'none'
    reduction.
    """
    elementwise = -(label_np * np.log(input_np)
                    + (1. - label_np) * np.log(1. - input_np))
    if weight_np is not None:
        elementwise = weight_np * elementwise
    if reduction == 'mean':
        return np.mean(elementwise)
    if reduction == 'sum':
        return np.sum(elementwise)
    return elementwise
class TestBCELoss(unittest.TestCase):
    """End-to-end checks of paddle's BCE loss against the NumPy reference.

    Each case runs the ``BCELoss`` layer and the functional
    ``binary_cross_entropy`` API in both static and dygraph modes and
    compares every result with ``calc_bceloss``.
    """

    def test_BCELoss(self):
        """All reductions, no weight, on CPU (and NPU when available)."""
        input_np = np.random.uniform(0.1, 0.8, size=(20, 30)).astype(np.float32)
        label_np = np.random.randint(0, 2, size=(20, 30)).astype(np.float32)
        places = [fluid.CPUPlace()]
        if fluid.core.is_compiled_with_npu():
            places.append(fluid.NPUPlace(0))
        reductions = ['sum', 'mean', 'none']
        for place in places:
            for reduction in reductions:
                static_result = test_static_layer(place, input_np, label_np,
                                                  reduction)
                dy_result = test_dygraph_layer(place, input_np, label_np,
                                               reduction)
                expected = calc_bceloss(input_np, label_np, reduction)
                self.assertTrue(np.allclose(static_result, expected))
                self.assertTrue(np.allclose(static_result, dy_result))
                self.assertTrue(np.allclose(dy_result, expected))
                static_functional = test_static_functional(place, input_np,
                                                           label_np, reduction)
                dy_functional = test_dygraph_functional(place, input_np,
                                                        label_np, reduction)
                self.assertTrue(np.allclose(static_functional, expected))
                self.assertTrue(np.allclose(static_functional, dy_functional))
                self.assertTrue(np.allclose(dy_functional, expected))

    def test_BCELoss_weight(self):
        """Weighted loss with a weight tensor broadcastable to the input."""
        input_np = np.random.uniform(
            0.1, 0.8, size=(2, 3, 4, 10)).astype(np.float32)
        label_np = np.random.randint(
            0, 2, size=(2, 3, 4, 10)).astype(np.float32)
        weight_np = np.random.random(size=(3, 4, 10)).astype(np.float32)
        place = fluid.NPUPlace(0) if fluid.core.is_compiled_with_npu(
        ) else fluid.CPUPlace()
        for reduction in ['sum', 'mean', 'none']:
            static_result = test_static_layer(
                place, input_np, label_np, reduction, weight_np=weight_np)
            dy_result = test_dygraph_layer(
                place, input_np, label_np, reduction, weight_np=weight_np)
            expected = calc_bceloss(
                input_np, label_np, reduction, weight_np=weight_np)
            self.assertTrue(np.allclose(static_result, expected))
            self.assertTrue(np.allclose(static_result, dy_result))
            self.assertTrue(np.allclose(dy_result, expected))
            static_functional = test_static_functional(
                place, input_np, label_np, reduction, weight_np=weight_np)
            dy_functional = test_dygraph_functional(
                place, input_np, label_np, reduction, weight_np=weight_np)
            self.assertTrue(np.allclose(static_functional, expected))
            self.assertTrue(np.allclose(static_functional, dy_functional))
            self.assertTrue(np.allclose(dy_functional, expected))

    def test_BCELoss_error(self):
        """Invalid ``reduction`` values must raise ValueError."""
        # Pick the place the same way as test_BCELoss_weight so this test
        # also runs on CPU-only builds.  The original unconditionally used
        # paddle.NPUPlace(0), which fails when paddle is not compiled with
        # NPU support.
        place = fluid.NPUPlace(0) if fluid.core.is_compiled_with_npu(
        ) else fluid.CPUPlace()
        paddle.disable_static(place)
        self.assertRaises(
            ValueError, paddle.nn.loss.BCELoss, reduction="unsupport reduction")
        input = paddle.to_tensor([[0.1, 0.3]], dtype='float32')
        label = paddle.to_tensor([[0.0, 1.0]], dtype='float32')
        self.assertRaises(
            ValueError,
            paddle.nn.functional.binary_cross_entropy,
            input=input,
            label=label,
            reduction="unsupport reduction")
        paddle.enable_static()
def bce_loss(input, label):
    """Elementwise binary cross entropy: ``-(y*log(x) + (1-y)*log(1-x))``."""
    positive_term = label * np.log(input)
    negative_term = (1. - label) * np.log(1. - input)
    return -1 * (positive_term + negative_term)
class TestBceLossOp(OpTest):
    """OpTest for the raw ``bce_loss`` operator on NPU.

    Builds random inputs of ``self.shape`` and checks the operator's
    forward output and X-gradient against the NumPy reference.
    """

    def setUp(self):
        self.set_npu()
        self.init_test_case()
        self.op_type = "bce_loss"
        predictions = np.random.uniform(0.1, 0.8, self.shape).astype("float32")
        labels = np.random.randint(0, 2, self.shape).astype("float32")
        self.inputs = {'X': predictions, 'Label': labels}
        self.outputs = {'Out': bce_loss(predictions, labels)}

    def set_npu(self):
        # Mark the test class as NPU-backed and select the first NPU device.
        self.__class__.use_npu = True
        self.place = paddle.NPUPlace(0)

    def test_check_output(self):
        self.check_output_with_place(self.place)

    def test_check_grad(self):
        self.check_grad_with_place(self.place, ['X'], 'Out')

    def init_test_case(self):
        # Hook for subclasses to override the input shape.
        self.shape = [10, 10]
class TestBceLossOpCase1(TestBceLossOp):
    """bce_loss OpTest with a 4-D input shape."""

    def init_test_case(self):
        # Bug fix: the original subclassed OpTest directly and misspelled
        # this hook as ``init_test_cast``, so the case never built or ran
        # the bce_loss op with its intended shape.
        self.shape = [2, 3, 4, 5]
class TestBceLossOpCase2(TestBceLossOp):
    """bce_loss OpTest with a 3-D input shape."""

    def init_test_case(self):
        # Bug fix: the original subclassed OpTest directly and misspelled
        # this hook as ``init_test_cast``, so the case never built or ran
        # the bce_loss op with its intended shape.
        self.shape = [2, 3, 20]
# Allow running this test module directly (e.g. ``python <this_file>.py``).
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
1737857 | <gh_stars>0
# Autogenerated file. ANY CHANGES WILL BE OVERWRITTEN
from to_python.core.types import FunctionType, \
FunctionArgument, \
FunctionArgumentValues, \
FunctionReturnTypes, \
FunctionSignature, \
FunctionDoc, \
FunctionData, \
CompoundFunctionData
DUMP_PARTIAL = [
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='createMarker',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['marker'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='x',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='y',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='z',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='theType',
argument_type=FunctionType(
names=['string'],
is_optional=True,
),
default_value='"checkpoint"',
)
],
[
FunctionArgument(
name='size',
argument_type=FunctionType(
names=['float'],
is_optional=True,
),
default_value='4.0',
)
],
[
FunctionArgument(
name='r',
argument_type=FunctionType(
names=['int'],
is_optional=True,
),
default_value='0',
)
],
[
FunctionArgument(
name='g',
argument_type=FunctionType(
names=['int'],
is_optional=True,
),
default_value='0',
)
],
[
FunctionArgument(
name='b',
argument_type=FunctionType(
names=['int'],
is_optional=True,
),
default_value='255',
)
],
[
FunctionArgument(
name='a',
argument_type=FunctionType(
names=['int'],
is_optional=True,
),
default_value='255',
)
],
[
FunctionArgument(
name='visibleTo',
argument_type=FunctionType(
names=['element'],
is_optional=True,
),
default_value='getRootElement()',
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function creates a marker. A marker is a 3D model in the world that can highlight a particular point or area, often used to instruct players where to go to perform actions such as entering buildings.\nThere are various limits that govern the maximum number of each type that can be visible at once. These are:\n* Coronas: 32\n* Checkpoints, Rings, Cylinders and Arrows combined: 32\nYou are able to create as many markers as you wish (memory and element limit permitting), but the player will only be able to see the nearest ones up to the limit.\n<br><br><br><br>' ,
arguments={
"x": """: A floating point number representing the X coordinate on the map. """,
"y": """: A floating point number representing the Y coordinate on the map. """,
"z": """: A floating point number representing the Z coordinate on the map. """,
"theType": """: The visual type of the marker to be created. Possible values: """,
"size": """: The diameter of the marker to be created, in meters. """,
"r": """: An integer number representing the amount of red to use in the colouring of the marker (0 - 255). """,
"g": """: An integer number representing the amount of green to use in the colouring of the marker (0 - 255). """,
"b": """: An integer number representing the amount of blue to use in the colouring of the marker (0 - 255). """,
"a": """: An integer number representing the amount of alpha to use in the colouring of the marker (0 - 255 where 0 is transparent and 255 is opaque). """,
"visibleTo": """: This defines which elements can see the marker. Defaults to visible to everyone. See visibility. """
},
result='' ,
),
url='createMarker',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='createMarker',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['marker'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='x',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='y',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='z',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='theType',
argument_type=FunctionType(
names=['string'],
is_optional=True,
),
default_value='"checkpoint"',
)
],
[
FunctionArgument(
name='size',
argument_type=FunctionType(
names=['float'],
is_optional=True,
),
default_value='4.0',
)
],
[
FunctionArgument(
name='r',
argument_type=FunctionType(
names=['int'],
is_optional=True,
),
default_value='0',
)
],
[
FunctionArgument(
name='g',
argument_type=FunctionType(
names=['int'],
is_optional=True,
),
default_value='0',
)
],
[
FunctionArgument(
name='b',
argument_type=FunctionType(
names=['int'],
is_optional=True,
),
default_value='255',
)
],
[
FunctionArgument(
name='a',
argument_type=FunctionType(
names=['int'],
is_optional=True,
),
default_value='255',
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function creates a marker. A marker is a 3D model in the world that can highlight a particular point or area, often used to instruct players where to go to perform actions such as entering buildings.\nThere are various limits that govern the maximum number of each type that can be visible at once. These are:\n* Coronas: 32\n* Checkpoints, Rings, Cylinders and Arrows combined: 32\nYou are able to create as many markers as you wish (memory and element limit permitting), but the player will only be able to see the nearest ones up to the limit.\n<br><br><br><br>' ,
arguments={
"x": """: A floating point number representing the X coordinate on the map. """,
"y": """: A floating point number representing the Y coordinate on the map. """,
"z": """: A floating point number representing the Z coordinate on the map. """,
"theType": """: The visual type of the marker to be created. Possible values: """,
"size": """: The diameter of the marker to be created, in meters. """,
"r": """: An integer number representing the amount of red to use in the colouring of the marker (0 - 255). """,
"g": """: An integer number representing the amount of green to use in the colouring of the marker (0 - 255). """,
"b": """: An integer number representing the amount of blue to use in the colouring of the marker (0 - 255). """,
"a": """: An integer number representing the amount of alpha to use in the colouring of the marker (0 - 255 where 0 is transparent and 255 is opaque). """
},
result='' ,
),
url='createMarker',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='getMarkerColor',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
),
FunctionType(
names=['int'],
is_optional=False,
),
FunctionType(
names=['int'],
is_optional=False,
),
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theMarker',
argument_type=FunctionType(
names=['marker'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns the color and transparency for a marker element. Not all marker types support transparency.' ,
arguments={
"theMarker": """: The marker that you wish to retrieve the color of. """
},
result='returns four ints corresponding to the amount of red, green, blue and alpha (respectively) of the marker, false if invalid arguments were passed.' ,
),
url='getMarkerColor',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='getMarkerColor',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
),
FunctionType(
names=['int'],
is_optional=False,
),
FunctionType(
names=['int'],
is_optional=False,
),
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theMarker',
argument_type=FunctionType(
names=['marker'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns the color and transparency for a marker element. Not all marker types support transparency.' ,
arguments={
"theMarker": """: The marker that you wish to retrieve the color of. """
},
result='returns four ints corresponding to the amount of red, green, blue and alpha (respectively) of the marker, false if invalid arguments were passed.' ,
),
url='getMarkerColor',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='getMarkerCount',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='Returns the number of markers that currently exist in the world.' ,
arguments={
},
result='returns the number of markers that currently exist.' ,
),
url='getMarkerCount',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='getMarkerCount',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='Returns the number of markers that currently exist in the world.' ,
arguments={
},
result='returns the number of markers that currently exist.' ,
),
url='getMarkerCount',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='getMarkerIcon',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['string'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theMarker',
argument_type=FunctionType(
names=['marker'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns the icon name for a marker.' ,
arguments={
"theMarker": """: A marker element referencing the specified marker. """
},
result='returns false if the marker passed is invalid or a string containing one of the following:\n* none: no icon\n* arrow: arrow icon\n* finish: finish (end-race) icon' ,
),
url='getMarkerIcon',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='getMarkerIcon',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['string'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theMarker',
argument_type=FunctionType(
names=['marker'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns the icon name for a marker.' ,
arguments={
"theMarker": """: A marker element referencing the specified marker. """
},
result='returns false if the marker passed is invalid or a string containing one of the following:\n* none: no icon\n* arrow: arrow icon\n* finish: finish (end-race) icon' ,
),
url='getMarkerIcon',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='getMarkerSize',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['float'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='myMarker',
argument_type=FunctionType(
names=['marker'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns a float containing the size of the specified marker.' ,
arguments={
"myMarker": """: The marker that you wish to retrieve the size of. """
},
result='returns a float containing the size of the specified marker.' ,
),
url='getMarkerSize',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='getMarkerSize',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['float'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='myMarker',
argument_type=FunctionType(
names=['marker'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns a float containing the size of the specified marker.' ,
arguments={
"myMarker": """: The marker that you wish to retrieve the size of. """
},
result='returns a float containing the size of the specified marker.' ,
),
url='getMarkerSize',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='getMarkerTarget',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['float'],
is_optional=False,
),
FunctionType(
names=['float'],
is_optional=False,
),
FunctionType(
names=['float'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theMarker',
argument_type=FunctionType(
names=['marker'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns the position of the specified markers target, the position it points to. This only works for checkpoint markers and ring markers. For checkpoints it returns the position the arrow is pointing to, for ring markers it returns the position the ring is facing. You can set this target with setMarkerTarget.' ,
arguments={
"theMarker": """The marker you wish to retrieve the target position of. """
},
result='returns three floats if a target is set, or false in the first variable and nil in the two others if the marker is invalid or no target is set.' ,
),
url='getMarkerTarget',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='getMarkerTarget',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['float'],
is_optional=False,
),
FunctionType(
names=['float'],
is_optional=False,
),
FunctionType(
names=['float'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theMarker',
argument_type=FunctionType(
names=['marker'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns the position of the specified markers target, the position it points to. This only works for checkpoint markers and ring markers. For checkpoints it returns the position the arrow is pointing to, for ring markers it returns the position the ring is facing. You can set this target with setMarkerTarget.' ,
arguments={
"theMarker": """The marker you wish to retrieve the target position of. """
},
result='returns three floats if a target is set, or false in the first variable and nil in the two others if the marker is invalid or no target is set.' ,
),
url='getMarkerTarget',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='getMarkerType',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['string'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theMarker',
argument_type=FunctionType(
names=['marker'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns a markers type.' ,
arguments={
"theMarker": """: A marker element referencing the specified marker. """
},
result='* returns one of the following strings:\nif an invalid marker is specified, false is returned.' ,
),
url='getMarkerType',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='getMarkerType',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['string'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theMarker',
argument_type=FunctionType(
names=['marker'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns a markers type.' ,
arguments={
"theMarker": """: A marker element referencing the specified marker. """
},
result='* returns one of the following strings:\nif an invalid marker is specified, false is returned.' ,
),
url='getMarkerType',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='setMarkerColor',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theMarker',
argument_type=FunctionType(
names=['marker'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='r',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='g',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='b',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='a',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function sets the color of the specified marker by modifying the values for red, green, blue and alpha.' ,
arguments={
"theMarker": """The marker that you wish to set the color of. """,
"r": """The amount of red in the final color (0 to 255). """,
"g": """The amount of green in the final color (0 to 255). """,
"b": """The amount of blue in the final color (0 to 255). """,
"a": """The amount of alpha in the final color (0 to 255). """
},
result='' ,
),
url='setMarkerColor',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='setMarkerColor',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theMarker',
argument_type=FunctionType(
names=['marker'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='r',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='g',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='b',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='a',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function sets the color of the specified marker by modifying the values for red, green, blue and alpha.' ,
arguments={
"theMarker": """The marker that you wish to set the color of. """,
"r": """The amount of red in the final color (0 to 255). """,
"g": """The amount of green in the final color (0 to 255). """,
"b": """The amount of blue in the final color (0 to 255). """,
"a": """The amount of alpha in the final color (0 to 255). """
},
result='' ,
),
url='setMarkerColor',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='setMarkerIcon',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theMarker',
argument_type=FunctionType(
names=['marker'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='icon',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function allows changing the icon of a checkpoint marker.' ,
arguments={
"theMarker": """The marker to change the visual style of """,
"icon": """A string referring to the type of icon, acceptable values are: """,
"none": """: No icon """,
"arrow": """: Arrow icon """,
"finish": """: Finish icon (at end of race) """
},
result='' ,
),
url='setMarkerIcon',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='setMarkerIcon',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theMarker',
argument_type=FunctionType(
names=['marker'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='icon',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function allows changing the icon of a checkpoint marker.' ,
arguments={
"theMarker": """The marker to change the visual style of """,
"icon": """A string referring to the type of icon, acceptable values are: """,
"none": """: No icon """,
"arrow": """: Arrow icon """,
"finish": """: Finish icon (at end of race) """
},
result='' ,
),
url='setMarkerIcon',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='setMarkerSize',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theMarker',
argument_type=FunctionType(
names=['marker'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='size',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function sets the size of the specified marker.\nSetting negative value will flip the marker, do nothing or make it invisible:\n* cylinder or arrow: upside down\n* ring: inside out\n* checkpoint: disappear\n* corona: bigger' ,
arguments={
"theMarker": """The marker that you wish to set the size of. """,
"size": """A float representing new size of the marker. """
},
result='returns true if successful, false if failed.' ,
),
url='setMarkerSize',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='setMarkerSize',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theMarker',
argument_type=FunctionType(
names=['marker'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='size',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function sets the size of the specified marker.\nSetting negative value will flip the marker, do nothing or make it invisible:\n* cylinder or arrow: upside down\n* ring: inside out\n* checkpoint: disappear\n* corona: bigger' ,
arguments={
"theMarker": """The marker that you wish to set the size of. """,
"size": """A float representing new size of the marker. """
},
result='returns true if successful, false if failed.' ,
),
url='setMarkerSize',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='setMarkerTarget',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theMarker',
argument_type=FunctionType(
names=['marker'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='x',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='y',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='z',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function sets the target for a marker. Only the checkpoint and ring marker types can have a target.\nFor checkpoint markers, the target is shown as an arrow aiming at the point specified.\nFor ring markers, the target is shown by rotating the whole ring so that it faces the point specified.\nThis function is most useful for setting up markers for races, where each marker points to the next ones position.\n(This is mostly used in races!)' ,
arguments={
"theMarker": """The marker to set the target of """,
"x": """The x axis of the coordinate to target the marker at """,
"y": """The y axis of the coordinate to target the marker at """,
"z": """The z axis of the coordinate to target the marker at """
},
result='returns true if target was set, false otherwise.' ,
),
url='setMarkerTarget',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='setMarkerTarget',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theMarker',
argument_type=FunctionType(
names=['marker'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='x',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='y',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='z',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function sets the target for a marker. Only the checkpoint and ring marker types can have a target.\nFor checkpoint markers, the target is shown as an arrow aiming at the point specified.\nFor ring markers, the target is shown by rotating the whole ring so that it faces the point specified.\nThis function is most useful for setting up markers for races, where each marker points to the next ones position.\n(This is mostly used in races!)' ,
arguments={
"theMarker": """The marker to set the target of """,
"x": """The x axis of the coordinate to target the marker at """,
"y": """The y axis of the coordinate to target the marker at """,
"z": """The z axis of the coordinate to target the marker at """
},
result='returns true if target was set, false otherwise.' ,
),
url='setMarkerTarget',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='setMarkerType',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theMarker',
argument_type=FunctionType(
names=['marker'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='markerType',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function changes a markers type. The type controls how the marker is displayed in the game. Its important that you use marker types that users are used to from the single player game. For example, checkpoints are used in races, rings are used for aircraft races, arrows are used for entering buildings etc.' ,
arguments={
"theMarker": """: A marker element referencing the specified marker. """,
"markerType": """: A string denoting the marker type. Valid values are: """
},
result='returns true if the marker type was changed, false if it wasnt or marker values were invalid.' ,
),
url='setMarkerType',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='setMarkerType',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='theMarker',
argument_type=FunctionType(
names=['marker'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='markerType',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function changes a markers type. The type controls how the marker is displayed in the game. Its important that you use marker types that users are used to from the single player game. For example, checkpoints are used in races, rings are used for aircraft races, arrows are used for entering buildings etc.' ,
arguments={
"theMarker": """: A marker element referencing the specified marker. """,
"markerType": """: A string denoting the marker type. Valid values are: """
},
result='returns true if the marker type was changed, false if it wasnt or marker values were invalid.' ,
),
url='setMarkerType',
)
],
)
]
| StarcoderdataPython |
257856 | <filename>scripts/tf_cnn_benchmarks/test_util.py<gh_stars>1-10
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Shared functionality across multiple test files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
from contextlib import contextmanager
import os
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import test
import benchmark_cnn
import cnn_util
import datasets
import preprocessing
from models import model
from platforms import util as platforms_util
@contextmanager
def monkey_patch(obj, **kwargs):
  """Temporarily replace attributes on an object (such as a module).

  Every attribute named in **kwargs is overwritten on entry and restored to
  its original value when the context manager exits, even on exception.

  Example, replacing benchmark_cnn.get_data_type with an identity function:

  ```
  with monkey_patch(benchmark_cnn, get_data_type=lambda x: x)
    loss1 = benchmark_cnn.loss_function(1)  # loss1 will be 1
  loss2 = benchmark_cnn.loss_function(params)  # Call the original function
  ```

  Args:
    obj: The object (which can be a module) to monkey patch attributes on.
    **kwargs: Mapping from attribute name to the temporary value it will
      carry inside the context.

  Yields:
    Nothing.
  """
  saved = {name: getattr(obj, name) for name in kwargs}
  try:
    for name, temporary_value in kwargs.items():
      setattr(obj, name, temporary_value)
    yield
  finally:
    # Restore the originals no matter how the body exited.
    for name, original_value in saved.items():
      setattr(obj, name, original_value)
def monkey_patch_base_cluster_manager():
  """Force platforms_util.get_cluster_manager to return a BaseClusterManager.

  Useful when building a distributed-mode graph inside a single process:
  GrpcClusterManager's constructor blocks until a cluster is set up, which
  requires multiple processes, while BaseClusterManager does not block.
  """
  def _base_manager_factory(params, config_proto):
    # The config proto is only needed by the gRPC implementation.
    del config_proto
    return cnn_util.BaseClusterManager(params)
  platforms_util.get_cluster_manager = _base_manager_factory
def print_and_add_to_list(print_list):
  """Return a callback that echoes its argument and records it in print_list."""
  def _record(string):
    print(string)
    print_list.append(string)
  return _record
# Per-step training metrics parsed from stdout; the accuracies are -1 when
# --print_training_accuracy was off (see get_training_outputs_from_logs).
TrainingOutput = namedtuple('TrainingOutput',
                            ['loss', 'top_1_accuracy', 'top_5_accuracy'])
# Final accuracies parsed from an evaluation run's stdout.
EvalOutput = namedtuple('EvalOutput', ['top_1_accuracy', 'top_5_accuracy'])
def get_training_outputs_from_logs(logs, print_training_accuracy):
  """Parse a training run's stdout lines into a list of TrainingOutputs.

  Args:
    logs: A list of strings, each a line from tf_cnn_benchmarks' standard
      output during training. Only lines in the form:
        10 images/sec: 14.2 +/- 0.0 (jitter = 0.0) 7.020
      are parsed (the line may also contain the training accuracies).
    print_training_accuracy: The value of the param print_training_accuracy.

  Returns:
    A list of TrainingOutputs, one per matching log line. top_1_accuracy and
    top_5_accuracy are -1 when the line carries no accuracies.
  """
  parsed = []
  for line in logs:
    if 'images/sec' not in line or '+/-' not in line:
      continue
    fields = line.split()
    if print_training_accuracy:
      # e.g.: 10 images/sec: 0.2 +/- 0.0 (jitter = 0.0) 6.908 0.500 1.000
      assert len(fields) == 11
      top_1 = float(fields[9])
      top_5 = float(fields[10])
    else:
      # e.g.: 10 images/sec: 0.2 +/- 0.0 (jitter = 0.0) 6.908
      assert len(fields) == 9
      top_1 = -1
      top_5 = -1
    parsed.append(TrainingOutput(loss=float(fields[8]),
                                 top_1_accuracy=top_1,
                                 top_5_accuracy=top_5))
  assert parsed
  return parsed
def get_evaluation_outputs_from_logs(logs):
  """Parse an evaluation run's stdout into top-1 and top-5 accuracies.

  Args:
    logs: A list of strings, each a line from tf_cnn_benchmarks' standard
      output during evaluation. Only a line in the form:
        Accuracy @ 1 = 0.5000 Accuracy @ 5 = 1.0000 [80 examples]
      is parsed, and the logs must contain exactly one such line.

  Returns:
    An EvalOutput.
  """
  top_1 = None
  top_5 = None
  for line in logs:
    if 'Accuracy @ ' not in line:
      continue
    # e.g.: Accuracy @ 1 = 0.5000 Accuracy @ 5 = 1.0000 [80 examples]
    fields = line.split()
    assert len(fields) == 12
    # Exactly one accuracy line is allowed per run.
    assert top_1 is None
    assert top_5 is None
    top_1 = float(fields[4])
    top_5 = float(fields[9])
  assert top_1 is not None
  assert top_5 is not None
  return EvalOutput(top_1, top_5)
def check_training_outputs_are_reasonable(testcase, training_outputs,
                                          print_training_accuracy,
                                          max_final_loss=10.,
                                          previous_final_loss=None):
  """Assert that the outputs from training a model are reasonable.

  The final top-1 and top-5 accuracies are asserted to be 1, so the training
  dataset must be trivial to learn (e.g. a black image labeled 0 and a white
  image labeled 1).

  Args:
    testcase: A tf.test.TestCase used for assertions.
    training_outputs: A list of TrainingOutputs, as returned from
      get_training_outputs_from_logs().
    print_training_accuracy: Whether training accuracies were printed and
      stored in training_outputs.
    max_final_loss: The final output's loss is asserted to be at most this
      value (skip the check by passing None).
    previous_final_loss: If training resumed from a checkpoint, the final
      loss of the previous run that saved the checkpoint.
  """
  if previous_final_loss is not None:
    # The resumed run's first loss may jitter slightly, so allow 1% above the
    # checkpointed loss.
    testcase.assertLessEqual(training_outputs[0].loss,
                             previous_final_loss * 1.01)
  for entry in training_outputs:
    testcase.assertLessEqual(entry.loss, 100.)
  final = training_outputs[-1]
  if print_training_accuracy:
    testcase.assertEqual(final.top_1_accuracy, 1.0)
    testcase.assertEqual(final.top_5_accuracy, 1.0)
  if max_final_loss is not None:
    testcase.assertLessEqual(final.loss, max_final_loss)
def train_and_eval(testcase,
                   run_fn,
                   params,
                   check_output_values,
                   max_final_loss=10.,
                   skip=None):
  """Trains a model then evaluates it.

  This function should be used to verify training and evaluating
  BenchmarkCNN works without crashing and that it outputs reasonable
  values. BenchmarkCNN will be run three times. First, it will train a
  model from scratch, saving a checkpoint. Second, it will load the checkpoint
  to continue training. Finally, it evaluates based on the loaded checkpoint.

  Args:
    testcase: A tf.test.TestCase used for assertions.
    run_fn: Must run `BenchmarkCNN` exactly once. BenchmarkCNN is
      never used directly, but instead is only run through `run_fn`. `run_fn`
      has the signature (run_type, inner_params) -> output_list, where:
        * run_type is a string indicating how BenchmarkCNN will be run.
          Either 'InitialTraining', 'TrainingFromCheckpoint' or 'Evaluation'.
        * inner_params is the params BenchmarkCNN should be run with.
        * output_list[i] is a list of lines from the ith worker's stdout.
    params: The params BenchmarkCNN will be run with.
      Will be passed to `run_fn` slightly modified in order to run with both
      training and evaluation.
    check_output_values: Whether the outputs of the workers, such as training
      accuracy, should be checked to make sure their values are reasonable.
      Fails an assert on `testcase` if a check fails.
    max_final_loss: The loss of the final training output is asserted to be at
      most this value for both training runs.
    skip: If 'eval', evaluation is not done. if
      'eval_and_train_from_checkpoint', evaluation and training from a
      checkpoint are both not done.
  """
  assert not skip or skip in {'eval', 'eval_and_train_from_checkpoint'}

  # Part 1: Train from scratch.
  tf.logging.info('Training model from scratch')
  print_training_accuracy = (params.print_training_accuracy or
                             params.forward_only)
  initial_train_logs = run_fn('InitialTraining', params)
  testcase.assertGreaterEqual(len(initial_train_logs), 1)
  for lines in initial_train_logs:
    initial_train_outputs = get_training_outputs_from_logs(
        lines, print_training_accuracy)
    # Only in fully synchronous mode is the per-worker step count
    # deterministic enough to assert an exact output count.
    if params.cross_replica_sync and params.batch_group_size == 1:
      testcase.assertEqual(len(initial_train_outputs), params.num_batches)
    if check_output_values:
      check_training_outputs_are_reasonable(testcase, initial_train_outputs,
                                            print_training_accuracy,
                                            max_final_loss=max_final_loss)
  if params.train_dir is not None:
    # Snapshot the checkpoint directory so Part 2 can verify that a new
    # checkpoint gets written.
    train_dir_entries = set(os.listdir(params.train_dir))
    testcase.assertGreater(len(train_dir_entries), 0)
  else:
    train_dir_entries = None

  if skip == 'eval_and_train_from_checkpoint':
    return

  # Part 2: Train from the loaded checkpoint.
  testcase.assertIsNotNone(train_dir_entries)
  tf.logging.info('Training model from loaded checkpoint')
  # Run for same number of batches as before.
  # NOTE(review): num_batches acts as a global-step target, so doubling it
  # trains the same number of additional steps after resuming (the assertion
  # below divides by 2 accordingly) — confirm against BenchmarkCNN semantics.
  params = params._replace(num_batches=params.num_batches * 2)
  train_logs_from_ckpt = run_fn('TrainingFromCheckpoint', params)
  testcase.assertGreaterEqual(len(train_logs_from_ckpt), 1)
  for lines in train_logs_from_ckpt:
    train_outputs_from_ckpt = get_training_outputs_from_logs(
        lines, print_training_accuracy)
    if params.cross_replica_sync and params.batch_group_size == 1:
      testcase.assertEqual(len(train_outputs_from_ckpt),
                           params.num_batches // 2 - params.num_warmup_batches)
    if check_output_values:
      check_training_outputs_are_reasonable(
          testcase, train_outputs_from_ckpt, print_training_accuracy,
          max_final_loss=max_final_loss,
          previous_final_loss=initial_train_outputs[-1].loss)
  # Ensure a new checkpoint was written out.
  testcase.assertNotEqual(train_dir_entries, set(os.listdir(params.train_dir)))

  if skip == 'eval':
    return

  # Part 3: Evaluate from the loaded checkpoint.
  tf.logging.info('Evaluating model from checkpoint')
  params = params._replace(num_batches=params.num_batches // 2, eval=True)
  eval_logs = run_fn('Evaluation', params)
  testcase.assertGreaterEqual(len(eval_logs), 1)
  for lines in eval_logs:
    top_1_accuracy, top_5_accuracy = get_evaluation_outputs_from_logs(lines)
    if check_output_values:
      # The dataset is trivially learnable, so require perfect accuracy.
      testcase.assertEqual(top_1_accuracy, 1.0)
      testcase.assertEqual(top_5_accuracy, 1.0)
def get_temp_dir(dir_name):
  """Create and return a fresh subdirectory under the test temp directory."""
  path = os.path.join(test.get_temp_dir(), dir_name)
  os.mkdir(path)
  return path
def get_params(train_dir_name):
  """Return a small, fast parameter set suitable for a short training run.

  Args:
    train_dir_name: Name of the temp subdirectory to use as train_dir.
  """
  overrides = {
      'batch_size': 2,
      'display_every': 1,
      'init_learning_rate': 0.005,
      'model': 'trivial',
      'num_batches': 20,
      'num_gpus': 2,
      'num_warmup_batches': 5,
      'optimizer': 'sgd',
      'print_training_accuracy': True,
      'train_dir': get_temp_dir(train_dir_name),
      'variable_update': 'parameter_server',
      'weight_decay': 0,
  }
  return benchmark_cnn.make_params(**overrides)
def get_var_update_params():
  """Return the parameter set used when testing variable updates."""
  overrides = {
      'batch_size': 2,
      'model': 'test_model',
      'num_gpus': 2,
      'display_every': 1,
      'num_warmup_batches': 0,
      'num_batches': 4,
      'weight_decay': 2 ** -4,
      'init_learning_rate': 2 ** -4,
      'optimizer': 'sgd',
  }
  return benchmark_cnn.make_params(**overrides)
def get_fake_var_update_inputs():
  """Return fake 1x1 input images for variable update tests.

  BenchmarkCNN divides images by 127.5 and subtracts 1.0, so after
  preprocessing these values become -1., 0., 1., ..., 14.
  """
  values = 127.5 * np.arange(16)
  return np.resize(values, (16, 1, 1, 1))
def _worker_batches_in_numpy_array(numpy_inputs, batch_size, shift_ratio):
  """Yield successive batches from a numpy array for one worker, forever.

  The array is first rolled by shift_ratio so each worker starts at a
  different offset; batches then cycle through the array indefinitely.
  """
  numpy_inputs = cnn_util.roll_numpy_batches(numpy_inputs, batch_size,
                                             shift_ratio)
  total = numpy_inputs.shape[0]
  assert total % batch_size == 0
  start = 0
  while True:
    yield numpy_inputs[start:start + batch_size, ...]
    start = (start + batch_size) % total
def manually_compute_losses(numpy_inputs, inputs_placeholder, loss, num_workers,
                            params):
  """Manually compute the losses each worker should report in tf_cnn_benchmarks.

  This function essentially simulates tf_cnn_benchmarks, computing what the loss
  of each worker should be. The caller should create a model, that takes in
  images from `inputs_placeholder`, a tf.placeholder, and computes `loss`.

  This function, and all ops passed to this function, must be run under a
  tf.device('cpu:0') context manager.

  Non-SGD optimizers are not supported with multiple workers.

  Args:
    numpy_inputs: A Numpy array to use as the input images.
    inputs_placeholder: A tf.placeholder tensor, where input images can be fed
      into.
    loss: A scalar tensor representing the loss of the model, which is obtained
      from the input images in inputs_placeholder.
    num_workers: How many workers should be simulated.
    params: Params tuple. This doesn't have to have information about the
      distributed cluster, such as --num_workers, as num_workers is passed in
      separately.

  Returns:
    A list of list of losses. return_value[i][j] is the loss of the ith worker
    after the jth step.
  """
  batch_size = params.batch_size * params.num_gpus
  assert numpy_inputs.shape[0] % (num_workers * batch_size) == 0
  # Mirror BenchmarkCNN's loss composition: base loss plus L2 weight decay.
  l2_loss = tf.add_n([tf.nn.l2_loss(x) for x in tf.trainable_variables()])
  total_loss = loss + params.weight_decay * l2_loss
  reported_loss = (loss if params.loss_type_to_report == 'base_loss'
                   else total_loss)
  gradient_multiplier = 1
  if params.variable_update in ('replicated', 'distributed_all_reduce'):
    # In certain variable updates, tf_cnn_benchmarks add the gradients of the
    # GPUs instead of taking their mean, making the gradients effectively
    # params.num_gpu times higher.
    # TODO(b/62722498): Make all variable updates consistent.
    gradient_multiplier = params.num_gpus

  opt = benchmark_cnn.get_optimizer(params, params.init_learning_rate)
  grad_vars = opt.compute_gradients(
      total_loss, grad_loss=tf.constant(gradient_multiplier, dtype=tf.float32))
  grads = [g for g, _ in grad_vars]
  # We apply gradients from a placeholder. That way, we can first compute the
  # gradients from each worker, then afterwards apply them one by one by feeding
  # them into the placeholder.
  placeholder_grad_vars = [(tf.placeholder(g.dtype, g.shape), v)
                           for g, v in grad_vars]
  placeholder_grads = [g for g, _ in placeholder_grad_vars]
  apply_grads_op = opt.apply_gradients(placeholder_grad_vars)

  batch_iterators = [_worker_batches_in_numpy_array(numpy_inputs, batch_size,
                                                    shift_ratio=i / num_workers)
                     for i in range(num_workers)]

  # Set the GPU count to 0, to avoid taking all the GPU memory. Unfortunately,
  # doing so still takes up about ~1GB for some reason.
  with tf.Session(config=tf.ConfigProto(device_count={'GPU': 0})) as sess:
    sess.run(tf.global_variables_initializer())
    losses = [[] for _ in range(num_workers)]
    for i in range(params.num_batches):
      computed_grads = []
      for j in range(num_workers):
        batch_feed = next(batch_iterators[j])
        # Same preprocessing BenchmarkCNN applies: scale to [-1, +...].
        batch_feed = batch_feed / 127.5 - 1
        worker_loss, worker_grads = sess.run((reported_loss, grads),
                                             {inputs_placeholder: batch_feed})
        losses[j].append(worker_loss)
        computed_grads.append(worker_grads)
      for worker_grads in computed_grads:
        # TODO(reedwm): With multiple workers, applying the gradients
        # sequentially per worker is not equivalent to what tf_cnn_benchmarks
        # does when the optmizer is not SGD. Therefore, this currently does not
        # work currently when num_workers > 1 and params.optimizer != 'sgd'.
        feed_dict = dict(zip(placeholder_grads, worker_grads))
        sess.run(apply_grads_op, feed_dict)
  return losses
class TestCNNModel(model.CNNModel):
  """A simple model used for testing.

  The input is a 1-channel 1x1 image, consisting of a single number. The model
  has two scalar variables: A and B, initialized to 1 and 2 respectively. Given
  an image x, the loss is defined as:

      loss = x * A * B
  """

  def __init__(self):
    super(TestCNNModel, self).__init__(
        'test_cnn_model', image_size=1, batch_size=1, learning_rate=1)

  # Initial values of the model's two scalar variables.
  VAR_A_INITIAL_VALUE = 1.
  VAR_B_INITIAL_VALUE = 2.

  def add_inference(self, cnn):
    # This model only supports 1x1 images with 1 channel
    assert cnn.top_layer.shape[1:] == (1, 1, 1)
    # Multiply by variable A.
    # Each 1x1x1x1 conv with a constant-initialized kernel and no bias or
    # activation is just a scalar multiplication by that variable.
    with tf.name_scope('mult_by_var_A'):
      cnn.conv(1, 1, 1, 1, 1, use_batch_norm=None, activation=None, bias=None,
               kernel_initializer=tf.constant_initializer(
                   self.VAR_A_INITIAL_VALUE))
    # Multiply by variable B.
    with tf.name_scope('mult_by_var_B'):
      cnn.conv(1, 1, 1, 1, 1, use_batch_norm=None, activation=None, bias=None,
               kernel_initializer=tf.constant_initializer(
                   self.VAR_B_INITIAL_VALUE))
    with tf.name_scope('reshape_to_scalar'):
      cnn.reshape([-1, 1])

  def skip_final_affine_layer(self):
    # Keep the network as exactly x * A * B, with no trailing dense layer.
    return True

  def loss_function(self, build_network_result, labels):
    # Labels are irrelevant: the loss is the mean of the network output.
    del labels
    return tf.reduce_mean(build_network_result.logits)

  def manually_compute_losses(self, inputs, num_workers, params):
    # Rebuild the x * A * B computation with plain variables so the expected
    # losses can be computed without going through BenchmarkCNN.
    with tf.Graph().as_default(), tf.device('/cpu:0'):
      a = tf.Variable(self.VAR_A_INITIAL_VALUE, name='A')
      b = tf.Variable(self.VAR_B_INITIAL_VALUE, name='B')
      inputs_placeholder = tf.placeholder(tf.float32,
                                          (None, 1, 1, 1),
                                          name='inputs_placeholder')
      inputs_reshaped = tf.reshape(inputs_placeholder, (-1, 1))
      loss = self.loss_function(
          model.BuildNetworkResult(logits=inputs_reshaped * a * b,
                                   extra_info=None),
          None)
      return manually_compute_losses(inputs, inputs_placeholder, loss,
                                     num_workers, params)
class TestDataSet(datasets.ImageDataset):
  """A Dataset consisting of 1x1 images with a depth of 1."""

  def __init__(self, height=1, width=1, depth=1):
    super(TestDataSet, self).__init__('test_dataset', height=height,
                                      width=width, depth=depth, data_dir=None,
                                      queue_runner_required=True, num_classes=1)

  def num_examples_per_epoch(self, subset='train'):
    # Same size for every subset; the dataset holds a single example.
    del subset
    return 1

  def get_input_preprocessor(self, input_preprocessor='default'):
    # Always use the test preprocessor, regardless of the requested name.
    return preprocessing.TestImagePreprocessor

  def use_synthetic_gpu_inputs(self):
    return False
| StarcoderdataPython |
3393935 | # Hack 1: InfoDB lists. Build your own/personalized InfoDb with a list length > 3, create list within a list as illustrated with Owns_Cars
# Personal "InfoDb": a list of dictionary records, one per person. Each
# record holds name/residence fields plus a nested list of owned shoes.
# Some records (e.g. Daniel's) carry extra optional keys such as DOB and
# Sports.
InfoDb = []

# List with dictionary records placed in a list
InfoDb.append({
    "FirstName": "Ben",
    "LastName": "Holland",
    "Residence": "San Diego",
    "Owns_Shoes":["Vans", "Timberland", "Converse"]
})

InfoDb.append({
    "FirstName": "Jayson",
    "LastName": "Borg",
    "Residence": "San Diego",
    "Owns_Shoes":["Timberland","Nike","Vans", "Converse"]
})

InfoDb.append({
    "FirstName": "Anthony",
    "LastName": "Pacheco",
    "Residence": "San Diego",
    "Owns_Shoes":["Vans","Nike"]
})

InfoDb.append({
    "FirstName": "Ben",
    "LastName": "Shamloufard",
    "Residence": "San Diego",
    "Owns_Shoes":["Vans","Nike","Puma"]
})

InfoDb.append({
    "FirstName": "Kian",
    "LastName": "Pasokhi",
    "Residence": "San Diego",
    "Owns_Shoes":["Converse","Reebok", "Nike", "Vans"]
})

InfoDb.append({
    "FirstName": "Daniel",
    "LastName": "Bertino",
    "Residence": "San Diego",
    "DOB": "March 14",
    "Age": "18",
    "Sports": ["Basketball","Soccer"],
    "Fav_Food": "Pizza",
    "School": "DNHS",
    "Subjects": ["cs","stats","literature","econ"],
    "Owns_Shoes":["Addidas","Puma","Vans"]
})
def print_data(n):
    """Print the name and shoe list of InfoDb record number n."""
    record = InfoDb[n]
    print(record["FirstName"], record["LastName"])  # comma adds the space
    print("\t", "Shoes: ", end="")  # \t indents; end="" suppresses newline
    print(", ".join(record["Owns_Shoes"]))  # comma-separated shoe list
    print()
# Hack 2: InfoDB loops. Print values from the lists using three different ways: for, while, recursion
## hack 2a: def for_loop()
## hack 2b: def while_loop(0)
## hack 2c : def recursive_loop(0)
# for loop iterates on length of InfoDb
def for_loop():
    """Print every InfoDb record using a for loop over the index range."""
    print("=" * 25)
    print("For loop")
    print("-" * 25)
    for index in range(len(InfoDb)):
        print_data(index)
# while loop contains an initial n and an index incrementing statement (n += 1)
def while_loop(n):
    """Print InfoDb records from index n onward using a while loop."""
    print("=" * 25)
    print("While loop")
    print("-" * 25)
    index = n
    while index < len(InfoDb):
        print_data(index)
        index += 1
    return
def while_loop_run():
    # Convenience wrapper: run the while-loop printer from the first record.
    while_loop(0)
# recursion simulates loop incrementing on each call (n + 1) until exit condition is met
def recursive_loop(n):
    """Print InfoDb records from index n onward via recursion."""
    if n >= len(InfoDb):
        return  # base case: past the last record
    print_data(n)
    recursive_loop(n + 1)
def recursive_loop_run():
    """Print a banner, then print every record with the recursive printer."""
    for banner_line in ("=" * 25, "Recursive loop", "-" * 25):
        print(banner_line)
    recursive_loop(0)
    print("=" * 25)
def list_finder():
    """Prompt for a record index and print the matching InfoDb entry.

    Prints "Invalid index given." when the input is non-numeric, negative,
    out of range, or the record is missing an expected key. (The original
    used a bare ``except:`` and crashed on non-numeric input because the
    int() conversion happened outside the try block.)
    """
    raw = input("Which index do you want to search (0-" + str(len(InfoDb)-1) +"): ")
    print("-" * 25)
    try:
        num = int(raw)
        if num < 0:
            # Reject negatives: Python would otherwise index from the end.
            raise IndexError(num)
        record = InfoDb[num]
        # Prints all info of the given index
        print(record["FirstName"] + " " + record["LastName"])
        print("Residence: " + record["Residence"])
        print("Owns Shoes: ")
        for shoe in record["Owns_Shoes"]:
            print(" - " + shoe)
    except (ValueError, IndexError, KeyError):
        # Bad input (non-numeric / out of range) or a malformed record.
        print("Invalid index given.")
def tester():
    """Run every demo: the three loop styles, then the interactive finder."""
    for_loop()
    while_loop(0)  # requires initial index to start while
    # Bug fix: recursive_loop_run takes no arguments — the old call
    # recursive_loop_run(0) always raised TypeError.
    recursive_loop_run()
    list_finder()
# this only applies if the file is run as main
if __name__ == "__main__":
tester() | StarcoderdataPython |
366653 | from src.interface import Coord
from src.sons import Sons
from src.jeu import Jeu
def listeJoueursTri(listeJoueurs):
    """Return the players ordered from furthest along (km) to least advanced."""
    # A stable sort over the reversed list reproduces the original insertion
    # order exactly, including how ties on km are arranged.
    return sorted(reversed(listeJoueurs),
                  key=lambda joueur: joueur.km,
                  reverse=True)
class Joueur(object):
    """A player in the game (Mille Bornes-style): hand, distance, score and
    blocked/speed-limited state, plus screen-layout constants for rendering."""

    def __init__(self, nom, orientation, points, pioche):
        """Initialize a player and deal their starting hand from the deck.

        Args:
            nom: player's display name.
            orientation: table side, one of 'sud', 'nord', 'ouest', 'est'.
            points: starting score.
            pioche: draw pile (list of card names); 6 cards are popped from it.
        """
        self.nom = nom
        self.orientation = orientation
        self.km = 0
        self.points = points
        self.main = []
        self.distribueJoueur(pioche)
        # One pile per board slot; slot 0 is the attack/remedy pile (starts
        # with a red light), slot 1 the speed-limit pile, and the remaining
        # slots hold km cards indexed via Jeu.nomsEmplacements (slot 2 holds
        # the '200' cards, limited to two — see joue()).
        self.cartesEmplacements = [['feu_rouge'], [], [], [], [], [], []]
        self.bloque = True
        self.limite = False
        self.bottes = []
        # Screen-layout constants depend on which side of the table the
        # player sits on.
        if orientation == 'sud':
            self.emplacements_x = 350
            self.emplacements_y = Coord.length - 310
            self.emplacements_width = 110 * 7
            self.emplacements_length = 150
            self.rotation = False
            self.coord_nom = (int(Coord.width / 2) - 50, Coord.length - 340)
        elif orientation == 'nord':
            self.emplacements_x = 350
            self.emplacements_y = 50
            self.emplacements_width = 110 * 7
            self.emplacements_length = 150
            self.rotation = False
            self.coord_nom = (int(Coord.width / 2) - 50, 220)
        elif orientation == 'ouest':
            self.emplacements_x = 50
            self.emplacements_y = 20
            self.emplacements_width = 150
            self.emplacements_length = 110 * 7
            self.rotation = True
            self.coord_nom = (240, int(Coord.length / 2))
        elif orientation == 'est':
            self.emplacements_x = Coord.width - 200
            self.emplacements_y = 20
            self.emplacements_width = 150
            self.emplacements_length = 110 * 7
            self.rotation = True
            self.coord_nom = (Coord.width - 400, int(Coord.length / 2))
        else:
            print('Erreur : mauvaise orientation.')

    def trieMain(self):
        """Sort the hand in place, placing from left to right the km cards
        (descending), the attacks, the remedies, then the safeties (bottes)."""
        kmMain, attaquesMain, paradesMain, bottesMain = [], [], [], []
        for carte in self.main:
            if carte in Jeu.listeKM:
                # Insert km cards in descending numeric order.
                i = 0
                while len(kmMain) > i and int(kmMain[i]) >= int(carte):
                    i += 1
                kmMain.insert(i, carte)
            elif carte in Jeu.listeAttaques:
                # Group duplicate attack cards together.
                if carte in attaquesMain:
                    attaquesMain.insert(attaquesMain.index(carte), carte)
                else:
                    attaquesMain.append(carte)
            elif carte in Jeu.listeParades:
                # Group duplicate remedy cards together.
                if carte in paradesMain:
                    paradesMain.insert(paradesMain.index(carte), carte)
                else:
                    paradesMain.append(carte)
            elif carte in Jeu.listeBottes:
                bottesMain.append(carte)
        self.main = kmMain + attaquesMain + paradesMain + bottesMain

    def distribueJoueur(self, pioche):
        """Deal 6 cards from the deck to the player at the start of the game."""
        for _ in range(6):
            self.main.append(pioche.pop(0))
        self.trieMain()

    def joue(self, listeJoueurs, pioche, pot):
        """Play one automatic turn for this player.

        Draws a card, then tries in order: play km, counter an attack or a
        speed limit (with a safety — "coup fourré" — or a remedy), attack
        another player, play a safety, and finally discard.

        Returns:
            A (message, botte_played) tuple: the French status message to
            display, and True only when a safety was just played (which
            grants an extra turn / bonus points per the return branches).
        """
        # Draw a card from the deck.
        self.main.append(pioche.pop(0))
        # Choose the action.
        if not self.bloque:
            for i in Jeu.listeKM:
                # Conditions: at most 50 km while speed-limited, never exceed
                # 1000 km total, and at most two '200' cards.
                if (i in self.main and (not self.limite or int(i) <= 50)
                        and int(i) + self.km <= 1000
                        and (i != '200'
                             or len(self.cartesEmplacements[2]) < 2)):
                    # Play a distance card.
                    self.cartesEmplacements[
                        Jeu.nomsEmplacements.index(i)].append(
                            self.main.pop(self.main.index(i)))
                    self.km += int(i)
                    if i == '200':
                        Sons.woohooSound.play()
                    return self.nom + " a posé " + i + " km.", False
        else:
            if (Jeu.dicoAttaquesBottes[self.cartesEmplacements[0][-1]]
                    in self.main):
                # Counter the attack with a safety (coup fourré: 400 points).
                self.bottes.append(self.main.pop(
                    self.main.index(
                        Jeu.dicoAttaquesBottes[
                            self.cartesEmplacements[0][-1]])))
                pot.append(self.cartesEmplacements[0].pop(-1))
                self.bloque = False
                if self.bottes[-1] == 'vehicule_prioritaire':
                    Sons.sireneSound.play()
                    if self.limite:
                        # Coup fourré case: red light plus speed limit — the
                        # priority vehicle clears both.
                        pot.append(self.cartesEmplacements[1].pop(-1))
                        self.limite = False
                else:
                    Sons.winningSound.play()
                self.points += 400
                return ''.join([
                    self.nom,
                    " a posé la botte : ",
                    Jeu.dicoNomsCartes[self.bottes[-1]],
                    " ! 400 points"]), True
            elif (Jeu.dicoAttaquesParades[self.cartesEmplacements[0][-1]]
                    in self.main):
                # Counter the attack with a remedy (parade).
                self.cartesEmplacements[0].append(
                    self.main.pop(self.main.index(
                        Jeu.dicoAttaquesParades[
                            self.cartesEmplacements[0][-1]])))
                self.bloque = False
                if self.cartesEmplacements[0][-1] == 'feu_vert':
                    Sons.startSound.play()
                return ''.join([
                    self.nom,
                    " a contré l'attaque : ",
                    Jeu.dicoNomsCartes[self.cartesEmplacements[0][-1]],
                    "."]), False
        if (self.limite
                and Jeu.dicoAttaquesBottes[self.cartesEmplacements[1][-1]]
                in self.main):
            # Counter the speed limit with a safety (400 points).
            self.bottes.append(self.main.pop(
                self.main.index(
                    Jeu.dicoAttaquesBottes[
                        self.cartesEmplacements[1][-1]])))
            pot.append(self.cartesEmplacements[1].pop(-1))
            self.limite = False
            Sons.sireneSound.play()
            self.points += 400
            return ''.join([
                self.nom,
                " a posé la botte : ",
                Jeu.dicoNomsCartes[self.bottes[-1]],
                " ! 400 points"]), True
        elif (self.limite
              and Jeu.dicoAttaquesParades[self.cartesEmplacements[1][-1]]
              in self.main):
            # Counter the speed limit with an end-of-limit card.
            self.cartesEmplacements[1].append(
                self.main.pop(self.main.index(
                    Jeu.dicoAttaquesParades[
                        self.cartesEmplacements[1][-1]])))
            self.limite = False
            return self.nom + " a contré la limite de vitesse.", False
        # Check whether the player can attack an opponent (most advanced
        # opponents are targeted first, via listeJoueursTri).
        for i in Jeu.listeAttaques:
            if i in self.main:
                for joueur in listeJoueursTri(listeJoueurs):
                    if joueur.orientation != self.orientation:
                        if i != 'limite_de_vitesse':
                            if ((len(joueur.cartesEmplacements[0]) == 0
                                 or joueur.cartesEmplacements[0][-1]
                                 in Jeu.listeParades)
                                    and Jeu.dicoAttaquesBottes[i]
                                    not in joueur.bottes):
                                # Play an attack on the opponent.
                                joueur.cartesEmplacements[0].append(
                                    self.main.pop(self.main.index(i)))
                                joueur.bloque = True
                                if (joueur.cartesEmplacements[0][-1]
                                        == 'accident'):
                                    Sons.crashSound.play()
                                elif (joueur.cartesEmplacements[0][-1]
                                      == 'panne_essence'):
                                    Sons.panneSound.play(maxtime=3000)
                                return ''.join([
                                    self.nom,
                                    " a attaqué ",
                                    joueur.nom,
                                    " : ",
                                    Jeu.dicoNomsCartes[
                                        joueur.cartesEmplacements[0][-1]],
                                    "."]), False
                        else:
                            if ((len(joueur.cartesEmplacements[1]) == 0
                                 or joueur.cartesEmplacements[1][-1]
                                 in Jeu.listeParades)
                                    and Jeu.dicoAttaquesBottes[i]
                                    not in joueur.bottes):
                                # Play a speed limit on the opponent.
                                joueur.cartesEmplacements[1].append(
                                    self.main.pop(self.main.index(i)))
                                joueur.limite = True
                                Sons.hornSound.play()
                                return ''.join([
                                    self.nom,
                                    " a attaqué ",
                                    joueur.nom,
                                    " : ",
                                    Jeu.dicoNomsCartes[
                                        joueur.cartesEmplacements[1][-1]],
                                    "."]), False
        # Play a safety outright (no coup fourré: only 100 points).
        for i in Jeu.listeBottes:
            if i in self.main:
                self.bottes.append(self.main.pop(self.main.index(i)))
                if i == 'vehicule_prioritaire':
                    Sons.sireneSound.play()
                else:
                    Sons.winningSound.play()
                self.points += 100
                return ''.join([
                    self.nom,
                    " a posé la botte : ",
                    Jeu.dicoNomsCartes[i],
                    " ! 100 points"]), True
        # Otherwise the player must discard a card.
        for i in self.main:
            if (i in Jeu.listeParades
                    and Jeu.dicoParadesBottes[i] in self.bottes):
                # Discard a remedy made redundant by an owned safety.
                pot.append(self.main.pop(self.main.index(i)))
                return ''.join([
                    self.nom,
                    " a jeté la carte : ",
                    Jeu.dicoNomsCartes[i],
                    "."]), False
        # Discard priority: low km first, then remedies, mid km, attacks,
        # and only then a precious '200'.
        listeOrdreAJetter = (['25', '50'] + Jeu.listeParades + ['75', '100']
                             + Jeu.listeAttaques + ['200'])
        for i in listeOrdreAJetter:
            if i in self.main:
                # Discard a card.
                pot.append(self.main.pop(self.main.index(i)))
                return ''.join([
                    self.nom,
                    " a jeté la carte : ",
                    Jeu.dicoNomsCartes[i],
                    "."]), False
        return '', False

    def phaseCritique(self):
        """Play a turn when the draw pile is empty: only km cards can still
        be played; otherwise the player becomes blocked. Returns the French
        status message to display."""
        if not self.bloque:
            for i in Jeu.listeKM:
                if (i in self.main and (not self.limite or int(i) <= 50)
                        and int(i) + self.km <= 1000
                        and (i != '200'
                             or len(self.cartesEmplacements[2]) < 2)):
                    # Play a distance card.
                    self.cartesEmplacements[
                        Jeu.nomsEmplacements.index(i)].append(
                            self.main.pop(self.main.index(i)))
                    self.km += int(i)
                    if i == '200':
                        Sons.woohooSound.play()
                    return self.nom + " a posé " + i + " km."
        # The player is blocked for the rest of the endgame.
        self.bloque = True
        return self.nom + " est bloqué."
| StarcoderdataPython |
def load_file_lines(file):
    """ Loads a file as a list of lines.

    Args:
        file (str): The path of the file.

    Returns:
        list of str: A list of lines in the file, without trailing newlines.
    """
    with open(file, mode='r') as handle:
        return [line.rstrip('\n') for line in handle]
def load_float_file(file):
    """ Loads a data file of newline-delimited floating-point values.

    Args:
        file (str): The path of the file.

    Returns:
        list of float: The data from the file.
    """
    with open(file, 'r') as handle:
        return [float(entry) for entry in handle]
| StarcoderdataPython |
246735 | __author__ = "<NAME>, <NAME>"
# Module metadata for this PyRosetta distribution wrapper script.
# NOTE(review): the author/email placeholders (<NAME>, <EMAIL>) look
# anonymized — fill in the real values.
__copyright__ = None
__credits__ = ["<NAME>", "Rosettacommons"]
__license__ = "MIT"
__version__ = "0.1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Prototype"
# Python standard library
import argparse, binascii, bz2, collections, json, os, sys
from typing import * # TODO explicit imports
# 3rd party library imports
# Rosetta library imports
import pyrosetta
from pyrosetta.distributed import cluster
import pyrosetta.distributed.io as io
from pyrosetta.distributed.packed_pose.core import PackedPose
from pyrosetta.distributed import maybe_init, requires_init
"""
This script wraps a pyrosetta function and manages outputs in a manner similar to
pyrosetta distributed. It takes three required arguments:
-k or --kwargs: a JSON-formatted string as the single argument on the command line,
read as a dict of keyword arguments.
-f or --function: the name of the function to be wrapped.
-d or --directory: the directory, the absolute path to the .py file, of the function.
This can be an empty string, in which case the script will just try to import the
function from the current working directory and installed libraries.
"""
# Command-line interface for the wrapper.
# NOTE(review): the module docstring describes -f/--function and
# -d/--directory arguments, but only -k/--kwargs is defined here — confirm
# the intended CLI and add the missing arguments or fix the docstring.
parser = argparse.ArgumentParser(
    description="Use to distribute a pyrosetta function on a distributed system."
)
# required arguments
parser.add_argument("-k", "--kwargs", type=str, default="", required=True)

# flags = "-out:level 300 -corrections::beta_nov16 true -holes:dalphaball /home/bcov/ppi/tutorial_build/main/source/external/DAlpahBall/DAlphaBall.gcc -indexed_structure_store:fragment_store /net/databases/VALL_clustered/connect_chains/ss_grouped_vall_helix_shortLoop.h5"
def main():
    # Entry point: parse CLI kwargs, initialize PyRosetta, run the wrapped
    # function, and write the resulting pose as bz2-compressed PDB plus a
    # JSON scores file named by a random hex handle.
    if len(sys.argv) == 1:
        parser.print_help()
    else:
        pass
    params = vars(parser.parse_args(sys.argv[1:]))
    print("Run will proceed with the following kwargs:")
    print(params)
    # NOTE(review): params is {'kwargs': '<json string>'} here, so maybe_init
    # receives kwargs="<json string>" — confirm this is the intended way to
    # initialize (the -k value is documented as a JSON dict of keyword args).
    pyrosetta.distributed.maybe_init(**params)
    # Random 48-hex-char handle used to name the output files.
    handle = str(binascii.b2a_hex(os.urandom(24)).decode("utf-8"))
    # NOTE(review): `detail_design` and `detail_kwargs` are not defined
    # anywhere in this module or its imports — this line raises NameError at
    # runtime. Presumably the function named by the (missing) -f/--function
    # argument was meant to be imported and called here.
    ppose = detail_design(None, **detail_kwargs)
    if ppose is not None:
        pose = io.to_pose(ppose)
        pdbstring = io.to_pdbstring(pose)
        remark = "REMARK PyRosettaCluster: "
        scores_dict = collections.OrderedDict(sorted(pose.scores.items()))
        pdbfile_data = json.dumps(
            {
                "instance": {},
                "metadata": {},
                "scores": scores_dict,
            }
        )
        # Append the metadata remark after the PDB records, mirroring
        # PyRosettaCluster's output format.
        pdbstring_data = pdbstring + os.linesep + remark + pdbfile_data
        output_file = f"{handle}.pdb.bz2"
        with open(output_file, "wb") as f:
            f.write(bz2.compress(str.encode(pdbstring_data)))
        with open(f"{handle}.json", "w+") as f:
            print(json.dumps(dict(pose.scores)), file=f)
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
5127235 | <reponame>abhishekagarwalnpl/QHAL-copy
from typing import Dict, Optional, Tuple

import numpy as np

from . import command_unpacker, string_to_opcode
from ..quantum_simulators import IQuantumSimulator
class HALMetadata:
    """Class for storing HAL metadata items in pre-defined form.

    Attributes:
        num_qubits: Number of qubits the device exposes.
        max_depth: Maximum circuit depth supported (requires num_qubits > 0).
        native_gates: Map from gate index to a (parameter, matrix) tuple;
            every matrix must act on at most num_qubits qubits.
        connectivity: Adjacency matrix with one row per qubit.

    Raises:
        ValueError: If any metadata item is inconsistent with the others.
    """

    def __init__(
        self,
        num_qubits: int = 0,
        max_depth: int = 0,
        native_gates: Optional[Dict[int, Tuple[int, np.ndarray]]] = None,
        connectivity: Optional[np.ndarray] = None
    ):
        # Use None sentinels instead of mutable default arguments ({} and a
        # shared np.array), which would be shared across every call.
        if native_gates is None:
            native_gates = {}
        if connectivity is None:
            connectivity = np.array([])

        def _error_raiser(metadata_item: str) -> None:
            # Helper so each consistency check raises a uniform error.
            raise ValueError(
                f"Metadata item {metadata_item} inconsistent with other items!"
            )

        self.num_qubits = num_qubits

        # A depth limit is meaningless on a device with no qubits.
        if max_depth > 0 and num_qubits == 0:
            _error_raiser("max_depth")
        else:
            self.max_depth = max_depth

        # The adjacency matrix must have one row per qubit.
        if connectivity.shape[0] != num_qubits:
            _error_raiser("connectivity")
        self.connectivity = connectivity

        # Every native-gate matrix must fit on the available qubits.
        if not all(
            gate_data[1].shape[0] <= num_qubits
            for gate_data in native_gates.values()
        ):
            _error_raiser("native_gates")
        self.native_gates = native_gates
class HardwareAbstractionLayer:
    """Encapsulates a process which receives HAL commands and uses them to
    perform operations on a quantum device.

    Parameters
    ----------
    quantum_simulator : IQuantumSimulator
        Object with the IQuantumSimulator interface that accepts commands
        and returns measurement results.
    hal_metadata : HALMetadata
        Object that holds a series of metadata items using a pre-defined
        structure.
    """
    def __init__(
        self,
        quantum_simulator: IQuantumSimulator,
        hal_metadata: HALMetadata
    ):
        self._quantum_simulator = quantum_simulator
        # set up some of the metadata in correct format
        self._hal_metadata = hal_metadata
        self._encoded_metadata = {}
        # Bit 60 marks the final chunk of a streamed metadata response.
        self._final_mask = (1 << 60)
        # Metadata item tag lives in bits 61-63 (1=num_qubits, 2=max_depth,
        # 3=native gates, 4=connectivity, 5=error rates).
        self._encoded_metadata["NUM_QUBITS"] = \
            (1 << 61) + self._hal_metadata.num_qubits
        self._encoded_metadata["MAX_DEPTH"] = \
            (2 << 61) + self._hal_metadata.max_depth
        # Pre-encode one header word per native gate: tag, gate index,
        # opcode and the gate's metadata payload.
        native_gates = {}
        for i, (gate, gate_data) in enumerate(hal_metadata.native_gates.items()):
            native_gates[i] = []
            native_gates[i].append(
                (3 << 61) +
                (i << 57) +
                (string_to_opcode(gate).code << 45) +
                gate_data[0]
            )
        self._encoded_metadata["NATIVE_GATES"] = native_gates
        # useful state flags
        self._metadata_index = 0  # keep track of previously sent data chunk
        self._previous_metadata_request_index = 0  # previous metadata request index

    def accept_command(self, hal_command: np.uint64) -> np.uint64:
        """Interface for ``quantum_simulator.accept_command``.

        Parameters
        ----------
        hal_command : uint64
            The HAL command to deconstruct and use to perform actions.

        Returns
        -------
        uint64
            Result of a measurement command or metadata request.
            NOTE: Metadata requests are designed to be streamed back as a
            series of 64-bit data chunks, while the caller waits for a flag
            that specifies the final chunk has been sent.
            Since this is a Python implementation we can't stream back
            multiple returns from a single function call, so the caller must
            simulate receiving the stream by sending multiple metadata request
            calls until the "final" flag is received.
        """
        # check if we've received a metadata request
        opcode, _, param, idx = command_unpacker(hal_command)

        if opcode == "REQUEST_METADATA":
            # reset the internal counter for streaming back data
            if param[0] != self._previous_metadata_request_index:
                # BUG FIX: this line previously read "self._metadata_index
                # == 0" (a no-op comparison), so the stream position was
                # never reset when the caller switched metadata items.
                self._metadata_index = 0

            if param[0] == 1:  # num_qubits request
                return self._encoded_metadata["NUM_QUBITS"] + self._final_mask

            elif param[0] == 2:  # max depth request
                return self._encoded_metadata["MAX_DEPTH"] + self._final_mask

            elif param[0] == 3:  # native gate request
                self._previous_metadata_request_index = param[0]
                if len(self._encoded_metadata["NATIVE_GATES"]) == 0:
                    return (3 << 61) + self._final_mask
                gate_list = [
                    i[0] for i in list(
                        self._encoded_metadata["NATIVE_GATES"].values()
                    )
                ]
                data = gate_list[self._metadata_index]
                self._metadata_index += 1
                if self._metadata_index == len(gate_list):
                    data = data + self._final_mask  # add final flag
                    self._metadata_index = 0
                return data

            elif param[0] == 4:  # connectivity matrix request

                def encode_connectivity_mat(upper_mat_array, row_index=None):
                    """Pack non-zero (row, col) pairs, three per 64-bit word."""
                    # get all non-zero off-diagonal indexes
                    row_col_indexes = np.transpose(np.nonzero(upper_mat_array))

                    encoded_metadata = []
                    encoded_indexes = 0
                    count = 2
                    for i, row_col in enumerate(row_col_indexes):
                        if len(row_col) > 1:
                            indexes = ((row_col[0] << 10) + row_col[1])
                        else:
                            # Single-row request: the row index is implicit.
                            indexes = ((row_index << 10) + row_col[0])
                        encoded_indexes += indexes << (count * 20)
                        count -= 1
                        # Flush a word every three pairs or at the end.
                        if count == -1 or i == len(row_col_indexes) - 1:
                            encoded_metadata.append(
                                int(encoded_indexes) | (4 << 61)
                            )
                            encoded_indexes = 0
                            count = 2
                    return encoded_metadata

                if len(self._hal_metadata.connectivity) == 0:
                    return (4 << 61) + self._final_mask

                # Only the strict upper triangle is encoded (symmetric graph).
                upper_mat_array = np.triu(self._hal_metadata.connectivity, 1)

                # are we requesting a single row?
                if param[1] >> 15:
                    row_index = idx[0] + idx[1]
                    upper_mat_array = upper_mat_array[row_index]
                    # build 64-bit encoded response
                    encoded_list = encode_connectivity_mat(
                        upper_mat_array, row_index
                    )
                else:  # request the whole matrix
                    # keep internal store so we don't construct every time
                    if "CONNECTIVITY" not in self._encoded_metadata:
                        # build 64-bit encoded response
                        self._encoded_metadata["CONNECTIVITY"] = \
                            encode_connectivity_mat(upper_mat_array)
                    encoded_list = self._encoded_metadata["CONNECTIVITY"]

                self._previous_metadata_request_index = param[0]
                data = encoded_list[self._metadata_index]
                self._metadata_index += 1
                if self._metadata_index == len(encoded_list):
                    data = data + self._final_mask  # add final flag
                    self._metadata_index = 0
                return int(data)

            elif param[0] == 5:  # error rate matrix request

                def encode_error_mat(error_rate_matrix):
                    """Pack error rates as (mantissa, exp), four per word."""
                    # build up 64-bit encoded response
                    encoded_metadata = []
                    encoded_error_rates = 0
                    count = 3
                    for i, error_rate in enumerate(error_rate_matrix):
                        # encode the error rate (mantissa, exp)
                        exp = -1
                        while error_rate - int(error_rate) != 0:
                            if error_rate < 1:
                                exp += 1
                            error_rate = float(f'{error_rate:.3g}') * 10
                        encoded_error_rate = (int(error_rate) << 4) + exp
                        encoded_error_rates += \
                            int(encoded_error_rate) << (count * 14)
                        count -= 1
                        # Flush a word every four rates or at the end.
                        if count == -1 or i == len(error_rate_matrix) - 1:
                            encoded_metadata.append(
                                (5 << 61) | int(encoded_error_rates)
                            )
                            encoded_error_rates = 0
                            count = 3
                    return encoded_metadata

                if len(self._encoded_metadata["NATIVE_GATES"]) == 0:
                    return (5 << 61) + self._final_mask

                gate_index = param[1] >> 13
                diagonal = False
                error_rate_matrix = self._hal_metadata.native_gates[
                    list(self._hal_metadata.native_gates.keys())[gate_index]
                ][1]

                # are we requesting a single row?
                if (param[1] >> 12) & 1:
                    row_index = idx[0] + idx[1]
                    # set up data to be encoded
                    if len(error_rate_matrix.shape) > 1:  # 1- or 2-qubit gate?
                        mat_upper = np.triu(error_rate_matrix)
                        mat_lower = np.tril(error_rate_matrix)
                        new_mat = np.concatenate(
                            (
                                mat_upper[row_index],
                                np.transpose(mat_lower)[row_index]
                            )
                        )
                        c = np.nonzero(new_mat)
                        error_rate_matrix = new_mat[c]
                    else:
                        error_rate_matrix = [error_rate_matrix[row_index]]
                        diagonal = True
                    # build 64-bit encoded response
                    gate_data_list = encode_error_mat(error_rate_matrix)
                else:  # return the whole matrix
                    gate_data_list = self._encoded_metadata["NATIVE_GATES"][
                        gate_index
                    ][1:]
                    # if there is no encoded data yet
                    # keep internal store so we don't construct every time
                    if len(gate_data_list) == 0:
                        # 1- or 2-qubit gate?
                        if len(error_rate_matrix.shape) > 1:
                            mat_upper = np.triu(error_rate_matrix)
                            mat_lower = np.tril(error_rate_matrix)
                            new_mat = np.concatenate(
                                (mat_upper, np.transpose(mat_lower)),
                                axis=1
                            )
                            r, c = np.nonzero(new_mat)
                            error_rate_matrix = new_mat[r, c]
                        else:
                            diagonal = True
                        # build 64-bit encoded response
                        gate_data_list.extend(
                            encode_error_mat(error_rate_matrix)
                        )

                self._previous_metadata_request_index = param[0]
                data = gate_data_list[self._metadata_index]
                data = data + (diagonal << 59)  # add diagonal flag
                data = data + (gate_index << 56)  # add gate index
                if self._metadata_index == len(gate_data_list) - 1:
                    data = data + self._final_mask  # add final flag
                    self._metadata_index = 0
                else:
                    self._metadata_index += 1
                return int(data)

        else:
            # Not a metadata request: delegate to the simulator backend.
            return self._quantum_simulator.accept_command(hal_command)
| StarcoderdataPython |
4801249 | <filename>apps/netsuite/helpers.py
from datetime import datetime, timezone
import logging
from django.utils.module_loading import import_string
from rest_framework.exceptions import AuthenticationFailed
from apps.workspaces.models import Configuration, Workspace, NetSuiteCredentials
from .tasks import schedule_vendor_payment_creation, schedule_netsuite_objects_status_sync, \
schedule_reimbursements_sync
logger = logging.getLogger(__name__)
def schedule_payment_sync(configuration: Configuration):
    """
    Schedule all payment-related background syncs for one workspace.

    :param configuration: Workspace Configuration instance
    :return: None
    """
    workspace_id = configuration.workspace_id

    # Fyle -> NetSuite vendor payment creation.
    schedule_vendor_payment_creation(
        sync_fyle_to_netsuite_payments=configuration.sync_fyle_to_netsuite_payments,
        workspace_id=workspace_id
    )
    # NetSuite -> Fyle object status sync.
    schedule_netsuite_objects_status_sync(
        sync_netsuite_to_fyle_payments=configuration.sync_netsuite_to_fyle_payments,
        workspace_id=workspace_id
    )
    # NetSuite -> Fyle reimbursements sync.
    schedule_reimbursements_sync(
        sync_netsuite_to_fyle_payments=configuration.sync_netsuite_to_fyle_payments,
        workspace_id=workspace_id
    )
def check_interval_and_sync_dimension(workspace: Workspace, netsuite_credentials: NetSuiteCredentials) -> bool:
    """
    Check sync interval and sync dimension
    :param workspace: Workspace Instance
    :param netsuite_credentials: NetSuiteCredentials Instance
    return: True/False based on sync (True when a sync was performed)
    """
    if workspace.destination_synced_at:
        # NOTE(review): the guard checks destination_synced_at but the
        # interval is measured from source_synced_at -- confirm this
        # mismatch is intentional.
        time_interval = datetime.now(timezone.utc) - workspace.source_synced_at
    # Short-circuit keeps this safe: time_interval is only evaluated when
    # destination_synced_at is not None, i.e. when it was computed above.
    # Sync when never synced before, or when more than a day has passed.
    if workspace.destination_synced_at is None or time_interval.days > 0:
        sync_dimensions(netsuite_credentials, workspace.id)
        return True
    return False
def sync_dimensions(ns_credentials: NetSuiteCredentials, workspace_id: int, dimensions: list = None) -> None:
    """
    Sync NetSuite dimensions into the local database.

    :param ns_credentials: NetSuiteCredentials instance used to connect
    :param workspace_id: id of the workspace being synced
    :param dimensions: optional subset of dimension names to sync; when
        omitted (or empty) all known dimensions are synced
    :return: None

    Failures of individual dimensions are logged and do not abort the rest.
    """
    # Lazy import via dotted path avoids a circular import at module load.
    netsuite_connection = import_string('apps.netsuite.connector.NetSuiteConnector')(ns_credentials, workspace_id)
    # BUG FIX (best practice): the original used a mutable default argument
    # (dimensions: list = []); use a None sentinel instead.
    if not dimensions:
        dimensions = [
            'expense_categories', 'locations', 'vendors', 'currencies', 'classifications',
            'departments', 'employees', 'accounts', 'custom_segments', 'projects', 'customers', 'tax_items'
        ]

    for dimension in dimensions:
        try:
            # Connector exposes one sync_<dimension>() method per dimension.
            sync = getattr(netsuite_connection, 'sync_{}'.format(dimension))
            sync()
        except Exception as exception:
            logger.exception(exception)
| StarcoderdataPython |
56327 | from subprocess import Popen, PIPE
import sys
import os
def osascript(scpt):
    """Run an AppleScript source string via `osascript -`.

    Returns a (stdout, stderr) tuple of raw bytes from the child process.
    """
    process = Popen(
        ['osascript', '-'],
        stdin=PIPE,
        stdout=PIPE,
        stderr=PIPE,
    )
    out, err = process.communicate(scpt.encode('utf-8'))
    return out, err
def openTab():
    """Open a new Terminal.app tab (macOS) and cd it to the current directory.

    Uses System Events to send Cmd+T, then runs `cd <cwd>` in the new tab.
    Any AppleScript error is echoed to stderr.
    """
    script = f"""
    tell application "System Events"
        tell process "Terminal" to keystroke "t" using command down
    end
    tell application "Terminal"
        activate
        do script with command "cd {os.getcwd()}" in window 1
    end tell
    """
    stdout, stderr = osascript(script)
    if stderr:
        sys.stderr.write('Error in Applescript: {}\n'.format(stderr))
3245630 | import json
from adapters.base_adapter import Adapter
from devices.switch.on_off_switch import OnOffSwitch
class TS0012(Adapter):
    """Adapter for the TS0012 two-gang switch: a left and a right on/off endpoint."""

    def __init__(self):
        super().__init__()
        # Register both gangs with their zigbee2mqtt state property names.
        for alias, state_property in (('left', 'state_left'), ('right', 'state_right')):
            self.devices.append(OnOffSwitch(alias, state_property))

    def handle_command(self, alias, device, command, level, color):
        """Build the MQTT set-message; only the on/off state is supported."""
        topic = '/'.join([self.name, alias, 'set'])
        payload = json.dumps({
            "state": command.upper()
        })
        return {'topic': topic, 'payload': payload}
| StarcoderdataPython |
from flask import Flask, jsonify, render_template, request

# Module-level WSGI application instance used by all route decorators below.
app = Flask(__name__)
# ----------------------------------------------------------------------------------------------------------------------
@app.route('/')
def index():
    """Serve the application's base page."""
    page = render_template('base.html')
    return page
# ----------------------------------------------------------------------------------------------------------------------
@app.route('/ping', methods=['GET', 'POST'])
def ping():
    """Liveness endpoint: always answers 'pong' as JSON."""
    response = jsonify('pong')
    return response
# # ----------------------------------------------------------------------------------------------------------------------
# @app.route('/add', methods=['GET', 'POST'])
# def add():
# a = request.values.get('a', 0, type=float)
# b = request.values.get('b', 0, type=float)
# return jsonify(res = a + b)
# ----------------------------------------------------------------------------------------------------------------------
@app.route('/sim01', methods=['GET', 'POST'])
def sim01():
    """Run a small PRAM school-attendance simulation and return the probe's
    cumulative group-size message as the response body.
    """
    # Imported lazily so the (heavy) pram package is only loaded on demand.
    from pram.data import GroupSizeProbe, ProbeMsgMode
    from pram.entity import Site
    from pram.rule import GoToAndBackTimeAtRule, ResetSchoolDayRule, TimePoint
    from pram.sim import Simulation
    # Three locations; every agent starts at 'home'.
    sites = { s:Site(s) for s in ['home', 'school-a', 'school-b']}
    # Probe accumulates group sizes per site over the whole run.
    probe_grp_size_site = GroupSizeProbe.by_rel('site', Site.AT, sites.values(), msg_mode=ProbeMsgMode.CUMUL)
    # Two cohorts of 500 agents, each attending a different school,
    # simulated for 18 time steps. The school day resets at t=7.
    (Simulation().
        add().
            rule(ResetSchoolDayRule(TimePoint(7))).
            rule(GoToAndBackTimeAtRule(t_at_attr='t@school')).
            probe(probe_grp_size_site).
            commit().
        new_group(500).
            set_rel(Site.AT, sites['home']).
            set_rel('home', sites['home']).
            set_rel('school', sites['school-a']).
            commit().
        new_group(500).
            set_rel(Site.AT, sites['home']).
            set_rel('home', sites['home']).
            set_rel('school', sites['school-b']).
            commit().
        run(18)
    )
    return probe_grp_size_site.get_msg()
| StarcoderdataPython |
5085656 | import asyncio
from asgiref import sync
from . import pool
def gentask(corofunc):
    """Wrap *corofunc* so each call schedules its coroutine as an asyncio Task."""
    def schedule(*args, **kwargs):
        return asyncio.create_task(corofunc(*args, **kwargs))
    return schedule
def to_async(callback, as_task=True):
    """Turn a sync callable into an async one.

    When *as_task* is true and a worker pool exists, calls are additionally
    scheduled as asyncio Tasks instead of being awaited inline.
    """
    corofunc = sync.sync_to_async(callback)
    if as_task and pool.pool:
        return gentask(corofunc)
    return corofunc
| StarcoderdataPython |
9793468 | <filename>commercialoperator/migrations/0033_auto_20200402_1732.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2020-04-02 09:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: widen the sponsorship field's choices/labels on
    ProposalFilmingActivity (no database schema change beyond field options).
    """

    dependencies = [
        ('commercialoperator', '0032_auto_20200402_0908'),
    ]

    operations = [
        migrations.AlterField(
            model_name='proposalfilmingactivity',
            name='sponsorship',
            field=models.CharField(blank=True, choices=[('yes', 'Yes'), ('no', 'No'), ('other', 'other')], max_length=40, null=True, verbose_name='Sponsorship Type'),
        ),
    ]
| StarcoderdataPython |
185045 | import unittest
import sympy
from means.approximation.mea.eq_central_moments import eq_central_moments
from means.core import Moment
from means.util.sympyhelpers import to_sympy_matrix, assert_sympy_expressions_equal
class CentralMomentsTestCase(unittest.TestCase):
    """Regression tests for eq_central_moments against hand-derived results."""

    def test_centralmoments_using_p53model(self):
        """
        Given the p53 model hard-coded below, the result of central moment
        should match exactly the expected one.
        :return:
        """
        # Moment count vectors: 'counter' indexes the central moments,
        # 'mcounter' the raw moments (one symbol per vector).
        counter_nvecs = [[0, 0, 0], [0, 0, 2], [0, 1, 1], [0, 2, 0], [1, 0, 1], [1, 1, 0], [2, 0, 0]]
        mcounter_nvecs = [[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 0, 2], [0, 1, 1], [0, 2, 0],
                          [1, 0, 1], [1, 1, 0], [2, 0, 0]]

        counter = [Moment(c,sympy.Symbol("YU{0}".format(i))) for i,c in enumerate(counter_nvecs)]
        mcounter = [Moment(c,sympy.Symbol("y_{0}".format(i))) for i,c in enumerate(mcounter_nvecs)]

        # Jacobian-like matrix of the mean-field equations w.r.t. the moments.
        m = to_sympy_matrix([
            ['c_0 - c_1*y_0 - c_2*y_0*y_2/(c_6 + y_0)',
             0,
             0,
             0,
             'c_2*y_0/(c_6 + y_0)**2 - c_2/(c_6 + y_0)',
             0,
             '-c_2*y_0*y_2/(c_6 + y_0)**3 + c_2*y_2/(c_6 + y_0)**2'],
            [
                'c_3*y_0 - c_4*y_1',
                0,
                0,
                0,
                0,
                0,
                0],
            [
                'c_4*y_1 - c_5*y_2',
                0,
                0,
                0,
                0,
                0,
                0
            ]])

        species = to_sympy_matrix(['y_0', 'y_1', 'y_2'])

        # Reaction propensities and stoichiometry of the p53 model.
        propensities = to_sympy_matrix(['c_0',
                                        'c_1 * y_0',
                                        'c_2*y_0*y_2/(c_6 + y_0)',
                                        'c_3*y_0',
                                        'c_4*y_1',
                                        'c_5*y_2'])

        stoichiometry_matrix = to_sympy_matrix([[1, -1, -1, 0, 0, 0],
                                                [0, 0, 0, 1, -1, 0],
                                                [0, 0, 0, 0, 1, -1]])

        answer = eq_central_moments(counter, mcounter, m, species, propensities, stoichiometry_matrix, 2)

        # Hand-derived expected central-moment equations (order 2).
        expected = to_sympy_matrix([
            [" 2*c_4*y_1*y_2 + c_4*y_1 - 2*c_5*y_2**2 + c_5*y_2 - 2*y_1*(c_4*y_1 - c_5*y_2)"," -2*c_5"," 2*c_4"," 0"," 0"," 0"," 0"],
            ["c_3*y_0*y_2 + c_4*y_1**2 - c_4*y_1*y_2 - c_4*y_1 - c_5*y_1*y_2 - y_1*(c_3*y_0 - c_4*y_1) - y_2*(c_4*y_1 - c_5*y_2)"," 0"," -c_4 - c_5"," c_4"," c_3"," 0"," 0"],
            ["2*c_3*y_0*y_1 + c_3*y_0 - 2*c_4*y_1**2 + c_4*y_1 - 2*y_2*(c_3*y_0 - c_4*y_1)"," 0"," 0"," -2*c_4"," 0"," 2*c_3","0"],
            ["c_0*y_2 - c_1*y_0*y_2 - c_2*y_0*y_2**2/(c_6 + y_0) + c_4*y_0*y_1 - c_5*y_0*y_2 - y_1*(c_0 - c_1*y_0 - c_2*y_0*y_2/(c_6 + y_0)) - y_3*(c_4*y_1 - c_5*y_2)"," -c_2*y_0/(c_6 + y_0)"," 0"," 0"," -c_1 + 2*c_2*y_0*y_2/(c_6 + y_0)**2 - 2*c_2*y_2/(c_6 + y_0) - c_5 - y_1*(c_2*y_0/(c_6 + y_0)**2 - c_2/(c_6 + y_0))","c_4"," -c_2*y_0*y_2**2/(c_6 + y_0)**3 + c_2*y_2**2/(c_6 + y_0)**2 - y_1*(-c_2*y_0*y_2/(c_6 + y_0)**3 + c_2*y_2/(c_6 + y_0)**2)"],
            ["c_0*y_1 - c_1*y_0*y_1 - c_2*y_0*y_1*y_2/(c_6 + y_0) + c_3*y_0**2 - c_4*y_0*y_1 - y_2*(c_0 - c_1*y_0 - c_2*y_0*y_2/(c_6 + y_0)) - y_3*(c_3*y_0 - c_4*y_1)"," 0"," -c_2*y_0/(c_6 + y_0)"," 0"," c_2*y_0*y_1/(c_6 + y_0)**2 - c_2*y_1/(c_6 + y_0) - y_2*(c_2*y_0/(c_6 + y_0)**2 - c_2/(c_6 + y_0))"," -c_1 + c_2*y_0*y_2/(c_6 + y_0)**2 - c_2*y_2/(c_6 + y_0) - c_4"," -c_2*y_0*y_1*y_2/(c_6 + y_0)**3 + c_2*y_1*y_2/(c_6 + y_0)**2 + c_3 - y_2*(-c_2*y_0*y_2/(c_6 + y_0)**3 + c_2*y_2/(c_6 + y_0)**2)"],
            ["2*c_0*y_0 + c_0 - 2*c_1*y_0**2 + c_1*y_0 - 2*c_2*y_0**2*y_2/(c_6 + y_0) + c_2*y_0*y_2/(c_6 + y_0) - 2*y_3*(c_0 - c_1*y_0 - c_2*y_0*y_2/(c_6 + y_0))"," 0"," 0"," 0"," 2*c_2*y_0**2/(c_6 + y_0)**2 - 4*c_2*y_0/(c_6 + y_0) - c_2*y_0/(c_6 + y_0)**2 + c_2/(c_6 + y_0) - 2*y_3*(c_2*y_0/(c_6 + y_0)**2 - c_2/(c_6 + y_0))"," 0"," -2*c_1 - 2*c_2*y_0**2*y_2/(c_6 + y_0)**3 + 4*c_2*y_0*y_2/(c_6 + y_0)**2 + c_2*y_0*y_2/(c_6 + y_0)**3 - 2*c_2*y_2/(c_6 + y_0) - c_2*y_2/(c_6 + y_0)**2 - 2*y_3*(-c_2*y_0*y_2/(c_6 + y_0)**3 + c_2*y_2/(c_6 + y_0)**2)"]
        ])

        assert_sympy_expressions_equal(answer, expected)

    def test_centralmoments_using_MM_model(self):
        """
        Given the MM (Michaelis-Menten) model hard-coded below, the result of
        central moment should match exactly the expected one.
        :return:
        """
        # Moment count vectors for a two-species system.
        counter_nvecs = [[0, 0], [0, 2], [1, 1], [2, 0]]
        mcounter_nvecs = [[0, 0], [0, 1], [1, 0], [0, 2], [1, 1], [2, 0]]

        counter = [Moment(c,sympy.Symbol("YU{0}".format(i))) for i,c in enumerate(counter_nvecs)]
        mcounter = [Moment(c,sympy.Symbol("y_{0}".format(i))) for i,c in enumerate(mcounter_nvecs)]

        m = to_sympy_matrix([
            ['-c_0*y_0*(y_0 + y_1 - 181) + c_1*(-y_0 - y_1 + 301)',
             0,
             '-c_0',
             '-c_0'],
            [
                'c_2*(-y_0 - y_1 + 301)',
                0,
                0,
                0]
        ])

        species = sympy.Matrix(map(sympy.var, ['y_0', 'y_1']))

        propensities = to_sympy_matrix(['c_0*y_0*(y_0 + y_1 - 181)',
                                        'c_1*(-y_0 - y_1 + 301)',
                                        'c_2*(-y_0 - y_1 + 301)'])

        stoichiometry_matrix = sympy.Matrix([[-1, 1, 0],
                                             [0, 0, 1]])

        # Hand-derived expected central-moment equations for the MM model.
        expected = to_sympy_matrix([
            ["c_2*(-y_0 - y_1 + 301)"," -2*c_2"," -2*c_2"," 0"],
            ["-c_0*y_0*y_1*(y_0 + y_1 - 181) + c_1*y_1*(-y_0 - y_1 + 301) + c_2*y_0*(-y_0 - y_1 + 301) - c_2*y_2*(-y_0 - y_1 + 301) - y_1*(-c_0*y_0*(y_0 + y_1 - 181) + c_1*(-y_0 - y_1 + 301))"," -c_0*y_0 - c_1"," -c_0*y_0 - c_0*(y_0 + y_1 - 181) - c_1 - c_2"," -c_2"],
            ["-2*c_0*y_0**2*(y_0 + y_1 - 181) + c_0*y_0*(y_0 + y_1 - 181) + 2*c_1*y_0*(-y_0 - y_1 + 301) + c_1*(-y_0 - y_1 + 301) - 2*y_2*(-c_0*y_0*(y_0 + y_1 - 181) + c_1*(-y_0 - y_1 + 301))"," 0"," -4*c_0*y_0 + 2*c_0*y_2 + c_0 - 2*c_1"," -4*c_0*y_0 + 2*c_0*y_2 - 2*c_0*(y_0 + y_1 - 181) + c_0 - 2*c_1"]
        ])

        answer = eq_central_moments(counter, mcounter, m, species, propensities, stoichiometry_matrix, 2)
        assert_sympy_expressions_equal(answer, expected)
1623175 | """
Lawful, the simple test framework.
"""
import traceback
import textwrap
import sys
import argparse
def report(message=''):
    """Print *message* to standard output (blank line when omitted)."""
    print(message)
def report_exc(message, exc):
    """Report *message* followed by the traceback text *exc*, quoted with ' > '."""
    quoted = textwrap.indent(exc, ' > ')
    report(message)
    report(quoted)
def checker(f):
    """Produces a callable that checks the given function f.

    The returned callable runs f() once and returns True on success. On
    AssertionError it prints a formatted failure report (location, optional
    message, failing source line) and returns False; any other exception is
    reported with its full traceback and also returns False.
    """
    def check():
        name = f.__name__
        message = ''
        # Default failure message: the test's own docstring, quoted.
        if f.__doc__:
            message = '\n' + textwrap.indent(f.__doc__, ' """ ')
        try:
            f()
            return True
        except AssertionError as e:
            # Prefer an explicit assert message over the docstring.
            # NOTE(review): assumes e.args[0] is a string; a non-string
            # assert payload would break .strip().
            if e.args:
                message = e.args[0].strip()
            exception_class, exception, trace = sys.exc_info()
            # Innermost frame = the failing assert statement.
            frames = traceback.extract_tb(trace)
            last = frames[len(frames)-1]
            message_hr = textwrap.indent(message, ' ')
            # FrameSummary unpacks as (filename, lineno, name, line).
            assertion = "{3}".format(*last)
            position = "{0}:{1}".format(*last)
            report("{} ({}):".format(name, position))
            if message_hr:
                report(' --------------------------------')
                report("{}".format(message_hr))
                report(' --------------------------------')
            report(" {}".format(assertion))
            report('')
            return False
        except Exception as e:
            # Unexpected error: dump the whole traceback.
            report_exc("{}:{}".format(name, message), traceback.format_exc())
            return False
    # Keep a handle on the original function for listing/filtering.
    check._test_function = f
    return check
# Global registry of (category, check-callable) pairs populated by @test.
checks = []


class Test:
    """Test decorator to collect tests."""

    def __init__(self, category='default'):
        self.category = category

    def __call__(self, f):
        # Register the wrapped checker but return f unchanged, so the
        # decorated function stays directly callable.
        checks.append((self.category, checker(f)))
        return f

    def __getattr__(self, name):
        # Attribute access mints a decorator for that category: @test.slow
        return Test(name)


test = Test()
def run_tests():
    """
    Runs the tests and outputs the failures and score tally.

    CLI: -l lists categories/tests; -c selects categories; -t selects
    individual tests. Exits with status 1 if any test failed, else 0.
    """
    parser = argparse.ArgumentParser(description='Tests for the project.')
    parser.add_argument('-l', '--list', default=False, action='store_true',
                        help='Lists all categories and tests.')
    parser.add_argument('-c', '--category', default=[], nargs='*',
                        help='The categories to run.')
    parser.add_argument('-t', '--test', default=[], nargs='*',
                        help='The tests to run.')
    options = parser.parse_args()
    # handle list option
    if options.list:
        categories = {c for c,t in checks}
        for cat in sorted(categories):
            report(cat)
            for t in (t for c,t in checks if c==cat):
                report(' '+t._test_function.__name__)
        return
    # actual test run
    categories = options.category
    selected = options.test
    report('=' * 80)
    report()
    # Human-readable description of which subset is being run.
    selection = []
    if selected:
        selection.append(', '.join(selected))
    if categories:
        selection.append("all {}".format(categories))
    selection_hr = ' and '.join(selection) if selection else 'all'
    report("Running {} tests for {}".format(selection_hr, sys.argv[0]))
    report()
    success = 0
    failure = 0
    # Filter the registry down to the requested categories/tests.
    tests = []
    if not categories and not selected:
        tests = checks
    else:
        for c, t in checks:
            if c in categories or t._test_function.__name__ in selected:
                tests.append((c,t))
    for category, check in tests:
        if check():
            success += 1
        else:
            failure += 1
    # NOTE(review): uses print() while everything else goes through
    # report() -- confirm the inconsistency is intentional.
    print("{} / {}".format(success, success + failure))
    report()
    if not failure:
        report('+++ SUCCESS')
    else:
        report('--- FAILURE')
    report()
    report('='*80)
    # Exit code mirrors the result for CI consumption.
    sys.exit(1 if failure > 0 else 0)
| StarcoderdataPython |
3581547 | <reponame>garthur/cds-util
import argparse
import xarray as xr
import pandas as pd
NCEP_REMOTE_RESOURCES = {
'temperature':'https://psl.noaa.gov/thredds/dodsC/Datasets/ncep.reanalysis/surface/air.sig995.{}.nc'
}
def slice_gridded_data(
    dates=None,
    area=None,
    round_area_to_grid=False
):
    '''Slice gridded data to specified area and dates.

    Parameters
    ----------
    dates: list of strings
        [start, end] date strings used to slice the time axis.
    area: list, default [90, -180, -90, 180]
        area extent to download [N, W, S, E]
    round_area_to_grid: bool, default False
        if True, round the bounds to the dataset's 2.5-degree grid.

    Returns
    ----------
    slice_gridded_data_: Callable
        gridded data slicer, usable on an xarray.Dataset resource
    '''
    # BUG FIX: the original declared a mutable default list and mutated it
    # in place, so the +180 longitude shift accumulated across calls and
    # clobbered the caller's list. Use a None sentinel and work on a copy.
    if area is None:
        area = [90, -180, -90, 180]
    area = list(area)

    # Convert longitude to the dataset's [0, 360] convention.
    # NOTE(review): adding a flat 180 maps W=-180 -> 0 and E=180 -> 360;
    # confirm this matches the source grid's longitude origin.
    LONGITUDE_CONSTANT = 180
    area[1] = area[1] + LONGITUDE_CONSTANT
    area[3] = area[3] + LONGITUDE_CONSTANT

    # Snap all four bounds to the 2.5-degree reanalysis grid if requested.
    if round_area_to_grid:
        COORD_GRID = 2.5
        area = [COORD_GRID * round(coord / COORD_GRID) for coord in area]

    def slice_gridded_data_(resource) -> "xr.Dataset":
        # open remote resource
        dataset = xr.open_dataset(resource)
        # slice by latitude and longitude
        location_slice = dataset.sel(
            lat=slice(area[0], area[2]),
            lon=slice(area[1], area[3])
        )
        # restrict to relevant times
        sliced_dataset = location_slice.sel(time=slice(*dates))
        return sliced_dataset

    return slice_gridded_data_
def get_ncep(
    var='temperature',
    #TODO specify functional defaults for dates
    dates=None,
    area=[90, -180, -90, 180],
    round_area_to_grid=False,
    download_flag=False,
    download_file='./output.nc'
) -> xr.Dataset:
    '''Get NCEP/NCAR reanalysis data from NOAA's PSL.

    National Centers for Environmental Prediction/National Weather Service/NOAA/U.S. Department of Commerce.
    1994, updated monthly. NCEP/NCAR Global Reanalysis Products, 1948-continuing.
    Research Data Archive at NOAA/PSL: /data/gridded/data.ncep.reanalysis.html.

    Parameters
    ----------
    var: str, default temperature
        name of variable to download. corresponding resource must be added to NCEP_REMOTE_RESOURCES
    dates: list of strings
        [start, end] date strings bounding the requested period.
    area: list, default [90,-180,-90, 180]
        area extent to download [N, W, S, E]
    round_area_to_grid: True or False, default False
        if True, the area bounds are rounded to the 2.5-degree grid.
    download_flag: bool, default False
        if True, also write the merged dataset to *download_file* as NetCDF.
    download_file: str, default './output.nc'
        path used when download_flag is set.

    Returns
    -------
    merged_data: xarray.Dataset
        data merged from the per-year NCEP files, sliced by location and time
    '''
    dates_dt = pd.to_datetime(dates)
    # One remote NetCDF file per calendar year covered by the date range.
    years_in_scope = range(dates_dt[0].year, dates_dt[1].year + 1)

    remote_resources = [
        NCEP_REMOTE_RESOURCES[var].format(year) for year in years_in_scope
    ]

    # BUG FIX: round_area_to_grid was accepted but never forwarded to the
    # slicer. Also pass a copy of `area` so the slicer can never mutate
    # this function's default list.
    slicer = slice_gridded_data(dates, list(area), round_area_to_grid)
    sliced_datasets = [slicer(resource) for resource in remote_resources]

    merged_data = xr.concat(sliced_datasets, dim="time")

    if download_flag:
        merged_data.to_netcdf(f"{download_file}")

    return merged_data
def parse_args() -> argparse.Namespace:
    """Parse command-line options for the NCEP download script."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--var", choices=list(NCEP_REMOTE_RESOURCES.keys()))
    parser.add_argument("--dates", nargs=2)
    parser.add_argument(
        "--area",
        nargs=4,
        type=float,
        default=[90, -180, -90, 180],
    )
    parser.add_argument("--round_area", action="store_true")
    parser.add_argument("--outfile", default=None)
    return parser.parse_args()
def main() -> None:
    """CLI entry point: parse options and fetch (optionally save) NCEP data."""
    args = parse_args()
    # Result intentionally discarded; only the optional NetCDF download
    # (triggered when --outfile is given) is the observable effect.
    _ = get_ncep(
        var=args.var,
        dates=args.dates,
        area=args.area,
        round_area_to_grid=args.round_area,
        download_flag=(args.outfile is not None),
        download_file=args.outfile
    )
3310259 | #E<NAME>
# Build a simple record for one student and display it.
student = {"firstname": "Joe", "lastname": "Bloggs"}
print(student)
| StarcoderdataPython |
1913083 | #-*- coding:utf-8 -*-
from typing import Iterable
from winsound import Beep
from .musicLaw import AbsoluteFreqMap, RelativeFreqMap
import time
class InvalidNotationError(Exception):
    """Raised when a musical notation string cannot be parsed."""
    # The original defined an __init__ that only delegated to
    # Exception.__init__; the override was redundant and has been removed.
class FreqOutRangeError(Exception):
    """Raised when a computed frequency falls outside the playable Beep range."""
    # The original defined an __init__ that only delegated to
    # Exception.__init__; the override was redundant and has been removed.
class Pitch(object):
    """A musical pitch: a base notation symbol plus an octave offset."""

    def __init__(self, base_pitch: str, level: int):
        # level counts octaves: negative = lower, positive = higher.
        self.base_pitch = base_pitch
        self.level = level

    def __str__(self) -> str:
        return f"Pitch(base={self.base_pitch}, level={self.level})"

    def __repr__(self) -> str:
        return str(self)
class MusicNotation(object):
    """A parsed musical notation: a Pitch plus a relative duration.

    Grammar: "[<|>...]PITCH[,DURATION]" where each leading '<' lowers and
    each '>' raises the octave by one, and DURATION is a float multiplier
    of the player's base note time (default 1). The pitch '-' is a rest.
    """

    def __init__(self):
        self.notation = None  # raw notation string, set by parse_notation()
        self.pitch = None     # Pitch instance
        self.duration = None  # float duration multiplier

    def parse_notation(self, notation: str):
        """Parse *notation* into this object and return self (fluent style).

        Raises
        ------
        InvalidNotationError
            If the string has more than one comma or a non-numeric duration.
        """
        parts = [s.strip() for s in notation.split(',')]
        if len(parts) == 1:
            pitch_text = parts[0]
            duration = 1.
        elif len(parts) == 2:
            pitch_text = parts[0]
            try:
                duration = float(parts[1])
            except ValueError:
                raise InvalidNotationError(f"Unrecognized notation {notation}")
        else:
            # BUG FIX: inputs with more than one comma previously fell
            # through with pitch/duration unbound and raised a NameError;
            # report them as a parse error instead.
            raise InvalidNotationError(f"Unrecognized notation {notation}")
        # Parse Pitch: count leading octave modifiers.
        level = 0
        index = 0  # BUG FIX: pre-initialise so an empty pitch cannot NameError
        for index, c in enumerate(pitch_text):
            if c == '<':
                level -= 1
            elif c == '>':
                level += 1
            else:
                break
        base_pitch = pitch_text[index:]
        # BUG FIX: remember the raw string; error messages elsewhere read
        # self.notation, which the original left as None.
        self.notation = notation
        self.pitch = Pitch(base_pitch, level)
        self.duration = duration
        return self

    def isPause(self) -> bool:
        """True when this notation is the rest marker '-'."""
        return self.pitch.base_pitch == '-'

    def __str__(self) -> str:
        return f"Notation({str(self.pitch)}, dur={self.duration})"

    def __repr__(self) -> str:
        return self.__str__()
class BeepPlayer(object):
    """
    Player using system Beep function.
    Play beep music with specific tone and speed parameters.

    Example
        player = BeepPlayer(base_freq=261, time=700)
        player.load_stylus(stylus)
        player.play()

    Notes
        base_freq and tone_marker, time and bpm: you only need to specify one
        parameter in these pairs, and modifying one member of a pair
        automatically updates the other.
    """
    def __init__(self,
                 base_freq: int = None,
                 tone_marker: str = None,
                 time: int = None,
                 bpm: float = None):
        """
        Arguments
            base_freq: The frequency of 'do' (the '1' notation), in Hz.
            tone_marker: Tone marker. Takes the format of '[notation]=[absolute-notation]',
                like '1=C', '2=E', '<#4=C'
            time: Time of duration=1, in milliseconds
            bpm: beats per minute.
        """
        self.stylus = None  # the loaded iterable of MusicNotation
        self.tick = None    # iterator over stylus (current playback cursor)
        self.loc = 0        # index of the next notation to play
        self.__base_freq = None
        self.__tone_marker = None
        self.__time = None
        self.__bpm = None
        # Tone: prefer an explicit base frequency, else a tone marker,
        # else default to 1=C.
        if base_freq != None:
            self.base_freq = base_freq
        elif tone_marker != None:
            self.tone_marker = tone_marker
        else:
            self.tone_marker = "1=C"
        # Tempo: prefer an explicit note time, else bpm, else 60 bpm.
        if time != None:
            self.time = time
        elif bpm != None:
            self.bpm = bpm
        else:
            self.bpm = 60

    def load_stylus(self, stylus: Iterable):
        """
        Arguments
            stylus: Iterable[MusicNotation]

        Notes
            Some iterable objects in python can't iterate twice.
            For these objects, self.reset() method won't work.
        """
        self.stylus = stylus
        self.tick = iter(stylus)
        self.loc = 0

    @property
    def base_freq(self):
        return self.__base_freq

    @base_freq.setter
    def base_freq(self, base_freq: int):
        # BUG FIX: the original assigned through the property itself
        # ("self.base_freq = base_freq" -> infinite recursion) and pushed
        # None through the tone_marker setter (AttributeError on
        # None.split). Write the private fields directly: an explicit base
        # frequency invalidates the stored tone marker.
        self.__base_freq = base_freq
        self.__tone_marker = None

    @property
    def tone_marker(self):
        return self.__tone_marker

    @tone_marker.setter
    def tone_marker(self, tone_marker: str):
        # '1=C' means: relative notation '1' sounds at absolute pitch C.
        rela, abso = [s.strip() for s in tone_marker.split('=')]
        try:
            absoFreq = AbsoluteFreqMap[abso]
        except KeyError:
            raise InvalidNotationError(f"Unrecognized Absolute Notation: {abso}")
        relaNotation = MusicNotation().parse_notation(rela)
        try:
            # RelativeFreqMap stores log2 frequency ratios relative to 'do'.
            relaFreq = 2 ** (RelativeFreqMap[relaNotation.pitch.base_pitch] + relaNotation.pitch.level)
        except KeyError:
            raise InvalidNotationError(f"Unrecognized Relative Notation: {rela}")
        self.__base_freq = absoFreq / relaFreq
        self.__tone_marker = tone_marker

    @property
    def time(self):
        return self.__time

    @time.setter
    def time(self, time: int):
        self.__time = time
        self.__bpm = self.time_to_bpm(time)

    @property
    def bpm(self):
        return self.__bpm

    @bpm.setter
    def bpm(self, bpm: float):
        self.__bpm = bpm
        self.__time = self.bpm_to_time(bpm)

    def play(self, num: int = None) -> None:
        """
        Play beep music.

        Arguments:
            num: How many notations to play.
                If not specified, all remaining notations will be played.

        Raises any notation/frequency error annotated with the failing
        stylus position ("loc N").
        """
        if num == None:
            for notation in self.tick:
                try:
                    self.play_notation(notation)
                except Exception as e:
                    raise type(e)(f"loc {self.loc}", *e.args)
                self.loc += 1
        else:
            for _, notation in zip(range(num), self.tick):
                try:
                    self.play_notation(notation)
                except Exception as e:
                    raise type(e)(f"loc {self.loc}", *e.args)
                self.loc += 1

    def reset(self) -> None:
        """
        Reset the stylus to the beginning.

        Notes
            Some iterable objects in python can't iterate twice.
            For these objects, self.reset() method won't work.
        """
        self.tick = iter(self.stylus)
        self.loc = 0

    def play_notation(self, notation: MusicNotation) -> None:
        """Sound (or rest) one notation via winsound.Beep.

        Raises InvalidNotationError for unknown pitches and
        FreqOutRangeError when the frequency leaves Beep's 37..32767 Hz range.
        """
        duration = int(notation.duration * self.time)
        if notation.isPause():
            # Rest: sleep instead of beeping (time.sleep takes seconds).
            time.sleep(duration/1000)
            return
        try:
            baseLogFreq = RelativeFreqMap[notation.pitch.base_pitch]
        except KeyError:
            raise InvalidNotationError(
                f"Unrecognized notation {notation.notation}")
        LogFreq = baseLogFreq + notation.pitch.level
        freq = int(self.base_freq * 2**LogFreq)
        # winsound.Beep only accepts 37..32767 Hz.
        if freq < 37:
            raise FreqOutRangeError(
                f"pitch {notation.notation} is too low: {freq} Hz")
        if freq > 32767:
            raise FreqOutRangeError(
                f"pitch {notation.notation} is too high: {freq} Hz")
        Beep(freq, duration)

    @classmethod
    def bpm_to_time(cls, bpm: float) -> int:
        """Convert beats per minute to the duration of one beat in ms."""
        return int(60000 / bpm)

    @classmethod
    def time_to_bpm(cls, time: int) -> float:
        """Convert a beat duration in ms to beats per minute."""
        return 60000 / time
| StarcoderdataPython |
9647754 | <gh_stars>1-10
import unittest
import unittest.mock as mock
from unittest.mock import patch
import os
import sys
# Make the project root importable so `app` and `models` resolve when the
# tests are run from this directory.
sys.path.append(os.path.abspath('../../'))
from app import addNewUserDB
import models

# Keys of the per-case parameter dictionaries used in setUp().
KEY_INPUT = "input"
KEY_EXPECTED = "expected"
# Key of the "wrong user" fixture consumed by test_failure.
USER_W = "UserWrong"
# Name of the user pre-seeded into the mock database.
INITIAL_USERNAME = 'Oscar'
class AddUserTestCase(unittest.TestCase):
    """Tests for app.addNewUserDB with the DB session patched out."""
    def setUp(self):
        """Build success/failure fixtures and a one-user in-memory mock DB."""
        # Each success case pairs the positional input
        # [id, email, name, token] with the expected {id: [email, name, token]}.
        self.success_test_params = [{
            KEY_INPUT: ['testid1','<EMAIL>', 'Oscar', 'hadfvisdklfvklids' ],
            KEY_EXPECTED: {'testid1':['<EMAIL>','Oscar','hadfvisdklfvklids']}
        }, {
            KEY_INPUT: ['testid2','<EMAIL>', 'Oscar2', 'hadfvisdklfvklids1' ],
            KEY_EXPECTED: {'testid2':['<EMAIL>', 'Oscar2', 'hadfvisdklfvklids1']}
        }]
        # No failure fixtures yet, so test_failure's loop body never runs.
        self.failure_test_paramus = [
        ]
        initial_person = models.UserG(name=INITIAL_USERNAME, email='<EMAIL>')
        self.initial_db_mock = [initial_person]
    # Stand-in for db.session.add: collect the object in memory.
    def mocked_db_session_add(self, name):
        self.initial_db_mock.append(name)
    # Stand-in for db.session.commit: a no-op.
    def mocked_db_session_commit(self):
        pass
    # Stand-in for Person.query.all(): return the in-memory rows
    # (used only by test_failure).
    def mocked_person_query_all(self):
        return self.initial_db_mock
    # The actual success-path test.
    def test_success(self):
        """addNewUserDB should return the expected user data per fixture.

        NOTE(review): expected_ruserLResult is a dict keyed by user id, yet
        it is indexed with integers 0/1/2 below, which raises KeyError —
        confirm against addNewUserDB's actual return shape.
        """
        for test in self.success_test_params:
            with patch('app.db.session.add', self.mocked_db_session_add):
                with patch('app.db.session.commit',
                    self.mocked_db_session_commit):
                    print(self.initial_db_mock)
                    actual_userLResult = addNewUserDB(
                        test[KEY_INPUT])
                    print(actual_userLResult)
                    expected_ruserLResult = test[KEY_EXPECTED]
                    print(self.initial_db_mock)
                    print(actual_userLResult)
                    self.assertEqual(len(actual_userLResult),
                                     len(expected_ruserLResult))
                    self.assertEqual(actual_userLResult[1],
                                     expected_ruserLResult[1])
                    self.assertEqual(actual_userLResult[0],
                                     expected_ruserLResult[0])
                    if (len(expected_ruserLResult) == 3):
                        self.assertEqual(actual_userLResult[2],
                                         expected_ruserLResult[2])
    def test_failure(self):
        """Failure-path test; currently a no-op (fixture list is empty).

        NOTE(review): this path unpacks two values from addNewUserDB while
        test_success expects one — reconcile before adding fixtures.
        """
        for test in self.failure_test_paramus:
            with patch('app.db.session.add', self.mocked_db_session_add):
                with patch('app.db.session.commit',
                    self.mocked_db_session_commit):
                    with patch('models.Person.query') as mocked_query:
                        mocked_query.all = self.mocked_person_query_all
                        print(self.initial_db_mock)
                        actual_userLResult, actual_scoresLResult = addNewUserDB(
                            test[KEY_INPUT])
                        print(actual_userLResult)
                        ruserLResult = test[USER_W]
                        print(self.initial_db_mock)
                        print(actual_userLResult)
                        self.assertNotEqual(actual_userLResult[0],
                                            ruserLResult[0])
                        self.assertNotEqual(actual_userLResult[1],
                                            ruserLResult[1])
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
6406939 | <gh_stars>1-10
#
# Copyright (C) 2012-2020 Euclid Science Ground Segment
#
# This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General
# Public License as published by the Free Software Foundation; either version 3.0 of the License, or (at your option)
# any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
Overview
--------
general info about this module
Summary
---------
.. autosummary::
stratified_kfold_cv
Module API
----------
"""
from __future__ import absolute_import, division, print_function
from builtins import (bytes, str, open, super, range,
zip, round, input, int, pow, object, map, zip)
__author__ = "<NAME>"
# Standard library
# eg copy
# absolute import rg:from copy import deepcopy
# Dependencies
# eg numpy
# absolute import eg: import numpy as np
from sklearn.model_selection import StratifiedKFold
import numpy as np
# Project
# relative import eg: from .mod import f
def stratified_kfold_cv(model,training_dataset,n_splits=10,random_state=None):
    """Run stratified k-fold cross-validation for a wrapped classifier.

    Parameters
    ----------
    model : object
        Wrapper exposing a scikit-learn style estimator as ``model.clf``
        (must implement ``fit`` and ``score``).
    training_dataset : object
        Dataset exposing ``features`` (2-D array) and ``target_array``
        (1-D labels) attributes.
    n_splits : int
        Number of stratified folds (default 10).
    random_state : int or None
        Seed for shuffling the folds. When given, shuffling is enabled,
        because scikit-learn (>= 0.24) rejects ``random_state`` unless
        ``shuffle=True``.

    Returns
    -------
    tuple
        ``(cv_score, mean, std)`` where ``cv_score`` is the per-fold
        accuracy array.
    """
    # Bug fix: passing random_state without shuffle=True raises a
    # ValueError in modern scikit-learn (the seed had no effect anyway).
    # Keep the historical deterministic split when no seed is given.
    shuffle = random_state is not None
    skf = StratifiedKFold(n_splits=n_splits, shuffle=shuffle, random_state=random_state)
    kfold = skf.split(training_dataset.features, training_dataset.target_array)
    cv_score = np.zeros(n_splits)
    for k, (train, test) in enumerate(kfold):
        model.clf.fit(training_dataset.features[train], training_dataset.target_array[train])
        cv_score[k] = model.clf.score(training_dataset.features[test], training_dataset.target_array[test])
        print('fold',k,'score',cv_score[k])
    print('\nCV accuracy: %.3f +/- %.3f' % (np.mean(cv_score), np.std(cv_score)))
    return cv_score, np.mean(cv_score), np.std(cv_score)
12833286 | <filename>2021/19/scan.py
#!/usr/bin/env python
import numpy as np
from io import StringIO
from itertools import permutations
def perms(arr):
    """Yield every axis-permuted, sign-flipped, row-rotated variant of *arr*.

    For each of the 6 column orderings and 8 sign combinations, the rows
    are canonically sorted and then every cyclic row rotation is emitted,
    preserving the original enumeration order (6 * 8 * len(arr) yields).
    """
    sign_choices = (1, -1)
    for cols in permutations(range(3)):
        for sx in sign_choices:
            for sy in sign_choices:
                for sz in sign_choices:
                    variant = arr[:, cols] * [sx, sy, sz]
                    # Canonical row order so identical point clouds compare equal.
                    variant = variant[np.lexsort(np.rot90(variant))]
                    for shift in range(len(variant)):
                        yield np.roll(variant, shift, 0)
def parse(data):
    """Parse scanner-report text into a list of row-sorted int arrays.

    Each blank-line-separated section has a one-line header followed by
    comma-separated coordinate rows; rows come back lexicographically sorted.
    """
    sorted_scans = []
    for section in data.split("\n\n"):
        points = np.genfromtxt(StringIO(section), delimiter=',',
                               dtype=int, skip_header=1)
        sorted_scans.append(points[np.lexsort(np.rot90(points))])
    return sorted_scans
def align(scanners):
    """Greedily place every scanner into scanner 0's coordinate frame.

    Returns a list of (index, aligned_points, offset) triples, or None if
    some scanner can never be matched (state is dumped to ./output for
    debugging).  A match requires >= 12 beacons in common, per the puzzle.
    """
    remain = list(enumerate(scanners))
    # Scanner 0 anchors the global frame at offset (0, 0, 0).
    done = [remain.pop(0) + (np.array((0, 0, 0)),)]
    while remain:
        found = False
        for ai, a, _ in done:
            aset = {tuple(p) for p in a}
            for i, (bi, b) in enumerate(remain):
                sz = min(len(b), len(a))
                # Try every orientation / row rotation of the candidate scan.
                for bb in perms(b):
                    delta = a[:sz] - bb[:sz]
                    # A translation that repeats across rows is a candidate offset.
                    unq, cnt = np.unique(delta, axis=0, return_counts=True)
                    if max(cnt) < 2:
                        continue
                    for j, c in sorted(enumerate(cnt), key=lambda x: x[1]):
                        offset = unq[j]
                        aligned = bb + offset
                        bset = {tuple(p) for p in aligned}
                        common = aset.intersection(bset)
                        if len(common) >= 12:
                            remain.pop(i)
                            done.append((bi, aligned, offset))
                            print(f"{len(done)} done, {len(remain)} remain")
                            found = True
                            break
                    if found:
                        break
                if found:
                    break
            if found:
                break
        if not found:
            # No scanner matched on a full sweep: dump state and give up.
            print("uh oh, found none on entire iteration, giving up")
            with open('output', 'w') as f:
                f.write(str(remain))
                f.write("\n\n")
                f.write(str(done))
            return None
    return done
def solve(scanners):
    """Return (unique beacon count, max Manhattan distance between scanners)."""
    unique_beacons = set()
    scanner_positions = []
    for _idx, points, origin in scanners:
        for point in points:
            unique_beacons.add(tuple(point))
        scanner_positions.append(origin)
    part1 = len(unique_beacons)
    part2 = max(np.abs(p - q).sum()
                for p in scanner_positions for q in scanner_positions)
    return part1, part2
if __name__ == "__main__":
with open('sample') as f:
sample = f.read().strip()
scanners = parse(sample)
print(solve(align(scanners)))
with open('input') as f:
data = parse(f.read().strip())
scanners = align(data)
print(solve(align(data)))
| StarcoderdataPython |
# Style names and padding tuples for rendered question/answer output.
# Presumably consumed by a rich-style console renderer — confirm at call sites.
question_style = "bold blue"
answer_style = "italic"
# Padding tuples look like (top, right, bottom, left) — TODO confirm consumer.
answer_padding_style = (1, 0, 1, 4)
special_answer_padding_style = (0, 0, 0, 4)
warning_style = "bold red"
answer_link_style = "white"
answer_link_padding_style = (0, 0, 0, 4)
| StarcoderdataPython |
1918143 | import os
import biothings, config
biothings.config_for_app(config)
from config import DATA_ARCHIVE_ROOT
import biothings.hub.dataload.dumper
import datetime
class FigshareDumper(biothings.hub.dataload.dumper.DummyDumper):
    """Dummy dumper for the COVID-19 Figshare portal.

    A DummyDumper downloads nothing; this class only registers source
    metadata and stamps a release timestamp for downstream steps.
    """
    # type: resource
    SRC_NAME = "covid_figshare"
    # override in subclass accordingly
    SRC_ROOT_FOLDER = os.path.join(DATA_ARCHIVE_ROOT, SRC_NAME)
    # Source metadata surfaced by the BioThings hub (license + landing page).
    __metadata__ = {
        "src_meta": {
            'license_url': 'https://figshare.com/terms',
            'url': 'https://covid19.figshare.com/'
        }
    }
    # Cron-style schedule: daily at 06:00 (presumably hub-local time — confirm).
    SCHEDULE = "0 6 * * *"
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Stamp a release as soon as the dumper is instantiated.
        self.set_release()
    def set_release(self):
        # Minute-resolution timestamp used as the data release identifier.
        self.release = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M')
| StarcoderdataPython |
11292911 | <reponame>sufengniu/kaggle_ai2
from nltk.stem.porter import *
import itertools
import os
import string
# Punctuation set — only referenced by the commented-out v2 normalizer
# inside norm_word.
exclude = set(string.punctuation)
# Shared Porter stemmer — only referenced by the disabled stemming variant
# noted after norm_word.
stemmer = PorterStemmer()
def load_d_word_count(path = 'data/ck-12-word-count.txt'):
    """Load a tab-separated ``word<TAB>count`` file into a dict of int counts."""
    counts = {}
    with open(path) as handle:
        for row in handle:
            fields = row.strip('\n').split('\t')
            counts[fields[0]] = int(fields[1])
    return counts
def combination_index(N, n_com):
    """Return all index combinations of sizes 1..n_com drawn from range(N).

    Parameters
    ----------
    N : int
        Number of items (indices 0 .. N-1).
    n_com : int
        Largest combination size to include.

    Returns
    -------
    list of list of int
        Combinations ordered by size, then lexicographically — the same
        order as before, but now correct for N > 10 (the old trick of
        combining characters of the string '012...' broke once an index
        needed two digits).
    """
    res = []
    for size in range(1, n_com + 1):
        for com in itertools.combinations(range(N), size):
            res.append(list(com))
    return res
def norm_word(word):
    """Lower-case *word* and strip ?, ., , and ! characters from its ends.

    The strip passes run one character class at a time in the fixed order
    ? . , ! — matching the historical chained-strip behaviour exactly
    (so e.g. a trailing "?," only loses the comma and the later '?').
    """
    normalized = word.lower()
    for punct in ('?', '.', ',', '!'):
        normalized = normalized.strip(punct)
    return normalized
'''
# Stemming variant — did not work, based on experiments 10004 and 10008.
try:
    word = stemmer.stem(word.lower().strip('?').strip('.').strip(',').strip('!'))
except:
    word = word.lower().strip('?').strip('.').strip(',').strip('!')
return word
'''
def get_sentence(dir):
    """Read every file under *dir* and return one normalized word list per line.

    Note: *dir* is concatenated directly with each entry name, so it must
    end with a path separator.
    """
    sentences = []
    for entry in os.listdir(dir):
        full_path = dir + entry
        for raw_line in open(full_path):
            tokens = raw_line.strip('\n').split(' ')
            sentences.append([norm_word(token) for token in tokens])
    return sentences
def _tally_words(path, select_fields):
    """Shared helper: count norm_word-normalized words in selected TSV fields.

    Parameters
    ----------
    path : str
        TSV file to read, one record per line.
    select_fields : callable
        Maps one split row (list of str) to the iterable of text fields
        whose space-separated words should be counted.

    Returns
    -------
    dict
        word -> occurrence count.
    """
    d_word_count = {}
    for line in open(path):
        lst = line.strip('\n').split('\t')
        for field in select_fields(lst):
            for word in field.split(' '):
                word = norm_word(word)
                d_word_count.setdefault(word, 0)
                d_word_count[word] += 1
    return d_word_count
def get_d_word_count_train_question():
    """Word counts over the question column (index 1) of the training set."""
    return _tally_words('data/training_set.tsv', lambda lst: [lst[1]])
def get_d_word_count_train_choice():
    """Word counts over the answer-choice columns (3+) of the training set."""
    return _tally_words('data/training_set.tsv', lambda lst: lst[3:])
def get_d_word_count_validation_question():
    """Word counts over the question column (index 1) of the validation set."""
    return _tally_words('data/validation_set.tsv', lambda lst: [lst[1]])
def get_d_word_count_validation_choice():
    """Word counts over the answer-choice columns (2+) of the validation set."""
    return _tally_words('data/validation_set.tsv', lambda lst: lst[2:])
| StarcoderdataPython |
147136 | <reponame>simple2source/roadkarma<filename>sorting/shell_sort.py
# Shell sort
# Time complexity: best O(n^1.3)
#                  worst O(n^2)
#                  average O(n log n) ~ O(n^2)
# Stability: not stable
# Extra space: O(1) (sorts in place)
def shell_sort(alist):
    """Sort *alist* in place with Shell's method (gap halving); return it."""
    gap = len(alist) // 2
    while gap:
        # One gapped pass: bubble each element back in steps of `gap`.
        for start in range(gap, len(alist)):
            pos = start
            while pos >= gap and alist[pos - gap] > alist[pos]:
                alist[pos], alist[pos - gap] = alist[pos - gap], alist[pos]
                pos -= gap
        gap //= 2
    return alist
# test: smoke-check shell_sort on a shuffled list and one with duplicates.
# Expected output: [0, 1, 2, 3, 7, 11, 23, 56] and [1, 2, 3, 4, 4, 8, 8, 9].
l1 = [3, 2, 1, 0, 7, 11, 56, 23]
l2 = [8, 4, 1, 8, 4, 9, 3, 2]
print(shell_sort(l1))
print(shell_sort(l2))
def gap_insertion_sort(alist, start, gap):
    """In-place insertion sort over the slice alist[start::gap] (returns None)."""
    for idx in range(start + gap, len(alist), gap):
        value = alist[idx]
        slot = idx
        # Shift larger gap-neighbours right until `value` fits.
        while slot >= gap and alist[slot - gap] > value:
            alist[slot] = alist[slot - gap]
            slot -= gap
        alist[slot] = value
def shell_sort2(alist):
    """Shell sort built on gap_insertion_sort; sorts in place and returns the list."""
    gap = len(alist) // 2
    while gap:
        # Insertion-sort each of the `gap` interleaved sub-sequences.
        for offset in range(gap):
            gap_insertion_sort(alist, offset, gap)
        gap //= 2
    return alist
# test: same smoke checks for the helper-based variant (same expected output).
l3 = [3, 2, 1, 0, 7, 11, 56, 23]
l4 = [8, 4, 1, 8, 4, 9, 3, 2]
print(shell_sort2(l3))
print(shell_sort2(l4))
1876702 | <filename>src/unittest_parallel/main.py
# Licensed under the MIT License
# https://github.com/craigahobbs/unittest-parallel/blob/main/LICENSE
"""
unittest-parallel command-line script main module
"""
import argparse
from contextlib import contextmanager
from io import StringIO
import multiprocessing
import os
import sys
import tempfile
import time
import unittest
import coverage
def main(argv=None):
    """
    unittest-parallel command-line script main entry point

    Parses arguments, discovers tests, fans the suites out over a process
    pool, aggregates and reports results, then optionally combines and
    reports coverage.  Exits non-zero on test failure or when coverage
    falls below --coverage-fail-under.
    """
    # Command line arguments
    parser = argparse.ArgumentParser(prog='unittest-parallel')
    parser.add_argument('-v', '--verbose', action='store_const', const=2, default=1,
                        help='Verbose output')
    parser.add_argument('-q', '--quiet', dest='verbose', action='store_const', const=0, default=1,
                        help='Quiet output')
    parser.add_argument('-f', '--failfast', action='store_true', default=False,
                        help='Stop on first fail or error')
    parser.add_argument('-b', '--buffer', action='store_true', default=False,
                        help='Buffer stdout and stderr during tests')
    parser.add_argument('-j', '--jobs', metavar='COUNT', type=int, default=0,
                        help='The number of test processes (default is 0, all cores)')
    parser.add_argument('--class-fixtures', action='store_true', default=False,
                        help='One or more TestCase class has a setUpClass method')
    parser.add_argument('--module-fixtures', action='store_true', default=False,
                        help='One or more test module has a setUpModule method')
    parser.add_argument('--disable-process-pooling', action='store_true', default=False,
                        help='Do not reuse processes used to run test suites')
    group_unittest = parser.add_argument_group('unittest options')
    group_unittest.add_argument('-s', '--start-directory', metavar='START', default='.',
                                help="Directory to start discovery ('.' default)")
    group_unittest.add_argument('-p', '--pattern', metavar='PATTERN', default='test*.py',
                                help="Pattern to match tests ('test*.py' default)")
    group_unittest.add_argument('-t', '--top-level-directory', metavar='TOP',
                                help='Top level directory of project (defaults to start directory)')
    group_coverage = parser.add_argument_group('coverage options')
    group_coverage.add_argument('--coverage', action='store_true',
                                help='Run tests with coverage')
    group_coverage.add_argument('--coverage-branch', action='store_true',
                                help='Run tests with branch coverage')
    group_coverage.add_argument('--coverage-rcfile', metavar='RCFILE',
                                help='Specify coverage configuration file')
    group_coverage.add_argument('--coverage-include', metavar='PAT', action='append',
                                help='Include only files matching one of these patterns. Accepts shell-style (quoted) wildcards.')
    group_coverage.add_argument('--coverage-omit', metavar='PAT', action='append',
                                help='Omit files matching one of these patterns. Accepts shell-style (quoted) wildcards.')
    group_coverage.add_argument('--coverage-source', metavar='SRC', action='append',
                                help='A list of packages or directories of code to be measured')
    group_coverage.add_argument('--coverage-html', metavar='DIR',
                                help='Generate coverage HTML report')
    group_coverage.add_argument('--coverage-xml', metavar='FILE',
                                help='Generate coverage XML report')
    group_coverage.add_argument('--coverage-fail-under', metavar='MIN', type=float,
                                help='Fail if coverage percentage under min')
    args = parser.parse_args(args=argv)
    # --coverage-branch implies --coverage.
    if args.coverage_branch:
        args.coverage = args.coverage_branch
    # A job count of 0 (the default) means "use every available core".
    process_count = max(0, args.jobs)
    if process_count == 0:
        process_count = multiprocessing.cpu_count()
    # Create the temporary directory (for coverage files)
    with tempfile.TemporaryDirectory() as temp_dir:
        # Discover tests
        with _coverage(args, temp_dir):
            test_loader = unittest.TestLoader()
            discover_suite = test_loader.discover(args.start_directory, pattern=args.pattern, top_level_dir=args.top_level_directory)
        # Get the parallelizable test suites
        # (granularity depends on fixtures: module > class > individual test).
        if args.module_fixtures:
            test_suites = list(_iter_module_suites(discover_suite))
        elif args.class_fixtures:
            test_suites = list(_iter_class_suites(discover_suite))
        else:
            test_suites = list(_iter_test_cases(discover_suite))
        # Don't use more processes than test suites
        process_count = max(1, min(len(test_suites), process_count))
        # Report test suites and processes
        print(
            f'Running {len(test_suites)} test suites ({discover_suite.countTestCases()} total tests) across {process_count} processes',
            file=sys.stderr
        )
        if args.verbose > 1:
            print(file=sys.stderr)
        # Run the tests in parallel
        start_time = time.perf_counter()
        # spawn starts each worker in a fresh interpreter (consistent across platforms).
        multiprocessing_context = multiprocessing.get_context(method='spawn')
        # maxtasksperchild=1 forces a new process per suite (no pooling).
        maxtasksperchild = 1 if args.disable_process_pooling else None
        with multiprocessing_context.Pool(process_count, maxtasksperchild=maxtasksperchild) as pool, \
             multiprocessing.Manager() as manager:
            test_manager = ParallelTestManager(manager, args, temp_dir)
            results = pool.map(test_manager.run_tests, test_suites)
        stop_time = time.perf_counter()
        test_duration = stop_time - start_time
        # Aggregate parallel test run results
        # Each result: (tests_run, errors, failures, skipped,
        #               expected_failures, unexpected_successes).
        tests_run = 0
        errors = []
        failures = []
        skipped = 0
        expected_failures = 0
        unexpected_successes = 0
        for result in results:
            tests_run += result[0]
            errors.extend(result[1])
            failures.extend(result[2])
            skipped += result[3]
            expected_failures += result[4]
            unexpected_successes += result[5]
        is_success = not(errors or failures or unexpected_successes)
        # Compute test info
        infos = []
        if failures:
            infos.append(f'failures={len(failures)}')
        if errors:
            infos.append(f'errors={len(errors)}')
        if skipped:
            infos.append(f'skipped={skipped}')
        if expected_failures:
            infos.append(f'expected failures={expected_failures}')
        if unexpected_successes:
            infos.append(f'unexpected successes={unexpected_successes}')
        # Report test errors
        if errors or failures:
            print(file=sys.stderr)
            for error in errors:
                print(error, file=sys.stderr)
            for failure in failures:
                print(failure, file=sys.stderr)
        elif args.verbose > 0:
            print(file=sys.stderr)
        # Test report
        print(unittest.TextTestResult.separator2, file=sys.stderr)
        print(f'Ran {tests_run} {"tests" if tests_run > 1 else "test"} in {test_duration:.3f}s', file=sys.stderr)
        print(file=sys.stderr)
        print(f'{"OK" if is_success else "FAILED"}{" (" + ", ".join(infos) + ")" if infos else ""}', file=sys.stderr)
        # Return an error status on failure
        if not is_success:
            parser.exit(status=len(errors) + len(failures) + unexpected_successes)
        # Coverage?
        if args.coverage:
            # Combine the coverage files
            cov = coverage.Coverage(config_file=args.coverage_rcfile)
            cov.combine(data_paths=[os.path.join(temp_dir, x) for x in os.listdir(temp_dir)])
            # Coverage report
            print(file=sys.stderr)
            percent_covered = cov.report(ignore_errors=True, file=sys.stderr)
            print(f'Total coverage is {percent_covered:.2f}%', file=sys.stderr)
            # HTML coverage report
            if args.coverage_html:
                cov.html_report(directory=args.coverage_html, ignore_errors=True)
            # XML coverage report
            if args.coverage_xml:
                cov.xml_report(outfile=args.coverage_xml, ignore_errors=True)
            # Fail under
            if args.coverage_fail_under and percent_covered < args.coverage_fail_under:
                parser.exit(status=2)
@contextmanager
def _coverage(args, temp_dir):
    """Context manager measuring code coverage for the enclosed test run.

    Yields the active coverage.Coverage object when --coverage is on,
    otherwise None.  Data goes to a uniquely named file in *temp_dir*
    so parallel processes never collide on a data file.
    """
    # Running tests with coverage?
    if args.coverage:
        # Generate a random coverage data file name - file is deleted along with containing directory
        with tempfile.NamedTemporaryFile(dir=temp_dir, delete=False) as coverage_file:
            pass
        # Create the coverage object
        cov = coverage.Coverage(
            config_file=args.coverage_rcfile,
            data_file=coverage_file.name,
            branch=args.coverage_branch,
            include=args.coverage_include,
            # Never report on this runner module itself.
            omit=(args.coverage_omit if args.coverage_omit else []) + [__file__],
            source=args.coverage_source
        )
        try:
            # Start measuring code coverage
            cov.start()
            # Yield for unit test running
            yield cov
        finally:
            # Stop measuring code coverage
            cov.stop()
            # Save the collected coverage data to the data file
            cov.save()
    else:
        # Not running tests with coverage - yield for unit test running
        yield None
# Yield each non-empty top-level (module) suite from TestLoader.discover.
def _iter_module_suites(test_suite):
    """Generate the direct child suites of *test_suite* that contain tests."""
    yield from (child for child in test_suite if child.countTestCases())
# Yield the deepest suites whose direct children include a TestCase (class level).
def _iter_class_suites(test_suite):
    """Recursively generate suites that directly hold TestCase instances."""
    if any(isinstance(child, unittest.TestCase) for child in test_suite):
        yield test_suite
        return
    for child in test_suite:
        yield from _iter_class_suites(child)
# Flatten a suite tree into individual TestCase instances (method level).
def _iter_test_cases(test_suite):
    """Depth-first generate every TestCase contained in *test_suite*."""
    if isinstance(test_suite, unittest.TestCase):
        yield test_suite
        return
    for child in test_suite:
        yield from _iter_test_cases(child)
class ParallelTestManager:
    """Per-process test runner handed to the multiprocessing pool.

    ``failfast`` is a manager-backed Event so a failure observed in one
    worker short-circuits the remaining suites in every worker.
    """
    def __init__(self, manager, args, temp_dir):
        self.args = args
        self.temp_dir = temp_dir
        self.failfast = manager.Event()
    def run_tests(self, test_suite):
        """Run one suite; return its aggregated result counts.

        Result layout: (tests_run, errors, failures, skipped,
        expected_failures, unexpected_successes).  NOTE(review): the
        fail-fast short-circuit returns a list while the normal path
        returns a tuple — harmless for main()'s indexing, but worth
        unifying.
        """
        # Fail fast?
        if self.failfast.is_set():
            return [0, [], [], 0, 0, 0]
        # Run unit tests (suite output is swallowed via an in-memory stream).
        with _coverage(self.args, self.temp_dir):
            runner = unittest.TextTestRunner(
                stream=StringIO(),
                resultclass=ParallelTestResult if False else ParallelTextTestResult,
                verbosity=self.args.verbose,
                failfast=self.args.failfast,
                buffer=self.args.buffer
            )
            result = runner.run(test_suite)
        # Set failfast, if necessary
        if result.shouldStop:
            self.failfast.set()
        # Return (test_count, errors, failures, skipped_count, expected_failure_count, unexpected_success_count)
        return (
            result.testsRun,
            [self._format_error(result, error) for error in result.errors],
            [self._format_error(result, failure) for failure in result.failures],
            len(result.skipped),
            len(result.expectedFailures),
            len(result.unexpectedSuccesses)
        )
    @staticmethod
    def _format_error(result, error):
        """Render one (test, traceback) pair in unittest's standard format."""
        return '\n'.join([
            unittest.TextTestResult.separator1,
            result.getDescription(error[0]),
            unittest.TextTestResult.separator2,
            error[1]
        ])
class ParallelTextTestResult(unittest.TextTestResult):
    """TextTestResult variant safe for concurrent per-process output.

    Each overridden hook calls ``super(unittest.TextTestResult, self)`` —
    i.e. it deliberately SKIPS TextTestResult's own printing (which
    assumes a single serial run) and performs only the base TestResult
    bookkeeping, then emits a complete line itself so interleaved output
    from multiple workers stays legible.
    """
    def __init__(self, stream, descriptions, verbosity):
        # Replace the runner-supplied stream with one wrapping sys.stderr.
        stream = type(stream)(sys.stderr)
        super().__init__(stream, descriptions, verbosity)
    def startTest(self, test):
        if self.showAll:
            self.stream.writeln(f'{self.getDescription(test)} ...')
            self.stream.flush()
        # pylint: disable=bad-super-call
        super(unittest.TextTestResult, self).startTest(test)
    def _add_helper(self, test, dots_message, show_all_message):
        # Emit either the dot-style or the verbose-style outcome marker.
        if self.showAll:
            self.stream.writeln(f'{self.getDescription(test)} ... {show_all_message}')
        elif self.dots:
            self.stream.write(dots_message)
        self.stream.flush()
    def addSuccess(self, test):
        # pylint: disable=bad-super-call
        super(unittest.TextTestResult, self).addSuccess(test)
        self._add_helper(test, '.', 'ok')
    def addError(self, test, err):
        # pylint: disable=bad-super-call
        super(unittest.TextTestResult, self).addError(test, err)
        self._add_helper(test, 'E', 'ERROR')
    def addFailure(self, test, err):
        # pylint: disable=bad-super-call
        super(unittest.TextTestResult, self).addFailure(test, err)
        self._add_helper(test, 'F', 'FAIL')
    def addSkip(self, test, reason):
        # pylint: disable=bad-super-call
        super(unittest.TextTestResult, self).addSkip(test, reason)
        self._add_helper(test, 's', f'skipped {reason!r}')
    def addExpectedFailure(self, test, err):
        # pylint: disable=bad-super-call
        super(unittest.TextTestResult, self).addExpectedFailure(test, err)
        self._add_helper(test, 'x', 'expected failure')
    def addUnexpectedSuccess(self, test):
        # pylint: disable=bad-super-call
        super(unittest.TextTestResult, self).addUnexpectedSuccess(test)
        self._add_helper(test, 'u', 'unexpected success')
    def printErrors(self):
        # Error details are aggregated and printed once by main().
        pass
| StarcoderdataPython |
3409064 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['PredictionArgs', 'Prediction']
# NOTE: this class was generated by the Pulumi SDK Generator — regenerate
# from the provider schema rather than hand-editing.
@pulumi.input_type
class PredictionArgs:
    def __init__(__self__, *,
                 auto_analyze: pulumi.Input[bool],
                 hub_name: pulumi.Input[str],
                 mappings: pulumi.Input['PredictionMappingsArgs'],
                 negative_outcome_expression: pulumi.Input[str],
                 positive_outcome_expression: pulumi.Input[str],
                 primary_profile_type: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 scope_expression: pulumi.Input[str],
                 score_label: pulumi.Input[str],
                 description: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 display_name: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 grades: Optional[pulumi.Input[Sequence[pulumi.Input['PredictionGradesArgs']]]] = None,
                 involved_interaction_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 involved_kpi_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 involved_relationships: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 prediction_name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Prediction resource.
        :param pulumi.Input[bool] auto_analyze: Whether do auto analyze.
        :param pulumi.Input[str] hub_name: The name of the hub.
        :param pulumi.Input['PredictionMappingsArgs'] mappings: Definition of the link mapping of prediction.
        :param pulumi.Input[str] negative_outcome_expression: Negative outcome expression.
        :param pulumi.Input[str] positive_outcome_expression: Positive outcome expression.
        :param pulumi.Input[str] primary_profile_type: Primary profile type.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[str] scope_expression: Scope expression.
        :param pulumi.Input[str] score_label: Score label.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] description: Description of the prediction.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] display_name: Display name of the prediction.
        :param pulumi.Input[Sequence[pulumi.Input['PredictionGradesArgs']]] grades: The prediction grades.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] involved_interaction_types: Interaction types involved in the prediction.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] involved_kpi_types: KPI types involved in the prediction.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] involved_relationships: Relationships involved in the prediction.
        :param pulumi.Input[str] prediction_name: Name of the prediction.
        """
        # Required arguments are always set; optional ones only when provided.
        pulumi.set(__self__, "auto_analyze", auto_analyze)
        pulumi.set(__self__, "hub_name", hub_name)
        pulumi.set(__self__, "mappings", mappings)
        pulumi.set(__self__, "negative_outcome_expression", negative_outcome_expression)
        pulumi.set(__self__, "positive_outcome_expression", positive_outcome_expression)
        pulumi.set(__self__, "primary_profile_type", primary_profile_type)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "scope_expression", scope_expression)
        pulumi.set(__self__, "score_label", score_label)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if display_name is not None:
            pulumi.set(__self__, "display_name", display_name)
        if grades is not None:
            pulumi.set(__self__, "grades", grades)
        if involved_interaction_types is not None:
            pulumi.set(__self__, "involved_interaction_types", involved_interaction_types)
        if involved_kpi_types is not None:
            pulumi.set(__self__, "involved_kpi_types", involved_kpi_types)
        if involved_relationships is not None:
            pulumi.set(__self__, "involved_relationships", involved_relationships)
        if prediction_name is not None:
            pulumi.set(__self__, "prediction_name", prediction_name)
    # Property accessors below map snake_case attributes to the API's
    # camelCase names via @pulumi.getter(name=...).
    @property
    @pulumi.getter(name="autoAnalyze")
    def auto_analyze(self) -> pulumi.Input[bool]:
        """
        Whether do auto analyze.
        """
        return pulumi.get(self, "auto_analyze")
    @auto_analyze.setter
    def auto_analyze(self, value: pulumi.Input[bool]):
        pulumi.set(self, "auto_analyze", value)
    @property
    @pulumi.getter(name="hubName")
    def hub_name(self) -> pulumi.Input[str]:
        """
        The name of the hub.
        """
        return pulumi.get(self, "hub_name")
    @hub_name.setter
    def hub_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "hub_name", value)
    @property
    @pulumi.getter
    def mappings(self) -> pulumi.Input['PredictionMappingsArgs']:
        """
        Definition of the link mapping of prediction.
        """
        return pulumi.get(self, "mappings")
    @mappings.setter
    def mappings(self, value: pulumi.Input['PredictionMappingsArgs']):
        pulumi.set(self, "mappings", value)
    @property
    @pulumi.getter(name="negativeOutcomeExpression")
    def negative_outcome_expression(self) -> pulumi.Input[str]:
        """
        Negative outcome expression.
        """
        return pulumi.get(self, "negative_outcome_expression")
    @negative_outcome_expression.setter
    def negative_outcome_expression(self, value: pulumi.Input[str]):
        pulumi.set(self, "negative_outcome_expression", value)
    @property
    @pulumi.getter(name="positiveOutcomeExpression")
    def positive_outcome_expression(self) -> pulumi.Input[str]:
        """
        Positive outcome expression.
        """
        return pulumi.get(self, "positive_outcome_expression")
    @positive_outcome_expression.setter
    def positive_outcome_expression(self, value: pulumi.Input[str]):
        pulumi.set(self, "positive_outcome_expression", value)
    @property
    @pulumi.getter(name="primaryProfileType")
    def primary_profile_type(self) -> pulumi.Input[str]:
        """
        Primary profile type.
        """
        return pulumi.get(self, "primary_profile_type")
    @primary_profile_type.setter
    def primary_profile_type(self, value: pulumi.Input[str]):
        pulumi.set(self, "primary_profile_type", value)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="scopeExpression")
    def scope_expression(self) -> pulumi.Input[str]:
        """
        Scope expression.
        """
        return pulumi.get(self, "scope_expression")
    @scope_expression.setter
    def scope_expression(self, value: pulumi.Input[str]):
        pulumi.set(self, "scope_expression", value)
    @property
    @pulumi.getter(name="scoreLabel")
    def score_label(self) -> pulumi.Input[str]:
        """
        Score label.
        """
        return pulumi.get(self, "score_label")
    @score_label.setter
    def score_label(self, value: pulumi.Input[str]):
        pulumi.set(self, "score_label", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Description of the prediction.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Display name of the prediction.
        """
        return pulumi.get(self, "display_name")
    @display_name.setter
    def display_name(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "display_name", value)
    @property
    @pulumi.getter
    def grades(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PredictionGradesArgs']]]]:
        """
        The prediction grades.
        """
        return pulumi.get(self, "grades")
    @grades.setter
    def grades(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PredictionGradesArgs']]]]):
        pulumi.set(self, "grades", value)
    @property
    @pulumi.getter(name="involvedInteractionTypes")
    def involved_interaction_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Interaction types involved in the prediction.
        """
        return pulumi.get(self, "involved_interaction_types")
    @involved_interaction_types.setter
    def involved_interaction_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "involved_interaction_types", value)
    @property
    @pulumi.getter(name="involvedKpiTypes")
    def involved_kpi_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        KPI types involved in the prediction.
        """
        return pulumi.get(self, "involved_kpi_types")
    @involved_kpi_types.setter
    def involved_kpi_types(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "involved_kpi_types", value)
    @property
    @pulumi.getter(name="involvedRelationships")
    def involved_relationships(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Relationships involved in the prediction.
        """
        return pulumi.get(self, "involved_relationships")
    @involved_relationships.setter
    def involved_relationships(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "involved_relationships", value)
    @property
    @pulumi.getter(name="predictionName")
    def prediction_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the prediction.
        """
        return pulumi.get(self, "prediction_name")
    @prediction_name.setter
    def prediction_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "prediction_name", value)
class Prediction(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
auto_analyze: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
display_name: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
grades: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PredictionGradesArgs']]]]] = None,
hub_name: Optional[pulumi.Input[str]] = None,
involved_interaction_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
involved_kpi_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
involved_relationships: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
mappings: Optional[pulumi.Input[pulumi.InputType['PredictionMappingsArgs']]] = None,
negative_outcome_expression: Optional[pulumi.Input[str]] = None,
positive_outcome_expression: Optional[pulumi.Input[str]] = None,
prediction_name: Optional[pulumi.Input[str]] = None,
primary_profile_type: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
scope_expression: Optional[pulumi.Input[str]] = None,
score_label: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
The prediction resource format.
API Version: 2017-04-26.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] auto_analyze: Whether do auto analyze.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] description: Description of the prediction.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] display_name: Display name of the prediction.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PredictionGradesArgs']]]] grades: The prediction grades.
:param pulumi.Input[str] hub_name: The name of the hub.
:param pulumi.Input[Sequence[pulumi.Input[str]]] involved_interaction_types: Interaction types involved in the prediction.
:param pulumi.Input[Sequence[pulumi.Input[str]]] involved_kpi_types: KPI types involved in the prediction.
:param pulumi.Input[Sequence[pulumi.Input[str]]] involved_relationships: Relationships involved in the prediction.
:param pulumi.Input[pulumi.InputType['PredictionMappingsArgs']] mappings: Definition of the link mapping of prediction.
:param pulumi.Input[str] negative_outcome_expression: Negative outcome expression.
:param pulumi.Input[str] positive_outcome_expression: Positive outcome expression.
:param pulumi.Input[str] prediction_name: Name of the prediction.
:param pulumi.Input[str] primary_profile_type: Primary profile type.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] scope_expression: Scope expression.
:param pulumi.Input[str] score_label: Score label.
"""
...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: PredictionArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        The prediction resource format.
        API Version: 2017-04-26.

        This overload takes a fully populated :class:`PredictionArgs` bundle
        instead of the individual keyword arguments of the other overload.

        :param str resource_name: The name of the resource.
        :param PredictionArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PredictionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 auto_analyze: Optional[pulumi.Input[bool]] = None,
                 description: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 display_name: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 grades: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PredictionGradesArgs']]]]] = None,
                 hub_name: Optional[pulumi.Input[str]] = None,
                 involved_interaction_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 involved_kpi_types: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 involved_relationships: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 mappings: Optional[pulumi.Input[pulumi.InputType['PredictionMappingsArgs']]] = None,
                 negative_outcome_expression: Optional[pulumi.Input[str]] = None,
                 positive_outcome_expression: Optional[pulumi.Input[str]] = None,
                 prediction_name: Optional[pulumi.Input[str]] = None,
                 primary_profile_type: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 scope_expression: Optional[pulumi.Input[str]] = None,
                 score_label: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """Shared initializer backing both ``__init__`` overloads.

        Validates required inputs, builds the property bag and registers the
        resource with the Pulumi engine under its legacy type aliases.
        """
        # Normalize resource options and pin the SDK version if unset.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # __props__ may only be supplied together with opts.id (existing-resource lookup).
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = PredictionArgs.__new__(PredictionArgs)
            # When opts.urn is set we are adopting an existing resource, so the
            # required-input checks below are skipped.
            if auto_analyze is None and not opts.urn:
                raise TypeError("Missing required property 'auto_analyze'")
            __props__.__dict__["auto_analyze"] = auto_analyze
            __props__.__dict__["description"] = description
            __props__.__dict__["display_name"] = display_name
            __props__.__dict__["grades"] = grades
            if hub_name is None and not opts.urn:
                raise TypeError("Missing required property 'hub_name'")
            __props__.__dict__["hub_name"] = hub_name
            __props__.__dict__["involved_interaction_types"] = involved_interaction_types
            __props__.__dict__["involved_kpi_types"] = involved_kpi_types
            __props__.__dict__["involved_relationships"] = involved_relationships
            if mappings is None and not opts.urn:
                raise TypeError("Missing required property 'mappings'")
            __props__.__dict__["mappings"] = mappings
            if negative_outcome_expression is None and not opts.urn:
                raise TypeError("Missing required property 'negative_outcome_expression'")
            __props__.__dict__["negative_outcome_expression"] = negative_outcome_expression
            if positive_outcome_expression is None and not opts.urn:
                raise TypeError("Missing required property 'positive_outcome_expression'")
            __props__.__dict__["positive_outcome_expression"] = positive_outcome_expression
            __props__.__dict__["prediction_name"] = prediction_name
            if primary_profile_type is None and not opts.urn:
                raise TypeError("Missing required property 'primary_profile_type'")
            __props__.__dict__["primary_profile_type"] = primary_profile_type
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            if scope_expression is None and not opts.urn:
                raise TypeError("Missing required property 'scope_expression'")
            __props__.__dict__["scope_expression"] = scope_expression
            if score_label is None and not opts.urn:
                raise TypeError("Missing required property 'score_label'")
            __props__.__dict__["score_label"] = score_label
            # Output-only properties start as None and are resolved by the engine.
            __props__.__dict__["name"] = None
            __props__.__dict__["provisioning_state"] = None
            __props__.__dict__["system_generated_entities"] = None
            __props__.__dict__["tenant_id"] = None
            __props__.__dict__["type"] = None
        # Keep URNs stable for stacks created with the older azure-nextgen /
        # version-pinned type tokens.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:customerinsights:Prediction"), pulumi.Alias(type_="azure-native:customerinsights/v20170426:Prediction"), pulumi.Alias(type_="azure-nextgen:customerinsights/v20170426:Prediction")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(Prediction, __self__).__init__(
            'azure-native:customerinsights:Prediction',
            resource_name,
            __props__,
            opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Prediction':
"""
Get an existing Prediction resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = PredictionArgs.__new__(PredictionArgs)
__props__.__dict__["auto_analyze"] = None
__props__.__dict__["description"] = None
__props__.__dict__["display_name"] = None
__props__.__dict__["grades"] = None
__props__.__dict__["involved_interaction_types"] = None
__props__.__dict__["involved_kpi_types"] = None
__props__.__dict__["involved_relationships"] = None
__props__.__dict__["mappings"] = None
__props__.__dict__["name"] = None
__props__.__dict__["negative_outcome_expression"] = None
__props__.__dict__["positive_outcome_expression"] = None
__props__.__dict__["prediction_name"] = None
__props__.__dict__["primary_profile_type"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["scope_expression"] = None
__props__.__dict__["score_label"] = None
__props__.__dict__["system_generated_entities"] = None
__props__.__dict__["tenant_id"] = None
__props__.__dict__["type"] = None
return Prediction(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="autoAnalyze")
def auto_analyze(self) -> pulumi.Output[bool]:
"""
Whether do auto analyze.
"""
return pulumi.get(self, "auto_analyze")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Description of the prediction.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Display name of the prediction.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def grades(self) -> pulumi.Output[Optional[Sequence['outputs.PredictionResponseGrades']]]:
"""
The prediction grades.
"""
return pulumi.get(self, "grades")
@property
@pulumi.getter(name="involvedInteractionTypes")
def involved_interaction_types(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Interaction types involved in the prediction.
"""
return pulumi.get(self, "involved_interaction_types")
@property
@pulumi.getter(name="involvedKpiTypes")
def involved_kpi_types(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
KPI types involved in the prediction.
"""
return pulumi.get(self, "involved_kpi_types")
@property
@pulumi.getter(name="involvedRelationships")
def involved_relationships(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Relationships involved in the prediction.
"""
return pulumi.get(self, "involved_relationships")
@property
@pulumi.getter
def mappings(self) -> pulumi.Output['outputs.PredictionResponseMappings']:
"""
Definition of the link mapping of prediction.
"""
return pulumi.get(self, "mappings")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="negativeOutcomeExpression")
def negative_outcome_expression(self) -> pulumi.Output[str]:
"""
Negative outcome expression.
"""
return pulumi.get(self, "negative_outcome_expression")
@property
@pulumi.getter(name="positiveOutcomeExpression")
def positive_outcome_expression(self) -> pulumi.Output[str]:
"""
Positive outcome expression.
"""
return pulumi.get(self, "positive_outcome_expression")
@property
@pulumi.getter(name="predictionName")
def prediction_name(self) -> pulumi.Output[Optional[str]]:
"""
Name of the prediction.
"""
return pulumi.get(self, "prediction_name")
@property
@pulumi.getter(name="primaryProfileType")
def primary_profile_type(self) -> pulumi.Output[str]:
"""
Primary profile type.
"""
return pulumi.get(self, "primary_profile_type")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
Provisioning state.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="scopeExpression")
def scope_expression(self) -> pulumi.Output[str]:
"""
Scope expression.
"""
return pulumi.get(self, "scope_expression")
@property
@pulumi.getter(name="scoreLabel")
def score_label(self) -> pulumi.Output[str]:
"""
Score label.
"""
return pulumi.get(self, "score_label")
@property
@pulumi.getter(name="systemGeneratedEntities")
def system_generated_entities(self) -> pulumi.Output['outputs.PredictionResponseSystemGeneratedEntities']:
"""
System generated entities.
"""
return pulumi.get(self, "system_generated_entities")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> pulumi.Output[str]:
"""
The hub name.
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
| StarcoderdataPython |
12836737 | from collections import namedtuple
import requests, pickle
from abc import ABCMeta, abstractmethod
from bs4 import BeautifulSoup
from utilities import *
from random import randint
from threading import Thread
# Lightweight record pairing a quotation with its author.
Quote = namedtuple('Quote', ('text', 'author'))
class QuoteException(Exception):
    """Raised for quote fetching, parsing or storage failures.

    Bug fix: the original called ``super().__init__(self, message)``, passing
    the instance itself as an extra positional argument, which polluted
    ``exc.args`` and made ``str(exc)`` render a tuple instead of the message.
    """

    def __init__(self, message):
        # Pass only the message so exc.args == (message,) and str(exc) == message.
        super().__init__(message)
# Base abstract class
class QuoteProvider:
    """Abstract base for quote scrapers.

    Subclasses implement ``load``/``__parse__``/``__fetch__``; this base
    provides pickle-backed persistence (``save``/``exists``), random sampling
    (``randomize``) and HTTP/HTML helpers (``__request__``/``__html__``).

    NOTE(review): ``__metaclass__`` is the Python 2 spelling and is inert on
    Python 3, so ``@abstractmethod`` is not actually enforced.  It is left
    unchanged deliberately: switching to ``metaclass=ABCMeta`` would break the
    existing subclasses, because the double-underscore method names are
    name-mangled per class and would never satisfy the abstract registry.
    """
    __metaclass__ = ABCMeta

    def __init__(self, filename='quotes.txt', url=None):
        self.url = url              # page (or page template) to scrape
        self.filename = filename    # pickle file used for persistence
        self.quotes = list()        # in-memory cache of fetched quotes

    # Public API
    def save(self, quote):
        ''' Append a quote object to the pickle file; return True on success. '''
        try:
            with open(self.filename, 'ab') as quotefile:
                pickle.dump(quote, quotefile)
            return True
        except Exception as err:
            raise QuoteException("Could not save quote!\nErr: %s" % err)

    def exists(self, quote):
        ''' Return True if an equal quote object is already stored in the pickle file. '''
        try:
            with open(self.filename, 'rb') as quotefile:
                while True:
                    data = pickle.load(quotefile)
                    if quote == data:
                        return True
        except (OSError, EOFError, pickle.UnpicklingError):
            # Missing file or end-of-file simply means "not found".  The old
            # bare ``except:`` also hid genuine programming errors.
            pass
        return False

    def randomize(self):
        ''' Return a random quote from the cached list, or None when it is empty. '''
        if len(self.quotes) > 0:
            number = randint(0, len(self.quotes) - 1)
            return self.quotes[number]

    @abstractmethod
    def load(self):
        ''' Load all quotes into ``self.quotes`` (must be overridden). '''
        return

    # Private API
    @abstractmethod
    def __parse__(self, input):
        ''' Parse fetched output into quote data (must be overridden). '''
        return

    @abstractmethod
    def __fetch__(self, url):
        ''' Fetch one page and append its quotes to ``self.quotes`` (must be overridden). '''
        pass

    def __request__(self, url):
        ''' GET ``url`` (falling back to ``self.url``) and return the response body. '''
        url = url or self.url
        if not url or not Utilities.validate_uri(url):
            raise QuoteException("Url not valid!")
        r = requests.get(url)
        if r.status_code == 200:
            return r.text
        else:
            raise QuoteException("%s could not return quotes!" % self.url)

    def __html__(self, html):
        ''' Return a BeautifulSoup object parsed from ``html``. '''
        if not html:
            raise QuoteException("No html arg!")
        try:
            # NOTE(review): no explicit parser is given, so bs4 picks the
            # "best available" one (lxml when installed); results can differ
            # between machines -- consider pinning "html.parser".
            return BeautifulSoup(html)
        except Exception as err:
            # Include the underlying error instead of silently discarding it.
            raise QuoteException('Could not parse text into BeautifulSoup!\nErr: %s' % err)
# Subclass
class GoodreadQuote(QuoteProvider):
    """Placeholder Goodreads provider; every hook is still an unimplemented stub."""

    def __init__(self):
        super().__init__(url='')

    def load(self):
        return

    def __parse__(self, input):
        return

    def __fetch__(self, url):
        return
# Subclass
class BrainyQuote(QuoteProvider):
    """Scrapes quotes from brainyquote.com keyword listing pages."""
    def __init__(self):
        super().__init__(url='http://www.brainyquote.com/quotes/keywords/list%s.html')
    # Overwritten: generator yielding (text, author) pairs from a parsed page.
    def __parse__(self, input):
        try:
            if not input:
                raise QuoteException("Can't parse input!")
            # find all divs with correct class
            for div in [ x for x in input.find_all('div', attrs={'class': 'boxyPaddingBig'}) ]:
                # get text and author
                # NOTE(review): this unpack assumes each div yields exactly two
                # non-empty lines after filtering; anything else raises and is
                # re-wrapped as QuoteException below.
                text, auth = [ y for y in div.text.split('\n') if y != '"' and y ]
                yield (text, auth)
        except Exception as err:
            raise QuoteException("Can't parse input!\nErr: %s" % err)
    def load(self):
        ''' Load all pages concurrently, one fetch thread per page. '''
        threads = []
        # NOTE(review): range(14) requests 14 pages (list.html plus
        # list_1..list_13); the original "13 pages" note undercounts -- confirm
        # the intended page count.
        for i in range(14): # 13 pages
            url = self.url % ('_{0}'.format(i) if i > 0 else '')
            t = Thread(target=self.__fetch__, args=(url,))
            threads.append(t)
            t.start()
        for thread in threads:
            thread.join()
    def __fetch__(self, url):
        ''' Fetch one page, parse it, and append Quote tuples to self.quotes. '''
        # GET request for data
        data = self.__request__(url)
        # Change into HTML
        html = self.__html__(data)
        # Parse html and iterate.  NOTE(review): multiple threads append to
        # self.quotes concurrently; this relies on CPython's GIL making
        # list.append safe -- verify before porting to another runtime.
        for data in self.__parse__(html):
            text, auth = data
            quote = Quote(text, auth)
            self.quotes.append(quote)
| StarcoderdataPython |
4873703 | '''
Output Format:
Output the integer number indicating the total number of occurrences of the substring in the original string.
Sample Input:
ABCDCDC
CDC
Sample Output:
2
'''
# CODE:
def count_substring(string, sub_string):
    # Count occurrences of sub_string in string, counting overlapping matches.
    # str.count() is unsuitable here because it skips overlapping hits.
    window = len(sub_string)
    return sum(
        1
        for start in range(len(string) - window + 1)
        if string.startswith(sub_string, start)
    )
if __name__ == '__main__':
    # Read the haystack and the needle from stdin, then report the
    # (overlap-aware) occurrence count.
    haystack = input().strip()
    needle = input().strip()
    print(count_substring(haystack, needle))
| StarcoderdataPython |
6639906 | <filename>zodiwiki/__init__.py
from zodiwiki.app import app
import zodiwiki.routes
from zodiwiki.__data__ import __version__, __author__
try:
    # Optional deployment secret: zodiwiki/secrets.py is not part of the
    # package data, so a missing module is tolerated and the app simply keeps
    # its default (unset) secret key.
    from zodiwiki.secrets import __secret_key__
except ImportError:
    pass
else:
    app.secret_key = __secret_key__
    # Drop the module-level alias so the key is only reachable via the app.
    del __secret_key__
| StarcoderdataPython |
5051889 | <filename>Python_Tello(DJI_UAV)/pratice/01_Takeoff.py
from djitellopy import tello
from time import sleep
# The official demo is overcomplicated: it uses threads, a command-line
# response loop and Python 2/3 switching.  This third-party tutorial version
# is simpler, but note it must be run with python3 -- under python2 the DJI
# library cannot be found.
me = tello.Tello()
me.connect()
print(me.get_battery())
me.takeoff()
# Fly with pitch +50 (forward) for two seconds.
me.send_rc_control(0, 50, 0, 0)
sleep(2)
# Bug fix: the original wrote ``me.land`` (a bare attribute access), which is
# a no-op -- the drone was never told to land.
me.land()
1710268 | <gh_stars>0
#!/usr/bin/python3
"""
returns an object (Python data structure) represented by a JSON string
"""
import json
def from_json_string(my_str):
    """Deserialize a JSON document given as a string.

    Returns the Python object (dict, list, str, number, bool or None)
    represented by ``my_str``.
    """
    return json.loads(my_str)
| StarcoderdataPython |
6571986 | import os
import sys
import time
from joblib import Parallel, delayed
import argparse
def render(dst, item_id):
    # Spawn a headless Blender process that renders one item into dst.
    command = (
        'blender --background --python render_custom.py'
        ' -- --output_folder %s --item %s' % (dst, item_id)
    )
    os.system(command)
def main():
    # NOTE(review): hard-coded Windows paths with mixed separators; both point
    # at ShapeNet synset 03001627 -- confirm before running on another machine.
    dst = 'D:/Data/ShapeNetRendering_high\\03001627'
    items_list = os.listdir('D:\\Data\\shapenetrendering_compressed\\ShapeNetRendering\\ShapeNetRendering\\03001627')
    # Render up to 4 items concurrently; each job spawns its own Blender process.
    with Parallel(n_jobs=4) as parallel:
        parallel(delayed(render)(dst, item_id)
                 for item_id in items_list)
if __name__ == '__main__':
    # (Removed a stray "| StarcoderdataPython |" dataset artifact that was
    # fused onto this line and would have raised NameError at runtime.)
    main()
65251 | <reponame>jacobcheatley/trpg
from .base_function import Function
# OTHER/NORMAL STATS
# CURRENT VALUE SETTERS
class IncStatFunction(Function):
    """Adds ``value`` to the current value of the 'other' stat ``name``."""

    def __init__(self, args):
        # args: [stat_name, amount]
        self.name, self.value = args[0], args[1]

    def _do_function(self, campaign):
        campaign.player.stats.other[self.name].current += self.value
class DecStatFunction(Function):
    """Subtracts ``value`` from the current value of the 'other' stat ``name``."""

    def __init__(self, args):
        # args: [stat_name, amount]
        self.name, self.value = args[0], args[1]

    def _do_function(self, campaign):
        campaign.player.stats.other[self.name].current -= self.value
class SetStatFunction(Function):
    """Overwrites the current value of the 'other' stat ``name`` with ``value``."""

    def __init__(self, args):
        # args: [stat_name, new_value]
        self.name, self.value = args[0], args[1]

    def _do_function(self, campaign):
        campaign.player.stats.other[self.name].current = self.value
# GETTERS
class GetStatFunction(Function):
    """Returns the current value of the 'other' stat ``name``."""

    def __init__(self, args):
        # args: [stat_name]
        self.name = args[0]

    def _do_function(self, campaign):
        return campaign.player.stats.other[self.name].current
# RESOURCE STATS
# CURRENT VALUE SETTERS
class IncResFunction(Function):
    """Adds ``value`` to the current value of the resource stat ``name``."""

    def __init__(self, args):
        # args: [resource_name, amount]
        self.name, self.value = args[0], args[1]

    def _do_function(self, campaign):
        campaign.player.stats.resource[self.name].current += self.value
class DecResFunction(Function):
    """Subtracts ``value`` from the current value of the resource stat ``name``."""

    def __init__(self, args):
        # args: [resource_name, amount]
        self.name, self.value = args[0], args[1]

    def _do_function(self, campaign):
        campaign.player.stats.resource[self.name].current -= self.value
class SetResFunction(Function):
    """Overwrites the current value of the resource stat ``name`` with ``value``."""

    def __init__(self, args):
        # args: [resource_name, new_value]
        self.name, self.value = args[0], args[1]

    def _do_function(self, campaign):
        campaign.player.stats.resource[self.name].current = self.value
# MIN VALUE SETTERS
class IncResMinFunction(Function):
    """Adds ``value`` to the minimum of the resource stat ``name``."""

    def __init__(self, args):
        # args: [resource_name, amount]
        self.name, self.value = args[0], args[1]

    def _do_function(self, campaign):
        campaign.player.stats.resource[self.name].min += self.value
class DecResMinFunction(Function):
    """Subtracts ``value`` from the minimum of the resource stat ``name``."""

    def __init__(self, args):
        # args: [resource_name, amount]
        self.name, self.value = args[0], args[1]

    def _do_function(self, campaign):
        campaign.player.stats.resource[self.name].min -= self.value
class SetResMinFunction(Function):
    """Overwrites the minimum of the resource stat ``name`` with ``value``."""

    def __init__(self, args):
        # args: [resource_name, new_minimum]
        self.name, self.value = args[0], args[1]

    def _do_function(self, campaign):
        campaign.player.stats.resource[self.name].min = self.value
# MAX VALUE SETTERS
class IncResMaxFunction(Function):
    """Adds ``value`` to the maximum of the resource stat ``name``."""

    def __init__(self, args):
        # args: [resource_name, amount]
        self.name, self.value = args[0], args[1]

    def _do_function(self, campaign):
        campaign.player.stats.resource[self.name].max += self.value
class DecResMaxFunction(Function):
    """Subtracts ``value`` from the maximum of the resource stat ``name``."""

    def __init__(self, args):
        # args: [resource_name, amount]
        self.name, self.value = args[0], args[1]

    def _do_function(self, campaign):
        campaign.player.stats.resource[self.name].max -= self.value
class SetResMaxFunction(Function):
    """Overwrites the maximum of the resource stat ``name`` with ``value``."""

    def __init__(self, args):
        # args: [resource_name, new_maximum]
        self.name, self.value = args[0], args[1]

    def _do_function(self, campaign):
        campaign.player.stats.resource[self.name].max = self.value
# GETTERS
class GetResFunction(Function):
    """Returns the current value of the resource stat ``name``."""

    def __init__(self, args):
        # args: [resource_name]
        self.name = args[0]

    def _do_function(self, campaign):
        return campaign.player.stats.resource[self.name].current
class GetResMinFunction(Function):
    """Returns the minimum of the resource stat ``name``."""

    def __init__(self, args):
        # args: [resource_name]
        self.name = args[0]

    def _do_function(self, campaign):
        return campaign.player.stats.resource[self.name].min
class GetResMaxFunction(Function):
    """Returns the maximum of the resource stat ``name``."""

    def __init__(self, args):
        # args: [resource_name]
        self.name = args[0]

    def _do_function(self, campaign):
        return campaign.player.stats.resource[self.name].max
# HEALTH STATS
# CURRENT VALUE SETTERS
class IncHealthFunction(Function):
    """Adds ``value`` to the player's current health."""

    def __init__(self, args):
        # args: [amount]
        self.value = args[0]

    def _do_function(self, campaign):
        campaign.player.stats.health.current += self.value
class DecHealthFunction(Function):
    """Subtracts ``value`` from the player's current health."""

    def __init__(self, args):
        # args: [amount]
        self.value = args[0]

    def _do_function(self, campaign):
        campaign.player.stats.health.current -= self.value
class SetHealthFunction(Function):
    """Overwrites the player's current health with ``value``."""

    def __init__(self, args):
        # args: [new_value]
        self.value = args[0]

    def _do_function(self, campaign):
        campaign.player.stats.health.current = self.value
# MIN VALUE SETTERS
class IncHealthMinFunction(Function):
    """Adds ``value`` to the player's minimum health."""

    def __init__(self, args):
        # args: [amount]
        self.value = args[0]

    def _do_function(self, campaign):
        campaign.player.stats.health.min += self.value
class DecHealthMinFunction(Function):
    """Subtracts ``value`` from the player's minimum health."""

    def __init__(self, args):
        # args: [amount]
        self.value = args[0]

    def _do_function(self, campaign):
        campaign.player.stats.health.min -= self.value
class SetHealthMinFunction(Function):
    """Overwrites the player's minimum health with ``value``."""

    def __init__(self, args):
        # args: [new_minimum]
        self.value = args[0]

    def _do_function(self, campaign):
        campaign.player.stats.health.min = self.value
# MAX VALUE SETTERS
class IncHealthMaxFunction(Function):
    """Adds ``value`` to the player's maximum health."""

    def __init__(self, args):
        # args: [amount]
        self.value = args[0]

    def _do_function(self, campaign):
        campaign.player.stats.health.max += self.value
class DecHealthMaxFunction(Function):
    """Subtracts ``value`` from the player's maximum health."""

    def __init__(self, args):
        # args: [amount]
        self.value = args[0]

    def _do_function(self, campaign):
        campaign.player.stats.health.max -= self.value
class SetHealthMaxFunction(Function):
    """Overwrites the player's maximum health with ``value``."""

    def __init__(self, args):
        # args: [new_maximum]
        self.value = args[0]

    def _do_function(self, campaign):
        campaign.player.stats.health.max = self.value
# GETTERS
class GetHealthFunction(Function):
    """Returns the player's current health."""

    def __init__(self, args):
        # Health getters take no configuration; args is ignored.
        pass

    def _do_function(self, campaign):
        return campaign.player.stats.health.current
class GetHealthMinFunction(Function):
    """Returns the player's minimum health."""

    def __init__(self, args):
        # Health getters take no configuration; args is ignored.
        pass

    def _do_function(self, campaign):
        return campaign.player.stats.health.min
class GetHealthMaxFunction(Function):
    """Returns the player's maximum health."""

    def __init__(self, args):
        # Health getters take no configuration; args is ignored.
        pass

    def _do_function(self, campaign):
        return campaign.player.stats.health.max
| StarcoderdataPython |
# Read one integer from stdin and print its remainder modulo 3.
# (Also drops a "| StarcoderdataPython |" dataset artifact fused onto the
# original print line.)
number = int(input())
print(number % 3)
11215528 | <gh_stars>10-100
from sciwing.tokenizers.word_tokenizer import WordTokenizer
import pytest
class TestWordTokenizer:
    """Exercise WordTokenizer across its tokenizer back-ends."""

    def test_sample_word_tokenization(self):
        tokens = WordTokenizer().tokenize("I like big apple.")
        assert tokens == ["I", "like", "big", "apple", "."]

    def test_sample_apostrophe_tokenization(self):
        tokens = WordTokenizer().tokenize("I don't like apples.")
        assert tokens == ["I", "do", "n't", "like", "apples", "."]

    def test_len_sample_batch(self):
        batch = ["I like big apple.", "We process text"]
        assert len(WordTokenizer().tokenize_batch(batch)) == 2

    def test_word_tokenization_types(self):
        # An unsupported tokenizer name must be rejected at construction time.
        with pytest.raises(AssertionError):
            WordTokenizer(tokenizer="moses")

    # TODO: Remove this after nltk tokenization is implemented
    def test_other_tokenizer(self):
        assert WordTokenizer(tokenizer="nltk").tokenize("First string") is None

    def test_vanilla_tokenizer(self):
        expected = [
            "(1999).", "&", "P.,", "W.", "The", "Control",
            "of", "Discrete", "Event", "Systems.",
        ]
        result = WordTokenizer(tokenizer="vanilla").tokenize(
            "(1999). & P., W. The Control of Discrete Event Systems."
        )
        assert result == expected

    def test_spacy_whitespace_tokenizer(self):
        expected = [
            "(1999).", "&", "P.,", "W.", "The", "Control",
            "of", "Discrete", "Event", "Systems.",
        ]
        result = WordTokenizer(tokenizer="spacy-whitespace").tokenize(
            "(1999). & P., W. The Control of Discrete Event Systems."
        )
        assert result == expected
| StarcoderdataPython |
# Public API of this module.  Fix: TMXException and InvalidTMXCode are defined
# below but were missing from __all__, so star-imports silently omitted them.
__all__ = (
    "TrackmaniaException",
    "TMIOException",
    "TMXException",
    "InvalidPlayerException",
    "NoUserAgentSetError",
    "InvalidUsernameError",
    "InvalidIDError",
    "InvalidTrophyNumber",
    "InvalidTOTDDate",
    "InvalidTMXCode",
)
# pylint: disable=unnecessary-pass
class TrackmaniaException(Exception):
    """Base exception class for all errors raised by py-tmio."""
    pass
class TMIOException(Exception):
    """Base exception class for errors originating from trackmania.io."""
    pass
class TMXException(Exception):
    """Base exception class for errors originating from trackmania.exchange."""
    pass
class InvalidPlayerException(TrackmaniaException):
    """Base exception class for player-related errors."""
    pass
class NoUserAgentSetError(Exception):
    """Raised when a User-Agent has not been configured."""

    def __init__(self):
        super().__init__(
            "No User Agent has been set.\nPlease read the README for instructions on how to set the USER_AGENT."
        )
class InvalidUsernameError(InvalidPlayerException):
    """Raised when a username is not valid."""

    def __init__(self, *args):
        # Forward the first positional argument (if any) as the message.
        message = args[0] if args else None
        super().__init__(message)
class InvalidIDError(InvalidPlayerException):
    """Raised when an invalid ID is given."""

    def __init__(self, *args):
        # Forward the first positional argument (if any) as the message.
        message = args[0] if args else None
        super().__init__(message)
class InvalidTrophyNumber(TrackmaniaException):
    """Raised when an invalid trophy number is supplied."""

    def __init__(self, *args):
        # Forward the first positional argument (if any) as the message.
        message = args[0] if args else None
        super().__init__(message)
class InvalidTOTDDate(TrackmaniaException):
    """Raised when an invalid Track-of-the-Day date is given."""

    def __init__(self, *args):
        # Forward the first positional argument (if any) as the message.
        message = args[0] if args else None
        super().__init__(message)
class InvalidTMXCode(TMXException):
    """Raised when an invalid TMX code is given."""

    def __init__(self, *args):
        # Forward the first positional argument (if any) as the message.
        message = args[0] if args else None
        super().__init__(message)
| StarcoderdataPython |
6599127 | <filename>write.py
#!/usr/bin/python
#-*- encoding:utf-8 -*-
import os
import time
import random
# Python 2 script (raw_input below): input() evaluates what the user types,
# so "length" and "times" are expected to be typed as integers.
# NOTE(review): "<KEY>" is a redacted placeholder -- chars must contain at
# least 20 characters for the randint(0,19) index used below.
chars="<KEY>"
length=input("Lines?\n:")
firstline=raw_input("First word?\n:")
times=input("Number of poems?\n:")
# Open TextEdit and create a new document via Cmd-N (macOS UI automation).
os.system("open /Applications/TextEdit.app/")
time.sleep(1)
os.system("""osascript -e 'tell application "System Events" to keystroke "n" using command down'""")
time.sleep(1)
# One iteration per poem: a "#########" separator, the seed word, then
# `length` lines of 2-5 autocompleted words each.
for x in range(0,times):
    os.system("""osascript -e 'tell application "System Events" to keystroke "#########"'""")
    # key code 76 = Enter
    os.system(""" osascript -e 'tell application "System Events"
    key code 76
    end tell' """)
    os.system("""osascript -e 'tell application "System Events" to keystroke "%s"'""" %(firstline))
    for i in range(0,length):
        words=random.randint(2,5)
        for w in range(0,words):
            # Roughly 1-in-3 chance to start the next word from a random letter.
            if random.randint(0,2) == 2:
                char=chars[random.randint(0,19)]
                os.system("""osascript -e 'tell application "System Events" to keystroke "%s"'""" %(char))
                # key code 53 = Escape (opens the completion popup)
                os.system(""" osascript -e 'tell application "System Events"
                key code 53
                end tell' """)
            # Walk 1-20 steps down the suggestion list (Option+Down, key 125).
            option=random.randint(1,20)
            for a in range (0,option):
                os.system("""osascript -e 'tell application "System Events" to key code 125 using option down'""")
            os.system(""" osascript -e 'tell application "System Events"
            key code 76
            end tell' """)
        # key code 49 = Space between words, then Enter to end the line.
        os.system(""" osascript -e 'tell application "System Events"
        key code 49
        end tell' """)
        os.system(""" osascript -e 'tell application "System Events"
        key code 76
        end tell' """)
| StarcoderdataPython |
8012410 | import tensorflow as tf
import numpy as np
#Convolution implementation
def conv(batch_input, out_channels, stride, filterSize=4, initScale = 0.02, useXavier=False, paddingSize = 1, useBias=False):
    # 2-D convolution over NHWC input: constant-pads spatially by paddingSize,
    # then applies a filterSize x filterSize kernel with the given stride
    # ("VALID", so the padding fully determines the output size).
    with tf.variable_scope("conv"):
        # Height/width stay symbolic; only the channel count must be concrete.
        in_height, in_width, in_channels = [batch_input.get_shape()[1], batch_input.get_shape()[2], int(batch_input.get_shape()[-1])]
        # Kernel init: Xavier-style normal (scaled by fan-in + fan-out) when
        # requested, otherwise a plain normal with stddev initScale.
        filter = tf.get_variable("filter", [filterSize, filterSize, in_channels, out_channels], dtype=tf.float32, initializer=tf.random_normal_initializer(0, np.sqrt(2.0/(int(in_channels) + int(out_channels))) * initScale) if useXavier else tf.random_normal_initializer(0, initScale))
        padded_input = tf.pad(batch_input, [[0, 0], [paddingSize, paddingSize], [paddingSize, paddingSize], [0, 0]], mode="CONSTANT")#SYMMETRIC
        conv = tf.nn.conv2d(padded_input, filter, [1, stride, stride, 1], padding="VALID")
        if useBias:
            # Per-output-channel additive bias, broadcast over batch and space.
            offset = tf.get_variable("offset", [1, 1, 1, out_channels], dtype=tf.float32, initializer=tf.zeros_initializer())
            conv = conv + offset
        return conv
def lrelu(x, a):
    # Leaky ReLU with slope `a` on the negative side: for x > 0 the expression
    # reduces to x, for x < 0 it reduces to a*x.
    with tf.name_scope("lrelu"):
        # adding these together creates the leak part and linear part
        # then cancels them out by subtracting/adding an absolute value term
        # leak: a*x/2 - a*abs(x)/2
        # linear: x/2 + abs(x)/2

        # this block looks like it has 2 inputs on the graph unless we do this
        x = tf.identity(x)
        return (0.5 * (1 + a)) * x + (0.5 * (1 - a)) * tf.abs(x)
#Deconvolution used in the method
def deconv(batch_input, out_channels):
    # Resize-then-convolve upsampling: 2x nearest-neighbor resize followed by
    # two 4x4 SAME convolutions (a transposed convolution replaced by
    # resize + conv; the original conv2d_transpose call is kept commented out).
    with tf.variable_scope("deconv"):
        in_height, in_width, in_channels = [int(batch_input.get_shape()[1]), int(batch_input.get_shape()[2]), int(batch_input.get_shape()[3])]
        filter = tf.get_variable("filter", [4, 4, in_channels, out_channels], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.02))
        filter1 = tf.get_variable("filter1", [4, 4, out_channels, out_channels], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.02))
        # [batch, in_height, in_width, in_channels], [filter_width, filter_height, out_channels, in_channels]
        #     => [batch, out_height, out_width, out_channels]
        resized_images = tf.image.resize_images(batch_input, [in_height * 2, in_width * 2], method = tf.image.ResizeMethod.NEAREST_NEIGHBOR)#BILINEAR
        conv = tf.nn.conv2d(resized_images, filter, [1, 1, 1, 1], padding="SAME")
        conv = tf.nn.conv2d(conv, filter1, [1, 1, 1, 1], padding="SAME")
        #conv = tf.nn.conv2d_transpose(batch_input, filter, [batch, in_height * 2, in_width * 2, out_channels], [1, 2, 2, 1], padding="SAME")
        return conv
#Theoretically correct convolution
def deconv_bis(batch_input, out_channels):
    # Variant of deconv: 2x bilinear resize (align_corners=True) followed by
    # two 3x3 VALID convolutions with explicit 1-pixel REFLECT then SYMMETRIC
    # padding, so the spatial size of the resize output is preserved.
    with tf.variable_scope("deconv"):
        in_height, in_width, in_channels = [int(batch_input.get_shape()[1]), int(batch_input.get_shape()[2]), int(batch_input.get_shape()[3])]
        filter = tf.get_variable("filter", [3, 3, in_channels, out_channels], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.02))
        filter1 = tf.get_variable("filter1", [3, 3, out_channels, out_channels], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.02))
        # [batch, in_height, in_width, in_channels], [filter_width, filter_height, out_channels, in_channels]
        #     => [batch, out_height, out_width, out_channels]
        resized_images = tf.image.resize_images(batch_input, [in_height * 2, in_width * 2], method = tf.image.ResizeMethod.BILINEAR, align_corners=True)#NEAREST_NEIGHBOR BILINEAR
        paddingSize = 1
        padded = tf.pad(resized_images, [[0, 0], [paddingSize, paddingSize], [paddingSize, paddingSize], [0, 0]], mode="REFLECT")#CONSTANT
        conv = tf.nn.conv2d(padded, filter, [1, 1, 1, 1], padding="VALID")
        padded = tf.pad(conv, [[0, 0], [paddingSize, paddingSize], [paddingSize, paddingSize], [0, 0]], mode="SYMMETRIC")#CONSTANT
        conv = tf.nn.conv2d(padded, filter1, [1, 1, 1, 1], padding="VALID")
        #conv = tf.nn.conv2d_transpose(batch_input, filter, [batch, in_height * 2, in_width * 2, out_channels], [1, 2, 2, 1], padding="SAME")
        return conv
#input is of shape [batch, X]. Returns the outputs of the layer.
def fullyConnected(input, outputDim, useBias, layerName = "fully_connected", initMultiplyer = 1.0):
    # Dense layer implemented by tiling the [in, out] weight matrix per batch
    # element and using a batched matmul -- functionally a plain dense layer.
    with tf.variable_scope(layerName):
        batchSize = tf.shape(input)[0];
        inputChannels = int(input.get_shape()[-1])
        weights = tf.get_variable("weight", [inputChannels, outputDim ], dtype=tf.float32, initializer=tf.random_normal_initializer(0, initMultiplyer * tf.sqrt(1.0/float(inputChannels)))) #TODO Is this init a good idea ?
        weightsTiled = tf.tile(tf.expand_dims(weights, axis = 0), [batchSize, 1,1])
        # Rank-4 inputs (e.g. [batch, 1, 1, C]) are squeezed down to [batch, C];
        # assumes the two spatial dims are 1 in that case -- TODO confirm.
        squeezedInput = input
        if (len(input.get_shape()) > 3) :
            squeezedInput = tf.squeeze(squeezedInput, [1])
            squeezedInput = tf.squeeze(squeezedInput, [1])
        outputs = tf.matmul(tf.expand_dims(squeezedInput, axis = 1), weightsTiled)
        outputs = tf.squeeze(outputs, [1])
        if(useBias):
            bias = tf.get_variable("bias", [outputDim], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.002))
            outputs = outputs + tf.expand_dims(bias, axis = 0)
        return outputs
#Takes a globalGenerator output as input and transforms it so it can be added to the main U-NET track
def GlobalToGenerator(inputs, channels):
    # Project to `channels` dims with a bias-free dense layer, then add two
    # singleton spatial dims so the result broadcasts over feature maps.
    with tf.variable_scope("GlobalToGenerator1"):
        # Very small init multiplier (0.01) keeps this branch near-zero at the
        # start of training -- original author note: "Why so low ?"
        fc1 = fullyConnected(inputs, channels, False, "fullyConnected_global_to_unet" ,0.01) #Why so low ?
        return tf.expand_dims(tf.expand_dims(fc1, axis = 1), axis=1)
#Pooling implementation (max or mean depending on what is requested by the user)
def pooling(pooling_type, inputs, dynamic_batch_size):
    # Reduces over axis 1 (the per-sample inputs axis); expects inputs shaped
    # [batch, nbInputs, W, H, C].  The commented-out reshape shows the layout
    # this used to build from a flat batch; dynamic_batch_size is now unused.
    outputs = inputs#tf.reshape(inputs, [dynamic_batch_size, -1, int(inputs.get_shape()[1]), int(inputs.get_shape()[2]), int(inputs.get_shape()[3])])
    # outputs should be [batch, nbInputs, W, H,C]
    # NOTE(review): debug print left in -- runs once at graph-build time.
    print(pooling_type)
    if pooling_type == "max":
        outputs = tf.reduce_max(outputs, axis=1)
    elif pooling_type == "mean":
        outputs = tf.reduce_mean(outputs, axis=1)
    # NOTE(review): any other pooling_type silently returns the input unreduced.
    #outputs should be [batch, W, H, C]
    tf.Print(outputs, [tf.shape(outputs)], "outputs shape after pooling: ")
    return outputs
#The instance normalization implementation
def instancenorm(input):
    # Instance norm: per-sample, per-channel normalization over the spatial
    # axes (1, 2), with learned scale and offset.  Returns the normalized
    # tensor together with the per-instance mean and variance.
    with tf.variable_scope("instancenorm"):
        # this block looks like it has 3 inputs on the graph unless we do this
        input = tf.identity(input)
        channels = input.get_shape()[3]
        offset = tf.get_variable("offset", [1, 1, 1, channels], dtype=tf.float32, initializer=tf.zeros_initializer())
        scale = tf.get_variable("scale", [1, 1, 1, channels], dtype=tf.float32, initializer=tf.random_normal_initializer(1.0, 0.02))
        mean, variance = tf.nn.moments(input, axes=[1, 2], keep_dims=True)
        #[batchsize ,1,1, channelNb]
        variance_epsilon = 1e-5
        #Batch normalization function does the mean substraction then divide by the standard deviation (to normalize it). It finally multiply by scale and adds offset.
        #normalized = tf.nn.batch_normalization(input, mean, variance, offset, scale, variance_epsilon=variance_epsilon)
        #For instanceNorm we do it ourselves :
        normalized = (((input - mean) / tf.sqrt(variance + variance_epsilon)) * scale) + offset
        return normalized, mean, variance
| StarcoderdataPython |
from setuptools import setup
from os import path

# Use the README next to this file as the PyPI long description.
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='pyarc',
    packages=['pyarc', "pyarc.data_structures", "pyarc.algorithms", "pyarc.qcba", "pyarc.utils","pyarc.qcba.data_structures"],
    version='1.1.1',
    description='An implementation of CBA algorithm',
    author='<NAME>',
    author_email='<EMAIL>',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/jirifilip/pyARC',
    download_url='https://github.com/jirifilip/pyARC/archive/1.0.tar.gz',
    keywords='classification CBA association rules machine learning',
    classifiers=[],
    # 'sklearn' is a deprecated dummy distribution on PyPI (its install now
    # fails by design); the real distribution name is 'scikit-learn'.
    install_requires=['pandas', 'numpy', 'scikit-learn', "pyfim"]
)
| StarcoderdataPython |
3210623 | <gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-02-08 07:43
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Set localized admin display names on the polls models.

    The escape sequences decode to Chinese labels: \u7b54\u6848 = "答案"
    (answer), \u95ee\u9898 = "问题" (question).
    """
    dependencies = [
        ('polls', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='choice',
            options={'verbose_name': '\u7b54\u6848', 'verbose_name_plural': '\u7b54\u6848'},
        ),
        migrations.AlterModelOptions(
            name='question',
            options={'verbose_name': '\u95ee\u9898', 'verbose_name_plural': '\u95ee\u9898'},
        ),
    ]
| StarcoderdataPython |
6521555 | <filename>test/unit/test_version.py
import unittest
from unittest.mock import patch, MagicMock
import dbt.main
import dbt.version
import sys
class VersionTest(unittest.TestCase):
    """Tests for dbt's version comparison and reporting (dbt.version).

    Each test patches the installed __version__ and mocks the PyPI metadata
    request so no network access happens.
    """

    @patch("dbt.version.__version__", "0.10.0")
    @patch('dbt.version.requests.get')
    def test_versions_equal(self, mock_get):
        """Installed == latest: report 'Up to date!'."""
        mock_get.return_value.json.return_value = {'info': {'version': '0.10.0'}}
        latest_version = dbt.version.get_latest_version()
        installed_version = dbt.version.get_installed_version()
        version_information = dbt.version.get_version_information()
        expected_version_information = "installed version: 0.10.0\n" \
                                       "   latest version: 0.10.0\n\n" \
                                       "Up to date!"
        # (A duplicated copy of this assertion was removed.)
        self.assertEqual(latest_version, installed_version)
        self.assertMultiLineEqual(version_information,
                                  expected_version_information)

    @patch("dbt.version.__version__", "0.10.2-a1")
    @patch('dbt.version.requests.get')
    def test_installed_version_greater(self, mock_get):
        """Installed > latest (pre-release ahead of PyPI)."""
        mock_get.return_value.json.return_value = {'info': {'version': '0.10.1'}}
        latest_version = dbt.version.get_latest_version()
        installed_version = dbt.version.get_installed_version()
        version_information = dbt.version.get_version_information()
        expected_version_information = "installed version: 0.10.2-a1\n" \
                                       "   latest version: 0.10.1\n\n" \
                                       "Your version of dbt is ahead of the latest release!"
        assert installed_version > latest_version
        self.assertMultiLineEqual(version_information,
                                  expected_version_information)

    @patch("dbt.version.__version__", "0.9.5")
    @patch('dbt.version.requests.get')
    def test_installed_version_lower(self, mock_get):
        """Installed < latest: report out-of-date with upgrade instructions."""
        mock_get.return_value.json.return_value = {'info': {'version': '0.10.0'}}
        latest_version = dbt.version.get_latest_version()
        installed_version = dbt.version.get_installed_version()
        version_information = dbt.version.get_version_information()
        expected_version_information = "installed version: 0.9.5\n" \
                                       "   latest version: 0.10.0\n\n" \
                                       "Your version of dbt is out of date! " \
                                       "You can find instructions for upgrading here:\n" \
                                       "https://docs.getdbt.com/docs/installation"
        assert installed_version < latest_version
        self.assertMultiLineEqual(version_information,
                                  expected_version_information)

    # suppress having version info printed to the screen during tests.
    @patch('sys.stderr')
    @patch('dbt.version.requests.get')
    def test_dbt_version_flag(self, mock_get, stderr):
        """`dbt --version` exits with status 0."""
        mock_get.return_value.json.return_value = {'info': {'version': '0.10.1'}}
        with self.assertRaises(SystemExit) as exc:
            dbt.main.handle_and_check(['--version'])
        self.assertEqual(exc.exception.code, 0)
| StarcoderdataPython |
384149 | <filename>src/COCCharGen/Generator.py
class Generator:
    """Abstract base class for Call-of-Cthulhu character generators.

    Subclasses override the individual generate_* hooks; generate_char runs
    them all on the given character in a fixed order. Every hook is a no-op
    by default.
    """

    def generate_char(self, char):
        """Run every generation step on `char`, in order."""
        steps = (
            self.generate_basic_info,
            self.generate_characteristics,
            self.generate_status,
            self.generate_combact_stat,
            self.generate_skills,
            self.generate_weapons,
            self.generate_backstory,
            self.generate_inventory,
            self.generate_financial_status,
        )
        for step in steps:
            step(char)

    def generate_basic_info(self, char):
        """Hook: fill in basic info. No-op by default."""

    def generate_characteristics(self, char):
        """Hook: fill in characteristics. No-op by default."""

    def generate_status(self, char):
        """Hook: fill in status. No-op by default."""

    def generate_combact_stat(self, char):
        """Hook: fill in combat stats. No-op by default."""

    def generate_skills(self, char):
        """Hook: fill in skills. No-op by default."""

    def generate_weapons(self, char):
        """Hook: fill in weapons. No-op by default."""

    def generate_backstory(self, char):
        """Hook: fill in backstory. No-op by default."""

    def generate_inventory(self, char):
        """Hook: fill in inventory. No-op by default."""

    def generate_financial_status(self, char):
        """Hook: fill in financial status. No-op by default."""
| StarcoderdataPython |
3491212 | # simple script for removing exif data in training and validation images
# issues that can be caused are: https://github.com/codelucas/newspaper/issues/542
import glob
import piexif
from multiprocessing import Pool
from multiprocessing import cpu_count
dirs = ['../external_data/ImageNet/ILSVRC2012_img_train/**/*.JPEG',
'../external_data/ImageNet/ILSVRC2012_img_val/**/*.JPEG']
def clean_file(file_name):
    """Strip EXIF metadata from a single image file, in place."""
    piexif.remove(file_name)

def main():
    """Scrub EXIF data from every image matched by `dirs`, using a worker pool."""
    for folder in dirs:
        file_list = glob.glob(folder, recursive=True)
        # Leave one core free for the rest of the system.
        with Pool(cpu_count() - 1) as pool:
            # map() blocks until every file is processed; the (previously
            # captured but unused) result list is discarded.
            pool.map(clean_file, file_list)

if __name__ == '__main__':
    # The guard is required for multiprocessing on spawn-based platforms
    # (Windows, macOS): worker processes re-import this module and must not
    # re-enter the pool-creation code.
    main()
| StarcoderdataPython |
9707544 | # -*- coding: utf-8 -*-
from zope.interface import Interface, implements
from twisted.internet import protocol
from protocol.AgentControlProtocol import AgentControlProtocol
class IBasicAgentFactory(Interface):
    """zope.interface contract implemented by agent factories."""
    def doSomething(stuff):
        """Return a deferred returning a string"""
    def buildProtocol(addr):
        """Return a protocol returning a string"""
class BasicAgentFactoryFromService(protocol.ClientFactory):
    """Twisted client factory that delegates its operations to a wrapped service.

    The service is expected to expose my_func/getName/query (see the delegating
    methods below).
    """
    implements(IBasicAgentFactory)
    #protocol = BasicAgentProtocol
    protocol = AgentControlProtocol
    def __init__(self, service):
        # Service object whose methods back this factory's API.
        self.service = service
    def doSomething(self, stuff):
        """Delegate to the service's my_func."""
        return self.service.my_func(stuff)
    def getName(self):
        """Return the wrapped service's name."""
        return self.service.getName()
    def query(self, sqlQuery):
        """Forward a SQL query string to the service."""
        return self.service.query(sqlQuery)
| StarcoderdataPython |
11216822 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import prettytable as pt
from fiblary.client import Client
logging.basicConfig(
format='%(asctime)-15s %(levelname)s: %(module)s:%(funcName)s'
':%(lineno)d: %(message)s',
level=logging.CRITICAL)
def print_section_table(sections):
    """Render the given sections as an ASCII table on stdout."""
    columns = ["id", "name", "sortOrder"]
    table = pt.PrettyTable(columns)
    for entry in sections:
        table.add_row([entry.id, entry.name, entry.sortOrder])
    print(table)
def print_room_table(rooms):
    """Render the given rooms, including their default sensors, as an ASCII table."""
    headers = [
        "id",
        "name",
        "sectionID",
        "icon",
        "temperature",
        "humidity",
        "light",
        "thermostat",
    ]
    table = pt.PrettyTable(headers)
    for room in rooms:
        sensors = room.defaultSensors
        table.add_row([
            room.id,
            room.name,
            room.sectionID,
            room.icon,
            sensors['temperature'],
            sensors['humidity'],
            sensors['light'],
            room.defaultThermostat,
        ])
    print(table)
def main():
    """Exercise the Fibaro HC2 sections/rooms API end to end.

    Creates a throwaway section and room, renames the room, then deletes both,
    printing the tables after each step. Talks to a live controller at the
    hard-coded address below.
    """
    hc2 = Client(
        'v3',
        'http://192.168.1.230/api/',
        'admin',
        'admin'
    )
    sections = hc2.sections.list()
    print_section_table(sections)
    print("Adding new section")
    section = hc2.sections.create(name="fiblaro_test_section")
    sections = hc2.sections.list()
    print_section_table(sections)
    rooms = hc2.rooms.list()
    print_room_table(rooms)
    print("Adding new room")
    room = hc2.rooms.create(name="fiblaro_test_room", sectionID=section.id)
    rooms = hc2.rooms.list()
    print_room_table(rooms)
    print("Changing the room name")
    room.name = "fiblaro_test_room_1"
    room = hc2.rooms.update(room)
    print("Changing the room name to: {}".format(room.name))
    rooms = hc2.rooms.list(name="fiblaro_test_room_1")
    print_room_table(rooms)
    print("Deleting rooms")
    for room in hc2.rooms.list(name="fiblaro_test_room_1"):
        hc2.rooms.delete(room.id)
    rooms = hc2.rooms.list(name="fiblaro_test_room_1")
    print_room_table(rooms)
    sections = hc2.sections.list(name="fiblaro_test_section")
    print_section_table(sections)
    print("Deleting sections")
    for section in hc2.sections.list(name="fiblaro_test_section"):
        hc2.sections.delete(section.id)
    sections = hc2.sections.list(name="fiblaro_test_section")
    print_section_table(sections)
    # NOTE(review): exit() here is redundant — main() is about to return anyway.
    exit()
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3510531 | <reponame>robot2051/dto-digitalmarketplace-supplier-frontend
import os
import jinja2
from dmutils.status import enabled_since, get_version_label
class Config(object):
    """Base Flask settings for the supplier frontend.

    Environment-specific classes below override these defaults; the values
    here are the common/off-by-default baseline.
    """
    VERSION = get_version_label(
        os.path.abspath(os.path.dirname(__file__))
    )
    # Session cookie hardening: HTTP-only + secure by default.
    SESSION_COOKIE_NAME = 'dm_session'
    SESSION_COOKIE_PATH = '/'
    SESSION_COOKIE_HTTPONLY = True
    SESSION_COOKIE_SECURE = True
    BASE_PREFIX = ''
    URL_PREFIX = BASE_PREFIX + '/sellers'
    # CSRF tokens expire after 8h; sessions after 4h.
    CSRF_ENABLED = True
    CSRF_TIME_LIMIT = 8*3600
    PERMANENT_SESSION_LIFETIME = 4*3600
    DM_DEFAULT_CACHE_MAX_AGE = 48*3600
    # Data API connection — must be supplied per environment.
    DM_DATA_API_URL = None
    DM_DATA_API_AUTH_TOKEN = None
    DM_CLARIFICATION_QUESTION_EMAIL = '<EMAIL>'
    DM_FRAMEWORK_AGREEMENTS_EMAIL = '<EMAIL>'
    # S3 buckets — supplied per environment.
    DM_AGREEMENTS_BUCKET = None
    DM_COMMUNICATIONS_BUCKET = None
    DM_DOCUMENTS_BUCKET = None
    DM_SUBMISSIONS_BUCKET = None
    DM_ASSETS_URL = None
    DM_HTTP_PROTO = 'http'
    DM_SEND_EMAIL_TO_STDERR = False
    DM_CACHE_TYPE = 'dev'
    DEBUG = False
    # Outbound email identities and subjects.
    GENERIC_CONTACT_EMAIL = '<EMAIL>'
    DM_GENERIC_NOREPLY_EMAIL = '<EMAIL>'
    DM_GENERIC_ADMIN_NAME = 'Digital Marketplace Admin'
    RESET_PASSWORD_EMAIL_NAME = DM_GENERIC_ADMIN_NAME
    RESET_PASSWORD_EMAIL_FROM = '<EMAIL>'
    RESET_PASSWORD_EMAIL_SUBJECT = 'Reset your Digital Marketplace password'
    INVITE_EMAIL_NAME = DM_GENERIC_ADMIN_NAME
    INVITE_EMAIL_FROM = '<EMAIL>'
    INVITE_EMAIL_SUBJECT = 'Your Digital Marketplace invitation'
    NEW_SUPPLIER_INVITE_SUBJECT = 'Digital Marketplace - invitation to create seller account'
    CLARIFICATION_EMAIL_NAME = DM_GENERIC_ADMIN_NAME
    CLARIFICATION_EMAIL_FROM = '<EMAIL>'
    CLARIFICATION_EMAIL_SUBJECT = 'Thanks for your clarification question'
    DM_FOLLOW_UP_EMAIL_TO = '<EMAIL>'
    FRAMEWORK_AGREEMENT_RETURNED_NAME = DM_GENERIC_ADMIN_NAME
    CREATE_USER_SUBJECT = 'Create your Digital Marketplace account'
    # Secrets — must be supplied per environment.
    SECRET_KEY = None
    SHARED_EMAIL_KEY = None
    RESET_PASSWORD_SALT = 'ResetPasswordSalt'
    SUPPLIER_INVITE_TOKEN_SALT = 'SupplierInviteEmail'
    ASSET_PATH = URL_PREFIX + '/static'
    # List all you feature flags below
    FEATURE_FLAGS = {
        'INVITE_CONTRIBUTOR': False,
        'EDIT_SECTIONS': False,
    }
    # Logging
    DM_LOG_LEVEL = 'DEBUG'
    DM_LOG_PATH = None
    DM_APP_NAME = 'supplier-frontend'
    DM_DOWNSTREAM_REQUEST_ID_HEADER = 'X-Amz-Cf-Id'
    @staticmethod
    def init_app(app):
        # Point Jinja at the app's template directory relative to this file.
        repo_root = os.path.abspath(os.path.dirname(__file__))
        template_folders = [
            os.path.join(repo_root, 'app/templates')
        ]
        jinja_loader = jinja2.FileSystemLoader(template_folders)
        app.jinja_loader = jinja_loader
class Test(Config):
    """Settings used by the automated test suite (no real CSRF, quiet logs)."""
    DEBUG = True
    CSRF_ENABLED = False
    CSRF_FAKED = True
    DM_LOG_LEVEL = 'CRITICAL'
    SERVER_NAME = 'localhost'
    # Throw an exception in dev when a feature flag is used in code but not defined. Otherwise it is assumed False.
    RAISE_ERROR_ON_MISSING_FEATURES = True
    # List all you feature flags below
    FEATURE_FLAGS = {
        'INVITE_CONTRIBUTOR': True,
        'EDIT_SECTIONS': True,
    }
    DM_DATA_API_AUTH_TOKEN = '<PASSWORD>'
    SECRET_KEY = 'TestKeyTestKeyTestKeyTestKeyTestKeyTestKeyX='
    SHARED_EMAIL_KEY = SECRET_KEY
    DM_SUBMISSIONS_BUCKET = 'digitalmarketplace-submissions-dev-dev'
    DM_COMMUNICATIONS_BUCKET = 'digitalmarketplace-communications-dev-dev'
    DM_ASSETS_URL = 'http://asset-host'
class Development(Config):
    """Local-development settings: local API, dev buckets, insecure cookies."""
    DEBUG = True
    SESSION_COOKIE_SECURE = False
    # Throw an exception in dev when a feature flag is used in code but not defined. Otherwise it is assumed False.
    RAISE_ERROR_ON_MISSING_FEATURES = True
    # List all you feature flags below
    FEATURE_FLAGS = {
        'INVITE_CONTRIBUTOR': True,
        'EDIT_SECTIONS': True,
    }
    DM_DATA_API_URL = "http://localhost:5000"
    DM_DATA_API_AUTH_TOKEN = "<PASSWORD>"
    DM_API_AUTH_TOKEN = "my<PASSWORD>"
    DM_SUBMISSIONS_BUCKET = "digitalmarketplace-submissions-dev-dev"
    DM_COMMUNICATIONS_BUCKET = "digitalmarketplace-communications-dev-dev"
    DM_AGREEMENTS_BUCKET = "digitalmarketplace-agreements-dev-dev"
    DM_DOCUMENTS_BUCKET = "digitalmarketplace-documents-dev-dev"
    DM_ASSETS_URL = "https://{}.s3-eu-west-1.amazonaws.com".format(DM_SUBMISSIONS_BUCKET)
    SECRET_KEY = 'DevKeyDevKeyDevKeyDevKeyDevKeyDevKeyDevKeyX='
    SHARED_EMAIL_KEY = SECRET_KEY
class Live(Config):
    """Base config for deployed environments"""
    DEBUG = False
    DM_HTTP_PROTO = 'https'
    DM_CACHE_TYPE = 'prod'
    SERVER_NAME = 'marketplace.service.gov.au'
    DM_FRAMEWORK_AGREEMENTS_EMAIL = '<EMAIL>'
class Preview(Live):
    """Preview environment: currently identical to the Live base settings."""
    pass
class Production(Live):
    """Production environment: currently identical to the Live base settings."""
    pass
class Staging(Production):
    """Staging environment: mirrors Production."""
    pass
# Registry mapping environment names to their config classes.
configs = {
    'development': Development,
    'preview': Preview,
    'staging': Staging,
    'production': Production,
    'test': Test,
}
| StarcoderdataPython |
6501650 | <reponame>mminamina/311-data
import pandas as pd
from .data_access import load_batch, load_meta
def get_batch_nums(table, startDate, endDate):
    """Return the indexes of batches whose date span overlaps [startDate, endDate]."""
    overlapping = []
    for batch_num, batch in enumerate(load_meta(table)['batches']):
        # Two intervals overlap iff each one starts before the other ends.
        if (startDate <= pd.to_datetime(batch['endDate'])
                and endDate >= pd.to_datetime(batch['startDate'])):
            overlapping.append(batch_num)
    return overlapping
def query(table, fields, filters):
    """Query the pickled batches of `table`, returning `fields` for matching rows.

    filters must contain startDate/endDate/requestTypes; district filtering
    uses ncList (neighborhood councils) when non-empty, otherwise cdList
    (council districts). Returns an empty DataFrame with the requested columns
    when nothing matches.
    """
    print('QUERYING PICKLEBASE')
    startDate = pd.to_datetime(filters['startDate'])
    endDate = pd.to_datetime(filters['endDate'])
    requestTypes = filters['requestTypes']
    ncList = filters.get('ncList', [])
    cdList = filters.get('cdList', [])
    batches = []
    # Only load batches whose date range can contain matching rows.
    for batch_num in get_batch_nums(table, startDate, endDate):
        df = load_batch(table, batch_num)
        if len(ncList) > 0:
            district_filter = df['nc'].isin(ncList)
        else:
            district_filter = df['cd'].isin(cdList)
        batch = df.loc[(
            (df['createddate'] > startDate) &
            (df['createddate'] < endDate) &
            df['requesttype'].isin(requestTypes) &
            district_filter
        ), fields]
        batches.append(batch)
    if len(batches) > 0:
        # NOTE(review): `all` shadows the builtin; consider renaming.
        all = pd.concat(batches, ignore_index=True)
        # Drop category levels that no longer occur after filtering.
        for c in all.columns:
            if hasattr(all[c], 'cat'):
                all[c].cat.remove_unused_categories(inplace=True)
        return all
    else:
        return pd.DataFrame(columns=fields)
| StarcoderdataPython |
4971489 | #/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The F4PGA Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import os
import unittest
import pprint
import tempfile
from fpga_interchange.interchange_capnp import Interchange, write_capnp_file, \
CompressionFormat
from fpga_interchange.logical_netlist import LogicalNetlist
from fpga_interchange.physical_netlist import PhysicalNetlist
from example_netlist import example_logical_netlist, example_physical_netlist
class TestRoundTrip(unittest.TestCase):
    """Round-trip tests for the FPGA interchange capnp serialization."""

    def test_logical_netlist(self):
        """Write an example logical netlist to capnp and read it back unchanged."""
        logical_netlist = example_logical_netlist()
        interchange = Interchange(
            schema_directory=os.environ['INTERCHANGE_SCHEMA_PATH'])
        with tempfile.NamedTemporaryFile('w+b') as f:
            netlist_capnp = logical_netlist.convert_to_capnp(interchange)
            write_capnp_file(netlist_capnp, f)
            f.seek(0)
            read_logical_netlist = LogicalNetlist.read_from_capnp(
                f, interchange)
        self.assertEqual(read_logical_netlist.name, logical_netlist.name)
        self.assertEqual(read_logical_netlist.top_instance,
                         logical_netlist.top_instance)
        self.assertEqual(read_logical_netlist.libraries.keys(),
                         logical_netlist.libraries.keys())
        # Compare every library/cell structurally.
        for library_name, library in logical_netlist.libraries.items():
            read_library = read_logical_netlist.libraries[library_name]
            self.assertEqual(library.cells.keys(), read_library.cells.keys())
            for cell_name, cell in library.cells.items():
                read_cell = read_library.cells[cell_name]
                self.assertEqual(cell.name, read_cell.name)
                self.assertEqual(cell.property_map, read_cell.property_map)
                self.assertEqual(cell.view, read_cell.view)
                self.assertEqual(cell.nets.keys(), read_cell.nets.keys())
                self.assertEqual(cell.ports.keys(), read_cell.ports.keys())
                self.assertEqual(cell.cell_instances.keys(),
                                 read_cell.cell_instances.keys())
    def test_physical_netlist(self):
        """Write an example physical netlist to capnp and read it back."""
        phys_netlist = example_physical_netlist()
        interchange = Interchange(
            schema_directory=os.environ['INTERCHANGE_SCHEMA_PATH'])
        with tempfile.NamedTemporaryFile('w+b') as f:
            netlist_capnp = phys_netlist.convert_to_capnp(interchange)
            write_capnp_file(netlist_capnp, f)
            f.seek(0)
            read_phys_netlist = PhysicalNetlist.read_from_capnp(f, interchange)
        self.assertEqual(
            len(phys_netlist.placements), len(read_phys_netlist.placements))
    def test_check_routing_tree_and_stitch_segments(self):
        """Stitching routing segments (nested or flattened) must not change the tree."""
        phys_netlist = example_physical_netlist()
        interchange = Interchange(
            schema_directory=os.environ['INTERCHANGE_SCHEMA_PATH'])
        with open(
                os.path.join(os.environ['DEVICE_RESOURCE_PATH'],
                             phys_netlist.part + '.device'), 'rb') as f:
            device_resources = interchange.read_device_resources(f)
        phys_netlist.check_physical_nets(device_resources)
        before_stitch = phys_netlist.get_normalized_tuple_tree(
            device_resources)
        phys_netlist.stitch_physical_nets(device_resources)
        after_stitch = phys_netlist.get_normalized_tuple_tree(device_resources)
        phys_netlist.stitch_physical_nets(device_resources, flatten=True)
        after_stitch_from_flat = phys_netlist.get_normalized_tuple_tree(
            device_resources)
        self.assertEqual(len(before_stitch), len(after_stitch))
        self.assertEqual(len(before_stitch), len(after_stitch_from_flat))
        # Collect mismatching nets (printing them for diagnosis) before failing.
        bad_nets = set()
        for net in before_stitch:
            if before_stitch[net] != after_stitch[net]:
                bad_nets.add(net)
                print(net)
                pprint.pprint(before_stitch[net])
                pprint.pprint(after_stitch[net])
            if before_stitch[net] != after_stitch_from_flat[net]:
                bad_nets.add(net)
                print(net)
                pprint.pprint(before_stitch[net])
                pprint.pprint(after_stitch_from_flat[net])
        self.assertEqual(set(), bad_nets)
    def test_capnp_modes(self):
        """Round-trip under every compression/packing combination."""
        logical_netlist = example_logical_netlist()
        interchange = Interchange(
            schema_directory=os.environ['INTERCHANGE_SCHEMA_PATH'])
        for compression_format in [
                CompressionFormat.UNCOMPRESSED, CompressionFormat.GZIP
        ]:
            for packed in [True, False]:
                with tempfile.NamedTemporaryFile('w+b') as f:
                    netlist_capnp = logical_netlist.convert_to_capnp(
                        interchange)
                    write_capnp_file(
                        netlist_capnp,
                        f,
                        compression_format=compression_format,
                        is_packed=packed)
                    f.seek(0)
                    _ = LogicalNetlist.read_from_capnp(
                        f,
                        interchange,
                        compression_format=compression_format,
                        is_packed=packed)
| StarcoderdataPython |
389380 | <filename>product/migrations/0015_auto_20170126_0132.py
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2017-01-26 01:32
from __future__ import unicode_literals
import ckeditor_uploader.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a rich-text `text` field to Accessory and re-label its slogan field."""
    dependencies = [
        ('product', '0014_auto_20170126_0045'),
    ]
    operations = [
        migrations.AddField(
            model_name='accessory',
            name='text',
            field=ckeditor_uploader.fields.RichTextUploadingField(blank=True, verbose_name='Text'),
        ),
        migrations.AlterField(
            model_name='accessory',
            name='slogan',
            field=models.CharField(max_length=250, verbose_name='Accessory Slogan'),
        ),
    ]
| StarcoderdataPython |
3554456 | # ========================================================================================================================================================================================================================================================================================================================================
# *
# * ##### ##### ##### ###### ######
# * ## ## ## ## ## ## ## ##
# * ##### ## ## ## ## ## ##
# * ## ## ## ## ## ## ## ##
# * ##### ##### ##### ## ######
# *
# * CREATED BY <NAME>
# * -- SETTINGS.PY --
# ========================================================================================================================================================================================================================================================================================================================================
from selenium import webdriver
from random_user_agent.user_agent import UserAgent
from random_user_agent.params import SoftwareName, OperatingSystem
# =================================================
# * FUNCTIONS
# =================================================
def rotate_user_agent():
    """Return a random Chrome user-agent string for Windows or Linux."""
    browsers = [SoftwareName.CHROME.value]
    platforms = [OperatingSystem.WINDOWS.value,
                 OperatingSystem.LINUX.value]
    rotator = UserAgent(
        software_names=browsers, operating_systems=platforms, limit=100)
    return rotator.get_random_user_agent()
class DriverSettings():
    """Shared Selenium/Chrome configuration.

    NOTE(review): DRIVER is created at class-definition time, so merely
    importing this module launches a headless Chrome instance as a side
    effect — confirm this is intended before reusing the module.
    """
    PATH = '/Applications/chromedriver'  # chromedriver binary location
    WAIT = 10  # not used within this class; presumably a wait knob for callers — confirm
    SAVE = 50  # not used within this class; presumably a save-interval knob for callers — confirm
    OPTIONS = webdriver.ChromeOptions()
    OPTIONS.add_argument("--headless")
    OPTIONS.add_argument("--disable-dev-shm-usage")
    OPTIONS.add_argument("--no-sandbox")
    # Randomize the user agent each time the class body runs.
    OPTIONS.add_argument("user-agent={}".format(rotate_user_agent()))
    DRIVER = webdriver.Chrome(PATH, 0,OPTIONS)
| StarcoderdataPython |
# Static-typing test fixture for numpy's record/recarray APIs.
# It is analyzed by a type checker, not executed: each `reveal_type(...)` line
# carries an `# E: <expected type>` comment that the test harness matches
# against the checker's output. Do NOT edit those inline comments — they are
# the test expectations. The bare annotated names below are inputs provided
# by annotation only (no values needed for type analysis).
import io
from typing import Any, List
import numpy as np
import numpy.typing as npt
AR_i8: npt.NDArray[np.int64]
REC_AR_V: np.recarray[Any, np.dtype[np.record]]
AR_LIST: List[npt.NDArray[np.int64]]
format_parser: np.format_parser
record: np.record
file_obj: io.BufferedIOBase
reveal_type(np.format_parser(  # E: numpy.format_parser
    formats=[np.float64, np.int64, np.bool_],
    names=["f8", "i8", "?"],
    titles=None,
    aligned=True,
))
reveal_type(format_parser.dtype)  # E: numpy.dtype[numpy.void]
reveal_type(record.field_a)  # E: Any
reveal_type(record.field_b)  # E: Any
reveal_type(record["field_a"])  # E: Any
reveal_type(record["field_b"])  # E: Any
reveal_type(record.pprint())  # E: str
record.field_c = 5
reveal_type(REC_AR_V.field(0))  # E: Any
reveal_type(REC_AR_V.field("field_a"))  # E: Any
reveal_type(REC_AR_V.field(0, AR_i8))  # E: None
reveal_type(REC_AR_V.field("field_a", AR_i8))  # E: None
reveal_type(REC_AR_V["field_a"])  # E: Any
reveal_type(REC_AR_V.field_a)  # E: Any
reveal_type(np.recarray(  # numpy.recarray[Any, numpy.dtype[numpy.record]]
    shape=(10, 5),
    formats=[np.float64, np.int64, np.bool_],
    order="K",
    byteorder="|",
))
reveal_type(np.recarray(  # numpy.recarray[Any, numpy.dtype[Any]]
    shape=(10, 5),
    dtype=[("f8", np.float64), ("i8", np.int64)],
    strides=(5, 5),
))
reveal_type(np.rec.fromarrays(  # numpy.recarray[Any, numpy.dtype[numpy.record]]
    AR_LIST,
))
reveal_type(np.rec.fromarrays(  # numpy.recarray[Any, numpy.dtype[Any]]
    AR_LIST,
    dtype=np.int64,
))
reveal_type(np.rec.fromarrays(  # numpy.recarray[Any, numpy.dtype[Any]]
    AR_LIST,
    formats=[np.int64, np.float64],
    names=["i8", "f8"]
))
reveal_type(np.rec.fromrecords(  # numpy.recarray[Any, numpy.dtype[numpy.record]]
    (1, 1.5),
))
reveal_type(np.rec.fromrecords(  # numpy.recarray[Any, numpy.dtype[numpy.record]]
    [(1, 1.5)],
    dtype=[("i8", np.int64), ("f8", np.float64)],
))
reveal_type(np.rec.fromrecords(  # numpy.recarray[Any, numpy.dtype[numpy.record]]
    REC_AR_V,
    formats=[np.int64, np.float64],
    names=["i8", "f8"]
))
reveal_type(np.rec.fromstring(  # numpy.recarray[Any, numpy.dtype[numpy.record]]
    b"(1, 1.5)",
    dtype=[("i8", np.int64), ("f8", np.float64)],
))
reveal_type(np.rec.fromstring(  # numpy.recarray[Any, numpy.dtype[numpy.record]]
    REC_AR_V,
    formats=[np.int64, np.float64],
    names=["i8", "f8"]
))
reveal_type(np.rec.fromfile(  # numpy.recarray[Any, numpy.dtype[Any]]
    "test_file.txt",
    dtype=[("i8", np.int64), ("f8", np.float64)],
))
reveal_type(np.rec.fromfile(  # numpy.recarray[Any, numpy.dtype[numpy.record]]
    file_obj,
    formats=[np.int64, np.float64],
    names=["i8", "f8"]
))
reveal_type(np.rec.array(  # numpy.recarray[Any, numpy.dtype[{int64}]]
    AR_i8,
))
reveal_type(np.rec.array(  # numpy.recarray[Any, numpy.dtype[Any]]
    [(1, 1.5)],
    dtype=[("i8", np.int64), ("f8", np.float64)],
))
reveal_type(np.rec.array(  # numpy.recarray[Any, numpy.dtype[numpy.record]]
    [(1, 1.5)],
    formats=[np.int64, np.float64],
    names=["i8", "f8"]
))
| StarcoderdataPython |
1627678 | from django.contrib.syndication.views import Feed
from wagtail.wagtailcore.url_routing import RouteResult
from datetime import datetime
import pytz
eastern = pytz.timezone('US/Eastern')
class FilterableFeed(Feed):
    """RSS feed built from a filterable Wagtail page and its filter context."""
    # GUIDs are opaque identifiers, not resolvable URLs.
    item_guid_is_permalink = False
    def __init__(self, page, context):
        self.page = page
        self.context = context
    def link(self):
        return self.page.full_url
    def author_name(self):
        return "Consumer Financial Protection Bureau"
    def title(self):
        return "%s | Consumer Financial Protection Bureau" % self.page.title
    def items(self):
        # NOTE: pop(0) consumes the first page set, mutating the shared context.
        posts = self.context['filter_data']['page_sets'].pop(0)
        return posts
    def item_link(self, item):
        return item.full_url
    def item_pubdate(self, item):
        # this seems to require a datetime
        # Promote the bare date to midnight US/Eastern, timezone-aware.
        item_date = item.date_published
        naive = datetime.combine(item_date, datetime.min.time())
        return eastern.localize(naive)
    def item_description(self, item):
        return item.preview_description
    def item_categories(self, item):
        # Feed categories are the item's display categories plus its tag names.
        categories = [cat.get_name_display() for cat in item.categories.all()]
        tags = [tag.name for tag in item.tags.all()]
        return categories + tags
    def item_author_name(self, item):
        # Items without an `authors` relation yield no author (implicit None).
        if hasattr(item, 'authors'):
            author_names = [a.name for a in item.authors.all()]
            author_string = ', '.join(author_names)
            return author_string
    def item_guid(self, item):
        return "%s<>consumerfinance.gov" % item.page_ptr_id
class FilterableFeedPageMixin(object):
    """Page mixin that serves an RSS feed at the page's `/feed` sub-path."""
    def route(self, request, path_components):
        # Route <page-url>/feed to this page with format='rss'.
        if len(path_components) == 1 and path_components[0] == 'feed':
            return RouteResult(self, kwargs={'format': 'rss'})
        return super(FilterableFeedPageMixin,
                     self).route(request, path_components)
    def serve(self, request, format='html'):
        if format == 'rss':
            context = self.get_context(request)
            # Feed objects are callable Django views.
            return FilterableFeed(self, context)(request)
        else:
            return super(FilterableFeedPageMixin, self).serve(request)
| StarcoderdataPython |
3282547 | <reponame>Wyss/customarrayformatter
# file openpyxl/reader/workbook.py
# Copyright (c) 2010-2011 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: see AUTHORS file
"""Read in global settings to be maintained by the workbook object."""
# package imports
from openpyxl.shared.xmltools import fromstring
from openpyxl.shared.ooxml import NAMESPACES, DCORE_NS, COREPROPS_NS, DCTERMS_NS, SHEET_MAIN_NS, CONTYPES_NS
from openpyxl.workbook import DocumentProperties
from openpyxl.shared.date_time import W3CDTF_to_datetime,CALENDAR_WINDOWS_1900,CALENDAR_MAC_1904
from openpyxl.namedrange import NamedRange, NamedRangeContainingValue, split_named_range, refers_to_range
import datetime
# constants
# Named-range definitions containing these values are malformed and skipped.
BUGGY_NAMED_RANGES = ['NA()', '#REF!']
# Auto-generated range names that should not be exposed to the user.
DISCARDED_RANGES = ['Excel_BuiltIn', 'Print_Area']
def get_sheet_ids(xml_source):
    """Map each worksheet title to its part name ('sheet1.xml', 'sheet2.xml', ...)."""
    mapping = {}
    for index, title in enumerate(read_sheets_titles(xml_source), 1):
        mapping[title] = 'sheet%d.xml' % index
    return mapping
def read_properties_core(xml_source):
    """Read assorted file properties (creator, last modifier, timestamps)
    from the workbook's core-properties XML into a DocumentProperties."""
    properties = DocumentProperties()
    root = fromstring(xml_source)
    # findtext defaults to '' when the element is absent.
    properties.creator = root.findtext('{%s}creator' % DCORE_NS, '')
    properties.last_modified_by = root.findtext('{%s}lastModifiedBy' % COREPROPS_NS, '')
    created_node = root.find('{%s}created' % DCTERMS_NS)
    if created_node is not None:
        properties.created = W3CDTF_to_datetime(created_node.text)
    else:
        # No creation timestamp recorded — fall back to "now".
        properties.created = datetime.datetime.now()
    modified_node = root.find('{%s}modified' % DCTERMS_NS)
    if modified_node is not None:
        properties.modified = W3CDTF_to_datetime(modified_node.text)
    else:
        # No modification timestamp — reuse the creation time.
        properties.modified = properties.created
    return properties
def read_excel_base_date(xml_source):
    """Return the workbook's date epoch: the Mac 1904 calendar when
    workbookPr@date1904 is set, otherwise the Windows 1900 calendar."""
    root = fromstring(text=xml_source)
    workbook_pr = root.find('{%s}workbookPr' % SHEET_MAIN_NS)
    if workbook_pr is None:
        return CALENDAR_WINDOWS_1900
    if workbook_pr.get('date1904') in ('1', 'true'):
        return CALENDAR_MAC_1904
    return CALENDAR_WINDOWS_1900
# <NAME>, 2013-06-03
def read_content_types(xml_source):
    """Yield (part_name, content_type) pairs from the package's
    [Content_Types].xml Override elements."""
    root = fromstring(xml_source)
    # Loop variable renamed from `type`, which shadowed the builtin.
    for override in root.findall('{%s}Override' % CONTYPES_NS):
        yield override.get('PartName'), override.get('ContentType')
def read_sheets_titles(xml_source):
    """Return the worksheet names, in workbook order."""
    workbook_root = fromstring(xml_source)
    sheets_node = workbook_root.find('{%s}sheets' % SHEET_MAIN_NS)
    titles = []
    for sheet_node in sheets_node:
        titles.append(sheet_node.get('name'))
    return titles
def read_named_ranges(xml_source, workbook):
    """Read named ranges, excluding poorly defined ranges.

    Skips hidden names, auto-generated names (DISCARDED_RANGES) and names
    whose definition contains known-bad values (BUGGY_NAMED_RANGES). Returns
    a list of NamedRange / NamedRangeContainingValue objects, each scoped to
    a worksheet when the definition carries a localSheetId.
    """
    named_ranges = []
    root = fromstring(xml_source)
    names_root = root.find('{%s}definedNames' %SHEET_MAIN_NS)
    if names_root is not None:
        for name_node in names_root:
            range_name = name_node.get('name')
            node_text = name_node.text or ''
            if name_node.get("hidden", '0') == '1':
                continue
            valid = True
            for discarded_range in DISCARDED_RANGES:
                if discarded_range in range_name:
                    valid = False
            for bad_range in BUGGY_NAMED_RANGES:
                if bad_range in node_text:
                    valid = False
            if valid:
                if refers_to_range(node_text):
                    destinations = split_named_range(node_text)
                    new_destinations = []
                    for worksheet, cells_range in destinations:
                        # it can happen that a valid named range references
                        # a missing worksheet, when Excel didn't properly maintain
                        # the named range list
                        #
                        # we just ignore them here
                        worksheet = workbook.get_sheet_by_name(worksheet)
                        if worksheet:
                            new_destinations.append((worksheet, cells_range))
                    named_range = NamedRange(range_name, new_destinations)
                else:
                    # Not a cell reference: store the raw value/formula as-is.
                    named_range = NamedRangeContainingValue(range_name, node_text)
                location_id = name_node.get("localSheetId")
                if location_id:
                    # Sheet-local name: scope it to the referenced worksheet.
                    named_range.scope = workbook.worksheets[int(location_id)]
                named_ranges.append(named_range)
    return named_ranges
| StarcoderdataPython |
5150509 | <reponame>MaxRink/pretix<filename>src/pretix/base/payment.py<gh_stars>0
import logging
from collections import OrderedDict
from decimal import ROUND_HALF_UP, Decimal
from typing import Any, Dict, Union
import pytz
from django import forms
from django.conf import settings
from django.contrib import messages
from django.dispatch import receiver
from django.forms import Form
from django.http import HttpRequest
from django.template.loader import get_template
from django.utils.timezone import now
from django.utils.translation import pgettext_lazy, ugettext_lazy as _
from i18nfield.forms import I18nFormField, I18nTextarea
from i18nfield.strings import LazyI18nString
from pretix.base.models import CartPosition, Event, Order, Quota
from pretix.base.reldate import RelativeDateField, RelativeDateWrapper
from pretix.base.settings import SettingsSandbox
from pretix.base.signals import register_payment_providers
from pretix.helpers.money import DecimalTextInput
from pretix.presale.views import get_cart_total
from pretix.presale.views.cart import get_or_create_cart_id
logger = logging.getLogger(__name__)
class PaymentProviderForm(Form):
    """Form that re-checks requiredness based on each field's ``_required`` flag."""

    def clean(self):
        cleaned_data = super().clean()
        for field_name, field in self.fields.items():
            if field._required and not cleaned_data.get(field_name):
                self.add_error(field_name, _('This field is required.'))
class BasePaymentProvider:
    """
    This is the base class for all payment providers.
    """

    def __init__(self, event: Event):
        # Bind the provider to an event; all settings are read and written
        # through a sandbox scoped to this provider's identifier.
        self.event = event
        self.settings = SettingsSandbox('payment', self.identifier, event)
        # Default values
        if self.settings.get('_fee_reverse_calc') is None:
            self.settings.set('_fee_reverse_calc', True)

    def __str__(self):
        # String form is the machine identifier, not the verbose name.
        return self.identifier

    @property
    def is_implicit(self) -> bool:
        """
        Returns whether or whether not this payment provider is an "implicit" payment provider that will
        *always* and unconditionally be used if is_allowed() returns True and does not require any input.
        This is intended to be used by the FreePaymentProvider, which skips the payment choice page.
        By default, this returns ``False``. Please do not set this if you don't know exactly what you are doing.
        """
        return False

    @property
    def is_meta(self) -> bool:
        """
        Returns whether or whether not this payment provider is a "meta" payment provider that only
        works as a settings holder for other payment providers and should never be used directly. This
        is a trick to implement payment gateways with multiple payment methods but unified payment settings.
        Take a look at the built-in stripe provider to see how this might be used.
        By default, this returns ``False``.
        """
        return False

    @property
    def is_enabled(self) -> bool:
        """
        Returns whether or whether not this payment provider is enabled.
        By default, this is determined by the value of the ``_enabled`` setting.
        """
        return self.settings.get('_enabled', as_type=bool)

    def calculate_fee(self, price: Decimal) -> Decimal:
        """
        Calculate the fee for this payment provider which will be added to
        final price before fees (but after taxes). It should include any taxes.
        The default implementation makes use of the setting ``_fee_abs`` for an
        absolute fee and ``_fee_percent`` for a percentage.

        :param price: The total value without the payment method fee, after taxes.
        """
        fee_abs = self.settings.get('_fee_abs', as_type=Decimal, default=0)
        fee_percent = self.settings.get('_fee_percent', as_type=Decimal, default=0)
        fee_reverse_calc = self.settings.get('_fee_reverse_calc', as_type=bool, default=True)
        # Quantize to the number of decimal places used by the event currency.
        places = settings.CURRENCY_PLACES.get(self.event.currency, 2)
        if fee_reverse_calc:
            # Reverse calculation: the percentage is applied to the total
            # *including* the fee, so that the fee covers its own surcharge.
            return ((price + fee_abs) * (1 / (1 - fee_percent / 100)) - price).quantize(
                Decimal('1') / 10 ** places, ROUND_HALF_UP
            )
        else:
            return (price * fee_percent / 100 + fee_abs).quantize(
                Decimal('1') / 10 ** places, ROUND_HALF_UP
            )

    @property
    def verbose_name(self) -> str:
        """
        A human-readable name for this payment provider. This should
        be short but self-explaining. Good examples include 'Bank transfer'
        and 'Credit card via Stripe'.
        """
        raise NotImplementedError()  # NOQA

    @property
    def public_name(self) -> str:
        """
        A human-readable name for this payment provider to be shown to the public.
        This should be short but self-explaining. Good examples include 'Bank transfer'
        and 'Credit card', but 'Credit card via Stripe' might be to explicit. By default,
        this is the same as ``verbose_name``
        """
        return self.verbose_name

    @property
    def identifier(self) -> str:
        """
        A short and unique identifier for this payment provider.
        This should only contain lowercase letters and in most
        cases will be the same as your package name.
        """
        raise NotImplementedError()  # NOQA

    @property
    def settings_form_fields(self) -> dict:
        """
        When the event's administrator visits the event configuration
        page, this method is called to return the configuration fields available.
        It should therefore return a dictionary where the keys should be (unprefixed)
        settings keys and the values should be corresponding Django form fields.

        The default implementation returns the appropriate fields for the ``_enabled``,
        ``_fee_abs``, ``_fee_percent`` and ``_availability_date`` settings mentioned above.

        We suggest that you return an ``OrderedDict`` object instead of a dictionary
        and make use of the default implementation. Your implementation could look
        like this::

            @property
            def settings_form_fields(self):
                return OrderedDict(
                    list(super().settings_form_fields.items()) + [
                        ('bank_details',
                         forms.CharField(
                             widget=forms.Textarea,
                             label=_('Bank account details'),
                             required=False
                         ))
                    ]
                )

        .. WARNING:: It is highly discouraged to alter the ``_enabled`` field of the default
                     implementation.
        """
        places = settings.CURRENCY_PLACES.get(self.event.currency, 2)
        return OrderedDict([
            ('_enabled',
             forms.BooleanField(
                 label=_('Enable payment method'),
                 required=False,
             )),
            ('_availability_date',
             RelativeDateField(
                 label=_('Available until'),
                 help_text=_('Users will not be able to choose this payment provider after the given date.'),
                 required=False,
             )),
            ('_invoice_text',
             I18nFormField(
                 label=_('Text on invoices'),
                 help_text=_('Will be printed just below the payment figures and above the closing text on invoices. '
                             'This will only be used if the invoice is generated before the order is paid. If the '
                             'invoice is generated later, it will show a text stating that it has already been paid.'),
                 required=False,
                 widget=I18nTextarea,
                 widget_kwargs={'attrs': {'rows': '2'}}
             )),
            ('_fee_abs',
             forms.DecimalField(
                 label=_('Additional fee'),
                 help_text=_('Absolute value'),
                 localize=True,
                 required=False,
                 decimal_places=places,
                 widget=DecimalTextInput(places=places)
             )),
            ('_fee_percent',
             forms.DecimalField(
                 label=_('Additional fee'),
                 help_text=_('Percentage of the order total. Note that this percentage will currently only '
                             'be calculated on the summed price of sold tickets, not on other fees like e.g. shipping '
                             'fees, if there are any.'),
                 localize=True,
                 required=False,
             )),
            ('_fee_reverse_calc',
             forms.BooleanField(
                 label=_('Calculate the fee from the total value including the fee.'),
                 help_text=_('We recommend to enable this if you want your users to pay the payment fees of your '
                             'payment provider. <a href="{docs_url}" target="_blank" rel="noopener">Click here '
                             'for detailed information on what this does.</a> Don\'t forget to set the correct fees '
                             'above!').format(docs_url='https://docs.pretix.eu/en/latest/user/payments/fees.html'),
                 required=False
             )),
        ])

    def settings_content_render(self, request: HttpRequest) -> str:
        """
        When the event's administrator visits the event configuration
        page, this method is called. It may return HTML containing additional information
        that is displayed below the form fields configured in ``settings_form_fields``.
        """
        return ""

    def render_invoice_text(self, order: Order) -> str:
        """
        This is called when an invoice for an order with this payment provider is generated.
        The default implementation returns the content of the _invoice_text configuration
        variable (an I18nString), or an empty string if unconfigured.
        """
        if order.status == Order.STATUS_PAID:
            return pgettext_lazy('invoice', 'The payment for this invoice has already been received.')
        return self.settings.get('_invoice_text', as_type=LazyI18nString, default='')

    @property
    def payment_form_fields(self) -> dict:
        """
        This is used by the default implementation of :py:meth:`checkout_form`.
        It should return an object similar to :py:attr:`settings_form_fields`.

        The default implementation returns an empty dictionary.
        """
        return {}

    def payment_form(self, request: HttpRequest) -> Form:
        """
        This is called by the default implementation of :py:meth:`checkout_form_render`
        to obtain the form that is displayed to the user during the checkout
        process. The default implementation constructs the form using
        :py:attr:`checkout_form_fields` and sets appropriate prefixes for the form
        and all fields and fills the form with data form the user's session.

        If you overwrite this, we strongly suggest that you inherit from
        ``PaymentProviderForm`` (from this module) that handles some nasty issues about
        required fields for you.
        """
        form = PaymentProviderForm(
            data=(request.POST if request.method == 'POST' and request.POST.get("payment") == self.identifier else None),
            prefix='payment_%s' % self.identifier,
            initial={
                k.replace('payment_%s_' % self.identifier, ''): v
                for k, v in request.session.items()
                if k.startswith('payment_%s_' % self.identifier)
            }
        )
        form.fields = self.payment_form_fields
        # Fields are made non-required at the Django level; the original
        # requiredness is preserved in ``_required`` and re-checked by
        # PaymentProviderForm.clean(), so validation only fires when this
        # provider is actually the selected one.
        for k, v in form.fields.items():
            v._required = v.required
            v.required = False
            v.widget.is_required = False
        return form

    def _is_still_available(self, now_dt=None, cart_id=None, order=None):
        """
        Check the ``_availability_date`` setting against today's date in the
        event timezone. For events with subevents, the earliest availability
        date over the subevents in the given cart or order is used.
        """
        now_dt = now_dt or now()
        tz = pytz.timezone(self.event.settings.timezone)
        availability_date = self.settings.get('_availability_date', as_type=RelativeDateWrapper)
        if availability_date:
            if self.event.has_subevents and cart_id:
                availability_date = min([
                    availability_date.datetime(se).date()
                    for se in self.event.subevents.filter(
                        id__in=CartPosition.objects.filter(
                            cart_id=cart_id, event=self.event
                        ).values_list('subevent', flat=True)
                    )
                ])
            elif self.event.has_subevents and order:
                availability_date = min([
                    availability_date.datetime(se).date()
                    for se in self.event.subevents.filter(
                        id__in=order.positions.values_list('subevent', flat=True)
                    )
                ])
            elif self.event.has_subevents:
                # Neither a cart nor an order to resolve the subevent against.
                logger.error('Payment provider is not subevent-ready.')
                return False
            else:
                availability_date = availability_date.datetime(self.event).date()
            return availability_date >= now_dt.astimezone(tz).date()
        return True

    def is_allowed(self, request: HttpRequest) -> bool:
        """
        You can use this method to disable this payment provider for certain groups
        of users, products or other criteria. If this method returns ``False``, the
        user will not be able to select this payment method. This will only be called
        during checkout, not on retrying.

        The default implementation checks for the _availability_date setting to be either unset or in the future.
        """
        return self._is_still_available(cart_id=get_or_create_cart_id(request))

    def payment_form_render(self, request: HttpRequest) -> str:
        """
        When the user selects this provider as his preferred payment method,
        they will be shown the HTML you return from this method.

        The default implementation will call :py:meth:`checkout_form`
        and render the returned form. If your payment method doesn't require
        the user to fill out form fields, you should just return a paragraph
        of explanatory text.
        """
        form = self.payment_form(request)
        template = get_template('pretixpresale/event/checkout_payment_form_default.html')
        ctx = {'request': request, 'form': form}
        return template.render(ctx)

    def checkout_confirm_render(self, request) -> str:
        """
        If the user has successfully filled in his payment data, they will be redirected
        to a confirmation page which lists all details of his order for a final review.
        This method should return the HTML which should be displayed inside the
        'Payment' box on this page.

        In most cases, this should include a short summary of the user's input and
        a short explanation on how the payment process will continue.
        """
        raise NotImplementedError()  # NOQA

    def checkout_prepare(self, request: HttpRequest, cart: Dict[str, Any]) -> Union[bool, str]:
        """
        Will be called after the user selects this provider as his payment method.
        If you provided a form to the user to enter payment data, this method should
        at least store the user's input into his session.

        This method should return ``False`` if the user's input was invalid, ``True``
        if the input was valid and the frontend should continue with default behavior
        or a string containing a URL if the user should be redirected somewhere else.

        On errors, you should use Django's message framework to display an error message
        to the user (or the normal form validation error messages).

        The default implementation stores the input into the form returned by
        :py:meth:`payment_form` in the user's session.

        If your payment method requires you to redirect the user to an external provider,
        this might be the place to do so.

        .. IMPORTANT:: If this is called, the user has not yet confirmed his or her order.
           You may NOT do anything which actually moves money.

        :param cart: This dictionary contains at least the following keys:

            positions:
               A list of ``CartPosition`` objects that are annotated with the special
               attributes ``count`` and ``total`` because multiple objects of the
               same content are grouped into one.

            raw:
               The raw list of ``CartPosition`` objects in the users cart

            total:
               The overall total *including* the fee for the payment method.

            payment_fee:
               The fee for the payment method.
        """
        form = self.payment_form(request)
        if form.is_valid():
            for k, v in form.cleaned_data.items():
                request.session['payment_%s_%s' % (self.identifier, k)] = v
            return True
        else:
            return False

    def payment_is_valid_session(self, request: HttpRequest) -> bool:
        """
        This is called at the time the user tries to place the order. It should return
        ``True`` if the user's session is valid and all data your payment provider requires
        in future steps is present.
        """
        raise NotImplementedError()  # NOQA

    def payment_perform(self, request: HttpRequest, order: Order) -> str:
        """
        After the user has confirmed their purchase, this method will be called to complete
        the payment process. This is the place to actually move the money if applicable.
        If you need any special behavior, you can return a string
        containing the URL the user will be redirected to. If you are done with your process
        you should return the user to the order's detail page.

        If the payment is completed, you should call ``pretix.base.services.orders.mark_order_paid(order, provider, info)``
        with ``provider`` being your :py:attr:`identifier` and ``info`` being any string
        you might want to store for later usage. Please note that ``mark_order_paid`` might
        raise a ``Quota.QuotaExceededException`` if (and only if) the payment term of this
        order is over and some of the items are sold out. You should use the exception message
        to display a meaningful error to the user.

        The default implementation just returns ``None`` and therefore leaves the
        order unpaid. The user will be redirected to the order's detail page by default.

        On errors, you should raise a ``PaymentException``.

        :param order: The order object
        """
        return None

    def order_pending_mail_render(self, order: Order) -> str:
        """
        After the user has submitted their order, they will receive a confirmation
        email. You can return a string from this method if you want to add additional
        information to this email.

        :param order: The order object
        """
        return ""

    def order_pending_render(self, request: HttpRequest, order: Order) -> str:
        """
        If the user visits a detail page of an order which has not yet been paid but
        this payment method was selected during checkout, this method will be called
        to provide HTML content for the 'payment' box on the page.

        It should contain instructions on how to continue with the payment process,
        either in form of text or buttons/links/etc.

        :param order: The order object
        """
        raise NotImplementedError()  # NOQA

    def order_change_allowed(self, order: Order) -> bool:
        """
        Will be called to check whether it is allowed to change the payment method of
        an order to this one.

        The default implementation checks for the _availability_date setting to be either unset or in the future.

        :param order: The order object
        """
        return self._is_still_available(order=order)

    def order_can_retry(self, order: Order) -> bool:
        """
        Will be called if the user views the detail page of an unpaid order to determine
        whether the user should be presented with an option to retry the payment. The default
        implementation always returns False.

        If you want to enable retrials for your payment method, the best is to just return
        ``self._is_still_available()`` from this method to disable it as soon as the method
        gets disabled or the methods end date is reached.

        The retry workflow is also used if a user switches to this payment method for an existing
        order!

        :param order: The order object
        """
        return False

    def retry_prepare(self, request: HttpRequest, order: Order) -> Union[bool, str]:
        """
        Deprecated, use order_prepare instead
        """
        # NOTE(review): raising here makes the following return statement
        # unreachable — presumably a ``warnings.warn(...)`` was intended so
        # the call falls through to order_prepare; confirm before relying on it.
        raise DeprecationWarning('retry_prepare is deprecated, use order_prepare instead')
        return self.order_prepare(request, order)

    def order_prepare(self, request: HttpRequest, order: Order) -> Union[bool, str]:
        """
        Will be called if the user retries to pay an unpaid order (after the user filled in
        e.g. the form returned by :py:meth:`payment_form`) or if the user changes the payment
        method.

        It should return and report errors the same way as :py:meth:`checkout_prepare`, but
        receives an ``Order`` object instead of a cart object.

        Note: The ``Order`` object given to this method might be different from the version
        stored in the database as it's total will already contain the payment fee for the
        new payment method.
        """
        form = self.payment_form(request)
        if form.is_valid():
            for k, v in form.cleaned_data.items():
                request.session['payment_%s_%s' % (self.identifier, k)] = v
            return True
        else:
            return False

    def order_paid_render(self, request: HttpRequest, order: Order) -> str:
        """
        Will be called if the user views the detail page of a paid order which is
        associated with this payment provider.

        It should return HTML code which should be displayed to the user or None,
        if there is nothing to say (like the default implementation does).

        :param order: The order object
        """
        return None

    def order_control_render(self, request: HttpRequest, order: Order) -> str:
        """
        Will be called if the *event administrator* views the detail page of an order
        which is associated with this payment provider.

        It should return HTML code containing information regarding the current payment
        status and, if applicable, next steps.

        The default implementation returns the verbose name of the payment provider.

        :param order: The order object
        """
        return _('Payment provider: %s' % self.verbose_name)

    def order_control_refund_render(self, order: Order, request: HttpRequest=None) -> str:
        """
        Will be called if the event administrator clicks an order's 'refund' button.
        This can be used to display information *before* the order is being refunded.

        It should return HTML code which should be displayed to the user. It should
        contain information about to which extend the money will be refunded
        automatically.

        :param order: The order object
        :param request: The HTTP request

        .. versionchanged:: 1.6

           The parameter ``request`` has been added.
        """
        return '<div class="alert alert-warning">%s</div>' % _('The money can not be automatically refunded, '
                                                               'please transfer the money back manually.')

    def order_control_refund_perform(self, request: HttpRequest, order: Order) -> Union[bool, str]:
        """
        Will be called if the event administrator confirms the refund.

        This should transfer the money back (if possible). You can return the URL the
        user should be redirected to if you need special behavior or None to continue
        with default behavior.

        On failure, you should use Django's message framework to display an error message
        to the user.

        The default implementation sets the Order's state to refunded and shows a success
        message.

        :param request: The HTTP request
        :param order: The order object
        """
        from pretix.base.services.orders import mark_order_refunded
        mark_order_refunded(order, user=request.user)
        messages.success(request, _('The order has been marked as refunded. Please transfer the money '
                                    'back to the buyer manually.'))
class PaymentException(Exception):
    """
    Raised by payment providers when a payment fails; the message should be
    suitable for display to the end user.
    """
    pass
class FreeOrderProvider(BasePaymentProvider):
    """
    Implicit payment provider used automatically whenever the cart total
    (including fees) is zero. It never shows up in the payment method
    selection and marks orders as paid immediately.
    """

    @property
    def is_implicit(self) -> bool:
        # Skip the payment selection step entirely for free orders.
        return True

    @property
    def is_enabled(self) -> bool:
        # Always available; there is nothing to configure or disable.
        return True

    @property
    def identifier(self) -> str:
        return "free"

    def checkout_confirm_render(self, request: HttpRequest) -> str:
        return _("No payment is required as this order only includes products which are free of charge.")

    def order_pending_render(self, request: HttpRequest, order: Order) -> str:
        # Free orders are paid immediately, so there is nothing to display here.
        pass

    def payment_is_valid_session(self, request: HttpRequest) -> bool:
        # No payment data is collected, so any session is valid.
        return True

    @property
    def verbose_name(self) -> str:
        return _("Free of charge")

    def payment_perform(self, request: HttpRequest, order: Order):
        """Mark the order as paid right away; re-raise quota errors as PaymentException."""
        from pretix.base.services.orders import mark_order_paid
        try:
            mark_order_paid(order, 'free', send_mail=False)
        except Quota.QuotaExceededException as e:
            raise PaymentException(str(e))

    @property
    def settings_form_fields(self) -> dict:
        # This provider is not configurable.
        return {}

    def order_control_refund_render(self, order: Order, request: HttpRequest=None) -> str:
        # Signature aligned with BasePaymentProvider.order_control_refund_render,
        # which also receives the request (the override previously dropped it).
        # No money was collected, so there is nothing to explain.
        return ''

    def order_control_refund_perform(self, request: HttpRequest, order: Order) -> Union[bool, str]:
        """
        Mark the order as refunded; no money was collected, so there is
        nothing to actually transfer back.

        :param request: The HTTP request
        :param order: The order object
        """
        from pretix.base.services.orders import mark_order_refunded
        mark_order_refunded(order, user=request.user)
        messages.success(request, _('The order has been marked as refunded.'))

    def is_allowed(self, request: HttpRequest) -> bool:
        # Only allowed if the cart total including all fees is exactly zero.
        from .services.cart import get_fees
        total = get_cart_total(request)
        total += sum([f.value for f in get_fees(self.event, request, total, None, None)])
        return total == 0

    def order_change_allowed(self, order: Order) -> bool:
        # Switching an existing (non-free) order to this provider makes no sense.
        return False
@receiver(register_payment_providers, dispatch_uid="payment_free")
def register_payment_provider(sender, **kwargs):
    """Signal receiver registering the built-in FreeOrderProvider."""
    return FreeOrderProvider
| StarcoderdataPython |
11386206 | from typing import List, Dict
# Definition for singly-linked list.
class ListNode:
    """Node of a singly linked list."""
    def __init__(self, val=0, next=None):
        # Value stored in this node.
        self.val = val
        # Reference to the next node, or None at the tail.
        self.next = next
def listnode_to_array(li: ListNode) -> List[int]:
    """Collect the values of a linked list into a plain Python list."""
    values = []
    node = li
    while node is not None:
        values.append(node.val)
        node = node.next
    return values
def array_to_listnode(array: List[int]) -> ListNode:
    """Build a linked list holding the values of *array*; return its head."""
    dummy = ListNode(val=0, next=None)
    tail = dummy
    for value in array:
        tail.next = ListNode(val=value, next=None)
        tail = tail.next
    return dummy.next
def delete_duplicates_array(array: List[int]) -> List[int]:
    """Return only the values that occur exactly once in *array*.

    First-appearance order is preserved (dicts keep insertion order).
    This mirrors LeetCode 82 ("Remove Duplicates from Sorted List II") on a
    plain list: any value appearing more than once is dropped entirely.
    """
    if len(array) <= 1:
        return array
    # Counter is the idiomatic stdlib tally; the original hand-rolled dict
    # stored counts off by one (0 meaning "seen once"), which was confusing.
    from collections import Counter
    counts = Counter(array)
    return [value for value, count in counts.items() if count == 1]
class Solution:
    """LeetCode 82: remove every value that appears more than once."""

    # Convert to an array, deduplicate, convert back to a linked list.
    def deleteDuplicates(self, head: ListNode) -> ListNode:
        array = listnode_to_array(head)
        array = delete_duplicates_array(array)
        return array_to_listnode(array)

    # Two-pointer iteration (unfinished draft).
    # def deleteDuplicatesV2(self, head: ListNode) -> ListNode:
    #     if head is None:
    #         return head
    #     li = ListNode()
    #     p = li
    #     p.next = head
    #     cur = head
    #     while cur is not None and cur.next is not None:
    #         pass
    #     return li.next
if __name__ == "__main__":
    # print(delete_duplicates_array([1, 1, 2, 2, 3, 4, 5, 6, 6]))
    print(listnode_to_array(Solution().deleteDuplicates(array_to_listnode([1, 2, 3, 3, 4, 4, 5]))))
    # BUGFIX: deleteDuplicatesV2 is commented out in Solution, so calling it
    # here raised AttributeError; the smoke-test call stays disabled until
    # the two-pointer variant is implemented.
    # print(listnode_to_array(Solution().deleteDuplicatesV2(array_to_listnode([1, 2, 3, 3, 4, 4, 5]))))
| StarcoderdataPython |
197290 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Operations on genomic intervals stored in GTF file
note:
- all the exons of a gene should be on the same strand. Genes with exons trans-
spliced from the other strand, like mod(mdg4) in D. melanogaster, should be
excluded (before or after).
- stop codon is not part of the CDS, at least for Ensembl GTF
-----------------------------------------------------------------
@author: zh (mt1022)
@date: Fri Aug 27 2021
"""
import sys
import argparse
import re
import gzip
import fileinput
import csv
from dataclasses import dataclass
from typing import List
###############################################################################
# class definiton #############################################################
###############################################################################
@dataclass
class Region:
start: int
end: int
def __post_init__(self):
if self.start > self.end:
raise ValueError('Invalid region boundary!')
def __len__(self):
return self.end - self.start + 1
@dataclass
class Gene:
gene_id: str
chrom: str = ''
strand: str = '+'
class Transcript:
    """A transcript isoform: its exons plus optional CDS and stop codon.

    All genomic coordinates are 1-based and inclusive. Transcript ("tpos")
    and CDS ("cpos") coordinates are 1-based in transcript direction, i.e.
    they run 3'-ward from the 5' end regardless of genomic strand.
    """

    def __init__(self, tx_id: str, gene: Gene):
        self.tx_id: str = tx_id
        self.gene: Gene = gene
        self.exons: List[Region] = []
        self.cdss: List[Region] = []
        self.stop_codon: List[Region] = []

    def add_region(self, region, region_type):
        """Attach a GTF feature (exon / CDS / stop_codon) to this transcript."""
        if region_type == 'exon':
            self.exons.append(region)
        elif region_type == 'CDS':
            self.cdss.append(region)
        elif region_type == 'stop_codon':
            self.stop_codon.append(region)
        return

    def update(self):
        """
        Update the order of regions so that operations related to intervals can
        work correctly. Must be called after all regions have been added.
        """
        self.exons = sorted(self.exons, key=lambda r: r.start)
        self.cdss = sorted(self.cdss, key=lambda r: r.start)
        self.stop_codon = sorted(self.stop_codon, key=lambda r: r.start)
        return

    def __len__(self):
        # Length of the mature transcript (sum of exon lengths).
        return sum(len(i) for i in self.exons)

    @property
    def n_exons(self):
        return len(self.exons)

    @property
    def cds_len(self):
        return sum(len(i) for i in self.cdss)

    @property
    def tx_start(self):
        # Genomic coordinate of the transcript 5' end (strand-aware).
        if self.gene.strand == '+':
            return self.exons[0].start
        else:
            return self.exons[-1].end

    @property
    def tx_end(self):
        # Genomic coordinate of the transcript 3' end (strand-aware).
        if self.gene.strand == '+':
            return self.exons[-1].end
        else:
            return self.exons[0].start

    @property
    def cds_start(self):
        # Genomic coordinate of the transcript-direction start of the CDS.
        if len(self.cdss) == 0:
            return None
        elif self.gene.strand == '+':
            return self.cdss[0].start
        else:
            return self.cdss[-1].end

    @property
    def cds_end(self):
        # Genomic coordinate of the transcript-direction end of the CDS.
        if len(self.cdss) == 0:
            return None
        elif self.gene.strand == '+':
            return self.cdss[-1].end
        else:
            return self.cdss[0].start

    @property
    def stop_codon_start(self):
        # Genomic coordinate of the transcript-direction start of the stop codon.
        # BUGFIX: both branches previously duplicated stop_codon_end (returning
        # stop_codon[-1].end on '+' and stop_codon[0].start on '-'); corrected
        # to follow the same pattern as cds_start.
        if len(self.stop_codon) == 0:
            return None
        elif self.gene.strand == '+':
            return self.stop_codon[0].start
        else:
            return self.stop_codon[-1].end

    @property
    def stop_codon_end(self):
        # Genomic coordinate of the transcript-direction end of the stop codon.
        if len(self.stop_codon) == 0:
            return None
        elif self.gene.strand == '+':
            return self.stop_codon[-1].end
        else:
            return self.stop_codon[0].start

    @property
    def introns(self):
        # Regions between consecutive exons; empty for single-exon transcripts.
        if len(self.exons) == 1:
            return []
        else:
            return [Region(self.exons[i].end + 1, self.exons[i+1].start - 1)
                    for i in range(self.n_exons - 1)]

    def tpos_to_gpos(self, pos: int):
        """
        transform transcript coordinate to genomic coordinate
        param pos: int, position on transcript, 1-based.
        return: genomic position; 0 if pos < 1, -1 if pos is past the 3' end.
        """
        if pos < 1:
            return 0
        elif pos > len(self):
            return -1
        else:
            # Walk exons left-to-right; on the minus strand mirror the
            # transcript position first so the walk direction matches.
            if self.gene.strand == '-':
                pos = len(self) - pos + 1
            for i in range(self.n_exons):
                if len(self.exons[i]) < pos:
                    pos -= len(self.exons[i])
                else:
                    return self.exons[i].start + pos - 1

    def gpos_to_tpos(self, pos):
        """
        transform genomic coordinate to transcript coordinate
        param pos: int, position on genome, 1-based.
        return: (tpos, ptype) where ptype is 'exon', 'intron_<k>',
            'upstream' or 'downstream'; for non-exonic positions tpos is the
            distance to the nearest transcript/exon boundary.
        """
        if pos < self.exons[0].start:
            # Left of the transcript: upstream on '+', downstream on '-'.
            tpos = self.exons[0].start - pos
            ptype = 'upstream' if self.gene.strand == '+' else 'downstream'
            return tpos, ptype
        elif pos > self.exons[-1].end:
            tpos = pos - self.exons[-1].end
            ptype = 'downstream' if self.gene.strand == '+' else 'upstream'
            return tpos, ptype
        else:
            tpos = 0
            for i in range(self.n_exons):
                if self.exons[i].start <= pos:
                    if self.exons[i].end <= pos:
                        # Whole exon lies at or left of pos: count it fully.
                        tpos += len(self.exons[i])
                    else:
                        tpos += pos - self.exons[i].start + 1
                else:
                    # pos falls before exon i; decide whether it is intronic.
                    if self.exons[i-1].end < pos:
                        if self.gene.strand == '+':
                            ptype = 'intron_' + str(i)
                            tpos = pos - self.exons[i - 1].end
                        else:
                            ptype = 'intron_' + str(len(self.exons) - i)
                            tpos = self.exons[i].start - pos
                        # (removed an unreachable ``break`` that followed this return)
                        return tpos, ptype
            ptype = 'exon'
            tpos = tpos if self.gene.strand == '+' else len(self) - tpos + 1
            return tpos, ptype

    def cpos_to_gpos(self, pos):
        """
        transform CDS coordinate to genomic coordinate
        param pos: int position on CDS, 1-based.
        """
        tpos = self.gpos_to_tpos(self.cds_start)[0] + pos - 1
        gpos = self.tpos_to_gpos(tpos)
        return gpos

    def gpos_to_cpos(self, pos):
        """
        transform genomic coordinate to CDS coordinate
        param pos: int, position on genome, 1-based.
        """
        tpos = self.gpos_to_tpos(pos)[0]
        cpos = tpos - self.gpos_to_tpos(self.cds_start)[0] + 1
        return cpos

    def tiv_to_giv(self, pos1, pos2):
        """
        given transcript region boundary:
        return one or more (for features spanning more than one exon)
        exonic region interval(s) as a list of Region objects
        param pos1: int, left transcript coordinate, 1-based.
        param pos2: int, right transcript coordinate, 1-based.
        """
        cod1 = self.tpos_to_gpos(pos1)
        cod2 = self.tpos_to_gpos(pos2)
        # On the minus strand the genomic order of cod1/cod2 is reversed.
        start = min(cod1, cod2)
        end = max(cod1, cod2)
        givs = []
        for i in range(self.n_exons):
            if self.exons[i].end < start:
                continue
            if self.exons[i].start > end:
                break
            # Clip the exon to [start, end].
            if self.exons[i].start <= start:
                if self.exons[i].end <= end:
                    givs.append(Region(start, self.exons[i].end))
                else:
                    givs.append(Region(start, end))
            else:
                if self.exons[i].end <= end:
                    givs.append(Region(self.exons[i].start, self.exons[i].end))
                else:
                    givs.append(Region(self.exons[i].start, end))
        return givs

    @property
    def five_prime_utrs(self):
        # Exonic intervals 5' of the CDS; empty without a CDS or when the
        # CDS starts right at the transcript start.
        if len(self.cdss) == 0 or self.cds_start == self.tx_start:
            return []
        else:
            return self.tiv_to_giv(1, self.gpos_to_tpos(self.cds_start)[0] - 1)

    @property
    def three_prime_utrs(self):
        # Exonic intervals 3' of the stop codon (or of the CDS when no stop
        # codon is annotated); empty when the coding region reaches tx_end.
        if len(self.cdss) == 0 or self.stop_codon_end == self.tx_end or self.cds_end == self.tx_end:
            return []
        else:
            if len(self.stop_codon) > 0:
                return self.tiv_to_giv(self.gpos_to_tpos(self.stop_codon_end)[0] + 1, len(self))
            else:
                return self.tiv_to_giv(self.gpos_to_tpos(self.cds_end)[0] + 1, len(self))

    def format_region_bed12(self, rs, flank=0):
        """
        format a spliced region in a transcript into bed12 format
        param rs: a list of items of class Region
        param flank: bases to extend at both genomic ends of the region
        """
        rs = sorted(rs, key=lambda r: r.start)
        if flank > 0:
            # NOTE: mutates the first/last Region objects in place.
            rs[0].start -= flank
            rs[-1].end += flank
        starts = [r.start - 1 for r in rs]   # BED is 0-based, half-open
        ends = [r.end for r in rs]
        blockstart = [str(x - starts[0]) for x in starts]
        blocksize = [str(len(r)) for r in rs]
        s = [self.gene.chrom, starts[0], ends[-1], self.tx_id, self.gene.gene_id,
             self.gene.strand, '0', '0', '0', len(starts)]
        s = s + [','.join(blocksize) + ',', ','.join(blockstart) + ',']
        return s
###############################################################################
# functions ###################################################################
###############################################################################
def parse_gtf(gtf_file):
    """
    read GTF file
    param gtf_file: path to GTF file, gzipped format allowed; '-' reads from
        standard input.
    return: dict mapping transcript_id -> Transcript, with regions sorted.
    """
    gtf = {}
    if gtf_file.endswith('.gz'):
        f = gzip.open(gtf_file, 'rt')
    elif gtf_file == '-':
        f = sys.stdin
    else:
        f = open(gtf_file)
    for line in f:
        # Skip comment/header lines.
        if line[0] == '#':
            continue
        ary = line.strip().split('\t')
        # Rows lacking both gene_id and transcript_id attributes (e.g. gene
        # rows in some GTFs) are skipped by this regex.
        m = re.search(r'gene_id "(.*?)".*?transcript_id "(.*?)"', ary[8])
        if m:
            if m.group(2) in gtf:
                gtf[m.group(2)].add_region(region = Region(int(ary[3]), int(ary[4])), region_type=ary[2])
            else:
                gene = Gene(gene_id=m.group(1), chrom=ary[0], strand=ary[6])
                tx = Transcript(tx_id=m.group(2), gene=gene)
                tx.add_region(region = Region(int(ary[3]), int(ary[4])), region_type=ary[2])
                gtf[m.group(2)] = tx
    # NOTE(review): this also closes sys.stdin when gtf_file == '-' — confirm
    # nothing reads from stdin afterwards.
    f.close()
    # Sort each transcript's regions so coordinate math works correctly.
    for tx in gtf:
        gtf[tx].update()
    return gtf
def exon_to_bed(gtf_file, extend=0):
    """
    print exons of each transcript in bed12 format
    param gtf_file: path to GTF file, gzipped format allowed.
    param extend: number of bases to extend at both transcript ends.
    """
    transcripts = parse_gtf(gtf_file)
    for tx in transcripts.values():
        fields = tx.format_region_bed12(tx.exons, flank=extend)
        print('\t'.join(map(str, fields)))
    return
def cds_to_bed(gtf_file, extend=0):
    """
    print CDSs of each transcript in bed12 format
    param gtf_file: path to GTF file, gzipped format allowed.
    param extend: number of bases to extend at both CDS ends.
    """
    transcripts = parse_gtf(gtf_file)
    for tx in transcripts.values():
        if not tx.cdss:
            continue
        fields = tx.format_region_bed12(tx.cdss, flank=extend)
        print('\t'.join(map(str, fields)))
    return
def utr5_to_bed(gtf_file, extend=0):
    """
    print UTR5 of each transcript in bed12 format
    param: path to GTF file, gzipped format allowed.
    param extend: number of bases to extend at both sides.
    """
    gtf = parse_gtf(gtf_file)
    for tx_id in gtf:
        tx = gtf[tx_id]
        tx_utr5 = tx.five_prime_utrs
        if len(tx_utr5) > 0:
            # BUG FIX: 'extend' was accepted but never forwarded, unlike the
            # exon/cds/utr3 variants; pass it through as the flank size.
            items = tx.format_region_bed12(tx_utr5, flank=extend)
            print('\t'.join(str(i) for i in items))
    return
def utr3_to_bed(gtf_file, extend=0):
    """
    print UTR3 of each transcript in bed12 format
    param: path to GTF file, gzipped format allowed.
    param extend: number of bases to extend at both sides.
    """
    for tx in parse_gtf(gtf_file).values():
        regions = tx.three_prime_utrs
        if len(regions) == 0:
            continue
        fields = tx.format_region_bed12(regions, flank=extend)
        print('\t'.join(map(str, fields)))
def t2g(gtf_file, tfile):
    """
    convert transcript coordinates to genomic coordinates
    param: path to GTF file, gzipped format allowed.
    param tfile: tab-delimited file, 1st column=tx, 2nd column = tpos
    """
    gtf = parse_gtf(gtf_file)
    with open(tfile) as handle:
        for fields in csv.reader(handle, delimiter='\t'):
            try:
                tx = gtf[fields[0]]
                gpos = tx.tpos_to_gpos(int(fields[1]))
                fields += [tx.gene.chrom, tx.gene.strand, str(gpos)]
            except KeyError:
                print(f'Tx isoform {fields[0]} was not found in GTF file!', file=sys.stderr)
                fields += ['NA'] * 3
            print('\t'.join(fields))
def g2t(gtf_file, gfile):
    """
    convert genomic coordinates ot transcript coordinates
    param: path to GTF file, gzipped format allowed.
    param gfile: tab-delimited file, 1st column=tx, 2nd column = gpos
    """
    gtf = parse_gtf(gtf_file)
    with open(gfile) as handle:
        for fields in csv.reader(handle, delimiter='\t'):
            try:
                tx = gtf[fields[0]]
                tpos, ptype = tx.gpos_to_tpos(int(fields[1]))
                fields += [str(tpos), ptype]
            except KeyError:
                print(f'Tx isoform {fields[0]} was not found in GTF file!', file=sys.stderr)
                fields += ['NA'] * 2
            print('\t'.join(fields))
def tiv2giv(gtf_file, tivfile):
    """
    convert transcript intervals to genomic intervals
    param: path to GTF file, gzipped format allowed.
    param tivfile: tab-delimited, first three columns are tx_id, start, and end, 1-based
    """
    gtf = parse_gtf(gtf_file)
    with open(tivfile) as handle:
        for fields in csv.reader(handle, delimiter='\t'):
            try:
                tx = gtf[fields[0]]
                spans = tx.tiv_to_giv(int(fields[1]), int(fields[2]))
                print('\t'.join(map(str, tx.format_region_bed12(spans))))
            except KeyError:
                print(f'Tx isoform {fields[0]} was not found in GTF file!', file=sys.stderr)
def giv2tiv(gtf_file, givfile):
    """
    convert genomic intervals to transcript intervals
    param: path to GTF file, gzipped format allowed.
    param givfile: tab-delimited, first three columns are tx_id, start, and end, 1-based
    """
    gtf = parse_gtf(gtf_file)
    with open(givfile) as handle:
        for fields in csv.reader(handle, delimiter='\t'):
            try:
                tx = gtf[fields[0]]
                # On the minus strand the genomic interval ends map to swapped
                # transcript positions, so convert them in reverse order.
                if tx.gene.strand == '+':
                    left = list(tx.gpos_to_tpos(int(fields[1])))
                    right = list(tx.gpos_to_tpos(int(fields[2])))
                else:
                    left = list(tx.gpos_to_tpos(int(fields[2])))
                    right = list(tx.gpos_to_tpos(int(fields[1])))
                fields += [str(left[0]), str(right[0]), left[1], right[1]]
            except KeyError:
                fields += ['NA'] * 4
                print(f'Tx isoform {fields[0]} was not found in GTF file!', file=sys.stderr)
            print('\t'.join(fields))
def tx_info(gtf_file):
    """
    print summary information of each transcript
    param: path to GTF file, gzipped format allowed.
    note: stop codon is counted for CDS length, so that cds + utr5 + utr3 = transcript length
    """
    gtf = parse_gtf(gtf_file)
    print('\t'.join(['tx_id', 'gene_id', 'chrom', 'strand',
                     'len', 'len_cds', 'len_utr5', 'len_utr3']))
    for tx in gtf.values():
        total = len(tx)
        utr5_len = sum(len(region) for region in tx.five_prime_utrs)
        cds_len = (sum(len(region) for region in tx.cdss)
                   + sum(len(region) for region in tx.stop_codon))
        utr3_len = total - cds_len - utr5_len
        fields = [tx.tx_id, tx.gene.gene_id, tx.gene.chrom, tx.gene.strand,
                  total, cds_len, utr5_len, utr3_len]
        print('\t'.join(str(f) for f in fields))
if __name__ == "__main__":
    # Parent parser holding the -g/--gtf option shared by every subcommand.
    parent_parser = argparse.ArgumentParser(add_help=False)
    parent_parser.add_argument('-g', '--gtf',
        type=str, default='-', help='input gtf file')
    # main parser with subparsers
    parser = argparse.ArgumentParser(prog='GTFtools.py',
        description='GTF file manipulation')
    subparsers = parser.add_subparsers(title='GTF operations',
        help='supported operations', dest='subcmd')
    # BUG FIX: 'txinfo' previously omitted parents=[parent_parser], so the
    # namespace had no 'gtf' attribute and tx_info(args.gtf) raised
    # AttributeError for that subcommand.
    parser_txinfo = subparsers.add_parser('txinfo',
        help='summary information of each transcript', parents=[parent_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_tobed = subparsers.add_parser('convert2bed',
        help='convert GTF to bed12 format', parents=[parent_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_tobed.add_argument('-t', '--type',
        type=str, default='exon',
        choices=['exon', 'cds', 'utr5', 'utr3'],
        help='types of intervals to be converted to bed for each transcript')
    parser_tobed.add_argument('-e', '--extend',
        type=int, default=0,
        help='number of bases to extend at both sides')
    parser_t2g = subparsers.add_parser('t2g',
        help='convert tpos to gpos', parents=[parent_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_t2g.add_argument('-i', '--infile', type = str,
        help='tab-delimited file with the first two columns composed of'
        'tx_id and transcript coordinates')
    parser_g2t = subparsers.add_parser('g2t',
        help='convert gpos to tpos', parents=[parent_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_g2t.add_argument('-i', '--infile', type = str,
        help='tab-delimited file with the first two columns composed of '
        'tx_id and genomic coordinates')
    parser_tiv2giv = subparsers.add_parser('tiv2giv',
        help='convert tiv to giv', parents=[parent_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_tiv2giv.add_argument('-i', '--infile', type = str,
        help='tab-delimited file with the first three columns composed of '
        'tx_id, start and end coordinates')
    parser_giv2tiv = subparsers.add_parser('giv2tiv',
        help='convert giv to tiv', parents=[parent_parser],
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_giv2tiv.add_argument('-i', '--infile', type = str,
        help='tab-delimited file with the first three columns composed of '
        'tx_id, start and end coordinates')
    args = parser.parse_args()
    # Dispatch on the chosen subcommand.
    if args.subcmd == 'convert2bed':
        if args.type == 'exon':
            exon_to_bed(args.gtf, args.extend)
        elif args.type == 'cds':
            cds_to_bed(args.gtf, args.extend)
        elif args.type == 'utr5':
            utr5_to_bed(args.gtf, args.extend)
        else:
            utr3_to_bed(args.gtf, args.extend)
    elif args.subcmd == 'txinfo':
        tx_info(args.gtf)
    elif args.subcmd == 't2g':
        # BUG FIX: these helpers take 'gtf_file' as their first parameter, so
        # calling them with the keyword 'gtf=' raised TypeError; pass the
        # arguments positionally instead.
        t2g(args.gtf, args.infile)
    elif args.subcmd == 'g2t':
        g2t(args.gtf, args.infile)
    elif args.subcmd == 'tiv2giv':
        tiv2giv(args.gtf, args.infile)
    elif args.subcmd == 'giv2tiv':
        giv2tiv(args.gtf, args.infile)
| StarcoderdataPython |
3354795 | <filename>movieapp_backend/movie_app/models.py
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
# Create your models here.
class MoviePost(models.Model):
    """A movie recommendation post authored by a user and shared with others."""
    post_id = models.AutoField(primary_key=True)
    title = models.CharField(max_length=100)
    image_url = models.CharField(max_length=255, blank=True, null=True)
    # NOTE(review): no range constraint on the rating; presumably a star
    # rating bounded by the UI -- confirm against the frontend.
    rating = models.IntegerField()
    url = models.CharField(max_length=255, blank=True, null=True)
    # NOTE(review): ForeignKey/ManyToManyField without on_delete implies
    # Django < 2.0 (on_delete became mandatory in 2.0).
    user = models.ForeignKey(User, related_name='posts')
    send_to = models.ManyToManyField(User, related_name='received_posts')
    content = models.TextField(blank=True, null=True)
    created = models.DateTimeField(auto_now_add=True)
    # 'modified' is not auto_now: it must be set explicitly by saving code.
    modified = models.DateTimeField(null=True, blank=True)
    class Meta:
        # Explicit table name instead of the default 'movie_app_moviepost'.
        db_table = 'movie_post'
class Friendship(models.Model):
    """A friendship link between two users (stored as creator -> friend)."""
    created = models.DateTimeField(auto_now_add=True, editable=False)
    creator = models.ForeignKey(User, related_name="friendship_creator_set")
    friend = models.ForeignKey(User, related_name="friend_set")
    def delete(self):
        """Delete the friendship and un-share posts the two users sent each other.

        Before removing the row, each user is dropped from the recipient list
        (send_to) of every post authored by the other user.
        """
        user = self.creator
        friend = self.friend
        for post in user.received_posts.filter(user=friend):
            post.send_to.remove(user)
        for post in friend.received_posts.filter(user=user):
            post.send_to.remove(friend)
        super(Friendship, self).delete()
class Profile(models.Model):
    """Per-user extension record plus friendship/chat query helpers."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    name = models.CharField(max_length=50, blank=True, null=True)
    avatar = models.ImageField(default="avatar.svg")
    is_logged_in = models.BooleanField(default=False)
    def is_friend(self, friend):
        """Return True if *friend* is among this profile's user's friends."""
        return friend in self.user.profile.get_friends()
    def get_not_viewed_messages(self):
        """Return the queryset of chat messages received but not yet viewed."""
        user = self.user
        chat_messages = ChatMessage.objects.filter(models.Q(receiver=user) & models.Q(viewed=False))
        return chat_messages
    def get_chat_messages(self, friend):
        """Return the full two-way conversation with *friend*, oldest first."""
        user = self.user
        # Messages sent by either side of the pair, in either direction.
        chat_messages = ChatMessage.objects.filter(
            models.Q(creator=user) & models.Q(receiver=friend) |
            models.Q(receiver=user) & models.Q(creator=friend)
        ).order_by('created')
        return chat_messages
    def get_friends(self):
        """Return the list of User objects linked to this user by a Friendship."""
        user = self.user
        friendships = Friendship.objects.filter(models.Q(creator=user) | models.Q(friend=user))
        friends = []
        for x in friendships:
            # A friendship stores the pair as (creator, friend); collect
            # whichever side is not this user.
            if x.creator == user:
                friends.append(x.friend)
            else:
                friends.append(x.creator)
        return friends
class FriendshipRequest(models.Model):
    """A pending friendship request from one user to another."""
    from_user = models.ForeignKey(User, related_name="friendship_requests_sent")
    to_user = models.ForeignKey(User, related_name="friendship_requests_received")
    created = models.DateTimeField(auto_now_add=True)
    # Timestamps double as flags: NULL means "not rejected" / "not viewed".
    rejected = models.DateTimeField(blank=True, null=True)
    viewed = models.DateTimeField(blank=True, null=True)
    def accept(self):
        """Accept the request: create the Friendship and delete both
        directions of any pending request between the two users."""
        f = Friendship()
        f.creator = self.from_user
        f.friend = self.to_user
        f.save()
        self.delete()
        # Also drop a reciprocal request, if one exists.
        FriendshipRequest.objects.filter(
            from_user=self.to_user,
            to_user=self.from_user
        ).delete()
        return True
    def reject(self):
        """ reject this friendship request """
        self.rejected = timezone.now()
        self.save()
    def cancel(self):
        """ cancel this friendship request """
        self.delete()
        return True
    def mark_viewed(self):
        """Record that the recipient has seen this request."""
        self.viewed = timezone.now()
        self.save()
        return True
class ChatMessage(models.Model):
    """A single direct chat message between two users."""
    creator = models.ForeignKey(User, related_name='chat_message_sent')
    receiver = models.ForeignKey(User, related_name='chat_message_received')
    message = models.TextField()
    created = models.DateTimeField(auto_now_add=True, editable=False)
    # Set to True once the receiver reads the message (presumably by view
    # code elsewhere -- confirm).
    viewed = models.BooleanField(default=False)
| StarcoderdataPython |
6438525 | <filename>db/dev/weighted_strings.py
import bisect
import csv
import functools
import glob
import itertools
import random
################################################################################
class WeightedStrings:
    """Immutable, hashable mapping from strings to integer weights.

    Keys and values are exposed as tuples sorted by key; the constructor
    validates that the input is a dict of str -> int.
    """
    def __init__(self, data):
        if not isinstance(data, dict):
            raise TypeError('Data must be of type dict!')
        snapshot = data.copy()
        ordered_keys = []
        ordered_values = []
        for key, value in sorted(snapshot.items()):
            if not isinstance(key, str):
                raise TypeError('Keys must be of type str!')
            if not isinstance(value, int):
                raise TypeError('Values must be of type int!')
            ordered_keys.append(key)
            ordered_values.append(value)
        self.__data = snapshot
        self.__keys = tuple(ordered_keys)
        self.__values = tuple(ordered_values)
        self.__hash = None  # computed lazily on first hash()
    def __contains__(self, item):
        return item in self.__data
    def __eq__(self, other):
        return (self.keys, self.values) == (other.keys, other.values)
    def __getitem__(self, key):
        return self.__data[key]
    def __hash__(self):
        if self.__hash is None:
            self.__hash = hash(self.keys) ^ hash(self.values)
        return self.__hash
    def __iter__(self):
        return iter(self.keys)
    def __len__(self):
        return len(self.__data)
    def __ne__(self, other):
        return (self.keys, self.values) != (other.keys, other.values)
    def __repr__(self):
        return f'{self.__class__.__name__}({self.__data!r})'
    def get(self, key, default=None):
        """Return the weight for *key*, or *default* when absent."""
        return self.__data.get(key, default)
    @property
    def keys(self):
        """Tuple of keys in sorted order."""
        return self.__keys
    @property
    def values(self):
        """Tuple of weights aligned with :attr:`keys`."""
        return self.__values
    @property
    def items(self):
        """Iterator of (key, weight) pairs in sorted-key order."""
        return zip(self.keys, self.values)
################################################################################
class WeightedRandom:
    """Endless iterator yielding keys of *data* with probability proportional
    to their weights, using a cryptographic RNG."""
    def __init__(self, data):
        self.__data = data
        cumulative = tuple(itertools.accumulate(data.values))
        self.__total = cumulative
        rng = random.SystemRandom()
        # Pre-bind randrange over the total weight for cheap repeated draws.
        self.__range = functools.partial(rng.randrange, cumulative[-1])
    def __iter__(self):
        return self
    def __next__(self):
        # A draw in [0, total) lands in the cumulative slot of one key.
        draw = self.__range()
        return self.__data.keys[bisect.bisect(self.__total, draw)]
################################################################################
class WeightedNames:
    """Endless iterator of capitalized names drawn weighted-randomly from
    tab-separated files matching *pattern* (columns *name* and *weight*)."""
    def __init__(self, pattern, name, weight):
        counts = {}
        for path in glob.iglob(pattern):
            with open(path, newline='') as handle:
                for record in csv.DictReader(handle, dialect='excel-tab'):
                    # Weights may contain thousands separators, e.g. "1,234".
                    counts[record[name]] = int(record[weight].replace(',', ''))
        self.__iter = iter(WeightedRandom(WeightedStrings(counts)))
    def __iter__(self):
        return self
    def __next__(self):
        return next(self.__iter).capitalize()
| StarcoderdataPython |
3297189 | import unittest
from models import *
class TestUser(unittest.TestCase):
    """Smoke test: a User can be constructed with four string fields."""
    def setUp(self):
        self.user = User("Dfdf", "Dfdf", "dfdfdf", "dfdf")


class TestBlog(unittest.TestCase):
    """Smoke test: a Blog can be constructed."""
    def setUp(self):
        # NOTE(review): 12/3/2020 is integer/float division (== 0.002), not a
        # date; preserved as-is, but it was probably meant to be a date string.
        self.blog = Blog("vcvcv", "sdsd", "dsd", "sdsd", 12/3/2020, "dsd")


class TestComment(unittest.TestCase):
    """Smoke test: a Comment can be constructed."""
    def setUp(self):
        self.comment = Comment(1, "sdsd", "dsd")


if __name__ == '__main__':
    # BUG FIX: unittest.main() was previously invoked after each class; when
    # run as a script the first call exits the process, so the later test
    # classes were never defined or run. A single call at the end runs every
    # TestCase in the module.
    unittest.main()
4815272 | """ Minimal abstract regressor class for ESL exercises."""
import numpy as np
from sklearn.base import BaseEstimator, RegressorMixin
class EslRegressor(BaseEstimator, RegressorMixin):
    """ Abstract regressor class for ESL.

    Handles 1d/2d response bookkeeping so concrete subclasses always see a
    2d ``Y`` of shape ``(N, n_responses)``; subclasses implement ``_fit``
    and ``_predict``.
    """
    def __init__(self):
        self._n_fit_points = None         # N seen during fit
        self._n_features = None           # number of feature columns seen during fit
        self._fit_responses_shape = None  # original Y.shape, used to un-2d predictions
        self._n_responses = None          # number of response columns
    @property
    def n_fit_points(self) -> int:
        """ Returns the number of datapoints seen in the fit."""
        return self._n_fit_points
    @property
    def n_features(self) -> int:
        """ Returns the number of features seen in the fit."""
        return self._n_features
    @property
    def n_responses(self) -> int:
        """ Returns the number of responses seen in the fit."""
        return self._n_responses
    def fit(self, X: np.ndarray, Y: np.ndarray) -> RegressorMixin:
        """ Trains the regressor.

        Args:
            X: numpy matrix of input features, dimensions ``(N, n_features)``.
            Y: 1d or 2d numpy matrix of responses.
        """
        self._fit_responses_shape = Y.shape
        # Promote a 1d response vector to an (N, 1) column matrix.
        Y = Y if len(Y.shape) == 2 else Y[:, np.newaxis]
        self._n_fit_points, self._n_responses = Y.shape
        self._n_features = X.shape[1]
        self._fit(X, Y)
        return self
    def _fit(self, X: np.ndarray, Y: np.ndarray) -> None:
        """ Trains the regressor.

        Args:
            X: numpy matrix of input features, dimensions ``(N, n_features)``.
            Y: 2d numpy array of responses, dimensions ``(N, n_responses)``.
        """
        # BUG FIX: the message named a nonexistent '_train' hook; the method
        # subclasses must override is '_fit'.
        raise NotImplementedError("EslRegressor does not implement _fit - did you forget something?")
    def _predict(self, X: np.ndarray) -> np.ndarray:
        """ Predicts, returning a 2d array."""
        # BUG FIX: the message said 'predict', which *is* implemented; the
        # abstract hook subclasses must override is '_predict'.
        raise NotImplementedError("EslRegressor does not implement _predict - did you forget something?")
    def predict(self, X: np.ndarray) -> np.ndarray:
        """ Predicts, returning a 1d or 2d array depending on the shape of responses seen during fit."""
        Y = self._predict(X)
        return Y if len(self._fit_responses_shape) == 2 else Y[:, 0]
    def score(self, X, y=None):
        raise NotImplementedError("score not implemented in abstract class EslRegressor")
| StarcoderdataPython |
1627381 | <filename>repos/system_upgrade/el7toel8/actors/registeryumadjustment/actor.py
from leapp.actors import Actor
from leapp.models import DNFWorkaround
from leapp.tags import FactsPhaseTag, IPUWorkflowTag
class RegisterYumAdjustment(Actor):
    """
    Registers a workaround which will adjust the yum directories during the upgrade.
    """
    name = 'register_yum_adjustment'
    consumes = ()  # this actor consumes no messages
    produces = (DNFWorkaround,)
    tags = (IPUWorkflowTag, FactsPhaseTag)
    def process(self):
        # Emit a DNFWorkaround pointing at the bundled 'handleyumconfig'
        # tool, which the upgrade workflow executes to adjust yum
        # configuration directories.
        self.produce(
            DNFWorkaround(
                display_name='yum config fix',
                script_path=self.get_tool_path('handleyumconfig'),
            )
        )
| StarcoderdataPython |
1749806 | <reponame>gideontong/CodeQuest<gh_stars>1-10
"""
Source: Stack Abuse
Binary search follows a divide and conquer methodology. It is faster than linear search but
requires that the array be sorted before the algorithm is executed
"""
def BinarySearch(lys, val):
    """Return an index of *val* in the ascending-sorted sequence *lys*,
    or -1 if *val* is absent.

    Iterative binary search: O(log n) comparisons. For duplicated values
    any matching index may be returned.
    """
    first = 0
    last = len(lys) - 1
    while first <= last:
        mid = (first + last) // 2
        if lys[mid] == val:
            # Early return replaces the original 'index == -1' flag variable.
            return mid
        if val < lys[mid]:
            last = mid - 1
        else:
            first = mid + 1
    return -1
11396436 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# ===========================================================================
# Copyright © 2020 <NAME>. All rights reserved.
# Created by <NAME> on 2020-03-05.
#
# This file is distributed under the BSD License.
# License text is included with the source distribution.
# ===========================================================================
import argparse
import glob
import os
import re
import sys
# Matches C/C++ #include directives: group 1 captures "quoted" paths,
# group 2 captures <angle-bracket> paths.
INCLUDE_FILE_REGEX = re.compile(r"\s*#include\s*(?:\"([^\"]+)\"|<([^>]+)>).*")
def get_all_files_names(dir_name):
    """Map each file path relative to *dir_name* to its absolute real path.

    When the same relative path is seen twice during the walk, the first
    occurrence wins.
    """
    mapping = {}
    for root, _subdirs, names in os.walk(dir_name):
        for name in names:
            full_path = os.path.join(root, name)
            rel_path = os.path.relpath(full_path, dir_name)
            mapping.setdefault(rel_path, os.path.realpath(full_path))
    return mapping
def get_includes(file_path):
    """Return every #include target ("..." or <...>) found in *file_path*."""
    includes = []
    # BUG FIX: use a context manager so the file handle is closed promptly;
    # the original iterated an anonymous open() and leaked the handle.
    with open(file_path) as f:
        for line in f:
            m = INCLUDE_FILE_REGEX.match(line)
            if m:
                includes.append(m.group(1) or m.group(2))
    return includes
def get_dependency_map(files, known_files):
    """Map each file's real path to the real paths of local files it includes.

    Breadth-first worklist walk: every include that resolves to a known file
    is itself scanned later, so the map covers transitive dependencies.
    """
    dependencies = {}
    next_files_to_check = [os.path.realpath(f) for f in files]
    while next_files_to_check:
        files_to_check = next_files_to_check
        next_files_to_check = []
        for file_path in files_to_check:
            real_file_path = os.path.realpath(file_path)
            if real_file_path in dependencies:
                continue
            # Includes are resolved against known_files plus the including
            # file's own directory (which takes precedence on collisions).
            current_known_files = known_files.copy()
            dir_name = os.path.dirname(file_path)
            if not dir_name:
                dir_name = os.getcwd()
            current_known_files.update(get_all_files_names(dir_name))
            dependencies[real_file_path] = []
            visited = set()
            for include in get_includes(file_path):
                real_include_path = current_known_files.get(include)
                # Skip unknown (e.g. system) headers and duplicates.
                if not real_include_path or real_include_path in visited:
                    continue
                visited.add(real_include_path)
                dependencies[real_file_path].append(real_include_path)
                if real_include_path not in dependencies:
                    next_files_to_check.append(real_include_path)
    return dependencies
def get_file_inclusion_order_rec(result, file_path, dependencies, visited):
    """Depth-first post-order walk: append dependencies before dependents."""
    visited.add(file_path)
    for child in dependencies[file_path]:
        if child in visited:
            continue
        get_file_inclusion_order_rec(result, child, dependencies, visited)
    result.append(file_path)
def get_file_inclusion_order(file_paths, dependencies, ignore_files):
    """Return the real paths of *file_paths* in dependency-first order,
    treating *ignore_files* as already visited."""
    seen = set(ignore_files)
    ordered = []
    for path in file_paths:
        get_file_inclusion_order_rec(ordered, os.path.realpath(path),
                                     dependencies, seen)
    return ordered
def get_file_contents(file_path, known_files):
    """Return the lines of *file_path* with includes of known local files
    stripped out (system/unknown includes are kept)."""
    path = os.path.realpath(file_path)
    lookup = known_files.copy()
    folder = os.path.dirname(path)
    if not folder:
        folder = os.getcwd()
    # The file's own directory takes precedence for include resolution.
    lookup.update(get_all_files_names(folder))
    kept = []
    for line in open(path, encoding="utf-8-sig"):
        m = INCLUDE_FILE_REGEX.match(line)
        if m and lookup.get(m.group(1) or m.group(2)):
            continue
        kept.append(line)
    return kept
def get_all_file_contents(file_paths, known_files):
    """Concatenate the include-filtered contents of every file in *file_paths*."""
    merged = []
    for path in file_paths:
        merged.extend(get_file_contents(path, known_files))
    return merged
def remove_pragma_once(lines, keep_first):
    """Drop '#pragma once' lines, optionally keeping only the first one."""
    pragma = re.compile(r"\s*#pragma\s+once\b.*")
    kept = []
    for line in lines:
        if pragma.match(line):
            if not keep_first:
                continue
            keep_first = False  # keep this occurrence, drop the rest
        kept.append(line)
    return kept
def remove_redundant_includes(lines):
    """Keep only the first occurrence of each #include target."""
    seen = set()
    kept = []
    for line in lines:
        m = INCLUDE_FILE_REGEX.match(line)
        if m:
            target = m.group(1) or m.group(2)
            if target in seen:
                continue
            seen.add(target)
        kept.append(line)
    return kept
def remove_matching_lines(lines, regex):
    """Return *lines* without those matching *regex* (anchored at line start)."""
    pattern = re.compile(regex)
    return [line for line in lines if not pattern.match(line)]
def remove_successive_empty_lines(lines):
    """Collapse each run of whitespace-only lines down to a single line."""
    kept = []
    for line in lines:
        # Skip a blank line only when the previous kept line is also blank.
        if kept and line.isspace() and kept[-1].isspace():
            continue
        kept.append(line)
    return kept
def make_argument_parser():
    """Construct the command-line interface for the merge tool."""
    parser = argparse.ArgumentParser(
        description='Generates source files for a C++ command line argument'
                    ' parser.')
    parser.add_argument("files", metavar="C++ files", nargs="+",
                        help="The C++ files that are to be merged.")
    parser.add_argument("-o", "--output", metavar="FILE",
                        help="The output file.")
    parser.add_argument("--no-pragma-once", action="store_const", const=True,
                        default=False,
                        help="Don't insert a pragma once at the beginning of the"
                             " output file.")
    parser.add_argument("-p", "--prepend", metavar="TEXT", action="append",
                        help="Write TEXT at the start of the output file.")
    parser.add_argument("-i", "--include", metavar="DIR", action="append",
                        help="Add DIR to the list of include and source dirs.")
    parser.add_argument("-f", "--filter", metavar="FILE", action="append",
                        help="Filter out FILE or files in FILE if FILE is a directory.")
    return parser
def main():
    """Entry point: expand inputs, order by dependency, merge, and emit.

    Returns 0; writes to --output (only when content changed) or stdout.
    """
    args = make_argument_parser().parse_args()
    # Expand glob patterns and drop duplicates while preserving order.
    paths = []
    visited = set()
    for path in args.files:
        if not os.path.exists(path):
            tmp = glob.glob(path)
            if not tmp:
                print(f"WARNING: {path} not found.")
            for p in tmp:
                if p not in visited:
                    paths.append(p)
                    visited.add(p)
        elif path not in visited:
            paths.append(path)
            visited.add(path)
        else:
            print(f"WARNING: {path} is listed more than once among the input"
                  f" files. All but the first will be ignored.")
    # Build the include-resolution table; earlier -i dirs win (reverse update).
    known_files = {}
    if args.include:
        for dir_name in args.include[::-1]:
            known_files.update(get_all_files_names(dir_name))
    # Files excluded from the merge entirely (-f files or directories).
    ignore_files = set()
    if args.filter:
        for name in args.filter:
            if os.path.isdir(name):
                ignore_files.update(get_all_files_names(name).values())
            elif os.path.isfile(name):
                ignore_files.add(os.path.realpath(name))
    dependencies = get_dependency_map(paths, known_files)
    file_order = get_file_inclusion_order(paths, dependencies, ignore_files)
    # Merge, then normalize pragmas, duplicate includes, and blank runs.
    lines = get_all_file_contents(file_order, known_files)
    lines = remove_pragma_once(lines, not args.no_pragma_once)
    lines = remove_redundant_includes(lines)
    lines = remove_successive_empty_lines(lines)
    if args.prepend:
        text = "".join(args.prepend) + "".join(lines)
    else:
        text = "".join(lines)
    if args.output:
        # Skip the write (and the "Updated" message) when nothing changed.
        if os.path.exists(args.output) and open(args.output).read() == text:
            return 0
        if not os.path.exists(os.path.dirname(args.output)):
            os.makedirs(os.path.dirname(args.output))
        open(args.output, "w").write(text)
        print(f"Updated {args.output}")
    else:
        sys.stdout.write(text)
    return 0
if __name__ == "__main__":
    # Script entry point: propagate main()'s return code to the shell.
    sys.exit(main())
| StarcoderdataPython |
182574 | <filename>app/scrape/not_a_bot.py
from selenium import webdriver
import os
import time
class Not_a_IG_bot:
    """Minimal Instagram scraper driven by headless Chrome via Selenium.

    Logs in on construction; the chromedriver binary path is read from the
    WEB_CHROME environment variable.
    """
    def __init__(self, username, password):
        self.username = username
        self.password = password
        options = webdriver.ChromeOptions()
        options.add_argument('headless')
        # NOTE(review): 'chrome_options' is the legacy Selenium 3 keyword;
        # newer Selenium uses 'options' -- confirm the pinned version.
        self.driver = webdriver.Chrome(os.environ.get('WEB_CHROME'),
                                       chrome_options=options)
        self.URL = 'https://www.instagram.com'
        self.login()
    def login(self):
        """Submit the login form with the stored credentials."""
        self.driver.get('{}/accounts/login/'.format(self.URL))
        time.sleep(1)  # crude wait for the login form to render
        username_input = self.driver.find_element_by_name("username")
        password_input = self.driver.find_element_by_name("password")
        username_input.send_keys(self.username)
        # BUG FIX: the source contained an undefined '<PASSWORD>' placeholder
        # (a syntax error); the field must receive the stored password.
        password_input.send_keys(self.password)
        self.driver.find_element_by_xpath('//*[@id="react-root"]/section/main/div/article/div/div[1]/div/form/div[4]/button/div').click()
        time.sleep(2)
    def nav_user(self, user):
        """Navigate to *user*'s profile page."""
        self.driver.get('{}/{}'.format(self.URL, user))
    def open_post(self, user):
        """Open the first post on *user*'s profile."""
        self.nav_user(user)
        time.sleep(2)
        post = self.driver.find_element_by_class_name('eLAPa')
        post.click()
    def get_text(self, user):
        """Print the caption/comment text of *user*'s first post."""
        self.open_post(user)
        time.sleep(2)
        text = self.driver.find_element_by_class_name('C4VMK').text
        print(text)
    def get_img(self, user):
        """Print the image URL of *user*'s first post."""
        self.open_post(user)
        time.sleep(2)
        img = self.driver.find_element_by_class_name('FFVAD')
        print(img.get_attribute('src'))
if __name__ == "__main__":
    # Credentials come from the BOT_USER / BOT_PASS environment variables.
    ig_bot = Not_a_IG_bot(os.environ.get('BOT_USER'),
                          os.environ.get('BOT_PASS'))
    ig_bot.get_text('medialunes')
    ig_bot.get_img('medialunes')
| StarcoderdataPython |
9796559 | import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal, assert_equal
from tadataka.so3 import is_rotation_matrix, exp_so3, log_so3, tangent_so3
def test_is_rotation_matrix():
    """is_rotation_matrix accepts proper orthonormal matrices and rejects others."""
    s = 1 / np.sqrt(2)
    rotations = [
        np.array([[-1, 0, 0],
                  [0, -1, 0],
                  [0, 0, 1]]),
        np.array([[0, 0, 1],
                  [0, 1, 0],
                  [-1, 0, 0]]),
        np.array([[1, 0, 0],
                  [0, s, -s],
                  [0, s, s]]),
        np.array([[-7 / 25, 0, -24 / 25],
                  [0, -1, 0],
                  [-24 / 25, 0, 7 / 25]]),
    ]
    for matrix in rotations:
        assert is_rotation_matrix(matrix)
    non_rotations = [
        np.array([[0, 0, 1],
                  [0, 1, 0],
                  [-2, 0, 0]]),
        np.array([[0, 0, 1],
                  [0, 1, 0],
                  [-1, 1, 0]]),
    ]
    for matrix in non_rotations:
        assert not is_rotation_matrix(matrix)
def test_tangent_so3():
    """tangent_so3 builds the skew-symmetric (hat) matrix of a 3-vector."""
    cases = [
        ([1, 2, 3], [[0, -3, 2],
                     [3, 0, -1],
                     [-2, 1, 0]]),
        ([4, 5, 6], [[0, -6, 5],
                     [6, 0, -4],
                     [-5, 4, 0]]),
    ]
    for vec, expected in cases:
        assert_array_equal(tangent_so3(vec), expected)
| StarcoderdataPython |
class CountFromBy:
    """Counter starting at *v* that advances by *i* on each increase()."""
    def __init__(self, v: int = 0, i: int = 1) -> None:
        self.val = v
        self.incr = i
    def increase(self) -> None:
        """Advance the counter by its configured increment."""
        self.val = self.val + self.incr
    def __repr__(self) -> str:
        # The counter renders as its current value.
        return f'{self.val}'
| StarcoderdataPython |
11325447 | <gh_stars>0
'''
Suite of tests for the Consensus module
'''
import unittest
import ncov.parser
# Module-level fixture: the sample CSV is parsed at import time, so the data
# file must exist wherever these tests are collected.
test_lineage = ncov.parser.Lineage(file='data/sample_lineages.csv')
class LineageTest(unittest.TestCase):
    """Tests for the ncov.parser.Lineage helpers."""
    def test_create_lineage_dictionary(self):
        # The sample CSV maps three samples to their Pangolin lineages.
        lineage_dict = test_lineage.create_lineage_dictionary()
        self.assertEqual(lineage_dict['sampleA'], 'B.1.1.43')
        self.assertEqual(lineage_dict['sampleB'], 'B.1.36')
        self.assertEqual(lineage_dict['sampleC'], 'B.1.1.7')
    def test_get_sample_name(self):
        # get_sample_name should strip the '/ARTIC/nanopolish' suffix from
        # the 'taxon' field of a Pangolin output row.
        sample_row = {'taxon' : 'sampleA/ARTIC/nanopolish',
                      'lineage' : 'B.1.1.43',
                      'probability' : 1.0,
                      'pangoLEARN_version' : '2021-01-06',
                      'passed_qc' : 'passed_qc'}
        expected_sample_name = 'sampleA'
        sample_name = test_lineage.get_sample_name(row=sample_row)
        self.assertEqual(sample_name, expected_sample_name)
| StarcoderdataPython |
3372716 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import yaml
import glob
import logging
import os
import pandas as pd
import gspread
from oauth2client.service_account import ServiceAccountCredentials
__author__ = '<NAME>'
class _GoogleAPI(object):
    """
    Google API Client base class

    Loads YAML settings from <cwd>/<setting_file> and builds service-account
    credentials scoped per the 'gcloud.scopes' setting.
    """
    SERVICE_NAME = None
    VERSIONS = None
    def __init__(self, path, client_secret, setting_file='/setting.yml'):
        """
        Google API Client
        :param path: working directory for the concrete client
        :param client_secret: service-account JSON key file
        :param setting_file: YAML settings file relative to the CWD
        """
        settings_path = ''.join([os.path.abspath('.'), setting_file])
        # BUG FIX: yaml.load() without an explicit Loader is unsafe and is
        # rejected by PyYAML >= 6; the open() handle was also never closed.
        with open(settings_path, 'r') as handle:
            self.setting = yaml.safe_load(handle)
        self.client_secret = client_secret
        self.path = path
        self.credentials = self.get_credentials()
    def get_credentials(self):
        """
        get credential
        :return: credentials
        """
        return ServiceAccountCredentials.from_json_keyfile_name(
            filename=self.client_secret,
            scopes=self.setting['gcloud']['scopes']
        )
class GoogleDrive(_GoogleAPI):
    """Google Sheets client that uploads CSV exports as new worksheets."""
    def __init__(self, path, client_secret, setting_file='/setting.yml'):
        """
        Google API Client
        :param path: directory containing the CSV files to upload
        :param client_secret: service-account JSON key file
        :param setting_file: YAML settings file relative to the CWD
        """
        super(GoogleDrive, self).__init__(path, client_secret, setting_file)
        self.gc = gspread.authorize(self.credentials)
        self.sh = self.spread_open()
    def spread_open(self):
        """
        Open Spread sheet
        """
        return self.gc.open(self.setting['gcloud']['spreadsheet']['name'])
    def spread_update(self):
        """
        Upload every CSV matching <path>/<config.file_prefix>* as a new
        worksheet named after the file; empty CSVs are logged and skipped.
        """
        for file_path in glob.glob('{}/{}*'.format(self.path, self.setting['config']['file_prefix'])):
            _, filename = os.path.split(file_path)
            try:
                datasets = pd.read_csv(file_path)
            # BUG FIX: EmptyDataError lives in pandas.errors; the old
            # pd.io.common location was removed in modern pandas releases.
            except pd.errors.EmptyDataError as e:
                logging.error('error:{} filename:{}'.format(e, file_path))
                continue
            logging.info('add to worksheet:{}'.format(filename))
            rows, cols = datasets.shape
            logging.info('add to worksheet:{} rows:{} cols:{}'.format(filename, rows, cols))
            self.sh.add_worksheet(title=filename, rows=rows, cols=cols)
| StarcoderdataPython |
3297277 | <filename>app/external/pytaw/setup.py
from setuptools import setup
# Minimal setuptools configuration for the pytaw package.
setup(
    name='pytaw',
    version='0.0.1',
    packages=['pytaw'],
    url='https://github.com/6000hulls/pytaw',
    license='',
    author='6000hulls',
    author_email='<EMAIL>',
    description='PYTAW: Python YouTube API Wrapper'
)
| StarcoderdataPython |
4902840 | <reponame>ni/grpc-device<filename>source/codegen/metadata/nirfmxinstr/functions.py
functions = {
'BuildCalibrationPlaneString': {
'parameters': [
{
'direction': 'in',
'name': 'calibrationPlaneName',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'selectorStringLength',
'type': 'int32'
},
{
'direction': 'out',
'name': 'selectorString',
'size': {
'mechanism': 'ivi-dance',
'value': 'selectorStringLength'
},
'type': 'char[]'
}
],
'returns': 'int32'
},
'BuildInstrumentString': {
'parameters': [
{
'direction': 'in',
'name': 'selectorString',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'instrumentNumber',
'type': 'int32'
},
{
'direction': 'in',
'name': 'selectorStringOutLength',
'type': 'int32'
},
{
'direction': 'out',
'name': 'selectorStringOut',
'size': {
'mechanism': 'ivi-dance',
'value': 'selectorStringOutLength'
},
'type': 'char[]'
}
],
'returns': 'int32'
},
'BuildLOString': {
'parameters': [
{
'direction': 'in',
'name': 'selectorString',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'loIndex',
'type': 'int32'
},
{
'direction': 'in',
'name': 'selectorStringOutLength',
'type': 'int32'
},
{
'direction': 'out',
'name': 'selectorStringOut',
'size': {
'mechanism': 'ivi-dance',
'value': 'selectorStringOutLength'
},
'type': 'char[]'
}
],
'returns': 'int32'
},
'BuildModuleString': {
'parameters': [
{
'direction': 'in',
'name': 'selectorString',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'moduleName',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'selectorStringOutLength',
'type': 'int32'
},
{
'direction': 'out',
'name': 'selectorStringOut',
'size': {
'mechanism': 'ivi-dance',
'value': 'selectorStringOutLength'
},
'type': 'char[]'
}
],
'returns': 'int32'
},
'BuildPortString': {
'cname': 'RFmxInstr_BuildPortString2',
'parameters': [
{
'direction': 'in',
'name': 'selectorString',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'portName',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'deviceName',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'channelNumber',
'type': 'int32'
},
{
'direction': 'in',
'name': 'selectorStringOutLength',
'type': 'int32'
},
{
'direction': 'out',
'name': 'selectorStringOut',
'size': {
'mechanism': 'ivi-dance',
'value': 'selectorStringOutLength'
},
'type': 'char[]'
}
],
'returns': 'int32'
},
'CfgExternalAttenuationInterpolationLinear': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'selectorString',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'tableName',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'format',
'type': 'int32'
}
],
'returns': 'int32'
},
'CfgExternalAttenuationInterpolationNearest': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'selectorString',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'tableName',
'type': 'char[]'
}
],
'returns': 'int32'
},
'CfgExternalAttenuationInterpolationSpline': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'selectorString',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'tableName',
'type': 'char[]'
}
],
'returns': 'int32'
},
'CfgExternalAttenuationTable': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'selectorString',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'tableName',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'frequency',
'size': {
'mechanism': 'len',
'value': 'arraySize'
},
'type': 'float64[]'
},
{
'direction': 'in',
'name': 'externalAttenuation',
'size': {
'mechanism': 'len',
'value': 'arraySize'
},
'type': 'float64[]'
},
{
'direction': 'in',
'name': 'arraySize',
'type': 'int32'
}
],
'returns': 'int32'
},
'CfgFrequencyReference': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'frequencyReferenceSource',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'frequencyReferenceFrequency',
'type': 'float64'
}
],
'returns': 'int32'
},
'CfgMechanicalAttenuation': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'mechanicalAttenuationAuto',
'type': 'int32'
},
{
'direction': 'in',
'name': 'mechanicalAttenuationValue',
'type': 'float64'
}
],
'returns': 'int32'
},
'CfgRFAttenuation': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'rfAttenuationAuto',
'type': 'int32'
},
{
'direction': 'in',
'name': 'rfAttenuationValue',
'type': 'float64'
}
],
'returns': 'int32'
},
'CfgSParameterExternalAttenuationTable': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'selectorString',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'tableName',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'frequency',
'size': {
'mechanism': 'len',
'value': 'frequencyArraySize'
},
'type': 'float64[]'
},
{
'direction': 'in',
'name': 'frequencyArraySize',
'type': 'int32'
},
{
'direction': 'in',
'name': 'sParameters',
'size': {
'mechanism': 'len',
'value': 'sParameterTableSize'
},
'type': 'NIComplexDouble[]'
},
{
'direction': 'in',
'name': 'sParameterTableSize',
'type': 'int32'
},
{
'direction': 'in',
'name': 'numberOfPorts',
'type': 'int32'
},
{
'direction': 'in',
'name': 'sParameterOrientation',
'type': 'int32'
}
],
'returns': 'int32'
},
'CfgSParameterExternalAttenuationType': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'selectorString',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'sParameterType',
'type': 'int32'
}
],
'returns': 'int32'
},
'CheckAcquisitionStatus': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'out',
'name': 'acquisitionDone',
'type': 'int32'
}
],
'returns': 'int32'
},
'CheckIfListExists': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'listName',
'type': 'char[]'
},
{
'direction': 'out',
'name': 'listExists',
'type': 'int32'
},
{
'direction': 'out',
'name': 'personality',
'type': 'int32'
}
],
'returns': 'int32'
},
'CheckIfSignalConfigurationExists': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'signalName',
'type': 'char[]'
},
{
'direction': 'out',
'name': 'signalConfigurationExists',
'type': 'int32'
},
{
'direction': 'out',
'name': 'personality',
'type': 'int32'
}
],
'returns': 'int32'
},
'Close': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'forceDestroy',
'type': 'int32'
}
],
'returns': 'int32'
},
'DeleteAllExternalAttenuationTables': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'selectorString',
'type': 'char[]'
}
],
'returns': 'int32'
},
'DeleteExternalAttenuationTable': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'selectorString',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'tableName',
'type': 'char[]'
}
],
'returns': 'int32'
},
'DisableCalibrationPlane': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'selectorString',
'type': 'char[]'
}
],
'returns': 'int32'
},
'EnableCalibrationPlane': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'selectorString',
'type': 'char[]'
}
],
'returns': 'int32'
},
'ExportSignal': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'exportSignalSource',
'type': 'int32'
},
{
'direction': 'in',
'name': 'exportSignalOutputTerminal',
'type': 'char[]'
}
],
'returns': 'int32'
},
'GetAttributeF32': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'out',
'name': 'attrVal',
'type': 'float32'
}
],
'returns': 'int32'
},
'GetAttributeF32Array': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'out',
'name': 'attrVal',
'size': {
'mechanism': 'ivi-dance-with-a-twist',
'value': 'arraySize',
'value_twist': 'actualArraySize'
},
'type': 'float32[]'
},
{
'direction': 'in',
'name': 'arraySize',
'type': 'int32'
},
{
'direction': 'out',
'name': 'actualArraySize',
'type': 'int32'
}
],
'returns': 'int32'
},
'GetAttributeF64': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'out',
'name': 'attrVal',
'type': 'float64'
}
],
'returns': 'int32'
},
'GetAttributeF64Array': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'out',
'name': 'attrVal',
'size': {
'mechanism': 'ivi-dance-with-a-twist',
'value': 'arraySize',
'value_twist': 'actualArraySize'
},
'type': 'float64[]'
},
{
'direction': 'in',
'name': 'arraySize',
'type': 'int32'
},
{
'direction': 'out',
'name': 'actualArraySize',
'type': 'int32'
}
],
'returns': 'int32'
},
'GetAttributeI16': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'out',
'name': 'attrVal',
'type': 'int16'
}
],
'returns': 'int32'
},
'GetAttributeI32': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'out',
'name': 'attrVal',
'type': 'int32'
}
],
'returns': 'int32'
},
'GetAttributeI32Array': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'out',
'name': 'attrVal',
'size': {
'mechanism': 'ivi-dance-with-a-twist',
'value': 'arraySize',
'value_twist': 'actualArraySize'
},
'type': 'int32[]'
},
{
'direction': 'in',
'name': 'arraySize',
'type': 'int32'
},
{
'direction': 'out',
'name': 'actualArraySize',
'type': 'int32'
}
],
'returns': 'int32'
},
'GetAttributeI64': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'out',
'name': 'attrVal',
'type': 'int64'
}
],
'returns': 'int32'
},
'GetAttributeI64Array': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'out',
'name': 'attrVal',
'size': {
'mechanism': 'ivi-dance-with-a-twist',
'value': 'arraySize',
'value_twist': 'actualArraySize'
},
'type': 'int64[]'
},
{
'direction': 'in',
'name': 'arraySize',
'type': 'int32'
},
{
'direction': 'out',
'name': 'actualArraySize',
'type': 'int32'
}
],
'returns': 'int32'
},
'GetAttributeI8': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'coerced': True,
'direction': 'out',
'name': 'attrVal',
'type': 'int8'
}
],
'returns': 'int32'
},
'GetAttributeI8Array': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'coerced': True,
'direction': 'out',
'name': 'attrVal',
'size': {
'mechanism': 'ivi-dance-with-a-twist',
'value': 'arraySize',
'value_twist': 'actualArraySize'
},
'type': 'int8[]'
},
{
'direction': 'in',
'name': 'arraySize',
'type': 'int32'
},
{
'direction': 'out',
'name': 'actualArraySize',
'type': 'int32'
}
],
'returns': 'int32'
},
'GetAttributeNIComplexDoubleArray': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'out',
'name': 'attrVal',
'size': {
'mechanism': 'ivi-dance-with-a-twist',
'value': 'arraySize',
'value_twist': 'actualArraySize'
},
'type': 'NIComplexDouble[]'
},
{
'direction': 'in',
'name': 'arraySize',
'type': 'int32'
},
{
'direction': 'out',
'name': 'actualArraySize',
'type': 'int32'
}
],
'returns': 'int32'
},
'GetAttributeNIComplexSingleArray': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'out',
'name': 'attrVal',
'size': {
'mechanism': 'ivi-dance-with-a-twist',
'value': 'arraySize',
'value_twist': 'actualArraySize'
},
'type': 'NIComplexSingle[]'
},
{
'direction': 'in',
'name': 'arraySize',
'type': 'int32'
},
{
'direction': 'out',
'name': 'actualArraySize',
'type': 'int32'
}
],
'returns': 'int32'
},
'GetAttributeString': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'in',
'name': 'arraySize',
'type': 'int32'
},
{
'direction': 'out',
'name': 'attrVal',
'size': {
'mechanism': 'ivi-dance',
'value': 'arraySize'
},
'type': 'char[]'
}
],
'returns': 'int32'
},
'GetAttributeU16': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'out',
'name': 'attrVal',
'type': 'uInt16'
}
],
'returns': 'int32'
},
'GetAttributeU32': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'out',
'name': 'attrVal',
'type': 'uInt32'
}
],
'returns': 'int32'
},
'GetAttributeU32Array': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'out',
'name': 'attrVal',
'size': {
'mechanism': 'ivi-dance-with-a-twist',
'value': 'arraySize',
'value_twist': 'actualArraySize'
},
'type': 'uInt32[]'
},
{
'direction': 'in',
'name': 'arraySize',
'type': 'int32'
},
{
'direction': 'out',
'name': 'actualArraySize',
'type': 'int32'
}
],
'returns': 'int32'
},
'GetAttributeU64Array': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'out',
'name': 'attrVal',
'size': {
'mechanism': 'ivi-dance-with-a-twist',
'value': 'arraySize',
'value_twist': 'actualArraySize'
},
'type': 'uInt64[]'
},
{
'direction': 'in',
'name': 'arraySize',
'type': 'int32'
},
{
'direction': 'out',
'name': 'actualArraySize',
'type': 'int32'
}
],
'returns': 'int32'
},
'GetAttributeU8': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'out',
'name': 'attrVal',
'type': 'uInt8'
}
],
'returns': 'int32'
},
'GetAttributeU8Array': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'out',
'name': 'attrVal',
'size': {
'mechanism': 'ivi-dance-with-a-twist',
'value': 'arraySize',
'value_twist': 'actualArraySize'
},
'type': 'uInt8[]'
},
{
'direction': 'in',
'name': 'arraySize',
'type': 'int32'
},
{
'direction': 'out',
'name': 'actualArraySize',
'type': 'int32'
}
],
'returns': 'int32'
},
'GetAvailablePorts': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'selectorString',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'arraySize',
'type': 'int32'
},
{
'direction': 'out',
'name': 'availablePorts',
'size': {
'mechanism': 'ivi-dance',
'value': 'arraySize'
},
'type': 'char[]'
}
],
'returns': 'int32'
},
'GetError': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'out',
'name': 'errorCode',
'type': 'int32'
},
{
'direction': 'in',
'name': 'errorDescriptionBufferSize',
'type': 'int32'
},
{
'direction': 'out',
'name': 'errorDescription',
'size': {
'mechanism': 'ivi-dance',
'value': 'errorDescriptionBufferSize'
},
'type': 'char[]'
}
],
'returns': 'int32'
},
'GetErrorString': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'errorCode',
'type': 'int32'
},
{
'direction': 'in',
'name': 'errorDescriptionBufferSize',
'type': 'int32'
},
{
'direction': 'out',
'name': 'errorDescription',
'size': {
'mechanism': 'ivi-dance',
'value': 'errorDescriptionBufferSize'
},
'type': 'char[]'
}
],
'returns': 'int32'
},
'GetExternalAttenuationTableActualValue': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'selectorString',
'type': 'char[]'
},
{
'direction': 'out',
'name': 'externalAttenuation',
'type': 'float64'
}
],
'returns': 'int32'
},
'GetNIRFSASession': {
'init_method': True,
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'cross_driver_session': 'ViSession',
'direction': 'out',
'grpc_type': "nidevice_grpc.Session",
'name': 'niRfsaSession',
'type': 'uInt32'
}
],
'returns': 'int32'
},
'GetNIRFSASessionArray': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'out',
'name': 'nirfsaSessions',
'size': {
'mechanism': 'ivi-dance-with-a-twist',
'value': 'arraySize',
'value_twist': 'actualArraySize'
},
'type': 'uInt32[]'
},
{
'direction': 'in',
'name': 'arraySize',
'type': 'int32'
},
{
'direction': 'out',
'name': 'actualArraySize',
'type': 'int32'
}
],
'returns': 'int32'
},
'GetSelfCalibrateLastDateAndTime': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'selectorString',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'selfCalibrateStep',
'type': 'int64'
},
{
'direction': 'out',
'name': 'timestamp',
'type': 'CVIAbsoluteTime'
}
],
'returns': 'int32'
},
'GetSelfCalibrateLastTemperature': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'selectorString',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'selfCalibrateStep',
'type': 'int64'
},
{
'direction': 'out',
'name': 'temperature',
'type': 'float64'
}
],
'returns': 'int32'
},
'Initialize': {
'custom_close': 'Close(id, RFMXINSTR_VAL_FALSE)',
'init_method': True,
'parameters': [
{
'direction': 'in',
'name': 'resourceName',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'optionString',
'type': 'char[]'
},
{
'direction': 'out',
'grpc_name': 'instrument',
'name': 'handleOut',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'out',
'name': 'isNewSession',
'type': 'int32'
}
],
'returns': 'int32'
},
'InitializeFromNIRFSASession': {
'custom_close': 'Close(id, RFMXINSTR_VAL_FALSE)',
'init_method': True,
'parameters': [
{
'cross_driver_session': 'ViSession',
'direction': 'in',
'grpc_type': "nidevice_grpc.Session",
'name': 'nirfsaSession',
'type': 'uInt32'
},
{
'direction': 'out',
'grpc_name': 'instrument',
'name': 'handleOut',
'type': 'niRFmxInstrHandle'
}
],
'returns': 'int32'
},
'InitializeFromNIRFSASessionArray': {
'custom_close': 'Close(id, RFMXINSTR_VAL_FALSE)',
'init_method': True,
'parameters': [
{
'direction': 'in',
'name': 'nirfsaSessions',
'size': {
'mechanism': 'len',
'value': 'numberOfNIRFSASessions'
},
'type': 'uInt32[]'
},
{
'direction': 'in',
'name': 'numberOfNIRFSASessions',
'type': 'int32'
},
{
'direction': 'out',
'grpc_name': 'instrument',
'name': 'handleOut',
'type': 'niRFmxInstrHandle'
}
],
'returns': 'int32'
},
'IsSelfCalibrateValid': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'selectorString',
'type': 'char[]'
},
{
'direction': 'out',
'name': 'selfCalibrateValid',
'type': 'int32'
},
{
'direction': 'out',
'name': 'validSteps',
'type': 'int32'
}
],
'returns': 'int32'
},
'LoadAllConfigurations': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'filePath',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'loadRFInstrConfiguration',
'type': 'int32'
}
],
'returns': 'int32'
},
'LoadSParameterExternalAttenuationTableFromS2PFile': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'selectorString',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'tableName',
'type': 'char[]'
},
{
'direction': 'in',
'name': 's2PFilePath',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'sParameterOrientation',
'type': 'int32'
}
],
'returns': 'int32'
},
'ResetAttribute': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
}
],
'returns': 'int32'
},
'ResetDriver': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
}
],
'returns': 'int32'
},
'ResetEntireSession': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
}
],
'returns': 'int32'
},
'ResetToDefault': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
}
],
'returns': 'int32'
},
'SaveAllConfigurations': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'filePath',
'type': 'char[]'
}
],
'returns': 'int32'
},
'SelectActiveExternalAttenuationTable': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'selectorString',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'tableName',
'type': 'char[]'
}
],
'returns': 'int32'
},
'SelfCalibrate': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'selectorString',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'stepsToOmit',
'type': 'int32'
}
],
'returns': 'int32'
},
'SelfCalibrateRange': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'selectorString',
'type': 'char[]'
},
{
'direction': 'in',
'name': 'stepsToOmit',
'type': 'int32'
},
{
'direction': 'in',
'name': 'minimumFrequency',
'type': 'float64'
},
{
'direction': 'in',
'name': 'maximumFrequency',
'type': 'float64'
},
{
'direction': 'in',
'name': 'minimumReferenceLevel',
'type': 'float64'
},
{
'direction': 'in',
'name': 'maximumReferenceLevel',
'type': 'float64'
}
],
'returns': 'int32'
},
'SendSoftwareEdgeAdvanceTrigger': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
}
],
'returns': 'int32'
},
'SendSoftwareEdgeStartTrigger': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
}
],
'returns': 'int32'
},
'SetAttributeF32': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'in',
'name': 'attrVal',
'type': 'float32'
}
],
'returns': 'int32'
},
'SetAttributeF32Array': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'in',
'name': 'attrVal',
'size': {
'mechanism': 'len',
'value': 'arraySize'
},
'type': 'float32[]'
},
{
'direction': 'in',
'name': 'arraySize',
'type': 'int32'
}
],
'returns': 'int32'
},
'SetAttributeF64': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'in',
'name': 'attrVal',
'type': 'float64'
}
],
'returns': 'int32'
},
'SetAttributeF64Array': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'in',
'name': 'attrVal',
'size': {
'mechanism': 'len',
'value': 'arraySize'
},
'type': 'float64[]'
},
{
'direction': 'in',
'name': 'arraySize',
'type': 'int32'
}
],
'returns': 'int32'
},
'SetAttributeI16': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'in',
'name': 'attrVal',
'type': 'int16'
}
],
'returns': 'int32'
},
'SetAttributeI32': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'in',
'name': 'attrVal',
'type': 'int32'
}
],
'returns': 'int32'
},
'SetAttributeI32Array': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'in',
'name': 'attrVal',
'size': {
'mechanism': 'len',
'value': 'arraySize'
},
'type': 'int32[]'
},
{
'direction': 'in',
'name': 'arraySize',
'type': 'int32'
}
],
'returns': 'int32'
},
'SetAttributeI64': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'in',
'name': 'attrVal',
'type': 'int64'
}
],
'returns': 'int32'
},
'SetAttributeI64Array': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'in',
'name': 'attrVal',
'size': {
'mechanism': 'len',
'value': 'arraySize'
},
'type': 'int64[]'
},
{
'direction': 'in',
'name': 'arraySize',
'type': 'int32'
}
],
'returns': 'int32'
},
'SetAttributeI8': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'coerced': True,
'direction': 'in',
'name': 'attrVal',
'type': 'int8'
}
],
'returns': 'int32'
},
'SetAttributeI8Array': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'coerced': True,
'direction': 'in',
'name': 'attrVal',
'size': {
'mechanism': 'len',
'value': 'arraySize'
},
'type': 'int8[]'
},
{
'direction': 'in',
'name': 'arraySize',
'type': 'int32'
}
],
'returns': 'int32'
},
'SetAttributeNIComplexDoubleArray': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'in',
'name': 'attrVal',
'size': {
'mechanism': 'len',
'value': 'arraySize'
},
'type': 'NIComplexDouble[]'
},
{
'direction': 'in',
'name': 'arraySize',
'type': 'int32'
}
],
'returns': 'int32'
},
'SetAttributeNIComplexSingleArray': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'in',
'name': 'attrVal',
'size': {
'mechanism': 'len',
'value': 'arraySize'
},
'type': 'NIComplexSingle[]'
},
{
'direction': 'in',
'name': 'arraySize',
'type': 'int32'
}
],
'returns': 'int32'
},
'SetAttributeString': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'in',
'name': 'attrVal',
'type': 'char[]'
}
],
'returns': 'int32'
},
'SetAttributeU16': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'in',
'name': 'attrVal',
'type': 'uInt16'
}
],
'returns': 'int32'
},
'SetAttributeU32': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'in',
'name': 'attrVal',
'type': 'uInt32'
}
],
'returns': 'int32'
},
'SetAttributeU32Array': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'in',
'name': 'attrVal',
'size': {
'mechanism': 'len',
'value': 'arraySize'
},
'type': 'uInt32[]'
},
{
'direction': 'in',
'name': 'arraySize',
'type': 'int32'
}
],
'returns': 'int32'
},
'SetAttributeU64Array': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'in',
'name': 'attrVal',
'size': {
'mechanism': 'len',
'value': 'arraySize'
},
'type': 'uInt64[]'
},
{
'direction': 'in',
'name': 'arraySize',
'type': 'int32'
}
],
'returns': 'int32'
},
'SetAttributeU8': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'in',
'name': 'attrVal',
'type': 'uInt8'
}
],
'returns': 'int32'
},
'SetAttributeU8Array': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'channelName',
'type': 'char[]'
},
{
'direction': 'in',
'grpc_type': 'NiRFmxInstrAttribute',
'name': 'attributeID',
'type': 'int32'
},
{
'direction': 'in',
'name': 'attrVal',
'size': {
'mechanism': 'len',
'value': 'arraySize'
},
'type': 'uInt8[]'
},
{
'direction': 'in',
'name': 'arraySize',
'type': 'int32'
}
],
'returns': 'int32'
},
'TimestampFromValues': {
'parameters': [
{
'direction': 'in',
'name': 'secondsSince1970',
'type': 'int64'
},
{
'direction': 'in',
'name': 'fractionalSeconds',
'type': 'float64'
},
{
'direction': 'out',
'name': 'timestamp',
'type': 'CVIAbsoluteTime'
}
],
'returns': 'int32'
},
'ValuesFromTimestamp': {
'parameters': [
{
'direction': 'in',
'name': 'timestamp',
'type': 'CVIAbsoluteTime'
},
{
'direction': 'out',
'name': 'secondsSince1970',
'type': 'int64'
},
{
'direction': 'out',
'name': 'fractionalSeconds',
'type': 'float64'
}
],
'returns': 'int32'
},
'WaitForAcquisitionComplete': {
'parameters': [
{
'direction': 'in',
'grpc_name': 'instrument',
'name': 'instrumentHandle',
'type': 'niRFmxInstrHandle'
},
{
'direction': 'in',
'name': 'timeout',
'type': 'float64'
}
],
'returns': 'int32'
}
}
| StarcoderdataPython |
1661671 | <reponame>vidyadeepa/the-coding-interview
def increment(occurences, i):
    """Add one to the count stored for key *i* in *occurences* (in place)."""
    # dict.get folds the membership test and the update into one idiomatic
    # line; the original built a .keys() view just to check containment.
    occurences[i] = occurences.get(i, 0) + 1
def even_element(l):
    """Return the first value of *l* that occurs an even number of times.

    "First" follows insertion order of the distinct values; returns None
    when no value has an even count.
    Runtime: O(n).
    """
    occurences = {}
    # Plain loop: the original abused a list comprehension purely for its
    # side effects, building a throwaway list of Nones.
    for item in l:
        occurences[item] = occurences.get(item, 0) + 1
    for value, count in occurences.items():
        if count % 2 == 0:
            return value
    return None
print even_element([1,2,32,634,664,32])
| StarcoderdataPython |
3479190 | import logging
from collections import Callable
from pathlib import Path
import time
from pygraphviz import AGraph
from src.env import get_env
def draw_nf_graph(add_nodes_edges: Callable[[AGraph], None]) -> None:
    """Render the NF graph to a timestamped SVG when visualisation is on.

    :param add_nodes_edges: callback that populates the given AGraph with
        nodes and edges; its return value is ignored.
        (Annotation fixed: ``Callable[[AGraph]]`` is not a valid
        subscription -- Callable needs an argument list *and* a return type.)
    """
    # Visualisation is opt-in via the environment; bail out early otherwise.
    if not get_env().debug.nf_visualize:
        return
    graph = AGraph(directed=True)
    logging.info("DRAWING NF GRAPH...")
    add_nodes_edges(graph)
    # "fdp" is graphviz's force-directed layout engine.
    graph.layout(prog="fdp")
    vis_dir = Path(__file__).parent / "nf_viz"
    vis_dir.mkdir(parents=True, exist_ok=True)
    # Timestamped filename so successive runs do not overwrite each other.
    graph.draw(path=vis_dir / f"nf_viz_{time.time()}.svg")
| StarcoderdataPython |
6616923 | import pyjadx
import argparse
import threading
from multiprocessing import Process
from threading import Thread
import time
def load_apk(apk, i):
    """Thread worker #*i*: load *apk* with a fresh Jadx instance."""
    print(apk, i)
    jadx = pyjadx.Jadx()
    # Bug fix: use the `apk` parameter instead of the global `args.apk`,
    # so the worker is usable outside the __main__ argparse flow.
    jadx.load(apk)
def test_threads(apk):
    """Spawn three workers that each load *apk*, then wait for them all."""
    workers = []
    for worker_id in range(3):
        worker = Thread(target=load_apk, args=[apk, worker_id])
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Load an APK with Pyjadx")
parser.add_argument("apk", help="Target apk")
args = parser.parse_args()
test_threads(args.apk)
| StarcoderdataPython |
9607599 | import re
def word_count(phrase):
    """Count each word of *phrase*, case-insensitively.

    A word is a run of alphanumerics optionally followed by an
    apostrophe-joined tail (e.g. "don't"); surrounding apostrophes are
    stripped and words lowercased before counting.  Returns a dict
    mapping word -> occurrence count.
    """
    word_re = re.compile(r"""[a-zA-Z0-9]+'*[a-zA-Z]*""")
    counts = {}
    for token in word_re.findall(phrase):
        word = token.strip("'").lower()
        counts[word] = counts.get(word, 0) + 1
    return counts
| StarcoderdataPython |
class Board:
    '''An 8x8 checkers board tracking which tiles are occupied and by whom.

    Tile markers:
        "o"   empty tile
        "p1"  piece of player 1 ("p1c" once crowned on reaching row 0)
        "p2"  piece of player 2 ("p2c" once crowned on reaching row 7)
    '''

    def __init__(self):
        """Initialize an 8x8 board with 12 pieces per player in the
        standard checkers starting layout."""
        self.board = []
        self.board.append(["p2", "o", "p2", "o", "p2", "o", "p2", "o"])
        self.board.append(["o", "p2", "o", "p2", "o", "p2", "o", "p2"])
        self.board.append(["p2", "o", "p2", "o", "p2", "o", "p2", "o"])
        self.board.append(["o", "o", "o", "o", "o", "o", "o", "o"])
        self.board.append(["o", "o", "o", "o", "o", "o", "o", "o"])
        self.board.append(["o", "p1", "o", "p1", "o", "p1", "o", "p1"])
        self.board.append(["p1", "o", "p1", "o", "p1", "o", "p1", "o"])
        self.board.append(["o", "p1", "o", "p1", "o", "p1", "o", "p1"])

    def is_tile_empty(self, x, y):
        """Return the marker string stored at board[x][y].

        NOTE(review): despite the name, this returns the marker ("o",
        "p1", ...) rather than a boolean, and it indexes [x][y] while
        fill_tile indexes [y][x]; both historical conventions are kept
        so existing callers keep working.
        """
        return self.board[x][y]

    def empty_tile(self, x, y):
        """Mark the tile at board[x][y] as empty."""
        self.board[x][y] = "o"

    def fill_tile(self, x, y, player):
        """Occupy board[y][x] with *player*, crowning a piece that
        reaches its far row.

        @param x horizontal index of the tile
        @param y vertical index of the tile
        @param player "p1"/"p2" (or "p1c"/"p2c" for already-crowned pieces)
        """
        if (y == 0 and player == "p1") or player == "p1c":
            # Bug fix: the original wrote to board[y][y] here, crowning
            # the wrong column whenever x != y.
            self.board[y][x] = "p1c"
        elif (y == 7 and player == "p2") or player == "p2c":
            self.board[y][x] = "p2c"
        else:
            self.board[y][x] = player

    def clear_board(self):
        """Remove every piece from the board."""
        for x in range(8):
            for y in range(8):
                self.board[x][y] = "o"
| StarcoderdataPython |
6502511 | <gh_stars>100-1000
import collections
import sys
from lib.core import Tokenizer
from lib.utilities import url_to_json
# Per-author commit counts for one project, most prolific author first.
# {0} is filled with the (integer) project id via str.format in run().
QUERY = '''
SELECT c.author_id, COUNT(*)
FROM commits c JOIN project_commits pc
ON pc.commit_id = c.id
WHERE pc.project_id = {0}
GROUP BY c.author_id
ORDER BY COUNT(*) DESC
'''
def run(project_id, repo_path, cursor, **options):
    """Decide whether *project_id* has enough core contributors.

    Authors are walked from most to least prolific; they count as "core"
    until their cumulative share of commits reaches ``options['cutoff']``
    (default 1.0, i.e. everyone counts).  *repo_path* is accepted for the
    plugin interface but unused here.

    Returns (num_core >= options['threshold'], num_core).
    """
    cursor.execute(QUERY.format(project_id))
    rows = cursor.fetchall()
    if cursor.rowcount == 0:  # non-existent history
        return False, 0
    commit_counts = collections.OrderedDict(rows)
    total_commits = sum(commit_counts.values())
    cutoff = options.get('cutoff', 1.0)
    num_core = 0
    covered = 0
    for count in commit_counts.values():
        num_core += 1
        covered += count
        if (covered / total_commits) >= cutoff:
            break
    return (num_core >= options['threshold'], num_core)
if __name__ == '__main__':
    # This module is a plugin driven by the attribute runner, not a script.
    print('Attribute plugins are not meant to be executed directly.')
    sys.exit(1)
| StarcoderdataPython |
298749 | <gh_stars>0
import time
# FrogRiverOne (Easy)
def solution(X, A):
    """FrogRiverOne: earliest second at which leaves cover every position
    1..X, or -1 if that never happens.

    A[t] is the position where a leaf falls at second t.  Generalized
    beyond the Codility constraints: values outside 1..X are ignored
    (the original decremented its counter for them and could report an
    early, wrong second).
    """
    if X > len(A):
        return -1  # fewer leaves than positions: impossible
    covered = set()
    for second, position in enumerate(A):
        if 1 <= position <= X and position not in covered:
            covered.add(position)
            if len(covered) == X:
                return second
    return -1
if __name__ == "__main__":
start = time.time()
print(solution(5, [1, 3, 1, 4, 2, 3, 5, 4]))
end = time.time()
print(end - start)
start = time.time()
print(solution(5, [1, 3, 1, 4, 2, 3, 1, 4]))
end = time.time()
print(end - start)
start = time.time()
print(solution(10, [1, 3, 1, 4, 2, 3, 5, 4, 8, 7, 7, 7, 1, 6, 9, 2, 10]))
end = time.time()
print(end - start)
start = time.time()
print(solution(10, []))
end = time.time()
print(end - start)
start = time.time()
print(solution(10, [1]))
end = time.time()
print(end - start)
start = time.time()
print(solution(3, [1, 1, 1, 1, 1, 1, 1, 1, 1]))
end = time.time()
print(end - start)
| StarcoderdataPython |
3563644 | <filename>src/scrap008.py
# https://medium.com/@JuanPabloHerrera/use-python-and-web-scraping-to-go-on-your-dream-vacation-ba965687e4b5
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
import time
import pandas as pd
#######################################################################
import pandas as pd
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
from email.mime.text import MIMEText
from password import password
#######################################################################
# This is the path where I stored my chromedriver
PATH = "/Users/juanpih19/Desktop/Programs/chromedriver"
class AirbnbBot:
    """Selenium bot that fills in an Airbnb search and scrapes page one.

    NOTE(review): all XPaths/CSS selectors below are tied to a specific
    Airbnb page layout and will break when the site markup changes.
    """
    # Class constructor that takes location, stay (Month, Week, Weekend)
    # Number of guests and type of guests (Adults, Children, Infants)
    def __init__(self, location, stay, number_guests, type_guests):
        self.location = location
        self.stay = stay
        self.number_guests = number_guests
        self.type_guests = type_guests
        self.driver = webdriver.Chrome(PATH)

    # The 'search()' function will do the searching based on user input
    def search(self):
        """Drive the browser through the Airbnb search form and submit it."""
        # The driver will take us to the Airbnb website
        self.driver.get('https://www.airbnb.com')
        time.sleep(1)
        # This will find the location's tab xpath, type the desired location
        # and hit enter so we move the driver to the next tab (check in)
        location = self.driver.find_element_by_xpath('//*[@id="bigsearch-query-detached-query-input"]')
        location.send_keys(Keys.RETURN)
        location.send_keys(self.location)
        location.send_keys(Keys.RETURN)
        # It was difficult to scrape every number on the calendar
        # so both the check in and check out dates are flexible.
        flexible = location.find_element_by_xpath('//*[@id="tab--tabs--1"]')
        flexible.click()
        # Even though we have flexible dates, we can choose if
        # the stay is for the weekend or for a week or month
        # if stay is for a weekend we find the xpath, click it and hit enter
        if self.stay in ['Weekend', 'weekend']:
            weekend = self.driver.find_element_by_xpath('//*[@id="flexible_trip_lengths-weekend_trip"]/button')
            weekend.click()
            weekend.send_keys(Keys.RETURN)
        # if stay is for a week we find the xpath, click it and hit enter
        elif self.stay in ['Week', 'week']:
            week = self.driver.find_element_by_xpath('//*[@id="flexible_trip_lengths-one_week"]/button')
            week.click()
            week.send_keys(Keys.RETURN)
        # if stay is for a month we find the xpath, click it and hit enter
        elif self.stay in ['Month', 'month']:
            month = self.driver.find_element_by_xpath('//*[@id="flexible_trip_lengths-one_month"]/button')
            month.click()
            month.send_keys(Keys.RETURN)
        else:
            pass
        # Finds the guests xpath and clicks it
        guest_button = self.driver.find_element_by_xpath('/html/body/div[5]/div/div/div[1]/div/div/div[1]/div[1]/div/header/div/div[2]/div[2]/div/div/div/form/div[2]/div/div[5]/div[1]')
        guest_button.click()
        # Based on user input self.type_guests and self.number_guests
        # if type_guests are adults
        # it will add as many adults as assigned on self.number_guests
        if self.type_guests in ['Adults', 'adults']:
            adults = self.driver.find_element_by_xpath('//*[@id="stepper-adults"]/button[2]')
            for num in range(int(self.number_guests)):
                adults.click()
        # if type_guests are children
        # it will add as many children as assigned on self.number_guests
        elif self.type_guests in ['Children', 'children']:
            children = self.driver.find_element_by_xpath('//*[@id="stepper-children"]/button[2]')
            for num in range(int(self.number_guests)):
                children.click()
        # if type_guests are infants
        # it will add as many infants as assigned on self.number_guests
        elif self.type_guests in ['Infants', 'infants']:
            infants = self.driver.find_element_by_xpath('//*[@id="stepper-infants"]/button[2]')
            for num in range(int(self.number_guests)):
                infants.click()
        else:
            pass
        # Guests tab is the last tab that we need to fill before searching
        # If I hit enter the driver would not search
        # I decided to click on a random place so I could find the search's button xpath
        x = self.driver.find_element_by_xpath('//*[@id="field-guide-toggle"]')
        x.click()
        x.send_keys(Keys.RETURN)
        # I find the search button snd click in it to search for all options
        search = self.driver.find_element_by_css_selector('button._sxfp92z')
        search.click()

    # This function will scrape all the information about every option
    # on the first page
    def scraping_aribnb(self):
        """Scrape the current results page and save it to Airbnb_data.csv.

        NOTE(review): the method name typo ("aribnb") is preserved because
        callers (the __main__ block) rely on it.
        """
        # Maximize the window
        self.driver.maximize_window()
        # Gets the current page sourse
        src = self.driver.page_source
        # We create a BeautifulSoup object and feed it the current page source
        soup = BeautifulSoup(src, features='lxml')
        # Find the class that contains all the options and store it
        # on list_of_houses variable
        list_of_houses = soup.find('div', class_ = "_fhph4u")
        # Type of properties list - using find_all function
        # found the class that contains all the types of properties
        # Used a list comp to append them to list_type_property
        type_of_property = list_of_houses.find_all('div', class_="_1tanv1h")
        list_type_property = [ i.text for i in type_of_property]
        # Host description list - using find_all function
        # found the class that contains all the host descriptions
        # Used a list comp to append them to list_host_description
        host_description = list_of_houses.find_all('div', class_='_5kaapu')
        list_host_description = [ i.text for i in host_description]
        # Number of bedrooms and bathrooms - using find_all function
        # bedrooms_bathrooms and other_amenities used the same class
        # Did some slicing so I could append each item to the right list
        number_of_bedrooms_bathrooms = list_of_houses.find_all('div', class_="_3c0zz1")
        list_bedrooms_bathrooms = [ i.text for i in number_of_bedrooms_bathrooms]
        bedrooms_bathrooms = []
        other_amenities = []
        # Alternate entries: even indexes are bedroom/bathroom counts,
        # odd indexes are the other-amenities text.
        bedrooms_bathrooms = list_bedrooms_bathrooms[::2]
        other_amenities = list_bedrooms_bathrooms[1::2]
        # Date - using find_all function
        # found the class that contains all the dates
        # Used a list comp to append them to list_date
        dates = list_of_houses.find_all('div', class_="_1v92qf0")
        list_dates = [date.text for date in dates]
        # Stars - using find_all function
        # found the class that contains all the stars
        # Used a list comp to append them to list_stars
        stars = list_of_houses.find_all('div', class_ = "_1hxyyw3")
        list_stars = [star.text[:3] for star in stars]
        # Price - using find_all function
        # found the class that contains all the prices
        # Used a list comp to append them to list_prices
        prices = list_of_houses.find_all('div', class_ = "_1gi6jw3f" )
        list_prices = [price.text for price in prices ]
        # putting the lists with data into a Pandas data frame
        # NOTE(review): list_stars is collected but never written to the
        # DataFrame -- confirm whether ratings were meant to be saved.
        airbnb_data = pd.DataFrame({'Type' : list_type_property, 'Host description': list_host_description, 'Bedrooms & bathrooms': bedrooms_bathrooms, 'Other amenities': other_amenities,
                                    'Date': list_dates, 'Price': list_prices})
        # Saving the DataFrame to a csv file
        airbnb_data.to_csv('Airbnb_data.csv', index=False)
########################################################################################
class Traveler:
    """Filters the scraped Airbnb CSV by budget and emails the result."""
    # Email Address so user can receive the filtered data
    # Stay: checks if it will be a week, month or weekend
    def __init__(self, email, stay):
        self.email = email
        self.stay = stay

    # This function creates a new csv file based on the options
    # that the user can afford
    def price_filter(self, amount):
        """Write rows priced at or below *amount* to filtered_data.csv.

        NOTE(review): the price parsing below assumes fixed-width price
        strings ("$1,600 / month" -> chars 1:6, "$80 / night" -> chars
        1:4); prices outside those widths would mis-parse -- confirm
        against real scraped data.
        """
        # The user will stay a month
        if self.stay in ['Month', 'month']:
            data = pd.read_csv('Airbnb_data.csv')
            # Monthly prices are usually over a $1,000.
            # Airbnb includes a comma in thousands making it hard to transform it
            # from string to int.
            # This will create a column that takes only the digits
            # For example: $1,600 / month, this slicing will only take 1,600
            data['cleaned price'] = data['Price'].str[1:6]
            # list comp to replace every comma of every row with an empty space
            _l = [i.replace(',', '') for i in data['cleaned price']]
            data['cleaned price'] = _l
            # Once we got rid of commas, we convert every row to an int value
            int_ = [int(i) for i in data['cleaned price']]
            data['cleaned price'] = int_
            # We look for prices that are within the user's range
            # and save that to a new csv file
            result = data[data['cleaned price'] <= amount]
            return result.to_csv('filtered_data.csv', index=False)
        # The user will stay a weekend
        elif self.stay in ['Weekend', 'weekend', 'week', 'Week']:
            data = pd.read_csv('Airbnb_data.csv')
            # Prices per night are usually between 2 and 3 digits. Example: $50 or $100
            # This will create a column that takes only the digits
            # For example: $80 / night, this slicing will only take 80
            data['cleaned price'] = data['Price'].str[1:4]
            # This time I used the map() instead of list comp but it does the same thing.
            data['cleaned price'] = list(map(int, data['cleaned price']))
            # We look for prices that are within the user's range
            # and save that to a new csv file
            filtered_data = data[data['cleaned price'] <= amount]
            return filtered_data.to_csv('filtered_data.csv', index=False)
        else:
            pass

    def send_mail(self):
        """Email filtered_data.csv to self.email via Gmail SMTP.

        NOTE(review): sender addresses were redacted in this copy
        ('<EMAIL>' placeholders) and the password comes from a local
        `password` module -- fill those in before use.
        """
        # Create a multipart message
        # It takes the message body, subject, sender, receiver
        msg = MIMEMultipart()
        MESSAGE_BODY = 'Here is the list with possible options for your dream vacation'
        body_part = MIMEText(MESSAGE_BODY, 'plain')
        msg['Subject'] = "Filtered list of possible airbnb's"
        msg['From'] = '<EMAIL>'
        msg['To'] = self.email
        # Attaching the body part to the message
        msg.attach(body_part)
        # open and read the CSV file in binary
        with open('filtered_data.csv','rb') as file:
            # Attach the file with filename to the email
            msg.attach(MIMEApplication(file.read(), Name='filtered_data.csv'))
        # Create SMTP object
        smtp_obj = smtplib.SMTP('smtp.gmail.com', 587)
        smtp_obj.starttls()
        # Login to the server, email and password of the sender
        smtp_obj.login('<EMAIL>', password)
        # Convert the message to a string and send it
        smtp_obj.sendmail(msg['From'], msg['To'], msg.as_string())
        smtp_obj.quit()
if __name__ == '__main__':
    # Drive the scraper: search New York for a week's stay for two adults,
    # then scrape the first results page into Airbnb_data.csv.
    vacation = AirbnbBot('New York', 'week', '2', 'adults')
    vacation.search()
    time.sleep(2)
    vacation.scraping_aribnb()

# Kept for reference: example of filtering the scraped data and emailing it.
'''
if __name__ == "__main__":
    my_traveler = Traveler( '<EMAIL>', 'week' )
    my_traveler.price_filter(80)
    my_traveler.send_mail()
'''
| StarcoderdataPython |
12818653 | <reponame>artem7902/OSBot-AWS
import sys ; sys.path.append('..')
import unittest
from unittest import TestCase
from pbx_gs_python_utils.utils.Dev import Dev
from pbx_gs_python_utils.utils.Misc import Misc
from osbot_aws.apis.Fargate import Fargate
@unittest.skip("Needs test that create and destroy the test data")
class test_Fargate(TestCase):
def setUp(self):
self.fargate = Fargate(account_id='244560807427')
def test__init__(self):
assert type(self.fargate.ecs).__name__ == 'ECS'
# def test_cluster_delete(self):
# clusters_arns = self.fargate.clusters()
# result = self.fargate.cluster_delete(clusters_arns[0])
# Dev.pprint(result)
def test_clusters(self):
result = self.fargate.clusters()
if len(result) > 0:
assert 'arn:aws:ecs:' in result[0]
def test_policy_create_for_task_role(self):
self.fargate.policy_create_for_task_role('task_role_create_and_run_task')
def test_policy_create_for_execution_role(self):
self.fargate.policy_create_for_execution_role('task_execution_role_create_and_run_task')
# def test_task_create(self):
# task_role_arn = 'aaa'
# execution_role_arn = 'bbb'
# result = self.fargate.task_create(task_role_arn,execution_role_arn)
# task_arn = result.get('taskDefinitionArn')
# print()
# print(task_arn)
#self.fargate.task_delete(task_arn) # delete it
#print(Misc.word_wrap(result,60))
def test_task_delete(self):
tasks = self.fargate.tasks()
for task in tasks:
if 'family_created_via' in task:
Dev.pprint('deleting task: {0}'.format(task))
self.fargate.task_delete(task)
def test_task_run(self):
# ec2 = Ec2()
cluster = 'FargateCluster'
subnet_id = ' subnet-49391932'
security_group_id = 'sg-e6ea548e'
task_name = 'create_and_run_task:45'
task_arn = 'arn:aws:ecs:eu-west-2:244560807427:task-definition/{0}'.format(task_name)
task_run_arn = self.fargate.task_run(cluster,task_arn, subnet_id, security_group_id).get('taskArn')
task_details = self.fargate.task_wait_for_completion(cluster, task_run_arn, sleep_for=1, log_status = True)
Dev.pprint(task_details)
def test_task_details(self):
task_id ='9400bb6b-f76f-4bce-b35d-5a4b15e79bfd'
cluster = 'FargateCluster'
tast_arn = 'arn:aws:ecs:eu-west-2:244560807427:task/{0}'.format(task_id)
result = self.fargate.task_details(cluster, tast_arn)
Dev.pprint(result)
Dev.pprint(result.get('lastStatus'))
Dev.pprint(result.get('containers'))
def test_tasks(self):
result = self.fargate.tasks()
Dev.pprint(result)
assert len(result) > 1
assert ':task-definition/' in result[0]
def test_run_container_on_temp_cluster(self):
subnet_id = ' subnet-49391932'
security_group_id = 'sg-e6ea548e'
cluster_name = Misc.random_string_and_numbers(6,'temp_cluster_')
task_name = 'temp_task_on_{0}'.format(cluster_name)
task_role = 'task_role_{0}'.format(task_name)
execution_role = 'execution_role_{0}'.format(task_name)
docker_image_name = 'gs-docker-codebuild'
log_group_name = "awslogs-{0}".format(task_name)
log_group_region = "eu-west-2"
log_group_stream_prefix = "awslogs-example"
# Create Cluster
self.fargate.cluster_create(cluster_name)
# Create Roles
self.fargate.policy_create_for_task_role(task_role)
self.fargate.policy_create_for_execution_role(execution_role)
#Create Task
task_arn = self.fargate.task_create(task_name, task_role, execution_role).get('taskDefinitionArn')
# Run Task
task_run = self.fargate.task_run(cluster_name, task_arn, subnet_id, security_group_id)
task_run_arn = task_run.get('taskArn')
task_details = self.fargate.task_wait_for_completion(cluster_name, task_run_arn)
task_id = 'asd'
# Get logs
#group_name = 'awslogs-temp_task_on_temp_cluster_X29B3K'
#stream_name = 'awslogs-example/gs-docker-codebuild/f8ccf213-b642-466c-8458-86af9933eca9'
stream_name = "{0}/{1}{2}".format(log_group_stream_prefix,docker_image_name,task_id)
messages = self.cloud_watch.logs_get_messages(log_group_name, stream_name)
# Print details
Dev.pprint(messages) | StarcoderdataPython |
def main():
    """Print the length of the longest contiguous subarray whose product
    is at most k (two-pointer sliding window).

    Reads "n k" from the first stdin line, then one integer per line.
    If any value is 0, every window containing it has product 0, so the
    whole array qualifies and n is printed.
    """
    n, k = map(int, input().split())
    s = [int(input()) for _ in range(n)]
    if n == 0:
        # Robustness: the original indexed s[0] and crashed on empty input.
        print(0)
        return
    ans = 0
    right = 1
    length = 1
    index = 0
    mul = s[0]  # running product of s[index:right]
    while index < n:
        # Grow the window to the right while the product stays within k.
        while right < n and mul * s[right] <= k:
            mul *= s[right]
            right += 1
            length = right - index
        if s[index] <= mul <= k:
            ans = max(ans, length)
        if s[index] != 0:
            # Bug fix: integer division (//=) instead of /=.  Float
            # division silently loses precision once the product exceeds
            # 2**53; the division is always exact here because mul is a
            # multiple of s[index].
            mul //= s[index]
            index += 1
        else:
            print(n)
            return
    print(ans)
if __name__ == "__main__":
main() | StarcoderdataPython |
1858909 |
class BaseLRSchedule(object):
    """Common base for learning-rate schedules.

    Subclasses compute a rate per epoch and may push it into a
    torch-style optimizer via `_update_optimizer`.
    """

    def _update_optimizer(self, lr, optimizer=None):
        """Set *lr* on every param group of *optimizer* (no-op if None)."""
        if optimizer is None:
            return
        for group in optimizer.param_groups:
            group['lr'] = lr
class Constant(BaseLRSchedule):
    """Schedule that always yields the same learning rate."""

    def __init__(self, base_lr=0.001):
        self.base_lr = base_lr

    def __call__(self, epoch, optimizer=None):
        """Return `base_lr` regardless of *epoch*.

        If optimizer is specified, its learning rate is modified inplace.
        """
        self._update_optimizer(self.base_lr, optimizer)
        return self.base_lr
class Exponential(BaseLRSchedule):
    """Step decay: multiply the rate by `gamma` once every `stepsize` epochs.

    lr(epoch) = base_lr * gamma ** (epoch // stepsize)

    For example, with base_lr=1e-3, gamma=0.01 and stepsize=2 the rates
    for epochs 0..5 are 1e-3, 1e-3, 1e-5, 1e-5, 1e-7, 1e-7.
    """

    def __init__(self, base_lr=0.001, gamma=1.0, stepsize=100):
        self.base_lr = base_lr
        self.gamma = gamma
        self.stepsize = stepsize

    def __call__(self, epoch, optimizer=None):
        """Return the decayed rate for *epoch*.

        If optimizer is specified, its learning rate is modified inplace.
        """
        lr = self.base_lr * self.gamma ** (epoch // self.stepsize)
        self._update_optimizer(lr, optimizer)
        return lr
| StarcoderdataPython |
9617065 | from threading import Thread
import os
import time
def square_numbers():
    """Demo workload: square 0..99, sleeping 0.1 s between squares."""
    for value in range(100):
        value * value  # result deliberately discarded
        time.sleep(0.1)
def main():
    """Start ten threads running square_numbers and wait for them all."""
    workers = [Thread(target=square_numbers) for _ in range(10)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    print("end main")
if __name__ == '__main__':
main() | StarcoderdataPython |
6427062 | """Test which packages give errors when trying to document."""
import importlib
import importlib.metadata
import os

import pip
def builtin_packages():
    """Return the module names from ../builtin_list.txt that fail to import.

    The file is expected to contain one module name per line.
    """
    with open('../builtin_list.txt') as f:
        names = [line.strip() for line in f]
    errors = []
    for name in names:
        try:
            importlib.import_module(name)
        # Bug fix: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; Exception still covers any import-time failure.
        except Exception:
            errors.append(name)
    return errors
def installed_packages():
    """Return the names of installed distributions that fail to import.

    Uses importlib.metadata instead of pip.get_installed_distributions(),
    which was removed from pip's public API in pip 10.
    NOTE(review): a distribution name does not always match its import
    name (e.g. "Pillow" imports as "PIL"); the original had the same
    limitation with `dist.key`.
    """
    errors = []
    for dist in importlib.metadata.distributions():
        # Lowercase to mirror the old pip `dist.key` behaviour.
        name = str(dist.metadata['Name']).lower()
        try:
            importlib.import_module(name)
        except Exception:
            errors.append(name)
    return errors
def test_packages():
    """Check every builtin and installed package, printing a report.

    Returns the combined list of package names that failed to import.
    """
    os.system('cls')  # clear the screen (Windows-only command)
    print("Checking builtin packages... ")
    builtin_errors = builtin_packages()
    print("Done.")
    print("Builtin Packages w/ errors:\n-------------------------")
    for name in builtin_errors:
        print(name)
    print("Checking installed packages... ")
    installed_errors = installed_packages()
    print("Done.")
    print("Installed Packages w/ errors:\n----------------------------")
    for name in installed_errors:
        print(name)
    return builtin_errors + installed_errors
if __name__ == '__main__':
    # Run the full import-check report when executed as a script.
    test_packages()
| StarcoderdataPython |
11237903 | <filename>src/bund/element.py<gh_stars>0
##
##
##
from ctx import Ctx
import zmq.green as zmq
import gevent
from gevent import monkey; monkey.patch_all()
from keyring import KeyRing
class Element:
    """A named element bound to a Ctx, owning a KeyRing scoped to the
    context's namespace."""

    def __init__(self, name, ctx):
        self.ctx = ctx
        self.name = name
        self.namespace = ctx.namespace
        # Bug fix: the original passed the undefined local `namespace`
        # (NameError at construction); the intended value is the
        # context's namespace captured just above.
        self.keys = KeyRing(self.namespace, name)
| StarcoderdataPython |
def avg(n):
    """Return the arithmetic mean of *n*, or None when *n* is empty."""
    if not n:
        # Bug fix: the original returned the undefined name `none`.
        return None
    return sum(n) / len(n)
l = [1, 2, 3, 4, 5]
expected = 3
# Bug fix: `assert expected = avg(l)` was a SyntaxError (assignment
# inside assert); the intended check is equality.
assert expected == avg(l), "Inccorect cal"
print() | StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.